transformers 5.3.0
Version history | Download JSON
-
PyPI https://pypi.org/project/transformers/ Repository https://github.com/huggingface/transformers -
py.typed -
Coverage
%%{init: {"pie": {"textPosition": 0.85}, "theme": "neutral", "themeVariables": {"pieStrokeWidth": "1px"}}}%% pie title "Typed" : 44618 "Any" : 77 "Untyped" : 53747 -
Typables
%%{init: {"pie": {"textPosition": 0.85}, "theme": "neutral", "themeVariables": {"pieStrokeWidth": "1px"}}}%% pie title "functions" : 3671 "classes" : 70858 "other" : 370
- 1170 functions (+28 overloads)
- 2501 parameters
- 4717 classes
- 13033 methods (+23 overloads)
- 57327 parameters
- 442 properties
- 13033 methods (+23 overloads)
- 1719 modules
- 370 attrs
- 1170 functions (+28 overloads)
Modules
Incomplete Annotations
transformers (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
| attr | logger | 1 | 0 | 0 |
| attr | import_structure | 1 | 0 | 0 |
transformers.activations (41 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AccurateGELUActivation.init |
1 | 0 | 0 |
attr |
AccurateGELUActivation.precomputed_constant |
1 | 0 | 0 |
meth |
GELUTanh.init |
2 | 1 | 0 |
attr |
GELUTanh.act |
1 | 0 | 0 |
meth |
GELUActivation.init |
2 | 1 | 0 |
attr |
GELUActivation.act |
1 | 0 | 0 |
attr |
gelu_python |
1 | 0 | 0 |
attr |
mish |
1 | 0 | 0 |
attr |
linear_act |
1 | 0 | 0 |
meth |
XIELUActivation.init |
7 | 0 | 0 |
attr |
XIELUActivation.alpha_p |
1 | 0 | 0 |
attr |
XIELUActivation.alpha_n |
1 | 0 | 0 |
attr |
XIELUActivation.with_vector_loads |
1 | 0 | 0 |
meth |
LaplaceActivation.forward |
4 | 0 | 0 |
attr |
gelu |
1 | 0 | 0 |
meth |
ClassInstantier.getitem |
2 | 0 | 0 |
attr |
gelu_fast |
1 | 0 | 0 |
func |
get_activation |
2 | 0 | 0 |
attr |
quick_gelu |
1 | 0 | 0 |
attr |
silu |
1 | 0 | 0 |
meth |
ReLUSquaredActivation.forward |
2 | 0 | 0 |
attr |
ACT2FN |
1 | 0 | 0 |
meth |
ClippedGELUActivation.init |
3 | 2 | 0 |
attr |
ClippedGELUActivation.min |
1 | 0 | 0 |
attr |
ClippedGELUActivation.max |
1 | 0 | 0 |
meth |
MishActivation.init |
1 | 0 | 0 |
attr |
MishActivation.act |
1 | 0 | 0 |
attr |
gelu_new |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.audio_utils (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
| func | chroma_filter_bank | 8 | 7 | 0 |
| func | load_audio_librosa | 4 | 2 | 0 |
| func | load_audio | 4 | 2 | 0 |
| func | load_audio_torchcodec | 3 | 2 | 0 |
| func | hertz_to_octave | 4 | 3 | 0 |
| func | is_valid_audio | 2 | 0 | 0 |
| attr | TORCHCODEC_VERSION | 1 | 0 | 0 |
| func | is_valid_list_of_audio | 2 | 0 | 0 |
transformers.backbone_utils (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
consolidate_backbone_kwargs_to_config |
7 | 4 | 0 |
meth |
BackboneMixin.init |
3 | 1 | 0 |
meth |
BackboneMixin._init_timm_backbone |
2 | 1 | 0 |
meth |
BackboneMixin.forward_with_filtered_kwargs |
3 | 0 | 0 |
meth |
BackboneMixin.forward |
5 | 3 | 0 |
prop |
BackboneMixin.out_features |
2 | 1 | 0 |
prop |
BackboneMixin.out_indices |
2 | 1 | 0 |
prop |
BackboneMixin.out_feature_channels |
1 | 0 | 0 |
prop |
BackboneMixin.channels |
1 | 0 | 0 |
func |
load_backbone |
2 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
BackboneConfigMixin.set_output_features_output_indices |
3 | 2 | 0 |
meth |
BackboneConfigMixin.verify_out_features_out_indices |
1 | 0 | 0 |
meth |
BackboneConfigMixin.to_dict |
1 | 0 | 0 |
prop |
BackboneConfigMixin.out_features |
2 | 1 | 0 |
prop |
BackboneConfigMixin.out_indices |
2 | 1 | 0 |
transformers.cache_utils (74 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EncoderDecoderCache.init |
2 | 1 | 0 |
meth |
EncoderDecoderCache.iter |
1 | 0 | 0 |
meth |
EncoderDecoderCache.len |
1 | 0 | 0 |
meth |
EncoderDecoderCache.reset |
1 | 0 | 0 |
meth |
EncoderDecoderCache.reorder_cache |
2 | 1 | 0 |
meth |
EncoderDecoderCache.check_dynamic_cache |
2 | 1 | 0 |
meth |
EncoderDecoderCache.crop |
2 | 1 | 0 |
meth |
EncoderDecoderCache.batch_repeat_interleave |
2 | 1 | 0 |
meth |
EncoderDecoderCache.batch_select_indices |
2 | 1 | 0 |
prop |
EncoderDecoderCache.is_sliding |
1 | 0 | 0 |
attr |
EncoderDecoderCache.is_updated |
1 | 0 | 0 |
attr |
EncoderDecoderCache.self_attention_cache |
1 | 0 | 0 |
attr |
EncoderDecoderCache.cross_attention_cache |
1 | 0 | 0 |
meth |
DynamicCache.init |
5 | 4 | 0 |
meth |
DynamicCache.iter |
1 | 0 | 0 |
meth |
Cache.init |
5 | 4 | 0 |
meth |
Cache.repr |
1 | 0 | 0 |
meth |
Cache.prefetch |
3 | 2 | 0 |
meth |
Cache.offload |
3 | 2 | 0 |
meth |
Cache.early_initialization |
6 | 5 | 0 |
meth |
Cache.reset |
1 | 0 | 0 |
meth |
Cache.reorder_cache |
2 | 1 | 0 |
meth |
Cache.crop |
2 | 1 | 0 |
meth |
Cache.batch_repeat_interleave |
2 | 1 | 0 |
meth |
Cache.batch_select_indices |
2 | 1 | 0 |
meth |
Cache.len |
1 | 0 | 0 |
attr |
Cache.layers |
1 | 0 | 0 |
attr |
Cache.layer_class_to_replicate |
1 | 0 | 0 |
attr |
Cache.offloading |
1 | 0 | 0 |
attr |
Cache.only_non_sliding |
1 | 0 | 0 |
attr |
Cache.prefetch_stream |
1 | 0 | 0 |
meth |
StaticCache.init |
6 | 4 | 0 |
meth |
QuantoQuantizedLayer.init |
6 | 5 | 0 |
meth |
QuantoQuantizedLayer._quantize |
3 | 0 | 0 |
meth |
QuantoQuantizedLayer._dequantize |
2 | 0 | 0 |
attr |
QuantoQuantizedLayer.qtype |
1 | 0 | 0 |
attr |
QuantoQuantizedLayer.optimizer |
1 | 0 | 0 |
meth |
StaticLayer.init |
2 | 1 | 0 |
attr |
StaticLayer.max_cache_len |
1 | 0 | 0 |
meth |
HQQQuantizedLayer.init |
6 | 5 | 0 |
meth |
HQQQuantizedLayer._quantize |
3 | 0 | 0 |
meth |
HQQQuantizedLayer._dequantize |
2 | 0 | 0 |
attr |
HQQQuantizedLayer.quantizer |
1 | 0 | 0 |
meth |
QuantizedCache.init |
8 | 7 | 0 |
meth |
StaticSlidingWindowLayer.init |
3 | 2 | 0 |
attr |
StaticSlidingWindowLayer.cumulative_length |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
QuantizedLayer.init |
6 | 5 | 0 |
meth |
QuantizedLayer._quantize |
3 | 0 | 0 |
meth |
QuantizedLayer._dequantize |
2 | 0 | 0 |
attr |
QuantizedLayer.nbits |
1 | 0 | 0 |
attr |
QuantizedLayer.axis_key |
1 | 0 | 0 |
attr |
QuantizedLayer.axis_value |
1 | 0 | 0 |
attr |
QuantizedLayer.q_group_size |
1 | 0 | 0 |
attr |
QuantizedLayer.residual_length |
1 | 0 | 0 |
attr |
QuantizedLayer.cumulative_length |
1 | 0 | 0 |
meth |
DynamicSlidingWindowLayer.init |
2 | 1 | 0 |
attr |
DynamicSlidingWindowLayer.sliding_window |
1 | 0 | 0 |
attr |
DynamicSlidingWindowLayer.cumulative_length |
1 | 0 | 0 |
meth |
CacheLayerMixin.init |
1 | 0 | 0 |
meth |
CacheLayerMixin.repr |
1 | 0 | 0 |
meth |
CacheLayerMixin.offload |
1 | 0 | 0 |
meth |
CacheLayerMixin.prefetch |
1 | 0 | 0 |
attr |
CacheLayerMixin.is_initialized |
1 | 0 | 0 |
transformers.cli.add_fast_image_processor (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
func |
add_fast_image_processor |
2 | 1 | 0 |
func |
write_default_fast_image_processor_file |
4 | 3 | 0 |
func |
add_fast_image_processor_to_tests |
3 | 2 | 0 |
func |
add_fast_image_processor_to_model_init |
4 | 2 | 0 |
func |
add_fast_image_processor_file |
4 | 3 | 0 |
func |
add_fast_image_processor_to_auto |
3 | 2 | 0 |
func |
add_fast_image_processor_to_doc |
3 | 2 | 0 |
transformers.cli.add_new_model_like (23 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
add_model_to_auto_mappings |
6 | 5 | 0 |
attr |
COPYRIGHT |
1 | 0 | 0 |
func |
get_user_input |
1 | 0 | 0 |
func |
add_new_model_like |
2 | 1 | 0 |
func |
create_init_file |
4 | 3 | 0 |
func |
create_test_files |
5 | 3 | 0 |
func |
insert_model_in_doc_toc |
5 | 4 | 0 |
meth |
ClassFinder.init |
1 | 0 | 0 |
meth |
ClassFinder.leave_ClassDef |
2 | 1 | 0 |
meth |
ClassFinder.visit_SimpleStatementLine |
2 | 1 | 0 |
attr |
ClassFinder.is_in_class |
1 | 0 | 0 |
func |
add_content_to_file |
4 | 3 | 0 |
func |
get_user_field |
5 | 5 | 1 |
func |
create_doc_file |
3 | 2 | 0 |
meth |
ModelInfos.init |
2 | 1 | 0 |
attr |
ModelInfos.lowercase_name |
1 | 0 | 0 |
attr |
ModelInfos.paper_name |
1 | 0 | 0 |
attr |
ModelInfos.config_class |
1 | 0 | 0 |
attr |
ModelInfos.camelcase_name |
1 | 0 | 0 |
attr |
ModelInfos.video_processor_class |
1 | 0 | 0 |
attr |
ModelInfos.feature_extractor_class |
1 | 0 | 0 |
attr |
ModelInfos.processor_class |
1 | 0 | 0 |
attr |
ModelInfos.fast_tokenizer_class |
1 | 0 | 0 |
transformers.cli.chat (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Chat.check_health |
2 | 0 | 0 |
meth |
Chat._inner_run |
1 | 0 | 0 |
attr |
Chat.base_url |
1 | 0 | 0 |
attr |
Chat.model_id |
1 | 0 | 0 |
attr |
Chat.system_prompt |
1 | 0 | 0 |
attr |
Chat.save_folder |
1 | 0 | 0 |
attr |
Chat.config |
1 | 0 | 0 |
attr |
Chat.settings |
1 | 0 | 0 |
attr |
Chat.user |
1 | 0 | 0 |
attr |
Chat.examples |
1 | 0 | 0 |
attr |
ALLOWED_KEY_CHARS |
1 | 0 | 0 |
attr |
ALLOWED_VALUE_CHARS |
1 | 0 | 0 |
attr |
HELP_STRING |
1 | 0 | 0 |
meth |
RichInterface.init |
3 | 2 | 0 |
meth |
RichInterface.clear |
1 | 0 | 0 |
meth |
RichInterface.print_user_message |
2 | 1 | 0 |
meth |
RichInterface.print_color |
3 | 2 | 0 |
meth |
RichInterface.print_help |
2 | 1 | 0 |
meth |
RichInterface.print_status |
2 | 1 | 0 |
attr |
RichInterface.model_id |
1 | 0 | 0 |
attr |
RichInterface.user_id |
1 | 0 | 0 |
transformers.cli.download (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
| func | download | 5 | 4 | 0 |
transformers.cli.serve (51 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
serve_dependencies_available |
1 | 0 | 0 |
meth |
TimedModel.init |
4 | 3 | 0 |
meth |
TimedModel.reset_timer |
1 | 0 | 0 |
meth |
TimedModel.delete_model |
1 | 0 | 0 |
meth |
TimedModel.timeout_reached |
1 | 0 | 0 |
meth |
TimedModel.is_deleted |
1 | 0 | 0 |
attr |
TimedModel.model |
1 | 0 | 0 |
attr |
TimedModel.processor |
1 | 0 | 0 |
attr |
TimedModel.timeout_seconds |
1 | 0 | 0 |
meth |
Serve.start_server |
1 | 0 | 0 |
meth |
Serve.kill_server |
1 | 0 | 0 |
meth |
Serve._validate_request |
5 | 4 | 0 |
meth |
Serve.validate_response_request |
2 | 1 | 0 |
meth |
Serve.validate_chat_completion_request |
2 | 1 | 0 |
meth |
Serve.validate_transcription_request |
2 | 1 | 0 |
meth |
Serve.get_model_modality |
3 | 2 | 0 |
meth |
Serve.get_processor_inputs_from_inbound_messages |
3 | 1 | 0 |
meth |
Serve._load_model_and_data_processor |
2 | 1 | 0 |
attr |
Serve.continuous_batching |
1 | 0 | 0 |
attr |
Serve.device |
1 | 0 | 0 |
attr |
Serve.dtype |
1 | 0 | 0 |
attr |
Serve.trust_remote_code |
1 | 0 | 0 |
attr |
Serve.attn_implementation |
1 | 0 | 0 |
attr |
Serve.quantization |
1 | 0 | 0 |
attr |
Serve.host |
1 | 0 | 0 |
attr |
Serve.port |
1 | 0 | 0 |
attr |
Serve.model_timeout |
1 | 0 | 0 |
attr |
Serve.log_level |
1 | 0 | 0 |
attr |
Serve.default_seed |
1 | 0 | 0 |
attr |
Serve.enable_cors |
1 | 0 | 0 |
attr |
Serve.input_validation |
1 | 0 | 0 |
attr |
Serve.force_model |
1 | 0 | 0 |
attr |
Serve.non_blocking |
1 | 0 | 0 |
attr |
Serve.last_messages |
1 | 0 | 0 |
attr |
Serve.last_kv_cache |
1 | 0 | 0 |
attr |
Serve.last_model |
1 | 0 | 0 |
attr |
Serve.server |
1 | 0 | 0 |
func |
set_torch_seed |
2 | 0 | 0 |
attr |
serve |
1 | 0 | 0 |
meth |
ToolState.init |
1 | 0 | 0 |
meth |
ToolState.reset |
1 | 0 | 0 |
func |
create_generation_config_from_req |
4 | 3 | 0 |
attr |
transcription_validator |
1 | 0 | 0 |
attr |
response_validator |
1 | 0 | 0 |
func |
torch_ones_like |
2 | 0 | 0 |
func |
reset_torch_cache |
1 | 0 | 0 |
attr |
completion_validator |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.cli.transformers (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
| attr | app | 1 | 0 | 0 |
| func | main | 1 | 0 | 0 |
transformers.configuration_utils (50 missing, 4 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PreTrainedConfig.setattr |
3 | 0 | 0 |
meth |
PreTrainedConfig.getattribute |
2 | 0 | 0 |
meth |
PreTrainedConfig.init |
13 | 11 | 0 |
meth |
PreTrainedConfig._create_id_label_maps |
2 | 1 | 0 |
meth |
PreTrainedConfig.save_pretrained |
4 | 2 | 0 |
meth |
PreTrainedConfig.from_pretrained |
8 | 7 | 0 |
meth |
PreTrainedConfig.get_config_dict |
3 | 2 | 0 |
meth |
PreTrainedConfig._get_config_dict |
3 | 2 | 0 |
meth |
PreTrainedConfig.from_dict |
3 | 2 | 0 |
meth |
PreTrainedConfig._dict_from_json_file |
2 | 1 | 0 |
meth |
PreTrainedConfig._encode_special_floats |
2 | 2 | 2 |
meth |
PreTrainedConfig._decode_special_floats |
2 | 2 | 2 |
meth |
PreTrainedConfig.eq |
2 | 0 | 0 |
meth |
PreTrainedConfig.repr |
1 | 0 | 0 |
meth |
PreTrainedConfig.iter |
1 | 0 | 0 |
meth |
PreTrainedConfig.to_json_file |
3 | 2 | 0 |
meth |
PreTrainedConfig.update |
2 | 1 | 0 |
meth |
PreTrainedConfig.update_from_string |
2 | 1 | 0 |
meth |
PreTrainedConfig.register_for_auto_class |
2 | 0 | 0 |
meth |
PreTrainedConfig.get_text_config |
3 | 1 | 0 |
prop |
PreTrainedConfig.name_or_path |
2 | 1 | 0 |
prop |
PreTrainedConfig.output_attentions |
2 | 1 | 0 |
prop |
PreTrainedConfig._attn_implementation |
2 | 1 | 0 |
prop |
PreTrainedConfig._experts_implementation |
2 | 1 | 0 |
prop |
PreTrainedConfig.torch_dtype |
2 | 0 | 0 |
prop |
PreTrainedConfig.rope_scaling |
2 | 0 | 0 |
attr |
PreTrainedConfig.return_dict |
1 | 0 | 0 |
attr |
PreTrainedConfig.output_hidden_states |
1 | 0 | 0 |
attr |
PreTrainedConfig.dtype |
1 | 0 | 0 |
attr |
PreTrainedConfig.chunk_size_feed_forward |
1 | 0 | 0 |
attr |
PreTrainedConfig.is_encoder_decoder |
1 | 0 | 0 |
attr |
PreTrainedConfig.architectures |
1 | 0 | 0 |
attr |
PreTrainedConfig.id2label |
1 | 0 | 0 |
attr |
PreTrainedConfig.label2id |
1 | 0 | 0 |
attr |
PreTrainedConfig.problem_type |
1 | 0 | 0 |
attr |
PreTrainedConfig.transformers_version |
1 | 0 | 0 |
func |
layer_type_validation |
4 | 3 | 0 |
func |
recursive_diff_dict |
4 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.conversion_mapping (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
| func | get_checkpoint_conversion_mapping | 2 | 0 | 0 |
transformers.convert_slow_tokenizer (217 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
convert_slow_tokenizer |
3 | 1 | 0 |
meth |
LlamaConverter.vocab |
2 | 0 | 0 |
meth |
LlamaConverter.unk_id |
2 | 0 | 0 |
meth |
LlamaConverter.decoder |
3 | 0 | 0 |
meth |
LlamaConverter.normalizer |
2 | 0 | 0 |
meth |
LlamaConverter.pre_tokenizer |
3 | 0 | 0 |
meth |
LlamaConverter.post_processor |
1 | 0 | 0 |
meth |
MBartConverter.vocab |
2 | 0 | 0 |
meth |
MBartConverter.unk_id |
2 | 0 | 0 |
meth |
MBartConverter.post_processor |
1 | 0 | 0 |
meth |
MBartConverter.convert_from_spm |
3 | 0 | 0 |
meth |
CamembertConverter.vocab |
2 | 0 | 0 |
meth |
CamembertConverter.unk_id |
2 | 0 | 0 |
meth |
CamembertConverter.post_processor |
1 | 0 | 0 |
meth |
CamembertConverter.convert_from_spm |
3 | 0 | 0 |
meth |
SeamlessM4TConverter.vocab |
2 | 0 | 0 |
meth |
SeamlessM4TConverter.unk_id |
2 | 0 | 0 |
meth |
SeamlessM4TConverter.post_processor |
1 | 0 | 0 |
meth |
XLNetConverter.vocab |
2 | 0 | 0 |
meth |
XLNetConverter.normalizer |
2 | 0 | 0 |
meth |
XLNetConverter.post_processor |
1 | 0 | 0 |
meth |
RemBertConverter.normalizer |
2 | 0 | 0 |
meth |
RemBertConverter.post_processor |
1 | 0 | 0 |
meth |
UdopConverter.post_processor |
1 | 0 | 0 |
meth |
ParakeetConverter.init |
3 | 0 | 0 |
meth |
ParakeetConverter.tokenizer |
2 | 0 | 0 |
attr |
ParakeetConverter.vocab_file |
1 | 0 | 0 |
attr |
ParakeetConverter.proto |
1 | 0 | 0 |
meth |
XLMRobertaConverter.vocab |
2 | 0 | 0 |
meth |
XLMRobertaConverter.unk_id |
2 | 0 | 0 |
meth |
XLMRobertaConverter.post_processor |
1 | 0 | 0 |
meth |
XLMRobertaConverter.convert_from_spm |
3 | 0 | 0 |
meth |
SpmConverter.build_tokenizer_from_spm_proto |
4 | 0 | 0 |
meth |
SpmConverter.convert_from_spm |
3 | 0 | 0 |
meth |
SpmConverter.init |
2 | 0 | 0 |
meth |
SpmConverter.vocab |
2 | 0 | 0 |
meth |
SpmConverter.unk_id |
2 | 0 | 0 |
meth |
SpmConverter.tokenizer |
2 | 0 | 0 |
meth |
SpmConverter.normalizer |
2 | 0 | 0 |
meth |
SpmConverter.pre_tokenizer |
3 | 0 | 0 |
meth |
SpmConverter.post_processor |
1 | 0 | 0 |
meth |
SpmConverter.decoder |
3 | 0 | 0 |
attr |
SpmConverter.proto |
1 | 0 | 0 |
meth |
XGLMConverter.vocab |
2 | 0 | 0 |
meth |
XGLMConverter.unk_id |
2 | 0 | 0 |
meth |
XGLMConverter.post_processor |
1 | 0 | 0 |
meth |
TikTokenConverter.init |
6 | 0 | 0 |
meth |
TikTokenConverter.extract_vocab_merges_from_model |
2 | 1 | 0 |
meth |
TikTokenConverter.tokenizer |
1 | 0 | 0 |
attr |
TikTokenConverter.vocab_file |
1 | 0 | 0 |
attr |
TikTokenConverter.pattern |
1 | 0 | 0 |
attr |
TikTokenConverter.add_prefix_space |
1 | 0 | 0 |
attr |
TikTokenConverter.extra_special_tokens |
1 | 0 | 0 |
func |
generate_merges |
4 | 1 | 0 |
meth |
NllbConverter.vocab |
2 | 0 | 0 |
meth |
NllbConverter.unk_id |
2 | 0 | 0 |
meth |
NllbConverter.post_processor |
1 | 0 | 0 |
meth |
NllbConverter.convert_from_spm |
3 | 0 | 0 |
func |
bytes_to_unicode |
1 | 0 | 0 |
meth |
Converter.init |
2 | 0 | 0 |
attr |
Converter.original_tokenizer |
1 | 0 | 0 |
meth |
GemmaSentencePieceExtractor.extract |
2 | 1 | 0 |
meth |
MistralConverter.init |
6 | 0 | 0 |
meth |
MistralConverter.extract_vocab_merges_from_model |
2 | 1 | 0 |
meth |
MistralConverter.tokenizer |
1 | 0 | 0 |
attr |
MistralConverter.vocab_file |
1 | 0 | 0 |
attr |
MistralConverter.pattern |
1 | 0 | 0 |
attr |
MistralConverter.add_prefix_space |
1 | 0 | 0 |
attr |
MistralConverter.additional_special_tokens |
1 | 0 | 0 |
meth |
PegasusConverter.vocab |
2 | 0 | 0 |
meth |
PegasusConverter.unk_id |
2 | 0 | 0 |
meth |
PegasusConverter.pre_tokenizer |
3 | 0 | 0 |
meth |
PegasusConverter.post_processor |
1 | 0 | 0 |
meth |
MoshiConverter.init |
3 | 0 | 0 |
meth |
MoshiConverter.normalizer |
2 | 0 | 0 |
meth |
MoshiConverter.decoder |
3 | 0 | 0 |
meth |
MoshiConverter.pre_tokenizer |
3 | 0 | 0 |
attr |
MoshiConverter.proto |
1 | 0 | 0 |
meth |
BarthezConverter.unk_id |
2 | 0 | 0 |
meth |
BarthezConverter.post_processor |
1 | 0 | 0 |
func |
import_protobuf |
2 | 0 | 0 |
meth |
MBart50Converter.vocab |
2 | 0 | 0 |
meth |
MBart50Converter.unk_id |
2 | 0 | 0 |
meth |
MBart50Converter.post_processor |
1 | 0 | 0 |
meth |
MBart50Converter.convert_from_spm |
3 | 0 | 0 |
meth |
HeliumConverter.init |
3 | 0 | 0 |
meth |
HeliumConverter.tokenizer |
2 | 0 | 0 |
meth |
HeliumConverter.vocab |
2 | 0 | 0 |
meth |
HeliumConverter.unk_id |
2 | 0 | 0 |
meth |
HeliumConverter.decoder |
3 | 0 | 0 |
meth |
HeliumConverter.normalizer |
2 | 0 | 0 |
meth |
HeliumConverter.pre_tokenizer |
3 | 0 | 0 |
meth |
HeliumConverter.post_processor |
1 | 0 | 0 |
attr |
HeliumConverter.proto |
1 | 0 | 0 |
meth |
GemmaConverter.normalizer |
2 | 0 | 0 |
meth |
GemmaConverter.vocab |
2 | 0 | 0 |
meth |
GemmaConverter.pre_tokenizer |
3 | 0 | 0 |
meth |
GemmaConverter.unk_id |
2 | 0 | 0 |
meth |
GemmaConverter.decoder |
3 | 0 | 0 |
meth |
DebertaV2Converter.pre_tokenizer |
3 | 0 | 0 |
meth |
DebertaV2Converter.normalizer |
2 | 0 | 0 |
meth |
DebertaV2Converter.post_processor |
1 | 0 | 0 |
meth |
AlbertConverter.vocab |
2 | 0 | 0 |
meth |
AlbertConverter.normalizer |
2 | 0 | 0 |
meth |
AlbertConverter.post_processor |
1 | 0 | 0 |
meth |
SentencePieceExtractor.init |
2 | 1 | 0 |
meth |
SentencePieceExtractor.extract |
3 | 1 | 0 |
attr |
SentencePieceExtractor.proto |
1 | 0 | 0 |
meth |
T5Converter.vocab |
2 | 0 | 0 |
meth |
T5Converter.post_processor |
1 | 0 | 0 |
meth |
T5Converter.convert_from_spm |
3 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
BigBirdConverter.post_processor |
1 | 0 | 0 |
transformers.convert_slow_tokenizers_checkpoints_to_fast (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
parser |
1 | 0 | 0 |
func |
convert_slow_checkpoint_to_fast |
5 | 0 | 0 |
attr |
args |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.core_model_loading (61 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Chunk.init |
2 | 1 | 0 |
meth |
Chunk.convert |
5 | 4 | 0 |
attr |
Chunk.dim |
1 | 0 | 0 |
meth |
SplitModulelist.init |
2 | 1 | 0 |
meth |
SplitModulelist.convert |
5 | 4 | 0 |
attr |
SplitModulelist.dim |
1 | 0 | 0 |
meth |
MergeModulelist.init |
2 | 1 | 0 |
meth |
MergeModulelist.convert |
5 | 4 | 0 |
attr |
MergeModulelist.dim |
1 | 0 | 0 |
meth |
Concatenate.init |
2 | 1 | 0 |
meth |
Concatenate.convert |
5 | 4 | 0 |
attr |
Concatenate.dim |
1 | 0 | 0 |
meth |
ConversionOps.repr |
1 | 0 | 0 |
meth |
ConversionOps.convert |
5 | 4 | 0 |
meth |
PermuteForRope.init |
1 | 0 | 0 |
meth |
PermuteForRope.convert |
6 | 4 | 0 |
meth |
WeightConverter.post_init |
1 | 0 | 0 |
meth |
WeightConverter.convert |
6 | 2 | 0 |
meth |
WeightTransform.setattr |
3 | 0 | 0 |
meth |
WeightTransform.post_init |
1 | 0 | 0 |
meth |
WeightTransform.add_tensor |
5 | 4 | 0 |
meth |
WeightRenaming.convert |
6 | 2 | 0 |
func |
revert_weight_conversion |
3 | 2 | 0 |
meth |
ErnieFuseAndSplitTextVisionExperts.init |
3 | 2 | 0 |
meth |
ErnieFuseAndSplitTextVisionExperts.split_list_into_chunks |
3 | 2 | 0 |
meth |
ErnieFuseAndSplitTextVisionExperts.convert |
6 | 4 | 0 |
attr |
ErnieFuseAndSplitTextVisionExperts.stack_dim |
1 | 0 | 0 |
attr |
ErnieFuseAndSplitTextVisionExperts.concat_dim |
1 | 0 | 0 |
func |
spawn_tp_materialize |
7 | 3 | 0 |
meth |
Force16BytesAlignment.convert |
5 | 4 | 0 |
func |
spawn_materialize |
5 | 3 | 0 |
func |
log_conversion_errors |
5 | 4 | 1 |
func |
set_param_for_module |
7 | 6 | 0 |
meth |
Transpose.init |
4 | 3 | 0 |
meth |
Transpose.convert |
5 | 4 | 0 |
attr |
Transpose.dim0 |
1 | 0 | 0 |
attr |
Transpose.dim1 |
1 | 0 | 0 |
attr |
Transpose.check_dims |
1 | 0 | 0 |
meth |
ErnieSplitAndDecoupleTextVisionExperts.init |
3 | 2 | 0 |
meth |
ErnieSplitAndDecoupleTextVisionExperts.convert |
6 | 4 | 0 |
attr |
ErnieSplitAndDecoupleTextVisionExperts.stack_dim |
1 | 0 | 0 |
attr |
ErnieSplitAndDecoupleTextVisionExperts.concat_dim |
1 | 0 | 0 |
attr |
GLOBAL_WORKERS |
1 | 0 | 0 |
func |
dot_natural_key |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
convert_and_load_state_dict_in_model |
6 | 5 | 0 |
transformers.data.data_collator (39 missing, 7 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DataCollatorForTokenClassification.torch_call |
2 | 0 | 0 |
meth |
DataCollatorForTokenClassification.numpy_call |
2 | 0 | 0 |
meth |
DataCollatorWithFlattening.init |
7 | 0 | 0 |
meth |
DataCollatorWithFlattening.call |
4 | 0 | 0 |
meth |
DataCollatorForMultipleChoice.torch_call |
2 | 1 | 0 |
meth |
DataCollatorForSeq2Seq.call |
3 | 0 | 0 |
meth |
DataCollatorForLanguageModeling.post_init |
1 | 0 | 0 |
meth |
DataCollatorForLanguageModeling.get_generator |
2 | 0 | 0 |
meth |
DataCollatorForLanguageModeling.create_rng |
1 | 0 | 0 |
meth |
DataCollatorForLanguageModeling.torch_mask_tokens |
4 | 4 | 1 |
meth |
DataCollatorForLanguageModeling.numpy_mask_tokens |
4 | 4 | 1 |
meth |
DataCollatorForLanguageModeling._whole_word_mask |
3 | 3 | 2 |
meth |
DataCollatorForSOP.init |
3 | 0 | 0 |
meth |
DataCollatorForSOP.mask_tokens |
2 | 2 | 1 |
func |
default_data_collator |
3 | 2 | 0 |
meth |
DataCollatorForPermutationLanguageModeling.torch_mask_tokens |
2 | 2 | 1 |
meth |
DataCollatorForPermutationLanguageModeling.numpy_mask_tokens |
2 | 2 | 1 |
meth |
DataCollatorForWholeWordMask.init |
3 | 0 | 0 |
meth |
DefaultDataCollator.call |
3 | 2 | 0 |
func |
pad_without_fast_tokenizer_warning |
4 | 0 | 0 |
meth |
DataCollatorMixin.call |
3 | 1 | 0 |
func |
to_numpy |
2 | 1 | 0 |
func |
tolist |
2 | 1 | 0 |
transformers.data.datasets.glue (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GlueDataTrainingArguments.post_init |
1 | 0 | 0 |
meth |
GlueDataset.init |
6 | 5 | 0 |
meth |
GlueDataset.len |
1 | 0 | 0 |
meth |
GlueDataset.getitem |
2 | 1 | 0 |
meth |
GlueDataset.get_labels |
1 | 0 | 0 |
attr |
GlueDataset.processor |
1 | 0 | 0 |
attr |
GlueDataset.label_list |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.data.datasets.squad (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SquadDataset.init |
8 | 7 | 0 |
meth |
SquadDataset.len |
1 | 0 | 0 |
meth |
SquadDataset.getitem |
2 | 1 | 0 |
attr |
SquadDataset.processor |
1 | 0 | 0 |
attr |
SquadDataset.old_features |
1 | 0 | 0 |
attr |
SquadDataset.dataset |
1 | 0 | 0 |
attr |
SquadDataset.examples |
1 | 0 | 0 |
attr |
MODEL_TYPES |
1 | 0 | 0 |
attr |
MODEL_CONFIG_CLASSES |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.data.metrics (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
glue_compute_metrics |
4 | 0 | 0 |
func |
xnli_compute_metrics |
4 | 0 | 0 |
func |
acc_and_f1 |
3 | 0 | 0 |
func |
pearson_and_spearman |
3 | 0 | 0 |
func |
simple_accuracy |
3 | 0 | 0 |
transformers.data.metrics.squad_metrics (89 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
find_all_best_thresh_v2 |
7 | 0 | 0 |
func |
apply_no_ans_threshold |
5 | 0 | 0 |
func |
find_best_thresh_v2 |
5 | 0 | 0 |
func |
find_all_best_thresh |
7 | 0 | 0 |
func |
merge_eval |
4 | 0 | 0 |
func |
find_best_thresh |
5 | 0 | 0 |
func |
squad_evaluate |
5 | 0 | 0 |
func |
normalize_answer |
2 | 0 | 0 |
func |
make_eval_dict |
4 | 0 | 0 |
func |
get_final_text |
5 | 0 | 0 |
func |
compute_predictions_logits |
14 | 0 | 0 |
func |
compute_predictions_log_probs |
14 | 0 | 0 |
func |
get_raw_scores |
3 | 0 | 0 |
func |
get_tokens |
2 | 0 | 0 |
func |
compute_f1 |
3 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
compute_exact |
3 | 0 | 0 |
transformers.data.processors.glue (147 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
glue_convert_examples_to_features |
7 | 3 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
StsbProcessor.init |
3 | 0 | 0 |
meth |
StsbProcessor.get_example_from_tensor_dict |
2 | 0 | 0 |
meth |
StsbProcessor.get_train_examples |
2 | 0 | 0 |
meth |
StsbProcessor.get_dev_examples |
2 | 0 | 0 |
meth |
StsbProcessor.get_test_examples |
2 | 0 | 0 |
meth |
StsbProcessor.get_labels |
1 | 0 | 0 |
meth |
StsbProcessor._create_examples |
3 | 0 | 0 |
meth |
QnliProcessor.init |
3 | 0 | 0 |
meth |
QnliProcessor.get_example_from_tensor_dict |
2 | 0 | 0 |
meth |
QnliProcessor.get_train_examples |
2 | 0 | 0 |
meth |
QnliProcessor.get_dev_examples |
2 | 0 | 0 |
meth |
QnliProcessor.get_test_examples |
2 | 0 | 0 |
meth |
QnliProcessor.get_labels |
1 | 0 | 0 |
meth |
QnliProcessor._create_examples |
3 | 0 | 0 |
meth |
ColaProcessor.init |
3 | 0 | 0 |
meth |
ColaProcessor.get_example_from_tensor_dict |
2 | 0 | 0 |
meth |
ColaProcessor.get_train_examples |
2 | 0 | 0 |
meth |
ColaProcessor.get_dev_examples |
2 | 0 | 0 |
meth |
ColaProcessor.get_test_examples |
2 | 0 | 0 |
meth |
ColaProcessor.get_labels |
1 | 0 | 0 |
meth |
ColaProcessor._create_examples |
3 | 0 | 0 |
meth |
QqpProcessor.init |
3 | 0 | 0 |
meth |
QqpProcessor.get_example_from_tensor_dict |
2 | 0 | 0 |
meth |
QqpProcessor.get_train_examples |
2 | 0 | 0 |
meth |
QqpProcessor.get_dev_examples |
2 | 0 | 0 |
meth |
QqpProcessor.get_test_examples |
2 | 0 | 0 |
meth |
QqpProcessor.get_labels |
1 | 0 | 0 |
meth |
QqpProcessor._create_examples |
3 | 0 | 0 |
meth |
MnliProcessor.init |
3 | 0 | 0 |
meth |
MnliProcessor.get_example_from_tensor_dict |
2 | 0 | 0 |
meth |
MnliProcessor.get_train_examples |
2 | 0 | 0 |
meth |
MnliProcessor.get_dev_examples |
2 | 0 | 0 |
meth |
MnliProcessor.get_test_examples |
2 | 0 | 0 |
meth |
MnliProcessor.get_labels |
1 | 0 | 0 |
meth |
MnliProcessor._create_examples |
3 | 0 | 0 |
meth |
MnliMismatchedProcessor.init |
3 | 0 | 0 |
meth |
MnliMismatchedProcessor.get_dev_examples |
2 | 0 | 0 |
meth |
MnliMismatchedProcessor.get_test_examples |
2 | 0 | 0 |
meth |
Sst2Processor.init |
3 | 0 | 0 |
meth |
Sst2Processor.get_example_from_tensor_dict |
2 | 0 | 0 |
meth |
Sst2Processor.get_train_examples |
2 | 0 | 0 |
meth |
Sst2Processor.get_dev_examples |
2 | 0 | 0 |
meth |
Sst2Processor.get_test_examples |
2 | 0 | 0 |
meth |
Sst2Processor.get_labels |
1 | 0 | 0 |
meth |
Sst2Processor._create_examples |
3 | 0 | 0 |
meth |
MrpcProcessor.init |
3 | 0 | 0 |
meth |
MrpcProcessor.get_example_from_tensor_dict |
2 | 0 | 0 |
meth |
MrpcProcessor.get_train_examples |
2 | 0 | 0 |
meth |
MrpcProcessor.get_dev_examples |
2 | 0 | 0 |
meth |
MrpcProcessor.get_test_examples |
2 | 0 | 0 |
meth |
MrpcProcessor.get_labels |
1 | 0 | 0 |
meth |
MrpcProcessor._create_examples |
3 | 0 | 0 |
meth |
RteProcessor.init |
3 | 0 | 0 |
meth |
RteProcessor.get_example_from_tensor_dict |
2 | 0 | 0 |
meth |
RteProcessor.get_train_examples |
2 | 0 | 0 |
meth |
RteProcessor.get_dev_examples |
2 | 0 | 0 |
meth |
RteProcessor.get_test_examples |
2 | 0 | 0 |
meth |
RteProcessor.get_labels |
1 | 0 | 0 |
meth |
RteProcessor._create_examples |
3 | 0 | 0 |
meth |
WnliProcessor.init |
3 | 0 | 0 |
meth |
WnliProcessor.get_example_from_tensor_dict |
2 | 0 | 0 |
meth |
WnliProcessor.get_train_examples |
2 | 0 | 0 |
meth |
WnliProcessor.get_dev_examples |
2 | 0 | 0 |
meth |
WnliProcessor.get_test_examples |
2 | 0 | 0 |
meth |
WnliProcessor.get_labels |
1 | 0 | 0 |
meth |
WnliProcessor._create_examples |
3 | 0 | 0 |
transformers.data.processors.squad (99 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SquadExample.init |
9 | 0 | 0 |
attr |
SquadExample.qas_id |
1 | 0 | 0 |
attr |
SquadExample.question_text |
1 | 0 | 0 |
attr |
SquadExample.context_text |
1 | 0 | 0 |
attr |
SquadExample.answer_text |
1 | 0 | 0 |
attr |
SquadExample.title |
1 | 0 | 0 |
attr |
SquadExample.is_impossible |
1 | 0 | 0 |
attr |
SquadExample.answers |
1 | 0 | 0 |
attr |
SquadExample.doc_tokens |
1 | 0 | 0 |
attr |
SquadExample.char_to_word_offset |
1 | 0 | 0 |
attr |
SquadExample.start_position |
1 | 0 | 0 |
attr |
SquadExample.end_position |
1 | 0 | 0 |
meth |
SquadFeatures.init |
17 | 2 | 0 |
attr |
SquadFeatures.input_ids |
1 | 0 | 0 |
attr |
SquadFeatures.attention_mask |
1 | 0 | 0 |
attr |
SquadFeatures.token_type_ids |
1 | 0 | 0 |
attr |
SquadFeatures.cls_index |
1 | 0 | 0 |
attr |
SquadFeatures.p_mask |
1 | 0 | 0 |
attr |
SquadFeatures.example_index |
1 | 0 | 0 |
attr |
SquadFeatures.unique_id |
1 | 0 | 0 |
attr |
SquadFeatures.paragraph_len |
1 | 0 | 0 |
attr |
SquadFeatures.token_is_max_context |
1 | 0 | 0 |
attr |
SquadFeatures.tokens |
1 | 0 | 0 |
attr |
SquadFeatures.token_to_orig_map |
1 | 0 | 0 |
attr |
SquadFeatures.start_position |
1 | 0 | 0 |
attr |
SquadFeatures.end_position |
1 | 0 | 0 |
attr |
SquadFeatures.is_impossible |
1 | 0 | 0 |
attr |
SquadFeatures.qas_id |
1 | 0 | 0 |
attr |
SquadFeatures.encoding |
1 | 0 | 0 |
func |
squad_convert_examples_to_features |
11 | 0 | 0 |
meth |
SquadResult.init |
7 | 0 | 0 |
attr |
SquadResult.start_logits |
1 | 0 | 0 |
attr |
SquadResult.end_logits |
1 | 0 | 0 |
attr |
SquadResult.unique_id |
1 | 0 | 0 |
attr |
SquadResult.start_top_index |
1 | 0 | 0 |
attr |
SquadResult.end_top_index |
1 | 0 | 0 |
attr |
SquadResult.cls_logits |
1 | 0 | 0 |
meth |
SquadProcessor._get_example_from_tensor_dict |
3 | 0 | 0 |
meth |
SquadProcessor.get_examples_from_dataset |
3 | 0 | 0 |
meth |
SquadProcessor.get_train_examples |
3 | 0 | 0 |
meth |
SquadProcessor.get_dev_examples |
3 | 0 | 0 |
meth |
SquadProcessor._create_examples |
3 | 0 | 0 |
func |
squad_convert_example_to_features_init |
2 | 1 | 0 |
func |
squad_convert_example_to_features |
7 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.data.processors.utils (63 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
InputFeatures.to_json_string |
1 | 0 | 0 |
meth |
SingleSentenceClassificationProcessor.init |
5 | 0 | 0 |
meth |
SingleSentenceClassificationProcessor.len |
1 | 0 | 0 |
meth |
SingleSentenceClassificationProcessor.getitem |
2 | 0 | 0 |
meth |
SingleSentenceClassificationProcessor.create_from_csv |
8 | 0 | 0 |
meth |
SingleSentenceClassificationProcessor.create_from_examples |
4 | 0 | 0 |
meth |
SingleSentenceClassificationProcessor.add_examples_from_csv |
9 | 0 | 0 |
meth |
SingleSentenceClassificationProcessor.add_examples |
6 | 0 | 0 |
meth |
SingleSentenceClassificationProcessor.get_features |
7 | 0 | 0 |
attr |
SingleSentenceClassificationProcessor.labels |
1 | 0 | 0 |
attr |
SingleSentenceClassificationProcessor.examples |
1 | 0 | 0 |
attr |
SingleSentenceClassificationProcessor.mode |
1 | 0 | 0 |
attr |
SingleSentenceClassificationProcessor.verbose |
1 | 0 | 0 |
meth |
InputExample.to_json_string |
1 | 0 | 0 |
meth |
DataProcessor.get_example_from_tensor_dict |
2 | 0 | 0 |
meth |
DataProcessor.get_train_examples |
2 | 0 | 0 |
meth |
DataProcessor.get_dev_examples |
2 | 0 | 0 |
meth |
DataProcessor.get_test_examples |
2 | 0 | 0 |
meth |
DataProcessor.get_labels |
1 | 0 | 0 |
meth |
DataProcessor.tfds_map |
2 | 0 | 0 |
meth |
DataProcessor._read_tsv |
3 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.data.processors.xnli (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
XnliProcessor.init |
3 | 0 | 0 |
meth |
XnliProcessor.get_train_examples |
2 | 0 | 0 |
meth |
XnliProcessor.get_test_examples |
2 | 0 | 0 |
meth |
XnliProcessor.get_labels |
1 | 0 | 0 |
attr |
XnliProcessor.language |
1 | 0 | 0 |
attr |
XnliProcessor.train_language |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.debug_utils (45 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
get_abs_min_max |
3 | 0 | 0 |
meth |
DebugUnderflowOverflow.init |
5 | 0 | 0 |
meth |
DebugUnderflowOverflow.save_frame |
2 | 0 | 0 |
meth |
DebugUnderflowOverflow.expand_frame |
2 | 0 | 0 |
meth |
DebugUnderflowOverflow.trace_frames |
1 | 0 | 0 |
meth |
DebugUnderflowOverflow.reset_saved_frames |
1 | 0 | 0 |
meth |
DebugUnderflowOverflow.dump_saved_frames |
1 | 0 | 0 |
meth |
DebugUnderflowOverflow.analyse_model |
1 | 0 | 0 |
meth |
DebugUnderflowOverflow.analyse_variable |
3 | 0 | 0 |
meth |
DebugUnderflowOverflow.batch_start_frame |
1 | 0 | 0 |
meth |
DebugUnderflowOverflow.batch_end_frame |
1 | 0 | 0 |
meth |
DebugUnderflowOverflow.create_frame |
4 | 0 | 0 |
meth |
DebugUnderflowOverflow.register_forward_hook |
1 | 0 | 0 |
meth |
DebugUnderflowOverflow._register_forward_hook |
2 | 0 | 0 |
meth |
DebugUnderflowOverflow.forward_hook |
4 | 0 | 0 |
attr |
DebugUnderflowOverflow.model |
1 | 0 | 0 |
attr |
DebugUnderflowOverflow.trace_batch_nums |
1 | 0 | 0 |
attr |
DebugUnderflowOverflow.abort_after_batch_num |
1 | 0 | 0 |
attr |
DebugUnderflowOverflow.frames |
1 | 0 | 0 |
attr |
DebugUnderflowOverflow.frame |
1 | 0 | 0 |
attr |
DebugUnderflowOverflow.batch_number |
1 | 0 | 0 |
attr |
DebugUnderflowOverflow.total_calls |
1 | 0 | 0 |
attr |
DebugUnderflowOverflow.detected_overflow |
1 | 0 | 0 |
attr |
DebugUnderflowOverflow.prefix |
1 | 0 | 0 |
func |
detect_overflow |
3 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.dependency_versions_check (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
dep_version_check |
3 | 0 | 0 |
transformers.distributed.configuration_utils (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DistributedConfig.from_dict |
3 | 0 | 0 |
meth |
DistributedConfig.to_json_file |
2 | 1 | 0 |
meth |
DistributedConfig.iter |
1 | 0 | 0 |
meth |
DistributedConfig.repr |
1 | 0 | 0 |
meth |
DistributedConfig.to_json_string |
1 | 0 | 0 |
meth |
DistributedConfig.update |
2 | 0 | 0 |
transformers.dynamic_module_utils (15 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
get_cached_module_file |
12 | 11 | 0 |
func |
custom_object_save |
4 | 4 | 1 |
func |
check_python_requirements |
4 | 0 | 0 |
func |
resolve_trust_remote_code |
7 | 0 | 0 |
func |
get_class_from_dynamic_module |
12 | 11 | 0 |
func |
init_hf_modules |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.feature_extraction_sequence_utils (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SequenceFeatureExtractor.init |
5 | 3 | 0 |
meth |
SequenceFeatureExtractor._truncate |
5 | 4 | 0 |
meth |
SequenceFeatureExtractor._get_padding_strategies |
3 | 0 | 0 |
meth |
SequenceFeatureExtractor.fetch_audio |
2 | 1 | 0 |
attr |
SequenceFeatureExtractor.feature_size |
1 | 0 | 0 |
attr |
SequenceFeatureExtractor.sampling_rate |
1 | 0 | 0 |
attr |
SequenceFeatureExtractor.padding_value |
1 | 0 | 0 |
attr |
SequenceFeatureExtractor.padding_side |
1 | 0 | 0 |
attr |
SequenceFeatureExtractor.return_attention_mask |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.feature_extraction_utils (22 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FeatureExtractionMixin.init |
2 | 0 | 0 |
meth |
FeatureExtractionMixin.from_pretrained |
8 | 7 | 0 |
meth |
FeatureExtractionMixin.save_pretrained |
4 | 2 | 0 |
meth |
FeatureExtractionMixin.get_feature_extractor_dict |
3 | 2 | 0 |
meth |
FeatureExtractionMixin.from_dict |
3 | 2 | 0 |
meth |
FeatureExtractionMixin.to_json_file |
2 | 1 | 0 |
meth |
FeatureExtractionMixin.repr |
1 | 0 | 0 |
meth |
FeatureExtractionMixin.register_for_auto_class |
2 | 0 | 0 |
meth |
BatchFeature.init |
4 | 3 | 0 |
meth |
BatchFeature.getitem |
2 | 2 | 1 |
meth |
BatchFeature.getattr |
2 | 1 | 0 |
meth |
BatchFeature.getstate |
1 | 0 | 0 |
meth |
BatchFeature.setstate |
2 | 0 | 0 |
meth |
BatchFeature._get_is_as_tensor_fns |
2 | 1 | 0 |
meth |
BatchFeature.convert_to_tensors |
3 | 2 | 0 |
meth |
BatchFeature.to |
3 | 1 | 0 |
attr |
BatchFeature.skip_tensor_conversion |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.generation.candidate_generator (54 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PromptLookupCandidateGenerator.init |
7 | 6 | 0 |
meth |
PromptLookupCandidateGenerator.update_candidate_strategy |
4 | 3 | 0 |
attr |
PromptLookupCandidateGenerator.num_output_tokens |
1 | 0 | 0 |
attr |
PromptLookupCandidateGenerator.max_matching_ngram_size |
1 | 0 | 0 |
attr |
PromptLookupCandidateGenerator.max_length |
1 | 0 | 0 |
attr |
PromptLookupCandidateGenerator.eos_token_id |
1 | 0 | 0 |
attr |
PromptLookupCandidateGenerator.logits_processor |
1 | 0 | 0 |
attr |
PromptLookupCandidateGenerator.vocab_size |
1 | 0 | 0 |
meth |
CandidateGenerator.update_candidate_strategy |
4 | 3 | 0 |
meth |
EarlyExitCandidateGenerator.init |
7 | 6 | 0 |
attr |
EarlyExitCandidateGenerator.assistant_early_exit |
1 | 0 | 0 |
meth |
UniversalSpeculativeDecodingGenerator.init |
10 | 9 | 0 |
meth |
AssistedCandidateGeneratorDifferentTokenizers.init |
9 | 8 | 0 |
meth |
AssistedCandidateGeneratorDifferentTokenizers._get_longest_diag_dict |
3 | 0 | 0 |
meth |
AssistedCandidateGeneratorDifferentTokenizers._get_longest_diag_index |
2 | 0 | 0 |
meth |
AssistedCandidateGeneratorDifferentTokenizers._get_tokens_diag |
3 | 0 | 0 |
meth |
AssistedCandidateGeneratorDifferentTokenizers.convert_source_tokens_to_target_tokens |
4 | 0 | 0 |
attr |
AssistedCandidateGeneratorDifferentTokenizers.target_tokenizer |
1 | 0 | 0 |
attr |
AssistedCandidateGeneratorDifferentTokenizers.assistant_tokenizer |
1 | 0 | 0 |
attr |
AssistedCandidateGeneratorDifferentTokenizers.prev_assistant_ids |
1 | 0 | 0 |
attr |
AssistedCandidateGeneratorDifferentTokenizers.target_lookbehind |
1 | 0 | 0 |
attr |
AssistedCandidateGeneratorDifferentTokenizers.assistant_lookbehind |
1 | 0 | 0 |
meth |
AssistantVocabTranslatorCache.cleanup |
1 | 0 | 0 |
attr |
AssistantVocabTranslatorCache._cache |
1 | 0 | 0 |
meth |
AssistedCandidateGenerator.init |
7 | 6 | 0 |
meth |
AssistedCandidateGenerator.update_candidate_strategy |
4 | 3 | 0 |
attr |
AssistedCandidateGenerator.assistant_model |
1 | 0 | 0 |
attr |
AssistedCandidateGenerator.assistant_generation_config |
1 | 0 | 0 |
attr |
AssistedCandidateGenerator.num_assistant_tokens |
1 | 0 | 0 |
attr |
AssistedCandidateGenerator.assistant_confidence_threshold |
1 | 0 | 0 |
attr |
AssistedCandidateGenerator.assistant_kwargs |
1 | 0 | 0 |
attr |
AssistedCandidateGenerator.logits_processor |
1 | 0 | 0 |
attr |
AssistedCandidateGenerator.generation_config |
1 | 0 | 0 |
attr |
AssistedCandidateGenerator.main_model_min_length |
1 | 0 | 0 |
attr |
AssistedCandidateGenerator.main_model_max_length |
1 | 0 | 0 |
attr |
AssistedCandidateGenerator.input_ids_key |
1 | 0 | 0 |
attr |
AssistedCandidateGenerator.probs |
1 | 0 | 0 |
attr |
AssistedCandidateGenerator.matches |
1 | 0 | 0 |
meth |
AssistantToTargetTranslator.init |
6 | 5 | 0 |
meth |
AssistantToTargetTranslator.unmap_input_ids |
1 | 0 | 0 |
meth |
AssistantToTargetTranslator._get_assistant_to_target_input_ids |
1 | 0 | 0 |
meth |
AssistantToTargetTranslator.get_target_ids |
4 | 2 | 0 |
attr |
AssistantToTargetTranslator.assistant_prune_lm_head |
1 | 0 | 0 |
attr |
AssistantToTargetTranslator.assistant_overlap_token_ids |
1 | 0 | 0 |
attr |
AssistantToTargetTranslator.map_input_embeddings |
1 | 0 | 0 |
transformers.generation.configuration_utils (103 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SynthIDTextWatermarkingConfig.init |
8 | 7 | 0 |
meth |
SynthIDTextWatermarkingConfig.validate |
1 | 0 | 0 |
meth |
SynthIDTextWatermarkingConfig.construct_processor |
3 | 2 | 0 |
meth |
GenerationConfig.init |
2 | 0 | 0 |
meth |
GenerationConfig.hash |
1 | 0 | 0 |
meth |
GenerationConfig.eq |
2 | 0 | 0 |
meth |
GenerationConfig.repr |
1 | 0 | 0 |
meth |
GenerationConfig.validate |
2 | 0 | 0 |
meth |
GenerationConfig.save_pretrained |
5 | 3 | 0 |
meth |
GenerationConfig.from_pretrained |
9 | 8 | 0 |
meth |
GenerationConfig._dict_from_json_file |
2 | 1 | 0 |
meth |
GenerationConfig.from_dict |
3 | 2 | 0 |
meth |
GenerationConfig.update |
4 | 0 | 0 |
attr |
GenerationConfig.max_length |
1 | 0 | 0 |
attr |
GenerationConfig.max_new_tokens |
1 | 0 | 0 |
attr |
GenerationConfig.min_length |
1 | 0 | 0 |
attr |
GenerationConfig.min_new_tokens |
1 | 0 | 0 |
attr |
GenerationConfig.early_stopping |
1 | 0 | 0 |
attr |
GenerationConfig.max_time |
1 | 0 | 0 |
attr |
GenerationConfig.stop_strings |
1 | 0 | 0 |
attr |
GenerationConfig.do_sample |
1 | 0 | 0 |
attr |
GenerationConfig.num_beams |
1 | 0 | 0 |
attr |
GenerationConfig.use_cache |
1 | 0 | 0 |
attr |
GenerationConfig.cache_implementation |
1 | 0 | 0 |
attr |
GenerationConfig.cache_config |
1 | 0 | 0 |
attr |
GenerationConfig.temperature |
1 | 0 | 0 |
attr |
GenerationConfig.top_k |
1 | 0 | 0 |
attr |
GenerationConfig.top_p |
1 | 0 | 0 |
attr |
GenerationConfig.min_p |
1 | 0 | 0 |
attr |
GenerationConfig.top_h |
1 | 0 | 0 |
attr |
GenerationConfig.typical_p |
1 | 0 | 0 |
attr |
GenerationConfig.epsilon_cutoff |
1 | 0 | 0 |
attr |
GenerationConfig.eta_cutoff |
1 | 0 | 0 |
attr |
GenerationConfig.repetition_penalty |
1 | 0 | 0 |
attr |
GenerationConfig.encoder_repetition_penalty |
1 | 0 | 0 |
attr |
GenerationConfig.length_penalty |
1 | 0 | 0 |
attr |
GenerationConfig.no_repeat_ngram_size |
1 | 0 | 0 |
attr |
GenerationConfig.bad_words_ids |
1 | 0 | 0 |
attr |
GenerationConfig.renormalize_logits |
1 | 0 | 0 |
attr |
GenerationConfig.forced_bos_token_id |
1 | 0 | 0 |
attr |
GenerationConfig.forced_eos_token_id |
1 | 0 | 0 |
attr |
GenerationConfig.remove_invalid_values |
1 | 0 | 0 |
attr |
GenerationConfig.exponential_decay_length_penalty |
1 | 0 | 0 |
attr |
GenerationConfig.suppress_tokens |
1 | 0 | 0 |
attr |
GenerationConfig.begin_suppress_tokens |
1 | 0 | 0 |
attr |
GenerationConfig.sequence_bias |
1 | 0 | 0 |
attr |
GenerationConfig.token_healing |
1 | 0 | 0 |
attr |
GenerationConfig.guidance_scale |
1 | 0 | 0 |
attr |
GenerationConfig.watermarking_config |
1 | 0 | 0 |
attr |
GenerationConfig.num_return_sequences |
1 | 0 | 0 |
attr |
GenerationConfig.output_attentions |
1 | 0 | 0 |
attr |
GenerationConfig.output_hidden_states |
1 | 0 | 0 |
attr |
GenerationConfig.output_scores |
1 | 0 | 0 |
attr |
GenerationConfig.output_logits |
1 | 0 | 0 |
attr |
GenerationConfig.return_dict_in_generate |
1 | 0 | 0 |
attr |
GenerationConfig.pad_token_id |
1 | 0 | 0 |
attr |
GenerationConfig.bos_token_id |
1 | 0 | 0 |
attr |
GenerationConfig.eos_token_id |
1 | 0 | 0 |
attr |
GenerationConfig.encoder_no_repeat_ngram_size |
1 | 0 | 0 |
attr |
GenerationConfig.decoder_start_token_id |
1 | 0 | 0 |
attr |
GenerationConfig.is_assistant |
1 | 0 | 0 |
attr |
GenerationConfig.num_assistant_tokens |
1 | 0 | 0 |
attr |
GenerationConfig.num_assistant_tokens_schedule |
1 | 0 | 0 |
attr |
GenerationConfig.assistant_confidence_threshold |
1 | 0 | 0 |
attr |
GenerationConfig.prompt_lookup_num_tokens |
1 | 0 | 0 |
attr |
GenerationConfig.max_matching_ngram_size |
1 | 0 | 0 |
attr |
GenerationConfig.assistant_early_exit |
1 | 0 | 0 |
attr |
GenerationConfig.assistant_lookbehind |
1 | 0 | 0 |
attr |
GenerationConfig.target_lookbehind |
1 | 0 | 0 |
attr |
GenerationConfig.compile_config |
1 | 0 | 0 |
attr |
GenerationConfig.disable_compile |
1 | 0 | 0 |
attr |
GenerationConfig.low_memory |
1 | 0 | 0 |
attr |
GenerationConfig.penalty_alpha |
1 | 0 | 0 |
attr |
GenerationConfig.dola_layers |
1 | 0 | 0 |
attr |
GenerationConfig.diversity_penalty |
1 | 0 | 0 |
attr |
GenerationConfig.num_beam_groups |
1 | 0 | 0 |
attr |
GenerationConfig.constraints |
1 | 0 | 0 |
attr |
GenerationConfig.force_words_ids |
1 | 0 | 0 |
attr |
GenerationConfig.prefill_chunk_size |
1 | 0 | 0 |
attr |
GenerationConfig.transformers_version |
1 | 0 | 0 |
meth |
WatermarkingConfig.init |
6 | 5 | 0 |
meth |
WatermarkingConfig.validate |
1 | 0 | 0 |
meth |
WatermarkingConfig.construct_processor |
3 | 2 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
BaseWatermarkingConfig.from_dict |
3 | 0 | 0 |
meth |
BaseWatermarkingConfig.to_json_file |
2 | 1 | 0 |
meth |
BaseWatermarkingConfig.iter |
1 | 0 | 0 |
meth |
BaseWatermarkingConfig.repr |
1 | 0 | 0 |
meth |
BaseWatermarkingConfig.to_json_string |
1 | 0 | 0 |
meth |
BaseWatermarkingConfig.update |
2 | 0 | 0 |
meth |
BaseWatermarkingConfig.validate |
1 | 0 | 0 |
meth |
BaseWatermarkingConfig.construct_processor |
2 | 0 | 0 |
transformers.generation.continuous_batching.cache (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
PagedAttentionCache.config |
1 | 0 | 0 |
attr |
PagedAttentionCache.dtype |
1 | 0 | 0 |
attr |
PagedAttentionCache.device |
1 | 0 | 0 |
attr |
PagedAttentionCache.block_size |
1 | 0 | 0 |
attr |
PagedAttentionCache.num_groups |
1 | 0 | 0 |
attr |
PagedAttentionCache.sliding_windows |
1 | 0 | 0 |
attr |
PagedAttentionCache.layer_index_to_group_indices |
1 | 0 | 0 |
attr |
PagedAttentionCache.num_blocks |
1 | 0 | 0 |
attr |
PagedAttentionCache.max_batch_tokens |
1 | 0 | 0 |
attr |
PagedAttentionCache.num_pages |
1 | 0 | 0 |
attr |
PagedAttentionCache.cache_shape |
1 | 0 | 0 |
attr |
PagedAttentionCache.allow_block_sharing |
1 | 0 | 0 |
attr |
PagedAttentionCache.num_full_attention_groups |
1 | 0 | 0 |
attr |
PagedAttentionCache.num_sliding_attention_groups |
1 | 0 | 0 |
attr |
PagedAttentionCache.max_sliding_window_blocks_per_request |
1 | 0 | 0 |
attr |
PagedAttentionCache.use_prefix_sharing |
1 | 0 | 0 |
attr |
PagedAttentionMemoryHandler.block_size |
1 | 0 | 0 |
attr |
PagedAttentionMemoryHandler.page_size |
1 | 0 | 0 |
attr |
PagedAttentionMemoryHandler.num_groups |
1 | 0 | 0 |
attr |
PagedAttentionMemoryHandler.group_size |
1 | 0 | 0 |
attr |
PagedAttentionMemoryHandler.peak_activation_per_token |
1 | 0 | 0 |
attr |
PagedAttentionMemoryHandler.num_attention_masks |
1 | 0 | 0 |
transformers.generation.continuous_batching.cache_manager (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
SlidingAttentionCacheAllocator.block_size |
1 | 0 | 0 |
attr |
SlidingAttentionCacheAllocator.sliding_window |
1 | 0 | 0 |
attr |
BlockManager.num_blocks |
1 | 0 | 0 |
attr |
BlockManager.block_size |
1 | 0 | 0 |
attr |
FullAttentionCacheAllocator.block_size |
1 | 0 | 0 |
transformers.generation.continuous_batching.continuous_api (36 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ContinuousBatchingManager.iter |
1 | 0 | 0 |
attr |
ContinuousBatchingManager.model |
1 | 0 | 0 |
attr |
ContinuousBatchingManager.manual_eviction |
1 | 0 | 0 |
attr |
ContinuousBatchingManager.input_queue |
1 | 0 | 0 |
attr |
ContinuousBatchingManager.output_queue |
1 | 0 | 0 |
attr |
ContinuousBatchingManager.stop_event |
1 | 0 | 0 |
attr |
ContinuousBatchingManager.generation_config |
1 | 0 | 0 |
attr |
ContinuousBatchingManager.log_prob_generation |
1 | 0 | 0 |
attr |
ContinuousBatchingManager.do_sample |
1 | 0 | 0 |
attr |
ContinuousBatchingManager.num_return_sequences |
1 | 0 | 0 |
attr |
ContinuousBatchingManager.use_cuda_graph |
1 | 0 | 0 |
attr |
ContinuousBatchingManager.q_padding_interval_size |
1 | 0 | 0 |
attr |
ContinuousBatchingManager.kv_padding_interval_size |
1 | 0 | 0 |
attr |
ContinuousBatchingManager.max_cached_graphs |
1 | 0 | 0 |
attr |
ContinuousBatchingManager.use_async_batching |
1 | 0 | 0 |
meth |
ContinuousMixin.generate_batch |
11 | 10 | 0 |
meth |
ContinuousBatchProcessor.handle_batch_error |
2 | 0 | 0 |
attr |
ContinuousBatchProcessor.cache |
1 | 0 | 0 |
attr |
ContinuousBatchProcessor.config |
1 | 0 | 0 |
attr |
ContinuousBatchProcessor.generation_config |
1 | 0 | 0 |
attr |
ContinuousBatchProcessor.input_queue |
1 | 0 | 0 |
attr |
ContinuousBatchProcessor.output_queue |
1 | 0 | 0 |
attr |
ContinuousBatchProcessor.stop_event |
1 | 0 | 0 |
attr |
ContinuousBatchProcessor.model_device |
1 | 0 | 0 |
attr |
ContinuousBatchProcessor.model_dtype |
1 | 0 | 0 |
attr |
ContinuousBatchProcessor.scheduler |
1 | 0 | 0 |
attr |
ContinuousBatchProcessor.manual_eviction |
1 | 0 | 0 |
attr |
ContinuousBatchProcessor.sliding_window |
1 | 0 | 0 |
attr |
ContinuousBatchProcessor.q_padding_interval_size |
1 | 0 | 0 |
attr |
ContinuousBatchProcessor.kv_padding_interval_size |
1 | 0 | 0 |
attr |
ContinuousBatchProcessor.max_cached_graphs |
1 | 0 | 0 |
attr |
ContinuousBatchProcessor.use_cuda_graph |
1 | 0 | 0 |
attr |
ContinuousBatchProcessor.max_batch_tokens |
1 | 0 | 0 |
attr |
ContinuousBatchProcessor.metrics |
1 | 0 | 0 |
attr |
ContinuousBatchProcessor.use_async_batching |
1 | 0 | 0 |
transformers.generation.continuous_batching.input_outputs (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
HostDeviceIOPair.host_io |
1 | 0 | 0 |
attr |
HostDeviceIOPair.device_io |
1 | 0 | 0 |
attr |
HostDeviceIOPair.h2d_over |
1 | 0 | 0 |
attr |
HostDeviceIOPair.compute_over |
1 | 0 | 0 |
attr |
HostDeviceIOPair.d2h_over |
1 | 0 | 0 |
attr |
ContinuousBatchingIOs.cache |
1 | 0 | 0 |
attr |
ContinuousBatchingIOs.device |
1 | 0 | 0 |
attr |
ContinuousBatchingIOs.config |
1 | 0 | 0 |
attr |
ContinuousBatchingIOs.model_dtype |
1 | 0 | 0 |
attr |
ContinuousBatchingIOs.sliding_window |
1 | 0 | 0 |
attr |
ContinuousBatchingIOs.actual_query_length |
1 | 0 | 0 |
attr |
ContinuousBatchingIOs.actual_key_length |
1 | 0 | 0 |
attr |
ContinuousBatchingIOs.actual_batch_size |
1 | 0 | 0 |
attr |
ContinuousBatchingIOs.actual_read_sizes |
1 | 0 | 0 |
attr |
ContinuousBatchingIOs.actual_write_sizes |
1 | 0 | 0 |
attr |
ContinuousBatchingIOs.compute_stream |
1 | 0 | 0 |
attr |
ContinuousBatchingAsyncIOs.current_pair |
1 | 0 | 0 |
attr |
ContinuousBatchingAsyncIOs.io_pairs |
1 | 0 | 0 |
attr |
ContinuousBatchingAsyncIOs.h2d_stream |
1 | 0 | 0 |
attr |
ContinuousBatchingAsyncIOs.d2h_stream |
1 | 0 | 0 |
attr |
ContinuousBatchingAsyncIOs.compute_stream |
1 | 0 | 0 |
attr |
ContinuousBatchingAsyncIOs.max_batch_tokens |
1 | 0 | 0 |
transformers.generation.continuous_batching.requests (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RequestState.post_init |
1 | 0 | 0 |
meth |
RequestState.log_end_of_request |
1 | 0 | 0 |
meth |
RequestState.repr |
1 | 0 | 0 |
meth |
RequestState.to_generation_output |
1 | 0 | 0 |
attr |
FutureRequestState.state |
1 | 0 | 0 |
attr |
FutureRequestState.has_new_token |
1 | 0 | 0 |
attr |
FutureRequestState.complete_blocks |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.generation.continuous_batching.scheduler (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FIFOScheduler.init |
4 | 3 | 0 |
attr |
FIFOScheduler.safety_margin |
1 | 0 | 0 |
meth |
Scheduler.init |
3 | 2 | 0 |
meth |
Scheduler.add_waiting_request |
2 | 1 | 0 |
meth |
Scheduler.set_request_cancellation |
2 | 1 | 0 |
meth |
Scheduler.clear_cancelled_requests |
1 | 0 | 0 |
attr |
Scheduler.cache |
1 | 0 | 0 |
attr |
Scheduler.retain_cache_on_finish |
1 | 0 | 0 |
attr |
Scheduler.block_new_requests |
1 | 0 | 0 |
attr |
Scheduler.cache_budget_module |
1 | 0 | 0 |
transformers.generation.continuous_batching.utils (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
CudaGraphBuffer.max_size |
1 | 0 | 0 |
transformers.generation.logits_process (166 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SynthIDTextWatermarkLogitsProcessor.init |
9 | 8 | 0 |
meth |
SynthIDTextWatermarkLogitsProcessor._init_state |
2 | 1 | 0 |
meth |
SynthIDTextWatermarkLogitsProcessor._check_input_ids_shape |
2 | 1 | 0 |
attr |
SynthIDTextWatermarkLogitsProcessor.ngram_len |
1 | 0 | 0 |
attr |
SynthIDTextWatermarkLogitsProcessor.keys |
1 | 0 | 0 |
attr |
SynthIDTextWatermarkLogitsProcessor.sampling_table |
1 | 0 | 0 |
attr |
SynthIDTextWatermarkLogitsProcessor.context_history_size |
1 | 0 | 0 |
attr |
SynthIDTextWatermarkLogitsProcessor.device |
1 | 0 | 0 |
attr |
SynthIDTextWatermarkLogitsProcessor.state |
1 | 0 | 0 |
attr |
SynthIDTextWatermarkLogitsProcessor.skip_first_ngram_calls |
1 | 0 | 0 |
attr |
SynthIDTextWatermarkLogitsProcessor.debug_mode |
1 | 0 | 0 |
meth |
TopKLogitsWarper.init |
4 | 3 | 0 |
attr |
TopKLogitsWarper.top_k |
1 | 0 | 0 |
attr |
TopKLogitsWarper.filter_value |
1 | 0 | 0 |
meth |
MinPLogitsWarper.init |
4 | 3 | 0 |
attr |
MinPLogitsWarper.min_p |
1 | 0 | 0 |
attr |
MinPLogitsWarper.filter_value |
1 | 0 | 0 |
attr |
MinPLogitsWarper.min_tokens_to_keep |
1 | 0 | 0 |
meth |
SuppressTokensAtBeginLogitsProcessor.init |
4 | 1 | 0 |
meth |
SuppressTokensAtBeginLogitsProcessor.set_begin_index |
2 | 0 | 0 |
attr |
SuppressTokensAtBeginLogitsProcessor.begin_suppress_tokens |
1 | 0 | 0 |
attr |
SuppressTokensAtBeginLogitsProcessor.begin_index |
1 | 0 | 0 |
meth |
TopPLogitsWarper.init |
4 | 3 | 0 |
attr |
TopPLogitsWarper.top_p |
1 | 0 | 0 |
attr |
TopPLogitsWarper.filter_value |
1 | 0 | 0 |
attr |
TopPLogitsWarper.min_tokens_to_keep |
1 | 0 | 0 |
meth |
SequenceBiasLogitsProcessor.init |
2 | 1 | 0 |
meth |
SequenceBiasLogitsProcessor._prepare_bias_variables |
2 | 1 | 0 |
meth |
SequenceBiasLogitsProcessor._validate_arguments |
1 | 0 | 0 |
meth |
SequenceBiasLogitsProcessor._convert_list_arguments_into_dict |
1 | 0 | 0 |
attr |
SequenceBiasLogitsProcessor.sequence_bias |
1 | 0 | 0 |
attr |
SequenceBiasLogitsProcessor.length_1_bias |
1 | 0 | 0 |
attr |
SequenceBiasLogitsProcessor.prepared_bias_variables |
1 | 0 | 0 |
meth |
WhisperTimeStampLogitsProcessor.init |
4 | 3 | 0 |
meth |
WhisperTimeStampLogitsProcessor.set_begin_index |
2 | 0 | 0 |
attr |
WhisperTimeStampLogitsProcessor.no_timestamps_token_id |
1 | 0 | 0 |
attr |
WhisperTimeStampLogitsProcessor.timestamp_begin |
1 | 0 | 0 |
attr |
WhisperTimeStampLogitsProcessor.eos_token_id |
1 | 0 | 0 |
attr |
WhisperTimeStampLogitsProcessor.begin_index |
1 | 0 | 0 |
attr |
WhisperTimeStampLogitsProcessor.max_initial_timestamp_index |
1 | 0 | 0 |
meth |
LogitsProcessorList.call |
4 | 3 | 0 |
meth |
PrefixConstrainedLogitsProcessor.init |
3 | 2 | 0 |
meth |
NoBadWordsLogitsProcessor.init |
3 | 2 | 0 |
meth |
NoBadWordsLogitsProcessor._validate_arguments |
1 | 0 | 0 |
attr |
NoBadWordsLogitsProcessor.bad_word_ids |
1 | 0 | 0 |
meth |
EtaLogitsWarper.init |
5 | 4 | 0 |
attr |
EtaLogitsWarper.epsilon |
1 | 0 | 0 |
attr |
EtaLogitsWarper.filter_value |
1 | 0 | 0 |
attr |
EtaLogitsWarper.min_tokens_to_keep |
1 | 0 | 0 |
meth |
ForcedEOSTokenLogitsProcessor.init |
4 | 3 | 0 |
attr |
ForcedEOSTokenLogitsProcessor.max_length |
1 | 0 | 0 |
attr |
ForcedEOSTokenLogitsProcessor.eos_token_id |
1 | 0 | 0 |
meth |
ForcedBOSTokenLogitsProcessor.init |
2 | 1 | 0 |
attr |
ForcedBOSTokenLogitsProcessor.bos_token_id |
1 | 0 | 0 |
meth |
ExponentialDecayLengthPenalty.init |
4 | 3 | 0 |
attr |
ExponentialDecayLengthPenalty.regulation_start |
1 | 0 | 0 |
attr |
ExponentialDecayLengthPenalty.regulation_factor |
1 | 0 | 0 |
attr |
ExponentialDecayLengthPenalty.eos_token_id |
1 | 0 | 0 |
meth |
UnbatchedClassifierFreeGuidanceLogitsProcessor.init |
6 | 4 | 0 |
meth |
UnbatchedClassifierFreeGuidanceLogitsProcessor.get_unconditional_logits |
2 | 0 | 0 |
meth |
UnbatchedClassifierFreeGuidanceLogitsProcessor.call |
3 | 0 | 0 |
attr |
UnbatchedClassifierFreeGuidanceLogitsProcessor.guidance_scale |
1 | 0 | 0 |
attr |
UnbatchedClassifierFreeGuidanceLogitsProcessor.model |
1 | 0 | 0 |
attr |
UnbatchedClassifierFreeGuidanceLogitsProcessor.unconditional_context |
1 | 0 | 0 |
meth |
MinNewTokensLengthLogitsProcessor.init |
5 | 4 | 0 |
attr |
MinNewTokensLengthLogitsProcessor.prompt_length_to_skip |
1 | 0 | 0 |
attr |
MinNewTokensLengthLogitsProcessor.min_new_tokens |
1 | 0 | 0 |
attr |
MinNewTokensLengthLogitsProcessor.eos_token_id |
1 | 0 | 0 |
meth |
MinLengthLogitsProcessor.init |
4 | 3 | 0 |
attr |
MinLengthLogitsProcessor.min_length |
1 | 0 | 0 |
attr |
MinLengthLogitsProcessor.eos_token_id |
1 | 0 | 0 |
meth |
EpsilonLogitsWarper.init |
4 | 3 | 0 |
attr |
EpsilonLogitsWarper.epsilon |
1 | 0 | 0 |
attr |
EpsilonLogitsWarper.filter_value |
1 | 0 | 0 |
attr |
EpsilonLogitsWarper.min_tokens_to_keep |
1 | 0 | 0 |
meth |
TemperatureLogitsWarper.init |
2 | 1 | 0 |
attr |
TemperatureLogitsWarper.temperature |
1 | 0 | 0 |
meth |
WatermarkLogitsProcessor.init |
8 | 5 | 0 |
meth |
WatermarkLogitsProcessor.set_seed |
2 | 1 | 0 |
attr |
WatermarkLogitsProcessor.vocab_size |
1 | 0 | 0 |
attr |
WatermarkLogitsProcessor.greenlist_size |
1 | 0 | 0 |
attr |
WatermarkLogitsProcessor.bias |
1 | 0 | 0 |
attr |
WatermarkLogitsProcessor.seeding_scheme |
1 | 0 | 0 |
attr |
WatermarkLogitsProcessor.rng |
1 | 0 | 0 |
attr |
WatermarkLogitsProcessor.hash_key |
1 | 0 | 0 |
attr |
WatermarkLogitsProcessor.context_width |
1 | 0 | 0 |
attr |
WatermarkLogitsProcessor.table_size |
1 | 0 | 0 |
attr |
WatermarkLogitsProcessor.fixed_table |
1 | 0 | 0 |
meth |
EncoderNoRepeatNGramLogitsProcessor.init |
3 | 2 | 0 |
attr |
EncoderNoRepeatNGramLogitsProcessor.ngram_size |
1 | 0 | 0 |
attr |
EncoderNoRepeatNGramLogitsProcessor.batch_size |
1 | 0 | 0 |
attr |
EncoderNoRepeatNGramLogitsProcessor.generated_ngrams |
1 | 0 | 0 |
meth |
ClassifierFreeGuidanceLogitsProcessor.init |
2 | 0 | 0 |
attr |
ClassifierFreeGuidanceLogitsProcessor.guidance_scale |
1 | 0 | 0 |
meth |
EncoderRepetitionPenaltyLogitsProcessor.init |
3 | 2 | 0 |
attr |
EncoderRepetitionPenaltyLogitsProcessor.penalty |
1 | 0 | 0 |
attr |
EncoderRepetitionPenaltyLogitsProcessor.encoder_input_ids |
1 | 0 | 0 |
meth |
TopHLogitsWarper.init |
3 | 2 | 0 |
attr |
TopHLogitsWarper.top_n |
1 | 0 | 0 |
attr |
TopHLogitsWarper.top_h |
1 | 0 | 0 |
attr |
TopHLogitsWarper.filter_value |
1 | 0 | 0 |
meth |
RepetitionPenaltyLogitsProcessor.init |
3 | 2 | 0 |
meth |
RepetitionPenaltyLogitsProcessor.set_continuous_batching_context |
3 | 2 | 0 |
attr |
RepetitionPenaltyLogitsProcessor.penalty |
1 | 0 | 0 |
attr |
RepetitionPenaltyLogitsProcessor.prompt_ignore_length |
1 | 0 | 0 |
attr |
RepetitionPenaltyLogitsProcessor.logits_indices |
1 | 0 | 0 |
attr |
RepetitionPenaltyLogitsProcessor.cu_seq_lens_q |
1 | 0 | 0 |
meth |
NoRepeatNGramLogitsProcessor.init |
2 | 1 | 0 |
attr |
NoRepeatNGramLogitsProcessor.ngram_size |
1 | 0 | 0 |
meth |
AlternatingCodebooksLogitsProcessor.init |
4 | 3 | 0 |
attr |
AlternatingCodebooksLogitsProcessor.input_start_len |
1 | 0 | 0 |
attr |
AlternatingCodebooksLogitsProcessor.semantic_vocab_size |
1 | 0 | 0 |
attr |
AlternatingCodebooksLogitsProcessor.codebook_size |
1 | 0 | 0 |
meth |
SuppressTokensLogitsProcessor.init |
3 | 1 | 0 |
attr |
SuppressTokensLogitsProcessor.suppress_tokens |
1 | 0 | 0 |
meth |
TypicalLogitsWarper.init |
4 | 3 | 0 |
attr |
TypicalLogitsWarper.filter_value |
1 | 0 | 0 |
attr |
TypicalLogitsWarper.mass |
1 | 0 | 0 |
attr |
TypicalLogitsWarper.min_tokens_to_keep |
1 | 0 | 0 |
meth |
BarkEosPrioritizerLogitsProcessor.init |
4 | 3 | 0 |
attr |
BarkEosPrioritizerLogitsProcessor.eos_token_id |
1 | 0 | 0 |
attr |
BarkEosPrioritizerLogitsProcessor.min_eos_p |
1 | 0 | 0 |
meth |
SynthIDTextWatermarkState.init |
5 | 4 | 0 |
attr |
SynthIDTextWatermarkState.context |
1 | 0 | 0 |
attr |
SynthIDTextWatermarkState.context_history |
1 | 0 | 0 |
attr |
SynthIDTextWatermarkState.num_calls |
1 | 0 | 0 |
meth |
DiaEOSDelayPatternLogitsProcessor.init |
5 | 4 | 0 |
attr |
DiaEOSDelayPatternLogitsProcessor.num_channels |
1 | 0 | 0 |
attr |
DiaEOSDelayPatternLogitsProcessor.active_batches |
1 | 0 | 0 |
attr |
DiaEOSDelayPatternLogitsProcessor.delay_pattern |
1 | 0 | 0 |
attr |
DiaEOSDelayPatternLogitsProcessor.eos_token_id |
1 | 0 | 0 |
attr |
DiaEOSDelayPatternLogitsProcessor.max_generation_len |
1 | 0 | 0 |
attr |
DiaEOSDelayPatternLogitsProcessor.device |
1 | 0 | 0 |
meth |
WhisperNoSpeechDetection.init |
4 | 3 | 0 |
meth |
WhisperNoSpeechDetection.set_model |
2 | 0 | 0 |
meth |
WhisperNoSpeechDetection.set_inputs |
2 | 0 | 0 |
meth |
WhisperNoSpeechDetection.set_begin_index |
2 | 0 | 0 |
prop |
WhisperNoSpeechDetection.no_speech_prob |
1 | 0 | 0 |
attr |
WhisperNoSpeechDetection.no_speech_token |
1 | 0 | 0 |
attr |
WhisperNoSpeechDetection.start_of_trans_offset |
1 | 0 | 0 |
attr |
WhisperNoSpeechDetection.begin_index |
1 | 0 | 0 |
attr |
WhisperNoSpeechDetection.is_scores_logprobs |
1 | 0 | 0 |
attr |
WhisperNoSpeechDetection.model |
1 | 0 | 0 |
attr |
WhisperNoSpeechDetection.inputs |
1 | 0 | 0 |
meth |
DiaClassifierFreeGuidanceLogitsProcessor.init |
3 | 2 | 0 |
attr |
DiaClassifierFreeGuidanceLogitsProcessor.guidance_top_k |
1 | 0 | 0 |
attr |
DiaClassifierFreeGuidanceLogitsProcessor.guidance_scale |
1 | 0 | 0 |
meth |
DiaEOSChannelFilterLogitsProcessor.init |
3 | 2 | 0 |
attr |
DiaEOSChannelFilterLogitsProcessor.num_channels |
1 | 0 | 0 |
attr |
DiaEOSChannelFilterLogitsProcessor.eos_id |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.generation.stopping_criteria (37 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MaxLengthCriteria.init |
3 | 2 | 0 |
meth |
MaxLengthCriteria.call |
4 | 3 | 0 |
attr |
MaxLengthCriteria.max_length |
1 | 0 | 0 |
attr |
MaxLengthCriteria.max_position_embeddings |
1 | 0 | 0 |
meth |
MaxTimeCriteria.init |
3 | 2 | 0 |
meth |
MaxTimeCriteria.call |
4 | 3 | 0 |
attr |
MaxTimeCriteria.max_time |
1 | 0 | 0 |
attr |
MaxTimeCriteria.initial_timestamp |
1 | 0 | 0 |
meth |
StopStringCriteria.init |
3 | 2 | 0 |
meth |
StopStringCriteria.clean_and_embed_tokens_with_cache |
4 | 0 | 0 |
meth |
StopStringCriteria.clean_tokenizer_vocab |
3 | 0 | 0 |
meth |
StopStringCriteria._stop_string_get_matching_positions |
4 | 1 | 0 |
meth |
StopStringCriteria._stop_string_create_embedding_vec |
4 | 1 | 0 |
meth |
StopStringCriteria.call |
4 | 3 | 0 |
attr |
StopStringCriteria.maximum_token_len |
1 | 0 | 0 |
attr |
StopStringCriteria.num_stop_strings |
1 | 0 | 0 |
attr |
StopStringCriteria.target_lens |
1 | 0 | 0 |
meth |
EosTokenCriteria.init |
2 | 1 | 0 |
meth |
EosTokenCriteria.call |
4 | 3 | 0 |
attr |
EosTokenCriteria.eos_token_id |
1 | 0 | 0 |
meth |
StoppingCriteria.call |
4 | 3 | 0 |
meth |
StoppingCriteriaList.call |
4 | 3 | 0 |
meth |
ConfidenceCriteria.init |
2 | 0 | 0 |
meth |
ConfidenceCriteria.call |
4 | 3 | 0 |
attr |
ConfidenceCriteria.assistant_confidence_threshold |
1 | 0 | 0 |
attr |
STOP_STRING_EMBEDDING_CACHE |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.generation.streamers (35 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AsyncTextIteratorStreamer.init |
5 | 3 | 0 |
meth |
AsyncTextIteratorStreamer.on_finalized_text |
3 | 2 | 0 |
meth |
AsyncTextIteratorStreamer.aiter |
1 | 0 | 0 |
meth |
AsyncTextIteratorStreamer.anext |
1 | 0 | 0 |
attr |
AsyncTextIteratorStreamer.text_queue |
1 | 0 | 0 |
attr |
AsyncTextIteratorStreamer.stop_signal |
1 | 0 | 0 |
attr |
AsyncTextIteratorStreamer.timeout |
1 | 0 | 0 |
attr |
AsyncTextIteratorStreamer.loop |
1 | 0 | 0 |
attr |
AsyncTextIteratorStreamer.has_asyncio_timeout |
1 | 0 | 0 |
meth |
TextStreamer.init |
4 | 2 | 0 |
meth |
TextStreamer.put |
2 | 0 | 0 |
meth |
TextStreamer.end |
1 | 0 | 0 |
meth |
TextStreamer.on_finalized_text |
3 | 2 | 0 |
meth |
TextStreamer._is_chinese_char |
2 | 0 | 0 |
attr |
TextStreamer.tokenizer |
1 | 0 | 0 |
attr |
TextStreamer.skip_prompt |
1 | 0 | 0 |
attr |
TextStreamer.decode_kwargs |
1 | 0 | 0 |
attr |
TextStreamer.token_cache |
1 | 0 | 0 |
attr |
TextStreamer.print_len |
1 | 0 | 0 |
attr |
TextStreamer.next_tokens_are_prompt |
1 | 0 | 0 |
meth |
TextIteratorStreamer.init |
5 | 3 | 0 |
meth |
TextIteratorStreamer.on_finalized_text |
3 | 2 | 0 |
meth |
TextIteratorStreamer.iter |
1 | 0 | 0 |
meth |
TextIteratorStreamer.next |
1 | 0 | 0 |
attr |
TextIteratorStreamer.text_queue |
1 | 0 | 0 |
attr |
TextIteratorStreamer.stop_signal |
1 | 0 | 0 |
attr |
TextIteratorStreamer.timeout |
1 | 0 | 0 |
meth |
BaseStreamer.put |
2 | 0 | 0 |
meth |
BaseStreamer.end |
1 | 0 | 0 |
transformers.generation.utils (58 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GenerationMixin.adjust_generation_fn |
14 | 0 | 0 |
meth |
GenerationMixin.load_custom_generate |
4 | 3 | 0 |
meth |
GenerationMixin.prepare_inputs_for_generation |
9 | 7 | 0 |
meth |
GenerationMixin._prepare_position_ids_for_generation |
3 | 0 | 0 |
meth |
GenerationMixin._prepare_encoder_decoder_kwargs_for_generation |
5 | 4 | 0 |
meth |
GenerationMixin._expand_inputs_for_generation |
5 | 4 | 0 |
meth |
GenerationMixin._validate_generation_mode |
4 | 0 | 0 |
meth |
GenerationMixin._validate_model_kwargs |
2 | 1 | 0 |
meth |
GenerationMixin._validate_generated_length |
4 | 0 | 0 |
meth |
GenerationMixin._prepare_generated_length |
7 | 0 | 0 |
meth |
GenerationMixin._prepare_generation_config |
3 | 3 | 1 |
meth |
GenerationMixin._get_initial_cache_position |
4 | 0 | 0 |
meth |
GenerationMixin._prepare_static_cache |
5 | 4 | 0 |
meth |
GenerationMixin._prepare_special_tokens |
4 | 3 | 0 |
meth |
GenerationMixin._optimize_model_for_decode |
1 | 0 | 0 |
meth |
GenerationMixin._extract_generation_mode_kwargs |
6 | 1 | 0 |
meth |
GenerationMixin.generate |
13 | 12 | 0 |
meth |
GenerationMixin._sample |
8 | 7 | 0 |
meth |
GenerationMixin._check_early_stop_heuristic |
10 | 9 | 0 |
meth |
GenerationMixin._beam_search_has_unfinished_sequences |
5 | 4 | 0 |
meth |
GenerationMixin._beam_search |
7 | 6 | 0 |
meth |
GenerationMixin._assisted_decoding |
12 | 11 | 0 |
meth |
GenerationMixin._prefill |
5 | 4 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.generation.watermarking (40 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BayesianDetectorModel.init |
2 | 0 | 0 |
meth |
BayesianDetectorModel._init_weights |
2 | 0 | 0 |
meth |
BayesianDetectorModel.forward |
6 | 4 | 0 |
attr |
BayesianDetectorModel.watermarking_depth |
1 | 0 | 0 |
attr |
BayesianDetectorModel.base_rate |
1 | 0 | 0 |
attr |
BayesianDetectorModel.likelihood_model_watermarked |
1 | 0 | 0 |
attr |
BayesianDetectorModel.prior |
1 | 0 | 0 |
meth |
WatermarkDetector.init |
6 | 5 | 0 |
meth |
WatermarkDetector._get_ngram_score |
3 | 2 | 0 |
meth |
WatermarkDetector._score_ngrams_in_passage |
2 | 1 | 0 |
meth |
WatermarkDetector._compute_pval |
4 | 0 | 0 |
attr |
WatermarkDetector.bos_token_id |
1 | 0 | 0 |
attr |
WatermarkDetector.greenlist_ratio |
1 | 0 | 0 |
attr |
WatermarkDetector.ignore_repeated_ngrams |
1 | 0 | 0 |
attr |
WatermarkDetector.processor |
1 | 0 | 0 |
meth |
BayesianDetectorConfig.init |
4 | 2 | 0 |
meth |
BayesianDetectorConfig.set_detector_information |
3 | 0 | 0 |
attr |
BayesianDetectorConfig.watermarking_depth |
1 | 0 | 0 |
attr |
BayesianDetectorConfig.base_rate |
1 | 0 | 0 |
attr |
BayesianDetectorConfig.model_name |
1 | 0 | 0 |
attr |
BayesianDetectorConfig.watermarking_config |
1 | 0 | 0 |
meth |
SynthIDTextWatermarkDetector.init |
4 | 3 | 1 |
meth |
SynthIDTextWatermarkDetector.call |
2 | 1 | 0 |
attr |
SynthIDTextWatermarkDetector.detector_module |
1 | 0 | 0 |
attr |
SynthIDTextWatermarkDetector.logits_processor |
1 | 0 | 0 |
attr |
SynthIDTextWatermarkDetector.tokenizer |
1 | 0 | 0 |
meth |
BayesianDetectorWatermarkedLikelihood.init |
2 | 1 | 0 |
attr |
BayesianDetectorWatermarkedLikelihood.watermarking_depth |
1 | 0 | 0 |
attr |
BayesianDetectorWatermarkedLikelihood.beta |
1 | 0 | 0 |
attr |
BayesianDetectorWatermarkedLikelihood.delta |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.hf_argparser (12 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HfArgumentParser.init |
3 | 1 | 0 |
meth |
HfArgumentParser._parse_dataclass_field |
3 | 2 | 0 |
meth |
HfArgumentParser._add_dataclass_arguments |
2 | 1 | 0 |
meth |
HfArgumentParser.parse_args_into_dataclasses |
6 | 1 | 0 |
func |
HfArg |
7 | 6 | 1 |
func |
string_to_bool |
2 | 0 | 0 |
transformers.hyperparameter_search (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RayTuneBackend.is_available |
1 | 0 | 0 |
meth |
RayTuneBackend.run |
5 | 2 | 0 |
meth |
RayTuneBackend.default_hp_space |
2 | 0 | 0 |
meth |
HyperParamSearchBackendBase.is_available |
1 | 0 | 0 |
meth |
HyperParamSearchBackendBase.run |
5 | 2 | 0 |
meth |
HyperParamSearchBackendBase.default_hp_space |
2 | 0 | 0 |
meth |
HyperParamSearchBackendBase.ensure_available |
1 | 0 | 0 |
meth |
HyperParamSearchBackendBase.pip_install |
1 | 0 | 0 |
meth |
OptunaBackend.is_available |
1 | 0 | 0 |
meth |
OptunaBackend.run |
5 | 2 | 0 |
meth |
OptunaBackend.default_hp_space |
2 | 0 | 0 |
meth |
WandbBackend.is_available |
1 | 0 | 0 |
meth |
WandbBackend.run |
5 | 2 | 0 |
meth |
WandbBackend.default_hp_space |
2 | 0 | 0 |
attr |
ALL_HYPERPARAMETER_SEARCH_BACKENDS |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.image_processing_base (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ImageProcessingMixin.init |
2 | 0 | 0 |
meth |
ImageProcessingMixin.from_pretrained |
8 | 7 | 0 |
meth |
ImageProcessingMixin.save_pretrained |
4 | 2 | 0 |
meth |
ImageProcessingMixin.get_image_processor_dict |
3 | 2 | 0 |
meth |
ImageProcessingMixin.from_dict |
3 | 1 | 0 |
meth |
ImageProcessingMixin.from_json_file |
2 | 1 | 0 |
meth |
ImageProcessingMixin.to_json_file |
2 | 1 | 0 |
meth |
ImageProcessingMixin.repr |
1 | 0 | 0 |
meth |
ImageProcessingMixin.register_for_auto_class |
2 | 0 | 0 |
meth |
ImageProcessingMixin.fetch_images |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.image_processing_utils (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BaseImageProcessor.call |
4 | 3 | 0 |
meth |
BaseImageProcessor.preprocess |
3 | 1 | 0 |
meth |
BaseImageProcessor.rescale |
6 | 5 | 0 |
meth |
BaseImageProcessor.normalize |
7 | 6 | 0 |
meth |
BaseImageProcessor.center_crop |
6 | 5 | 0 |
meth |
BaseImageProcessor.to_dict |
1 | 0 | 0 |
func |
get_size_dict |
6 | 5 | 0 |
func |
is_valid_size_dict |
2 | 0 | 0 |
func |
get_patch_output_size |
4 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
convert_to_size_dict |
5 | 3 | 0 |
transformers.image_processing_utils_fast (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BaseImageProcessorFast.init |
2 | 1 | 0 |
meth |
BaseImageProcessorFast.pad |
9 | 8 | 0 |
meth |
BaseImageProcessorFast.resize |
6 | 5 | 0 |
meth |
BaseImageProcessorFast.rescale |
4 | 3 | 0 |
meth |
BaseImageProcessorFast.normalize |
5 | 4 | 0 |
meth |
BaseImageProcessorFast.center_crop |
4 | 3 | 0 |
meth |
BaseImageProcessorFast.filter_out_unused_kwargs |
2 | 1 | 0 |
meth |
BaseImageProcessorFast._further_process_kwargs |
9 | 8 | 0 |
meth |
BaseImageProcessorFast._validate_preprocess_kwargs |
14 | 12 | 0 |
meth |
BaseImageProcessorFast.preprocess |
4 | 3 | 0 |
meth |
BaseImageProcessorFast._preprocess_image_like_inputs |
7 | 6 | 0 |
meth |
BaseImageProcessorFast._preprocess |
17 | 16 | 0 |
meth |
BaseImageProcessorFast.to_dict |
1 | 0 | 0 |
attr |
BaseImageProcessorFast.size |
1 | 0 | 0 |
attr |
BaseImageProcessorFast.crop_size |
1 | 0 | 0 |
attr |
BaseImageProcessorFast.pad_size |
1 | 0 | 0 |
func |
validate_fast_preprocess_arguments |
13 | 12 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.image_transforms (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
id_to_rgb |
2 | 0 | 0 |
func |
group_images_by_shape |
5 | 4 | 0 |
func |
get_size_with_aspect_ratio |
4 | 1 | 0 |
func |
rgb_to_id |
2 | 0 | 0 |
transformers.image_utils (57 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ImageFeatureExtractionMixin._ensure_format_supported |
2 | 0 | 0 |
meth |
ImageFeatureExtractionMixin.to_pil_image |
3 | 0 | 0 |
meth |
ImageFeatureExtractionMixin.convert_rgb |
2 | 0 | 0 |
meth |
ImageFeatureExtractionMixin.to_numpy_array |
4 | 0 | 0 |
meth |
ImageFeatureExtractionMixin.expand_dims |
2 | 0 | 0 |
meth |
ImageFeatureExtractionMixin.normalize |
5 | 0 | 0 |
meth |
ImageFeatureExtractionMixin.resize |
6 | 0 | 0 |
meth |
ImageFeatureExtractionMixin.center_crop |
3 | 0 | 0 |
meth |
ImageFeatureExtractionMixin.flip_channel_order |
2 | 0 | 0 |
meth |
ImageFeatureExtractionMixin.rotate |
8 | 0 | 0 |
func |
to_numpy_array |
2 | 1 | 0 |
func |
validate_preprocess_arguments |
14 | 13 | 0 |
func |
validate_kwargs |
3 | 2 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
get_image_type |
2 | 0 | 0 |
func |
make_list_of_images |
3 | 2 | 0 |
func |
is_valid_list_of_images |
2 | 1 | 0 |
func |
is_pil_image |
2 | 0 | 0 |
func |
valid_images |
2 | 0 | 0 |
func |
is_valid_image |
2 | 0 | 0 |
meth |
SizeDict.getitem |
2 | 0 | 0 |
func |
concatenate_list |
2 | 0 | 0 |
func |
is_batched |
2 | 0 | 0 |
transformers.initialization (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
lecun_normal_ |
2 | 0 | 0 |
func |
no_init_weights |
1 | 0 | 0 |
func |
default_flax_embed_init_ |
2 | 0 | 0 |
func |
no_tie_weights |
1 | 0 | 0 |
func |
guard_torch_init_functions |
1 | 0 | 0 |
transformers.integrations.accelerate (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
func |
accelerate_dispatch |
7 | 0 | 0 |
func |
infer_auto_device_map |
9 | 8 | 0 |
func |
get_module_size_with_ties |
5 | 1 | 0 |
func |
check_tied_parameters_on_same_device |
3 | 0 | 0 |
func |
get_balanced_memory |
6 | 5 | 0 |
func |
expand_device_map |
3 | 2 | 0 |
func |
accelerate_disk_offload |
8 | 6 | 0 |
func |
compute_module_total_buffer_size |
3 | 2 | 0 |
transformers.integrations.aqlm (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
replace_with_aqlm_linear |
4 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.integrations.awq (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
replace_with_awq_linear |
5 | 2 | 0 |
func |
replace_quantization_scales |
3 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.integrations.bitnet (49 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
replace_with_bitnet_linear |
4 | 1 | 0 |
meth |
ActQuant.forward |
3 | 0 | 0 |
meth |
ActQuant.backward |
3 | 0 | 0 |
meth |
AutoBitLinear.init |
9 | 6 | 0 |
meth |
AutoBitLinear.load_hook |
5 | 0 | 0 |
meth |
AutoBitLinear.forward |
2 | 0 | 0 |
attr |
AutoBitLinear.online_quant |
1 | 0 | 0 |
attr |
AutoBitLinear.rms_norm |
1 | 0 | 0 |
meth |
BitLinear.init |
8 | 5 | 0 |
meth |
BitLinear.activation_quant |
3 | 0 | 0 |
meth |
BitLinear.post_quant_process |
4 | 0 | 0 |
meth |
BitLinear.forward |
2 | 0 | 0 |
attr |
BitLinear.dtype |
1 | 0 | 0 |
attr |
BitLinear.in_features |
1 | 0 | 0 |
attr |
BitLinear.out_features |
1 | 0 | 0 |
attr |
BitLinear.rms_norm |
1 | 0 | 0 |
attr |
BitLinear.bias |
1 | 0 | 0 |
meth |
WeightQuant.forward |
3 | 0 | 0 |
meth |
WeightQuant.backward |
3 | 0 | 0 |
meth |
BitNetDeserialize.init |
2 | 0 | 0 |
meth |
BitNetDeserialize.convert |
5 | 4 | 0 |
attr |
BitNetDeserialize.hf_quantizer |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.integrations.bitsandbytes (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Bnb4bitQuantize.init |
2 | 0 | 0 |
meth |
Bnb4bitQuantize.convert |
5 | 4 | 0 |
attr |
Bnb4bitQuantize.hf_quantizer |
1 | 0 | 0 |
func |
dequantize_bnb_weight |
3 | 1 | 0 |
func |
replace_with_bnb_linear |
5 | 2 | 0 |
func |
validate_bnb_backend_availability |
2 | 0 | 0 |
meth |
Bnb4bitDeserialize.init |
2 | 0 | 0 |
meth |
Bnb4bitDeserialize.convert |
5 | 4 | 0 |
attr |
Bnb4bitDeserialize.hf_quantizer |
1 | 0 | 0 |
meth |
Bnb8bitDeserialize.init |
2 | 0 | 0 |
meth |
Bnb8bitDeserialize.convert |
5 | 4 | 0 |
attr |
Bnb8bitDeserialize.hf_quantizer |
1 | 0 | 0 |
meth |
Bnb8bitQuantize.init |
2 | 0 | 0 |
meth |
Bnb8bitQuantize.convert |
5 | 4 | 0 |
attr |
Bnb8bitQuantize.hf_quantizer |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
dequantize_and_replace |
4 | 0 | 0 |
transformers.integrations.deepspeed (54 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HfDeepSpeedConfig.init |
2 | 0 | 0 |
func |
propagate_args_to_deepspeed |
4 | 0 | 0 |
func |
deepspeed_optim_sched |
6 | 0 | 0 |
func |
deepspeed_config |
1 | 0 | 0 |
func |
is_deepspeed_available |
1 | 0 | 0 |
func |
is_deepspeed_zero3_enabled |
1 | 0 | 0 |
func |
set_hf_deepspeed_config |
2 | 0 | 0 |
func |
unset_hf_deepspeed_config |
1 | 0 | 0 |
func |
deepspeed_init |
4 | 0 | 0 |
func |
deepspeed_load_checkpoint |
4 | 0 | 0 |
func |
initialize_weights_zero3 |
2 | 0 | 0 |
meth |
HfTrainerDeepSpeedConfig.init |
2 | 0 | 0 |
meth |
HfTrainerDeepSpeedConfig.dtype |
1 | 0 | 0 |
meth |
HfTrainerDeepSpeedConfig.is_auto |
2 | 0 | 0 |
meth |
HfTrainerDeepSpeedConfig.fill_match |
5 | 0 | 0 |
meth |
HfTrainerDeepSpeedConfig.trainer_config_process |
3 | 0 | 0 |
meth |
HfTrainerDeepSpeedConfig.trainer_config_finalize |
4 | 0 | 0 |
attr |
HfTrainerDeepSpeedConfig.mismatches |
1 | 0 | 0 |
attr |
HfTrainerDeepSpeedConfig.fill_only |
1 | 0 | 0 |
func |
deepspeed_sp_compute_loss |
6 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.integrations.eager_paged (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
eager_paged_attention_forward |
8 | 6 | 0 |
transformers.integrations.eetq (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EetqQuantize.init |
2 | 0 | 0 |
meth |
EetqQuantize.convert |
4 | 3 | 0 |
attr |
EetqQuantize.hf_quantizer |
1 | 0 | 0 |
func |
replace_with_eetq_linear |
4 | 1 | 0 |
meth |
EetqLinear.init |
5 | 0 | 0 |
meth |
EetqLinear.forward |
2 | 0 | 0 |
attr |
EetqLinear.weight |
1 | 0 | 0 |
attr |
EetqLinear.weight_scales |
1 | 0 | 0 |
attr |
EetqLinear.bias |
1 | 0 | 0 |
meth |
EetqLinearMMFunction.forward |
6 | 0 | 0 |
meth |
EetqLinearMMFunction.backward |
3 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.integrations.executorch (83 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
convert_and_export_with_cache |
6 | 5 | 0 |
meth |
TorchExportableModuleWithStaticCache.forward |
4 | 3 | 0 |
attr |
TorchExportableModuleWithStaticCache.model |
1 | 0 | 0 |
attr |
TorchExportableModuleWithStaticCache.static_cache |
1 | 0 | 0 |
meth |
Seq2SeqLMDecoderExportableModuleWithStaticCache.init |
4 | 0 | 0 |
meth |
Seq2SeqLMDecoderExportableModuleWithStaticCache.forward |
4 | 0 | 0 |
attr |
Seq2SeqLMDecoderExportableModuleWithStaticCache.decoder |
1 | 0 | 0 |
attr |
Seq2SeqLMDecoderExportableModuleWithStaticCache.lm_head |
1 | 0 | 0 |
attr |
Seq2SeqLMDecoderExportableModuleWithStaticCache.config |
1 | 0 | 0 |
attr |
Seq2SeqLMDecoderExportableModuleWithStaticCache.static_cache |
1 | 0 | 0 |
attr |
Seq2SeqLMDecoderExportableModuleWithStaticCache.cache |
1 | 0 | 0 |
meth |
Seq2SeqLMExportableModule.init |
6 | 0 | 0 |
meth |
Seq2SeqLMExportableModule._export_encoder |
2 | 0 | 0 |
meth |
Seq2SeqLMExportableModule._export_decoder |
4 | 0 | 0 |
meth |
Seq2SeqLMExportableModule.export |
5 | 0 | 0 |
meth |
Seq2SeqLMExportableModule.generate |
3 | 0 | 0 |
attr |
Seq2SeqLMExportableModule.full_model |
1 | 0 | 0 |
attr |
Seq2SeqLMExportableModule.encoder |
1 | 0 | 0 |
attr |
Seq2SeqLMExportableModule.config |
1 | 0 | 0 |
attr |
Seq2SeqLMExportableModule.max_hidden_seq_length |
1 | 0 | 0 |
attr |
Seq2SeqLMExportableModule.generation_config |
1 | 0 | 0 |
attr |
Seq2SeqLMExportableModule.exported_encoder |
1 | 0 | 0 |
attr |
Seq2SeqLMExportableModule.exported_decoder |
1 | 0 | 0 |
attr |
TorchExportableModuleWithHybridCache.model |
1 | 0 | 0 |
attr |
TorchExportableModuleWithHybridCache.cache |
1 | 0 | 0 |
meth |
Seq2SeqLMEncoderExportableModule.init |
2 | 0 | 0 |
meth |
Seq2SeqLMEncoderExportableModule.forward |
2 | 0 | 0 |
attr |
Seq2SeqLMEncoderExportableModule.encoder |
1 | 0 | 0 |
meth |
TorchExportableModuleForVLM.init |
4 | 2 | 0 |
meth |
TorchExportableModuleForVLM.export_vision_encoder |
1 | 0 | 0 |
meth |
TorchExportableModuleForVLM.export_connector |
1 | 0 | 0 |
meth |
TorchExportableModuleForVLM.export_text_decoder |
1 | 0 | 0 |
meth |
TorchExportableModuleForVLM.export |
2 | 0 | 0 |
meth |
TorchExportableModuleForVLM.forward |
4 | 0 | 0 |
meth |
TorchExportableModuleForVLM.generate |
7 | 0 | 0 |
attr |
TorchExportableModuleForVLM.model |
1 | 0 | 0 |
attr |
TorchExportableModuleForVLM.max_batch_size |
1 | 0 | 0 |
attr |
TorchExportableModuleForVLM.max_cache_len |
1 | 0 | 0 |
attr |
TorchExportableModuleForVLM.config |
1 | 0 | 0 |
attr |
TorchExportableModuleForVLM.vision_encoder |
1 | 0 | 0 |
attr |
TorchExportableModuleForVLM.connector |
1 | 0 | 0 |
attr |
TorchExportableModuleForVLM.text_decoder |
1 | 0 | 0 |
attr |
TorchExportableModuleForVLM.exported_vision_encoder |
1 | 0 | 0 |
attr |
TorchExportableModuleForVLM.exported_connector |
1 | 0 | 0 |
attr |
TorchExportableModuleForVLM.exported_text_decoder |
1 | 0 | 0 |
func |
export_with_dynamic_cache |
4 | 3 | 0 |
func |
register_dynamic_cache_export_support |
1 | 0 | 0 |
meth |
TorchExportableModuleForDecoderOnlyLM.generate |
10 | 9 | 0 |
attr |
TorchExportableModuleForDecoderOnlyLM.model |
1 | 0 | 0 |
transformers.integrations.fbgemm_fp8 (37 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FbgemmFp8Quantize.init |
2 | 0 | 0 |
meth |
FbgemmFp8Quantize.convert |
4 | 3 | 0 |
attr |
FbgemmFp8Quantize.hf_quantizer |
1 | 0 | 0 |
func |
replace_with_fbgemm_fp8_linear |
6 | 1 | 0 |
meth |
FbgemmFp8Llama4TextExperts.init |
3 | 0 | 0 |
meth |
FbgemmFp8Llama4TextExperts.forward |
2 | 0 | 0 |
attr |
FbgemmFp8Llama4TextExperts.num_experts |
1 | 0 | 0 |
attr |
FbgemmFp8Llama4TextExperts.intermediate_size |
1 | 0 | 0 |
attr |
FbgemmFp8Llama4TextExperts.hidden_size |
1 | 0 | 0 |
attr |
FbgemmFp8Llama4TextExperts.expert_dim |
1 | 0 | 0 |
attr |
FbgemmFp8Llama4TextExperts.act_fn |
1 | 0 | 0 |
attr |
FbgemmFp8Llama4TextExperts.gate_up_proj |
1 | 0 | 0 |
attr |
FbgemmFp8Llama4TextExperts.gate_up_proj_scale |
1 | 0 | 0 |
attr |
FbgemmFp8Llama4TextExperts.down_proj |
1 | 0 | 0 |
attr |
FbgemmFp8Llama4TextExperts.down_proj_scale |
1 | 0 | 0 |
meth |
FbgemmFp8Linear.init |
5 | 0 | 0 |
meth |
FbgemmFp8Linear.forward |
2 | 0 | 0 |
attr |
FbgemmFp8Linear.in_features |
1 | 0 | 0 |
attr |
FbgemmFp8Linear.out_features |
1 | 0 | 0 |
attr |
FbgemmFp8Linear.weight |
1 | 0 | 0 |
attr |
FbgemmFp8Linear.weight_scale |
1 | 0 | 0 |
attr |
FbgemmFp8Linear.bias |
1 | 0 | 0 |
func |
get_quantize_fp8_per_row |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.integrations.finegrained_fp8 (39 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
replace_with_fp8_linear |
5 | 1 | 0 |
meth |
Fp8Dequantize.init |
2 | 0 | 0 |
meth |
Fp8Dequantize.convert |
4 | 3 | 0 |
attr |
Fp8Dequantize.hf_quantizer |
1 | 0 | 0 |
meth |
FP8Expert.init |
4 | 0 | 0 |
attr |
FP8Expert.block_size |
1 | 0 | 0 |
attr |
FP8Expert.num_experts |
1 | 0 | 0 |
attr |
FP8Expert.hidden_dim |
1 | 0 | 0 |
attr |
FP8Expert.intermediate_dim |
1 | 0 | 0 |
attr |
FP8Expert.gate_up_proj |
1 | 0 | 0 |
attr |
FP8Expert.down_proj |
1 | 0 | 0 |
attr |
FP8Expert.gate_up_proj_scale_inv |
1 | 0 | 0 |
attr |
FP8Expert.down_proj_scale_inv |
1 | 0 | 0 |
attr |
FP8Expert.act_fn |
1 | 0 | 0 |
func |
act_quant_kernel |
5 | 1 | 0 |
meth |
Fp8Quantize.init |
2 | 0 | 0 |
meth |
Fp8Quantize.convert |
3 | 2 | 0 |
attr |
Fp8Quantize.hf_quantizer |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
FP8Linear.init |
7 | 4 | 0 |
attr |
FP8Linear.block_size |
1 | 0 | 0 |
attr |
FP8Linear.activation_scheme |
1 | 0 | 0 |
attr |
FP8Linear.weight |
1 | 0 | 0 |
attr |
FP8Linear.weight_scale_inv |
1 | 0 | 0 |
attr |
FP8Linear.activation_scale |
1 | 0 | 0 |
attr |
FP8Linear.bias |
1 | 0 | 0 |
transformers.integrations.flash_attention (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
flash_attention_forward |
12 | 11 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.integrations.flash_paged (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
paged_attention_forward |
12 | 7 | 0 |
transformers.integrations.flex_attention (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
make_flex_block_causal_mask |
7 | 5 | 0 |
meth |
WrappedFlexAttention.new |
3 | 0 | 0 |
meth |
WrappedFlexAttention.init |
2 | 0 | 0 |
meth |
WrappedFlexAttention.call |
1 | 0 | 0 |
attr |
WrappedFlexAttention.training |
1 | 0 | 0 |
func |
compile_friendly_flex_attention |
6 | 4 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
flex_attention_forward |
10 | 9 | 0 |
transformers.integrations.fouroversix (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FourOverSixQuantize.init |
2 | 0 | 0 |
meth |
FourOverSixQuantize.convert |
6 | 5 | 0 |
attr |
FourOverSixQuantize.hf_quantizer |
1 | 0 | 0 |
func |
adapt_fouroversix_config |
2 | 1 | 0 |
transformers.integrations.fp_quant (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FpQuantQuantize.init |
2 | 0 | 0 |
meth |
FpQuantQuantize.convert |
5 | 4 | 0 |
attr |
FpQuantQuantize.hf_quantizer |
1 | 0 | 0 |
meth |
FpQuantDeserialize.init |
2 | 0 | 0 |
meth |
FpQuantDeserialize.convert |
6 | 5 | 0 |
attr |
FpQuantDeserialize.hf_quantizer |
1 | 0 | 0 |
func |
adapt_fp_quant_config |
2 | 1 | 0 |
transformers.integrations.fsdp (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
is_fsdp_enabled |
1 | 0 | 0 |
func |
get_fsdp_ckpt_kwargs |
1 | 0 | 0 |
func |
update_fsdp_plugin_peft |
3 | 0 | 0 |
transformers.integrations.ggml (69 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
GGUFQwen2Converter.init |
2 | 0 | 0 |
attr |
GGUFQwen2Converter.original_tokenizer |
1 | 0 | 0 |
attr |
GGUFQwen2Converter.additional_kwargs |
1 | 0 | 0 |
func |
convert_gguf_tokenizer |
3 | 2 | 0 |
meth |
GGUFLlamaConverter.init |
2 | 0 | 0 |
meth |
GGUFLlamaConverter.vocab |
2 | 0 | 0 |
meth |
GGUFLlamaConverter.merges |
2 | 0 | 0 |
meth |
GGUFLlamaConverter.tokenizer |
2 | 0 | 0 |
meth |
GGUFLlamaConverter.decoder |
3 | 0 | 0 |
meth |
GGUFLlamaConverter.converted |
1 | 0 | 0 |
attr |
GGUFLlamaConverter.proto |
1 | 0 | 0 |
attr |
GGUFLlamaConverter.original_tokenizer |
1 | 0 | 0 |
attr |
GGUFLlamaConverter.additional_kwargs |
1 | 0 | 0 |
attr |
GGUFLlamaConverter.is_llama_3_tokenizer |
1 | 0 | 0 |
meth |
GGUFGemmaConverter.init |
2 | 0 | 0 |
meth |
GGUFGemmaConverter.vocab |
2 | 0 | 0 |
meth |
GGUFGemmaConverter.normalizer |
2 | 0 | 0 |
meth |
GGUFGemmaConverter.decoder |
3 | 0 | 0 |
attr |
GGUFGemmaConverter.proto |
1 | 0 | 0 |
attr |
GGUFGemmaConverter.original_tokenizer |
1 | 0 | 0 |
attr |
GGUFGemmaConverter.additional_kwargs |
1 | 0 | 0 |
meth |
GGUFT5Converter.init |
2 | 0 | 0 |
meth |
GGUFT5Converter.vocab |
2 | 0 | 0 |
meth |
GGUFT5Converter.normalizer |
2 | 0 | 0 |
meth |
GGUFT5Converter.post_processor |
1 | 0 | 0 |
attr |
GGUFT5Converter.proto |
1 | 0 | 0 |
attr |
GGUFT5Converter.token2id |
1 | 0 | 0 |
attr |
GGUFT5Converter.original_tokenizer |
1 | 0 | 0 |
attr |
GGUFT5Converter.additional_kwargs |
1 | 0 | 0 |
meth |
GGUFTokenizerSkeleton.init |
2 | 0 | 0 |
attr |
GGUFTokenizerSkeleton.merges |
1 | 0 | 0 |
attr |
GGUFTokenizerSkeleton.added_tokens |
1 | 0 | 0 |
attr |
GGUFTokenizerSkeleton.unk_token_id |
1 | 0 | 0 |
attr |
GGUFTokenizerSkeleton.scores |
1 | 0 | 0 |
meth |
GGUFPhi3Converter.init |
2 | 0 | 0 |
meth |
GGUFPhi3Converter.vocab |
2 | 0 | 0 |
meth |
GGUFPhi3Converter.merges |
2 | 0 | 0 |
meth |
GGUFPhi3Converter.tokenizer |
2 | 0 | 0 |
meth |
GGUFPhi3Converter.decoder |
3 | 0 | 0 |
attr |
GGUFPhi3Converter.proto |
1 | 0 | 0 |
attr |
GGUFPhi3Converter.original_tokenizer |
1 | 0 | 0 |
attr |
GGUFPhi3Converter.additional_kwargs |
1 | 0 | 0 |
meth |
GGUFGPTConverter.init |
2 | 0 | 0 |
attr |
GGUFGPTConverter.original_tokenizer |
1 | 0 | 0 |
attr |
GGUFGPTConverter.additional_kwargs |
1 | 0 | 0 |
transformers.integrations.higgs (29 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
dequantize_higgs |
3 | 0 | 0 |
func |
pad_to_block |
5 | 0 | 0 |
func |
replace_with_higgs_linear |
4 | 1 | 0 |
func |
quantize_with_higgs |
6 | 4 | 0 |
meth |
HiggsLinear.init |
9 | 7 | 0 |
meth |
HiggsLinear.forward |
2 | 0 | 0 |
attr |
HiggsLinear.in_features |
1 | 0 | 0 |
attr |
HiggsLinear.out_features |
1 | 0 | 0 |
attr |
HiggsLinear.num_bits |
1 | 0 | 0 |
attr |
HiggsLinear.group_size |
1 | 0 | 0 |
attr |
HiggsLinear.hadamard_size |
1 | 0 | 0 |
attr |
HiggsLinear.weight |
1 | 0 | 0 |
attr |
HiggsLinear.scales |
1 | 0 | 0 |
attr |
HiggsLinear.tables |
1 | 0 | 0 |
attr |
HiggsLinear.tables2 |
1 | 0 | 0 |
attr |
HiggsLinear.workspace |
1 | 0 | 0 |
attr |
HiggsLinear.bias |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.integrations.hqq (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
get_linear_tags |
2 | 0 | 0 |
func |
prepare_for_hqq_linear |
5 | 0 | 0 |
func |
autoname_modules |
2 | 0 | 0 |
func |
name_to_linear_tag |
2 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.integrations.hub_kernels (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
replace_kernel_forward_from_hub |
3 | 0 | 0 |
func |
register_kernel_mapping_transformers |
2 | 0 | 0 |
func |
use_kernel_func_from_hub |
2 | 1 | 0 |
func |
use_kernel_forward_from_hub |
2 | 1 | 0 |
func |
lazy_load_kernel |
3 | 2 | 0 |
func |
use_kernelized_func |
2 | 1 | 0 |
meth |
LayerRepository.init |
3 | 0 | 0 |
func |
register_kernel_mapping |
3 | 0 | 0 |
transformers.integrations.integration_utils (403 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
is_neptune_available |
1 | 0 | 0 |
func |
is_ray_tune_available |
1 | 0 | 0 |
func |
is_swanlab_available |
1 | 0 | 0 |
func |
is_trackio_available |
1 | 0 | 0 |
func |
is_optuna_available |
1 | 0 | 0 |
func |
is_wandb_available |
1 | 0 | 0 |
func |
is_comet_available |
1 | 0 | 0 |
func |
is_tensorboard_available |
1 | 0 | 0 |
func |
is_dvclive_available |
1 | 0 | 0 |
func |
is_clearml_available |
1 | 0 | 0 |
func |
is_ray_available |
1 | 0 | 0 |
func |
is_azureml_available |
1 | 0 | 0 |
func |
save_model_architecture_to_file |
3 | 2 | 1 |
func |
get_available_reporting_integrations |
1 | 0 | 0 |
meth |
FlyteCallback.__init__ |
3 | 2 | 0 |
meth |
FlyteCallback.on_save |
5 | 0 | 0 |
meth |
FlyteCallback.on_train_end |
5 | 0 | 0 |
attr |
FlyteCallback.cp |
1 | 0 | 0 |
attr |
FlyteCallback.save_log_history |
1 | 0 | 0 |
attr |
FlyteCallback.sync_checkpoints |
1 | 0 | 0 |
func |
is_flyte_deck_standard_available |
1 | 0 | 0 |
func |
is_dagshub_available |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
run_hp_search_wandb |
5 | 3 | 0 |
meth |
CodeCarbonCallback.__init__ |
1 | 0 | 0 |
meth |
CodeCarbonCallback.on_init_end |
5 | 0 | 0 |
meth |
CodeCarbonCallback.on_train_begin |
6 | 0 | 0 |
meth |
CodeCarbonCallback.on_train_end |
5 | 0 | 0 |
attr |
CodeCarbonCallback.tracker |
1 | 0 | 0 |
meth |
MLflowCallback.__init__ |
1 | 0 | 0 |
meth |
MLflowCallback.setup |
4 | 0 | 0 |
meth |
MLflowCallback.on_train_begin |
6 | 0 | 0 |
meth |
MLflowCallback.on_log |
7 | 0 | 0 |
meth |
MLflowCallback.on_train_end |
5 | 0 | 0 |
meth |
MLflowCallback.on_save |
5 | 0 | 0 |
meth |
MLflowCallback.__del__ |
1 | 0 | 0 |
meth |
NeptuneMissingConfiguration.__init__ |
1 | 0 | 0 |
func |
is_codecarbon_available |
1 | 0 | 0 |
func |
is_flytekit_available |
1 | 0 | 0 |
meth |
TrackioCallback.__init__ |
1 | 0 | 0 |
meth |
TrackioCallback.setup |
5 | 0 | 0 |
meth |
TrackioCallback.on_train_begin |
6 | 0 | 0 |
meth |
TrackioCallback.on_train_end |
7 | 1 | 0 |
meth |
TrackioCallback.on_log |
7 | 0 | 0 |
meth |
TrackioCallback.on_save |
5 | 0 | 0 |
meth |
TrackioCallback.on_predict |
6 | 0 | 0 |
meth |
TrackioCallback.on_push_begin |
6 | 0 | 0 |
meth |
DagsHubCallback.__init__ |
1 | 0 | 0 |
meth |
DagsHubCallback.setup |
3 | 0 | 0 |
meth |
DagsHubCallback.on_train_end |
5 | 0 | 0 |
attr |
DagsHubCallback.Repo |
1 | 0 | 0 |
func |
rewrite_logs |
2 | 0 | 0 |
meth |
ClearMLCallback.__init__ |
1 | 0 | 0 |
meth |
ClearMLCallback.setup |
6 | 0 | 0 |
meth |
ClearMLCallback.on_train_begin |
7 | 0 | 0 |
meth |
ClearMLCallback.on_train_end |
5 | 0 | 0 |
meth |
ClearMLCallback.on_log |
8 | 0 | 0 |
meth |
ClearMLCallback.on_save |
5 | 0 | 0 |
meth |
ClearMLCallback.copy_training_args_as_hparams |
3 | 0 | 0 |
func |
run_hp_search_optuna |
5 | 3 | 0 |
func |
hp_params |
2 | 0 | 0 |
meth |
WandbCallback.__init__ |
1 | 0 | 0 |
meth |
WandbCallback.setup |
5 | 0 | 0 |
meth |
WandbCallback.on_train_begin |
6 | 0 | 0 |
meth |
WandbCallback.on_train_end |
7 | 1 | 0 |
meth |
WandbCallback.on_log |
7 | 0 | 0 |
meth |
WandbCallback.on_save |
5 | 0 | 0 |
meth |
WandbCallback.on_predict |
6 | 0 | 0 |
meth |
NeptuneCallback.__init__ |
9 | 6 | 0 |
meth |
NeptuneCallback._stop_run_if_exists |
1 | 0 | 0 |
meth |
NeptuneCallback._initialize_run |
2 | 0 | 0 |
meth |
NeptuneCallback._use_initial_run |
1 | 0 | 0 |
meth |
NeptuneCallback._ensure_run_with_monitoring |
1 | 0 | 0 |
meth |
NeptuneCallback._ensure_at_least_run_without_monitoring |
1 | 0 | 0 |
meth |
NeptuneCallback._log_integration_version |
1 | 0 | 0 |
meth |
NeptuneCallback._log_trainer_parameters |
2 | 0 | 0 |
meth |
NeptuneCallback._log_model_parameters |
2 | 0 | 0 |
meth |
NeptuneCallback._log_hyper_param_search_parameters |
2 | 0 | 0 |
meth |
NeptuneCallback._log_model_checkpoint |
3 | 2 | 0 |
meth |
NeptuneCallback.on_init_end |
5 | 0 | 0 |
meth |
NeptuneCallback.on_train_begin |
6 | 0 | 0 |
meth |
NeptuneCallback.on_train_end |
5 | 0 | 0 |
meth |
NeptuneCallback.__del__ |
1 | 0 | 0 |
meth |
NeptuneCallback.on_save |
5 | 0 | 0 |
meth |
NeptuneCallback.on_evaluate |
6 | 0 | 0 |
meth |
NeptuneCallback.get_run |
2 | 0 | 0 |
meth |
NeptuneCallback.on_log |
6 | 1 | 0 |
prop |
NeptuneCallback.run |
1 | 0 | 0 |
prop |
NeptuneCallback._metadata_namespace |
1 | 0 | 0 |
meth |
CometCallback.__init__ |
1 | 0 | 0 |
meth |
CometCallback.setup |
4 | 0 | 0 |
meth |
CometCallback.on_train_begin |
6 | 0 | 0 |
meth |
CometCallback.on_log |
7 | 0 | 0 |
meth |
CometCallback.on_train_end |
5 | 0 | 0 |
meth |
CometCallback.on_predict |
6 | 0 | 0 |
meth |
SwanLabCallback.__init__ |
1 | 0 | 0 |
meth |
SwanLabCallback.setup |
5 | 0 | 0 |
meth |
SwanLabCallback.on_train_begin |
6 | 0 | 0 |
meth |
SwanLabCallback.on_train_end |
7 | 0 | 0 |
meth |
SwanLabCallback.on_log |
7 | 0 | 0 |
meth |
SwanLabCallback.on_save |
5 | 0 | 0 |
meth |
SwanLabCallback.on_predict |
6 | 0 | 0 |
func |
is_mlflow_available |
1 | 0 | 0 |
meth |
DVCLiveCallback.__init__ |
4 | 2 | 0 |
meth |
DVCLiveCallback.setup |
4 | 0 | 0 |
meth |
DVCLiveCallback.on_train_begin |
6 | 0 | 0 |
meth |
DVCLiveCallback.on_log |
7 | 0 | 0 |
meth |
DVCLiveCallback.on_save |
5 | 0 | 0 |
meth |
DVCLiveCallback.on_train_end |
5 | 0 | 0 |
attr |
DVCLiveCallback.live |
1 | 0 | 0 |
func |
run_hp_search_ray |
5 | 3 | 0 |
meth |
WandbLogModel._missing |
2 | 2 | 1 |
meth |
TensorBoardCallback.__init__ |
2 | 0 | 0 |
meth |
TensorBoardCallback._init_summary_writer |
2 | 0 | 0 |
meth |
TensorBoardCallback.on_train_begin |
5 | 0 | 0 |
meth |
TensorBoardCallback.on_log |
6 | 0 | 0 |
meth |
TensorBoardCallback.on_train_end |
5 | 0 | 0 |
attr |
TensorBoardCallback.tb_writer |
1 | 0 | 0 |
attr |
TensorBoardCallback.logging_dir |
1 | 0 | 0 |
meth |
AzureMLCallback.__init__ |
2 | 0 | 0 |
meth |
AzureMLCallback.on_init_end |
5 | 0 | 0 |
meth |
AzureMLCallback.on_log |
6 | 0 | 0 |
attr |
AzureMLCallback.azureml_run |
1 | 0 | 0 |
func |
get_reporting_integration_callbacks |
2 | 0 | 0 |
transformers.integrations.liger (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
apply_liger_kernel |
3 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.integrations.metal_quantization (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MetalLinear.__init__ |
7 | 5 | 0 |
attr |
MetalLinear.in_features |
1 | 0 | 0 |
attr |
MetalLinear.out_features |
1 | 0 | 0 |
attr |
MetalLinear.bits |
1 | 0 | 0 |
attr |
MetalLinear.group_size |
1 | 0 | 0 |
attr |
MetalLinear.scales |
1 | 0 | 0 |
attr |
MetalLinear.qbiases |
1 | 0 | 0 |
attr |
MetalLinear.weight |
1 | 0 | 0 |
attr |
MetalLinear.bias |
1 | 0 | 0 |
func |
replace_with_metal_linear |
5 | 2 | 0 |
meth |
MetalQuantize.__init__ |
2 | 0 | 0 |
meth |
MetalQuantize.convert |
3 | 2 | 0 |
attr |
MetalQuantize.hf_quantizer |
1 | 0 | 0 |
meth |
MetalDequantize.__init__ |
2 | 0 | 0 |
meth |
MetalDequantize.convert |
4 | 3 | 0 |
attr |
MetalDequantize.hf_quantizer |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.integrations.mistral (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MistralConverter.__init__ |
6 | 0 | 0 |
meth |
MistralConverter.extract_vocab_merges_from_model |
2 | 1 | 0 |
meth |
MistralConverter.tokenizer |
1 | 0 | 0 |
attr |
MistralConverter.vocab |
1 | 0 | 0 |
attr |
MistralConverter.pattern |
1 | 0 | 0 |
attr |
MistralConverter.add_prefix_space |
1 | 0 | 0 |
attr |
MistralConverter.additional_special_tokens |
1 | 0 | 0 |
func |
convert_tekken_tokenizer |
2 | 1 | 0 |
transformers.integrations.moe (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
ALL_EXPERTS_FUNCTIONS |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.integrations.mxfp4 (77 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
on_device |
2 | 0 | 0 |
func |
convert_moe_packed_tensors |
5 | 3 | 0 |
func |
quantize_to_mxfp4 |
3 | 0 | 0 |
meth |
Mxfp4Dequantize.__init__ |
2 | 0 | 0 |
meth |
Mxfp4Dequantize.convert |
6 | 5 | 0 |
attr |
Mxfp4Dequantize.hf_quantizer |
1 | 0 | 0 |
func |
swizzle_mxfp4_convertops |
7 | 0 | 0 |
meth |
Mxfp4Deserialize.__init__ |
2 | 0 | 0 |
meth |
Mxfp4Deserialize.convert |
6 | 5 | 0 |
attr |
Mxfp4Deserialize.hf_quantizer |
1 | 0 | 0 |
func |
routing_torch_dist |
3 | 0 | 0 |
meth |
Mxfp4Quantize.__init__ |
2 | 0 | 0 |
meth |
Mxfp4Quantize.convert |
6 | 5 | 0 |
attr |
Mxfp4Quantize.hf_quantizer |
1 | 0 | 0 |
func |
dequantize_convertops |
3 | 0 | 0 |
func |
replace_with_mxfp4_linear |
4 | 1 | 0 |
func |
mlp_forward |
3 | 0 | 0 |
func |
dequantize |
7 | 0 | 0 |
func |
swizzle_mxfp4 |
4 | 0 | 0 |
meth |
Mxfp4ReverseDeserialize.__init__ |
2 | 0 | 0 |
meth |
Mxfp4ReverseDeserialize.convert |
6 | 5 | 0 |
attr |
Mxfp4ReverseDeserialize.hf_quantizer |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
Mxfp4GptOssExperts.__init__ |
2 | 0 | 0 |
meth |
Mxfp4GptOssExperts.forward |
5 | 2 | 0 |
attr |
Mxfp4GptOssExperts.num_experts |
1 | 0 | 0 |
attr |
Mxfp4GptOssExperts.intermediate_size |
1 | 0 | 0 |
attr |
Mxfp4GptOssExperts.hidden_size |
1 | 0 | 0 |
attr |
Mxfp4GptOssExperts.gate_up_proj |
1 | 0 | 0 |
attr |
Mxfp4GptOssExperts.gate_up_proj_bias |
1 | 0 | 0 |
attr |
Mxfp4GptOssExperts.down_proj |
1 | 0 | 0 |
attr |
Mxfp4GptOssExperts.down_proj_bias |
1 | 0 | 0 |
attr |
Mxfp4GptOssExperts.alpha |
1 | 0 | 0 |
attr |
Mxfp4GptOssExperts.limit |
1 | 0 | 0 |
attr |
Mxfp4GptOssExperts.gate_up_proj_precision_config |
1 | 0 | 0 |
attr |
Mxfp4GptOssExperts.down_proj_precision_config |
1 | 0 | 0 |
func |
load_and_swizzle_mxfp4 |
7 | 0 | 0 |
transformers.integrations.neftune (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
deactivate_neftune |
4 | 0 | 0 |
func |
activate_neftune |
4 | 0 | 0 |
func |
neftune_post_forward_hook |
4 | 0 | 0 |
transformers.integrations.npu_flash_attention (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
get_attn_mask_npu |
2 | 0 | 0 |
func |
is_npu_fa2_top_left_aligned_causal_mask |
1 | 0 | 0 |
attr |
SPARSE_MODE |
1 | 0 | 0 |
func |
npu_flash_attn_func |
8 | 0 | 0 |
func |
npu_flash_attn_varlen_func |
12 | 0 | 0 |
transformers.integrations.peft (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PermuteDims.__init__ |
2 | 1 | 0 |
meth |
PermuteDims.convert |
6 | 4 | 0 |
meth |
PermuteDims.__repr__ |
1 | 0 | 0 |
attr |
PermuteDims.dims |
1 | 0 | 0 |
func |
patch_moe_parameter_targeting |
3 | 0 | 0 |
meth |
FlattenDims.__init__ |
2 | 1 | 0 |
meth |
FlattenDims.convert |
6 | 4 | 0 |
meth |
FlattenDims.__repr__ |
1 | 0 | 0 |
attr |
FlattenDims.dims |
1 | 0 | 0 |
func |
convert_peft_config_for_transformers |
4 | 2 | 0 |
func |
maybe_load_adapters |
4 | 1 | 0 |
meth |
PeftAdapterMixin.load_adapter |
12 | 11 | 0 |
meth |
PeftAdapterMixin.add_adapter |
3 | 2 | 0 |
meth |
PeftConcatenate.convert |
6 | 5 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.integrations.quanto (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
replace_with_quanto_layers |
4 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
QuantoQuantize.__init__ |
2 | 0 | 0 |
meth |
QuantoQuantize.convert |
6 | 5 | 0 |
attr |
QuantoQuantize.hf_quantizer |
1 | 0 | 0 |
transformers.integrations.quark (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
QuarkDeserialize.__init__ |
2 | 0 | 0 |
meth |
QuarkDeserialize.convert |
6 | 5 | 0 |
attr |
QuarkDeserialize.hf_quantizer |
1 | 0 | 0 |
transformers.integrations.sdpa_attention (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
func |
sdpa_attention_forward |
10 | 9 | 0 |
transformers.integrations.sdpa_paged (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
sdpa_attention_paged_forward |
9 | 8 | 0 |
transformers.integrations.sinq (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SinqQuantize.__init__ |
2 | 0 | 0 |
meth |
SinqQuantize.convert |
6 | 4 | 0 |
attr |
SinqQuantize.hf_quantizer |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
SinqDeserialize.__init__ |
2 | 0 | 0 |
meth |
SinqDeserialize.convert |
5 | 4 | 0 |
attr |
SinqDeserialize.hf_quantizer |
1 | 0 | 0 |
transformers.integrations.spqr (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
replace_with_spqr_linear |
4 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.integrations.tensor_parallel (208 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MoeTensorParalellExperts.__init__ |
2 | 0 | 0 |
meth |
MoeTensorParalellExperts._prepare_input_fn |
4 | 0 | 0 |
meth |
MoeTensorParalellExperts._prepare_output_fn |
4 | 0 | 0 |
meth |
MoeTensorParalellExperts.shard_tensor |
5 | 3 | 0 |
meth |
GroupedGemmParallel.__init__ |
2 | 0 | 0 |
meth |
GroupedGemmParallel.shard_tensor |
5 | 3 | 0 |
func |
add_tensor_parallel_hooks_to_module |
8 | 0 | 0 |
meth |
ColwiseParallel.__init__ |
3 | 1 | 0 |
meth |
ColwiseParallel._prepare_input_fn |
4 | 0 | 0 |
meth |
ColwiseParallel._prepare_output_fn |
4 | 0 | 0 |
meth |
ColwiseParallel.shard_tensor |
5 | 3 | 0 |
attr |
ColwiseParallel.gather_output |
1 | 0 | 0 |
meth |
RouterParallel.__init__ |
2 | 0 | 0 |
meth |
RouterParallel._prepare_input_fn |
4 | 0 | 0 |
meth |
RouterParallel._prepare_output_fn |
4 | 0 | 0 |
meth |
RouterParallel.shard_tensor |
5 | 3 | 0 |
func |
initialize_tensor_parallelism |
5 | 2 | 0 |
meth |
EmbeddingParallel.__init__ |
3 | 1 | 0 |
meth |
EmbeddingParallel._prepare_input_fn |
4 | 0 | 0 |
meth |
EmbeddingParallel._prepare_output_fn |
4 | 0 | 0 |
meth |
EmbeddingParallel.shard_tensor |
5 | 3 | 0 |
attr |
EmbeddingParallel.embedding_dim_sharding |
1 | 0 | 0 |
meth |
RowwiseParallel.__init__ |
3 | 1 | 0 |
meth |
RowwiseParallel._prepare_input_fn |
4 | 0 | 0 |
meth |
RowwiseParallel._prepare_output_fn |
4 | 0 | 0 |
meth |
RowwiseParallel.shard_tensor |
5 | 3 | 0 |
attr |
RowwiseParallel.split_input |
1 | 0 | 0 |
func |
shard_and_distribute_module |
9 | 0 | 0 |
func |
all_reduce_forward |
3 | 0 | 0 |
meth |
ReplicatedWithGradAllReduce._prepare_input_fn |
4 | 0 | 0 |
meth |
ReplicatedWithGradAllReduce._prepare_output_fn |
4 | 0 | 0 |
meth |
ReplicatedWithGradAllReduce.shard_tensor |
5 | 0 | 0 |
meth |
ReplicatedWithGradAllReduce.prepare_module_tp |
4 | 0 | 0 |
func |
distribute_module |
5 | 2 | 0 |
func |
get_tensor_shard |
7 | 1 | 0 |
meth |
PackedColwiseParallel.shard_tensor |
5 | 3 | 0 |
func |
all_gather |
3 | 0 | 0 |
func |
verify_tp_plan |
3 | 2 | 0 |
func |
get_packed_weights |
6 | 0 | 0 |
func |
gather_state_dict_for_save |
5 | 4 | 0 |
meth |
MlaKvAProjParallel._prepare_output_fn |
4 | 0 | 0 |
meth |
MlaKvAProjParallel.shard_tensor |
5 | 0 | 0 |
meth |
MlaKvAProjParallel.prepare_module_tp |
5 | 0 | 0 |
func |
distribute_model |
6 | 0 | 0 |
attr |
ParallelInterface._global_mapping |
1 | 0 | 0 |
meth |
PackedRowwiseParallel.shard_tensor |
5 | 3 | 0 |
func |
split |
3 | 0 | 0 |
meth |
SequenceParallel.__init__ |
5 | 2 | 0 |
meth |
SequenceParallel._prepare_input_fn |
4 | 0 | 0 |
meth |
SequenceParallel._prepare_output_fn |
4 | 0 | 0 |
meth |
SequenceParallel.shard_tensor |
5 | 3 | 0 |
attr |
SequenceParallel.sequence_dim |
1 | 0 | 0 |
meth |
TensorParallelLayer.__init__ |
4 | 0 | 0 |
meth |
TensorParallelLayer._prepare_input_fn |
4 | 0 | 0 |
meth |
TensorParallelLayer._prepare_output_fn |
4 | 0 | 0 |
meth |
TensorParallelLayer.shard_tensor |
5 | 3 | 0 |
meth |
TensorParallelLayer.prepare_module_tp |
4 | 2 | 0 |
attr |
TensorParallelLayer.device_mesh |
1 | 0 | 0 |
attr |
TensorParallelLayer.rank |
1 | 0 | 0 |
attr |
TensorParallelLayer.empty_param |
1 | 0 | 0 |
meth |
MoeIdentityExpertParallel._prepare_input_fn |
4 | 0 | 0 |
meth |
MoeIdentityExpertParallel.shard_tensor |
5 | 0 | 0 |
meth |
MoeIdentityExpertParallel.prepare_module_tp |
4 | 0 | 0 |
func |
reduce_scatter |
3 | 0 | 0 |
func |
all_reduce_backward |
3 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.integrations.tiktoken (1 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
convert_tiktoken_to_fast |
3 | 2 | 1 |
transformers.integrations.torchao (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
TORCHAO_VERSION |
1 | 0 | 0 |
meth |
TorchAoQuantize.__init__ |
2 | 0 | 0 |
meth |
TorchAoQuantize.convert |
6 | 4 | 0 |
attr |
TorchAoQuantize.hf_quantizer |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
TorchAoDeserialize.__init__ |
2 | 0 | 0 |
meth |
TorchAoDeserialize.convert |
7 | 5 | 0 |
attr |
TorchAoDeserialize.hf_quantizer |
1 | 0 | 0 |
transformers.integrations.tpu (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
tpu_spmd_dataloader |
2 | 1 | 0 |
func |
save_tpu_checkpoint |
7 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
wrap_model_xla_fsdp |
4 | 0 | 0 |
transformers.integrations.vptq (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
func |
replace_with_vptq_linear |
4 | 1 | 0 |
transformers.loss.loss_d_fine (50 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DFineLoss.__init__ |
2 | 0 | 0 |
meth |
DFineLoss.unimodal_distribution_focal_loss |
8 | 0 | 0 |
meth |
DFineLoss.loss_local |
6 | 0 | 0 |
meth |
DFineLoss.get_loss |
6 | 0 | 0 |
attr |
DFineLoss.matcher |
1 | 0 | 0 |
attr |
DFineLoss.max_num_bins |
1 | 0 | 0 |
attr |
DFineLoss.weight_dict |
1 | 0 | 0 |
attr |
DFineLoss.losses |
1 | 0 | 0 |
attr |
DFineLoss.reg_scale |
1 | 0 | 0 |
attr |
DFineLoss.up |
1 | 0 | 0 |
func |
bbox2distance |
7 | 0 | 0 |
func |
translate_gt |
5 | 4 | 0 |
func |
DFineForObjectDetectionLoss |
14 | 0 | 0 |
transformers.loss.loss_deformable_detr (41 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
DeformableDetrForSegmentationLoss |
10 | 0 | 0 |
meth |
DeformableDetrImageLoss.__init__ |
5 | 0 | 0 |
meth |
DeformableDetrImageLoss.loss_cardinality |
5 | 0 | 0 |
meth |
DeformableDetrImageLoss.loss_labels |
5 | 0 | 0 |
attr |
DeformableDetrImageLoss.matcher |
1 | 0 | 0 |
attr |
DeformableDetrImageLoss.num_classes |
1 | 0 | 0 |
attr |
DeformableDetrImageLoss.focal_alpha |
1 | 0 | 0 |
attr |
DeformableDetrImageLoss.losses |
1 | 0 | 0 |
func |
DeformableDetrForObjectDetectionLoss |
9 | 0 | 0 |
meth |
DeformableDetrHungarianMatcher.forward |
3 | 0 | 0 |
transformers.loss.loss_for_object_detection (91 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
sigmoid_focal_loss |
6 | 2 | 0 |
meth |
ImageLoss.__init__ |
5 | 0 | 0 |
meth |
ImageLoss.loss_labels |
5 | 0 | 0 |
meth |
ImageLoss.loss_cardinality |
5 | 0 | 0 |
meth |
ImageLoss.loss_boxes |
5 | 0 | 0 |
meth |
ImageLoss.loss_masks |
5 | 0 | 0 |
meth |
ImageLoss._get_source_permutation_idx |
2 | 0 | 0 |
meth |
ImageLoss._get_target_permutation_idx |
2 | 0 | 0 |
meth |
ImageLoss.get_loss |
6 | 0 | 0 |
meth |
ImageLoss.forward |
3 | 0 | 0 |
attr |
ImageLoss.matcher |
1 | 0 | 0 |
attr |
ImageLoss.num_classes |
1 | 0 | 0 |
attr |
ImageLoss.eos_coef |
1 | 0 | 0 |
attr |
ImageLoss.losses |
1 | 0 | 0 |
func |
nested_tensor_from_tensor_list |
2 | 1 | 0 |
func |
ForSegmentationLoss |
10 | 0 | 0 |
func |
dice_loss |
4 | 0 | 0 |
meth |
HungarianMatcher.__init__ |
4 | 3 | 0 |
meth |
HungarianMatcher.forward |
3 | 0 | 0 |
attr |
HungarianMatcher.class_cost |
1 | 0 | 0 |
attr |
HungarianMatcher.bbox_cost |
1 | 0 | 0 |
attr |
HungarianMatcher.giou_cost |
1 | 0 | 0 |
func |
box_iou |
3 | 0 | 0 |
func |
generalized_box_iou |
3 | 0 | 0 |
func |
ForObjectDetectionLoss |
9 | 0 | 0 |
meth |
NestedTensor.__init__ |
3 | 1 | 0 |
meth |
NestedTensor.to |
2 | 0 | 0 |
meth |
NestedTensor.decompose |
1 | 0 | 0 |
meth |
NestedTensor.__repr__ |
1 | 0 | 0 |
attr |
NestedTensor.tensors |
1 | 0 | 0 |
attr |
NestedTensor.mask |
1 | 0 | 0 |
transformers.loss.loss_grounding_dino (37 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
sigmoid_focal_loss |
6 | 5 | 0 |
meth |
GroundingDinoImageLoss.__init__ |
4 | 0 | 0 |
meth |
GroundingDinoImageLoss.loss_cardinality |
5 | 0 | 0 |
meth |
GroundingDinoImageLoss._get_target_classes_one_hot |
4 | 0 | 0 |
meth |
GroundingDinoImageLoss.loss_labels |
5 | 0 | 0 |
attr |
GroundingDinoImageLoss.matcher |
1 | 0 | 0 |
attr |
GroundingDinoImageLoss.focal_alpha |
1 | 0 | 0 |
attr |
GroundingDinoImageLoss.losses |
1 | 0 | 0 |
meth |
GroundingDinoHungarianMatcher.forward |
3 | 0 | 0 |
func |
GroundingDinoForObjectDetectionLoss |
12 | 0 | 0 |
transformers.loss.loss_lw_detr (59 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LwDetrImageLoss.__init__ |
6 | 0 | 0 |
meth |
LwDetrImageLoss.loss_labels |
5 | 0 | 0 |
meth |
LwDetrImageLoss.loss_cardinality |
5 | 0 | 0 |
meth |
LwDetrImageLoss.loss_boxes |
5 | 0 | 0 |
meth |
LwDetrImageLoss.loss_masks |
5 | 0 | 0 |
meth |
LwDetrImageLoss._get_source_permutation_idx |
2 | 0 | 0 |
meth |
LwDetrImageLoss._get_target_permutation_idx |
2 | 0 | 0 |
meth |
LwDetrImageLoss.get_loss |
6 | 0 | 0 |
meth |
LwDetrImageLoss.forward |
3 | 0 | 0 |
attr |
LwDetrImageLoss.matcher |
1 | 0 | 0 |
attr |
LwDetrImageLoss.num_classes |
1 | 0 | 0 |
attr |
LwDetrImageLoss.focal_alpha |
1 | 0 | 0 |
attr |
LwDetrImageLoss.losses |
1 | 0 | 0 |
attr |
LwDetrImageLoss.group_detr |
1 | 0 | 0 |
func |
LwDetrForObjectDetectionLoss |
11 | 0 | 0 |
meth |
LwDetrHungarianMatcher.forward |
4 | 0 | 0 |
transformers.loss.loss_rt_detr (87 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RTDetrHungarianMatcher.__init__ |
2 | 0 | 0 |
meth |
RTDetrHungarianMatcher.forward |
3 | 0 | 0 |
attr |
RTDetrHungarianMatcher.class_cost |
1 | 0 | 0 |
attr |
RTDetrHungarianMatcher.bbox_cost |
1 | 0 | 0 |
attr |
RTDetrHungarianMatcher.giou_cost |
1 | 0 | 0 |
attr |
RTDetrHungarianMatcher.use_focal_loss |
1 | 0 | 0 |
attr |
RTDetrHungarianMatcher.alpha |
1 | 0 | 0 |
attr |
RTDetrHungarianMatcher.gamma |
1 | 0 | 0 |
func |
RTDetrForObjectDetectionLoss |
12 | 0 | 0 |
meth |
RTDetrLoss.__init__ |
2 | 0 | 0 |
meth |
RTDetrLoss.loss_labels_vfl |
6 | 0 | 0 |
meth |
RTDetrLoss.loss_labels |
6 | 0 | 0 |
meth |
RTDetrLoss.loss_cardinality |
5 | 0 | 0 |
meth |
RTDetrLoss.loss_boxes |
5 | 0 | 0 |
meth |
RTDetrLoss.loss_masks |
5 | 0 | 0 |
meth |
RTDetrLoss.loss_labels_bce |
6 | 0 | 0 |
meth |
RTDetrLoss._get_source_permutation_idx |
2 | 0 | 0 |
meth |
RTDetrLoss._get_target_permutation_idx |
2 | 0 | 0 |
meth |
RTDetrLoss.loss_labels_focal |
6 | 0 | 0 |
meth |
RTDetrLoss.get_loss |
6 | 0 | 0 |
meth |
RTDetrLoss.get_cdn_matched_indices |
3 | 0 | 0 |
meth |
RTDetrLoss.forward |
3 | 0 | 0 |
attr |
RTDetrLoss.matcher |
1 | 0 | 0 |
attr |
RTDetrLoss.num_classes |
1 | 0 | 0 |
attr |
RTDetrLoss.weight_dict |
1 | 0 | 0 |
attr |
RTDetrLoss.losses |
1 | 0 | 0 |
attr |
RTDetrLoss.eos_coef |
1 | 0 | 0 |
attr |
RTDetrLoss.alpha |
1 | 0 | 0 |
attr |
RTDetrLoss.gamma |
1 | 0 | 0 |
transformers.loss.loss_utils (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
ForTokenClassification |
5 | 1 | 0 |
func |
ForMaskedLMLoss |
7 | 5 | 0 |
func |
ForQuestionAnsweringLoss |
6 | 0 | 0 |
func |
ForCausalLMLoss |
8 | 5 | 0 |
func |
ForSequenceClassificationLoss |
5 | 3 | 0 |
func |
fixed_cross_entropy |
6 | 5 | 0 |
transformers.masking_utils (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
flex_attention_mask |
8 | 7 | 0 |
func |
get_style |
2 | 0 | 0 |
func |
eager_mask |
11 | 10 | 0 |
meth |
AttentionMask.__new__ |
3 | 0 | 0 |
meth |
AttentionMask.__init__ |
2 | 0 | 0 |
meth |
AttentionMask.to_string |
3 | 0 | 0 |
meth |
AttentionMask.__repr__ |
1 | 0 | 0 |
meth |
AttentionMask.__str__ |
1 | 0 | 0 |
attr |
AttentionMask.style |
1 | 0 | 0 |
func |
sdpa_mask |
13 | 12 | 0 |
func |
flash_attention_mask |
8 | 6 | 0 |
func |
tensor_to_mask_visual |
4 | 2 | 0 |
func |
create_masks_for_generate |
10 | 8 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.model_debugging_utils (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
model_addition_debugger_context |
5 | 3 | 0 |
attr |
logger |
1 | 0 | 0 |
attr |
MEMORY_ADDRESS_REGEX |
1 | 0 | 0 |
func |
is_layer_block |
2 | 0 | 0 |
func |
prune_outputs_if_children |
2 | 0 | 0 |
func |
prune_intermediate_layers |
2 | 0 | 0 |
func |
log_model_debug_trace |
3 | 1 | 0 |
attr |
LAYER_SUFFIX_RE |
1 | 0 | 0 |
transformers.modelcard (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
func |
extract_hyperparameters_from_trainer |
2 | 0 | 0 |
func |
is_hf_dataset |
2 | 0 | 0 |
func |
infer_metric_tags_from_eval_results |
2 | 0 | 0 |
func |
parse_log_history |
2 | 0 | 0 |
meth |
TrainingSummary.__post_init__ |
1 | 0 | 0 |
meth |
TrainingSummary.create_model_index |
2 | 0 | 0 |
meth |
TrainingSummary.create_metadata |
1 | 0 | 0 |
meth |
TrainingSummary.to_model_card |
1 | 0 | 0 |
meth |
TrainingSummary.from_trainer |
12 | 0 | 0 |
func |
make_markdown_table |
2 | 0 | 0 |
transformers.modeling_attn_mask_utils (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AttentionMaskConverter.__init__ |
3 | 2 | 0 |
meth |
AttentionMaskConverter._make_causal_mask |
6 | 5 | 0 |
meth |
AttentionMaskConverter._expand_mask |
4 | 3 | 0 |
meth |
AttentionMaskConverter._unmask_unattended |
3 | 2 | 0 |
transformers.modeling_flash_attention_utils (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
is_flash_attn_available |
1 | 0 | 0 |
func |
lazy_import_flash_attention |
4 | 3 | 0 |
func |
prepare_fa_kwargs_from_position_ids |
2 | 0 | 0 |
func |
fa_peft_integration_check |
5 | 4 | 0 |
func |
flash_attn_supports_top_left_mask |
1 | 0 | 0 |
func |
lazy_import_paged_flash_attention |
3 | 2 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.modeling_gguf_pytorch_utils (79 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TensorProcessor.__init__ |
2 | 0 | 0 |
meth |
TensorProcessor.perform_fallback_tensor_mapping |
5 | 4 | 0 |
meth |
TensorProcessor.process |
4 | 0 | 0 |
attr |
TensorProcessor.config |
1 | 0 | 0 |
meth |
BloomTensorProcessor.__init__ |
2 | 0 | 0 |
meth |
BloomTensorProcessor.process |
4 | 0 | 0 |
meth |
BloomTensorProcessor._reverse_reshape_weights |
4 | 3 | 0 |
meth |
BloomTensorProcessor._reverse_reshape_bias |
4 | 3 | 0 |
meth |
Lfm2TensorProcessor.__init__ |
2 | 0 | 0 |
meth |
Lfm2TensorProcessor.process |
4 | 0 | 0 |
meth |
NemotronTensorProcessor.__init__ |
2 | 0 | 0 |
meth |
NemotronTensorProcessor.process |
4 | 0 | 0 |
func |
read_field |
3 | 0 | 0 |
meth |
MambaTensorProcessor.__init__ |
2 | 0 | 0 |
meth |
MambaTensorProcessor.process |
4 | 0 | 0 |
func |
get_gguf_hf_weights_map |
6 | 4 | 0 |
meth |
GPT2TensorProcessor.__init__ |
2 | 0 | 0 |
meth |
GPT2TensorProcessor.process |
4 | 0 | 0 |
meth |
LlamaTensorProcessor.__init__ |
2 | 0 | 0 |
meth |
LlamaTensorProcessor.process |
4 | 0 | 0 |
attr |
GGUF_SUPPORTED_ARCHITECTURES |
1 | 0 | 0 |
meth |
Gemma2TensorProcessor.__init__ |
2 | 0 | 0 |
meth |
Gemma2TensorProcessor.process |
4 | 0 | 0 |
func |
load_gguf_checkpoint |
4 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
Qwen2MoeTensorProcessor.__init__ |
2 | 0 | 0 |
meth |
Qwen2MoeTensorProcessor.perform_fallback_tensor_mapping |
5 | 4 | 0 |
meth |
Qwen2MoeTensorProcessor.process |
4 | 1 | 0 |
meth |
Qwen2MoeTensorProcessor._set_moe_expert_tensor |
5 | 4 | 0 |
attr |
Qwen2MoeTensorProcessor.HF_EXPERT_RENAME_PATTERN |
1 | 0 | 0 |
attr |
Qwen2MoeTensorProcessor.HF_MOE_W13_PATTERN |
1 | 0 | 0 |
attr |
Qwen2MoeTensorProcessor.GGUF_MOE_WEIGHTS_PATTERN |
1 | 0 | 0 |
meth |
T5TensorProcessor.__init__ |
2 | 0 | 0 |
meth |
T5TensorProcessor.process |
4 | 0 | 0 |
transformers.modeling_layers (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GradientCheckpointingLayer.__call__ |
3 | 0 | 0 |
meth |
GenericForTokenClassification.__init__ |
2 | 0 | 0 |
attr |
GenericForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
GenericForTokenClassification.dropout |
1 | 0 | 0 |
attr |
GenericForTokenClassification.score |
1 | 0 | 0 |
meth |
GenericForSequenceClassification.__init__ |
2 | 0 | 0 |
attr |
GenericForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
GenericForSequenceClassification.score |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
GenericForQuestionAnswering.__init__ |
2 | 0 | 0 |
meth |
GenericForQuestionAnswering.get_input_embeddings |
1 | 0 | 0 |
meth |
GenericForQuestionAnswering.set_input_embeddings |
2 | 0 | 0 |
attr |
GenericForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
transformers.modeling_rope_utils (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
dynamic_rope_update |
2 | 0 | 0 |
func |
rope_config_validation |
3 | 2 | 0 |
meth |
RotaryEmbeddingConfigMixin.convert_rope_params_to_dict |
3 | 1 | 0 |
meth |
RotaryEmbeddingConfigMixin.standardize_rope_params |
1 | 0 | 0 |
meth |
RotaryEmbeddingConfigMixin.validate_rope |
2 | 1 | 0 |
meth |
RotaryEmbeddingConfigMixin._validate_default_rope_parameters |
3 | 2 | 0 |
meth |
RotaryEmbeddingConfigMixin._validate_linear_rope_parameters |
3 | 2 | 0 |
meth |
RotaryEmbeddingConfigMixin._validate_dynamic_rope_parameters |
3 | 2 | 0 |
meth |
RotaryEmbeddingConfigMixin._validate_yarn_rope_parameters |
3 | 2 | 0 |
meth |
RotaryEmbeddingConfigMixin._validate_longrope_rope_parameters |
3 | 2 | 0 |
meth |
RotaryEmbeddingConfigMixin._validate_llama3_rope_parameters |
3 | 2 | 0 |
meth |
RotaryEmbeddingConfigMixin._check_received_keys |
6 | 5 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.modeling_utils (137 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PreTrainedModel.__init_subclass__ |
2 | 0 | 0 |
meth |
PreTrainedModel.__init__ |
4 | 1 | 0 |
meth |
PreTrainedModel.post_init |
1 | 0 | 0 |
meth |
PreTrainedModel.dequantize |
2 | 0 | 0 |
meth |
PreTrainedModel._backward_compatibility_gradient_checkpointing |
1 | 0 | 0 |
meth |
PreTrainedModel._from_config |
3 | 0 | 0 |
meth |
PreTrainedModel.set_attn_implementation |
3 | 2 | 0 |
meth |
PreTrainedModel.set_experts_implementation |
2 | 1 | 0 |
meth |
PreTrainedModel.enable_input_require_grads |
1 | 0 | 0 |
meth |
PreTrainedModel.disable_input_require_grads |
1 | 0 | 0 |
meth |
PreTrainedModel.get_encoder |
2 | 1 | 0 |
meth |
PreTrainedModel.set_encoder |
3 | 1 | 0 |
meth |
PreTrainedModel.get_decoder |
1 | 0 | 0 |
meth |
PreTrainedModel.set_decoder |
2 | 0 | 0 |
meth |
PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
PreTrainedModel._initialize_weights |
3 | 1 | 0 |
meth |
PreTrainedModel.initialize_weights |
1 | 0 | 0 |
meth |
PreTrainedModel.tie_weights |
3 | 2 | 0 |
meth |
PreTrainedModel._adjust_bias |
3 | 0 | 0 |
meth |
PreTrainedModel._resize_token_embeddings |
4 | 0 | 0 |
meth |
PreTrainedModel._init_added_embeddings_weights_with_mean |
5 | 0 | 0 |
meth |
PreTrainedModel._init_added_lm_head_weights_with_mean |
7 | 1 | 0 |
meth |
PreTrainedModel._init_added_lm_head_bias_with_mean |
4 | 0 | 0 |
meth |
PreTrainedModel._copy_lm_head_original_to_resized |
6 | 0 | 0 |
meth |
PreTrainedModel.resize_position_embeddings |
2 | 1 | 0 |
meth |
PreTrainedModel.init_weights |
1 | 0 | 0 |
meth |
PreTrainedModel.gradient_checkpointing_enable |
2 | 0 | 0 |
meth |
PreTrainedModel._set_gradient_checkpointing |
3 | 2 | 0 |
meth |
PreTrainedModel.gradient_checkpointing_disable |
1 | 0 | 0 |
meth |
PreTrainedModel.save_pretrained |
11 | 9 | 0 |
meth |
PreTrainedModel.push_to_hub |
3 | 0 | 0 |
meth |
PreTrainedModel.get_memory_footprint |
2 | 0 | 0 |
meth |
PreTrainedModel.cuda |
3 | 0 | 0 |
meth |
PreTrainedModel.to |
3 | 0 | 0 |
meth |
PreTrainedModel.half |
2 | 0 | 0 |
meth |
PreTrainedModel.float |
2 | 0 | 0 |
meth |
PreTrainedModel.get_init_context |
5 | 4 | 0 |
meth |
PreTrainedModel.set_use_kernels |
3 | 1 | 0 |
meth |
PreTrainedModel.from_pretrained |
13 | 11 | 0 |
meth |
PreTrainedModel._finalize_model_loading |
4 | 3 | 0 |
meth |
PreTrainedModel.retrieve_modules_from_names |
4 | 0 | 0 |
meth |
PreTrainedModel.register_for_auto_class |
2 | 0 | 0 |
meth |
PreTrainedModel.warn_if_padding_and_no_attention_mask |
3 | 0 | 0 |
meth |
PreTrainedModel.kernelize |
2 | 0 | 0 |
meth |
PreTrainedModel.is_backend_compatible |
1 | 0 | 0 |
meth |
PreTrainedModel.mark_tied_weights_as_initialized |
2 | 0 | 0 |
meth |
PreTrainedModel.get_parameter_or_buffer |
2 | 1 | 0 |
meth |
PreTrainedModel.train |
2 | 1 | 0 |
meth |
PreTrainedModel.eval |
1 | 0 | 0 |
prop |
PreTrainedModel.supports_tp_plan |
1 | 0 | 0 |
prop |
PreTrainedModel.tp_size |
1 | 0 | 0 |
prop |
PreTrainedModel.supports_pp_plan |
1 | 0 | 0 |
prop |
PreTrainedModel.loss_function |
2 | 0 | 0 |
attr |
PreTrainedModel.config |
1 | 0 | 0 |
attr |
PreTrainedModel.name_or_path |
1 | 0 | 0 |
attr |
PreTrainedModel.loss_type |
1 | 0 | 0 |
attr |
PreTrainedModel.generation_config |
1 | 0 | 0 |
func |
get_state_dict_dtype |
2 | 0 | 0 |
func |
is_local_dist_rank_0 |
1 | 0 | 0 |
attr |
XLA_DOWNCAST_BF16 |
1 | 0 | 0 |
meth |
EmbeddingAccessMixin.set_input_embeddings |
2 | 1 | 0 |
meth |
EmbeddingAccessMixin.get_output_embeddings |
1 | 0 | 0 |
meth |
EmbeddingAccessMixin.set_output_embeddings |
2 | 0 | 0 |
func |
set_quantized_state |
1 | 0 | 0 |
func |
get_torch_context_manager_or_global_device |
1 | 0 | 0 |
meth |
ModuleUtilsMixin.create_extended_attention_mask_for_decoder |
3 | 0 | 0 |
func |
get_total_byte_count |
4 | 3 | 0 |
attr |
XLA_USE_BF16 |
1 | 0 | 0 |
meth |
PreTrainedAudioTokenizerBase.encode |
4 | 1 | 0 |
meth |
PreTrainedAudioTokenizerBase.decode |
4 | 1 | 0 |
func |
set_zero3_state |
1 | 0 | 0 |
attr |
IS_SAGEMAKER_MP_POST_1_10 |
1 | 0 | 0 |
func |
local_torch_dtype |
3 | 2 | 0 |
func |
caching_allocator_warmup |
4 | 3 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.models.afmoe.configuration_afmoe (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AfmoeConfig.__init__ |
31 | 29 | 0 |
attr |
AfmoeConfig.vocab_size |
1 | 0 | 0 |
attr |
AfmoeConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
AfmoeConfig.hidden_size |
1 | 0 | 0 |
attr |
AfmoeConfig.intermediate_size |
1 | 0 | 0 |
attr |
AfmoeConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
AfmoeConfig.num_dense_layers |
1 | 0 | 0 |
attr |
AfmoeConfig.num_attention_heads |
1 | 0 | 0 |
attr |
AfmoeConfig.head_dim |
1 | 0 | 0 |
attr |
AfmoeConfig.hidden_act |
1 | 0 | 0 |
attr |
AfmoeConfig.initializer_range |
1 | 0 | 0 |
attr |
AfmoeConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
AfmoeConfig.use_cache |
1 | 0 | 0 |
attr |
AfmoeConfig.rope_theta |
1 | 0 | 0 |
attr |
AfmoeConfig.rope_parameters |
1 | 0 | 0 |
attr |
AfmoeConfig.moe_intermediate_size |
1 | 0 | 0 |
attr |
AfmoeConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
AfmoeConfig.num_experts |
1 | 0 | 0 |
attr |
AfmoeConfig.num_shared_experts |
1 | 0 | 0 |
attr |
AfmoeConfig.route_scale |
1 | 0 | 0 |
attr |
AfmoeConfig.attention_bias |
1 | 0 | 0 |
attr |
AfmoeConfig.attention_dropout |
1 | 0 | 0 |
attr |
AfmoeConfig.global_attn_every_n_layers |
1 | 0 | 0 |
attr |
AfmoeConfig.sliding_window |
1 | 0 | 0 |
attr |
AfmoeConfig.mup_enabled |
1 | 0 | 0 |
attr |
AfmoeConfig.layer_types |
1 | 0 | 0 |
attr |
AfmoeConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
AfmoeConfig.eos_token_id |
1 | 0 | 0 |
attr |
AfmoeConfig.pad_token_id |
1 | 0 | 0 |
attr |
AfmoeConfig.bos_token_id |
1 | 0 | 0 |
attr |
AfmoeConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.afmoe.modeling_afmoe (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AfmoeModel.__init__ |
2 | 1 | 0 |
attr |
AfmoeModel.padding_idx |
1 | 0 | 0 |
attr |
AfmoeModel.vocab_size |
1 | 0 | 0 |
attr |
AfmoeModel.embed_tokens |
1 | 0 | 0 |
attr |
AfmoeModel.layers |
1 | 0 | 0 |
attr |
AfmoeModel.norm |
1 | 0 | 0 |
attr |
AfmoeModel.rotary_emb |
1 | 0 | 0 |
attr |
AfmoeModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
AfmoeForCausalLM.__init__ |
2 | 0 | 0 |
attr |
AfmoeForCausalLM.model |
1 | 0 | 0 |
attr |
AfmoeForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
AfmoeForCausalLM.lm_head |
1 | 0 | 0 |
meth |
AfmoePreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.afmoe.modular_afmoe (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AfmoeModel.__init__ |
2 | 1 | 0 |
attr |
AfmoeModel.padding_idx |
1 | 0 | 0 |
attr |
AfmoeModel.vocab_size |
1 | 0 | 0 |
attr |
AfmoeModel.embed_tokens |
1 | 0 | 0 |
attr |
AfmoeModel.layers |
1 | 0 | 0 |
attr |
AfmoeModel.norm |
1 | 0 | 0 |
attr |
AfmoeModel.rotary_emb |
1 | 0 | 0 |
attr |
AfmoeModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
AfmoeForCausalLM.__init__ |
2 | 0 | 0 |
attr |
AfmoeForCausalLM.model |
1 | 0 | 0 |
attr |
AfmoeForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
AfmoeForCausalLM.lm_head |
1 | 0 | 0 |
meth |
AfmoePreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.aimv2.configuration_aimv2 (43 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Aimv2Config.__init__ |
6 | 0 | 0 |
attr |
Aimv2Config.projection_dim |
1 | 0 | 0 |
attr |
Aimv2Config.logit_scale_init_value |
1 | 0 | 0 |
attr |
Aimv2Config.max_logit_scale |
1 | 0 | 0 |
attr |
Aimv2Config.text_config |
1 | 0 | 0 |
attr |
Aimv2Config.vision_config |
1 | 0 | 0 |
meth |
Aimv2TextConfig.__init__ |
15 | 13 | 0 |
attr |
Aimv2TextConfig.eos_token_id |
1 | 0 | 0 |
attr |
Aimv2TextConfig.vocab_size |
1 | 0 | 0 |
attr |
Aimv2TextConfig.hidden_size |
1 | 0 | 0 |
attr |
Aimv2TextConfig.intermediate_size |
1 | 0 | 0 |
attr |
Aimv2TextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Aimv2TextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Aimv2TextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Aimv2TextConfig.hidden_act |
1 | 0 | 0 |
attr |
Aimv2TextConfig.attention_dropout |
1 | 0 | 0 |
attr |
Aimv2TextConfig.initializer_range |
1 | 0 | 0 |
attr |
Aimv2TextConfig.mlp_bias |
1 | 0 | 0 |
attr |
Aimv2TextConfig.qkv_bias |
1 | 0 | 0 |
attr |
Aimv2TextConfig.rms_norm_eps |
1 | 0 | 0 |
meth |
Aimv2VisionConfig.__init__ |
17 | 15 | 0 |
attr |
Aimv2VisionConfig.hidden_size |
1 | 0 | 0 |
attr |
Aimv2VisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
Aimv2VisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Aimv2VisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Aimv2VisionConfig.num_channels |
1 | 0 | 0 |
attr |
Aimv2VisionConfig.patch_size |
1 | 0 | 0 |
attr |
Aimv2VisionConfig.image_size |
1 | 0 | 0 |
attr |
Aimv2VisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
Aimv2VisionConfig.hidden_act |
1 | 0 | 0 |
attr |
Aimv2VisionConfig.use_head |
1 | 0 | 0 |
attr |
Aimv2VisionConfig.initializer_range |
1 | 0 | 0 |
attr |
Aimv2VisionConfig.mlp_bias |
1 | 0 | 0 |
attr |
Aimv2VisionConfig.qkv_bias |
1 | 0 | 0 |
attr |
Aimv2VisionConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Aimv2VisionConfig.is_native |
1 | 0 | 0 |
transformers.models.aimv2.modeling_aimv2 (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Aimv2PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Aimv2TextModel.__init__ |
2 | 1 | 0 |
meth |
Aimv2TextModel.set_input_embeddings |
2 | 0 | 0 |
meth |
Aimv2TextModel.forward |
4 | 3 | 0 |
attr |
Aimv2TextModel.embeddings |
1 | 0 | 0 |
attr |
Aimv2TextModel.encoder |
1 | 0 | 0 |
attr |
Aimv2TextModel.rms_norm |
1 | 0 | 0 |
attr |
Aimv2TextModel.eos_token_id |
1 | 0 | 0 |
meth |
Aimv2Model.__init__ |
2 | 1 | 0 |
attr |
Aimv2Model.projection_dim |
1 | 0 | 0 |
attr |
Aimv2Model.vision_embed_dim |
1 | 0 | 0 |
attr |
Aimv2Model.text_embed_dim |
1 | 0 | 0 |
attr |
Aimv2Model.vision_model |
1 | 0 | 0 |
attr |
Aimv2Model.text_model |
1 | 0 | 0 |
attr |
Aimv2Model.visual_projection |
1 | 0 | 0 |
attr |
Aimv2Model.text_projection |
1 | 0 | 0 |
attr |
Aimv2Model.logit_scale |
1 | 0 | 0 |
attr |
Aimv2Model.max_log_logit_scale |
1 | 0 | 0 |
meth |
Aimv2VisionModel.__init__ |
2 | 1 | 0 |
meth |
Aimv2VisionModel.forward |
3 | 2 | 0 |
attr |
Aimv2VisionModel.embeddings |
1 | 0 | 0 |
attr |
Aimv2VisionModel.encoder |
1 | 0 | 0 |
attr |
Aimv2VisionModel.rms_norm |
1 | 0 | 0 |
attr |
Aimv2VisionModel.use_head |
1 | 0 | 0 |
attr |
Aimv2VisionModel.head |
1 | 0 | 0 |
transformers.models.aimv2.modular_aimv2 (52 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Aimv2TextModel.__init__ |
2 | 1 | 0 |
meth |
Aimv2TextModel.set_input_embeddings |
2 | 0 | 0 |
meth |
Aimv2TextModel.forward |
4 | 3 | 0 |
attr |
Aimv2TextModel.embeddings |
1 | 0 | 0 |
attr |
Aimv2TextModel.encoder |
1 | 0 | 0 |
attr |
Aimv2TextModel.rms_norm |
1 | 0 | 0 |
attr |
Aimv2TextModel.eos_token_id |
1 | 0 | 0 |
meth |
Aimv2TextConfig.__init__ |
15 | 13 | 0 |
attr |
Aimv2TextConfig.initializer_range |
1 | 0 | 0 |
attr |
Aimv2TextConfig.attention_dropout |
1 | 0 | 0 |
attr |
Aimv2TextConfig.mlp_bias |
1 | 0 | 0 |
attr |
Aimv2TextConfig.qkv_bias |
1 | 0 | 0 |
attr |
Aimv2TextConfig.rms_norm_eps |
1 | 0 | 0 |
meth |
Aimv2Model.__init__ |
2 | 1 | 0 |
attr |
Aimv2Model.projection_dim |
1 | 0 | 0 |
attr |
Aimv2Model.vision_embed_dim |
1 | 0 | 0 |
attr |
Aimv2Model.text_embed_dim |
1 | 0 | 0 |
attr |
Aimv2Model.vision_model |
1 | 0 | 0 |
attr |
Aimv2Model.text_model |
1 | 0 | 0 |
attr |
Aimv2Model.visual_projection |
1 | 0 | 0 |
attr |
Aimv2Model.text_projection |
1 | 0 | 0 |
attr |
Aimv2Model.logit_scale |
1 | 0 | 0 |
attr |
Aimv2Model.max_log_logit_scale |
1 | 0 | 0 |
meth |
Aimv2VisionConfig.__init__ |
17 | 15 | 0 |
attr |
Aimv2VisionConfig.use_head |
1 | 0 | 0 |
attr |
Aimv2VisionConfig.initializer_range |
1 | 0 | 0 |
attr |
Aimv2VisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
Aimv2VisionConfig.mlp_bias |
1 | 0 | 0 |
attr |
Aimv2VisionConfig.qkv_bias |
1 | 0 | 0 |
attr |
Aimv2VisionConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Aimv2VisionConfig.is_native |
1 | 0 | 0 |
meth |
Aimv2Config.__init__ |
6 | 0 | 0 |
attr |
Aimv2Config.projection_dim |
1 | 0 | 0 |
attr |
Aimv2Config.logit_scale_init_value |
1 | 0 | 0 |
attr |
Aimv2Config.max_logit_scale |
1 | 0 | 0 |
meth |
Aimv2PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Aimv2VisionModel.__init__ |
2 | 1 | 0 |
meth |
Aimv2VisionModel.forward |
3 | 2 | 0 |
attr |
Aimv2VisionModel.embeddings |
1 | 0 | 0 |
attr |
Aimv2VisionModel.encoder |
1 | 0 | 0 |
attr |
Aimv2VisionModel.rms_norm |
1 | 0 | 0 |
attr |
Aimv2VisionModel.use_head |
1 | 0 | 0 |
attr |
Aimv2VisionModel.head |
1 | 0 | 0 |
transformers.models.albert.configuration_albert (42 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AlbertConfig.__init__ |
22 | 0 | 0 |
attr |
AlbertConfig.pad_token_id |
1 | 0 | 0 |
attr |
AlbertConfig.bos_token_id |
1 | 0 | 0 |
attr |
AlbertConfig.eos_token_id |
1 | 0 | 0 |
attr |
AlbertConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
AlbertConfig.vocab_size |
1 | 0 | 0 |
attr |
AlbertConfig.embedding_size |
1 | 0 | 0 |
attr |
AlbertConfig.hidden_size |
1 | 0 | 0 |
attr |
AlbertConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
AlbertConfig.num_hidden_groups |
1 | 0 | 0 |
attr |
AlbertConfig.num_attention_heads |
1 | 0 | 0 |
attr |
AlbertConfig.inner_group_num |
1 | 0 | 0 |
attr |
AlbertConfig.hidden_act |
1 | 0 | 0 |
attr |
AlbertConfig.intermediate_size |
1 | 0 | 0 |
attr |
AlbertConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
AlbertConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
AlbertConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
AlbertConfig.type_vocab_size |
1 | 0 | 0 |
attr |
AlbertConfig.initializer_range |
1 | 0 | 0 |
attr |
AlbertConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
AlbertConfig.classifier_dropout_prob |
1 | 0 | 0 |
transformers.models.albert.modeling_albert (36 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AlbertPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
AlbertForMultipleChoice.__init__ |
2 | 1 | 0 |
attr |
AlbertForMultipleChoice.albert |
1 | 0 | 0 |
attr |
AlbertForMultipleChoice.dropout |
1 | 0 | 0 |
attr |
AlbertForMultipleChoice.classifier |
1 | 0 | 0 |
meth |
AlbertForMaskedLM.__init__ |
2 | 0 | 0 |
attr |
AlbertForMaskedLM.albert |
1 | 0 | 0 |
attr |
AlbertForMaskedLM.predictions |
1 | 0 | 0 |
meth |
AlbertForSequenceClassification.__init__ |
2 | 1 | 0 |
attr |
AlbertForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
AlbertForSequenceClassification.config |
1 | 0 | 0 |
attr |
AlbertForSequenceClassification.albert |
1 | 0 | 0 |
attr |
AlbertForSequenceClassification.dropout |
1 | 0 | 0 |
attr |
AlbertForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
AlbertForQuestionAnswering.__init__ |
2 | 1 | 0 |
attr |
AlbertForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
AlbertForQuestionAnswering.albert |
1 | 0 | 0 |
attr |
AlbertForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
AlbertModel.__init__ |
3 | 2 | 0 |
attr |
AlbertModel.config |
1 | 0 | 0 |
attr |
AlbertModel.embeddings |
1 | 0 | 0 |
attr |
AlbertModel.encoder |
1 | 0 | 0 |
attr |
AlbertModel.attn_implementation |
1 | 0 | 0 |
attr |
AlbertModel.pooler |
1 | 0 | 0 |
attr |
AlbertModel.pooler_activation |
1 | 0 | 0 |
meth |
AlbertForTokenClassification.__init__ |
2 | 1 | 0 |
attr |
AlbertForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
AlbertForTokenClassification.albert |
1 | 0 | 0 |
attr |
AlbertForTokenClassification.dropout |
1 | 0 | 0 |
attr |
AlbertForTokenClassification.classifier |
1 | 0 | 0 |
meth |
AlbertForPreTraining.__init__ |
2 | 1 | 0 |
attr |
AlbertForPreTraining.albert |
1 | 0 | 0 |
attr |
AlbertForPreTraining.predictions |
1 | 0 | 0 |
attr |
AlbertForPreTraining.sop_classifier |
1 | 0 | 0 |
transformers.models.albert.tokenization_albert (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AlbertTokenizer.__init__ |
14 | 12 | 0 |
attr |
AlbertTokenizer.add_prefix_space |
1 | 0 | 0 |
attr |
AlbertTokenizer.trim_offsets |
1 | 0 | 0 |
attr |
AlbertTokenizer.do_lower_case |
1 | 0 | 0 |
attr |
AlbertTokenizer.keep_accents |
1 | 0 | 0 |
transformers.models.align.configuration_align (67 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AlignTextConfig.__init__ |
17 | 0 | 0 |
attr |
AlignTextConfig.vocab_size |
1 | 0 | 0 |
attr |
AlignTextConfig.hidden_size |
1 | 0 | 0 |
attr |
AlignTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
AlignTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
AlignTextConfig.hidden_act |
1 | 0 | 0 |
attr |
AlignTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
AlignTextConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
AlignTextConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
AlignTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
AlignTextConfig.type_vocab_size |
1 | 0 | 0 |
attr |
AlignTextConfig.initializer_range |
1 | 0 | 0 |
attr |
AlignTextConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
AlignTextConfig.pad_token_id |
1 | 0 | 0 |
attr |
AlignTextConfig.bos_token_id |
1 | 0 | 0 |
attr |
AlignTextConfig.eos_token_id |
1 | 0 | 0 |
meth |
AlignConfig.__init__ |
7 | 0 | 0 |
attr |
AlignConfig.text_config |
1 | 0 | 0 |
attr |
AlignConfig.vision_config |
1 | 0 | 0 |
attr |
AlignConfig.projection_dim |
1 | 0 | 0 |
attr |
AlignConfig.temperature_init_value |
1 | 0 | 0 |
attr |
AlignConfig.initializer_range |
1 | 0 | 0 |
meth |
AlignVisionConfig.__init__ |
22 | 20 | 0 |
attr |
AlignVisionConfig.num_channels |
1 | 0 | 0 |
attr |
AlignVisionConfig.image_size |
1 | 0 | 0 |
attr |
AlignVisionConfig.width_coefficient |
1 | 0 | 0 |
attr |
AlignVisionConfig.depth_coefficient |
1 | 0 | 0 |
attr |
AlignVisionConfig.depth_divisor |
1 | 0 | 0 |
attr |
AlignVisionConfig.kernel_sizes |
1 | 0 | 0 |
attr |
AlignVisionConfig.in_channels |
1 | 0 | 0 |
attr |
AlignVisionConfig.out_channels |
1 | 0 | 0 |
attr |
AlignVisionConfig.depthwise_padding |
1 | 0 | 0 |
attr |
AlignVisionConfig.strides |
1 | 0 | 0 |
attr |
AlignVisionConfig.num_block_repeats |
1 | 0 | 0 |
attr |
AlignVisionConfig.expand_ratios |
1 | 0 | 0 |
attr |
AlignVisionConfig.squeeze_expansion_ratio |
1 | 0 | 0 |
attr |
AlignVisionConfig.hidden_act |
1 | 0 | 0 |
attr |
AlignVisionConfig.hidden_dim |
1 | 0 | 0 |
attr |
AlignVisionConfig.pooling_type |
1 | 0 | 0 |
attr |
AlignVisionConfig.initializer_range |
1 | 0 | 0 |
attr |
AlignVisionConfig.batch_norm_eps |
1 | 0 | 0 |
attr |
AlignVisionConfig.batch_norm_momentum |
1 | 0 | 0 |
attr |
AlignVisionConfig.drop_connect_rate |
1 | 0 | 0 |
attr |
AlignVisionConfig.num_hidden_layers |
1 | 0 | 0 |
transformers.models.align.modeling_align (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AlignTextModel.__init__ |
3 | 2 | 0 |
meth |
AlignTextModel.get_input_embeddings |
1 | 0 | 0 |
meth |
AlignTextModel.set_input_embeddings |
2 | 0 | 0 |
attr |
AlignTextModel.embeddings |
1 | 0 | 0 |
attr |
AlignTextModel.encoder |
1 | 0 | 0 |
attr |
AlignTextModel.pooler |
1 | 0 | 0 |
meth |
AlignPreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
AlignVisionModel.__init__ |
2 | 1 | 0 |
attr |
AlignVisionModel.embeddings |
1 | 0 | 0 |
attr |
AlignVisionModel.encoder |
1 | 0 | 0 |
attr |
AlignVisionModel.pooler |
1 | 0 | 0 |
meth |
AlignModel.__init__ |
2 | 1 | 0 |
attr |
AlignModel.projection_dim |
1 | 0 | 0 |
attr |
AlignModel.text_embed_dim |
1 | 0 | 0 |
attr |
AlignModel.text_model |
1 | 0 | 0 |
attr |
AlignModel.vision_model |
1 | 0 | 0 |
attr |
AlignModel.text_projection |
1 | 0 | 0 |
attr |
AlignModel.temperature |
1 | 0 | 0 |
transformers.models.align.processing_align (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AlignProcessor.__init__ |
3 | 0 | 0 |
transformers.models.altclip.configuration_altclip (75 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AltCLIPVisionConfig.__init__ |
15 | 0 | 0 |
attr |
AltCLIPVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
AltCLIPVisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
AltCLIPVisionConfig.projection_dim |
1 | 0 | 0 |
attr |
AltCLIPVisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
AltCLIPVisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
AltCLIPVisionConfig.num_channels |
1 | 0 | 0 |
attr |
AltCLIPVisionConfig.patch_size |
1 | 0 | 0 |
attr |
AltCLIPVisionConfig.image_size |
1 | 0 | 0 |
attr |
AltCLIPVisionConfig.initializer_range |
1 | 0 | 0 |
attr |
AltCLIPVisionConfig.initializer_factor |
1 | 0 | 0 |
attr |
AltCLIPVisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
AltCLIPVisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
AltCLIPVisionConfig.hidden_act |
1 | 0 | 0 |
meth |
AltCLIPConfig.__init__ |
6 | 0 | 0 |
attr |
AltCLIPConfig.text_config |
1 | 0 | 0 |
attr |
AltCLIPConfig.vision_config |
1 | 0 | 0 |
attr |
AltCLIPConfig.projection_dim |
1 | 0 | 0 |
attr |
AltCLIPConfig.logit_scale_init_value |
1 | 0 | 0 |
attr |
AltCLIPConfig.initializer_factor |
1 | 0 | 0 |
meth |
AltCLIPTextConfig.__init__ |
19 | 0 | 0 |
attr |
AltCLIPTextConfig.pad_token_id |
1 | 0 | 0 |
attr |
AltCLIPTextConfig.bos_token_id |
1 | 0 | 0 |
attr |
AltCLIPTextConfig.eos_token_id |
1 | 0 | 0 |
attr |
AltCLIPTextConfig.vocab_size |
1 | 0 | 0 |
attr |
AltCLIPTextConfig.hidden_size |
1 | 0 | 0 |
attr |
AltCLIPTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
AltCLIPTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
AltCLIPTextConfig.hidden_act |
1 | 0 | 0 |
attr |
AltCLIPTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
AltCLIPTextConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
AltCLIPTextConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
AltCLIPTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
AltCLIPTextConfig.type_vocab_size |
1 | 0 | 0 |
attr |
AltCLIPTextConfig.initializer_range |
1 | 0 | 0 |
attr |
AltCLIPTextConfig.initializer_factor |
1 | 0 | 0 |
attr |
AltCLIPTextConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
AltCLIPTextConfig.project_dim |
1 | 0 | 0 |
transformers.models.altclip.modeling_altclip (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AltCLIPTextModel.__init__ |
2 | 0 | 0 |
attr |
AltCLIPTextModel.roberta |
1 | 0 | 0 |
attr |
AltCLIPTextModel.transformation |
1 | 0 | 0 |
attr |
AltCLIPTextModel.pre_LN |
1 | 0 | 0 |
meth |
AltCLIPVisionModel.__init__ |
2 | 1 | 0 |
attr |
AltCLIPVisionModel.vision_model |
1 | 0 | 0 |
meth |
AltCLIPModel.__init__ |
2 | 1 | 0 |
attr |
AltCLIPModel.projection_dim |
1 | 0 | 0 |
attr |
AltCLIPModel.text_embed_dim |
1 | 0 | 0 |
attr |
AltCLIPModel.vision_embed_dim |
1 | 0 | 0 |
attr |
AltCLIPModel.text_model |
1 | 0 | 0 |
attr |
AltCLIPModel.vision_model |
1 | 0 | 0 |
attr |
AltCLIPModel.visual_projection |
1 | 0 | 0 |
attr |
AltCLIPModel.text_projection |
1 | 0 | 0 |
attr |
AltCLIPModel.logit_scale |
1 | 0 | 0 |
meth |
AltCLIPPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.altclip.processing_altclip (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AltCLIPProcessor.__init__ |
3 | 0 | 0 |
transformers.models.apertus.configuration_apertus (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ApertusConfig.__init__ |
20 | 18 | 0 |
attr |
ApertusConfig.vocab_size |
1 | 0 | 0 |
attr |
ApertusConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
ApertusConfig.hidden_size |
1 | 0 | 0 |
attr |
ApertusConfig.intermediate_size |
1 | 0 | 0 |
attr |
ApertusConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
ApertusConfig.num_attention_heads |
1 | 0 | 0 |
attr |
ApertusConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
ApertusConfig.hidden_act |
1 | 0 | 0 |
attr |
ApertusConfig.initializer_range |
1 | 0 | 0 |
attr |
ApertusConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
ApertusConfig.use_cache |
1 | 0 | 0 |
attr |
ApertusConfig.attention_bias |
1 | 0 | 0 |
attr |
ApertusConfig.attention_dropout |
1 | 0 | 0 |
attr |
ApertusConfig.rope_parameters |
1 | 0 | 0 |
attr |
ApertusConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
ApertusConfig.pad_token_id |
1 | 0 | 0 |
attr |
ApertusConfig.bos_token_id |
1 | 0 | 0 |
attr |
ApertusConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.apertus.modeling_apertus (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ApertusModel.__init__ |
2 | 1 | 0 |
attr |
ApertusModel.padding_idx |
1 | 0 | 0 |
attr |
ApertusModel.vocab_size |
1 | 0 | 0 |
attr |
ApertusModel.embed_tokens |
1 | 0 | 0 |
attr |
ApertusModel.layers |
1 | 0 | 0 |
attr |
ApertusModel.norm |
1 | 0 | 0 |
attr |
ApertusModel.rotary_emb |
1 | 0 | 0 |
attr |
ApertusModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
ApertusForCausalLM.__init__ |
2 | 0 | 0 |
attr |
ApertusForCausalLM.model |
1 | 0 | 0 |
attr |
ApertusForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
ApertusForCausalLM.lm_head |
1 | 0 | 0 |
transformers.models.apertus.modular_apertus (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ApertusForCausalLM.forward |
2 | 0 | 0 |
meth |
ApertusConfig.__init__ |
20 | 18 | 0 |
attr |
ApertusConfig.vocab_size |
1 | 0 | 0 |
attr |
ApertusConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
ApertusConfig.hidden_size |
1 | 0 | 0 |
attr |
ApertusConfig.intermediate_size |
1 | 0 | 0 |
attr |
ApertusConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
ApertusConfig.num_attention_heads |
1 | 0 | 0 |
attr |
ApertusConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
ApertusConfig.hidden_act |
1 | 0 | 0 |
attr |
ApertusConfig.initializer_range |
1 | 0 | 0 |
attr |
ApertusConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
ApertusConfig.use_cache |
1 | 0 | 0 |
attr |
ApertusConfig.attention_bias |
1 | 0 | 0 |
attr |
ApertusConfig.attention_dropout |
1 | 0 | 0 |
attr |
ApertusConfig.rope_parameters |
1 | 0 | 0 |
attr |
ApertusConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
ApertusConfig.pad_token_id |
1 | 0 | 0 |
attr |
ApertusConfig.bos_token_id |
1 | 0 | 0 |
attr |
ApertusConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.arcee.configuration_arcee (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ArceeConfig.__init__ |
22 | 20 | 0 |
attr |
ArceeConfig.vocab_size |
1 | 0 | 0 |
attr |
ArceeConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
ArceeConfig.hidden_size |
1 | 0 | 0 |
attr |
ArceeConfig.intermediate_size |
1 | 0 | 0 |
attr |
ArceeConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
ArceeConfig.num_attention_heads |
1 | 0 | 0 |
attr |
ArceeConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
ArceeConfig.hidden_act |
1 | 0 | 0 |
attr |
ArceeConfig.initializer_range |
1 | 0 | 0 |
attr |
ArceeConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
ArceeConfig.use_cache |
1 | 0 | 0 |
attr |
ArceeConfig.attention_bias |
1 | 0 | 0 |
attr |
ArceeConfig.attention_dropout |
1 | 0 | 0 |
attr |
ArceeConfig.mlp_bias |
1 | 0 | 0 |
attr |
ArceeConfig.head_dim |
1 | 0 | 0 |
attr |
ArceeConfig.rope_parameters |
1 | 0 | 0 |
attr |
ArceeConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
ArceeConfig.pad_token_id |
1 | 0 | 0 |
attr |
ArceeConfig.bos_token_id |
1 | 0 | 0 |
attr |
ArceeConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.arcee.modeling_arcee (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ArceeModel.__init__ |
2 | 1 | 0 |
attr |
ArceeModel.padding_idx |
1 | 0 | 0 |
attr |
ArceeModel.vocab_size |
1 | 0 | 0 |
attr |
ArceeModel.embed_tokens |
1 | 0 | 0 |
attr |
ArceeModel.layers |
1 | 0 | 0 |
attr |
ArceeModel.norm |
1 | 0 | 0 |
attr |
ArceeModel.rotary_emb |
1 | 0 | 0 |
attr |
ArceeModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
ArceeForCausalLM.__init__ |
2 | 0 | 0 |
attr |
ArceeForCausalLM.model |
1 | 0 | 0 |
attr |
ArceeForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
ArceeForCausalLM.lm_head |
1 | 0 | 0 |
transformers.models.arcee.modular_arcee (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
ArceeModel |
1 | 0 | 0 |
meth |
ArceeConfig.__init__ |
22 | 20 | 0 |
attr |
ArceePreTrainedModel |
1 | 0 | 0 |
transformers.models.aria.configuration_aria (38 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AriaConfig.__init__ |
9 | 6 | 0 |
attr |
AriaConfig.image_token_index |
1 | 0 | 0 |
attr |
AriaConfig.projector_patch_to_query_dict |
1 | 0 | 0 |
attr |
AriaConfig.max_value_projector_patch_to_query_dict |
1 | 0 | 0 |
attr |
AriaConfig.vision_feature_layer |
1 | 0 | 0 |
attr |
AriaConfig.vision_config |
1 | 0 | 0 |
attr |
AriaConfig.initializer_range |
1 | 0 | 0 |
attr |
AriaConfig.text_config |
1 | 0 | 0 |
attr |
AriaConfig.tie_word_embeddings |
1 | 0 | 0 |
meth |
AriaTextConfig.__init__ |
26 | 23 | 0 |
attr |
AriaTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
AriaTextConfig.moe_num_experts |
1 | 0 | 0 |
attr |
AriaTextConfig.moe_topk |
1 | 0 | 0 |
attr |
AriaTextConfig.moe_num_shared_experts |
1 | 0 | 0 |
attr |
AriaTextConfig.vocab_size |
1 | 0 | 0 |
attr |
AriaTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
AriaTextConfig.hidden_size |
1 | 0 | 0 |
attr |
AriaTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
AriaTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
AriaTextConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
AriaTextConfig.hidden_act |
1 | 0 | 0 |
attr |
AriaTextConfig.initializer_range |
1 | 0 | 0 |
attr |
AriaTextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
AriaTextConfig.pretraining_tp |
1 | 0 | 0 |
attr |
AriaTextConfig.use_cache |
1 | 0 | 0 |
attr |
AriaTextConfig.attention_bias |
1 | 0 | 0 |
attr |
AriaTextConfig.attention_dropout |
1 | 0 | 0 |
attr |
AriaTextConfig.mlp_bias |
1 | 0 | 0 |
attr |
AriaTextConfig.head_dim |
1 | 0 | 0 |
attr |
AriaTextConfig.rope_parameters |
1 | 0 | 0 |
attr |
AriaTextConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
AriaTextConfig.pad_token_id |
1 | 0 | 0 |
attr |
AriaTextConfig.bos_token_id |
1 | 0 | 0 |
attr |
AriaTextConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.aria.image_processing_aria (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AriaImageProcessor.__init__ |
13 | 11 | 0 |
meth |
AriaImageProcessor.preprocess |
15 | 14 | 0 |
meth |
AriaImageProcessor._resize_for_patching |
5 | 4 | 0 |
meth |
AriaImageProcessor._get_padding_size |
3 | 2 | 0 |
meth |
AriaImageProcessor.get_number_of_image_patches |
4 | 2 | 0 |
attr |
AriaImageProcessor.max_image_size |
1 | 0 | 0 |
attr |
AriaImageProcessor.min_image_size |
1 | 0 | 0 |
attr |
AriaImageProcessor.image_mean |
1 | 0 | 0 |
attr |
AriaImageProcessor.image_std |
1 | 0 | 0 |
attr |
AriaImageProcessor.split_image |
1 | 0 | 0 |
attr |
AriaImageProcessor.split_resolutions |
1 | 0 | 0 |
attr |
AriaImageProcessor.do_convert_rgb |
1 | 0 | 0 |
attr |
AriaImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
AriaImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
AriaImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
AriaImageProcessor.resample |
1 | 0 | 0 |
transformers.models.aria.modeling_aria (43 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AriaPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
AriaTextForCausalLM.__init__ |
2 | 1 | 0 |
attr |
AriaTextForCausalLM.model |
1 | 0 | 0 |
attr |
AriaTextForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
AriaTextForCausalLM.lm_head |
1 | 0 | 0 |
meth |
AriaTextPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
AriaTextModel.__init__ |
2 | 1 | 0 |
attr |
AriaTextModel.padding_idx |
1 | 0 | 0 |
attr |
AriaTextModel.vocab_size |
1 | 0 | 0 |
attr |
AriaTextModel.embed_tokens |
1 | 0 | 0 |
attr |
AriaTextModel.layers |
1 | 0 | 0 |
attr |
AriaTextModel.norm |
1 | 0 | 0 |
attr |
AriaTextModel.rotary_emb |
1 | 0 | 0 |
attr |
AriaTextModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
AriaForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
AriaForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
AriaForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
AriaForConditionalGeneration.prepare_inputs_for_generation |
11 | 0 | 0 |
attr |
AriaForConditionalGeneration.model |
1 | 0 | 0 |
attr |
AriaForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
AriaModel.__init__ |
2 | 1 | 0 |
meth |
AriaModel.get_input_embeddings |
1 | 0 | 0 |
meth |
AriaModel.set_input_embeddings |
2 | 0 | 0 |
meth |
AriaModel.get_placeholder_mask |
4 | 3 | 0 |
meth |
AriaModel._create_patch_attention_mask |
2 | 0 | 0 |
attr |
AriaModel.vision_tower |
1 | 0 | 0 |
attr |
AriaModel.multi_modal_projector |
1 | 0 | 0 |
attr |
AriaModel.language_model |
1 | 0 | 0 |
transformers.models.aria.modular_aria (73 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AriaPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
AriaTextPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
AriaTextModel.__init__ |
2 | 1 | 0 |
attr |
AriaTextModel.layers |
1 | 0 | 0 |
attr |
AriaTextModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
AriaConfig.__init__ |
9 | 6 | 0 |
attr |
AriaConfig.image_token_index |
1 | 0 | 0 |
attr |
AriaConfig.projector_patch_to_query_dict |
1 | 0 | 0 |
attr |
AriaConfig.max_value_projector_patch_to_query_dict |
1 | 0 | 0 |
attr |
AriaConfig.vision_feature_layer |
1 | 0 | 0 |
attr |
AriaConfig.vision_config |
1 | 0 | 0 |
attr |
AriaConfig.initializer_range |
1 | 0 | 0 |
attr |
AriaConfig.text_config |
1 | 0 | 0 |
attr |
AriaConfig.tie_word_embeddings |
1 | 0 | 0 |
meth |
AriaModel.__init__ |
2 | 1 | 0 |
meth |
AriaModel._create_patch_attention_mask |
2 | 0 | 0 |
attr |
AriaModel.multi_modal_projector |
1 | 0 | 0 |
meth |
AriaTextForCausalLM.__init__ |
2 | 1 | 0 |
meth |
AriaTextForCausalLM.forward |
2 | 0 | 0 |
attr |
AriaTextForCausalLM.model |
1 | 0 | 0 |
attr |
AriaTextForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
AriaTextForCausalLM.lm_head |
1 | 0 | 0 |
meth |
AriaForConditionalGeneration.prepare_inputs_for_generation |
11 | 0 | 0 |
meth |
AriaImageProcessor.__init__ |
13 | 11 | 0 |
meth |
AriaImageProcessor.preprocess |
15 | 14 | 0 |
meth |
AriaImageProcessor._resize_for_patching |
5 | 4 | 0 |
meth |
AriaImageProcessor._get_padding_size |
3 | 2 | 0 |
meth |
AriaImageProcessor.get_number_of_image_patches |
4 | 2 | 0 |
attr |
AriaImageProcessor.max_image_size |
1 | 0 | 0 |
attr |
AriaImageProcessor.min_image_size |
1 | 0 | 0 |
attr |
AriaImageProcessor.image_mean |
1 | 0 | 0 |
attr |
AriaImageProcessor.image_std |
1 | 0 | 0 |
attr |
AriaImageProcessor.split_image |
1 | 0 | 0 |
attr |
AriaImageProcessor.split_resolutions |
1 | 0 | 0 |
attr |
AriaImageProcessor.do_convert_rgb |
1 | 0 | 0 |
attr |
AriaImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
AriaImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
AriaImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
AriaImageProcessor.resample |
1 | 0 | 0 |
meth |
AriaTextConfig.__init__ |
7 | 4 | 0 |
attr |
AriaTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
AriaTextConfig.moe_num_experts |
1 | 0 | 0 |
attr |
AriaTextConfig.moe_topk |
1 | 0 | 0 |
attr |
AriaTextConfig.moe_num_shared_experts |
1 | 0 | 0 |
meth |
AriaProcessor.__init__ |
5 | 3 | 0 |
meth |
AriaProcessor._get_num_multimodal_tokens |
3 | 0 | 0 |
prop |
AriaProcessor.model_input_names |
1 | 0 | 0 |
attr |
AriaProcessor.size_conversion |
1 | 0 | 0 |
attr |
AriaProcessor.image_token |
1 | 0 | 0 |
attr |
AriaProcessor.image_token_id |
1 | 0 | 0 |
transformers.models.aria.processing_aria (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AriaProcessor.__init__ |
5 | 3 | 0 |
meth |
AriaProcessor._get_num_multimodal_tokens |
3 | 0 | 0 |
prop |
AriaProcessor.model_input_names |
1 | 0 | 0 |
attr |
AriaProcessor.size_conversion |
1 | 0 | 0 |
attr |
AriaProcessor.image_token |
1 | 0 | 0 |
attr |
AriaProcessor.image_token_id |
1 | 0 | 0 |
transformers.models.audio_spectrogram_transformer.configuration_audio_spectrogram_transformer (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ASTConfig.__init__ |
17 | 0 | 0 |
attr |
ASTConfig.hidden_size |
1 | 0 | 0 |
attr |
ASTConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
ASTConfig.num_attention_heads |
1 | 0 | 0 |
attr |
ASTConfig.intermediate_size |
1 | 0 | 0 |
attr |
ASTConfig.hidden_act |
1 | 0 | 0 |
attr |
ASTConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
ASTConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
ASTConfig.initializer_range |
1 | 0 | 0 |
attr |
ASTConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
ASTConfig.patch_size |
1 | 0 | 0 |
attr |
ASTConfig.qkv_bias |
1 | 0 | 0 |
attr |
ASTConfig.frequency_stride |
1 | 0 | 0 |
attr |
ASTConfig.time_stride |
1 | 0 | 0 |
attr |
ASTConfig.max_length |
1 | 0 | 0 |
attr |
ASTConfig.num_mel_bins |
1 | 0 | 0 |
transformers.models.audio_spectrogram_transformer.feature_extraction_audio_spectrogram_transformer (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ASTFeatureExtractor.__init__ |
11 | 0 | 0 |
meth |
ASTFeatureExtractor.__call__ |
5 | 4 | 0 |
attr |
ASTFeatureExtractor.num_mel_bins |
1 | 0 | 0 |
attr |
ASTFeatureExtractor.max_length |
1 | 0 | 0 |
attr |
ASTFeatureExtractor.do_normalize |
1 | 0 | 0 |
attr |
ASTFeatureExtractor.mean |
1 | 0 | 0 |
attr |
ASTFeatureExtractor.std |
1 | 0 | 0 |
attr |
ASTFeatureExtractor.return_attention_mask |
1 | 0 | 0 |
attr |
ASTFeatureExtractor.mel_filters |
1 | 0 | 0 |
attr |
ASTFeatureExtractor.window |
1 | 0 | 0 |
transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
ASTForAudioClassification.num_labels |
1 | 0 | 0 |
attr |
ASTForAudioClassification.audio_spectrogram_transformer |
1 | 0 | 0 |
attr |
ASTForAudioClassification.classifier |
1 | 0 | 0 |
attr |
ASTModel.embeddings |
1 | 0 | 0 |
attr |
ASTModel.encoder |
1 | 0 | 0 |
attr |
ASTModel.layernorm |
1 | 0 | 0 |
transformers.models.audioflamingo3.configuration_audioflamingo3 (40 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AudioFlamingo3EncoderConfig.__init__ |
15 | 0 | 0 |
attr |
AudioFlamingo3EncoderConfig.num_mel_bins |
1 | 0 | 0 |
attr |
AudioFlamingo3EncoderConfig.hidden_size |
1 | 0 | 0 |
attr |
AudioFlamingo3EncoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
AudioFlamingo3EncoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
AudioFlamingo3EncoderConfig.intermediate_size |
1 | 0 | 0 |
attr |
AudioFlamingo3EncoderConfig.dropout |
1 | 0 | 0 |
attr |
AudioFlamingo3EncoderConfig.attention_dropout |
1 | 0 | 0 |
attr |
AudioFlamingo3EncoderConfig.activation_dropout |
1 | 0 | 0 |
attr |
AudioFlamingo3EncoderConfig.activation_function |
1 | 0 | 0 |
attr |
AudioFlamingo3EncoderConfig.initializer_range |
1 | 0 | 0 |
attr |
AudioFlamingo3EncoderConfig.layerdrop |
1 | 0 | 0 |
attr |
AudioFlamingo3EncoderConfig.scale_embedding |
1 | 0 | 0 |
attr |
AudioFlamingo3EncoderConfig.max_source_positions |
1 | 0 | 0 |
meth |
AudioFlamingo3Config.__init__ |
7 | 0 | 0 |
attr |
AudioFlamingo3Config.audio_token_id |
1 | 0 | 0 |
attr |
AudioFlamingo3Config.audio_config |
1 | 0 | 0 |
attr |
AudioFlamingo3Config.text_config |
1 | 0 | 0 |
attr |
AudioFlamingo3Config.projector_hidden_act |
1 | 0 | 0 |
attr |
AudioFlamingo3Config.projector_bias |
1 | 0 | 0 |
transformers.models.audioflamingo3.modeling_audioflamingo3 (35 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AudioFlamingo3Encoder.__init__ |
2 | 1 | 0 |
meth |
AudioFlamingo3Encoder._freeze_parameters |
1 | 0 | 0 |
meth |
AudioFlamingo3Encoder.set_input_embeddings |
2 | 1 | 0 |
meth |
AudioFlamingo3Encoder.forward |
4 | 3 | 0 |
meth |
AudioFlamingo3Encoder._get_feat_extract_output_lengths |
2 | 1 | 0 |
attr |
AudioFlamingo3Encoder.dropout |
1 | 0 | 0 |
attr |
AudioFlamingo3Encoder.layerdrop |
1 | 0 | 0 |
attr |
AudioFlamingo3Encoder.num_mel_bins |
1 | 0 | 0 |
attr |
AudioFlamingo3Encoder.max_source_positions |
1 | 0 | 0 |
attr |
AudioFlamingo3Encoder.embed_scale |
1 | 0 | 0 |
attr |
AudioFlamingo3Encoder.conv1 |
1 | 0 | 0 |
attr |
AudioFlamingo3Encoder.conv2 |
1 | 0 | 0 |
attr |
AudioFlamingo3Encoder.embed_positions |
1 | 0 | 0 |
attr |
AudioFlamingo3Encoder.layers |
1 | 0 | 0 |
attr |
AudioFlamingo3Encoder.layer_norm |
1 | 0 | 0 |
attr |
AudioFlamingo3Encoder.avg_pooler |
1 | 0 | 0 |
attr |
AudioFlamingo3Encoder.gradient_checkpointing |
1 | 0 | 0 |
meth |
AudioFlamingo3ForConditionalGeneration.__init__ |
2 | 0 | 0 |
meth |
AudioFlamingo3ForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
AudioFlamingo3ForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
AudioFlamingo3ForConditionalGeneration.get_output_embeddings |
1 | 0 | 0 |
meth |
AudioFlamingo3ForConditionalGeneration.set_output_embeddings |
2 | 0 | 0 |
meth |
AudioFlamingo3ForConditionalGeneration.set_decoder |
2 | 0 | 0 |
meth |
AudioFlamingo3ForConditionalGeneration.get_decoder |
1 | 0 | 0 |
meth |
AudioFlamingo3ForConditionalGeneration.prepare_inputs_for_generation |
3 | 0 | 0 |
attr |
AudioFlamingo3ForConditionalGeneration.vocab_size |
1 | 0 | 0 |
attr |
AudioFlamingo3ForConditionalGeneration.audio_tower |
1 | 0 | 0 |
attr |
AudioFlamingo3ForConditionalGeneration.language_model |
1 | 0 | 0 |
attr |
AudioFlamingo3ForConditionalGeneration.multi_modal_projector |
1 | 0 | 0 |
transformers.models.audioflamingo3.modular_audioflamingo3 (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AudioFlamingo3Encoder.forward |
4 | 3 | 0 |
meth |
AudioFlamingo3ForConditionalGeneration.__init__ |
2 | 0 | 0 |
meth |
AudioFlamingo3ForConditionalGeneration.prepare_inputs_for_generation |
3 | 0 | 0 |
transformers.models.audioflamingo3.processing_audioflamingo3 (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AudioFlamingo3Processor.__init__ |
7 | 0 | 0 |
meth |
AudioFlamingo3Processor.batch_decode |
4 | 0 | 0 |
attr |
AudioFlamingo3Processor.audio_token |
1 | 0 | 0 |
attr |
AudioFlamingo3Processor.audio_token_id |
1 | 0 | 0 |
attr |
AudioFlamingo3Processor.default_transcription_prompt |
1 | 0 | 0 |
attr |
AudioFlamingo3Processor.max_audio_len |
1 | 0 | 0 |
transformers.models.auto.auto_factory (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
get_values |
2 | 0 | 0 |
transformers.models.auto.configuration_auto (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
CONFIG_MAPPING |
1 | 0 | 0 |
attr |
MODEL_NAMES_MAPPING |
1 | 0 | 0 |
meth |
AutoConfig.for_model |
4 | 2 | 0 |
meth |
AutoConfig.from_pretrained |
3 | 1 | 0 |
meth |
AutoConfig.register |
4 | 1 | 0 |
transformers.models.auto.feature_extraction_auto (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AutoFeatureExtractor.__init__ |
1 | 0 | 0 |
meth |
AutoFeatureExtractor.from_pretrained |
3 | 0 | 0 |
meth |
AutoFeatureExtractor.register |
4 | 0 | 0 |
attr |
FEATURE_EXTRACTOR_MAPPING |
1 | 0 | 0 |
transformers.models.auto.image_processing_auto (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AutoImageProcessor.__init__ |
1 | 0 | 0 |
meth |
AutoImageProcessor.from_pretrained |
4 | 0 | 0 |
meth |
AutoImageProcessor.register |
5 | 0 | 0 |
attr |
IMAGE_PROCESSOR_MAPPING |
1 | 0 | 0 |
transformers.models.auto.modeling_auto (50 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
MODEL_FOR_PRETRAINING_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_CTC_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_MASK_GENERATION_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_KEYPOINT_MATCHING_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_TIME_SERIES_PREDICTION_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_IMAGE_TO_IMAGE_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_KEYPOINT_DETECTION_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_OBJECT_DETECTION_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_IMAGE_SEGMENTATION_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING |
1 | 0 | 0 |
meth |
AutoModelForCausalLM.from_pretrained |
4 | 2 | 0 |
attr |
MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING |
1 | 0 | 0 |
attr |
MODEL_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_DEPTH_ESTIMATION_MAPPING |
1 | 0 | 0 |
meth |
AutoModelForImageTextToText.from_pretrained |
4 | 2 | 0 |
attr |
MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_IMAGE_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_AUDIO_TOKENIZATION_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_BACKBONE_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_RETRIEVAL_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_AUDIO_XVECTOR_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_TEXT_ENCODING_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_MULTIPLE_CHOICE_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_MASKED_LM_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_QUESTION_ANSWERING_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_MULTIMODAL_LM_MAPPING |
1 | 0 | 0 |
attr |
MODEL_FOR_CAUSAL_LM_MAPPING |
1 | 0 | 0 |
transformers.models.auto.processing_auto (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
PROCESSOR_MAPPING |
1 | 0 | 0 |
meth |
AutoProcessor.__init__ |
1 | 0 | 0 |
meth |
AutoProcessor.from_pretrained |
3 | 0 | 0 |
meth |
AutoProcessor.register |
4 | 0 | 0 |
transformers.models.auto.tokenization_auto (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AutoTokenizer.__init__ |
1 | 0 | 0 |
meth |
AutoTokenizer.from_pretrained |
4 | 1 | 0 |
meth |
AutoTokenizer.register |
6 | 0 | 0 |
attr |
TOKENIZER_MAPPING |
1 | 0 | 0 |
transformers.models.auto.video_processing_auto (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
VIDEO_PROCESSOR_MAPPING |
1 | 0 | 0 |
meth |
AutoVideoProcessor.__init__ |
1 | 0 | 0 |
meth |
AutoVideoProcessor.from_pretrained |
4 | 0 | 0 |
meth |
AutoVideoProcessor.register |
4 | 0 | 0 |
transformers.models.autoformer.configuration_autoformer (36 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AutoformerConfig.__init__ |
35 | 32 | 0 |
attr |
AutoformerConfig.prediction_length |
1 | 0 | 0 |
attr |
AutoformerConfig.context_length |
1 | 0 | 0 |
attr |
AutoformerConfig.distribution_output |
1 | 0 | 0 |
attr |
AutoformerConfig.loss |
1 | 0 | 0 |
attr |
AutoformerConfig.input_size |
1 | 0 | 0 |
attr |
AutoformerConfig.num_time_features |
1 | 0 | 0 |
attr |
AutoformerConfig.lags_sequence |
1 | 0 | 0 |
attr |
AutoformerConfig.scaling |
1 | 0 | 0 |
attr |
AutoformerConfig.num_dynamic_real_features |
1 | 0 | 0 |
attr |
AutoformerConfig.num_static_real_features |
1 | 0 | 0 |
attr |
AutoformerConfig.num_static_categorical_features |
1 | 0 | 0 |
attr |
AutoformerConfig.num_parallel_samples |
1 | 0 | 0 |
attr |
AutoformerConfig.feature_size |
1 | 0 | 0 |
attr |
AutoformerConfig.d_model |
1 | 0 | 0 |
attr |
AutoformerConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
AutoformerConfig.decoder_attention_heads |
1 | 0 | 0 |
attr |
AutoformerConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
AutoformerConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
AutoformerConfig.encoder_layers |
1 | 0 | 0 |
attr |
AutoformerConfig.decoder_layers |
1 | 0 | 0 |
attr |
AutoformerConfig.dropout |
1 | 0 | 0 |
attr |
AutoformerConfig.attention_dropout |
1 | 0 | 0 |
attr |
AutoformerConfig.activation_dropout |
1 | 0 | 0 |
attr |
AutoformerConfig.encoder_layerdrop |
1 | 0 | 0 |
attr |
AutoformerConfig.decoder_layerdrop |
1 | 0 | 0 |
attr |
AutoformerConfig.activation_function |
1 | 0 | 0 |
attr |
AutoformerConfig.init_std |
1 | 0 | 0 |
attr |
AutoformerConfig.use_cache |
1 | 0 | 0 |
attr |
AutoformerConfig.label_length |
1 | 0 | 0 |
attr |
AutoformerConfig.moving_average |
1 | 0 | 0 |
attr |
AutoformerConfig.autocorrelation_factor |
1 | 0 | 0 |
attr |
AutoformerConfig.cardinality |
1 | 0 | 0 |
attr |
AutoformerConfig.embedding_dimension |
1 | 0 | 0 |
transformers.models.autoformer.modeling_autoformer (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AutoformerForPrediction.__init__ |
2 | 1 | 0 |
meth |
AutoformerForPrediction.output_params |
2 | 0 | 0 |
meth |
AutoformerForPrediction.output_distribution |
5 | 1 | 0 |
meth |
AutoformerForPrediction.forward |
17 | 16 | 0 |
attr |
AutoformerForPrediction.model |
1 | 0 | 0 |
attr |
AutoformerForPrediction.parameter_projection |
1 | 0 | 0 |
attr |
AutoformerForPrediction.target_shape |
1 | 0 | 0 |
attr |
AutoformerForPrediction.distribution_output |
1 | 0 | 0 |
attr |
AutoformerForPrediction.loss |
1 | 0 | 0 |
meth |
AutoformerPreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
AutoformerModel.__init__ |
2 | 1 | 0 |
meth |
AutoformerModel.forward |
17 | 16 | 0 |
attr |
AutoformerModel.encoder |
1 | 0 | 0 |
attr |
AutoformerModel.decoder |
1 | 0 | 0 |
attr |
AutoformerModel.decomposition_layer |
1 | 0 | 0 |
attr |
AutoformerModel.scaler |
1 | 0 | 0 |
attr |
AutoformerModel.embedder |
1 | 0 | 0 |
transformers.models.aya_vision.configuration_aya_vision (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AyaVisionConfig.__init__ |
10 | 0 | 0 |
attr |
AyaVisionConfig.image_token_index |
1 | 0 | 0 |
attr |
AyaVisionConfig.downsample_factor |
1 | 0 | 0 |
attr |
AyaVisionConfig.adapter_layer_norm_eps |
1 | 0 | 0 |
attr |
AyaVisionConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
AyaVisionConfig.vision_feature_select_strategy |
1 | 0 | 0 |
attr |
AyaVisionConfig.vision_feature_layer |
1 | 0 | 0 |
attr |
AyaVisionConfig.vision_config |
1 | 0 | 0 |
attr |
AyaVisionConfig.text_config |
1 | 0 | 0 |
transformers.models.aya_vision.modeling_aya_vision (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AyaVisionForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
AyaVisionForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
AyaVisionForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
AyaVisionForConditionalGeneration.prepare_inputs_for_generation |
10 | 0 | 0 |
attr |
AyaVisionForConditionalGeneration.model |
1 | 0 | 0 |
attr |
AyaVisionForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
AyaVisionModel.__init__ |
2 | 1 | 0 |
meth |
AyaVisionModel.get_input_embeddings |
1 | 0 | 0 |
meth |
AyaVisionModel.set_input_embeddings |
2 | 0 | 0 |
meth |
AyaVisionModel.get_placeholder_mask |
4 | 3 | 0 |
attr |
AyaVisionModel.vision_tower |
1 | 0 | 0 |
attr |
AyaVisionModel.multi_modal_projector |
1 | 0 | 0 |
attr |
AyaVisionModel.language_model |
1 | 0 | 0 |
transformers.models.aya_vision.processing_aya_vision (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AyaVisionProcessor.__init__ |
15 | 3 | 0 |
meth |
AyaVisionProcessor._prompt_split_image |
2 | 0 | 0 |
meth |
AyaVisionProcessor._get_num_multimodal_tokens |
3 | 0 | 0 |
attr |
AyaVisionProcessor.image_token |
1 | 0 | 0 |
attr |
AyaVisionProcessor.patch_size |
1 | 0 | 0 |
attr |
AyaVisionProcessor.img_size |
1 | 0 | 0 |
attr |
AyaVisionProcessor.start_of_img_token |
1 | 0 | 0 |
attr |
AyaVisionProcessor.end_of_img_token |
1 | 0 | 0 |
attr |
AyaVisionProcessor.img_patch_token |
1 | 0 | 0 |
attr |
AyaVisionProcessor.img_line_break_token |
1 | 0 | 0 |
attr |
AyaVisionProcessor.tile_token |
1 | 0 | 0 |
attr |
AyaVisionProcessor.tile_global_token |
1 | 0 | 0 |
attr |
AyaVisionProcessor.image_token_id |
1 | 0 | 0 |
attr |
AyaVisionProcessor.image_ids |
1 | 0 | 0 |
transformers.models.bamba.configuration_bamba (37 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BambaConfig.__init__ |
34 | 32 | 0 |
prop |
BambaConfig.layers_block_type |
1 | 0 | 0 |
attr |
BambaConfig.vocab_size |
1 | 0 | 0 |
attr |
BambaConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
BambaConfig.hidden_size |
1 | 0 | 0 |
attr |
BambaConfig.intermediate_size |
1 | 0 | 0 |
attr |
BambaConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
BambaConfig.num_attention_heads |
1 | 0 | 0 |
attr |
BambaConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
BambaConfig.attention_dropout |
1 | 0 | 0 |
attr |
BambaConfig.attention_bias |
1 | 0 | 0 |
attr |
BambaConfig.mlp_bias |
1 | 0 | 0 |
attr |
BambaConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
BambaConfig.hidden_act |
1 | 0 | 0 |
attr |
BambaConfig.initializer_range |
1 | 0 | 0 |
attr |
BambaConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
BambaConfig.use_cache |
1 | 0 | 0 |
attr |
BambaConfig.num_logits_to_keep |
1 | 0 | 0 |
attr |
BambaConfig.attn_layer_indices |
1 | 0 | 0 |
attr |
BambaConfig.mamba_n_heads |
1 | 0 | 0 |
attr |
BambaConfig.mamba_d_head |
1 | 0 | 0 |
attr |
BambaConfig.mamba_n_groups |
1 | 0 | 0 |
attr |
BambaConfig.mamba_d_state |
1 | 0 | 0 |
attr |
BambaConfig.mamba_d_conv |
1 | 0 | 0 |
attr |
BambaConfig.mamba_expand |
1 | 0 | 0 |
attr |
BambaConfig.mamba_chunk_size |
1 | 0 | 0 |
attr |
BambaConfig.mamba_conv_bias |
1 | 0 | 0 |
attr |
BambaConfig.mamba_proj_bias |
1 | 0 | 0 |
attr |
BambaConfig.time_step_min |
1 | 0 | 0 |
attr |
BambaConfig.time_step_max |
1 | 0 | 0 |
attr |
BambaConfig.time_step_limit |
1 | 0 | 0 |
attr |
BambaConfig.z_loss_coefficient |
1 | 0 | 0 |
attr |
BambaConfig.rope_parameters |
1 | 0 | 0 |
attr |
BambaConfig.pad_token_id |
1 | 0 | 0 |
attr |
BambaConfig.bos_token_id |
1 | 0 | 0 |
attr |
BambaConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.bamba.modeling_bamba (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BambaForCausalLM.__init__ |
2 | 0 | 0 |
meth |
BambaForCausalLM.forward |
13 | 12 | 0 |
meth |
BambaForCausalLM.prepare_inputs_for_generation |
10 | 0 | 0 |
attr |
BambaForCausalLM.model |
1 | 0 | 0 |
attr |
BambaForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
BambaForCausalLM.lm_head |
1 | 0 | 0 |
attr |
BambaForCausalLM.z_loss_coefficient |
1 | 0 | 0 |
meth |
BambaModel.__init__ |
2 | 1 | 0 |
meth |
BambaModel._update_mamba_mask |
3 | 0 | 0 |
attr |
BambaModel.padding_idx |
1 | 0 | 0 |
attr |
BambaModel.vocab_size |
1 | 0 | 0 |
attr |
BambaModel.embed_tokens |
1 | 0 | 0 |
attr |
BambaModel.layers |
1 | 0 | 0 |
attr |
BambaModel.final_layernorm |
1 | 0 | 0 |
attr |
BambaModel.rotary_emb |
1 | 0 | 0 |
attr |
BambaModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
BambaPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.bamba.modular_bamba (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BambaForCausalLM.__init__ |
2 | 0 | 0 |
meth |
BambaForCausalLM.forward |
13 | 12 | 0 |
meth |
BambaForCausalLM.prepare_inputs_for_generation |
10 | 0 | 0 |
attr |
BambaForCausalLM.z_loss_coefficient |
1 | 0 | 0 |
meth |
BambaModel.__init__ |
2 | 1 | 0 |
meth |
BambaModel._update_mamba_mask |
3 | 0 | 0 |
attr |
BambaModel.padding_idx |
1 | 0 | 0 |
attr |
BambaModel.vocab_size |
1 | 0 | 0 |
attr |
BambaModel.embed_tokens |
1 | 0 | 0 |
attr |
BambaModel.layers |
1 | 0 | 0 |
attr |
BambaModel.final_layernorm |
1 | 0 | 0 |
attr |
BambaModel.rotary_emb |
1 | 0 | 0 |
attr |
BambaModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
BambaPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.bark.configuration_bark (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BarkConfig.__init__ |
7 | 4 | 0 |
attr |
BarkConfig.semantic_config |
1 | 0 | 0 |
attr |
BarkConfig.coarse_acoustics_config |
1 | 0 | 0 |
attr |
BarkConfig.fine_acoustics_config |
1 | 0 | 0 |
attr |
BarkConfig.codec_config |
1 | 0 | 0 |
attr |
BarkConfig.initializer_range |
1 | 0 | 0 |
meth |
BarkFineConfig.__init__ |
5 | 0 | 0 |
attr |
BarkFineConfig.n_codes_total |
1 | 0 | 0 |
attr |
BarkFineConfig.n_codes_given |
1 | 0 | 0 |
attr |
BarkFineConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.bark.generation_configuration_bark (71 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BarkFineGenerationConfig.__init__ |
6 | 0 | 0 |
meth |
BarkFineGenerationConfig.validate |
2 | 0 | 0 |
attr |
BarkFineGenerationConfig.max_fine_history_length |
1 | 0 | 0 |
attr |
BarkFineGenerationConfig.max_fine_input_length |
1 | 0 | 0 |
attr |
BarkFineGenerationConfig.n_fine_codebooks |
1 | 0 | 0 |
meth |
BarkSemanticGenerationConfig.__init__ |
18 | 0 | 0 |
attr |
BarkSemanticGenerationConfig.text_encoding_offset |
1 | 0 | 0 |
attr |
BarkSemanticGenerationConfig.text_pad_token |
1 | 0 | 0 |
attr |
BarkSemanticGenerationConfig.semantic_pad_token |
1 | 0 | 0 |
attr |
BarkSemanticGenerationConfig.semantic_infer_token |
1 | 0 | 0 |
attr |
BarkSemanticGenerationConfig.semantic_vocab_size |
1 | 0 | 0 |
attr |
BarkSemanticGenerationConfig.max_input_semantic_length |
1 | 0 | 0 |
attr |
BarkSemanticGenerationConfig.semantic_rate_hz |
1 | 0 | 0 |
attr |
BarkSemanticGenerationConfig.min_eos_p |
1 | 0 | 0 |
meth |
BarkCoarseGenerationConfig.__init__ |
16 | 2 | 0 |
attr |
BarkCoarseGenerationConfig.coarse_semantic_pad_token |
1 | 0 | 0 |
attr |
BarkCoarseGenerationConfig.coarse_rate_hz |
1 | 0 | 0 |
attr |
BarkCoarseGenerationConfig.n_coarse_codebooks |
1 | 0 | 0 |
attr |
BarkCoarseGenerationConfig.coarse_infer_token |
1 | 0 | 0 |
attr |
BarkCoarseGenerationConfig.max_coarse_input_length |
1 | 0 | 0 |
attr |
BarkCoarseGenerationConfig.max_coarse_history |
1 | 0 | 0 |
attr |
BarkCoarseGenerationConfig.sliding_window_len |
1 | 0 | 0 |
meth |
BarkGenerationConfig.__init__ |
7 | 3 | 0 |
meth |
BarkGenerationConfig.from_sub_model_configs |
5 | 3 | 0 |
meth |
BarkGenerationConfig.to_dict |
1 | 0 | 0 |
attr |
BarkGenerationConfig.semantic_config |
1 | 0 | 0 |
attr |
BarkGenerationConfig.coarse_acoustics_config |
1 | 0 | 0 |
attr |
BarkGenerationConfig.fine_acoustics_config |
1 | 0 | 0 |
attr |
BarkGenerationConfig.sample_rate |
1 | 0 | 0 |
attr |
BarkGenerationConfig.codebook_size |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.models.bark.modeling_bark (53 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BarkFineModel.__init__ |
2 | 0 | 0 |
meth |
BarkFineModel.get_input_embeddings |
1 | 0 | 0 |
meth |
BarkFineModel.set_input_embeddings |
2 | 0 | 0 |
meth |
BarkFineModel.get_output_embeddings |
1 | 0 | 0 |
meth |
BarkFineModel.set_output_embeddings |
2 | 0 | 0 |
meth |
BarkFineModel._resize_token_embeddings |
4 | 0 | 0 |
meth |
BarkFineModel.forward |
11 | 10 | 0 |
meth |
BarkFineModel.generate |
8 | 7 | 0 |
attr |
BarkFineModel.input_embeds_layers |
1 | 0 | 0 |
attr |
BarkFineModel.position_embeds_layer |
1 | 0 | 0 |
attr |
BarkFineModel.drop |
1 | 0 | 0 |
attr |
BarkFineModel.layers |
1 | 0 | 0 |
attr |
BarkFineModel.layernorm_final |
1 | 0 | 0 |
attr |
BarkFineModel.lm_heads |
1 | 0 | 0 |
attr |
BarkFineModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
BarkFineModel.n_codes_total |
1 | 0 | 0 |
meth |
BarkSemanticModel.generate |
6 | 5 | 0 |
meth |
BarkModel.__init__ |
2 | 0 | 0 |
meth |
BarkModel.enable_cpu_offload |
3 | 1 | 0 |
meth |
BarkModel.codec_decode |
3 | 0 | 0 |
meth |
BarkModel.generate |
5 | 4 | 0 |
attr |
BarkModel.semantic |
1 | 0 | 0 |
attr |
BarkModel.coarse_acoustics |
1 | 0 | 0 |
attr |
BarkModel.fine_acoustics |
1 | 0 | 0 |
attr |
BarkModel.codec_model |
1 | 0 | 0 |
meth |
BarkPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
BarkCausalModel.__init__ |
2 | 0 | 0 |
meth |
BarkCausalModel.get_output_embeddings |
1 | 0 | 0 |
meth |
BarkCausalModel.get_input_embeddings |
1 | 0 | 0 |
meth |
BarkCausalModel.set_input_embeddings |
2 | 0 | 0 |
meth |
BarkCausalModel.forward |
13 | 12 | 0 |
attr |
BarkCausalModel.input_embeds_layer |
1 | 0 | 0 |
attr |
BarkCausalModel.position_embeds_layer |
1 | 0 | 0 |
attr |
BarkCausalModel.drop |
1 | 0 | 0 |
attr |
BarkCausalModel.layers |
1 | 0 | 0 |
attr |
BarkCausalModel.layernorm_final |
1 | 0 | 0 |
attr |
BarkCausalModel.lm_head |
1 | 0 | 0 |
attr |
BarkCausalModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
BarkCoarseModel.preprocess_histories |
7 | 6 | 0 |
meth |
BarkCoarseModel.generate |
8 | 7 | 0 |
transformers.models.bark.processing_bark (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BarkProcessor.__init__ |
3 | 0 | 0 |
meth |
BarkProcessor.from_pretrained |
4 | 0 | 0 |
meth |
BarkProcessor.save_pretrained |
6 | 1 | 0 |
meth |
BarkProcessor._load_voice_preset |
3 | 1 | 0 |
meth |
BarkProcessor._validate_voice_preset_dict |
2 | 1 | 0 |
meth |
BarkProcessor._verify_speaker_embeddings |
2 | 1 | 0 |
meth |
BarkProcessor.__call__ |
9 | 1 | 0 |
attr |
BarkProcessor.speaker_embeddings |
1 | 0 | 0 |
transformers.models.bart.configuration_bart (56 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BartConfig.__init__ |
30 | 0 | 0 |
attr |
BartConfig.is_decoder |
1 | 0 | 0 |
attr |
BartConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
BartConfig.vocab_size |
1 | 0 | 0 |
attr |
BartConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
BartConfig.d_model |
1 | 0 | 0 |
attr |
BartConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
BartConfig.encoder_layers |
1 | 0 | 0 |
attr |
BartConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
BartConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
BartConfig.decoder_layers |
1 | 0 | 0 |
attr |
BartConfig.decoder_attention_heads |
1 | 0 | 0 |
attr |
BartConfig.dropout |
1 | 0 | 0 |
attr |
BartConfig.attention_dropout |
1 | 0 | 0 |
attr |
BartConfig.activation_dropout |
1 | 0 | 0 |
attr |
BartConfig.activation_function |
1 | 0 | 0 |
attr |
BartConfig.init_std |
1 | 0 | 0 |
attr |
BartConfig.encoder_layerdrop |
1 | 0 | 0 |
attr |
BartConfig.decoder_layerdrop |
1 | 0 | 0 |
attr |
BartConfig.classifier_dropout |
1 | 0 | 0 |
attr |
BartConfig.use_cache |
1 | 0 | 0 |
attr |
BartConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
BartConfig.scale_embedding |
1 | 0 | 0 |
attr |
BartConfig.pad_token_id |
1 | 0 | 0 |
attr |
BartConfig.bos_token_id |
1 | 0 | 0 |
attr |
BartConfig.eos_token_id |
1 | 0 | 0 |
attr |
BartConfig.decoder_start_token_id |
1 | 0 | 0 |
transformers.models.bart.modeling_bart (37 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BartForSequenceClassification.__init__ |
3 | 1 | 0 |
meth |
BartForSequenceClassification.forward |
15 | 14 | 0 |
attr |
BartForSequenceClassification.model |
1 | 0 | 0 |
attr |
BartForSequenceClassification.classification_head |
1 | 0 | 0 |
meth |
BartForQuestionAnswering.__init__ |
2 | 0 | 0 |
meth |
BartForQuestionAnswering.forward |
16 | 15 | 0 |
attr |
BartForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
BartForQuestionAnswering.model |
1 | 0 | 0 |
attr |
BartForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
BartPretrainedModel.__init_subclass__ |
1 | 0 | 0 |
meth |
BartForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
BartForConditionalGeneration.forward |
16 | 15 | 0 |
meth |
BartForConditionalGeneration.prepare_decoder_input_ids_from_labels |
2 | 1 | 0 |
attr |
BartForConditionalGeneration.model |
1 | 0 | 0 |
attr |
BartForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
PretrainedBartModel.__init_subclass__ |
1 | 0 | 0 |
meth |
BartModel.__init__ |
2 | 1 | 0 |
meth |
BartModel.get_input_embeddings |
1 | 0 | 0 |
meth |
BartModel.set_input_embeddings |
2 | 0 | 0 |
meth |
BartModel.forward |
15 | 14 | 0 |
attr |
BartModel.shared |
1 | 0 | 0 |
attr |
BartModel.encoder |
1 | 0 | 0 |
attr |
BartModel.decoder |
1 | 0 | 0 |
meth |
BartPreTrainedModel._init_weights |
2 | 0 | 0 |
prop |
BartPreTrainedModel.dummy_inputs |
1 | 0 | 0 |
meth |
BartForCausalLM.__init__ |
2 | 0 | 0 |
meth |
BartForCausalLM.get_input_embeddings |
1 | 0 | 0 |
meth |
BartForCausalLM.set_input_embeddings |
2 | 0 | 0 |
meth |
BartForCausalLM.forward |
15 | 14 | 0 |
attr |
BartForCausalLM.model |
1 | 0 | 0 |
attr |
BartForCausalLM.lm_head |
1 | 0 | 0 |
transformers.models.barthez.tokenization_barthez (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BarthezTokenizer.__init__ |
11 | 1 | 0 |
attr |
BarthezTokenizer.add_prefix_space |
1 | 0 | 0 |
transformers.models.bartpho.tokenization_bartpho (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BartphoTokenizer.__init__ |
12 | 2 | 0 |
meth |
BartphoTokenizer.get_vocab |
1 | 0 | 0 |
meth |
BartphoTokenizer._convert_token_to_id |
2 | 0 | 0 |
meth |
BartphoTokenizer._convert_token_to_id_with_added_voc |
2 | 0 | 0 |
meth |
BartphoTokenizer._convert_id_to_token |
2 | 0 | 0 |
meth |
BartphoTokenizer._align_added_tokens_with_fairseq_vocab |
1 | 0 | 0 |
prop |
BartphoTokenizer.vocab_size |
1 | 0 | 0 |
attr |
BartphoTokenizer.monolingual_vocab_file |
1 | 0 | 0 |
attr |
BartphoTokenizer.fairseq_tokens_to_ids |
1 | 0 | 0 |
attr |
BartphoTokenizer.fairseq_ids_to_tokens |
1 | 0 | 0 |
transformers.models.beit.configuration_beit (63 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BeitConfig.__init__ |
33 | 0 | 0 |
attr |
BeitConfig.vocab_size |
1 | 0 | 0 |
attr |
BeitConfig.hidden_size |
1 | 0 | 0 |
attr |
BeitConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
BeitConfig.num_attention_heads |
1 | 0 | 0 |
attr |
BeitConfig.intermediate_size |
1 | 0 | 0 |
attr |
BeitConfig.hidden_act |
1 | 0 | 0 |
attr |
BeitConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
BeitConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
BeitConfig.initializer_range |
1 | 0 | 0 |
attr |
BeitConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
BeitConfig.image_size |
1 | 0 | 0 |
attr |
BeitConfig.patch_size |
1 | 0 | 0 |
attr |
BeitConfig.num_channels |
1 | 0 | 0 |
attr |
BeitConfig.use_mask_token |
1 | 0 | 0 |
attr |
BeitConfig.use_absolute_position_embeddings |
1 | 0 | 0 |
attr |
BeitConfig.use_relative_position_bias |
1 | 0 | 0 |
attr |
BeitConfig.use_shared_relative_position_bias |
1 | 0 | 0 |
attr |
BeitConfig.layer_scale_init_value |
1 | 0 | 0 |
attr |
BeitConfig.drop_path_rate |
1 | 0 | 0 |
attr |
BeitConfig.use_mean_pooling |
1 | 0 | 0 |
attr |
BeitConfig.pool_scales |
1 | 0 | 0 |
attr |
BeitConfig.use_auxiliary_head |
1 | 0 | 0 |
attr |
BeitConfig.auxiliary_loss_weight |
1 | 0 | 0 |
attr |
BeitConfig.auxiliary_channels |
1 | 0 | 0 |
attr |
BeitConfig.auxiliary_num_convs |
1 | 0 | 0 |
attr |
BeitConfig.auxiliary_concat_input |
1 | 0 | 0 |
attr |
BeitConfig.semantic_loss_ignore_index |
1 | 0 | 0 |
attr |
BeitConfig.stage_names |
1 | 0 | 0 |
attr |
BeitConfig.add_fpn |
1 | 0 | 0 |
attr |
BeitConfig.reshape_hidden_states |
1 | 0 | 0 |
transformers.models.beit.image_processing_beit (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BeitImageProcessor.__init__ |
13 | 12 | 0 |
meth |
BeitImageProcessor.resize |
7 | 6 | 0 |
meth |
BeitImageProcessor._preprocess |
14 | 13 | 0 |
meth |
BeitImageProcessor._preprocess_segmentation_map |
9 | 8 | 0 |
meth |
BeitImageProcessor.__call__ |
4 | 0 | 0 |
meth |
BeitImageProcessor.post_process_semantic_segmentation |
3 | 1 | 0 |
attr |
BeitImageProcessor.do_resize |
1 | 0 | 0 |
attr |
BeitImageProcessor.size |
1 | 0 | 0 |
attr |
BeitImageProcessor.resample |
1 | 0 | 0 |
attr |
BeitImageProcessor.do_center_crop |
1 | 0 | 0 |
attr |
BeitImageProcessor.crop_size |
1 | 0 | 0 |
attr |
BeitImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
BeitImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
BeitImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
BeitImageProcessor.image_mean |
1 | 0 | 0 |
attr |
BeitImageProcessor.image_std |
1 | 0 | 0 |
attr |
BeitImageProcessor.do_reduce_labels |
1 | 0 | 0 |
transformers.models.beit.image_processing_beit_fast (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BeitImageProcessorFast.__init__ |
2 | 1 | 0 |
meth |
BeitImageProcessorFast.reduce_label |
2 | 1 | 0 |
meth |
BeitImageProcessorFast._preprocess |
16 | 15 | 0 |
meth |
BeitImageProcessorFast.post_process_semantic_segmentation |
3 | 1 | 0 |
transformers.models.beit.modeling_beit (42 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BeitModel.get_input_embeddings |
1 | 0 | 0 |
meth |
BeitModel.forward |
8 | 7 | 0 |
attr |
BeitModel.embeddings |
1 | 0 | 0 |
attr |
BeitModel.encoder |
1 | 0 | 0 |
attr |
BeitModel.layernorm |
1 | 0 | 0 |
attr |
BeitModel.pooler |
1 | 0 | 0 |
meth |
BeitForSemanticSegmentation.compute_loss |
4 | 0 | 0 |
meth |
BeitForSemanticSegmentation.forward |
8 | 7 | 0 |
attr |
BeitForSemanticSegmentation.num_labels |
1 | 0 | 0 |
attr |
BeitForSemanticSegmentation.beit |
1 | 0 | 0 |
attr |
BeitForSemanticSegmentation.fpn1 |
1 | 0 | 0 |
attr |
BeitForSemanticSegmentation.fpn2 |
1 | 0 | 0 |
attr |
BeitForSemanticSegmentation.fpn3 |
1 | 0 | 0 |
attr |
BeitForSemanticSegmentation.fpn4 |
1 | 0 | 0 |
attr |
BeitForSemanticSegmentation.decode_head |
1 | 0 | 0 |
attr |
BeitForSemanticSegmentation.auxiliary_head |
1 | 0 | 0 |
meth |
BeitBackbone.init |
2 | 0 | 0 |
meth |
BeitBackbone.get_input_embeddings |
1 | 0 | 0 |
meth |
BeitBackbone.forward |
6 | 5 | 0 |
attr |
BeitBackbone.num_features |
1 | 0 | 0 |
attr |
BeitBackbone.embeddings |
1 | 0 | 0 |
attr |
BeitBackbone.encoder |
1 | 0 | 0 |
attr |
BeitBackbone.fpn1 |
1 | 0 | 0 |
attr |
BeitBackbone.fpn2 |
1 | 0 | 0 |
attr |
BeitBackbone.fpn3 |
1 | 0 | 0 |
attr |
BeitBackbone.fpn4 |
1 | 0 | 0 |
meth |
BeitForImageClassification.forward |
8 | 7 | 0 |
attr |
BeitForImageClassification.num_labels |
1 | 0 | 0 |
attr |
BeitForImageClassification.beit |
1 | 0 | 0 |
attr |
BeitForImageClassification.classifier |
1 | 0 | 0 |
meth |
BeitForMaskedImageModeling.get_output_embeddings |
1 | 0 | 0 |
meth |
BeitForMaskedImageModeling.forward |
9 | 8 | 0 |
attr |
BeitForMaskedImageModeling.num_labels |
1 | 0 | 0 |
attr |
BeitForMaskedImageModeling.beit |
1 | 0 | 0 |
attr |
BeitForMaskedImageModeling.layernorm |
1 | 0 | 0 |
attr |
BeitForMaskedImageModeling.lm_head |
1 | 0 | 0 |
meth |
BeitPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.bert.configuration_bert (42 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BertConfig.init |
22 | 0 | 0 |
attr |
BertConfig.pad_token_id |
1 | 0 | 0 |
attr |
BertConfig.is_decoder |
1 | 0 | 0 |
attr |
BertConfig.add_cross_attention |
1 | 0 | 0 |
attr |
BertConfig.bos_token_id |
1 | 0 | 0 |
attr |
BertConfig.eos_token_id |
1 | 0 | 0 |
attr |
BertConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
BertConfig.vocab_size |
1 | 0 | 0 |
attr |
BertConfig.hidden_size |
1 | 0 | 0 |
attr |
BertConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
BertConfig.num_attention_heads |
1 | 0 | 0 |
attr |
BertConfig.hidden_act |
1 | 0 | 0 |
attr |
BertConfig.intermediate_size |
1 | 0 | 0 |
attr |
BertConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
BertConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
BertConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
BertConfig.type_vocab_size |
1 | 0 | 0 |
attr |
BertConfig.initializer_range |
1 | 0 | 0 |
attr |
BertConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
BertConfig.use_cache |
1 | 0 | 0 |
attr |
BertConfig.classifier_dropout |
1 | 0 | 0 |
transformers.models.bert.modeling_bert (81 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BertForTokenClassification.init |
2 | 0 | 0 |
attr |
BertForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
BertForTokenClassification.bert |
1 | 0 | 0 |
attr |
BertForTokenClassification.dropout |
1 | 0 | 0 |
attr |
BertForTokenClassification.classifier |
1 | 0 | 0 |
meth |
BertPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
BertLMHeadModel.init |
2 | 0 | 0 |
meth |
BertLMHeadModel.get_output_embeddings |
1 | 0 | 0 |
meth |
BertLMHeadModel.set_output_embeddings |
2 | 0 | 0 |
attr |
BertLMHeadModel.bert |
1 | 0 | 0 |
attr |
BertLMHeadModel.cls |
1 | 0 | 0 |
meth |
BertForMultipleChoice.init |
2 | 0 | 0 |
attr |
BertForMultipleChoice.bert |
1 | 0 | 0 |
attr |
BertForMultipleChoice.dropout |
1 | 0 | 0 |
attr |
BertForMultipleChoice.classifier |
1 | 0 | 0 |
meth |
BertForMaskedLM.init |
2 | 0 | 0 |
meth |
BertForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
BertForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
attr |
BertForMaskedLM.bert |
1 | 0 | 0 |
attr |
BertForMaskedLM.cls |
1 | 0 | 0 |
meth |
BertLayer.init |
3 | 0 | 0 |
meth |
BertLayer.feed_forward_chunk |
2 | 0 | 0 |
attr |
BertLayer.chunk_size_feed_forward |
1 | 0 | 0 |
attr |
BertLayer.seq_len_dim |
1 | 0 | 0 |
attr |
BertLayer.attention |
1 | 0 | 0 |
attr |
BertLayer.is_decoder |
1 | 0 | 0 |
attr |
BertLayer.add_cross_attention |
1 | 0 | 0 |
attr |
BertLayer.intermediate |
1 | 0 | 0 |
attr |
BertLayer.output |
1 | 0 | 0 |
attr |
BertLayer.crossattention |
1 | 0 | 0 |
meth |
BertForPreTraining.init |
2 | 0 | 0 |
meth |
BertForPreTraining.get_output_embeddings |
1 | 0 | 0 |
meth |
BertForPreTraining.set_output_embeddings |
2 | 0 | 0 |
attr |
BertForPreTraining.bert |
1 | 0 | 0 |
attr |
BertForPreTraining.cls |
1 | 0 | 0 |
meth |
BertForSequenceClassification.init |
2 | 0 | 0 |
attr |
BertForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
BertForSequenceClassification.config |
1 | 0 | 0 |
attr |
BertForSequenceClassification.bert |
1 | 0 | 0 |
attr |
BertForSequenceClassification.dropout |
1 | 0 | 0 |
attr |
BertForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
BertModel.init |
3 | 0 | 0 |
meth |
BertModel.get_input_embeddings |
1 | 0 | 0 |
meth |
BertModel.set_input_embeddings |
2 | 0 | 0 |
meth |
BertModel._create_attention_masks |
7 | 0 | 0 |
attr |
BertModel.config |
1 | 0 | 0 |
attr |
BertModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
BertModel.embeddings |
1 | 0 | 0 |
attr |
BertModel.encoder |
1 | 0 | 0 |
attr |
BertModel.pooler |
1 | 0 | 0 |
meth |
BertForQuestionAnswering.init |
2 | 0 | 0 |
attr |
BertForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
BertForQuestionAnswering.bert |
1 | 0 | 0 |
attr |
BertForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
BertForNextSentencePrediction.init |
2 | 0 | 0 |
attr |
BertForNextSentencePrediction.bert |
1 | 0 | 0 |
attr |
BertForNextSentencePrediction.cls |
1 | 0 | 0 |
transformers.models.bert.tokenization_bert (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BertTokenizer.init |
11 | 9 | 0 |
attr |
BertTokenizer.do_lower_case |
1 | 0 | 0 |
attr |
BertTokenizer.tokenize_chinese_chars |
1 | 0 | 0 |
attr |
BertTokenizer.strip_accents |
1 | 0 | 0 |
transformers.models.bert.tokenization_bert_legacy (65 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BasicTokenizer.init |
6 | 0 | 0 |
meth |
BasicTokenizer.tokenize |
3 | 0 | 0 |
meth |
BasicTokenizer._run_strip_accents |
2 | 0 | 0 |
meth |
BasicTokenizer._run_split_on_punc |
3 | 0 | 0 |
meth |
BasicTokenizer._tokenize_chinese_chars |
2 | 0 | 0 |
meth |
BasicTokenizer._is_chinese_char |
2 | 0 | 0 |
meth |
BasicTokenizer._clean_text |
2 | 0 | 0 |
attr |
BasicTokenizer.do_lower_case |
1 | 0 | 0 |
attr |
BasicTokenizer.never_split |
1 | 0 | 0 |
attr |
BasicTokenizer.tokenize_chinese_chars |
1 | 0 | 0 |
attr |
BasicTokenizer.strip_accents |
1 | 0 | 0 |
attr |
BasicTokenizer.do_split_on_punc |
1 | 0 | 0 |
meth |
WordpieceTokenizer.init |
4 | 0 | 0 |
meth |
WordpieceTokenizer.tokenize |
2 | 0 | 0 |
attr |
WordpieceTokenizer.vocab |
1 | 0 | 0 |
attr |
WordpieceTokenizer.unk_token |
1 | 0 | 0 |
attr |
WordpieceTokenizer.max_input_chars_per_word |
1 | 0 | 0 |
meth |
BertTokenizerLegacy.init |
14 | 0 | 0 |
meth |
BertTokenizerLegacy.get_vocab |
1 | 0 | 0 |
meth |
BertTokenizerLegacy._tokenize |
3 | 0 | 0 |
meth |
BertTokenizerLegacy._convert_token_to_id |
2 | 0 | 0 |
meth |
BertTokenizerLegacy._convert_id_to_token |
2 | 0 | 0 |
meth |
BertTokenizerLegacy.convert_tokens_to_string |
2 | 0 | 0 |
prop |
BertTokenizerLegacy.do_lower_case |
1 | 0 | 0 |
prop |
BertTokenizerLegacy.vocab_size |
1 | 0 | 0 |
attr |
BertTokenizerLegacy.vocab |
1 | 0 | 0 |
attr |
BertTokenizerLegacy.ids_to_tokens |
1 | 0 | 0 |
attr |
BertTokenizerLegacy.do_basic_tokenize |
1 | 0 | 0 |
attr |
BertTokenizerLegacy.wordpiece_tokenizer |
1 | 0 | 0 |
attr |
BertTokenizerLegacy.basic_tokenizer |
1 | 0 | 0 |
transformers.models.bert_generation.configuration_bert_generation (38 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BertGenerationConfig.init |
20 | 0 | 0 |
attr |
BertGenerationConfig.pad_token_id |
1 | 0 | 0 |
attr |
BertGenerationConfig.bos_token_id |
1 | 0 | 0 |
attr |
BertGenerationConfig.eos_token_id |
1 | 0 | 0 |
attr |
BertGenerationConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
BertGenerationConfig.is_decoder |
1 | 0 | 0 |
attr |
BertGenerationConfig.add_cross_attention |
1 | 0 | 0 |
attr |
BertGenerationConfig.vocab_size |
1 | 0 | 0 |
attr |
BertGenerationConfig.hidden_size |
1 | 0 | 0 |
attr |
BertGenerationConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
BertGenerationConfig.num_attention_heads |
1 | 0 | 0 |
attr |
BertGenerationConfig.hidden_act |
1 | 0 | 0 |
attr |
BertGenerationConfig.intermediate_size |
1 | 0 | 0 |
attr |
BertGenerationConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
BertGenerationConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
BertGenerationConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
BertGenerationConfig.initializer_range |
1 | 0 | 0 |
attr |
BertGenerationConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
BertGenerationConfig.use_cache |
1 | 0 | 0 |
transformers.models.bert_generation.modeling_bert_generation (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BertGenerationEncoder.init |
2 | 0 | 0 |
meth |
BertGenerationEncoder.get_input_embeddings |
1 | 0 | 0 |
meth |
BertGenerationEncoder.set_input_embeddings |
2 | 0 | 0 |
meth |
BertGenerationEncoder._create_attention_masks |
7 | 0 | 0 |
attr |
BertGenerationEncoder.config |
1 | 0 | 0 |
attr |
BertGenerationEncoder.gradient_checkpointing |
1 | 0 | 0 |
attr |
BertGenerationEncoder.embeddings |
1 | 0 | 0 |
attr |
BertGenerationEncoder.encoder |
1 | 0 | 0 |
meth |
BertGenerationDecoder.init |
2 | 0 | 0 |
meth |
BertGenerationDecoder.get_output_embeddings |
1 | 0 | 0 |
meth |
BertGenerationDecoder.set_output_embeddings |
2 | 0 | 0 |
attr |
BertGenerationDecoder.bert |
1 | 0 | 0 |
attr |
BertGenerationDecoder.lm_head |
1 | 0 | 0 |
meth |
BertGenerationPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.bert_generation.tokenization_bert_generation (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BertGenerationTokenizer.init |
9 | 2 | 0 |
attr |
BertGenerationTokenizer.sp_model_kwargs |
1 | 0 | 0 |
transformers.models.bert_japanese.tokenization_bert_japanese (67 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MecabTokenizer.init |
6 | 2 | 0 |
meth |
MecabTokenizer.tokenize |
4 | 0 | 0 |
attr |
MecabTokenizer.do_lower_case |
1 | 0 | 0 |
attr |
MecabTokenizer.never_split |
1 | 0 | 0 |
attr |
MecabTokenizer.normalize_text |
1 | 0 | 0 |
attr |
MecabTokenizer.mecab |
1 | 0 | 0 |
meth |
BertJapaneseTokenizer.init |
18 | 0 | 0 |
meth |
BertJapaneseTokenizer.getstate |
1 | 0 | 0 |
meth |
BertJapaneseTokenizer.setstate |
2 | 0 | 0 |
meth |
BertJapaneseTokenizer._tokenize |
2 | 0 | 0 |
meth |
BertJapaneseTokenizer.get_vocab |
1 | 0 | 0 |
meth |
BertJapaneseTokenizer._convert_token_to_id |
2 | 0 | 0 |
meth |
BertJapaneseTokenizer._convert_id_to_token |
2 | 0 | 0 |
meth |
BertJapaneseTokenizer.convert_tokens_to_string |
2 | 0 | 0 |
prop |
BertJapaneseTokenizer.do_lower_case |
1 | 0 | 0 |
prop |
BertJapaneseTokenizer.vocab_size |
1 | 0 | 0 |
attr |
BertJapaneseTokenizer.do_word_tokenize |
1 | 0 | 0 |
attr |
BertJapaneseTokenizer.word_tokenizer_type |
1 | 0 | 0 |
attr |
BertJapaneseTokenizer.lower_case |
1 | 0 | 0 |
attr |
BertJapaneseTokenizer.never_split |
1 | 0 | 0 |
attr |
BertJapaneseTokenizer.mecab_kwargs |
1 | 0 | 0 |
attr |
BertJapaneseTokenizer.sudachi_kwargs |
1 | 0 | 0 |
attr |
BertJapaneseTokenizer.jumanpp_kwargs |
1 | 0 | 0 |
attr |
BertJapaneseTokenizer.do_subword_tokenize |
1 | 0 | 0 |
attr |
BertJapaneseTokenizer.subword_tokenizer_type |
1 | 0 | 0 |
attr |
BertJapaneseTokenizer.spm_file |
1 | 0 | 0 |
attr |
BertJapaneseTokenizer.vocab |
1 | 0 | 0 |
attr |
BertJapaneseTokenizer.ids_to_tokens |
1 | 0 | 0 |
attr |
BertJapaneseTokenizer.word_tokenizer |
1 | 0 | 0 |
attr |
BertJapaneseTokenizer.subword_tokenizer |
1 | 0 | 0 |
meth |
CharacterTokenizer.init |
4 | 0 | 0 |
meth |
CharacterTokenizer.tokenize |
2 | 0 | 0 |
attr |
CharacterTokenizer.vocab |
1 | 0 | 0 |
attr |
CharacterTokenizer.unk_token |
1 | 0 | 0 |
attr |
CharacterTokenizer.normalize_text |
1 | 0 | 0 |
transformers.models.bertweet.tokenization_bertweet (40 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BertweetTokenizer.init |
12 | 0 | 0 |
meth |
BertweetTokenizer.get_vocab |
1 | 0 | 0 |
meth |
BertweetTokenizer.bpe |
2 | 0 | 0 |
meth |
BertweetTokenizer._tokenize |
2 | 0 | 0 |
meth |
BertweetTokenizer.normalizeTweet |
2 | 0 | 0 |
meth |
BertweetTokenizer.normalizeToken |
2 | 0 | 0 |
meth |
BertweetTokenizer._convert_token_to_id |
2 | 0 | 0 |
meth |
BertweetTokenizer._convert_id_to_token |
2 | 0 | 0 |
meth |
BertweetTokenizer.convert_tokens_to_string |
2 | 0 | 0 |
meth |
BertweetTokenizer.add_from_file |
2 | 0 | 0 |
prop |
BertweetTokenizer.vocab_size |
1 | 0 | 0 |
attr |
BertweetTokenizer.vocab_file |
1 | 0 | 0 |
attr |
BertweetTokenizer.merges_file |
1 | 0 | 0 |
attr |
BertweetTokenizer.encoder |
1 | 0 | 0 |
attr |
BertweetTokenizer.decoder |
1 | 0 | 0 |
attr |
BertweetTokenizer.bpe_ranks |
1 | 0 | 0 |
attr |
BertweetTokenizer.cache |
1 | 0 | 0 |
attr |
BertweetTokenizer.normalization |
1 | 0 | 0 |
attr |
BertweetTokenizer.tweetPreprocessor |
1 | 0 | 0 |
attr |
BertweetTokenizer.special_puncts |
1 | 0 | 0 |
attr |
BertweetTokenizer.demojizer |
1 | 0 | 0 |
transformers.models.big_bird.configuration_big_bird (54 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BigBirdConfig.init |
28 | 0 | 0 |
attr |
BigBirdConfig.pad_token_id |
1 | 0 | 0 |
attr |
BigBirdConfig.bos_token_id |
1 | 0 | 0 |
attr |
BigBirdConfig.eos_token_id |
1 | 0 | 0 |
attr |
BigBirdConfig.sep_token_id |
1 | 0 | 0 |
attr |
BigBirdConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
BigBirdConfig.is_decoder |
1 | 0 | 0 |
attr |
BigBirdConfig.add_cross_attention |
1 | 0 | 0 |
attr |
BigBirdConfig.vocab_size |
1 | 0 | 0 |
attr |
BigBirdConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
BigBirdConfig.hidden_size |
1 | 0 | 0 |
attr |
BigBirdConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
BigBirdConfig.num_attention_heads |
1 | 0 | 0 |
attr |
BigBirdConfig.intermediate_size |
1 | 0 | 0 |
attr |
BigBirdConfig.hidden_act |
1 | 0 | 0 |
attr |
BigBirdConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
BigBirdConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
BigBirdConfig.initializer_range |
1 | 0 | 0 |
attr |
BigBirdConfig.type_vocab_size |
1 | 0 | 0 |
attr |
BigBirdConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
BigBirdConfig.use_cache |
1 | 0 | 0 |
attr |
BigBirdConfig.rescale_embeddings |
1 | 0 | 0 |
attr |
BigBirdConfig.attention_type |
1 | 0 | 0 |
attr |
BigBirdConfig.use_bias |
1 | 0 | 0 |
attr |
BigBirdConfig.block_size |
1 | 0 | 0 |
attr |
BigBirdConfig.num_random_blocks |
1 | 0 | 0 |
attr |
BigBirdConfig.classifier_dropout |
1 | 0 | 0 |
transformers.models.big_bird.modeling_big_bird (99 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BigBirdLayer.init |
3 | 0 | 0 |
meth |
BigBirdLayer.set_attention_type |
3 | 1 | 0 |
meth |
BigBirdLayer.forward |
12 | 0 | 0 |
meth |
BigBirdLayer.feed_forward_chunk |
2 | 0 | 0 |
attr |
BigBirdLayer.config |
1 | 0 | 0 |
attr |
BigBirdLayer.attention_type |
1 | 0 | 0 |
attr |
BigBirdLayer.chunk_size_feed_forward |
1 | 0 | 0 |
attr |
BigBirdLayer.seq_len_dim |
1 | 0 | 0 |
attr |
BigBirdLayer.attention |
1 | 0 | 0 |
attr |
BigBirdLayer.is_decoder |
1 | 0 | 0 |
attr |
BigBirdLayer.add_cross_attention |
1 | 0 | 0 |
attr |
BigBirdLayer.intermediate |
1 | 0 | 0 |
attr |
BigBirdLayer.output |
1 | 0 | 0 |
attr |
BigBirdLayer.crossattention |
1 | 0 | 0 |
meth |
BigBirdForMultipleChoice.init |
2 | 0 | 0 |
meth |
BigBirdForMultipleChoice.forward |
11 | 10 | 0 |
attr |
BigBirdForMultipleChoice.bert |
1 | 0 | 0 |
attr |
BigBirdForMultipleChoice.dropout |
1 | 0 | 0 |
attr |
BigBirdForMultipleChoice.classifier |
1 | 0 | 0 |
meth |
BigBirdPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
BigBirdModel.init |
3 | 0 | 0 |
meth |
BigBirdModel.get_input_embeddings |
1 | 0 | 0 |
meth |
BigBirdModel.set_input_embeddings |
2 | 0 | 0 |
meth |
BigBirdModel.set_attention_type |
2 | 1 | 0 |
meth |
BigBirdModel.forward |
15 | 14 | 0 |
meth |
BigBirdModel.create_masks_for_block_sparse_attn |
3 | 2 | 0 |
meth |
BigBirdModel._pad_to_block_size |
7 | 6 | 0 |
attr |
BigBirdModel.attention_type |
1 | 0 | 0 |
attr |
BigBirdModel.block_size |
1 | 0 | 0 |
attr |
BigBirdModel.embeddings |
1 | 0 | 0 |
attr |
BigBirdModel.encoder |
1 | 0 | 0 |
attr |
BigBirdModel.pooler |
1 | 0 | 0 |
attr |
BigBirdModel.activation |
1 | 0 | 0 |
meth |
BigBirdForCausalLM.init |
2 | 0 | 0 |
meth |
BigBirdForCausalLM.get_output_embeddings |
1 | 0 | 0 |
meth |
BigBirdForCausalLM.set_output_embeddings |
2 | 0 | 0 |
meth |
BigBirdForCausalLM.forward |
17 | 16 | 0 |
attr |
BigBirdForCausalLM.bert |
1 | 0 | 0 |
attr |
BigBirdForCausalLM.cls |
1 | 0 | 0 |
meth |
BigBirdForPreTraining.init |
2 | 0 | 0 |
meth |
BigBirdForPreTraining.get_output_embeddings |
1 | 0 | 0 |
meth |
BigBirdForPreTraining.set_output_embeddings |
2 | 0 | 0 |
meth |
BigBirdForPreTraining.forward |
12 | 11 | 0 |
attr |
BigBirdForPreTraining.bert |
1 | 0 | 0 |
attr |
BigBirdForPreTraining.cls |
1 | 0 | 0 |
meth |
BigBirdForTokenClassification.init |
2 | 0 | 0 |
meth |
BigBirdForTokenClassification.forward |
11 | 10 | 0 |
attr |
BigBirdForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
BigBirdForTokenClassification.bert |
1 | 0 | 0 |
attr |
BigBirdForTokenClassification.dropout |
1 | 0 | 0 |
attr |
BigBirdForTokenClassification.classifier |
1 | 0 | 0 |
meth |
BigBirdForMaskedLM.init |
2 | 0 | 0 |
meth |
BigBirdForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
BigBirdForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
meth |
BigBirdForMaskedLM.forward |
13 | 12 | 0 |
attr |
BigBirdForMaskedLM.bert |
1 | 0 | 0 |
attr |
BigBirdForMaskedLM.cls |
1 | 0 | 0 |
meth |
BigBirdForSequenceClassification.init |
2 | 0 | 0 |
meth |
BigBirdForSequenceClassification.forward |
11 | 10 | 0 |
attr |
BigBirdForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
BigBirdForSequenceClassification.bert |
1 | 0 | 0 |
attr |
BigBirdForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
BigBirdForQuestionAnswering.init |
3 | 0 | 0 |
meth |
BigBirdForQuestionAnswering.forward |
13 | 12 | 0 |
meth |
BigBirdForQuestionAnswering.prepare_question_mask |
3 | 2 | 0 |
attr |
BigBirdForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
BigBirdForQuestionAnswering.sep_token_id |
1 | 0 | 0 |
attr |
BigBirdForQuestionAnswering.bert |
1 | 0 | 0 |
attr |
BigBirdForQuestionAnswering.qa_classifier |
1 | 0 | 0 |
transformers.models.big_bird.tokenization_big_bird (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BigBirdTokenizer.init |
11 | 1 | 0 |
attr |
BigBirdTokenizer.add_prefix_space |
1 | 0 | 0 |
transformers.models.bigbird_pegasus.configuration_bigbird_pegasus (62 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BigBirdPegasusConfig.init |
32 | 0 | 0 |
attr |
BigBirdPegasusConfig.is_decoder |
1 | 0 | 0 |
attr |
BigBirdPegasusConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
BigBirdPegasusConfig.vocab_size |
1 | 0 | 0 |
attr |
BigBirdPegasusConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
BigBirdPegasusConfig.d_model |
1 | 0 | 0 |
attr |
BigBirdPegasusConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
BigBirdPegasusConfig.encoder_layers |
1 | 0 | 0 |
attr |
BigBirdPegasusConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
BigBirdPegasusConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
BigBirdPegasusConfig.decoder_layers |
1 | 0 | 0 |
attr |
BigBirdPegasusConfig.decoder_attention_heads |
1 | 0 | 0 |
attr |
BigBirdPegasusConfig.dropout |
1 | 0 | 0 |
attr |
BigBirdPegasusConfig.attention_dropout |
1 | 0 | 0 |
attr |
BigBirdPegasusConfig.activation_dropout |
1 | 0 | 0 |
attr |
BigBirdPegasusConfig.activation_function |
1 | 0 | 0 |
attr |
BigBirdPegasusConfig.init_std |
1 | 0 | 0 |
attr |
BigBirdPegasusConfig.encoder_layerdrop |
1 | 0 | 0 |
attr |
BigBirdPegasusConfig.decoder_layerdrop |
1 | 0 | 0 |
attr |
BigBirdPegasusConfig.classifier_dropout |
1 | 0 | 0 |
attr |
BigBirdPegasusConfig.use_cache |
1 | 0 | 0 |
attr |
BigBirdPegasusConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
BigBirdPegasusConfig.scale_embedding |
1 | 0 | 0 |
attr |
BigBirdPegasusConfig.attention_type |
1 | 0 | 0 |
attr |
BigBirdPegasusConfig.block_size |
1 | 0 | 0 |
attr |
BigBirdPegasusConfig.num_random_blocks |
1 | 0 | 0 |
attr |
BigBirdPegasusConfig.use_bias |
1 | 0 | 0 |
attr |
BigBirdPegasusConfig.pad_token_id |
1 | 0 | 0 |
attr |
BigBirdPegasusConfig.bos_token_id |
1 | 0 | 0 |
attr |
BigBirdPegasusConfig.eos_token_id |
1 | 0 | 0 |
attr |
BigBirdPegasusConfig.decoder_start_token_id |
1 | 0 | 0 |
transformers.models.bigbird_pegasus.modeling_bigbird_pegasus (35 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BigBirdPegasusForCausalLM.init |
2 | 0 | 0 |
meth |
BigBirdPegasusForCausalLM.get_input_embeddings |
1 | 0 | 0 |
meth |
BigBirdPegasusForCausalLM.set_input_embeddings |
2 | 0 | 0 |
meth |
BigBirdPegasusForCausalLM.forward |
15 | 14 | 0 |
attr |
BigBirdPegasusForCausalLM.model |
1 | 0 | 0 |
attr |
BigBirdPegasusForCausalLM.lm_head |
1 | 0 | 0 |
meth |
BigBirdPegasusForConditionalGeneration.init |
2 | 1 | 0 |
meth |
BigBirdPegasusForConditionalGeneration.forward |
16 | 15 | 0 |
meth |
BigBirdPegasusForConditionalGeneration.prepare_decoder_input_ids_from_labels |
2 | 1 | 0 |
attr |
BigBirdPegasusForConditionalGeneration.model |
1 | 0 | 0 |
attr |
BigBirdPegasusForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
BigBirdPegasusPreTrainedModel._init_weights |
2 | 0 | 0 |
prop |
BigBirdPegasusPreTrainedModel.dummy_inputs |
1 | 0 | 0 |
meth |
BigBirdPegasusForSequenceClassification.init |
3 | 1 | 0 |
meth |
BigBirdPegasusForSequenceClassification.forward |
15 | 14 | 0 |
attr |
BigBirdPegasusForSequenceClassification.model |
1 | 0 | 0 |
attr |
BigBirdPegasusForSequenceClassification.classification_head |
1 | 0 | 0 |
meth |
BigBirdPegasusForQuestionAnswering.init |
2 | 0 | 0 |
meth |
BigBirdPegasusForQuestionAnswering.forward |
16 | 15 | 0 |
attr |
BigBirdPegasusForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
BigBirdPegasusForQuestionAnswering.model |
1 | 0 | 0 |
attr |
BigBirdPegasusForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
BigBirdPegasusModel.init |
2 | 1 | 0 |
meth |
BigBirdPegasusModel.get_input_embeddings |
1 | 0 | 0 |
meth |
BigBirdPegasusModel.set_input_embeddings |
2 | 0 | 0 |
meth |
BigBirdPegasusModel.forward |
15 | 14 | 0 |
attr |
BigBirdPegasusModel.shared |
1 | 0 | 0 |
attr |
BigBirdPegasusModel.encoder |
1 | 0 | 0 |
attr |
BigBirdPegasusModel.decoder |
1 | 0 | 0 |
transformers.models.biogpt.configuration_biogpt (40 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BioGptConfig.init |
21 | 0 | 0 |
attr |
BioGptConfig.vocab_size |
1 | 0 | 0 |
attr |
BioGptConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
BioGptConfig.hidden_size |
1 | 0 | 0 |
attr |
BioGptConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
BioGptConfig.num_attention_heads |
1 | 0 | 0 |
attr |
BioGptConfig.intermediate_size |
1 | 0 | 0 |
attr |
BioGptConfig.hidden_act |
1 | 0 | 0 |
attr |
BioGptConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
BioGptConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
BioGptConfig.initializer_range |
1 | 0 | 0 |
attr |
BioGptConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
BioGptConfig.scale_embedding |
1 | 0 | 0 |
attr |
BioGptConfig.use_cache |
1 | 0 | 0 |
attr |
BioGptConfig.layerdrop |
1 | 0 | 0 |
attr |
BioGptConfig.activation_dropout |
1 | 0 | 0 |
attr |
BioGptConfig.pad_token_id |
1 | 0 | 0 |
attr |
BioGptConfig.bos_token_id |
1 | 0 | 0 |
attr |
BioGptConfig.eos_token_id |
1 | 0 | 0 |
attr |
BioGptConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.biogpt.modeling_biogpt (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BioGptForSequenceClassification.init |
2 | 1 | 0 |
meth |
BioGptForSequenceClassification.forward |
14 | 13 | 0 |
meth |
BioGptForSequenceClassification.get_input_embeddings |
1 | 0 | 0 |
meth |
BioGptForSequenceClassification.set_input_embeddings |
2 | 0 | 0 |
attr |
BioGptForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
BioGptForSequenceClassification.biogpt |
1 | 0 | 0 |
attr |
BioGptForSequenceClassification.score |
1 | 0 | 0 |
meth |
BioGptModel.init |
2 | 1 | 0 |
attr |
BioGptModel.layerdrop |
1 | 0 | 0 |
attr |
BioGptModel.dropout |
1 | 0 | 0 |
attr |
BioGptModel.embed_dim |
1 | 0 | 0 |
attr |
BioGptModel.padding_idx |
1 | 0 | 0 |
attr |
BioGptModel.embed_tokens |
1 | 0 | 0 |
attr |
BioGptModel.embed_positions |
1 | 0 | 0 |
attr |
BioGptModel.layers |
1 | 0 | 0 |
attr |
BioGptModel.layer_norm |
1 | 0 | 0 |
attr |
BioGptModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
BioGptForCausalLM.init |
2 | 0 | 0 |
meth |
BioGptForCausalLM.get_output_embeddings |
1 | 0 | 0 |
meth |
BioGptForCausalLM.set_output_embeddings |
2 | 0 | 0 |
attr |
BioGptForCausalLM.biogpt |
1 | 0 | 0 |
attr |
BioGptForCausalLM.output_projection |
1 | 0 | 0 |
meth |
BioGptForTokenClassification.init |
2 | 0 | 0 |
meth |
BioGptForTokenClassification.forward |
14 | 13 | 0 |
attr |
BioGptForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
BioGptForTokenClassification.biogpt |
1 | 0 | 0 |
attr |
BioGptForTokenClassification.dropout |
1 | 0 | 0 |
attr |
BioGptForTokenClassification.classifier |
1 | 0 | 0 |
transformers.models.biogpt.modular_biogpt (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BioGptForSequenceClassification.init |
2 | 1 | 0 |
meth |
BioGptForSequenceClassification.forward |
14 | 13 | 0 |
meth |
BioGptForSequenceClassification.get_input_embeddings |
1 | 0 | 0 |
meth |
BioGptForSequenceClassification.set_input_embeddings |
2 | 0 | 0 |
attr |
BioGptForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
BioGptForSequenceClassification.biogpt |
1 | 0 | 0 |
attr |
BioGptForSequenceClassification.score |
1 | 0 | 0 |
meth |
BioGptModel.init |
2 | 1 | 0 |
attr |
BioGptModel.layerdrop |
1 | 0 | 0 |
attr |
BioGptModel.dropout |
1 | 0 | 0 |
attr |
BioGptModel.embed_dim |
1 | 0 | 0 |
attr |
BioGptModel.padding_idx |
1 | 0 | 0 |
attr |
BioGptModel.embed_tokens |
1 | 0 | 0 |
attr |
BioGptModel.embed_positions |
1 | 0 | 0 |
attr |
BioGptModel.layers |
1 | 0 | 0 |
attr |
BioGptModel.layer_norm |
1 | 0 | 0 |
attr |
BioGptModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
BioGptForCausalLM.init |
2 | 0 | 0 |
meth |
BioGptForCausalLM.get_output_embeddings |
1 | 0 | 0 |
meth |
BioGptForCausalLM.set_output_embeddings |
2 | 0 | 0 |
attr |
BioGptForCausalLM.biogpt |
1 | 0 | 0 |
attr |
BioGptForCausalLM.output_projection |
1 | 0 | 0 |
meth |
BioGptForTokenClassification.init |
2 | 0 | 0 |
meth |
BioGptForTokenClassification.forward |
14 | 13 | 0 |
attr |
BioGptForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
BioGptForTokenClassification.biogpt |
1 | 0 | 0 |
attr |
BioGptForTokenClassification.dropout |
1 | 0 | 0 |
attr |
BioGptForTokenClassification.classifier |
1 | 0 | 0 |
transformers.models.biogpt.tokenization_biogpt (39 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BioGptTokenizer.init |
9 | 0 | 0 |
meth |
BioGptTokenizer.get_vocab |
1 | 0 | 0 |
meth |
BioGptTokenizer.moses_tokenize |
3 | 0 | 0 |
meth |
BioGptTokenizer.moses_detokenize |
3 | 0 | 0 |
meth |
BioGptTokenizer.bpe |
2 | 0 | 0 |
meth |
BioGptTokenizer._tokenize |
3 | 0 | 0 |
meth |
BioGptTokenizer._convert_token_to_id |
2 | 0 | 0 |
meth |
BioGptTokenizer._convert_id_to_token |
2 | 0 | 0 |
meth |
BioGptTokenizer.convert_tokens_to_string |
2 | 0 | 0 |
meth |
BioGptTokenizer.getstate |
1 | 0 | 0 |
meth |
BioGptTokenizer.setstate |
2 | 0 | 0 |
prop |
BioGptTokenizer.vocab_size |
1 | 0 | 0 |
attr |
BioGptTokenizer.lang |
1 | 0 | 0 |
attr |
BioGptTokenizer.sm |
1 | 0 | 0 |
attr |
BioGptTokenizer.cache_moses_tokenizer |
1 | 0 | 0 |
attr |
BioGptTokenizer.cache_moses_detokenizer |
1 | 0 | 0 |
attr |
BioGptTokenizer.decoder |
1 | 0 | 0 |
attr |
BioGptTokenizer.bpe_ranks |
1 | 0 | 0 |
attr |
BioGptTokenizer.cache |
1 | 0 | 0 |
attr |
BioGptTokenizer.encoder |
1 | 0 | 0 |
transformers.models.bit.configuration_bit (29 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BitConfig.init |
16 | 0 | 0 |
attr |
BitConfig.num_channels |
1 | 0 | 0 |
attr |
BitConfig.embedding_size |
1 | 0 | 0 |
attr |
BitConfig.hidden_sizes |
1 | 0 | 0 |
attr |
BitConfig.depths |
1 | 0 | 0 |
attr |
BitConfig.layer_type |
1 | 0 | 0 |
attr |
BitConfig.hidden_act |
1 | 0 | 0 |
attr |
BitConfig.global_padding |
1 | 0 | 0 |
attr |
BitConfig.num_groups |
1 | 0 | 0 |
attr |
BitConfig.drop_path_rate |
1 | 0 | 0 |
attr |
BitConfig.embedding_dynamic_padding |
1 | 0 | 0 |
attr |
BitConfig.output_stride |
1 | 0 | 0 |
attr |
BitConfig.width_factor |
1 | 0 | 0 |
attr |
BitConfig.stage_names |
1 | 0 | 0 |
transformers.models.bit.image_processing_bit (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BitImageProcessor.init |
13 | 12 | 0 |
meth |
BitImageProcessor.resize |
7 | 6 | 0 |
attr |
BitImageProcessor.do_resize |
1 | 0 | 0 |
attr |
BitImageProcessor.size |
1 | 0 | 0 |
attr |
BitImageProcessor.resample |
1 | 0 | 0 |
attr |
BitImageProcessor.do_center_crop |
1 | 0 | 0 |
attr |
BitImageProcessor.crop_size |
1 | 0 | 0 |
attr |
BitImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
BitImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
BitImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
BitImageProcessor.image_mean |
1 | 0 | 0 |
attr |
BitImageProcessor.image_std |
1 | 0 | 0 |
attr |
BitImageProcessor.do_convert_rgb |
1 | 0 | 0 |
transformers.models.bit.modeling_bit (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BitForImageClassification.init |
2 | 0 | 0 |
meth |
BitForImageClassification.forward |
6 | 5 | 0 |
attr |
BitForImageClassification.num_labels |
1 | 0 | 0 |
attr |
BitForImageClassification.bit |
1 | 0 | 0 |
attr |
BitForImageClassification.classifier |
1 | 0 | 0 |
meth |
BitPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
BitModel.init |
2 | 0 | 0 |
meth |
BitModel.forward |
5 | 4 | 0 |
attr |
BitModel.embedder |
1 | 0 | 0 |
attr |
BitModel.encoder |
1 | 0 | 0 |
attr |
BitModel.norm |
1 | 0 | 0 |
attr |
BitModel.pooler |
1 | 0 | 0 |
meth |
BitBackbone.init |
2 | 0 | 0 |
meth |
BitBackbone.forward |
5 | 4 | 0 |
attr |
BitBackbone.bit |
1 | 0 | 0 |
attr |
BitBackbone.num_features |
1 | 0 | 0 |
transformers.models.bitnet.configuration_bitnet (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BitNetConfig.init |
20 | 18 | 0 |
attr |
BitNetConfig.vocab_size |
1 | 0 | 0 |
attr |
BitNetConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
BitNetConfig.hidden_size |
1 | 0 | 0 |
attr |
BitNetConfig.intermediate_size |
1 | 0 | 0 |
attr |
BitNetConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
BitNetConfig.num_attention_heads |
1 | 0 | 0 |
attr |
BitNetConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
BitNetConfig.hidden_act |
1 | 0 | 0 |
attr |
BitNetConfig.initializer_range |
1 | 0 | 0 |
attr |
BitNetConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
BitNetConfig.use_cache |
1 | 0 | 0 |
attr |
BitNetConfig.attention_bias |
1 | 0 | 0 |
attr |
BitNetConfig.attention_dropout |
1 | 0 | 0 |
attr |
BitNetConfig.rope_parameters |
1 | 0 | 0 |
attr |
BitNetConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
BitNetConfig.pad_token_id |
1 | 0 | 0 |
attr |
BitNetConfig.bos_token_id |
1 | 0 | 0 |
attr |
BitNetConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.bitnet.modeling_bitnet (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BitNetModel.__init__ |
2 | 1 | 0 |
attr |
BitNetModel.padding_idx |
1 | 0 | 0 |
attr |
BitNetModel.vocab_size |
1 | 0 | 0 |
attr |
BitNetModel.embed_tokens |
1 | 0 | 0 |
attr |
BitNetModel.layers |
1 | 0 | 0 |
attr |
BitNetModel.norm |
1 | 0 | 0 |
attr |
BitNetModel.rotary_emb |
1 | 0 | 0 |
attr |
BitNetModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
BitNetForCausalLM.__init__ |
2 | 0 | 0 |
attr |
BitNetForCausalLM.model |
1 | 0 | 0 |
attr |
BitNetForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
BitNetForCausalLM.lm_head |
1 | 0 | 0 |
transformers.models.bitnet.modular_bitnet (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
BitNetPreTrainedModel |
1 | 0 | 0 |
meth |
BitNetForCausalLM.forward |
2 | 1 | 0 |
transformers.models.blenderbot.configuration_blenderbot (54 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BlenderbotConfig.__init__ |
29 | 0 | 0 |
attr |
BlenderbotConfig.is_decoder |
1 | 0 | 0 |
attr |
BlenderbotConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
BlenderbotConfig.vocab_size |
1 | 0 | 0 |
attr |
BlenderbotConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
BlenderbotConfig.d_model |
1 | 0 | 0 |
attr |
BlenderbotConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
BlenderbotConfig.encoder_layers |
1 | 0 | 0 |
attr |
BlenderbotConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
BlenderbotConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
BlenderbotConfig.decoder_layers |
1 | 0 | 0 |
attr |
BlenderbotConfig.decoder_attention_heads |
1 | 0 | 0 |
attr |
BlenderbotConfig.dropout |
1 | 0 | 0 |
attr |
BlenderbotConfig.attention_dropout |
1 | 0 | 0 |
attr |
BlenderbotConfig.activation_dropout |
1 | 0 | 0 |
attr |
BlenderbotConfig.activation_function |
1 | 0 | 0 |
attr |
BlenderbotConfig.init_std |
1 | 0 | 0 |
attr |
BlenderbotConfig.encoder_layerdrop |
1 | 0 | 0 |
attr |
BlenderbotConfig.decoder_layerdrop |
1 | 0 | 0 |
attr |
BlenderbotConfig.use_cache |
1 | 0 | 0 |
attr |
BlenderbotConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
BlenderbotConfig.scale_embedding |
1 | 0 | 0 |
attr |
BlenderbotConfig.pad_token_id |
1 | 0 | 0 |
attr |
BlenderbotConfig.bos_token_id |
1 | 0 | 0 |
attr |
BlenderbotConfig.eos_token_id |
1 | 0 | 0 |
attr |
BlenderbotConfig.decoder_start_token_id |
1 | 0 | 0 |
transformers.models.blenderbot.modeling_blenderbot (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BlenderbotModel.__init__ |
2 | 1 | 0 |
meth |
BlenderbotModel.get_input_embeddings |
1 | 0 | 0 |
meth |
BlenderbotModel.set_input_embeddings |
2 | 0 | 0 |
meth |
BlenderbotModel.forward |
15 | 14 | 0 |
attr |
BlenderbotModel.shared |
1 | 0 | 0 |
attr |
BlenderbotModel.encoder |
1 | 0 | 0 |
attr |
BlenderbotModel.decoder |
1 | 0 | 0 |
meth |
BlenderbotForCausalLM.__init__ |
2 | 0 | 0 |
meth |
BlenderbotForCausalLM.get_input_embeddings |
1 | 0 | 0 |
meth |
BlenderbotForCausalLM.set_input_embeddings |
2 | 0 | 0 |
meth |
BlenderbotForCausalLM.forward |
15 | 14 | 0 |
attr |
BlenderbotForCausalLM.model |
1 | 0 | 0 |
attr |
BlenderbotForCausalLM.lm_head |
1 | 0 | 0 |
meth |
BlenderbotPreTrainedModel._init_weights |
2 | 0 | 0 |
prop |
BlenderbotPreTrainedModel.dummy_inputs |
1 | 0 | 0 |
meth |
BlenderbotForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
BlenderbotForConditionalGeneration.forward |
16 | 15 | 0 |
attr |
BlenderbotForConditionalGeneration.model |
1 | 0 | 0 |
attr |
BlenderbotForConditionalGeneration.lm_head |
1 | 0 | 0 |
transformers.models.blenderbot.tokenization_blenderbot (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BlenderbotTokenizer.__init__ |
12 | 0 | 0 |
attr |
BlenderbotTokenizer.add_prefix_space |
1 | 0 | 0 |
transformers.models.blenderbot_small.configuration_blenderbot_small (53 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BlenderbotSmallConfig.__init__ |
28 | 0 | 0 |
attr |
BlenderbotSmallConfig.is_decoder |
1 | 0 | 0 |
attr |
BlenderbotSmallConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
BlenderbotSmallConfig.vocab_size |
1 | 0 | 0 |
attr |
BlenderbotSmallConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
BlenderbotSmallConfig.d_model |
1 | 0 | 0 |
attr |
BlenderbotSmallConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
BlenderbotSmallConfig.encoder_layers |
1 | 0 | 0 |
attr |
BlenderbotSmallConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
BlenderbotSmallConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
BlenderbotSmallConfig.decoder_layers |
1 | 0 | 0 |
attr |
BlenderbotSmallConfig.decoder_attention_heads |
1 | 0 | 0 |
attr |
BlenderbotSmallConfig.dropout |
1 | 0 | 0 |
attr |
BlenderbotSmallConfig.attention_dropout |
1 | 0 | 0 |
attr |
BlenderbotSmallConfig.activation_dropout |
1 | 0 | 0 |
attr |
BlenderbotSmallConfig.activation_function |
1 | 0 | 0 |
attr |
BlenderbotSmallConfig.init_std |
1 | 0 | 0 |
attr |
BlenderbotSmallConfig.encoder_layerdrop |
1 | 0 | 0 |
attr |
BlenderbotSmallConfig.decoder_layerdrop |
1 | 0 | 0 |
attr |
BlenderbotSmallConfig.use_cache |
1 | 0 | 0 |
attr |
BlenderbotSmallConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
BlenderbotSmallConfig.scale_embedding |
1 | 0 | 0 |
attr |
BlenderbotSmallConfig.pad_token_id |
1 | 0 | 0 |
attr |
BlenderbotSmallConfig.bos_token_id |
1 | 0 | 0 |
attr |
BlenderbotSmallConfig.eos_token_id |
1 | 0 | 0 |
attr |
BlenderbotSmallConfig.decoder_start_token_id |
1 | 0 | 0 |
transformers.models.blenderbot_small.modeling_blenderbot_small (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BlenderbotSmallPreTrainedModel._init_weights |
2 | 0 | 0 |
prop |
BlenderbotSmallPreTrainedModel.dummy_inputs |
1 | 0 | 0 |
meth |
BlenderbotSmallModel.__init__ |
2 | 1 | 0 |
meth |
BlenderbotSmallModel.get_input_embeddings |
1 | 0 | 0 |
meth |
BlenderbotSmallModel.set_input_embeddings |
2 | 0 | 0 |
meth |
BlenderbotSmallModel.forward |
15 | 14 | 0 |
attr |
BlenderbotSmallModel.shared |
1 | 0 | 0 |
attr |
BlenderbotSmallModel.encoder |
1 | 0 | 0 |
attr |
BlenderbotSmallModel.decoder |
1 | 0 | 0 |
meth |
BlenderbotSmallForCausalLM.__init__ |
2 | 0 | 0 |
meth |
BlenderbotSmallForCausalLM.get_input_embeddings |
1 | 0 | 0 |
meth |
BlenderbotSmallForCausalLM.set_input_embeddings |
2 | 0 | 0 |
meth |
BlenderbotSmallForCausalLM.forward |
15 | 14 | 0 |
attr |
BlenderbotSmallForCausalLM.model |
1 | 0 | 0 |
attr |
BlenderbotSmallForCausalLM.lm_head |
1 | 0 | 0 |
meth |
BlenderbotSmallForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
BlenderbotSmallForConditionalGeneration.forward |
16 | 15 | 0 |
attr |
BlenderbotSmallForConditionalGeneration.model |
1 | 0 | 0 |
attr |
BlenderbotSmallForConditionalGeneration.lm_head |
1 | 0 | 0 |
transformers.models.blenderbot_small.tokenization_blenderbot_small (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BlenderbotSmallTokenizer.__init__ |
8 | 0 | 0 |
attr |
BlenderbotSmallTokenizer.decoder |
1 | 0 | 0 |
attr |
BlenderbotSmallTokenizer.bpe_ranks |
1 | 0 | 0 |
attr |
BlenderbotSmallTokenizer.cache |
1 | 0 | 0 |
attr |
BlenderbotSmallTokenizer.special_tokens_pattern |
1 | 0 | 0 |
attr |
BlenderbotSmallTokenizer.encoder |
1 | 0 | 0 |
transformers.models.blip.configuration_blip (84 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BlipVisionConfig.__init__ |
13 | 0 | 0 |
attr |
BlipVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
BlipVisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
BlipVisionConfig.projection_dim |
1 | 0 | 0 |
attr |
BlipVisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
BlipVisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
BlipVisionConfig.patch_size |
1 | 0 | 0 |
attr |
BlipVisionConfig.image_size |
1 | 0 | 0 |
attr |
BlipVisionConfig.initializer_range |
1 | 0 | 0 |
attr |
BlipVisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
BlipVisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
BlipVisionConfig.hidden_act |
1 | 0 | 0 |
meth |
BlipConfig.__init__ |
9 | 0 | 0 |
attr |
BlipConfig.text_config |
1 | 0 | 0 |
attr |
BlipConfig.vision_config |
1 | 0 | 0 |
attr |
BlipConfig.projection_dim |
1 | 0 | 0 |
attr |
BlipConfig.logit_scale_init_value |
1 | 0 | 0 |
attr |
BlipConfig.initializer_factor |
1 | 0 | 0 |
attr |
BlipConfig.initializer_range |
1 | 0 | 0 |
attr |
BlipConfig.image_text_hidden_size |
1 | 0 | 0 |
attr |
BlipConfig.label_smoothing |
1 | 0 | 0 |
attr |
BlipConfig.tie_word_embeddings |
1 | 0 | 0 |
meth |
BlipTextConfig.__init__ |
22 | 0 | 0 |
attr |
BlipTextConfig.pad_token_id |
1 | 0 | 0 |
attr |
BlipTextConfig.bos_token_id |
1 | 0 | 0 |
attr |
BlipTextConfig.eos_token_id |
1 | 0 | 0 |
attr |
BlipTextConfig.sep_token_id |
1 | 0 | 0 |
attr |
BlipTextConfig.vocab_size |
1 | 0 | 0 |
attr |
BlipTextConfig.hidden_size |
1 | 0 | 0 |
attr |
BlipTextConfig.encoder_hidden_size |
1 | 0 | 0 |
attr |
BlipTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
BlipTextConfig.projection_dim |
1 | 0 | 0 |
attr |
BlipTextConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
BlipTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
BlipTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
BlipTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
BlipTextConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
BlipTextConfig.hidden_act |
1 | 0 | 0 |
attr |
BlipTextConfig.initializer_range |
1 | 0 | 0 |
attr |
BlipTextConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
BlipTextConfig.is_decoder |
1 | 0 | 0 |
attr |
BlipTextConfig.use_cache |
1 | 0 | 0 |
attr |
BlipTextConfig.label_smoothing |
1 | 0 | 0 |
transformers.models.blip.image_processing_blip (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BlipImageProcessor.__init__ |
11 | 10 | 0 |
meth |
BlipImageProcessor.resize |
7 | 6 | 0 |
attr |
BlipImageProcessor.do_resize |
1 | 0 | 0 |
attr |
BlipImageProcessor.size |
1 | 0 | 0 |
attr |
BlipImageProcessor.resample |
1 | 0 | 0 |
attr |
BlipImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
BlipImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
BlipImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
BlipImageProcessor.image_mean |
1 | 0 | 0 |
attr |
BlipImageProcessor.image_std |
1 | 0 | 0 |
attr |
BlipImageProcessor.do_convert_rgb |
1 | 0 | 0 |
transformers.models.blip.modeling_blip (49 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BlipVisionModel.__init__ |
2 | 1 | 0 |
meth |
BlipVisionModel.get_input_embeddings |
1 | 0 | 0 |
attr |
BlipVisionModel.embeddings |
1 | 0 | 0 |
attr |
BlipVisionModel.encoder |
1 | 0 | 0 |
attr |
BlipVisionModel.post_layernorm |
1 | 0 | 0 |
meth |
BlipForQuestionAnswering.__init__ |
2 | 1 | 0 |
meth |
BlipForQuestionAnswering.set_input_embeddings |
2 | 0 | 0 |
meth |
BlipForQuestionAnswering.get_input_embeddings |
1 | 0 | 0 |
meth |
BlipForQuestionAnswering.generate |
6 | 5 | 0 |
attr |
BlipForQuestionAnswering.vision_model |
1 | 0 | 0 |
attr |
BlipForQuestionAnswering.text_encoder |
1 | 0 | 0 |
attr |
BlipForQuestionAnswering.text_decoder |
1 | 0 | 0 |
attr |
BlipForQuestionAnswering.decoder_pad_token_id |
1 | 0 | 0 |
attr |
BlipForQuestionAnswering.decoder_start_token_id |
1 | 0 | 0 |
meth |
BlipForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
BlipForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
BlipForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
BlipForConditionalGeneration.generate |
6 | 5 | 0 |
attr |
BlipForConditionalGeneration.vision_model |
1 | 0 | 0 |
attr |
BlipForConditionalGeneration.text_decoder |
1 | 0 | 0 |
attr |
BlipForConditionalGeneration.decoder_input_ids |
1 | 0 | 0 |
attr |
BlipForConditionalGeneration.decoder_pad_token_id |
1 | 0 | 0 |
meth |
BlipForImageTextRetrieval.__init__ |
2 | 1 | 0 |
meth |
BlipForImageTextRetrieval.get_input_embeddings |
1 | 0 | 0 |
meth |
BlipForImageTextRetrieval.set_input_embeddings |
2 | 0 | 0 |
attr |
BlipForImageTextRetrieval.vision_model |
1 | 0 | 0 |
attr |
BlipForImageTextRetrieval.text_encoder |
1 | 0 | 0 |
attr |
BlipForImageTextRetrieval.vision_proj |
1 | 0 | 0 |
attr |
BlipForImageTextRetrieval.text_proj |
1 | 0 | 0 |
attr |
BlipForImageTextRetrieval.itm_head |
1 | 0 | 0 |
attr |
BlipForImageTextRetrieval.decoder_pad_token_id |
1 | 0 | 0 |
attr |
BlipForImageTextRetrieval.decoder_start_token_id |
1 | 0 | 0 |
meth |
BlipPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
BlipModel.__init__ |
2 | 1 | 0 |
meth |
BlipModel.get_input_embeddings |
1 | 0 | 0 |
meth |
BlipModel.set_input_embeddings |
2 | 0 | 0 |
attr |
BlipModel.projection_dim |
1 | 0 | 0 |
attr |
BlipModel.text_embed_dim |
1 | 0 | 0 |
attr |
BlipModel.vision_embed_dim |
1 | 0 | 0 |
attr |
BlipModel.text_model |
1 | 0 | 0 |
attr |
BlipModel.vision_model |
1 | 0 | 0 |
attr |
BlipModel.visual_projection |
1 | 0 | 0 |
attr |
BlipModel.text_projection |
1 | 0 | 0 |
attr |
BlipModel.logit_scale |
1 | 0 | 0 |
transformers.models.blip.modeling_blip_text (29 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BlipTextModel.__init__ |
3 | 0 | 0 |
meth |
BlipTextModel.get_input_embeddings |
1 | 0 | 0 |
meth |
BlipTextModel.set_input_embeddings |
2 | 0 | 0 |
meth |
BlipTextModel.forward |
16 | 15 | 0 |
attr |
BlipTextModel.embeddings |
1 | 0 | 0 |
attr |
BlipTextModel.encoder |
1 | 0 | 0 |
attr |
BlipTextModel.pooler |
1 | 0 | 0 |
meth |
BlipTextPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
BlipTextLMHeadModel.__init__ |
2 | 0 | 0 |
meth |
BlipTextLMHeadModel.get_input_embeddings |
1 | 0 | 0 |
meth |
BlipTextLMHeadModel.set_input_embeddings |
2 | 0 | 0 |
meth |
BlipTextLMHeadModel.get_output_embeddings |
1 | 0 | 0 |
meth |
BlipTextLMHeadModel.set_output_embeddings |
2 | 0 | 0 |
meth |
BlipTextLMHeadModel.forward |
19 | 18 | 0 |
meth |
BlipTextLMHeadModel.prepare_inputs_for_generation |
5 | 0 | 0 |
attr |
BlipTextLMHeadModel.bert |
1 | 0 | 0 |
attr |
BlipTextLMHeadModel.cls |
1 | 0 | 0 |
attr |
BlipTextLMHeadModel.label_smoothing |
1 | 0 | 0 |
transformers.models.blip.processing_blip (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BlipProcessor.__init__ |
4 | 0 | 0 |
prop |
BlipProcessor.model_input_names |
1 | 0 | 0 |
transformers.models.blip_2.configuration_blip_2 (73 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Blip2QFormerConfig.__init__ |
17 | 0 | 0 |
attr |
Blip2QFormerConfig.pad_token_id |
1 | 0 | 0 |
attr |
Blip2QFormerConfig.vocab_size |
1 | 0 | 0 |
attr |
Blip2QFormerConfig.hidden_size |
1 | 0 | 0 |
attr |
Blip2QFormerConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Blip2QFormerConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Blip2QFormerConfig.hidden_act |
1 | 0 | 0 |
attr |
Blip2QFormerConfig.intermediate_size |
1 | 0 | 0 |
attr |
Blip2QFormerConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
Blip2QFormerConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
Blip2QFormerConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Blip2QFormerConfig.initializer_range |
1 | 0 | 0 |
attr |
Blip2QFormerConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
Blip2QFormerConfig.cross_attention_frequency |
1 | 0 | 0 |
attr |
Blip2QFormerConfig.encoder_hidden_size |
1 | 0 | 0 |
attr |
Blip2QFormerConfig.use_qformer_text_input |
1 | 0 | 0 |
meth |
Blip2VisionConfig.__init__ |
13 | 0 | 0 |
attr |
Blip2VisionConfig.hidden_size |
1 | 0 | 0 |
attr |
Blip2VisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
Blip2VisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Blip2VisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Blip2VisionConfig.patch_size |
1 | 0 | 0 |
attr |
Blip2VisionConfig.image_size |
1 | 0 | 0 |
attr |
Blip2VisionConfig.initializer_range |
1 | 0 | 0 |
attr |
Blip2VisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
Blip2VisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
Blip2VisionConfig.hidden_act |
1 | 0 | 0 |
attr |
Blip2VisionConfig.qkv_bias |
1 | 0 | 0 |
meth |
Blip2Config.__init__ |
8 | 0 | 0 |
attr |
Blip2Config.text_config |
1 | 0 | 0 |
attr |
Blip2Config.vision_config |
1 | 0 | 0 |
attr |
Blip2Config.qformer_config |
1 | 0 | 0 |
attr |
Blip2Config.num_query_tokens |
1 | 0 | 0 |
attr |
Blip2Config.image_text_hidden_size |
1 | 0 | 0 |
attr |
Blip2Config.image_token_index |
1 | 0 | 0 |
attr |
Blip2Config.use_decoder_only_language_model |
1 | 0 | 0 |
attr |
Blip2Config.initializer_factor |
1 | 0 | 0 |
attr |
Blip2Config.initializer_range |
1 | 0 | 0 |
transformers.models.blip_2.modeling_blip_2 (70 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Blip2TextModelWithProjection.__init__ |
2 | 1 | 0 |
meth |
Blip2TextModelWithProjection.get_input_embeddings |
1 | 0 | 0 |
meth |
Blip2TextModelWithProjection.set_input_embeddings |
2 | 0 | 0 |
attr |
Blip2TextModelWithProjection.query_tokens |
1 | 0 | 0 |
attr |
Blip2TextModelWithProjection.embeddings |
1 | 0 | 0 |
attr |
Blip2TextModelWithProjection.qformer |
1 | 0 | 0 |
attr |
Blip2TextModelWithProjection.text_projection |
1 | 0 | 0 |
meth |
Blip2Model.__init__ |
2 | 1 | 0 |
meth |
Blip2Model.get_input_embeddings |
1 | 0 | 0 |
meth |
Blip2Model.set_input_embeddings |
2 | 0 | 0 |
meth |
Blip2Model.set_output_embeddings |
2 | 0 | 0 |
meth |
Blip2Model.get_encoder |
2 | 0 | 0 |
meth |
Blip2Model.get_placeholder_mask |
3 | 2 | 0 |
attr |
Blip2Model.vision_model |
1 | 0 | 0 |
attr |
Blip2Model.query_tokens |
1 | 0 | 0 |
attr |
Blip2Model.qformer |
1 | 0 | 0 |
attr |
Blip2Model.language_projection |
1 | 0 | 0 |
attr |
Blip2Model.language_model |
1 | 0 | 0 |
meth |
Blip2QFormerModel.__init__ |
2 | 1 | 0 |
meth |
Blip2QFormerModel.get_input_embeddings |
1 | 0 | 0 |
meth |
Blip2QFormerModel.set_input_embeddings |
2 | 0 | 0 |
attr |
Blip2QFormerModel._can_record_outputs |
1 | 0 | 0 |
attr |
Blip2QFormerModel.layernorm |
1 | 0 | 0 |
attr |
Blip2QFormerModel.dropout |
1 | 0 | 0 |
attr |
Blip2QFormerModel.encoder |
1 | 0 | 0 |
meth |
Blip2VisionModelWithProjection.__init__ |
2 | 1 | 0 |
attr |
Blip2VisionModelWithProjection.vision_model |
1 | 0 | 0 |
attr |
Blip2VisionModelWithProjection.query_tokens |
1 | 0 | 0 |
attr |
Blip2VisionModelWithProjection.qformer |
1 | 0 | 0 |
attr |
Blip2VisionModelWithProjection.vision_projection |
1 | 0 | 0 |
meth |
Blip2ForImageTextRetrieval.__init__ |
2 | 1 | 0 |
meth |
Blip2ForImageTextRetrieval.get_input_embeddings |
1 | 0 | 0 |
meth |
Blip2ForImageTextRetrieval.set_input_embeddings |
2 | 0 | 0 |
meth |
Blip2ForImageTextRetrieval.forward |
9 | 8 | 0 |
attr |
Blip2ForImageTextRetrieval.vision_model |
1 | 0 | 0 |
attr |
Blip2ForImageTextRetrieval.query_tokens |
1 | 0 | 0 |
attr |
Blip2ForImageTextRetrieval.embeddings |
1 | 0 | 0 |
attr |
Blip2ForImageTextRetrieval.qformer |
1 | 0 | 0 |
attr |
Blip2ForImageTextRetrieval.vision_projection |
1 | 0 | 0 |
attr |
Blip2ForImageTextRetrieval.text_projection |
1 | 0 | 0 |
attr |
Blip2ForImageTextRetrieval.itm_head |
1 | 0 | 0 |
meth |
Blip2VisionModel.__init__ |
2 | 1 | 0 |
meth |
Blip2VisionModel.get_input_embeddings |
1 | 0 | 0 |
attr |
Blip2VisionModel.embeddings |
1 | 0 | 0 |
attr |
Blip2VisionModel.encoder |
1 | 0 | 0 |
attr |
Blip2VisionModel.post_layernorm |
1 | 0 | 0 |
meth |
Blip2PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Blip2ForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
Blip2ForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Blip2ForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Blip2ForConditionalGeneration.set_output_embeddings |
2 | 0 | 0 |
meth |
Blip2ForConditionalGeneration.get_encoder |
2 | 0 | 0 |
meth |
Blip2ForConditionalGeneration._preprocess_accelerate |
1 | 0 | 0 |
meth |
Blip2ForConditionalGeneration.get_placeholder_mask |
3 | 2 | 0 |
meth |
Blip2ForConditionalGeneration.generate |
7 | 6 | 0 |
attr |
Blip2ForConditionalGeneration.vision_model |
1 | 0 | 0 |
attr |
Blip2ForConditionalGeneration.query_tokens |
1 | 0 | 0 |
attr |
Blip2ForConditionalGeneration.qformer |
1 | 0 | 0 |
attr |
Blip2ForConditionalGeneration.language_projection |
1 | 0 | 0 |
attr |
Blip2ForConditionalGeneration.language_model |
1 | 0 | 0 |
transformers.models.blip_2.processing_blip_2 (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Blip2Processor.__init__ |
5 | 0 | 0 |
attr |
Blip2Processor.num_query_tokens |
1 | 0 | 0 |
attr |
Blip2Processor.image_token |
1 | 0 | 0 |
transformers.models.bloom.configuration_bloom (34 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BloomConfig.__init__ |
18 | 0 | 0 |
attr |
BloomConfig.vocab_size |
1 | 0 | 0 |
attr |
BloomConfig.hidden_size |
1 | 0 | 0 |
attr |
BloomConfig.n_layer |
1 | 0 | 0 |
attr |
BloomConfig.n_head |
1 | 0 | 0 |
attr |
BloomConfig.layer_norm_epsilon |
1 | 0 | 0 |
attr |
BloomConfig.initializer_range |
1 | 0 | 0 |
attr |
BloomConfig.use_cache |
1 | 0 | 0 |
attr |
BloomConfig.pretraining_tp |
1 | 0 | 0 |
attr |
BloomConfig.apply_residual_connection_post_layernorm |
1 | 0 | 0 |
attr |
BloomConfig.hidden_dropout |
1 | 0 | 0 |
attr |
BloomConfig.attention_dropout |
1 | 0 | 0 |
attr |
BloomConfig.bos_token_id |
1 | 0 | 0 |
attr |
BloomConfig.eos_token_id |
1 | 0 | 0 |
attr |
BloomConfig.pad_token_id |
1 | 0 | 0 |
attr |
BloomConfig.slow_but_exact |
1 | 0 | 0 |
attr |
BloomConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.bloom.modeling_bloom (41 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BloomForQuestionAnswering.__init__ |
2 | 0 | 0 |
meth |
BloomForQuestionAnswering.forward |
10 | 9 | 0 |
attr |
BloomForQuestionAnswering.transformer |
1 | 0 | 0 |
attr |
BloomForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
BloomModel.__init__ |
2 | 1 | 0 |
meth |
BloomModel.get_input_embeddings |
1 | 0 | 0 |
meth |
BloomModel.set_input_embeddings |
2 | 1 | 0 |
meth |
BloomModel.forward |
11 | 10 | 0 |
attr |
BloomModel.embed_dim |
1 | 0 | 0 |
attr |
BloomModel.num_heads |
1 | 0 | 0 |
attr |
BloomModel.word_embeddings |
1 | 0 | 0 |
attr |
BloomModel.word_embeddings_layernorm |
1 | 0 | 0 |
attr |
BloomModel.h |
1 | 0 | 0 |
attr |
BloomModel.ln_f |
1 | 0 | 0 |
attr |
BloomModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
BloomForSequenceClassification.__init__ |
2 | 1 | 0 |
meth |
BloomForSequenceClassification.forward |
11 | 10 | 0 |
attr |
BloomForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
BloomForSequenceClassification.transformer |
1 | 0 | 0 |
attr |
BloomForSequenceClassification.score |
1 | 0 | 0 |
meth |
BloomForCausalLM.__init__ |
2 | 1 | 0 |
meth |
BloomForCausalLM.set_output_embeddings |
2 | 1 | 0 |
meth |
BloomForCausalLM.prepare_inputs_for_generation |
9 | 0 | 0 |
meth |
BloomForCausalLM.forward |
13 | 12 | 0 |
attr |
BloomForCausalLM.transformer |
1 | 0 | 0 |
attr |
BloomForCausalLM.lm_head |
1 | 0 | 0 |
meth |
BloomForTokenClassification.__init__ |
2 | 1 | 0 |
meth |
BloomForTokenClassification.forward |
11 | 10 | 0 |
attr |
BloomForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
BloomForTokenClassification.transformer |
1 | 0 | 0 |
attr |
BloomForTokenClassification.dropout |
1 | 0 | 0 |
attr |
BloomForTokenClassification.classifier |
1 | 0 | 0 |
transformers.models.blt.configuration_blt (99 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BltLocalEncoderConfig.__init__ |
17 | 15 | 0 |
attr |
BltLocalEncoderConfig.vocab_size |
1 | 0 | 0 |
attr |
BltLocalEncoderConfig.cross_attn_all_layers |
1 | 0 | 0 |
attr |
BltLocalEncoderConfig.cross_attn_k |
1 | 0 | 0 |
attr |
BltLocalEncoderConfig.hidden_size_global |
1 | 0 | 0 |
attr |
BltLocalEncoderConfig.hidden_size |
1 | 0 | 0 |
attr |
BltLocalEncoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
BltLocalEncoderConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
BltLocalEncoderConfig.head_dim |
1 | 0 | 0 |
attr |
BltLocalEncoderConfig.intermediate_size |
1 | 0 | 0 |
attr |
BltLocalEncoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
BltLocalEncoderConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
BltLocalEncoderConfig.dropout |
1 | 0 | 0 |
attr |
BltLocalEncoderConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
BltLocalEncoderConfig.hidden_act |
1 | 0 | 0 |
attr |
BltLocalEncoderConfig.initializer_range |
1 | 0 | 0 |
attr |
BltLocalEncoderConfig.rope_parameters |
1 | 0 | 0 |
meth |
BltGlobalTransformerConfig.__init__ |
14 | 12 | 0 |
attr |
BltGlobalTransformerConfig.hidden_size |
1 | 0 | 0 |
attr |
BltGlobalTransformerConfig.num_attention_heads |
1 | 0 | 0 |
attr |
BltGlobalTransformerConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
BltGlobalTransformerConfig.head_dim |
1 | 0 | 0 |
attr |
BltGlobalTransformerConfig.intermediate_size |
1 | 0 | 0 |
attr |
BltGlobalTransformerConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
BltGlobalTransformerConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
BltGlobalTransformerConfig.dropout |
1 | 0 | 0 |
attr |
BltGlobalTransformerConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
BltGlobalTransformerConfig.hidden_act |
1 | 0 | 0 |
attr |
BltGlobalTransformerConfig.initializer_range |
1 | 0 | 0 |
attr |
BltGlobalTransformerConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
BltGlobalTransformerConfig.rope_parameters |
1 | 0 | 0 |
meth |
BltLocalDecoderConfig.__init__ |
21 | 19 | 0 |
attr |
BltLocalDecoderConfig.vocab_size |
1 | 0 | 0 |
attr |
BltLocalDecoderConfig.cross_attn_all_layers |
1 | 0 | 0 |
attr |
BltLocalDecoderConfig.cross_attn_k |
1 | 0 | 0 |
attr |
BltLocalDecoderConfig.hidden_size_global |
1 | 0 | 0 |
attr |
BltLocalDecoderConfig.hidden_size |
1 | 0 | 0 |
attr |
BltLocalDecoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
BltLocalDecoderConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
BltLocalDecoderConfig.head_dim |
1 | 0 | 0 |
attr |
BltLocalDecoderConfig.intermediate_size |
1 | 0 | 0 |
attr |
BltLocalDecoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
BltLocalDecoderConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
BltLocalDecoderConfig.dropout |
1 | 0 | 0 |
attr |
BltLocalDecoderConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
BltLocalDecoderConfig.hidden_act |
1 | 0 | 0 |
attr |
BltLocalDecoderConfig.initializer_range |
1 | 0 | 0 |
attr |
BltLocalDecoderConfig.pad_token_id |
1 | 0 | 0 |
attr |
BltLocalDecoderConfig.bos_token_id |
1 | 0 | 0 |
attr |
BltLocalDecoderConfig.eos_token_id |
1 | 0 | 0 |
attr |
BltLocalDecoderConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
BltLocalDecoderConfig.rope_parameters |
1 | 0 | 0 |
meth |
BltConfig.__init__ |
24 | 22 | 0 |
attr |
BltConfig.vocab_size |
1 | 0 | 0 |
attr |
BltConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
BltConfig.initializer_range |
1 | 0 | 0 |
attr |
BltConfig.patch_in_forward |
1 | 0 | 0 |
attr |
BltConfig.patch_size |
1 | 0 | 0 |
attr |
BltConfig.patching_mode |
1 | 0 | 0 |
attr |
BltConfig.patching_threshold |
1 | 0 | 0 |
attr |
BltConfig.patching_batch_size |
1 | 0 | 0 |
attr |
BltConfig.max_patch_length |
1 | 0 | 0 |
attr |
BltConfig.patching_device |
1 | 0 | 0 |
attr |
BltConfig.realtime_patching |
1 | 0 | 0 |
attr |
BltConfig.patching_threshold_add |
1 | 0 | 0 |
attr |
BltConfig.monotonicity |
1 | 0 | 0 |
attr |
BltConfig.cross_attn_k |
1 | 0 | 0 |
attr |
BltConfig.encoder_hash_byte_group_size |
1 | 0 | 0 |
attr |
BltConfig.encoder_hash_byte_group_vocab |
1 | 0 | 0 |
attr |
BltConfig.encoder_hash_byte_group_nb_functions |
1 | 0 | 0 |
attr |
BltConfig.pad_token_id |
1 | 0 | 0 |
attr |
BltConfig.bos_token_id |
1 | 0 | 0 |
attr |
BltConfig.eos_token_id |
1 | 0 | 0 |
attr |
BltConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
BltConfig.rope_parameters |
1 | 0 | 0 |
attr |
BltConfig.patcher_config |
1 | 0 | 0 |
attr |
BltConfig.encoder_config |
1 | 0 | 0 |
attr |
BltConfig.decoder_config |
1 | 0 | 0 |
attr |
BltConfig.global_config |
1 | 0 | 0 |
meth |
BltPatcherConfig.__init__ |
14 | 12 | 0 |
attr |
BltPatcherConfig.vocab_size |
1 | 0 | 0 |
attr |
BltPatcherConfig.hidden_size |
1 | 0 | 0 |
attr |
BltPatcherConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
BltPatcherConfig.num_attention_heads |
1 | 0 | 0 |
attr |
BltPatcherConfig.head_dim |
1 | 0 | 0 |
attr |
BltPatcherConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
BltPatcherConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
BltPatcherConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
BltPatcherConfig.dropout |
1 | 0 | 0 |
attr |
BltPatcherConfig.hidden_act |
1 | 0 | 0 |
attr |
BltPatcherConfig.intermediate_size |
1 | 0 | 0 |
attr |
BltPatcherConfig.initializer_range |
1 | 0 | 0 |
attr |
BltPatcherConfig.rope_parameters |
1 | 0 | 0 |
attr |
BltPatcherConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.blt.modeling_blt (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BltForCausalLM.__init__ |
2 | 1 | 0 |
attr |
BltForCausalLM.text_config |
1 | 0 | 0 |
attr |
BltForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
BltForCausalLM.model |
1 | 0 | 0 |
attr |
BltForCausalLM.lm_head |
1 | 0 | 0 |
meth |
BltPreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
BltPreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
BltPatcher.__init__ |
2 | 1 | 0 |
meth |
BltPatcher.forward |
12 | 11 | 0 |
meth |
BltPatcher.patch_lengths_from_entropies |
5 | 0 | 0 |
attr |
BltPatcher.rotary_emb |
1 | 0 | 0 |
attr |
BltPatcher.layers |
1 | 0 | 0 |
attr |
BltPatcher.embed_tokens |
1 | 0 | 0 |
attr |
BltPatcher.norm |
1 | 0 | 0 |
attr |
BltPatcher.lm_head |
1 | 0 | 0 |
meth |
BltModel.__init__ |
2 | 1 | 0 |
meth |
BltModel.get_input_embeddings |
1 | 0 | 0 |
meth |
BltModel.set_input_embeddings |
2 | 0 | 0 |
attr |
BltModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
BltModel.local_encoder |
1 | 0 | 0 |
attr |
BltModel.global_transformer |
1 | 0 | 0 |
attr |
BltModel.local_decoder |
1 | 0 | 0 |
attr |
BltModel.encoder_hash_tok_embedding |
1 | 0 | 0 |
attr |
BltModel.patcher |
1 | 0 | 0 |
transformers.models.blt.modular_blt (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BltForCausalLM.__init__ |
2 | 1 | 0 |
attr |
BltForCausalLM.text_config |
1 | 0 | 0 |
attr |
BltForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
BltForCausalLM.model |
1 | 0 | 0 |
attr |
BltForCausalLM.lm_head |
1 | 0 | 0 |
meth |
BltPreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
BltPreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
BltPatcher.__init__ |
2 | 1 | 0 |
meth |
BltPatcher.forward |
12 | 11 | 0 |
meth |
BltPatcher.patch_lengths_from_entropies |
5 | 0 | 0 |
attr |
BltPatcher.rotary_emb |
1 | 0 | 0 |
attr |
BltPatcher.layers |
1 | 0 | 0 |
attr |
BltPatcher.embed_tokens |
1 | 0 | 0 |
attr |
BltPatcher.norm |
1 | 0 | 0 |
attr |
BltPatcher.lm_head |
1 | 0 | 0 |
meth |
BltModel.__init__ |
2 | 1 | 0 |
meth |
BltModel.get_input_embeddings |
1 | 0 | 0 |
meth |
BltModel.set_input_embeddings |
2 | 0 | 0 |
attr |
BltModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
BltModel.local_encoder |
1 | 0 | 0 |
attr |
BltModel.global_transformer |
1 | 0 | 0 |
attr |
BltModel.local_decoder |
1 | 0 | 0 |
attr |
BltModel.encoder_hash_tok_embedding |
1 | 0 | 0 |
attr |
BltModel.patcher |
1 | 0 | 0 |
transformers.models.bridgetower.configuration_bridgetower (88 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BridgeTowerTextConfig.__init__ |
20 | 0 | 0 |
attr |
BridgeTowerTextConfig.is_decoder |
1 | 0 | 0 |
attr |
BridgeTowerTextConfig.add_cross_attention |
1 | 0 | 0 |
attr |
BridgeTowerTextConfig.vocab_size |
1 | 0 | 0 |
attr |
BridgeTowerTextConfig.hidden_size |
1 | 0 | 0 |
attr |
BridgeTowerTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
BridgeTowerTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
BridgeTowerTextConfig.hidden_act |
1 | 0 | 0 |
attr |
BridgeTowerTextConfig.initializer_factor |
1 | 0 | 0 |
attr |
BridgeTowerTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
BridgeTowerTextConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
BridgeTowerTextConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
BridgeTowerTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
BridgeTowerTextConfig.type_vocab_size |
1 | 0 | 0 |
attr |
BridgeTowerTextConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
BridgeTowerTextConfig.use_cache |
1 | 0 | 0 |
attr |
BridgeTowerTextConfig.pad_token_id |
1 | 0 | 0 |
attr |
BridgeTowerTextConfig.bos_token_id |
1 | 0 | 0 |
attr |
BridgeTowerTextConfig.eos_token_id |
1 | 0 | 0 |
meth |
BridgeTowerConfig.__init__ |
15 | 0 | 0 |
attr |
BridgeTowerConfig.share_cross_modal_transformer_layers |
1 | 0 | 0 |
attr |
BridgeTowerConfig.hidden_act |
1 | 0 | 0 |
attr |
BridgeTowerConfig.hidden_size |
1 | 0 | 0 |
attr |
BridgeTowerConfig.initializer_factor |
1 | 0 | 0 |
attr |
BridgeTowerConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
BridgeTowerConfig.share_link_tower_layers |
1 | 0 | 0 |
attr |
BridgeTowerConfig.link_tower_type |
1 | 0 | 0 |
attr |
BridgeTowerConfig.num_attention_heads |
1 | 0 | 0 |
attr |
BridgeTowerConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
BridgeTowerConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
BridgeTowerConfig.init_layernorm_from_vision_encoder |
1 | 0 | 0 |
attr |
BridgeTowerConfig.text_config |
1 | 0 | 0 |
attr |
BridgeTowerConfig.vision_config |
1 | 0 | 0 |
meth |
BridgeTowerVisionConfig.__init__ |
12 | 0 | 0 |
attr |
BridgeTowerVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
BridgeTowerVisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
BridgeTowerVisionConfig.num_channels |
1 | 0 | 0 |
attr |
BridgeTowerVisionConfig.patch_size |
1 | 0 | 0 |
attr |
BridgeTowerVisionConfig.image_size |
1 | 0 | 0 |
attr |
BridgeTowerVisionConfig.initializer_factor |
1 | 0 | 0 |
attr |
BridgeTowerVisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
BridgeTowerVisionConfig.stop_gradient |
1 | 0 | 0 |
attr |
BridgeTowerVisionConfig.share_layernorm |
1 | 0 | 0 |
attr |
BridgeTowerVisionConfig.remove_last_layer |
1 | 0 | 0 |
transformers.models.bridgetower.image_processing_bridgetower (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BridgeTowerImageProcessor.__init__ |
14 | 13 | 0 |
meth |
BridgeTowerImageProcessor.resize |
8 | 7 | 0 |
meth |
BridgeTowerImageProcessor.center_crop |
6 | 5 | 0 |
attr |
BridgeTowerImageProcessor.do_resize |
1 | 0 | 0 |
attr |
BridgeTowerImageProcessor.size |
1 | 0 | 0 |
attr |
BridgeTowerImageProcessor.size_divisor |
1 | 0 | 0 |
attr |
BridgeTowerImageProcessor.resample |
1 | 0 | 0 |
attr |
BridgeTowerImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
BridgeTowerImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
BridgeTowerImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
BridgeTowerImageProcessor.image_mean |
1 | 0 | 0 |
attr |
BridgeTowerImageProcessor.image_std |
1 | 0 | 0 |
attr |
BridgeTowerImageProcessor.do_pad |
1 | 0 | 0 |
attr |
BridgeTowerImageProcessor.do_center_crop |
1 | 0 | 0 |
attr |
BridgeTowerImageProcessor.crop_size |
1 | 0 | 0 |
transformers.models.bridgetower.image_processing_bridgetower_fast (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BridgeTowerImageProcessorFast.__init__ |
2 | 1 | 0 |
meth |
BridgeTowerImageProcessorFast.resize |
7 | 6 | 0 |
meth |
BridgeTowerImageProcessorFast.center_crop |
4 | 3 | 0 |
meth |
BridgeTowerImageProcessorFast._preprocess |
17 | 16 | 0 |
meth |
BridgeTowerImageProcessorFast.to_dict |
1 | 0 | 0 |
transformers.models.bridgetower.modeling_bridgetower (44 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BridgeTowerPreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
BridgeTowerForContrastiveLearning.__init__ |
2 | 0 | 0 |
meth |
BridgeTowerForContrastiveLearning.forward |
13 | 12 | 0 |
attr |
BridgeTowerForContrastiveLearning.bridgetower |
1 | 0 | 0 |
attr |
BridgeTowerForContrastiveLearning.itc_text_head |
1 | 0 | 0 |
attr |
BridgeTowerForContrastiveLearning.itc_image_head |
1 | 0 | 0 |
attr |
BridgeTowerForContrastiveLearning.itc_cross_modal_head |
1 | 0 | 0 |
attr |
BridgeTowerForContrastiveLearning.logit_scale |
1 | 0 | 0 |
meth |
BridgeTowerForImageAndTextRetrieval.__init__ |
2 | 0 | 0 |
meth |
BridgeTowerForImageAndTextRetrieval.forward |
13 | 12 | 0 |
attr |
BridgeTowerForImageAndTextRetrieval.bridgetower |
1 | 0 | 0 |
attr |
BridgeTowerForImageAndTextRetrieval.itm_score |
1 | 0 | 0 |
meth |
BridgeTowerModel.__init__ |
2 | 0 | 0 |
meth |
BridgeTowerModel.get_input_embeddings |
1 | 0 | 0 |
meth |
BridgeTowerModel.set_input_embeddings |
2 | 0 | 0 |
meth |
BridgeTowerModel.forward |
15 | 14 | 0 |
meth |
BridgeTowerModel.get_cls_features |
3 | 0 | 0 |
attr |
BridgeTowerModel.token_type_embeddings |
1 | 0 | 0 |
attr |
BridgeTowerModel.vision_model |
1 | 0 | 0 |
attr |
BridgeTowerModel.text_model |
1 | 0 | 0 |
attr |
BridgeTowerModel.cross_modal_image_layers |
1 | 0 | 0 |
attr |
BridgeTowerModel.cross_modal_text_layers |
1 | 0 | 0 |
attr |
BridgeTowerModel.cross_modal_image_pooler |
1 | 0 | 0 |
attr |
BridgeTowerModel.cross_modal_text_pooler |
1 | 0 | 0 |
attr |
BridgeTowerModel.cross_modal_text_layernorm |
1 | 0 | 0 |
attr |
BridgeTowerModel.cross_modal_image_layernorm |
1 | 0 | 0 |
attr |
BridgeTowerModel.cross_modal_text_transform |
1 | 0 | 0 |
attr |
BridgeTowerModel.cross_modal_image_transform |
1 | 0 | 0 |
attr |
BridgeTowerModel.cross_modal_text_link_tower |
1 | 0 | 0 |
attr |
BridgeTowerModel.cross_modal_image_link_tower |
1 | 0 | 0 |
meth |
BridgeTowerForMaskedLM.__init__ |
2 | 0 | 0 |
meth |
BridgeTowerForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
BridgeTowerForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
meth |
BridgeTowerForMaskedLM.forward |
13 | 12 | 0 |
attr |
BridgeTowerForMaskedLM.bridgetower |
1 | 0 | 0 |
attr |
BridgeTowerForMaskedLM.mlm_score |
1 | 0 | 0 |
transformers.models.bridgetower.processing_bridgetower (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BridgeTowerProcessor.__init__ |
3 | 0 | 0 |
transformers.models.bros.configuration_bros (43 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BrosConfig.__init__ |
21 | 0 | 0 |
attr |
BrosConfig.is_decoder |
1 | 0 | 0 |
attr |
BrosConfig.add_cross_attention |
1 | 0 | 0 |
attr |
BrosConfig.vocab_size |
1 | 0 | 0 |
attr |
BrosConfig.hidden_size |
1 | 0 | 0 |
attr |
BrosConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
BrosConfig.num_attention_heads |
1 | 0 | 0 |
attr |
BrosConfig.intermediate_size |
1 | 0 | 0 |
attr |
BrosConfig.hidden_act |
1 | 0 | 0 |
attr |
BrosConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
BrosConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
BrosConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
BrosConfig.type_vocab_size |
1 | 0 | 0 |
attr |
BrosConfig.initializer_range |
1 | 0 | 0 |
attr |
BrosConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
BrosConfig.pad_token_id |
1 | 0 | 0 |
attr |
BrosConfig.dim_bbox |
1 | 0 | 0 |
attr |
BrosConfig.bbox_scale |
1 | 0 | 0 |
attr |
BrosConfig.n_relations |
1 | 0 | 0 |
attr |
BrosConfig.dim_bbox_sinusoid_emb_2d |
1 | 0 | 0 |
attr |
BrosConfig.dim_bbox_sinusoid_emb_1d |
1 | 0 | 0 |
attr |
BrosConfig.dim_bbox_projection |
1 | 0 | 0 |
attr |
BrosConfig.classifier_dropout_prob |
1 | 0 | 0 |
transformers.models.bros.modeling_bros (36 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BrosPreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
BrosSpadeELForTokenClassification.__init__ |
2 | 0 | 0 |
meth |
BrosSpadeELForTokenClassification.forward |
13 | 12 | 0 |
attr |
BrosSpadeELForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
BrosSpadeELForTokenClassification.n_relations |
1 | 0 | 0 |
attr |
BrosSpadeELForTokenClassification.backbone_hidden_size |
1 | 0 | 0 |
attr |
BrosSpadeELForTokenClassification.bros |
1 | 0 | 0 |
attr |
BrosSpadeELForTokenClassification.entity_linker |
1 | 0 | 0 |
meth |
BrosForTokenClassification.__init__ |
2 | 0 | 0 |
meth |
BrosForTokenClassification.forward |
13 | 12 | 0 |
attr |
BrosForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
BrosForTokenClassification.bros |
1 | 0 | 0 |
attr |
BrosForTokenClassification.dropout |
1 | 0 | 0 |
attr |
BrosForTokenClassification.classifier |
1 | 0 | 0 |
meth |
BrosSpadeEEForTokenClassification.__init__ |
2 | 0 | 0 |
meth |
BrosSpadeEEForTokenClassification.forward |
14 | 13 | 0 |
attr |
BrosSpadeEEForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
BrosSpadeEEForTokenClassification.n_relations |
1 | 0 | 0 |
attr |
BrosSpadeEEForTokenClassification.backbone_hidden_size |
1 | 0 | 0 |
attr |
BrosSpadeEEForTokenClassification.bros |
1 | 0 | 0 |
attr |
BrosSpadeEEForTokenClassification.initial_token_classifier |
1 | 0 | 0 |
attr |
BrosSpadeEEForTokenClassification.subsequent_token_classifier |
1 | 0 | 0 |
meth |
BrosModel.__init__ |
3 | 0 | 0 |
meth |
BrosModel.get_input_embeddings |
1 | 0 | 0 |
meth |
BrosModel.set_input_embeddings |
2 | 0 | 0 |
meth |
BrosModel.forward |
13 | 12 | 0 |
attr |
BrosModel.embeddings |
1 | 0 | 0 |
attr |
BrosModel.bbox_embeddings |
1 | 0 | 0 |
attr |
BrosModel.encoder |
1 | 0 | 0 |
attr |
BrosModel.pooler |
1 | 0 | 0 |
transformers.models.bros.processing_bros (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BrosProcessor.__init__ |
3 | 0 | 0 |
transformers.models.byt5.tokenization_byt5 (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ByT5Tokenizer.__init__ |
7 | 1 | 0 |
meth |
ByT5Tokenizer.get_vocab |
1 | 0 | 0 |
meth |
ByT5Tokenizer._convert_token_to_id |
2 | 0 | 0 |
meth |
ByT5Tokenizer._convert_id_to_token |
2 | 0 | 0 |
meth |
ByT5Tokenizer.convert_tokens_to_string |
2 | 0 | 0 |
prop |
ByT5Tokenizer.vocab_size |
1 | 0 | 0 |
attr |
ByT5Tokenizer.offset |
1 | 0 | 0 |
transformers.models.camembert.configuration_camembert (40 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CamembertConfig.__init__ |
21 | 0 | 0 |
attr |
CamembertConfig.pad_token_id |
1 | 0 | 0 |
attr |
CamembertConfig.bos_token_id |
1 | 0 | 0 |
attr |
CamembertConfig.eos_token_id |
1 | 0 | 0 |
attr |
CamembertConfig.is_decoder |
1 | 0 | 0 |
attr |
CamembertConfig.add_cross_attention |
1 | 0 | 0 |
attr |
CamembertConfig.vocab_size |
1 | 0 | 0 |
attr |
CamembertConfig.hidden_size |
1 | 0 | 0 |
attr |
CamembertConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
CamembertConfig.num_attention_heads |
1 | 0 | 0 |
attr |
CamembertConfig.hidden_act |
1 | 0 | 0 |
attr |
CamembertConfig.intermediate_size |
1 | 0 | 0 |
attr |
CamembertConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
CamembertConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
CamembertConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
CamembertConfig.type_vocab_size |
1 | 0 | 0 |
attr |
CamembertConfig.initializer_range |
1 | 0 | 0 |
attr |
CamembertConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
CamembertConfig.use_cache |
1 | 0 | 0 |
attr |
CamembertConfig.classifier_dropout |
1 | 0 | 0 |
transformers.models.camembert.modeling_camembert (56 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CamembertForCausalLM.__init__ |
2 | 0 | 0 |
meth |
CamembertForCausalLM.get_output_embeddings |
1 | 0 | 0 |
meth |
CamembertForCausalLM.set_output_embeddings |
2 | 0 | 0 |
attr |
CamembertForCausalLM.lm_head |
1 | 0 | 0 |
attr |
CamembertForCausalLM.roberta |
1 | 0 | 0 |
meth |
CamembertPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
CamembertForMultipleChoice.__init__ |
2 | 0 | 0 |
attr |
CamembertForMultipleChoice.dropout |
1 | 0 | 0 |
attr |
CamembertForMultipleChoice.classifier |
1 | 0 | 0 |
attr |
CamembertForMultipleChoice.roberta |
1 | 0 | 0 |
meth |
CamembertForSequenceClassification.__init__ |
2 | 0 | 0 |
attr |
CamembertForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
CamembertForSequenceClassification.config |
1 | 0 | 0 |
attr |
CamembertForSequenceClassification.classifier |
1 | 0 | 0 |
attr |
CamembertForSequenceClassification.roberta |
1 | 0 | 0 |
meth |
CamembertForTokenClassification.__init__ |
2 | 0 | 0 |
attr |
CamembertForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
CamembertForTokenClassification.dropout |
1 | 0 | 0 |
attr |
CamembertForTokenClassification.classifier |
1 | 0 | 0 |
attr |
CamembertForTokenClassification.roberta |
1 | 0 | 0 |
meth |
CamembertModel.__init__ |
3 | 0 | 0 |
meth |
CamembertModel.get_input_embeddings |
1 | 0 | 0 |
meth |
CamembertModel.set_input_embeddings |
2 | 0 | 0 |
meth |
CamembertModel._create_attention_masks |
7 | 0 | 0 |
attr |
CamembertModel.config |
1 | 0 | 0 |
attr |
CamembertModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
CamembertModel.embeddings |
1 | 0 | 0 |
attr |
CamembertModel.encoder |
1 | 0 | 0 |
attr |
CamembertModel.pooler |
1 | 0 | 0 |
meth |
CamembertForQuestionAnswering.__init__ |
2 | 0 | 0 |
attr |
CamembertForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
CamembertForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
attr |
CamembertForQuestionAnswering.roberta |
1 | 0 | 0 |
meth |
CamembertForMaskedLM.__init__ |
2 | 0 | 0 |
meth |
CamembertForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
CamembertForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
attr |
CamembertForMaskedLM.lm_head |
1 | 0 | 0 |
attr |
CamembertForMaskedLM.roberta |
1 | 0 | 0 |
transformers.models.camembert.modular_camembert (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CamembertForCausalLM.__init__ |
2 | 0 | 0 |
attr |
CamembertForCausalLM.roberta |
1 | 0 | 0 |
meth |
CamembertForMultipleChoice.__init__ |
2 | 0 | 0 |
attr |
CamembertForMultipleChoice.roberta |
1 | 0 | 0 |
meth |
CamembertForSequenceClassification.__init__ |
2 | 0 | 0 |
attr |
CamembertForSequenceClassification.roberta |
1 | 0 | 0 |
meth |
CamembertForTokenClassification.__init__ |
2 | 0 | 0 |
attr |
CamembertForTokenClassification.roberta |
1 | 0 | 0 |
meth |
CamembertForQuestionAnswering.__init__ |
2 | 0 | 0 |
attr |
CamembertForQuestionAnswering.roberta |
1 | 0 | 0 |
meth |
CamembertForMaskedLM.__init__ |
2 | 0 | 0 |
attr |
CamembertForMaskedLM.roberta |
1 | 0 | 0 |
transformers.models.camembert.tokenization_camembert (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CamembertTokenizer.__init__ |
13 | 1 | 0 |
attr |
CamembertTokenizer.vocab_file |
1 | 0 | 0 |
attr |
CamembertTokenizer.add_prefix_space |
1 | 0 | 0 |
transformers.models.canine.configuration_canine (40 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CanineConfig.__init__ |
21 | 0 | 0 |
attr |
CanineConfig.pad_token_id |
1 | 0 | 0 |
attr |
CanineConfig.bos_token_id |
1 | 0 | 0 |
attr |
CanineConfig.eos_token_id |
1 | 0 | 0 |
attr |
CanineConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
CanineConfig.hidden_size |
1 | 0 | 0 |
attr |
CanineConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
CanineConfig.num_attention_heads |
1 | 0 | 0 |
attr |
CanineConfig.intermediate_size |
1 | 0 | 0 |
attr |
CanineConfig.hidden_act |
1 | 0 | 0 |
attr |
CanineConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
CanineConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
CanineConfig.initializer_range |
1 | 0 | 0 |
attr |
CanineConfig.type_vocab_size |
1 | 0 | 0 |
attr |
CanineConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
CanineConfig.downsampling_rate |
1 | 0 | 0 |
attr |
CanineConfig.upsampling_kernel_size |
1 | 0 | 0 |
attr |
CanineConfig.num_hash_functions |
1 | 0 | 0 |
attr |
CanineConfig.num_hash_buckets |
1 | 0 | 0 |
attr |
CanineConfig.local_transformer_stride |
1 | 0 | 0 |
transformers.models.canine.modeling_canine (59 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CanineForSequenceClassification.__init__ |
2 | 0 | 0 |
meth |
CanineForSequenceClassification.forward |
11 | 10 | 0 |
attr |
CanineForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
CanineForSequenceClassification.canine |
1 | 0 | 0 |
attr |
CanineForSequenceClassification.dropout |
1 | 0 | 0 |
attr |
CanineForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
CanineLayer.__init__ |
9 | 0 | 0 |
meth |
CanineLayer.feed_forward_chunk |
2 | 0 | 0 |
attr |
CanineLayer.chunk_size_feed_forward |
1 | 0 | 0 |
attr |
CanineLayer.seq_len_dim |
1 | 0 | 0 |
attr |
CanineLayer.attention |
1 | 0 | 0 |
attr |
CanineLayer.intermediate |
1 | 0 | 0 |
attr |
CanineLayer.output |
1 | 0 | 0 |
meth |
CanineForMultipleChoice.__init__ |
2 | 0 | 0 |
meth |
CanineForMultipleChoice.forward |
11 | 10 | 0 |
attr |
CanineForMultipleChoice.canine |
1 | 0 | 0 |
attr |
CanineForMultipleChoice.dropout |
1 | 0 | 0 |
attr |
CanineForMultipleChoice.classifier |
1 | 0 | 0 |
meth |
CanineForQuestionAnswering.__init__ |
2 | 0 | 0 |
meth |
CanineForQuestionAnswering.forward |
12 | 11 | 0 |
attr |
CanineForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
CanineForQuestionAnswering.canine |
1 | 0 | 0 |
attr |
CanineForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
CanineForTokenClassification.__init__ |
2 | 0 | 0 |
meth |
CanineForTokenClassification.forward |
11 | 10 | 0 |
attr |
CanineForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
CanineForTokenClassification.canine |
1 | 0 | 0 |
attr |
CanineForTokenClassification.dropout |
1 | 0 | 0 |
attr |
CanineForTokenClassification.classifier |
1 | 0 | 0 |
meth |
CanineModel.__init__ |
3 | 0 | 0 |
meth |
CanineModel._create_3d_attention_mask_from_input_mask |
3 | 0 | 0 |
meth |
CanineModel._downsample_attention_mask |
3 | 2 | 0 |
meth |
CanineModel.forward |
10 | 9 | 0 |
attr |
CanineModel.char_embeddings |
1 | 0 | 0 |
attr |
CanineModel.initial_char_encoder |
1 | 0 | 0 |
attr |
CanineModel.chars_to_molecules |
1 | 0 | 0 |
attr |
CanineModel.encoder |
1 | 0 | 0 |
attr |
CanineModel.projection |
1 | 0 | 0 |
attr |
CanineModel.final_char_encoder |
1 | 0 | 0 |
attr |
CanineModel.pooler |
1 | 0 | 0 |
meth |
CaninePreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.canine.tokenization_canine (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CanineTokenizer.__init__ |
10 | 0 | 0 |
meth |
CanineTokenizer.get_vocab |
1 | 0 | 0 |
meth |
CanineTokenizer.convert_tokens_to_string |
2 | 0 | 0 |
transformers.models.chameleon.configuration_chameleon (42 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ChameleonVQVAEConfig.__init__ |
15 | 12 | 0 |
attr |
ChameleonVQVAEConfig.embed_dim |
1 | 0 | 0 |
attr |
ChameleonVQVAEConfig.num_embeddings |
1 | 0 | 0 |
attr |
ChameleonVQVAEConfig.double_latent |
1 | 0 | 0 |
attr |
ChameleonVQVAEConfig.latent_channels |
1 | 0 | 0 |
attr |
ChameleonVQVAEConfig.resolution |
1 | 0 | 0 |
attr |
ChameleonVQVAEConfig.in_channels |
1 | 0 | 0 |
attr |
ChameleonVQVAEConfig.base_channels |
1 | 0 | 0 |
attr |
ChameleonVQVAEConfig.channel_multiplier |
1 | 0 | 0 |
attr |
ChameleonVQVAEConfig.num_res_blocks |
1 | 0 | 0 |
attr |
ChameleonVQVAEConfig.attn_resolutions |
1 | 0 | 0 |
attr |
ChameleonVQVAEConfig.dropout |
1 | 0 | 0 |
attr |
ChameleonVQVAEConfig.attn_type |
1 | 0 | 0 |
attr |
ChameleonVQVAEConfig.initializer_range |
1 | 0 | 0 |
meth |
ChameleonConfig.__init__ |
25 | 23 | 0 |
attr |
ChameleonConfig.vocab_size |
1 | 0 | 0 |
attr |
ChameleonConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
ChameleonConfig.hidden_size |
1 | 0 | 0 |
attr |
ChameleonConfig.intermediate_size |
1 | 0 | 0 |
attr |
ChameleonConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
ChameleonConfig.num_attention_heads |
1 | 0 | 0 |
attr |
ChameleonConfig.mlp_bias |
1 | 0 | 0 |
attr |
ChameleonConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
ChameleonConfig.hidden_act |
1 | 0 | 0 |
attr |
ChameleonConfig.initializer_range |
1 | 0 | 0 |
attr |
ChameleonConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
ChameleonConfig.use_cache |
1 | 0 | 0 |
attr |
ChameleonConfig.attention_bias |
1 | 0 | 0 |
attr |
ChameleonConfig.attention_dropout |
1 | 0 | 0 |
attr |
ChameleonConfig.model_parallel_size |
1 | 0 | 0 |
attr |
ChameleonConfig.swin_norm |
1 | 0 | 0 |
attr |
ChameleonConfig.rope_parameters |
1 | 0 | 0 |
attr |
ChameleonConfig.vq_config |
1 | 0 | 0 |
attr |
ChameleonConfig.vocabulary_map |
1 | 0 | 0 |
attr |
ChameleonConfig.image_token_id |
1 | 0 | 0 |
attr |
ChameleonConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
ChameleonConfig.pad_token_id |
1 | 0 | 0 |
attr |
ChameleonConfig.bos_token_id |
1 | 0 | 0 |
attr |
ChameleonConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.chameleon.image_processing_chameleon (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ChameleonImageProcessor.__init__ |
13 | 12 | 0 |
meth |
ChameleonImageProcessor.resize |
7 | 6 | 0 |
attr |
ChameleonImageProcessor.do_resize |
1 | 0 | 0 |
attr |
ChameleonImageProcessor.size |
1 | 0 | 0 |
attr |
ChameleonImageProcessor.resample |
1 | 0 | 0 |
attr |
ChameleonImageProcessor.do_center_crop |
1 | 0 | 0 |
attr |
ChameleonImageProcessor.crop_size |
1 | 0 | 0 |
attr |
ChameleonImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
ChameleonImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
ChameleonImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
ChameleonImageProcessor.image_mean |
1 | 0 | 0 |
attr |
ChameleonImageProcessor.image_std |
1 | 0 | 0 |
attr |
ChameleonImageProcessor.do_convert_rgb |
1 | 0 | 0 |
transformers.models.chameleon.image_processing_chameleon_fast (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ChameleonImageProcessorFast.resize |
5 | 4 | 0 |
transformers.models.chameleon.modeling_chameleon (35 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ChameleonVQVAE.__init__ |
2 | 1 | 0 |
attr |
ChameleonVQVAE.encoder |
1 | 0 | 0 |
attr |
ChameleonVQVAE.quantize |
1 | 0 | 0 |
attr |
ChameleonVQVAE.quant_conv |
1 | 0 | 0 |
attr |
ChameleonVQVAE.post_quant_conv |
1 | 0 | 0 |
meth |
ChameleonModel.__init__ |
2 | 1 | 0 |
meth |
ChameleonModel.get_image_tokens |
2 | 1 | 0 |
meth |
ChameleonModel.get_placeholder_mask |
4 | 3 | 0 |
attr |
ChameleonModel.padding_idx |
1 | 0 | 0 |
attr |
ChameleonModel.vocab_size |
1 | 0 | 0 |
attr |
ChameleonModel.embed_tokens |
1 | 0 | 0 |
attr |
ChameleonModel.vocabulary_mapping |
1 | 0 | 0 |
attr |
ChameleonModel.layers |
1 | 0 | 0 |
attr |
ChameleonModel.norm |
1 | 0 | 0 |
attr |
ChameleonModel.vqmodel |
1 | 0 | 0 |
attr |
ChameleonModel.rotary_emb |
1 | 0 | 0 |
attr |
ChameleonModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
ChameleonForConditionalGeneration.__init__ |
2 | 0 | 0 |
meth |
ChameleonForConditionalGeneration.get_image_tokens |
2 | 0 | 0 |
meth |
ChameleonForConditionalGeneration.prepare_inputs_for_generation |
11 | 0 | 0 |
attr |
ChameleonForConditionalGeneration.model |
1 | 0 | 0 |
attr |
ChameleonForConditionalGeneration.vocab_size |
1 | 0 | 0 |
attr |
ChameleonForConditionalGeneration.lm_head |
1 | 0 | 0 |
transformers.models.chameleon.processing_chameleon (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ChameleonProcessor.__init__ |
5 | 2 | 0 |
meth |
ChameleonProcessor._get_num_multimodal_tokens |
3 | 0 | 0 |
attr |
ChameleonProcessor.image_seq_length |
1 | 0 | 0 |
attr |
ChameleonProcessor.image_token |
1 | 0 | 0 |
attr |
ChameleonProcessor.image_token_id |
1 | 0 | 0 |
attr |
ChameleonProcessor.image_start_token |
1 | 0 | 0 |
attr |
ChameleonProcessor.image_end_token |
1 | 0 | 0 |
attr |
ChameleonProcessor.image_start_token_id |
1 | 0 | 0 |
attr |
ChameleonProcessor.image_end_token_id |
1 | 0 | 0 |
attr |
ChameleonProcessor.image_ids |
1 | 0 | 0 |
transformers.models.chinese_clip.configuration_chinese_clip (74 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ChineseCLIPVisionConfig.__init__ |
15 | 0 | 0 |
attr |
ChineseCLIPVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
ChineseCLIPVisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
ChineseCLIPVisionConfig.projection_dim |
1 | 0 | 0 |
attr |
ChineseCLIPVisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
ChineseCLIPVisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
ChineseCLIPVisionConfig.num_channels |
1 | 0 | 0 |
attr |
ChineseCLIPVisionConfig.patch_size |
1 | 0 | 0 |
attr |
ChineseCLIPVisionConfig.image_size |
1 | 0 | 0 |
attr |
ChineseCLIPVisionConfig.initializer_range |
1 | 0 | 0 |
attr |
ChineseCLIPVisionConfig.initializer_factor |
1 | 0 | 0 |
attr |
ChineseCLIPVisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
ChineseCLIPVisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
ChineseCLIPVisionConfig.hidden_act |
1 | 0 | 0 |
meth |
ChineseCLIPTextConfig.__init__ |
18 | 0 | 0 |
attr |
ChineseCLIPTextConfig.bos_token_id |
1 | 0 | 0 |
attr |
ChineseCLIPTextConfig.pad_token_id |
1 | 0 | 0 |
attr |
ChineseCLIPTextConfig.eos_token_id |
1 | 0 | 0 |
attr |
ChineseCLIPTextConfig.vocab_size |
1 | 0 | 0 |
attr |
ChineseCLIPTextConfig.hidden_size |
1 | 0 | 0 |
attr |
ChineseCLIPTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
ChineseCLIPTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
ChineseCLIPTextConfig.hidden_act |
1 | 0 | 0 |
attr |
ChineseCLIPTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
ChineseCLIPTextConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
ChineseCLIPTextConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
ChineseCLIPTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
ChineseCLIPTextConfig.type_vocab_size |
1 | 0 | 0 |
attr |
ChineseCLIPTextConfig.initializer_range |
1 | 0 | 0 |
attr |
ChineseCLIPTextConfig.initializer_factor |
1 | 0 | 0 |
attr |
ChineseCLIPTextConfig.layer_norm_eps |
1 | 0 | 0 |
meth |
ChineseCLIPConfig.__init__ |
6 | 0 | 0 |
attr |
ChineseCLIPConfig.text_config |
1 | 0 | 0 |
attr |
ChineseCLIPConfig.vision_config |
1 | 0 | 0 |
attr |
ChineseCLIPConfig.projection_dim |
1 | 0 | 0 |
attr |
ChineseCLIPConfig.logit_scale_init_value |
1 | 0 | 0 |
attr |
ChineseCLIPConfig.initializer_factor |
1 | 0 | 0 |
attr |
ChineseCLIPConfig.initializer_range |
1 | 0 | 0 |
transformers.models.chinese_clip.image_processing_chinese_clip (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ChineseCLIPImageProcessor.__init__ |
13 | 12 | 0 |
meth |
ChineseCLIPImageProcessor.resize |
7 | 6 | 0 |
attr |
ChineseCLIPImageProcessor.do_resize |
1 | 0 | 0 |
attr |
ChineseCLIPImageProcessor.size |
1 | 0 | 0 |
attr |
ChineseCLIPImageProcessor.resample |
1 | 0 | 0 |
attr |
ChineseCLIPImageProcessor.do_center_crop |
1 | 0 | 0 |
attr |
ChineseCLIPImageProcessor.crop_size |
1 | 0 | 0 |
attr |
ChineseCLIPImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
ChineseCLIPImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
ChineseCLIPImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
ChineseCLIPImageProcessor.image_mean |
1 | 0 | 0 |
attr |
ChineseCLIPImageProcessor.image_std |
1 | 0 | 0 |
attr |
ChineseCLIPImageProcessor.do_convert_rgb |
1 | 0 | 0 |
transformers.models.chinese_clip.modeling_chinese_clip (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ChineseCLIPTextModel.__init__ |
3 | 0 | 0 |
meth |
ChineseCLIPTextModel.get_input_embeddings |
1 | 0 | 0 |
meth |
ChineseCLIPTextModel.set_input_embeddings |
2 | 0 | 0 |
meth |
ChineseCLIPTextModel.forward |
14 | 13 | 0 |
attr |
ChineseCLIPTextModel.embeddings |
1 | 0 | 0 |
attr |
ChineseCLIPTextModel.encoder |
1 | 0 | 0 |
attr |
ChineseCLIPTextModel.pooler |
1 | 0 | 0 |
meth |
ChineseCLIPModel.__init__ |
2 | 1 | 0 |
meth |
ChineseCLIPModel.forward |
12 | 11 | 0 |
attr |
ChineseCLIPModel.projection_dim |
1 | 0 | 0 |
attr |
ChineseCLIPModel.text_embed_dim |
1 | 0 | 0 |
attr |
ChineseCLIPModel.vision_embed_dim |
1 | 0 | 0 |
attr |
ChineseCLIPModel.text_model |
1 | 0 | 0 |
attr |
ChineseCLIPModel.vision_model |
1 | 0 | 0 |
attr |
ChineseCLIPModel.visual_projection |
1 | 0 | 0 |
attr |
ChineseCLIPModel.text_projection |
1 | 0 | 0 |
attr |
ChineseCLIPModel.logit_scale |
1 | 0 | 0 |
meth |
ChineseCLIPVisionModel.__init__ |
2 | 1 | 0 |
meth |
ChineseCLIPVisionModel.forward |
7 | 6 | 0 |
attr |
ChineseCLIPVisionModel.vision_model |
1 | 0 | 0 |
meth |
ChineseCLIPPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.chinese_clip.processing_chinese_clip (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ChineseCLIPProcessor.__init__ |
4 | 0 | 0 |
transformers.models.clap.configuration_clap (108 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ClapTextConfig.__init__ |
19 | 0 | 0 |
attr |
ClapTextConfig.pad_token_id |
1 | 0 | 0 |
attr |
ClapTextConfig.bos_token_id |
1 | 0 | 0 |
attr |
ClapTextConfig.eos_token_id |
1 | 0 | 0 |
attr |
ClapTextConfig.vocab_size |
1 | 0 | 0 |
attr |
ClapTextConfig.hidden_size |
1 | 0 | 0 |
attr |
ClapTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
ClapTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
ClapTextConfig.hidden_act |
1 | 0 | 0 |
attr |
ClapTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
ClapTextConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
ClapTextConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
ClapTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
ClapTextConfig.type_vocab_size |
1 | 0 | 0 |
attr |
ClapTextConfig.initializer_factor |
1 | 0 | 0 |
attr |
ClapTextConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
ClapTextConfig.projection_hidden_act |
1 | 0 | 0 |
attr |
ClapTextConfig.projection_dim |
1 | 0 | 0 |
meth |
ClapConfig.__init__ |
8 | 0 | 0 |
attr |
ClapConfig.text_config |
1 | 0 | 0 |
attr |
ClapConfig.audio_config |
1 | 0 | 0 |
attr |
ClapConfig.projection_dim |
1 | 0 | 0 |
attr |
ClapConfig.projection_hidden_act |
1 | 0 | 0 |
attr |
ClapConfig.hidden_size |
1 | 0 | 0 |
attr |
ClapConfig.logit_scale_init_value |
1 | 0 | 0 |
attr |
ClapConfig.initializer_factor |
1 | 0 | 0 |
attr |
ClapConfig.num_hidden_layers |
1 | 0 | 0 |
meth |
ClapAudioConfig.__init__ |
29 | 0 | 0 |
attr |
ClapAudioConfig.window_size |
1 | 0 | 0 |
attr |
ClapAudioConfig.num_mel_bins |
1 | 0 | 0 |
attr |
ClapAudioConfig.spec_size |
1 | 0 | 0 |
attr |
ClapAudioConfig.patch_size |
1 | 0 | 0 |
attr |
ClapAudioConfig.patch_stride |
1 | 0 | 0 |
attr |
ClapAudioConfig.num_classes |
1 | 0 | 0 |
attr |
ClapAudioConfig.hidden_size |
1 | 0 | 0 |
attr |
ClapAudioConfig.depths |
1 | 0 | 0 |
attr |
ClapAudioConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
ClapAudioConfig.num_attention_heads |
1 | 0 | 0 |
attr |
ClapAudioConfig.enable_fusion |
1 | 0 | 0 |
attr |
ClapAudioConfig.fusion_type |
1 | 0 | 0 |
attr |
ClapAudioConfig.hidden_act |
1 | 0 | 0 |
attr |
ClapAudioConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
ClapAudioConfig.projection_dim |
1 | 0 | 0 |
attr |
ClapAudioConfig.flatten_patch_embeds |
1 | 0 | 0 |
attr |
ClapAudioConfig.patch_embeds_hidden_size |
1 | 0 | 0 |
attr |
ClapAudioConfig.enable_patch_layer_norm |
1 | 0 | 0 |
attr |
ClapAudioConfig.drop_path_rate |
1 | 0 | 0 |
attr |
ClapAudioConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
ClapAudioConfig.qkv_bias |
1 | 0 | 0 |
attr |
ClapAudioConfig.mlp_ratio |
1 | 0 | 0 |
attr |
ClapAudioConfig.patch_embed_input_channels |
1 | 0 | 0 |
attr |
ClapAudioConfig.aff_block_r |
1 | 0 | 0 |
attr |
ClapAudioConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
ClapAudioConfig.initializer_factor |
1 | 0 | 0 |
attr |
ClapAudioConfig.projection_hidden_act |
1 | 0 | 0 |
transformers.models.clap.feature_extraction_clap (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ClapFeatureExtractor.__init__ |
14 | 5 | 0 |
meth |
ClapFeatureExtractor._random_mel_fusion |
4 | 0 | 0 |
meth |
ClapFeatureExtractor._get_input_mel |
5 | 2 | 0 |
meth |
ClapFeatureExtractor.__call__ |
8 | 7 | 0 |
attr |
ClapFeatureExtractor.top_db |
1 | 0 | 0 |
attr |
ClapFeatureExtractor.truncation |
1 | 0 | 0 |
attr |
ClapFeatureExtractor.padding |
1 | 0 | 0 |
attr |
ClapFeatureExtractor.fft_window_size |
1 | 0 | 0 |
attr |
ClapFeatureExtractor.nb_frequency_bins |
1 | 0 | 0 |
attr |
ClapFeatureExtractor.hop_length |
1 | 0 | 0 |
attr |
ClapFeatureExtractor.max_length_s |
1 | 0 | 0 |
attr |
ClapFeatureExtractor.nb_max_samples |
1 | 0 | 0 |
attr |
ClapFeatureExtractor.sampling_rate |
1 | 0 | 0 |
attr |
ClapFeatureExtractor.frequency_min |
1 | 0 | 0 |
attr |
ClapFeatureExtractor.frequency_max |
1 | 0 | 0 |
attr |
ClapFeatureExtractor.mel_filters |
1 | 0 | 0 |
attr |
ClapFeatureExtractor.mel_filters_slaney |
1 | 0 | 0 |
transformers.models.clap.modeling_clap (33 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ClapAudioModelWithProjection.__init__ |
2 | 1 | 0 |
meth |
ClapAudioModelWithProjection.forward |
7 | 6 | 0 |
attr |
ClapAudioModelWithProjection.audio_model |
1 | 0 | 0 |
attr |
ClapAudioModelWithProjection.audio_projection |
1 | 0 | 0 |
meth |
ClapTextModelWithProjection.__init__ |
2 | 1 | 0 |
meth |
ClapTextModelWithProjection.set_input_embeddings |
2 | 0 | 0 |
meth |
ClapTextModelWithProjection.forward |
8 | 7 | 0 |
attr |
ClapTextModelWithProjection.text_model |
1 | 0 | 0 |
attr |
ClapTextModelWithProjection.text_projection |
1 | 0 | 0 |
meth |
ClapModel.__init__ |
2 | 1 | 0 |
meth |
ClapModel.forward |
11 | 10 | 0 |
attr |
ClapModel.logit_scale_a |
1 | 0 | 0 |
attr |
ClapModel.logit_scale_t |
1 | 0 | 0 |
attr |
ClapModel.projection_dim |
1 | 0 | 0 |
attr |
ClapModel.text_model |
1 | 0 | 0 |
attr |
ClapModel.text_projection |
1 | 0 | 0 |
attr |
ClapModel.audio_model |
1 | 0 | 0 |
attr |
ClapModel.audio_projection |
1 | 0 | 0 |
meth |
ClapAudioModel.__init__ |
2 | 1 | 0 |
meth |
ClapAudioModel.forward |
7 | 6 | 0 |
attr |
ClapAudioModel.audio_encoder |
1 | 0 | 0 |
meth |
ClapPreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
ClapTextModel.__init__ |
3 | 0 | 0 |
meth |
ClapTextModel.get_input_embeddings |
1 | 0 | 0 |
meth |
ClapTextModel.set_input_embeddings |
2 | 0 | 0 |
meth |
ClapTextModel.forward |
10 | 9 | 0 |
attr |
ClapTextModel.embeddings |
1 | 0 | 0 |
attr |
ClapTextModel.encoder |
1 | 0 | 0 |
attr |
ClapTextModel.pooler |
1 | 0 | 0 |
transformers.models.clap.processing_clap (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ClapProcessor.__init__ |
3 | 0 | 0 |
transformers.models.clip.configuration_clip (71 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CLIPTextConfig.__init__ |
17 | 0 | 0 |
attr |
CLIPTextConfig.pad_token_id |
1 | 0 | 0 |
attr |
CLIPTextConfig.bos_token_id |
1 | 0 | 0 |
attr |
CLIPTextConfig.eos_token_id |
1 | 0 | 0 |
attr |
CLIPTextConfig.vocab_size |
1 | 0 | 0 |
attr |
CLIPTextConfig.hidden_size |
1 | 0 | 0 |
attr |
CLIPTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
CLIPTextConfig.projection_dim |
1 | 0 | 0 |
attr |
CLIPTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
CLIPTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
CLIPTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
CLIPTextConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
CLIPTextConfig.hidden_act |
1 | 0 | 0 |
attr |
CLIPTextConfig.initializer_range |
1 | 0 | 0 |
attr |
CLIPTextConfig.initializer_factor |
1 | 0 | 0 |
attr |
CLIPTextConfig.attention_dropout |
1 | 0 | 0 |
meth |
CLIPVisionConfig.__init__ |
15 | 0 | 0 |
attr |
CLIPVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
CLIPVisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
CLIPVisionConfig.projection_dim |
1 | 0 | 0 |
attr |
CLIPVisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
CLIPVisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
CLIPVisionConfig.num_channels |
1 | 0 | 0 |
attr |
CLIPVisionConfig.patch_size |
1 | 0 | 0 |
attr |
CLIPVisionConfig.image_size |
1 | 0 | 0 |
attr |
CLIPVisionConfig.initializer_range |
1 | 0 | 0 |
attr |
CLIPVisionConfig.initializer_factor |
1 | 0 | 0 |
attr |
CLIPVisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
CLIPVisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
CLIPVisionConfig.hidden_act |
1 | 0 | 0 |
meth |
CLIPConfig.__init__ |
6 | 0 | 0 |
attr |
CLIPConfig.text_config |
1 | 0 | 0 |
attr |
CLIPConfig.vision_config |
1 | 0 | 0 |
attr |
CLIPConfig.projection_dim |
1 | 0 | 0 |
attr |
CLIPConfig.logit_scale_init_value |
1 | 0 | 0 |
attr |
CLIPConfig.initializer_factor |
1 | 0 | 0 |
transformers.models.clip.image_processing_clip (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CLIPImageProcessor.__init__ |
13 | 12 | 0 |
meth |
CLIPImageProcessor.resize |
7 | 6 | 0 |
meth |
CLIPImageProcessor.preprocess |
17 | 16 | 0 |
attr |
CLIPImageProcessor.do_resize |
1 | 0 | 0 |
attr |
CLIPImageProcessor.size |
1 | 0 | 0 |
attr |
CLIPImageProcessor.resample |
1 | 0 | 0 |
attr |
CLIPImageProcessor.do_center_crop |
1 | 0 | 0 |
attr |
CLIPImageProcessor.crop_size |
1 | 0 | 0 |
attr |
CLIPImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
CLIPImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
CLIPImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
CLIPImageProcessor.image_mean |
1 | 0 | 0 |
attr |
CLIPImageProcessor.image_std |
1 | 0 | 0 |
attr |
CLIPImageProcessor.do_convert_rgb |
1 | 0 | 0 |
transformers.models.clip.image_processing_clip_fast (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CLIPImageProcessorFast.__init__ |
2 | 1 | 0 |
transformers.models.clip.modeling_clip (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CLIPTextModel.__init__ |
2 | 1 | 0 |
meth |
CLIPTextModel.set_input_embeddings |
2 | 0 | 0 |
attr |
CLIPTextModel.text_model |
1 | 0 | 0 |
meth |
CLIPTextModelWithProjection.__init__ |
2 | 1 | 0 |
meth |
CLIPTextModelWithProjection.set_input_embeddings |
2 | 0 | 0 |
attr |
CLIPTextModelWithProjection.text_model |
1 | 0 | 0 |
attr |
CLIPTextModelWithProjection.text_projection |
1 | 0 | 0 |
meth |
CLIPVisionModelWithProjection.__init__ |
2 | 1 | 0 |
attr |
CLIPVisionModelWithProjection.vision_model |
1 | 0 | 0 |
attr |
CLIPVisionModelWithProjection.visual_projection |
1 | 0 | 0 |
meth |
CLIPModel.__init__ |
2 | 1 | 0 |
attr |
CLIPModel.projection_dim |
1 | 0 | 0 |
attr |
CLIPModel.text_embed_dim |
1 | 0 | 0 |
attr |
CLIPModel.vision_embed_dim |
1 | 0 | 0 |
attr |
CLIPModel.text_model |
1 | 0 | 0 |
attr |
CLIPModel.vision_model |
1 | 0 | 0 |
attr |
CLIPModel.visual_projection |
1 | 0 | 0 |
attr |
CLIPModel.text_projection |
1 | 0 | 0 |
attr |
CLIPModel.logit_scale |
1 | 0 | 0 |
attr |
CLIPForImageClassification.num_labels |
1 | 0 | 0 |
attr |
CLIPForImageClassification.vision_model |
1 | 0 | 0 |
attr |
CLIPForImageClassification.classifier |
1 | 0 | 0 |
meth |
CLIPVisionModel.__init__ |
2 | 1 | 0 |
attr |
CLIPVisionModel.vision_model |
1 | 0 | 0 |
meth |
CLIPPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.clip.processing_clip (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CLIPProcessor.__init__ |
4 | 0 | 0 |
transformers.models.clip.tokenization_clip (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CLIPTokenizer.__init__ |
8 | 6 | 0 |
meth |
CLIPTokenizer._wrap_decode_method_backend_tokenizer |
1 | 0 | 0 |
transformers.models.clipseg.configuration_clipseg (83 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CLIPSegVisionConfig.__init__ |
14 | 0 | 0 |
attr |
CLIPSegVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
CLIPSegVisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
CLIPSegVisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
CLIPSegVisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
CLIPSegVisionConfig.num_channels |
1 | 0 | 0 |
attr |
CLIPSegVisionConfig.patch_size |
1 | 0 | 0 |
attr |
CLIPSegVisionConfig.image_size |
1 | 0 | 0 |
attr |
CLIPSegVisionConfig.initializer_range |
1 | 0 | 0 |
attr |
CLIPSegVisionConfig.initializer_factor |
1 | 0 | 0 |
attr |
CLIPSegVisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
CLIPSegVisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
CLIPSegVisionConfig.hidden_act |
1 | 0 | 0 |
meth |
CLIPSegTextConfig.__init__ |
16 | 0 | 0 |
attr |
CLIPSegTextConfig.pad_token_id |
1 | 0 | 0 |
attr |
CLIPSegTextConfig.bos_token_id |
1 | 0 | 0 |
attr |
CLIPSegTextConfig.eos_token_id |
1 | 0 | 0 |
attr |
CLIPSegTextConfig.vocab_size |
1 | 0 | 0 |
attr |
CLIPSegTextConfig.hidden_size |
1 | 0 | 0 |
attr |
CLIPSegTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
CLIPSegTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
CLIPSegTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
CLIPSegTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
CLIPSegTextConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
CLIPSegTextConfig.hidden_act |
1 | 0 | 0 |
attr |
CLIPSegTextConfig.initializer_range |
1 | 0 | 0 |
attr |
CLIPSegTextConfig.initializer_factor |
1 | 0 | 0 |
attr |
CLIPSegTextConfig.attention_dropout |
1 | 0 | 0 |
meth |
CLIPSegConfig.__init__ |
14 | 0 | 0 |
attr |
CLIPSegConfig.text_config |
1 | 0 | 0 |
attr |
CLIPSegConfig.vision_config |
1 | 0 | 0 |
attr |
CLIPSegConfig.projection_dim |
1 | 0 | 0 |
attr |
CLIPSegConfig.logit_scale_init_value |
1 | 0 | 0 |
attr |
CLIPSegConfig.extract_layers |
1 | 0 | 0 |
attr |
CLIPSegConfig.reduce_dim |
1 | 0 | 0 |
attr |
CLIPSegConfig.decoder_num_attention_heads |
1 | 0 | 0 |
attr |
CLIPSegConfig.decoder_attention_dropout |
1 | 0 | 0 |
attr |
CLIPSegConfig.decoder_hidden_act |
1 | 0 | 0 |
attr |
CLIPSegConfig.decoder_intermediate_size |
1 | 0 | 0 |
attr |
CLIPSegConfig.conditional_layer |
1 | 0 | 0 |
attr |
CLIPSegConfig.initializer_factor |
1 | 0 | 0 |
attr |
CLIPSegConfig.use_complex_transposed_convolution |
1 | 0 | 0 |
transformers.models.clipseg.modeling_clipseg (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CLIPSegVisionModel.__init__ |
2 | 1 | 0 |
meth |
CLIPSegVisionModel.forward |
7 | 6 | 0 |
attr |
CLIPSegVisionModel.vision_model |
1 | 0 | 0 |
meth |
CLIPSegModel.__init__ |
2 | 1 | 0 |
meth |
CLIPSegModel.forward |
11 | 10 | 0 |
attr |
CLIPSegModel.projection_dim |
1 | 0 | 0 |
attr |
CLIPSegModel.text_embed_dim |
1 | 0 | 0 |
attr |
CLIPSegModel.vision_embed_dim |
1 | 0 | 0 |
attr |
CLIPSegModel.text_model |
1 | 0 | 0 |
attr |
CLIPSegModel.vision_model |
1 | 0 | 0 |
attr |
CLIPSegModel.visual_projection |
1 | 0 | 0 |
attr |
CLIPSegModel.text_projection |
1 | 0 | 0 |
attr |
CLIPSegModel.logit_scale |
1 | 0 | 0 |
meth |
CLIPSegPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
CLIPSegForImageSegmentation.__init__ |
2 | 1 | 0 |
meth |
CLIPSegForImageSegmentation.get_conditional_embeddings |
6 | 5 | 0 |
meth |
CLIPSegForImageSegmentation.forward |
13 | 12 | 0 |
attr |
CLIPSegForImageSegmentation.clip |
1 | 0 | 0 |
attr |
CLIPSegForImageSegmentation.extract_layers |
1 | 0 | 0 |
attr |
CLIPSegForImageSegmentation.decoder |
1 | 0 | 0 |
meth |
CLIPSegTextModel.__init__ |
2 | 1 | 0 |
meth |
CLIPSegTextModel.set_input_embeddings |
2 | 0 | 0 |
meth |
CLIPSegTextModel.forward |
8 | 7 | 0 |
attr |
CLIPSegTextModel.text_model |
1 | 0 | 0 |
transformers.models.clipseg.processing_clipseg (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CLIPSegProcessor.__init__ |
4 | 0 | 0 |
meth |
CLIPSegProcessor.__call__ |
6 | 0 | 0 |
transformers.models.clvp.configuration_clvp (110 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ClvpEncoderConfig.__init__ |
19 | 0 | 0 |
meth |
ClvpEncoderConfig.from_pretrained |
4 | 2 | 0 |
attr |
ClvpEncoderConfig.vocab_size |
1 | 0 | 0 |
attr |
ClvpEncoderConfig.hidden_size |
1 | 0 | 0 |
attr |
ClvpEncoderConfig.intermediate_size |
1 | 0 | 0 |
attr |
ClvpEncoderConfig.projection_dim |
1 | 0 | 0 |
attr |
ClvpEncoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
ClvpEncoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
ClvpEncoderConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
ClvpEncoderConfig.hidden_act |
1 | 0 | 0 |
attr |
ClvpEncoderConfig.initializer_factor |
1 | 0 | 0 |
attr |
ClvpEncoderConfig.attention_dropout |
1 | 0 | 0 |
attr |
ClvpEncoderConfig.dropout |
1 | 0 | 0 |
attr |
ClvpEncoderConfig.use_rotary_embedding |
1 | 0 | 0 |
attr |
ClvpEncoderConfig.use_attention_bias |
1 | 0 | 0 |
attr |
ClvpEncoderConfig.summary_type |
1 | 0 | 0 |
attr |
ClvpEncoderConfig.bos_token_id |
1 | 0 | 0 |
attr |
ClvpEncoderConfig.eos_token_id |
1 | 0 | 0 |
attr |
ClvpEncoderConfig.pad_token_id |
1 | 0 | 0 |
meth |
ClvpDecoderConfig.__init__ |
30 | 0 | 0 |
attr |
ClvpDecoderConfig.vocab_size |
1 | 0 | 0 |
attr |
ClvpDecoderConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
ClvpDecoderConfig.max_text_tokens |
1 | 0 | 0 |
attr |
ClvpDecoderConfig.hidden_size |
1 | 0 | 0 |
attr |
ClvpDecoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
ClvpDecoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
ClvpDecoderConfig.n_inner |
1 | 0 | 0 |
attr |
ClvpDecoderConfig.num_mel_attn_blocks |
1 | 0 | 0 |
attr |
ClvpDecoderConfig.activation_function |
1 | 0 | 0 |
attr |
ClvpDecoderConfig.resid_pdrop |
1 | 0 | 0 |
attr |
ClvpDecoderConfig.embd_pdrop |
1 | 0 | 0 |
attr |
ClvpDecoderConfig.attention_dropout |
1 | 0 | 0 |
attr |
ClvpDecoderConfig.layer_norm_epsilon |
1 | 0 | 0 |
attr |
ClvpDecoderConfig.initializer_range |
1 | 0 | 0 |
attr |
ClvpDecoderConfig.summary_type |
1 | 0 | 0 |
attr |
ClvpDecoderConfig.summary_use_proj |
1 | 0 | 0 |
attr |
ClvpDecoderConfig.summary_activation |
1 | 0 | 0 |
attr |
ClvpDecoderConfig.summary_first_dropout |
1 | 0 | 0 |
attr |
ClvpDecoderConfig.summary_proj_to_labels |
1 | 0 | 0 |
attr |
ClvpDecoderConfig.use_cache |
1 | 0 | 0 |
attr |
ClvpDecoderConfig.feature_size |
1 | 0 | 0 |
attr |
ClvpDecoderConfig.use_attention_bias |
1 | 0 | 0 |
attr |
ClvpDecoderConfig.initializer_factor |
1 | 0 | 0 |
attr |
ClvpDecoderConfig.decoder_fixing_codes |
1 | 0 | 0 |
attr |
ClvpDecoderConfig.bos_token_id |
1 | 0 | 0 |
attr |
ClvpDecoderConfig.eos_token_id |
1 | 0 | 0 |
attr |
ClvpDecoderConfig.pad_token_id |
1 | 0 | 0 |
attr |
ClvpDecoderConfig.add_cross_attention |
1 | 0 | 0 |
meth |
ClvpConfig.__init__ |
8 | 0 | 0 |
attr |
ClvpConfig.text_config |
1 | 0 | 0 |
attr |
ClvpConfig.speech_config |
1 | 0 | 0 |
attr |
ClvpConfig.decoder_config |
1 | 0 | 0 |
attr |
ClvpConfig.projection_dim |
1 | 0 | 0 |
attr |
ClvpConfig.logit_scale_init_value |
1 | 0 | 0 |
attr |
ClvpConfig.initializer_factor |
1 | 0 | 0 |
transformers.models.clvp.feature_extraction_clvp (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ClvpFeatureExtractor.__init__ |
11 | 0 | 0 |
meth |
ClvpFeatureExtractor.__call__ |
10 | 9 | 0 |
attr |
ClvpFeatureExtractor.n_fft |
1 | 0 | 0 |
attr |
ClvpFeatureExtractor.hop_length |
1 | 0 | 0 |
attr |
ClvpFeatureExtractor.chunk_length |
1 | 0 | 0 |
attr |
ClvpFeatureExtractor.n_samples |
1 | 0 | 0 |
attr |
ClvpFeatureExtractor.nb_max_frames |
1 | 0 | 0 |
attr |
ClvpFeatureExtractor.sampling_rate |
1 | 0 | 0 |
attr |
ClvpFeatureExtractor.default_audio_length |
1 | 0 | 0 |
attr |
ClvpFeatureExtractor.mel_norms |
1 | 0 | 0 |
attr |
ClvpFeatureExtractor.mel_filters |
1 | 0 | 0 |
transformers.models.clvp.modeling_clvp (59 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ClvpDecoder.__init__ |
2 | 0 | 0 |
meth |
ClvpDecoder.get_input_embeddings |
1 | 0 | 0 |
meth |
ClvpDecoder.set_input_embeddings |
2 | 0 | 0 |
meth |
ClvpDecoder.forward |
13 | 12 | 0 |
attr |
ClvpDecoder.input_embeds_layer |
1 | 0 | 0 |
attr |
ClvpDecoder.position_embeds_layer |
1 | 0 | 0 |
attr |
ClvpDecoder.drop |
1 | 0 | 0 |
attr |
ClvpDecoder.layers |
1 | 0 | 0 |
attr |
ClvpDecoder.layer_norm |
1 | 0 | 0 |
attr |
ClvpDecoder.gradient_checkpointing |
1 | 0 | 0 |
meth |
ClvpModelForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
ClvpModelForConditionalGeneration.get_speech_features |
8 | 7 | 0 |
meth |
ClvpModelForConditionalGeneration.forward |
12 | 11 | 0 |
meth |
ClvpModelForConditionalGeneration.generate |
8 | 6 | 0 |
attr |
ClvpModelForConditionalGeneration.conditioning_encoder |
1 | 0 | 0 |
attr |
ClvpModelForConditionalGeneration.speech_decoder_model |
1 | 0 | 0 |
attr |
ClvpModelForConditionalGeneration.text_encoder_model |
1 | 0 | 0 |
attr |
ClvpModelForConditionalGeneration.speech_encoder_model |
1 | 0 | 0 |
attr |
ClvpModelForConditionalGeneration.logit_scale |
1 | 0 | 0 |
meth |
ClvpForCausalLM.__init__ |
2 | 0 | 0 |
meth |
ClvpForCausalLM.get_output_embeddings |
1 | 0 | 0 |
meth |
ClvpForCausalLM.get_input_embeddings |
1 | 0 | 0 |
meth |
ClvpForCausalLM.set_input_embeddings |
2 | 0 | 0 |
meth |
ClvpForCausalLM.prepare_inputs_for_generation |
8 | 0 | 0 |
meth |
ClvpForCausalLM.forward |
14 | 13 | 0 |
attr |
ClvpForCausalLM.model |
1 | 0 | 0 |
attr |
ClvpForCausalLM.final_norm |
1 | 0 | 0 |
attr |
ClvpForCausalLM.lm_head |
1 | 0 | 0 |
meth |
ClvpEncoder.__init__ |
2 | 1 | 0 |
meth |
ClvpEncoder.get_input_embeddings |
1 | 0 | 0 |
meth |
ClvpEncoder.set_input_embeddings |
2 | 0 | 0 |
meth |
ClvpEncoder.forward |
9 | 8 | 0 |
attr |
ClvpEncoder.token_embedding |
1 | 0 | 0 |
attr |
ClvpEncoder.rotary_pos_emb |
1 | 0 | 0 |
attr |
ClvpEncoder.layers |
1 | 0 | 0 |
attr |
ClvpEncoder.sequence_summary |
1 | 0 | 0 |
attr |
ClvpEncoder.final_layer_norm |
1 | 0 | 0 |
attr |
ClvpEncoder.projection |
1 | 0 | 0 |
attr |
ClvpEncoder.gradient_checkpointing |
1 | 0 | 0 |
meth |
ClvpPreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
ClvpModel.__init__ |
2 | 1 | 0 |
meth |
ClvpModel.get_input_embeddings |
1 | 0 | 0 |
meth |
ClvpModel.set_input_embeddings |
2 | 0 | 0 |
meth |
ClvpModel.forward |
13 | 12 | 0 |
attr |
ClvpModel.decoder |
1 | 0 | 0 |
transformers.models.clvp.number_normalizer (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EnglishNormalizer.__init__ |
1 | 0 | 0 |
meth |
EnglishNormalizer.__call__ |
2 | 0 | 0 |
attr |
EnglishNormalizer.ones |
1 | 0 | 0 |
attr |
EnglishNormalizer.teens |
1 | 0 | 0 |
attr |
EnglishNormalizer.tens |
1 | 0 | 0 |
transformers.models.clvp.processing_clvp (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ClvpProcessor.__init__ |
3 | 0 | 0 |
meth |
ClvpProcessor.__call__ |
3 | 0 | 0 |
transformers.models.clvp.tokenization_clvp (34 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ClvpTokenizer.__init__ |
10 | 0 | 0 |
meth |
ClvpTokenizer.get_vocab |
1 | 0 | 0 |
meth |
ClvpTokenizer.bpe |
2 | 0 | 0 |
meth |
ClvpTokenizer._tokenize |
2 | 0 | 0 |
meth |
ClvpTokenizer._convert_token_to_id |
2 | 0 | 0 |
meth |
ClvpTokenizer._convert_id_to_token |
2 | 0 | 0 |
meth |
ClvpTokenizer.convert_tokens_to_string |
2 | 0 | 0 |
meth |
ClvpTokenizer.clean_up_tokenization |
2 | 0 | 0 |
prop |
ClvpTokenizer.vocab_size |
1 | 0 | 0 |
prop |
ClvpTokenizer.normalizer |
1 | 0 | 0 |
attr |
ClvpTokenizer.decoder |
1 | 0 | 0 |
attr |
ClvpTokenizer.errors |
1 | 0 | 0 |
attr |
ClvpTokenizer.byte_encoder |
1 | 0 | 0 |
attr |
ClvpTokenizer.byte_decoder |
1 | 0 | 0 |
attr |
ClvpTokenizer.bpe_ranks |
1 | 0 | 0 |
attr |
ClvpTokenizer.cache |
1 | 0 | 0 |
attr |
ClvpTokenizer.add_prefix_space |
1 | 0 | 0 |
attr |
ClvpTokenizer.pat |
1 | 0 | 0 |
attr |
ClvpTokenizer.encoder |
1 | 0 | 0 |
transformers.models.code_llama.tokenization_code_llama (39 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CodeLlamaTokenizer.__init__ |
17 | 5 | 0 |
meth |
CodeLlamaTokenizer.set_infilling_processor |
4 | 0 | 0 |
meth |
CodeLlamaTokenizer.tokenize |
5 | 0 | 0 |
meth |
CodeLlamaTokenizer._encode_plus |
7 | 0 | 0 |
prop |
CodeLlamaTokenizer.prefix_token |
1 | 0 | 0 |
prop |
CodeLlamaTokenizer.prefix_id |
1 | 0 | 0 |
prop |
CodeLlamaTokenizer.middle_token |
1 | 0 | 0 |
prop |
CodeLlamaTokenizer.middle_id |
1 | 0 | 0 |
prop |
CodeLlamaTokenizer.suffix_token |
1 | 0 | 0 |
prop |
CodeLlamaTokenizer.suffix_id |
1 | 0 | 0 |
prop |
CodeLlamaTokenizer.eot_id |
1 | 0 | 0 |
prop |
CodeLlamaTokenizer.eot_token |
1 | 0 | 0 |
attr |
CodeLlamaTokenizer.add_prefix_space |
1 | 0 | 0 |
attr |
CodeLlamaTokenizer.use_default_system_prompt |
1 | 0 | 0 |
attr |
CodeLlamaTokenizer.fill_token |
1 | 0 | 0 |
transformers.models.codegen.configuration_codegen (38 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CodeGenConfig.__init__ |
20 | 0 | 0 |
attr |
CodeGenConfig.vocab_size |
1 | 0 | 0 |
attr |
CodeGenConfig.n_ctx |
1 | 0 | 0 |
attr |
CodeGenConfig.n_positions |
1 | 0 | 0 |
attr |
CodeGenConfig.n_embd |
1 | 0 | 0 |
attr |
CodeGenConfig.n_layer |
1 | 0 | 0 |
attr |
CodeGenConfig.n_head |
1 | 0 | 0 |
attr |
CodeGenConfig.n_inner |
1 | 0 | 0 |
attr |
CodeGenConfig.rotary_dim |
1 | 0 | 0 |
attr |
CodeGenConfig.activation_function |
1 | 0 | 0 |
attr |
CodeGenConfig.resid_pdrop |
1 | 0 | 0 |
attr |
CodeGenConfig.embd_pdrop |
1 | 0 | 0 |
attr |
CodeGenConfig.attn_pdrop |
1 | 0 | 0 |
attr |
CodeGenConfig.layer_norm_epsilon |
1 | 0 | 0 |
attr |
CodeGenConfig.initializer_range |
1 | 0 | 0 |
attr |
CodeGenConfig.use_cache |
1 | 0 | 0 |
attr |
CodeGenConfig.bos_token_id |
1 | 0 | 0 |
attr |
CodeGenConfig.eos_token_id |
1 | 0 | 0 |
attr |
CodeGenConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.codegen.modeling_codegen (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CodeGenForCausalLM.__init__ |
2 | 0 | 0 |
meth |
CodeGenForCausalLM.forward |
15 | 14 | 0 |
attr |
CodeGenForCausalLM.transformer |
1 | 0 | 0 |
attr |
CodeGenForCausalLM.lm_head |
1 | 0 | 0 |
meth |
CodeGenModel.__init__ |
2 | 0 | 0 |
meth |
CodeGenModel.get_input_embeddings |
1 | 0 | 0 |
meth |
CodeGenModel.set_input_embeddings |
2 | 0 | 0 |
meth |
CodeGenModel.forward |
13 | 12 | 0 |
attr |
CodeGenModel.embed_dim |
1 | 0 | 0 |
attr |
CodeGenModel.vocab_size |
1 | 0 | 0 |
attr |
CodeGenModel.wte |
1 | 0 | 0 |
attr |
CodeGenModel.drop |
1 | 0 | 0 |
attr |
CodeGenModel.h |
1 | 0 | 0 |
attr |
CodeGenModel.ln_f |
1 | 0 | 0 |
attr |
CodeGenModel.rotary_dim |
1 | 0 | 0 |
attr |
CodeGenModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
CodeGenPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.codegen.tokenization_codegen (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CodeGenTokenizer.__init__ |
10 | 7 | 0 |
meth |
CodeGenTokenizer.decode |
6 | 5 | 0 |
meth |
CodeGenTokenizer.truncate |
3 | 0 | 0 |
attr |
CodeGenTokenizer.return_token_type_ids |
1 | 0 | 0 |
attr |
CodeGenTokenizer.add_prefix_space |
1 | 0 | 0 |
transformers.models.cohere.configuration_cohere (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CohereConfig.__init__ |
22 | 20 | 0 |
attr |
CohereConfig.vocab_size |
1 | 0 | 0 |
attr |
CohereConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
CohereConfig.hidden_size |
1 | 0 | 0 |
attr |
CohereConfig.logit_scale |
1 | 0 | 0 |
attr |
CohereConfig.intermediate_size |
1 | 0 | 0 |
attr |
CohereConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
CohereConfig.num_attention_heads |
1 | 0 | 0 |
attr |
CohereConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
CohereConfig.hidden_act |
1 | 0 | 0 |
attr |
CohereConfig.initializer_range |
1 | 0 | 0 |
attr |
CohereConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
CohereConfig.use_cache |
1 | 0 | 0 |
attr |
CohereConfig.attention_bias |
1 | 0 | 0 |
attr |
CohereConfig.attention_dropout |
1 | 0 | 0 |
attr |
CohereConfig.use_qk_norm |
1 | 0 | 0 |
attr |
CohereConfig.rope_parameters |
1 | 0 | 0 |
attr |
CohereConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
CohereConfig.pad_token_id |
1 | 0 | 0 |
attr |
CohereConfig.bos_token_id |
1 | 0 | 0 |
attr |
CohereConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.cohere.modeling_cohere (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CohereForCausalLM.__init__ |
2 | 0 | 0 |
attr |
CohereForCausalLM.model |
1 | 0 | 0 |
attr |
CohereForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
CohereForCausalLM.lm_head |
1 | 0 | 0 |
attr |
CohereForCausalLM.logit_scale |
1 | 0 | 0 |
attr |
CohereForCausalLM.tie_word_embeddings |
1 | 0 | 0 |
meth |
CohereModel.__init__ |
2 | 1 | 0 |
attr |
CohereModel.padding_idx |
1 | 0 | 0 |
attr |
CohereModel.vocab_size |
1 | 0 | 0 |
attr |
CohereModel.embed_tokens |
1 | 0 | 0 |
attr |
CohereModel.layers |
1 | 0 | 0 |
attr |
CohereModel.norm |
1 | 0 | 0 |
attr |
CohereModel.rotary_emb |
1 | 0 | 0 |
attr |
CohereModel.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.cohere.modular_cohere (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CohereForCausalLM.__init__ |
2 | 0 | 0 |
attr |
CohereForCausalLM.model |
1 | 0 | 0 |
attr |
CohereForCausalLM.logit_scale |
1 | 0 | 0 |
attr |
CohereForCausalLM.tie_word_embeddings |
1 | 0 | 0 |
attr |
CoherePreTrainedModel |
1 | 0 | 0 |
meth |
CohereModel.__init__ |
2 | 1 | 0 |
attr |
CohereModel.layers |
1 | 0 | 0 |
attr |
CohereModel.norm |
1 | 0 | 0 |
transformers.models.cohere.tokenization_cohere (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CohereTokenizer.__init__ |
14 | 12 | 0 |
meth |
CohereTokenizer.apply_tool_use_template |
4 | 3 | 0 |
meth |
CohereTokenizer.apply_grounded_generation_template |
5 | 4 | 0 |
attr |
CohereTokenizer.use_default_system_prompt |
1 | 0 | 0 |
attr |
CohereTokenizer.add_prefix_space |
1 | 0 | 0 |
attr |
CohereTokenizer.grounded_generation_template |
1 | 0 | 0 |
attr |
CohereTokenizer.tool_use_template |
1 | 0 | 0 |
transformers.models.cohere2.configuration_cohere2 (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Cohere2Config.__init__ |
23 | 21 | 0 |
attr |
Cohere2Config.vocab_size |
1 | 0 | 0 |
attr |
Cohere2Config.max_position_embeddings |
1 | 0 | 0 |
attr |
Cohere2Config.hidden_size |
1 | 0 | 0 |
attr |
Cohere2Config.logit_scale |
1 | 0 | 0 |
attr |
Cohere2Config.intermediate_size |
1 | 0 | 0 |
attr |
Cohere2Config.num_hidden_layers |
1 | 0 | 0 |
attr |
Cohere2Config.num_attention_heads |
1 | 0 | 0 |
attr |
Cohere2Config.num_key_value_heads |
1 | 0 | 0 |
attr |
Cohere2Config.hidden_act |
1 | 0 | 0 |
attr |
Cohere2Config.initializer_range |
1 | 0 | 0 |
attr |
Cohere2Config.layer_norm_eps |
1 | 0 | 0 |
attr |
Cohere2Config.use_cache |
1 | 0 | 0 |
attr |
Cohere2Config.attention_bias |
1 | 0 | 0 |
attr |
Cohere2Config.attention_dropout |
1 | 0 | 0 |
attr |
Cohere2Config.sliding_window |
1 | 0 | 0 |
attr |
Cohere2Config.layer_types |
1 | 0 | 0 |
attr |
Cohere2Config.head_dim |
1 | 0 | 0 |
attr |
Cohere2Config.pad_token_id |
1 | 0 | 0 |
attr |
Cohere2Config.bos_token_id |
1 | 0 | 0 |
attr |
Cohere2Config.eos_token_id |
1 | 0 | 0 |
attr |
Cohere2Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
Cohere2Config.rope_parameters |
1 | 0 | 0 |
transformers.models.cohere2.modeling_cohere2 (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Cohere2ForCausalLM.__init__ |
2 | 0 | 0 |
attr |
Cohere2ForCausalLM.model |
1 | 0 | 0 |
attr |
Cohere2ForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Cohere2ForCausalLM.lm_head |
1 | 0 | 0 |
attr |
Cohere2ForCausalLM.logit_scale |
1 | 0 | 0 |
attr |
Cohere2ForCausalLM.tie_word_embeddings |
1 | 0 | 0 |
meth |
Cohere2Model.__init__ |
2 | 1 | 0 |
attr |
Cohere2Model.padding_idx |
1 | 0 | 0 |
attr |
Cohere2Model.vocab_size |
1 | 0 | 0 |
attr |
Cohere2Model.embed_tokens |
1 | 0 | 0 |
attr |
Cohere2Model.layers |
1 | 0 | 0 |
attr |
Cohere2Model.norm |
1 | 0 | 0 |
attr |
Cohere2Model.rotary_emb |
1 | 0 | 0 |
attr |
Cohere2Model.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.cohere2.modular_cohere2 (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Cohere2Config.__init__ |
23 | 21 | 0 |
attr |
Cohere2Config.vocab_size |
1 | 0 | 0 |
attr |
Cohere2Config.max_position_embeddings |
1 | 0 | 0 |
attr |
Cohere2Config.hidden_size |
1 | 0 | 0 |
attr |
Cohere2Config.logit_scale |
1 | 0 | 0 |
attr |
Cohere2Config.intermediate_size |
1 | 0 | 0 |
attr |
Cohere2Config.num_hidden_layers |
1 | 0 | 0 |
attr |
Cohere2Config.num_attention_heads |
1 | 0 | 0 |
attr |
Cohere2Config.num_key_value_heads |
1 | 0 | 0 |
attr |
Cohere2Config.hidden_act |
1 | 0 | 0 |
attr |
Cohere2Config.initializer_range |
1 | 0 | 0 |
attr |
Cohere2Config.layer_norm_eps |
1 | 0 | 0 |
attr |
Cohere2Config.use_cache |
1 | 0 | 0 |
attr |
Cohere2Config.attention_bias |
1 | 0 | 0 |
attr |
Cohere2Config.attention_dropout |
1 | 0 | 0 |
attr |
Cohere2Config.sliding_window |
1 | 0 | 0 |
attr |
Cohere2Config.layer_types |
1 | 0 | 0 |
attr |
Cohere2Config.head_dim |
1 | 0 | 0 |
attr |
Cohere2Config.pad_token_id |
1 | 0 | 0 |
attr |
Cohere2Config.bos_token_id |
1 | 0 | 0 |
attr |
Cohere2Config.eos_token_id |
1 | 0 | 0 |
attr |
Cohere2Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
Cohere2Config.rope_parameters |
1 | 0 | 0 |
meth |
Cohere2Model.__init__ |
2 | 1 | 0 |
attr |
Cohere2Model.norm |
1 | 0 | 0 |
transformers.models.cohere2_vision.configuration_cohere2_vision (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Cohere2VisionConfig.__init__ |
8 | 0 | 0 |
attr |
Cohere2VisionConfig.downsample_factor |
1 | 0 | 0 |
attr |
Cohere2VisionConfig.image_token_id |
1 | 0 | 0 |
attr |
Cohere2VisionConfig.alignment_intermediate_size |
1 | 0 | 0 |
attr |
Cohere2VisionConfig.vision_config |
1 | 0 | 0 |
attr |
Cohere2VisionConfig.text_config |
1 | 0 | 0 |
attr |
Cohere2VisionConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.cohere2_vision.image_processing_cohere2_vision_fast (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Cohere2VisionImageProcessorFast.__init__ |
2 | 1 | 0 |
meth |
Cohere2VisionImageProcessorFast.crop_image_to_patches |
7 | 6 | 0 |
meth |
Cohere2VisionImageProcessorFast._preprocess |
18 | 17 | 0 |
meth |
Cohere2VisionImageProcessorFast.get_number_of_image_patches |
4 | 2 | 0 |
transformers.models.cohere2_vision.modeling_cohere2_vision (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Cohere2VisionForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
Cohere2VisionForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Cohere2VisionForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Cohere2VisionForConditionalGeneration.prepare_inputs_for_generation |
10 | 0 | 0 |
attr |
Cohere2VisionForConditionalGeneration.model |
1 | 0 | 0 |
attr |
Cohere2VisionForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
Cohere2VisionModel.__init__ |
2 | 1 | 0 |
meth |
Cohere2VisionModel.get_input_embeddings |
1 | 0 | 0 |
meth |
Cohere2VisionModel.set_input_embeddings |
2 | 0 | 0 |
meth |
Cohere2VisionModel.get_placeholder_mask |
4 | 3 | 0 |
attr |
Cohere2VisionModel.vision_tower |
1 | 0 | 0 |
attr |
Cohere2VisionModel.multi_modal_projector |
1 | 0 | 0 |
attr |
Cohere2VisionModel.language_model |
1 | 0 | 0 |
transformers.models.cohere2_vision.modular_cohere2_vision (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Cohere2VisionImageProcessorFast.__init__ |
2 | 1 | 0 |
transformers.models.cohere2_vision.processing_cohere2_vision (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Cohere2VisionProcessor.__init__ |
5 | 0 | 0 |
meth |
Cohere2VisionProcessor._get_num_multimodal_tokens |
3 | 0 | 0 |
meth |
Cohere2VisionProcessor.batch_decode |
3 | 0 | 0 |
meth |
Cohere2VisionProcessor.decode |
3 | 0 | 0 |
prop |
Cohere2VisionProcessor.model_input_names |
1 | 0 | 0 |
attr |
Cohere2VisionProcessor.patch_size |
1 | 0 | 0 |
attr |
Cohere2VisionProcessor.boi_token |
1 | 0 | 0 |
attr |
Cohere2VisionProcessor.eoi_token |
1 | 0 | 0 |
attr |
Cohere2VisionProcessor.image_token |
1 | 0 | 0 |
attr |
Cohere2VisionProcessor.img_line_break_token |
1 | 0 | 0 |
attr |
Cohere2VisionProcessor.image_token_id |
1 | 0 | 0 |
attr |
Cohere2VisionProcessor.image_ids |
1 | 0 | 0 |
transformers.models.colmodernvbert.configuration_colmodernvbert (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ColModernVBertConfig.__init__ |
5 | 2 | 0 |
meth |
ColModernVBertConfig.get_text_config |
3 | 1 | 0 |
attr |
ColModernVBertConfig.vlm_config |
1 | 0 | 0 |
attr |
ColModernVBertConfig.embedding_dim |
1 | 0 | 0 |
attr |
ColModernVBertConfig.initializer_range |
1 | 0 | 0 |
transformers.models.colmodernvbert.modeling_colmodernvbert (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ColModernVBertForRetrieval.__init__ |
2 | 1 | 0 |
meth |
ColModernVBertForRetrieval.get_input_embeddings |
1 | 0 | 0 |
meth |
ColModernVBertForRetrieval.set_input_embeddings |
2 | 0 | 0 |
meth |
ColModernVBertForRetrieval.get_output_embeddings |
1 | 0 | 0 |
meth |
ColModernVBertForRetrieval.set_output_embeddings |
2 | 0 | 0 |
attr |
ColModernVBertForRetrieval.vocab_size |
1 | 0 | 0 |
attr |
ColModernVBertForRetrieval.vlm |
1 | 0 | 0 |
attr |
ColModernVBertForRetrieval.embedding_dim |
1 | 0 | 0 |
attr |
ColModernVBertForRetrieval.embedding_proj_layer |
1 | 0 | 0 |
meth |
ColModernVBertPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.colmodernvbert.modular_colmodernvbert (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ColModernVBertConfig.__init__ |
5 | 2 | 0 |
attr |
ColModernVBertConfig.vlm_config |
1 | 0 | 0 |
attr |
ColModernVBertConfig.embedding_dim |
1 | 0 | 0 |
attr |
ColModernVBertConfig.initializer_range |
1 | 0 | 0 |
meth |
ColModernVBertForRetrieval.init |
2 | 1 | 0 |
attr |
ColModernVBertForRetrieval.vlm |
1 | 0 | 0 |
meth |
ColModernVBertProcessor.init |
8 | 3 | 0 |
attr |
ColModernVBertProcessor.visual_prompt_prefix |
1 | 0 | 0 |
attr |
ColModernVBertProcessor.query_prefix |
1 | 0 | 0 |
attr |
ColModernVBertProcessor.query_augmentation_token |
1 | 0 | 0 |
transformers.models.colmodernvbert.processing_colmodernvbert (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ColModernVBertProcessor.init |
8 | 3 | 0 |
meth |
ColModernVBertProcessor._extract_images_from_prompts |
2 | 0 | 0 |
meth |
ColModernVBertProcessor._get_num_multimodal_tokens |
3 | 0 | 0 |
attr |
ColModernVBertProcessor.fake_image_token |
1 | 0 | 0 |
attr |
ColModernVBertProcessor.image_token |
1 | 0 | 0 |
attr |
ColModernVBertProcessor.end_of_utterance_token |
1 | 0 | 0 |
attr |
ColModernVBertProcessor.global_image_tag |
1 | 0 | 0 |
attr |
ColModernVBertProcessor.image_seq_len |
1 | 0 | 0 |
attr |
ColModernVBertProcessor.image_token_id |
1 | 0 | 0 |
attr |
ColModernVBertProcessor.fake_image_token_id |
1 | 0 | 0 |
attr |
ColModernVBertProcessor.global_image_token_id |
1 | 0 | 0 |
attr |
ColModernVBertProcessor.row_col_ids |
1 | 0 | 0 |
attr |
ColModernVBertProcessor.visual_prompt_prefix |
1 | 0 | 0 |
attr |
ColModernVBertProcessor.query_prefix |
1 | 0 | 0 |
attr |
ColModernVBertProcessor.query_augmentation_token |
1 | 0 | 0 |
transformers.models.colpali.configuration_colpali (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ColPaliConfig.init |
5 | 1 | 0 |
attr |
ColPaliConfig.vlm_config |
1 | 0 | 0 |
attr |
ColPaliConfig.text_config |
1 | 0 | 0 |
attr |
ColPaliConfig.embedding_dim |
1 | 0 | 0 |
transformers.models.colpali.modeling_colpali (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ColPaliPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
ColPaliForRetrieval.init |
2 | 1 | 0 |
meth |
ColPaliForRetrieval.forward |
8 | 7 | 0 |
meth |
ColPaliForRetrieval.get_input_embeddings |
1 | 0 | 0 |
meth |
ColPaliForRetrieval.set_input_embeddings |
2 | 0 | 0 |
meth |
ColPaliForRetrieval.get_output_embeddings |
1 | 0 | 0 |
meth |
ColPaliForRetrieval.set_output_embeddings |
2 | 0 | 0 |
attr |
ColPaliForRetrieval.vocab_size |
1 | 0 | 0 |
attr |
ColPaliForRetrieval.vlm |
1 | 0 | 0 |
attr |
ColPaliForRetrieval.embedding_dim |
1 | 0 | 0 |
attr |
ColPaliForRetrieval.embedding_proj_layer |
1 | 0 | 0 |
transformers.models.colpali.modular_colpali (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ColPaliProcessor.init |
6 | 2 | 0 |
attr |
ColPaliProcessor.visual_prompt_prefix |
1 | 0 | 0 |
attr |
ColPaliProcessor.query_prefix |
1 | 0 | 0 |
transformers.models.colpali.processing_colpali (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ColPaliProcessor.init |
6 | 2 | 0 |
meth |
ColPaliProcessor._get_num_multimodal_tokens |
3 | 0 | 0 |
prop |
ColPaliProcessor.model_input_names |
1 | 0 | 0 |
attr |
ColPaliProcessor.visual_prompt_prefix |
1 | 0 | 0 |
attr |
ColPaliProcessor.query_prefix |
1 | 0 | 0 |
attr |
ColPaliProcessor.image_seq_length |
1 | 0 | 0 |
attr |
ColPaliProcessor.image_token_id |
1 | 0 | 0 |
attr |
ColPaliProcessor.image_token |
1 | 0 | 0 |
transformers.models.colqwen2.configuration_colqwen2 (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ColQwen2Config.init |
5 | 2 | 0 |
meth |
ColQwen2Config.get_text_config |
3 | 1 | 0 |
attr |
ColQwen2Config.vlm_config |
1 | 0 | 0 |
attr |
ColQwen2Config.embedding_dim |
1 | 0 | 0 |
attr |
ColQwen2Config.initializer_range |
1 | 0 | 0 |
transformers.models.colqwen2.modeling_colqwen2 (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ColQwen2ForRetrieval.init |
2 | 1 | 0 |
meth |
ColQwen2ForRetrieval.forward |
15 | 14 | 0 |
meth |
ColQwen2ForRetrieval.get_input_embeddings |
1 | 0 | 0 |
meth |
ColQwen2ForRetrieval.set_input_embeddings |
2 | 0 | 0 |
meth |
ColQwen2ForRetrieval.get_output_embeddings |
1 | 0 | 0 |
meth |
ColQwen2ForRetrieval.set_output_embeddings |
2 | 0 | 0 |
attr |
ColQwen2ForRetrieval.vocab_size |
1 | 0 | 0 |
attr |
ColQwen2ForRetrieval.vlm |
1 | 0 | 0 |
attr |
ColQwen2ForRetrieval.embedding_dim |
1 | 0 | 0 |
attr |
ColQwen2ForRetrieval.embedding_proj_layer |
1 | 0 | 0 |
meth |
ColQwen2PreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.colqwen2.modular_colqwen2 (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ColQwen2Processor.init |
7 | 2 | 0 |
meth |
ColQwen2Processor._get_num_multimodal_tokens |
3 | 0 | 0 |
prop |
ColQwen2Processor.model_input_names |
1 | 0 | 0 |
attr |
ColQwen2Processor.image_token |
1 | 0 | 0 |
attr |
ColQwen2Processor.video_token |
1 | 0 | 0 |
attr |
ColQwen2Processor.visual_prompt_prefix |
1 | 0 | 0 |
attr |
ColQwen2Processor.query_prefix |
1 | 0 | 0 |
meth |
ColQwen2ForRetrieval.init |
2 | 1 | 0 |
meth |
ColQwen2ForRetrieval.forward |
15 | 14 | 0 |
transformers.models.colqwen2.processing_colqwen2 (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ColQwen2Processor.init |
7 | 2 | 0 |
meth |
ColQwen2Processor._get_num_multimodal_tokens |
3 | 0 | 0 |
prop |
ColQwen2Processor.model_input_names |
1 | 0 | 0 |
attr |
ColQwen2Processor.image_token |
1 | 0 | 0 |
attr |
ColQwen2Processor.video_token |
1 | 0 | 0 |
attr |
ColQwen2Processor.visual_prompt_prefix |
1 | 0 | 0 |
attr |
ColQwen2Processor.query_prefix |
1 | 0 | 0 |
transformers.models.conditional_detr.configuration_conditional_detr (63 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ConditionalDetrConfig.init |
33 | 0 | 0 |
attr |
ConditionalDetrConfig.backbone_config |
1 | 0 | 0 |
attr |
ConditionalDetrConfig.num_channels |
1 | 0 | 0 |
attr |
ConditionalDetrConfig.num_queries |
1 | 0 | 0 |
attr |
ConditionalDetrConfig.d_model |
1 | 0 | 0 |
attr |
ConditionalDetrConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
ConditionalDetrConfig.encoder_layers |
1 | 0 | 0 |
attr |
ConditionalDetrConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
ConditionalDetrConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
ConditionalDetrConfig.decoder_layers |
1 | 0 | 0 |
attr |
ConditionalDetrConfig.decoder_attention_heads |
1 | 0 | 0 |
attr |
ConditionalDetrConfig.dropout |
1 | 0 | 0 |
attr |
ConditionalDetrConfig.attention_dropout |
1 | 0 | 0 |
attr |
ConditionalDetrConfig.activation_dropout |
1 | 0 | 0 |
attr |
ConditionalDetrConfig.activation_function |
1 | 0 | 0 |
attr |
ConditionalDetrConfig.init_std |
1 | 0 | 0 |
attr |
ConditionalDetrConfig.init_xavier_std |
1 | 0 | 0 |
attr |
ConditionalDetrConfig.encoder_layerdrop |
1 | 0 | 0 |
attr |
ConditionalDetrConfig.decoder_layerdrop |
1 | 0 | 0 |
attr |
ConditionalDetrConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
ConditionalDetrConfig.auxiliary_loss |
1 | 0 | 0 |
attr |
ConditionalDetrConfig.position_embedding_type |
1 | 0 | 0 |
attr |
ConditionalDetrConfig.class_cost |
1 | 0 | 0 |
attr |
ConditionalDetrConfig.bbox_cost |
1 | 0 | 0 |
attr |
ConditionalDetrConfig.giou_cost |
1 | 0 | 0 |
attr |
ConditionalDetrConfig.mask_loss_coefficient |
1 | 0 | 0 |
attr |
ConditionalDetrConfig.dice_loss_coefficient |
1 | 0 | 0 |
attr |
ConditionalDetrConfig.cls_loss_coefficient |
1 | 0 | 0 |
attr |
ConditionalDetrConfig.bbox_loss_coefficient |
1 | 0 | 0 |
attr |
ConditionalDetrConfig.giou_loss_coefficient |
1 | 0 | 0 |
attr |
ConditionalDetrConfig.focal_alpha |
1 | 0 | 0 |
transformers.models.conditional_detr.image_processing_conditional_detr (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ConditionalDetrImageProcessor.init |
14 | 13 | 0 |
meth |
ConditionalDetrImageProcessor.resize |
7 | 6 | 0 |
meth |
ConditionalDetrImageProcessor.resize_annotation |
5 | 2 | 0 |
meth |
ConditionalDetrImageProcessor._update_annotation_for_padded_image |
6 | 4 | 0 |
meth |
ConditionalDetrImageProcessor.preprocess |
21 | 19 | 0 |
meth |
ConditionalDetrImageProcessor.post_process_object_detection |
5 | 3 | 0 |
meth |
ConditionalDetrImageProcessor.post_process_semantic_segmentation |
3 | 1 | 0 |
meth |
ConditionalDetrImageProcessor.post_process_instance_segmentation |
7 | 6 | 0 |
meth |
ConditionalDetrImageProcessor.post_process_panoptic_segmentation |
7 | 6 | 0 |
attr |
ConditionalDetrImageProcessor.format |
1 | 0 | 0 |
attr |
ConditionalDetrImageProcessor.do_resize |
1 | 0 | 0 |
attr |
ConditionalDetrImageProcessor.size |
1 | 0 | 0 |
attr |
ConditionalDetrImageProcessor.resample |
1 | 0 | 0 |
attr |
ConditionalDetrImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
ConditionalDetrImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
ConditionalDetrImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
ConditionalDetrImageProcessor.do_convert_annotations |
1 | 0 | 0 |
attr |
ConditionalDetrImageProcessor.image_mean |
1 | 0 | 0 |
attr |
ConditionalDetrImageProcessor.image_std |
1 | 0 | 0 |
attr |
ConditionalDetrImageProcessor.do_pad |
1 | 0 | 0 |
attr |
ConditionalDetrImageProcessor.pad_size |
1 | 0 | 0 |
transformers.models.conditional_detr.image_processing_conditional_detr_fast (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ConditionalDetrImageProcessorFast.resize |
5 | 4 | 0 |
meth |
ConditionalDetrImageProcessorFast.resize_annotation |
6 | 5 | 0 |
meth |
ConditionalDetrImageProcessorFast._update_annotation_for_padded_image |
6 | 4 | 0 |
meth |
ConditionalDetrImageProcessorFast.pad |
6 | 5 | 0 |
meth |
ConditionalDetrImageProcessorFast._preprocess |
19 | 18 | 0 |
meth |
ConditionalDetrImageProcessorFast.post_process_object_detection |
5 | 3 | 0 |
meth |
ConditionalDetrImageProcessorFast.post_process_semantic_segmentation |
3 | 1 | 0 |
meth |
ConditionalDetrImageProcessorFast.post_process_instance_segmentation |
7 | 6 | 0 |
meth |
ConditionalDetrImageProcessorFast.post_process_panoptic_segmentation |
7 | 6 | 0 |
attr |
ConditionalDetrImageProcessorFast.size |
1 | 0 | 0 |
attr |
ConditionalDetrImageProcessorFast.do_convert_annotations |
1 | 0 | 0 |
transformers.models.conditional_detr.modeling_conditional_detr (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ConditionalDetrPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
ConditionalDetrForSegmentation.init |
2 | 1 | 0 |
attr |
ConditionalDetrForSegmentation.conditional_detr |
1 | 0 | 0 |
attr |
ConditionalDetrForSegmentation.mask_head |
1 | 0 | 0 |
attr |
ConditionalDetrForSegmentation.bbox_attention |
1 | 0 | 0 |
meth |
ConditionalDetrModel.init |
2 | 1 | 0 |
meth |
ConditionalDetrModel.freeze_backbone |
1 | 0 | 0 |
meth |
ConditionalDetrModel.unfreeze_backbone |
1 | 0 | 0 |
attr |
ConditionalDetrModel.backbone |
1 | 0 | 0 |
attr |
ConditionalDetrModel.query_position_embeddings |
1 | 0 | 0 |
attr |
ConditionalDetrModel.input_projection |
1 | 0 | 0 |
attr |
ConditionalDetrModel.encoder |
1 | 0 | 0 |
attr |
ConditionalDetrModel.decoder |
1 | 0 | 0 |
attr |
ConditionalDetrModel.position_embedding |
1 | 0 | 0 |
meth |
ConditionalDetrForObjectDetection.init |
2 | 1 | 0 |
meth |
ConditionalDetrForObjectDetection._set_aux_loss |
3 | 0 | 0 |
attr |
ConditionalDetrForObjectDetection.model |
1 | 0 | 0 |
attr |
ConditionalDetrForObjectDetection.class_labels_classifier |
1 | 0 | 0 |
attr |
ConditionalDetrForObjectDetection.bbox_predictor |
1 | 0 | 0 |
transformers.models.conditional_detr.modular_conditional_detr (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ConditionalDetrImageProcessorFast.post_process_object_detection |
5 | 3 | 0 |
meth |
ConditionalDetrImageProcessorFast.post_process_semantic_segmentation |
3 | 1 | 0 |
meth |
ConditionalDetrModel.init |
2 | 1 | 0 |
attr |
ConditionalDetrModel.query_position_embeddings |
1 | 0 | 0 |
meth |
ConditionalDetrForObjectDetection.init |
2 | 1 | 0 |
meth |
ConditionalDetrForObjectDetection._set_aux_loss |
3 | 0 | 0 |
attr |
ConditionalDetrForObjectDetection.class_labels_classifier |
1 | 0 | 0 |
transformers.models.convbert.configuration_convbert (48 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ConvBertConfig.init |
25 | 0 | 0 |
attr |
ConvBertConfig.pad_token_id |
1 | 0 | 0 |
attr |
ConvBertConfig.bos_token_id |
1 | 0 | 0 |
attr |
ConvBertConfig.eos_token_id |
1 | 0 | 0 |
attr |
ConvBertConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
ConvBertConfig.is_decoder |
1 | 0 | 0 |
attr |
ConvBertConfig.add_cross_attention |
1 | 0 | 0 |
attr |
ConvBertConfig.vocab_size |
1 | 0 | 0 |
attr |
ConvBertConfig.hidden_size |
1 | 0 | 0 |
attr |
ConvBertConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
ConvBertConfig.num_attention_heads |
1 | 0 | 0 |
attr |
ConvBertConfig.intermediate_size |
1 | 0 | 0 |
attr |
ConvBertConfig.hidden_act |
1 | 0 | 0 |
attr |
ConvBertConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
ConvBertConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
ConvBertConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
ConvBertConfig.type_vocab_size |
1 | 0 | 0 |
attr |
ConvBertConfig.initializer_range |
1 | 0 | 0 |
attr |
ConvBertConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
ConvBertConfig.embedding_size |
1 | 0 | 0 |
attr |
ConvBertConfig.head_ratio |
1 | 0 | 0 |
attr |
ConvBertConfig.conv_kernel_size |
1 | 0 | 0 |
attr |
ConvBertConfig.num_groups |
1 | 0 | 0 |
attr |
ConvBertConfig.classifier_dropout |
1 | 0 | 0 |
transformers.models.convbert.modeling_convbert (57 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ConvBertPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
ConvBertForSequenceClassification.init |
2 | 0 | 0 |
meth |
ConvBertForSequenceClassification.forward |
11 | 10 | 0 |
attr |
ConvBertForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
ConvBertForSequenceClassification.convbert |
1 | 0 | 0 |
attr |
ConvBertForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
ConvBertForMultipleChoice.init |
2 | 0 | 0 |
meth |
ConvBertForMultipleChoice.forward |
11 | 10 | 0 |
attr |
ConvBertForMultipleChoice.convbert |
1 | 0 | 0 |
attr |
ConvBertForMultipleChoice.sequence_summary |
1 | 0 | 0 |
attr |
ConvBertForMultipleChoice.classifier |
1 | 0 | 0 |
meth |
ConvBertForQuestionAnswering.init |
2 | 0 | 0 |
meth |
ConvBertForQuestionAnswering.forward |
12 | 11 | 0 |
attr |
ConvBertForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
ConvBertForQuestionAnswering.convbert |
1 | 0 | 0 |
attr |
ConvBertForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
ConvBertForMaskedLM.init |
2 | 0 | 0 |
meth |
ConvBertForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
ConvBertForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
meth |
ConvBertForMaskedLM.forward |
11 | 10 | 0 |
attr |
ConvBertForMaskedLM.convbert |
1 | 0 | 0 |
attr |
ConvBertForMaskedLM.generator_predictions |
1 | 0 | 0 |
attr |
ConvBertForMaskedLM.generator_lm_head |
1 | 0 | 0 |
meth |
ConvBertModel.init |
2 | 0 | 0 |
meth |
ConvBertModel.get_input_embeddings |
1 | 0 | 0 |
meth |
ConvBertModel.set_input_embeddings |
2 | 0 | 0 |
meth |
ConvBertModel.forward |
10 | 9 | 0 |
attr |
ConvBertModel.embeddings |
1 | 0 | 0 |
attr |
ConvBertModel.encoder |
1 | 0 | 0 |
attr |
ConvBertModel.embeddings_project |
1 | 0 | 0 |
meth |
ConvBertLayer.init |
2 | 0 | 0 |
meth |
ConvBertLayer.feed_forward_chunk |
2 | 0 | 0 |
attr |
ConvBertLayer.chunk_size_feed_forward |
1 | 0 | 0 |
attr |
ConvBertLayer.seq_len_dim |
1 | 0 | 0 |
attr |
ConvBertLayer.attention |
1 | 0 | 0 |
attr |
ConvBertLayer.is_decoder |
1 | 0 | 0 |
attr |
ConvBertLayer.add_cross_attention |
1 | 0 | 0 |
attr |
ConvBertLayer.intermediate |
1 | 0 | 0 |
attr |
ConvBertLayer.output |
1 | 0 | 0 |
attr |
ConvBertLayer.crossattention |
1 | 0 | 0 |
meth |
ConvBertForTokenClassification.init |
2 | 0 | 0 |
meth |
ConvBertForTokenClassification.forward |
11 | 10 | 0 |
attr |
ConvBertForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
ConvBertForTokenClassification.convbert |
1 | 0 | 0 |
attr |
ConvBertForTokenClassification.dropout |
1 | 0 | 0 |
attr |
ConvBertForTokenClassification.classifier |
1 | 0 | 0 |
transformers.models.convnext.configuration_convnext (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ConvNextConfig.init |
15 | 0 | 0 |
attr |
ConvNextConfig.num_channels |
1 | 0 | 0 |
attr |
ConvNextConfig.patch_size |
1 | 0 | 0 |
attr |
ConvNextConfig.num_stages |
1 | 0 | 0 |
attr |
ConvNextConfig.hidden_sizes |
1 | 0 | 0 |
attr |
ConvNextConfig.depths |
1 | 0 | 0 |
attr |
ConvNextConfig.hidden_act |
1 | 0 | 0 |
attr |
ConvNextConfig.initializer_range |
1 | 0 | 0 |
attr |
ConvNextConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
ConvNextConfig.layer_scale_init_value |
1 | 0 | 0 |
attr |
ConvNextConfig.drop_path_rate |
1 | 0 | 0 |
attr |
ConvNextConfig.image_size |
1 | 0 | 0 |
attr |
ConvNextConfig.stage_names |
1 | 0 | 0 |
transformers.models.convnext.image_processing_convnext (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ConvNextImageProcessor.init |
11 | 10 | 0 |
meth |
ConvNextImageProcessor.resize |
8 | 7 | 0 |
attr |
ConvNextImageProcessor.do_resize |
1 | 0 | 0 |
attr |
ConvNextImageProcessor.size |
1 | 0 | 0 |
attr |
ConvNextImageProcessor.crop_pct |
1 | 0 | 0 |
attr |
ConvNextImageProcessor.resample |
1 | 0 | 0 |
attr |
ConvNextImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
ConvNextImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
ConvNextImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
ConvNextImageProcessor.image_mean |
1 | 0 | 0 |
attr |
ConvNextImageProcessor.image_std |
1 | 0 | 0 |
transformers.models.convnext.image_processing_convnext_fast (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ConvNextImageProcessorFast.init |
2 | 1 | 0 |
meth |
ConvNextImageProcessorFast.resize |
6 | 5 | 0 |
meth |
ConvNextImageProcessorFast._preprocess |
16 | 15 | 0 |
transformers.models.convnext.modeling_convnext (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ConvNextModel.init |
2 | 0 | 0 |
meth |
ConvNextModel.forward |
4 | 3 | 0 |
attr |
ConvNextModel.embeddings |
1 | 0 | 0 |
attr |
ConvNextModel.encoder |
1 | 0 | 0 |
attr |
ConvNextModel.layernorm |
1 | 0 | 0 |
meth |
ConvNextBackbone.init |
2 | 0 | 0 |
meth |
ConvNextBackbone.forward |
4 | 3 | 0 |
attr |
ConvNextBackbone.embeddings |
1 | 0 | 0 |
attr |
ConvNextBackbone.encoder |
1 | 0 | 0 |
attr |
ConvNextBackbone.num_features |
1 | 0 | 0 |
attr |
ConvNextBackbone.hidden_states_norms |
1 | 0 | 0 |
meth |
ConvNextForImageClassification.init |
2 | 0 | 0 |
meth |
ConvNextForImageClassification.forward |
4 | 3 | 0 |
attr |
ConvNextForImageClassification.num_labels |
1 | 0 | 0 |
attr |
ConvNextForImageClassification.convnext |
1 | 0 | 0 |
attr |
ConvNextForImageClassification.classifier |
1 | 0 | 0 |
meth |
ConvNextPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.convnextv2.configuration_convnextv2 (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ConvNextV2Config.init |
14 | 0 | 0 |
attr |
ConvNextV2Config.num_channels |
1 | 0 | 0 |
attr |
ConvNextV2Config.patch_size |
1 | 0 | 0 |
attr |
ConvNextV2Config.num_stages |
1 | 0 | 0 |
attr |
ConvNextV2Config.hidden_sizes |
1 | 0 | 0 |
attr |
ConvNextV2Config.depths |
1 | 0 | 0 |
attr |
ConvNextV2Config.hidden_act |
1 | 0 | 0 |
attr |
ConvNextV2Config.initializer_range |
1 | 0 | 0 |
attr |
ConvNextV2Config.layer_norm_eps |
1 | 0 | 0 |
attr |
ConvNextV2Config.drop_path_rate |
1 | 0 | 0 |
attr |
ConvNextV2Config.image_size |
1 | 0 | 0 |
attr |
ConvNextV2Config.stage_names |
1 | 0 | 0 |
transformers.models.convnextv2.modeling_convnextv2 (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ConvNextV2ForImageClassification.init |
2 | 0 | 0 |
meth |
ConvNextV2ForImageClassification.forward |
4 | 3 | 0 |
attr |
ConvNextV2ForImageClassification.num_labels |
1 | 0 | 0 |
attr |
ConvNextV2ForImageClassification.convnextv2 |
1 | 0 | 0 |
attr |
ConvNextV2ForImageClassification.classifier |
1 | 0 | 0 |
meth |
ConvNextV2PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
ConvNextV2Backbone.init |
2 | 0 | 0 |
meth |
ConvNextV2Backbone.forward |
4 | 3 | 0 |
attr |
ConvNextV2Backbone.embeddings |
1 | 0 | 0 |
attr |
ConvNextV2Backbone.encoder |
1 | 0 | 0 |
attr |
ConvNextV2Backbone.num_features |
1 | 0 | 0 |
attr |
ConvNextV2Backbone.hidden_states_norms |
1 | 0 | 0 |
meth |
ConvNextV2Model.init |
2 | 0 | 0 |
meth |
ConvNextV2Model.forward |
4 | 3 | 0 |
attr |
ConvNextV2Model.embeddings |
1 | 0 | 0 |
attr |
ConvNextV2Model.encoder |
1 | 0 | 0 |
attr |
ConvNextV2Model.layernorm |
1 | 0 | 0 |
transformers.models.cpm.tokenization_cpm (37 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CpmTokenizer.init |
15 | 2 | 0 |
meth |
CpmTokenizer.get_vocab |
1 | 0 | 0 |
meth |
CpmTokenizer.getstate |
1 | 0 | 0 |
meth |
CpmTokenizer.setstate |
2 | 0 | 0 |
meth |
CpmTokenizer.preprocess_text |
2 | 0 | 0 |
meth |
CpmTokenizer._convert_token_to_id |
2 | 0 | 0 |
meth |
CpmTokenizer._convert_id_to_token |
2 | 0 | 0 |
meth |
CpmTokenizer.convert_tokens_to_string |
2 | 0 | 0 |
meth |
CpmTokenizer._decode |
3 | 0 | 0 |
prop |
CpmTokenizer.vocab_size |
1 | 0 | 0 |
attr |
CpmTokenizer.sp_model_kwargs |
1 | 0 | 0 |
attr |
CpmTokenizer.do_lower_case |
1 | 0 | 0 |
attr |
CpmTokenizer.remove_space |
1 | 0 | 0 |
attr |
CpmTokenizer.keep_accents |
1 | 0 | 0 |
attr |
CpmTokenizer.vocab_file |
1 | 0 | 0 |
attr |
CpmTokenizer.sp_model |
1 | 0 | 0 |
attr |
CpmTokenizer.jieba |
1 | 0 | 0 |
attr |
CpmTokenizer.translator |
1 | 0 | 0 |
transformers.models.cpm.tokenization_cpm_fast (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CpmTokenizerFast.init |
15 | 0 | 0 |
meth |
CpmTokenizerFast._batch_encode_plus |
4 | 0 | 0 |
meth |
CpmTokenizerFast._decode |
3 | 0 | 0 |
attr |
CpmTokenizerFast.do_lower_case |
1 | 0 | 0 |
attr |
CpmTokenizerFast.remove_space |
1 | 0 | 0 |
attr |
CpmTokenizerFast.keep_accents |
1 | 0 | 0 |
attr |
CpmTokenizerFast.vocab_file |
1 | 0 | 0 |
attr |
CpmTokenizerFast.jieba |
1 | 0 | 0 |
attr |
CpmTokenizerFast.translator |
1 | 0 | 0 |
transformers.models.cpmant.configuration_cpmant (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CpmAntConfig.init |
18 | 15 | 0 |
attr |
CpmAntConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
CpmAntConfig.prompt_types |
1 | 0 | 0 |
attr |
CpmAntConfig.prompt_length |
1 | 0 | 0 |
attr |
CpmAntConfig.segment_types |
1 | 0 | 0 |
attr |
CpmAntConfig.hidden_size |
1 | 0 | 0 |
attr |
CpmAntConfig.num_attention_heads |
1 | 0 | 0 |
attr |
CpmAntConfig.dim_head |
1 | 0 | 0 |
attr |
CpmAntConfig.dim_ff |
1 | 0 | 0 |
attr |
CpmAntConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
CpmAntConfig.position_bias_num_buckets |
1 | 0 | 0 |
attr |
CpmAntConfig.position_bias_max_distance |
1 | 0 | 0 |
attr |
CpmAntConfig.dropout_p |
1 | 0 | 0 |
attr |
CpmAntConfig.eps |
1 | 0 | 0 |
attr |
CpmAntConfig.use_cache |
1 | 0 | 0 |
attr |
CpmAntConfig.vocab_size |
1 | 0 | 0 |
attr |
CpmAntConfig.init_std |
1 | 0 | 0 |
transformers.models.cpmant.modeling_cpmant (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CpmAntModel.init |
2 | 1 | 0 |
meth |
CpmAntModel.get_input_embeddings |
1 | 0 | 0 |
meth |
CpmAntModel.set_input_embeddings |
3 | 0 | 0 |
meth |
CpmAntModel._prepare_attention_mask |
5 | 0 | 0 |
meth |
CpmAntModel.forward |
9 | 8 | 0 |
attr |
CpmAntModel.encoder |
1 | 0 | 0 |
attr |
CpmAntModel.segment_embedding |
1 | 0 | 0 |
attr |
CpmAntModel.input_embedding |
1 | 0 | 0 |
attr |
CpmAntModel.position_bias |
1 | 0 | 0 |
attr |
CpmAntModel.prompt_length |
1 | 0 | 0 |
attr |
CpmAntModel.vocab_size |
1 | 0 | 0 |
meth |
CpmAntPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
CpmAntForCausalLM.init |
2 | 1 | 0 |
meth |
CpmAntForCausalLM.forward |
12 | 11 | 0 |
meth |
CpmAntForCausalLM.get_input_embeddings |
1 | 0 | 0 |
meth |
CpmAntForCausalLM.set_input_embeddings |
2 | 0 | 0 |
attr |
CpmAntForCausalLM.cpmant |
1 | 0 | 0 |
attr |
CpmAntForCausalLM.lm_head |
1 | 0 | 0 |
transformers.models.cpmant.tokenization_cpmant (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CpmAntTokenizer.init |
12 | 0 | 0 |
meth |
CpmAntTokenizer.get_vocab |
1 | 0 | 0 |
meth |
CpmAntTokenizer._tokenize |
2 | 0 | 0 |
meth |
CpmAntTokenizer._decode |
3 | 0 | 0 |
meth |
CpmAntTokenizer.check |
2 | 0 | 0 |
meth |
CpmAntTokenizer._convert_token_to_id |
2 | 0 | 0 |
meth |
CpmAntTokenizer._convert_id_to_token |
2 | 0 | 0 |
prop |
CpmAntTokenizer.bod_token_id |
1 | 0 | 0 |
prop |
CpmAntTokenizer.eod_token_id |
1 | 0 | 0 |
prop |
CpmAntTokenizer.newline_id |
1 | 0 | 0 |
attr |
CpmAntTokenizer.bod_token |
1 | 0 | 0 |
attr |
CpmAntTokenizer.eod_token |
1 | 0 | 0 |
attr |
CpmAntTokenizer.encoder |
1 | 0 | 0 |
attr |
CpmAntTokenizer.decoder |
1 | 0 | 0 |
attr |
CpmAntTokenizer.wordpiece_tokenizer |
1 | 0 | 0 |
transformers.models.csm.configuration_csm (54 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CsmConfig.init |
30 | 28 | 0 |
attr |
CsmConfig.text_vocab_size |
1 | 0 | 0 |
attr |
CsmConfig.num_codebooks |
1 | 0 | 0 |
attr |
CsmConfig.audio_token_id |
1 | 0 | 0 |
attr |
CsmConfig.audio_eos_token_id |
1 | 0 | 0 |
attr |
CsmConfig.codebook_pad_token_id |
1 | 0 | 0 |
attr |
CsmConfig.codebook_eos_token_id |
1 | 0 | 0 |
attr |
CsmConfig.tie_codebooks_embeddings |
1 | 0 | 0 |
attr |
CsmConfig.vocab_size |
1 | 0 | 0 |
attr |
CsmConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
CsmConfig.hidden_size |
1 | 0 | 0 |
attr |
CsmConfig.intermediate_size |
1 | 0 | 0 |
attr |
CsmConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
CsmConfig.num_attention_heads |
1 | 0 | 0 |
attr |
CsmConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
CsmConfig.hidden_act |
1 | 0 | 0 |
attr |
CsmConfig.initializer_range |
1 | 0 | 0 |
attr |
CsmConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
CsmConfig.use_cache |
1 | 0 | 0 |
attr |
CsmConfig.attention_bias |
1 | 0 | 0 |
attr |
CsmConfig.attention_dropout |
1 | 0 | 0 |
attr |
CsmConfig.mlp_bias |
1 | 0 | 0 |
attr |
CsmConfig.head_dim |
1 | 0 | 0 |
attr |
CsmConfig.rope_parameters |
1 | 0 | 0 |
attr |
CsmConfig.pad_token_id |
1 | 0 | 0 |
attr |
CsmConfig.bos_token_id |
1 | 0 | 0 |
attr |
CsmConfig.eos_token_id |
1 | 0 | 0 |
attr |
CsmConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
CsmConfig.depth_decoder_config |
1 | 0 | 0 |
attr |
CsmConfig.codec_config |
1 | 0 | 0 |
meth |
CsmDepthDecoderConfig.init |
23 | 21 | 0 |
attr |
CsmDepthDecoderConfig.pad_token_id |
1 | 0 | 0 |
attr |
CsmDepthDecoderConfig.bos_token_id |
1 | 0 | 0 |
attr |
CsmDepthDecoderConfig.eos_token_id |
1 | 0 | 0 |
attr |
CsmDepthDecoderConfig.num_codebooks |
1 | 0 | 0 |
attr |
CsmDepthDecoderConfig.vocab_size |
1 | 0 | 0 |
attr |
CsmDepthDecoderConfig.backbone_hidden_size |
1 | 0 | 0 |
attr |
CsmDepthDecoderConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
CsmDepthDecoderConfig.hidden_size |
1 | 0 | 0 |
attr |
CsmDepthDecoderConfig.intermediate_size |
1 | 0 | 0 |
attr |
CsmDepthDecoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
CsmDepthDecoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
CsmDepthDecoderConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
CsmDepthDecoderConfig.hidden_act |
1 | 0 | 0 |
attr |
CsmDepthDecoderConfig.initializer_range |
1 | 0 | 0 |
attr |
CsmDepthDecoderConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
CsmDepthDecoderConfig.use_cache |
1 | 0 | 0 |
attr |
CsmDepthDecoderConfig.attention_bias |
1 | 0 | 0 |
attr |
CsmDepthDecoderConfig.attention_dropout |
1 | 0 | 0 |
attr |
CsmDepthDecoderConfig.mlp_bias |
1 | 0 | 0 |
attr |
CsmDepthDecoderConfig.head_dim |
1 | 0 | 0 |
attr |
CsmDepthDecoderConfig.rope_parameters |
1 | 0 | 0 |
transformers.models.csm.generation_csm (5 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
CsmGenerationMixin._get_stopping_criteria |
3 | 1 | 0 |
meth |
CsmGenerationMixin._prepare_generation_config |
3 | 3 | 1 |
meth |
CsmGenerationMixin._sample |
8 | 7 | 0 |
meth |
CsmGenerationMixin.generate |
11 | 10 | 0 |
transformers.models.csm.modeling_csm (47 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CsmDepthDecoderForCausalLM.init |
2 | 0 | 0 |
meth |
CsmDepthDecoderForCausalLM.prepare_inputs_for_generation |
8 | 6 | 0 |
attr |
CsmDepthDecoderForCausalLM.model |
1 | 0 | 0 |
attr |
CsmDepthDecoderForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
CsmDepthDecoderForCausalLM.codebooks_head |
1 | 0 | 0 |
meth |
CsmDepthDecoderModel.init |
2 | 0 | 0 |
attr |
CsmDepthDecoderModel.padding_idx |
1 | 0 | 0 |
attr |
CsmDepthDecoderModel.vocab_size |
1 | 0 | 0 |
attr |
CsmDepthDecoderModel.embed_tokens |
1 | 0 | 0 |
attr |
CsmDepthDecoderModel.layers |
1 | 0 | 0 |
attr |
CsmDepthDecoderModel.norm |
1 | 0 | 0 |
attr |
CsmDepthDecoderModel.rotary_emb |
1 | 0 | 0 |
attr |
CsmDepthDecoderModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
CsmDepthDecoderModel.inputs_embeds_projector |
1 | 0 | 0 |
meth |
CsmForConditionalGeneration.init |
2 | 0 | 0 |
meth |
CsmForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
CsmForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
CsmForConditionalGeneration.from_pretrained |
3 | 0 | 0 |
meth |
CsmForConditionalGeneration.save_pretrained |
3 | 0 | 0 |
meth |
CsmForConditionalGeneration.prepare_inputs_for_generation |
8 | 6 | 0 |
attr |
CsmForConditionalGeneration.vocab_size |
1 | 0 | 0 |
attr |
CsmForConditionalGeneration.lm_head |
1 | 0 | 0 |
attr |
CsmForConditionalGeneration.embed_text_tokens |
1 | 0 | 0 |
attr |
CsmForConditionalGeneration.backbone_model |
1 | 0 | 0 |
attr |
CsmForConditionalGeneration.depth_decoder |
1 | 0 | 0 |
attr |
CsmForConditionalGeneration.codec_model |
1 | 0 | 0 |
meth |
CsmBackboneModel.init |
2 | 0 | 0 |
attr |
CsmBackboneModel.padding_idx |
1 | 0 | 0 |
attr |
CsmBackboneModel.vocab_size |
1 | 0 | 0 |
attr |
CsmBackboneModel.embed_tokens |
1 | 0 | 0 |
attr |
CsmBackboneModel.layers |
1 | 0 | 0 |
attr |
CsmBackboneModel.norm |
1 | 0 | 0 |
attr |
CsmBackboneModel.rotary_emb |
1 | 0 | 0 |
attr |
CsmBackboneModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
CsmPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.csm.modular_csm (36 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CsmDepthDecoderForCausalLM.init |
2 | 0 | 0 |
meth |
CsmDepthDecoderForCausalLM.prepare_inputs_for_generation |
8 | 6 | 0 |
attr |
CsmDepthDecoderForCausalLM.codebooks_head |
1 | 0 | 0 |
attr |
CsmDepthDecoderForCausalLM.model |
1 | 0 | 0 |
meth |
CsmDepthDecoderModel.init |
2 | 0 | 0 |
attr |
CsmDepthDecoderModel.embed_tokens |
1 | 0 | 0 |
attr |
CsmDepthDecoderModel.inputs_embeds_projector |
1 | 0 | 0 |
meth |
CsmForConditionalGeneration.init |
2 | 0 | 0 |
meth |
CsmForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
CsmForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
CsmForConditionalGeneration.from_pretrained |
3 | 0 | 0 |
meth |
CsmForConditionalGeneration.save_pretrained |
3 | 0 | 0 |
meth |
CsmForConditionalGeneration.prepare_inputs_for_generation |
8 | 6 | 0 |
attr |
CsmForConditionalGeneration.vocab_size |
1 | 0 | 0 |
attr |
CsmForConditionalGeneration.lm_head |
1 | 0 | 0 |
attr |
CsmForConditionalGeneration.embed_text_tokens |
1 | 0 | 0 |
attr |
CsmForConditionalGeneration.backbone_model |
1 | 0 | 0 |
attr |
CsmForConditionalGeneration.depth_decoder |
1 | 0 | 0 |
attr |
CsmForConditionalGeneration.codec_model |
1 | 0 | 0 |
meth |
CsmBackboneModel.init |
2 | 0 | 0 |
meth |
CsmBackboneModel.forward |
2 | 0 | 0 |
attr |
CsmBackboneModel.embed_tokens |
1 | 0 | 0 |
meth |
CsmPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.csm.processing_csm (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CsmProcessor.__init__ |
4 | 0 | 0 |
meth |
CsmProcessor._get_encoded_length |
6 | 0 | 0 |
meth |
CsmProcessor.save_audio |
4 | 3 | 0 |
meth |
CsmProcessor.__call__ |
6 | 5 | 0 |
prop |
CsmProcessor.model_input_names |
1 | 0 | 0 |
attr |
CsmProcessor.audio_token |
1 | 0 | 0 |
attr |
CsmProcessor.audio_token_id |
1 | 0 | 0 |
attr |
CsmProcessor.audio_eos_token |
1 | 0 | 0 |
attr |
CsmProcessor.audio_eos_token_id |
1 | 0 | 0 |
transformers.models.ctrl.configuration_ctrl (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CTRLConfig.__init__ |
17 | 0 | 0 |
attr |
CTRLConfig.vocab_size |
1 | 0 | 0 |
attr |
CTRLConfig.n_positions |
1 | 0 | 0 |
attr |
CTRLConfig.n_embd |
1 | 0 | 0 |
attr |
CTRLConfig.n_layer |
1 | 0 | 0 |
attr |
CTRLConfig.n_head |
1 | 0 | 0 |
attr |
CTRLConfig.dff |
1 | 0 | 0 |
attr |
CTRLConfig.resid_pdrop |
1 | 0 | 0 |
attr |
CTRLConfig.embd_pdrop |
1 | 0 | 0 |
attr |
CTRLConfig.layer_norm_epsilon |
1 | 0 | 0 |
attr |
CTRLConfig.initializer_range |
1 | 0 | 0 |
attr |
CTRLConfig.pad_token_id |
1 | 0 | 0 |
attr |
CTRLConfig.bos_token_id |
1 | 0 | 0 |
attr |
CTRLConfig.eos_token_id |
1 | 0 | 0 |
attr |
CTRLConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
CTRLConfig.use_cache |
1 | 0 | 0 |
transformers.models.ctrl.modeling_ctrl (31 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CTRLModel.__init__ |
2 | 0 | 0 |
meth |
CTRLModel.get_input_embeddings |
1 | 0 | 0 |
meth |
CTRLModel.set_input_embeddings |
2 | 0 | 0 |
meth |
CTRLModel.forward |
13 | 12 | 0 |
attr |
CTRLModel.d_model_size |
1 | 0 | 0 |
attr |
CTRLModel.num_layers |
1 | 0 | 0 |
attr |
CTRLModel.w |
1 | 0 | 0 |
attr |
CTRLModel.dropout |
1 | 0 | 0 |
attr |
CTRLModel.h |
1 | 0 | 0 |
attr |
CTRLModel.layernorm |
1 | 0 | 0 |
meth |
CTRLPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
CTRLForSequenceClassification.__init__ |
2 | 0 | 0 |
meth |
CTRLForSequenceClassification.forward |
13 | 12 | 0 |
attr |
CTRLForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
CTRLForSequenceClassification.transformer |
1 | 0 | 0 |
attr |
CTRLForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
CTRLLMHeadModel.__init__ |
2 | 0 | 0 |
meth |
CTRLLMHeadModel.forward |
15 | 14 | 0 |
meth |
CTRLLMHeadModel.prepare_inputs_for_generation |
6 | 0 | 0 |
attr |
CTRLLMHeadModel.transformer |
1 | 0 | 0 |
attr |
CTRLLMHeadModel.lm_head |
1 | 0 | 0 |
transformers.models.ctrl.tokenization_ctrl (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CTRLTokenizer.__init__ |
5 | 0 | 0 |
meth |
CTRLTokenizer.get_vocab |
1 | 0 | 0 |
meth |
CTRLTokenizer.bpe |
2 | 0 | 0 |
meth |
CTRLTokenizer._tokenize |
2 | 0 | 0 |
meth |
CTRLTokenizer._convert_token_to_id |
2 | 0 | 0 |
meth |
CTRLTokenizer._convert_id_to_token |
2 | 0 | 0 |
meth |
CTRLTokenizer.convert_tokens_to_string |
2 | 0 | 0 |
prop |
CTRLTokenizer.vocab_size |
1 | 0 | 0 |
attr |
CTRLTokenizer.decoder |
1 | 0 | 0 |
attr |
CTRLTokenizer.bpe_ranks |
1 | 0 | 0 |
attr |
CTRLTokenizer.cache |
1 | 0 | 0 |
attr |
CTRLTokenizer.add_bpe_version_header |
1 | 0 | 0 |
attr |
CTRLTokenizer.encoder |
1 | 0 | 0 |
transformers.models.cvt.configuration_cvt (44 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CvtConfig.__init__ |
23 | 0 | 0 |
attr |
CvtConfig.num_channels |
1 | 0 | 0 |
attr |
CvtConfig.patch_sizes |
1 | 0 | 0 |
attr |
CvtConfig.patch_stride |
1 | 0 | 0 |
attr |
CvtConfig.patch_padding |
1 | 0 | 0 |
attr |
CvtConfig.embed_dim |
1 | 0 | 0 |
attr |
CvtConfig.num_heads |
1 | 0 | 0 |
attr |
CvtConfig.depth |
1 | 0 | 0 |
attr |
CvtConfig.mlp_ratio |
1 | 0 | 0 |
attr |
CvtConfig.attention_drop_rate |
1 | 0 | 0 |
attr |
CvtConfig.drop_rate |
1 | 0 | 0 |
attr |
CvtConfig.drop_path_rate |
1 | 0 | 0 |
attr |
CvtConfig.qkv_bias |
1 | 0 | 0 |
attr |
CvtConfig.cls_token |
1 | 0 | 0 |
attr |
CvtConfig.qkv_projection_method |
1 | 0 | 0 |
attr |
CvtConfig.kernel_qkv |
1 | 0 | 0 |
attr |
CvtConfig.padding_kv |
1 | 0 | 0 |
attr |
CvtConfig.stride_kv |
1 | 0 | 0 |
attr |
CvtConfig.padding_q |
1 | 0 | 0 |
attr |
CvtConfig.stride_q |
1 | 0 | 0 |
attr |
CvtConfig.initializer_range |
1 | 0 | 0 |
attr |
CvtConfig.layer_norm_eps |
1 | 0 | 0 |
transformers.models.cvt.modeling_cvt (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CvtModel.__init__ |
3 | 0 | 0 |
meth |
CvtModel.forward |
5 | 4 | 0 |
attr |
CvtModel.encoder |
1 | 0 | 0 |
meth |
CvtPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
CvtForImageClassification.__init__ |
2 | 0 | 0 |
meth |
CvtForImageClassification.forward |
6 | 5 | 0 |
attr |
CvtForImageClassification.num_labels |
1 | 0 | 0 |
attr |
CvtForImageClassification.cvt |
1 | 0 | 0 |
attr |
CvtForImageClassification.layernorm |
1 | 0 | 0 |
attr |
CvtForImageClassification.classifier |
1 | 0 | 0 |
transformers.models.cwm.configuration_cwm (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CwmConfig.__init__ |
24 | 21 | 0 |
attr |
CwmConfig.sliding_window |
1 | 0 | 0 |
attr |
CwmConfig.layer_types |
1 | 0 | 0 |
attr |
CwmConfig.vocab_size |
1 | 0 | 0 |
attr |
CwmConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
CwmConfig.hidden_size |
1 | 0 | 0 |
attr |
CwmConfig.intermediate_size |
1 | 0 | 0 |
attr |
CwmConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
CwmConfig.num_attention_heads |
1 | 0 | 0 |
attr |
CwmConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
CwmConfig.hidden_act |
1 | 0 | 0 |
attr |
CwmConfig.initializer_range |
1 | 0 | 0 |
attr |
CwmConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
CwmConfig.pretraining_tp |
1 | 0 | 0 |
attr |
CwmConfig.use_cache |
1 | 0 | 0 |
attr |
CwmConfig.attention_dropout |
1 | 0 | 0 |
attr |
CwmConfig.mlp_bias |
1 | 0 | 0 |
attr |
CwmConfig.head_dim |
1 | 0 | 0 |
attr |
CwmConfig.rope_parameters |
1 | 0 | 0 |
attr |
CwmConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
CwmConfig.pad_token_id |
1 | 0 | 0 |
attr |
CwmConfig.bos_token_id |
1 | 0 | 0 |
attr |
CwmConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.cwm.modeling_cwm (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CwmModel.__init__ |
2 | 1 | 0 |
attr |
CwmModel.padding_idx |
1 | 0 | 0 |
attr |
CwmModel.vocab_size |
1 | 0 | 0 |
attr |
CwmModel.embed_tokens |
1 | 0 | 0 |
attr |
CwmModel.layers |
1 | 0 | 0 |
attr |
CwmModel.norm |
1 | 0 | 0 |
attr |
CwmModel.rotary_emb |
1 | 0 | 0 |
attr |
CwmModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
CwmForCausalLM.__init__ |
2 | 0 | 0 |
attr |
CwmForCausalLM.model |
1 | 0 | 0 |
attr |
CwmForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
CwmForCausalLM.lm_head |
1 | 0 | 0 |
transformers.models.cwm.modular_cwm (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CwmModel.__init__ |
2 | 1 | 0 |
attr |
CwmModel.layers |
1 | 0 | 0 |
meth |
CwmConfig.__init__ |
24 | 21 | 0 |
attr |
CwmConfig.sliding_window |
1 | 0 | 0 |
attr |
CwmConfig.layer_types |
1 | 0 | 0 |
transformers.models.d_fine.configuration_d_fine (131 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DFineConfig.__init__ |
67 | 0 | 0 |
attr |
DFineConfig.initializer_range |
1 | 0 | 0 |
attr |
DFineConfig.initializer_bias_prior_prob |
1 | 0 | 0 |
attr |
DFineConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
DFineConfig.batch_norm_eps |
1 | 0 | 0 |
attr |
DFineConfig.backbone_config |
1 | 0 | 0 |
attr |
DFineConfig.freeze_backbone_batch_norms |
1 | 0 | 0 |
attr |
DFineConfig.encoder_hidden_dim |
1 | 0 | 0 |
attr |
DFineConfig.encoder_in_channels |
1 | 0 | 0 |
attr |
DFineConfig.feat_strides |
1 | 0 | 0 |
attr |
DFineConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
DFineConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
DFineConfig.dropout |
1 | 0 | 0 |
attr |
DFineConfig.activation_dropout |
1 | 0 | 0 |
attr |
DFineConfig.encode_proj_layers |
1 | 0 | 0 |
attr |
DFineConfig.encoder_layers |
1 | 0 | 0 |
attr |
DFineConfig.positional_encoding_temperature |
1 | 0 | 0 |
attr |
DFineConfig.eval_size |
1 | 0 | 0 |
attr |
DFineConfig.normalize_before |
1 | 0 | 0 |
attr |
DFineConfig.encoder_activation_function |
1 | 0 | 0 |
attr |
DFineConfig.activation_function |
1 | 0 | 0 |
attr |
DFineConfig.hidden_expansion |
1 | 0 | 0 |
attr |
DFineConfig.d_model |
1 | 0 | 0 |
attr |
DFineConfig.num_queries |
1 | 0 | 0 |
attr |
DFineConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
DFineConfig.decoder_in_channels |
1 | 0 | 0 |
attr |
DFineConfig.num_feature_levels |
1 | 0 | 0 |
attr |
DFineConfig.decoder_n_points |
1 | 0 | 0 |
attr |
DFineConfig.decoder_layers |
1 | 0 | 0 |
attr |
DFineConfig.decoder_attention_heads |
1 | 0 | 0 |
attr |
DFineConfig.decoder_activation_function |
1 | 0 | 0 |
attr |
DFineConfig.attention_dropout |
1 | 0 | 0 |
attr |
DFineConfig.num_denoising |
1 | 0 | 0 |
attr |
DFineConfig.label_noise_ratio |
1 | 0 | 0 |
attr |
DFineConfig.box_noise_scale |
1 | 0 | 0 |
attr |
DFineConfig.learn_initial_query |
1 | 0 | 0 |
attr |
DFineConfig.anchor_image_size |
1 | 0 | 0 |
attr |
DFineConfig.auxiliary_loss |
1 | 0 | 0 |
attr |
DFineConfig.with_box_refine |
1 | 0 | 0 |
attr |
DFineConfig.matcher_alpha |
1 | 0 | 0 |
attr |
DFineConfig.matcher_gamma |
1 | 0 | 0 |
attr |
DFineConfig.matcher_class_cost |
1 | 0 | 0 |
attr |
DFineConfig.matcher_bbox_cost |
1 | 0 | 0 |
attr |
DFineConfig.matcher_giou_cost |
1 | 0 | 0 |
attr |
DFineConfig.use_focal_loss |
1 | 0 | 0 |
attr |
DFineConfig.focal_loss_alpha |
1 | 0 | 0 |
attr |
DFineConfig.focal_loss_gamma |
1 | 0 | 0 |
attr |
DFineConfig.weight_loss_vfl |
1 | 0 | 0 |
attr |
DFineConfig.weight_loss_bbox |
1 | 0 | 0 |
attr |
DFineConfig.weight_loss_giou |
1 | 0 | 0 |
attr |
DFineConfig.weight_loss_fgl |
1 | 0 | 0 |
attr |
DFineConfig.weight_loss_ddf |
1 | 0 | 0 |
attr |
DFineConfig.eos_coefficient |
1 | 0 | 0 |
attr |
DFineConfig.eval_idx |
1 | 0 | 0 |
attr |
DFineConfig.layer_scale |
1 | 0 | 0 |
attr |
DFineConfig.max_num_bins |
1 | 0 | 0 |
attr |
DFineConfig.reg_scale |
1 | 0 | 0 |
attr |
DFineConfig.depth_mult |
1 | 0 | 0 |
attr |
DFineConfig.decoder_offset_scale |
1 | 0 | 0 |
attr |
DFineConfig.decoder_method |
1 | 0 | 0 |
attr |
DFineConfig.top_prob_values |
1 | 0 | 0 |
attr |
DFineConfig.lqe_hidden_dim |
1 | 0 | 0 |
attr |
DFineConfig.lqe_layers |
1 | 0 | 0 |
attr |
DFineConfig.up |
1 | 0 | 0 |
attr |
DFineConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.d_fine.modeling_d_fine (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DFineModel.__init__ |
2 | 1 | 0 |
meth |
DFineModel.freeze_backbone |
1 | 0 | 0 |
meth |
DFineModel.unfreeze_backbone |
1 | 0 | 0 |
meth |
DFineModel.generate_anchors |
5 | 0 | 0 |
attr |
DFineModel.backbone |
1 | 0 | 0 |
attr |
DFineModel.encoder_input_proj |
1 | 0 | 0 |
attr |
DFineModel.encoder |
1 | 0 | 0 |
attr |
DFineModel.enc_output |
1 | 0 | 0 |
attr |
DFineModel.enc_score_head |
1 | 0 | 0 |
attr |
DFineModel.enc_bbox_head |
1 | 0 | 0 |
attr |
DFineModel.decoder |
1 | 0 | 0 |
attr |
DFineModel.decoder_input_proj |
1 | 0 | 0 |
attr |
DFineModel.denoising_class_embed |
1 | 0 | 0 |
attr |
DFineModel.weight_embedding |
1 | 0 | 0 |
meth |
DFineForObjectDetection.__init__ |
2 | 1 | 0 |
meth |
DFineForObjectDetection._set_aux_loss |
3 | 0 | 0 |
attr |
DFineForObjectDetection.eval_idx |
1 | 0 | 0 |
attr |
DFineForObjectDetection.model |
1 | 0 | 0 |
attr |
DFineForObjectDetection.class_embed |
1 | 0 | 0 |
attr |
DFineForObjectDetection.bbox_embed |
1 | 0 | 0 |
meth |
DFinePreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.d_fine.modular_d_fine (144 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DFineModel.__init__ |
2 | 1 | 0 |
attr |
DFineModel.encoder |
1 | 0 | 0 |
attr |
DFineModel.decoder_input_proj |
1 | 0 | 0 |
attr |
DFineModel.decoder |
1 | 0 | 0 |
meth |
DFineForObjectDetection.__init__ |
2 | 1 | 0 |
meth |
DFineForObjectDetection.forward |
2 | 0 | 0 |
attr |
DFineForObjectDetection.eval_idx |
1 | 0 | 0 |
attr |
DFineForObjectDetection.model |
1 | 0 | 0 |
attr |
DFineForObjectDetection.class_embed |
1 | 0 | 0 |
attr |
DFineForObjectDetection.bbox_embed |
1 | 0 | 0 |
meth |
DFineConfig.__init__ |
67 | 0 | 0 |
attr |
DFineConfig.initializer_range |
1 | 0 | 0 |
attr |
DFineConfig.initializer_bias_prior_prob |
1 | 0 | 0 |
attr |
DFineConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
DFineConfig.batch_norm_eps |
1 | 0 | 0 |
attr |
DFineConfig.backbone_config |
1 | 0 | 0 |
attr |
DFineConfig.freeze_backbone_batch_norms |
1 | 0 | 0 |
attr |
DFineConfig.encoder_hidden_dim |
1 | 0 | 0 |
attr |
DFineConfig.encoder_in_channels |
1 | 0 | 0 |
attr |
DFineConfig.feat_strides |
1 | 0 | 0 |
attr |
DFineConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
DFineConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
DFineConfig.dropout |
1 | 0 | 0 |
attr |
DFineConfig.activation_dropout |
1 | 0 | 0 |
attr |
DFineConfig.encode_proj_layers |
1 | 0 | 0 |
attr |
DFineConfig.encoder_layers |
1 | 0 | 0 |
attr |
DFineConfig.positional_encoding_temperature |
1 | 0 | 0 |
attr |
DFineConfig.eval_size |
1 | 0 | 0 |
attr |
DFineConfig.normalize_before |
1 | 0 | 0 |
attr |
DFineConfig.encoder_activation_function |
1 | 0 | 0 |
attr |
DFineConfig.activation_function |
1 | 0 | 0 |
attr |
DFineConfig.hidden_expansion |
1 | 0 | 0 |
attr |
DFineConfig.d_model |
1 | 0 | 0 |
attr |
DFineConfig.num_queries |
1 | 0 | 0 |
attr |
DFineConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
DFineConfig.decoder_in_channels |
1 | 0 | 0 |
attr |
DFineConfig.num_feature_levels |
1 | 0 | 0 |
attr |
DFineConfig.decoder_n_points |
1 | 0 | 0 |
attr |
DFineConfig.decoder_layers |
1 | 0 | 0 |
attr |
DFineConfig.decoder_attention_heads |
1 | 0 | 0 |
attr |
DFineConfig.decoder_activation_function |
1 | 0 | 0 |
attr |
DFineConfig.attention_dropout |
1 | 0 | 0 |
attr |
DFineConfig.num_denoising |
1 | 0 | 0 |
attr |
DFineConfig.label_noise_ratio |
1 | 0 | 0 |
attr |
DFineConfig.box_noise_scale |
1 | 0 | 0 |
attr |
DFineConfig.learn_initial_query |
1 | 0 | 0 |
attr |
DFineConfig.anchor_image_size |
1 | 0 | 0 |
attr |
DFineConfig.auxiliary_loss |
1 | 0 | 0 |
attr |
DFineConfig.with_box_refine |
1 | 0 | 0 |
attr |
DFineConfig.matcher_alpha |
1 | 0 | 0 |
attr |
DFineConfig.matcher_gamma |
1 | 0 | 0 |
attr |
DFineConfig.matcher_class_cost |
1 | 0 | 0 |
attr |
DFineConfig.matcher_bbox_cost |
1 | 0 | 0 |
attr |
DFineConfig.matcher_giou_cost |
1 | 0 | 0 |
attr |
DFineConfig.use_focal_loss |
1 | 0 | 0 |
attr |
DFineConfig.focal_loss_alpha |
1 | 0 | 0 |
attr |
DFineConfig.focal_loss_gamma |
1 | 0 | 0 |
attr |
DFineConfig.weight_loss_vfl |
1 | 0 | 0 |
attr |
DFineConfig.weight_loss_bbox |
1 | 0 | 0 |
attr |
DFineConfig.weight_loss_giou |
1 | 0 | 0 |
attr |
DFineConfig.weight_loss_fgl |
1 | 0 | 0 |
attr |
DFineConfig.weight_loss_ddf |
1 | 0 | 0 |
attr |
DFineConfig.eos_coefficient |
1 | 0 | 0 |
attr |
DFineConfig.eval_idx |
1 | 0 | 0 |
attr |
DFineConfig.layer_scale |
1 | 0 | 0 |
attr |
DFineConfig.max_num_bins |
1 | 0 | 0 |
attr |
DFineConfig.reg_scale |
1 | 0 | 0 |
attr |
DFineConfig.depth_mult |
1 | 0 | 0 |
attr |
DFineConfig.decoder_offset_scale |
1 | 0 | 0 |
attr |
DFineConfig.decoder_method |
1 | 0 | 0 |
attr |
DFineConfig.top_prob_values |
1 | 0 | 0 |
attr |
DFineConfig.lqe_hidden_dim |
1 | 0 | 0 |
attr |
DFineConfig.lqe_layers |
1 | 0 | 0 |
attr |
DFineConfig.up |
1 | 0 | 0 |
attr |
DFineConfig.tie_word_embeddings |
1 | 0 | 0 |
meth |
DFinePreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.dab_detr.configuration_dab_detr (71 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DabDetrConfig.__init__ |
37 | 0 | 0 |
attr |
DabDetrConfig.backbone_config |
1 | 0 | 0 |
attr |
DabDetrConfig.num_queries |
1 | 0 | 0 |
attr |
DabDetrConfig.hidden_size |
1 | 0 | 0 |
attr |
DabDetrConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
DabDetrConfig.encoder_layers |
1 | 0 | 0 |
attr |
DabDetrConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
DabDetrConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
DabDetrConfig.decoder_layers |
1 | 0 | 0 |
attr |
DabDetrConfig.decoder_attention_heads |
1 | 0 | 0 |
attr |
DabDetrConfig.dropout |
1 | 0 | 0 |
attr |
DabDetrConfig.attention_dropout |
1 | 0 | 0 |
attr |
DabDetrConfig.activation_dropout |
1 | 0 | 0 |
attr |
DabDetrConfig.activation_function |
1 | 0 | 0 |
attr |
DabDetrConfig.init_std |
1 | 0 | 0 |
attr |
DabDetrConfig.init_xavier_std |
1 | 0 | 0 |
attr |
DabDetrConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
DabDetrConfig.auxiliary_loss |
1 | 0 | 0 |
attr |
DabDetrConfig.class_cost |
1 | 0 | 0 |
attr |
DabDetrConfig.bbox_cost |
1 | 0 | 0 |
attr |
DabDetrConfig.giou_cost |
1 | 0 | 0 |
attr |
DabDetrConfig.cls_loss_coefficient |
1 | 0 | 0 |
attr |
DabDetrConfig.bbox_loss_coefficient |
1 | 0 | 0 |
attr |
DabDetrConfig.giou_loss_coefficient |
1 | 0 | 0 |
attr |
DabDetrConfig.focal_alpha |
1 | 0 | 0 |
attr |
DabDetrConfig.query_dim |
1 | 0 | 0 |
attr |
DabDetrConfig.random_refpoints_xy |
1 | 0 | 0 |
attr |
DabDetrConfig.keep_query_pos |
1 | 0 | 0 |
attr |
DabDetrConfig.num_patterns |
1 | 0 | 0 |
attr |
DabDetrConfig.normalize_before |
1 | 0 | 0 |
attr |
DabDetrConfig.temperature_width |
1 | 0 | 0 |
attr |
DabDetrConfig.temperature_height |
1 | 0 | 0 |
attr |
DabDetrConfig.sine_position_embedding_scale |
1 | 0 | 0 |
attr |
DabDetrConfig.initializer_bias_prior_prob |
1 | 0 | 0 |
attr |
DabDetrConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.dab_detr.modeling_dab_detr (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DabDetrModel.__init__ |
2 | 1 | 0 |
meth |
DabDetrModel.freeze_backbone |
1 | 0 | 0 |
meth |
DabDetrModel.unfreeze_backbone |
1 | 0 | 0 |
meth |
DabDetrModel.forward |
11 | 10 | 0 |
attr |
DabDetrModel.auxiliary_loss |
1 | 0 | 0 |
attr |
DabDetrModel.backbone |
1 | 0 | 0 |
attr |
DabDetrModel.query_refpoint_embeddings |
1 | 0 | 0 |
attr |
DabDetrModel.random_refpoints_xy |
1 | 0 | 0 |
attr |
DabDetrModel.input_projection |
1 | 0 | 0 |
attr |
DabDetrModel.encoder |
1 | 0 | 0 |
attr |
DabDetrModel.decoder |
1 | 0 | 0 |
attr |
DabDetrModel.hidden_size |
1 | 0 | 0 |
attr |
DabDetrModel.num_queries |
1 | 0 | 0 |
attr |
DabDetrModel.num_patterns |
1 | 0 | 0 |
attr |
DabDetrModel.aux_loss |
1 | 0 | 0 |
attr |
DabDetrModel.patterns |
1 | 0 | 0 |
meth |
DabDetrPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
DabDetrForObjectDetection.__init__ |
2 | 1 | 0 |
meth |
DabDetrForObjectDetection._set_aux_loss |
3 | 0 | 0 |
meth |
DabDetrForObjectDetection.forward |
12 | 11 | 0 |
attr |
DabDetrForObjectDetection.auxiliary_loss |
1 | 0 | 0 |
attr |
DabDetrForObjectDetection.query_dim |
1 | 0 | 0 |
attr |
DabDetrForObjectDetection.model |
1 | 0 | 0 |
attr |
DabDetrForObjectDetection.class_embed |
1 | 0 | 0 |
attr |
DabDetrForObjectDetection.bbox_predictor |
1 | 0 | 0 |
transformers.models.dac.configuration_dac (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DacConfig.__init__ |
12 | 0 | 0 |
attr |
DacConfig.encoder_hidden_size |
1 | 0 | 0 |
attr |
DacConfig.downsampling_ratios |
1 | 0 | 0 |
attr |
DacConfig.decoder_hidden_size |
1 | 0 | 0 |
attr |
DacConfig.upsampling_ratios |
1 | 0 | 0 |
attr |
DacConfig.n_codebooks |
1 | 0 | 0 |
attr |
DacConfig.codebook_size |
1 | 0 | 0 |
attr |
DacConfig.codebook_dim |
1 | 0 | 0 |
attr |
DacConfig.quantizer_dropout |
1 | 0 | 0 |
attr |
DacConfig.sampling_rate |
1 | 0 | 0 |
attr |
DacConfig.hidden_size |
1 | 0 | 0 |
attr |
DacConfig.hop_length |
1 | 0 | 0 |
attr |
DacConfig.commitment_loss_weight |
1 | 0 | 0 |
attr |
DacConfig.codebook_loss_weight |
1 | 0 | 0 |
transformers.models.dac.feature_extraction_dac (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DacFeatureExtractor.__init__ |
6 | 4 | 0 |
attr |
DacFeatureExtractor.hop_length |
1 | 0 | 0 |
transformers.models.dac.modeling_dac (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DacPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
DacPreTrainedModel.apply_weight_norm |
1 | 0 | 0 |
meth |
DacPreTrainedModel.remove_weight_norm |
1 | 0 | 0 |
meth |
DacModel.__init__ |
2 | 1 | 0 |
attr |
DacModel.encoder |
1 | 0 | 0 |
attr |
DacModel.decoder |
1 | 0 | 0 |
attr |
DacModel.quantizer |
1 | 0 | 0 |
attr |
DacModel.bits_per_codebook |
1 | 0 | 0 |
transformers.models.data2vec.configuration_data2vec_audio (92 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Data2VecAudioConfig.__init__ |
46 | 0 | 0 |
prop |
Data2VecAudioConfig.inputs_to_logits_ratio |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.pad_token_id |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.bos_token_id |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.eos_token_id |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.hidden_size |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.feat_extract_activation |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.conv_dim |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.conv_stride |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.conv_kernel |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.conv_bias |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.num_conv_pos_embeddings |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.num_conv_pos_embedding_groups |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.conv_pos_kernel_size |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.num_feat_extract_layers |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.intermediate_size |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.hidden_act |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.hidden_dropout |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.attention_dropout |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.activation_dropout |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.feat_proj_dropout |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.final_dropout |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.layerdrop |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.initializer_range |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.vocab_size |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.use_weighted_layer_sum |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.mask_time_prob |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.mask_time_length |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.mask_time_min_masks |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.mask_feature_prob |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.mask_feature_length |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.mask_feature_min_masks |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.ctc_loss_reduction |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.ctc_zero_infinity |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.add_adapter |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.adapter_kernel_size |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.adapter_stride |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.num_adapter_layers |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.output_hidden_size |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.classifier_proj_size |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.tdnn_dim |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.tdnn_kernel |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.tdnn_dilation |
1 | 0 | 0 |
attr |
Data2VecAudioConfig.xvector_output_dim |
1 | 0 | 0 |
transformers.models.data2vec.configuration_data2vec_text (42 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Data2VecTextConfig.__init__ |
22 | 0 | 0 |
attr |
Data2VecTextConfig.pad_token_id |
1 | 0 | 0 |
attr |
Data2VecTextConfig.bos_token_id |
1 | 0 | 0 |
attr |
Data2VecTextConfig.eos_token_id |
1 | 0 | 0 |
attr |
Data2VecTextConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Data2VecTextConfig.is_decoder |
1 | 0 | 0 |
attr |
Data2VecTextConfig.add_cross_attention |
1 | 0 | 0 |
attr |
Data2VecTextConfig.vocab_size |
1 | 0 | 0 |
attr |
Data2VecTextConfig.hidden_size |
1 | 0 | 0 |
attr |
Data2VecTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Data2VecTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Data2VecTextConfig.hidden_act |
1 | 0 | 0 |
attr |
Data2VecTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
Data2VecTextConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
Data2VecTextConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
Data2VecTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Data2VecTextConfig.type_vocab_size |
1 | 0 | 0 |
attr |
Data2VecTextConfig.initializer_range |
1 | 0 | 0 |
attr |
Data2VecTextConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
Data2VecTextConfig.use_cache |
1 | 0 | 0 |
attr |
Data2VecTextConfig.classifier_dropout |
1 | 0 | 0 |
transformers.models.data2vec.configuration_data2vec_vision (56 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Data2VecVisionConfig.__init__ |
29 | 0 | 0 |
attr |
Data2VecVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
Data2VecVisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Data2VecVisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Data2VecVisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
Data2VecVisionConfig.hidden_act |
1 | 0 | 0 |
attr |
Data2VecVisionConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
Data2VecVisionConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
Data2VecVisionConfig.initializer_range |
1 | 0 | 0 |
attr |
Data2VecVisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
Data2VecVisionConfig.image_size |
1 | 0 | 0 |
attr |
Data2VecVisionConfig.patch_size |
1 | 0 | 0 |
attr |
Data2VecVisionConfig.num_channels |
1 | 0 | 0 |
attr |
Data2VecVisionConfig.use_mask_token |
1 | 0 | 0 |
attr |
Data2VecVisionConfig.use_absolute_position_embeddings |
1 | 0 | 0 |
attr |
Data2VecVisionConfig.use_relative_position_bias |
1 | 0 | 0 |
attr |
Data2VecVisionConfig.use_shared_relative_position_bias |
1 | 0 | 0 |
attr |
Data2VecVisionConfig.layer_scale_init_value |
1 | 0 | 0 |
attr |
Data2VecVisionConfig.drop_path_rate |
1 | 0 | 0 |
attr |
Data2VecVisionConfig.use_mean_pooling |
1 | 0 | 0 |
attr |
Data2VecVisionConfig.out_indices |
1 | 0 | 0 |
attr |
Data2VecVisionConfig.pool_scales |
1 | 0 | 0 |
attr |
Data2VecVisionConfig.use_auxiliary_head |
1 | 0 | 0 |
attr |
Data2VecVisionConfig.auxiliary_loss_weight |
1 | 0 | 0 |
attr |
Data2VecVisionConfig.auxiliary_channels |
1 | 0 | 0 |
attr |
Data2VecVisionConfig.auxiliary_num_convs |
1 | 0 | 0 |
attr |
Data2VecVisionConfig.auxiliary_concat_input |
1 | 0 | 0 |
attr |
Data2VecVisionConfig.semantic_loss_ignore_index |
1 | 0 | 0 |
transformers.models.data2vec.modeling_data2vec_audio (52 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Data2VecAudioModel.__init__ |
2 | 1 | 0 |
meth |
Data2VecAudioModel.freeze_feature_encoder |
1 | 0 | 0 |
meth |
Data2VecAudioModel._mask_hidden_states |
4 | 3 | 0 |
meth |
Data2VecAudioModel.forward |
8 | 7 | 0 |
attr |
Data2VecAudioModel.feature_extractor |
1 | 0 | 0 |
attr |
Data2VecAudioModel.feature_projection |
1 | 0 | 0 |
attr |
Data2VecAudioModel.encoder |
1 | 0 | 0 |
attr |
Data2VecAudioModel.adapter |
1 | 0 | 0 |
attr |
Data2VecAudioModel.masked_spec_embed |
1 | 0 | 0 |
meth |
Data2VecAudioForAudioFrameClassification.__init__ |
2 | 0 | 0 |
meth |
Data2VecAudioForAudioFrameClassification.freeze_feature_encoder |
1 | 0 | 0 |
meth |
Data2VecAudioForAudioFrameClassification.freeze_base_model |
1 | 0 | 0 |
meth |
Data2VecAudioForAudioFrameClassification.forward |
8 | 7 | 0 |
attr |
Data2VecAudioForAudioFrameClassification.data2vec_audio |
1 | 0 | 0 |
attr |
Data2VecAudioForAudioFrameClassification.classifier |
1 | 0 | 0 |
attr |
Data2VecAudioForAudioFrameClassification.num_labels |
1 | 0 | 0 |
attr |
Data2VecAudioForAudioFrameClassification.layer_weights |
1 | 0 | 0 |
meth |
Data2VecAudioForCTC.__init__ |
2 | 0 | 0 |
meth |
Data2VecAudioForCTC.freeze_feature_encoder |
1 | 0 | 0 |
meth |
Data2VecAudioForCTC.forward |
8 | 7 | 0 |
attr |
Data2VecAudioForCTC.data2vec_audio |
1 | 0 | 0 |
attr |
Data2VecAudioForCTC.dropout |
1 | 0 | 0 |
attr |
Data2VecAudioForCTC.lm_head |
1 | 0 | 0 |
meth |
Data2VecAudioPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Data2VecAudioPreTrainedModel._get_feat_extract_output_lengths |
3 | 2 | 0 |
meth |
Data2VecAudioPreTrainedModel._get_feature_vector_attention_mask |
4 | 2 | 0 |
meth |
Data2VecAudioForSequenceClassification.__init__ |
2 | 0 | 0 |
meth |
Data2VecAudioForSequenceClassification.freeze_feature_encoder |
1 | 0 | 0 |
meth |
Data2VecAudioForSequenceClassification.freeze_base_model |
1 | 0 | 0 |
meth |
Data2VecAudioForSequenceClassification.forward |
8 | 7 | 0 |
attr |
Data2VecAudioForSequenceClassification.data2vec_audio |
1 | 0 | 0 |
attr |
Data2VecAudioForSequenceClassification.projector |
1 | 0 | 0 |
attr |
Data2VecAudioForSequenceClassification.classifier |
1 | 0 | 0 |
attr |
Data2VecAudioForSequenceClassification.layer_weights |
1 | 0 | 0 |
meth |
Data2VecAudioForXVector.__init__ |
2 | 0 | 0 |
meth |
Data2VecAudioForXVector.freeze_feature_encoder |
1 | 0 | 0 |
meth |
Data2VecAudioForXVector.freeze_base_model |
1 | 0 | 0 |
meth |
Data2VecAudioForXVector._get_tdnn_output_lengths |
2 | 1 | 0 |
meth |
Data2VecAudioForXVector.forward |
8 | 7 | 0 |
attr |
Data2VecAudioForXVector.data2vec_audio |
1 | 0 | 0 |
attr |
Data2VecAudioForXVector.projector |
1 | 0 | 0 |
attr |
Data2VecAudioForXVector.tdnn |
1 | 0 | 0 |
attr |
Data2VecAudioForXVector.feature_extractor |
1 | 0 | 0 |
attr |
Data2VecAudioForXVector.classifier |
1 | 0 | 0 |
attr |
Data2VecAudioForXVector.objective |
1 | 0 | 0 |
attr |
Data2VecAudioForXVector.layer_weights |
1 | 0 | 0 |
transformers.models.data2vec.modeling_data2vec_text (56 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Data2VecTextForTokenClassification.__init__ |
2 | 0 | 0 |
attr |
Data2VecTextForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
Data2VecTextForTokenClassification.data2vec_text |
1 | 0 | 0 |
attr |
Data2VecTextForTokenClassification.dropout |
1 | 0 | 0 |
attr |
Data2VecTextForTokenClassification.classifier |
1 | 0 | 0 |
meth |
Data2VecTextForMultipleChoice.__init__ |
2 | 0 | 0 |
attr |
Data2VecTextForMultipleChoice.data2vec_text |
1 | 0 | 0 |
attr |
Data2VecTextForMultipleChoice.dropout |
1 | 0 | 0 |
attr |
Data2VecTextForMultipleChoice.classifier |
1 | 0 | 0 |
meth |
Data2VecTextForCausalLM.__init__ |
2 | 0 | 0 |
meth |
Data2VecTextForCausalLM.get_output_embeddings |
1 | 0 | 0 |
meth |
Data2VecTextForCausalLM.set_output_embeddings |
2 | 0 | 0 |
attr |
Data2VecTextForCausalLM.data2vec_text |
1 | 0 | 0 |
attr |
Data2VecTextForCausalLM.lm_head |
1 | 0 | 0 |
meth |
Data2VecTextForSequenceClassification.__init__ |
2 | 0 | 0 |
attr |
Data2VecTextForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
Data2VecTextForSequenceClassification.config |
1 | 0 | 0 |
attr |
Data2VecTextForSequenceClassification.data2vec_text |
1 | 0 | 0 |
attr |
Data2VecTextForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
Data2VecTextForQuestionAnswering.__init__ |
2 | 0 | 0 |
attr |
Data2VecTextForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
Data2VecTextForQuestionAnswering.data2vec_text |
1 | 0 | 0 |
attr |
Data2VecTextForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
Data2VecTextModel.__init__ |
3 | 0 | 0 |
meth |
Data2VecTextModel.get_input_embeddings |
1 | 0 | 0 |
meth |
Data2VecTextModel.set_input_embeddings |
2 | 0 | 0 |
meth |
Data2VecTextModel._create_attention_masks |
7 | 0 | 0 |
attr |
Data2VecTextModel.config |
1 | 0 | 0 |
attr |
Data2VecTextModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
Data2VecTextModel.embeddings |
1 | 0 | 0 |
attr |
Data2VecTextModel.encoder |
1 | 0 | 0 |
attr |
Data2VecTextModel.pooler |
1 | 0 | 0 |
meth |
Data2VecTextPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Data2VecTextForMaskedLM.__init__ |
2 | 0 | 0 |
meth |
Data2VecTextForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
Data2VecTextForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
attr |
Data2VecTextForMaskedLM.data2vec_text |
1 | 0 | 0 |
attr |
Data2VecTextForMaskedLM.lm_head |
1 | 0 | 0 |
transformers.models.data2vec.modeling_data2vec_vision (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Data2VecVisionPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Data2VecVisionForImageClassification.forward |
8 | 7 | 0 |
attr |
Data2VecVisionForImageClassification.num_labels |
1 | 0 | 0 |
attr |
Data2VecVisionForImageClassification.data2vec_vision |
1 | 0 | 0 |
attr |
Data2VecVisionForImageClassification.classifier |
1 | 0 | 0 |
meth |
Data2VecVisionForSemanticSegmentation.compute_loss |
4 | 0 | 0 |
meth |
Data2VecVisionForSemanticSegmentation.forward |
8 | 7 | 0 |
attr |
Data2VecVisionForSemanticSegmentation.num_labels |
1 | 0 | 0 |
attr |
Data2VecVisionForSemanticSegmentation.data2vec_vision |
1 | 0 | 0 |
attr |
Data2VecVisionForSemanticSegmentation.fpn1 |
1 | 0 | 0 |
attr |
Data2VecVisionForSemanticSegmentation.fpn2 |
1 | 0 | 0 |
attr |
Data2VecVisionForSemanticSegmentation.fpn3 |
1 | 0 | 0 |
attr |
Data2VecVisionForSemanticSegmentation.fpn4 |
1 | 0 | 0 |
attr |
Data2VecVisionForSemanticSegmentation.decode_head |
1 | 0 | 0 |
attr |
Data2VecVisionForSemanticSegmentation.auxiliary_head |
1 | 0 | 0 |
meth |
Data2VecVisionModel.get_input_embeddings |
1 | 0 | 0 |
meth |
Data2VecVisionModel.forward |
8 | 7 | 0 |
attr |
Data2VecVisionModel.embeddings |
1 | 0 | 0 |
attr |
Data2VecVisionModel.encoder |
1 | 0 | 0 |
attr |
Data2VecVisionModel.layernorm |
1 | 0 | 0 |
attr |
Data2VecVisionModel.pooler |
1 | 0 | 0 |
transformers.models.data2vec.modular_data2vec_audio (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Data2VecAudioModel.__init__ |
2 | 1 | 0 |
meth |
Data2VecAudioModel.freeze_feature_encoder |
1 | 0 | 0 |
meth |
Data2VecAudioModel.forward |
2 | 0 | 0 |
attr |
Data2VecAudioModel.feature_extractor |
1 | 0 | 0 |
attr |
Data2VecAudioModel.feature_projection |
1 | 0 | 0 |
attr |
Data2VecAudioModel.encoder |
1 | 0 | 0 |
attr |
Data2VecAudioModel.adapter |
1 | 0 | 0 |
attr |
Data2VecAudioModel.masked_spec_embed |
1 | 0 | 0 |
meth |
Data2VecAudioForCTC.__init__ |
2 | 0 | 0 |
meth |
Data2VecAudioForCTC.freeze_base_model |
1 | 0 | 0 |
meth |
Data2VecAudioForCTC.tie_weights |
1 | 0 | 0 |
meth |
Data2VecAudioForCTC.forward |
2 | 0 | 0 |
attr |
Data2VecAudioForCTC.data2vec_audio |
1 | 0 | 0 |
attr |
Data2VecAudioForCTC.dropout |
1 | 0 | 0 |
attr |
Data2VecAudioForCTC.lm_head |
1 | 0 | 0 |
meth |
Data2VecAudioPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Data2VecAudioPreTrainedModel._get_adapters |
1 | 0 | 0 |
meth |
Data2VecAudioPreTrainedModel.init_adapter_layers |
1 | 0 | 0 |
meth |
Data2VecAudioPreTrainedModel.load_adapter |
1 | 0 | 0 |
transformers.models.data2vec.modular_data2vec_text (38 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Data2VecTextForTokenClassification.__init__ |
2 | 0 | 0 |
attr |
Data2VecTextForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
Data2VecTextForTokenClassification.data2vec_text |
1 | 0 | 0 |
attr |
Data2VecTextForTokenClassification.dropout |
1 | 0 | 0 |
attr |
Data2VecTextForTokenClassification.classifier |
1 | 0 | 0 |
meth |
Data2VecTextForMultipleChoice.__init__ |
2 | 0 | 0 |
attr |
Data2VecTextForMultipleChoice.data2vec_text |
1 | 0 | 0 |
attr |
Data2VecTextForMultipleChoice.dropout |
1 | 0 | 0 |
attr |
Data2VecTextForMultipleChoice.classifier |
1 | 0 | 0 |
meth |
Data2VecTextForCausalLM.__init__ |
2 | 0 | 0 |
meth |
Data2VecTextForCausalLM.get_output_embeddings |
1 | 0 | 0 |
meth |
Data2VecTextForCausalLM.set_output_embeddings |
2 | 0 | 0 |
attr |
Data2VecTextForCausalLM.data2vec_text |
1 | 0 | 0 |
attr |
Data2VecTextForCausalLM.lm_head |
1 | 0 | 0 |
meth |
Data2VecTextForSequenceClassification.__init__ |
2 | 0 | 0 |
attr |
Data2VecTextForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
Data2VecTextForSequenceClassification.config |
1 | 0 | 0 |
attr |
Data2VecTextForSequenceClassification.data2vec_text |
1 | 0 | 0 |
attr |
Data2VecTextForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
Data2VecTextForQuestionAnswering.__init__ |
2 | 0 | 0 |
attr |
Data2VecTextForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
Data2VecTextForQuestionAnswering.data2vec_text |
1 | 0 | 0 |
attr |
Data2VecTextForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
Data2VecTextPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Data2VecTextForMaskedLM.__init__ |
2 | 0 | 0 |
meth |
Data2VecTextForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
Data2VecTextForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
attr |
Data2VecTextForMaskedLM.data2vec_text |
1 | 0 | 0 |
attr |
Data2VecTextForMaskedLM.lm_head |
1 | 0 | 0 |
transformers.models.dbrx.configuration_dbrx (19 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DbrxConfig.__init__ |
19 | 18 | 1 |
attr |
DbrxConfig.d_model |
1 | 0 | 0 |
attr |
DbrxConfig.n_heads |
1 | 0 | 0 |
attr |
DbrxConfig.n_layers |
1 | 0 | 0 |
attr |
DbrxConfig.max_seq_len |
1 | 0 | 0 |
attr |
DbrxConfig.vocab_size |
1 | 0 | 0 |
attr |
DbrxConfig.resid_pdrop |
1 | 0 | 0 |
attr |
DbrxConfig.emb_pdrop |
1 | 0 | 0 |
attr |
DbrxConfig.use_cache |
1 | 0 | 0 |
attr |
DbrxConfig.initializer_range |
1 | 0 | 0 |
attr |
DbrxConfig.output_router_logits |
1 | 0 | 0 |
attr |
DbrxConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
DbrxConfig.pad_token_id |
1 | 0 | 0 |
attr |
DbrxConfig.bos_token_id |
1 | 0 | 0 |
attr |
DbrxConfig.eos_token_id |
1 | 0 | 0 |
attr |
DbrxConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
DbrxConfig.rope_parameters |
1 | 0 | 0 |
attr |
DbrxConfig.attn_config |
1 | 0 | 0 |
attr |
DbrxConfig.ffn_config |
1 | 0 | 0 |
transformers.models.dbrx.modeling_dbrx (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DbrxPreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
DbrxModel.__init__ |
2 | 1 | 0 |
meth |
DbrxModel.set_input_embeddings |
2 | 1 | 0 |
attr |
DbrxModel.padding_idx |
1 | 0 | 0 |
attr |
DbrxModel.vocab_size |
1 | 0 | 0 |
attr |
DbrxModel.emb_pdrop |
1 | 0 | 0 |
attr |
DbrxModel.rotary_emb |
1 | 0 | 0 |
attr |
DbrxModel.wte |
1 | 0 | 0 |
attr |
DbrxModel.blocks |
1 | 0 | 0 |
attr |
DbrxModel.norm_f |
1 | 0 | 0 |
attr |
DbrxModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
DbrxForCausalLM.__init__ |
2 | 1 | 0 |
meth |
DbrxForCausalLM.set_input_embeddings |
2 | 1 | 0 |
meth |
DbrxForCausalLM.set_output_embeddings |
2 | 1 | 0 |
meth |
DbrxForCausalLM.set_decoder |
2 | 1 | 0 |
attr |
DbrxForCausalLM.transformer |
1 | 0 | 0 |
attr |
DbrxForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
DbrxForCausalLM.lm_head |
1 | 0 | 0 |
attr |
DbrxForCausalLM.router_aux_loss_coef |
1 | 0 | 0 |
attr |
DbrxForCausalLM.num_experts |
1 | 0 | 0 |
attr |
DbrxForCausalLM.num_experts_per_tok |
1 | 0 | 0 |
transformers.models.dbrx.modular_dbrx (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DbrxPreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
DbrxModel.__init__ |
2 | 1 | 0 |
meth |
DbrxModel.set_input_embeddings |
2 | 1 | 0 |
attr |
DbrxModel.padding_idx |
1 | 0 | 0 |
attr |
DbrxModel.vocab_size |
1 | 0 | 0 |
attr |
DbrxModel.emb_pdrop |
1 | 0 | 0 |
attr |
DbrxModel.rotary_emb |
1 | 0 | 0 |
attr |
DbrxModel.wte |
1 | 0 | 0 |
attr |
DbrxModel.blocks |
1 | 0 | 0 |
attr |
DbrxModel.norm_f |
1 | 0 | 0 |
attr |
DbrxModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
DbrxForCausalLM.__init__ |
2 | 1 | 0 |
meth |
DbrxForCausalLM.set_input_embeddings |
2 | 1 | 0 |
meth |
DbrxForCausalLM.set_output_embeddings |
2 | 1 | 0 |
meth |
DbrxForCausalLM.set_decoder |
2 | 1 | 0 |
attr |
DbrxForCausalLM.transformer |
1 | 0 | 0 |
attr |
DbrxForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
DbrxForCausalLM.lm_head |
1 | 0 | 0 |
attr |
DbrxForCausalLM.router_aux_loss_coef |
1 | 0 | 0 |
attr |
DbrxForCausalLM.num_experts |
1 | 0 | 0 |
attr |
DbrxForCausalLM.num_experts_per_tok |
1 | 0 | 0 |
transformers.models.deberta.configuration_deberta (49 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DebertaConfig.__init__ |
25 | 0 | 0 |
attr |
DebertaConfig.hidden_size |
1 | 0 | 0 |
attr |
DebertaConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
DebertaConfig.num_attention_heads |
1 | 0 | 0 |
attr |
DebertaConfig.intermediate_size |
1 | 0 | 0 |
attr |
DebertaConfig.hidden_act |
1 | 0 | 0 |
attr |
DebertaConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
DebertaConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
DebertaConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
DebertaConfig.type_vocab_size |
1 | 0 | 0 |
attr |
DebertaConfig.initializer_range |
1 | 0 | 0 |
attr |
DebertaConfig.relative_attention |
1 | 0 | 0 |
attr |
DebertaConfig.max_relative_positions |
1 | 0 | 0 |
attr |
DebertaConfig.pad_token_id |
1 | 0 | 0 |
attr |
DebertaConfig.bos_token_id |
1 | 0 | 0 |
attr |
DebertaConfig.eos_token_id |
1 | 0 | 0 |
attr |
DebertaConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
DebertaConfig.position_biased_input |
1 | 0 | 0 |
attr |
DebertaConfig.pos_att_type |
1 | 0 | 0 |
attr |
DebertaConfig.vocab_size |
1 | 0 | 0 |
attr |
DebertaConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
DebertaConfig.pooler_hidden_size |
1 | 0 | 0 |
attr |
DebertaConfig.pooler_dropout |
1 | 0 | 0 |
attr |
DebertaConfig.pooler_hidden_act |
1 | 0 | 0 |
attr |
DebertaConfig.legacy |
1 | 0 | 0 |
transformers.models.deberta.modeling_deberta (45 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DebertaPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
DebertaForQuestionAnswering.__init__ |
2 | 0 | 0 |
meth |
DebertaForQuestionAnswering.forward |
12 | 11 | 0 |
attr |
DebertaForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
DebertaForQuestionAnswering.deberta |
1 | 0 | 0 |
attr |
DebertaForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
DebertaForMaskedLM.__init__ |
2 | 0 | 0 |
meth |
DebertaForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
DebertaForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
meth |
DebertaForMaskedLM.forward |
11 | 10 | 0 |
attr |
DebertaForMaskedLM.legacy |
1 | 0 | 0 |
attr |
DebertaForMaskedLM.deberta |
1 | 0 | 0 |
attr |
DebertaForMaskedLM.cls |
1 | 0 | 0 |
attr |
DebertaForMaskedLM.lm_predictions |
1 | 0 | 0 |
meth |
DebertaForTokenClassification.__init__ |
2 | 0 | 0 |
meth |
DebertaForTokenClassification.forward |
11 | 10 | 0 |
attr |
DebertaForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
DebertaForTokenClassification.deberta |
1 | 0 | 0 |
attr |
DebertaForTokenClassification.dropout |
1 | 0 | 0 |
attr |
DebertaForTokenClassification.classifier |
1 | 0 | 0 |
meth |
DebertaForSequenceClassification.__init__ |
2 | 0 | 0 |
meth |
DebertaForSequenceClassification.get_input_embeddings |
1 | 0 | 0 |
meth |
DebertaForSequenceClassification.set_input_embeddings |
2 | 0 | 0 |
meth |
DebertaForSequenceClassification.forward |
11 | 10 | 0 |
attr |
DebertaForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
DebertaForSequenceClassification.deberta |
1 | 0 | 0 |
attr |
DebertaForSequenceClassification.pooler |
1 | 0 | 0 |
attr |
DebertaForSequenceClassification.classifier |
1 | 0 | 0 |
attr |
DebertaForSequenceClassification.dropout |
1 | 0 | 0 |
meth |
DebertaModel.__init__ |
2 | 0 | 0 |
meth |
DebertaModel.get_input_embeddings |
1 | 0 | 0 |
meth |
DebertaModel.set_input_embeddings |
2 | 0 | 0 |
meth |
DebertaModel.forward |
10 | 9 | 0 |
attr |
DebertaModel.embeddings |
1 | 0 | 0 |
attr |
DebertaModel.encoder |
1 | 0 | 0 |
attr |
DebertaModel.z_steps |
1 | 0 | 0 |
transformers.models.deberta.tokenization_deberta (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DebertaTokenizer.__init__ |
13 | 2 | 0 |
prop |
DebertaTokenizer.mask_token |
2 | 1 | 0 |
attr |
DebertaTokenizer.add_prefix_space |
1 | 0 | 0 |
transformers.models.deberta_v2.configuration_deberta_v2 (49 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DebertaV2Config.__init__ |
25 | 0 | 0 |
attr |
DebertaV2Config.hidden_size |
1 | 0 | 0 |
attr |
DebertaV2Config.num_hidden_layers |
1 | 0 | 0 |
attr |
DebertaV2Config.num_attention_heads |
1 | 0 | 0 |
attr |
DebertaV2Config.intermediate_size |
1 | 0 | 0 |
attr |
DebertaV2Config.hidden_act |
1 | 0 | 0 |
attr |
DebertaV2Config.hidden_dropout_prob |
1 | 0 | 0 |
attr |
DebertaV2Config.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
DebertaV2Config.max_position_embeddings |
1 | 0 | 0 |
attr |
DebertaV2Config.type_vocab_size |
1 | 0 | 0 |
attr |
DebertaV2Config.initializer_range |
1 | 0 | 0 |
attr |
DebertaV2Config.relative_attention |
1 | 0 | 0 |
attr |
DebertaV2Config.max_relative_positions |
1 | 0 | 0 |
attr |
DebertaV2Config.pad_token_id |
1 | 0 | 0 |
attr |
DebertaV2Config.bos_token_id |
1 | 0 | 0 |
attr |
DebertaV2Config.eos_token_id |
1 | 0 | 0 |
attr |
DebertaV2Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
DebertaV2Config.position_biased_input |
1 | 0 | 0 |
attr |
DebertaV2Config.pos_att_type |
1 | 0 | 0 |
attr |
DebertaV2Config.vocab_size |
1 | 0 | 0 |
attr |
DebertaV2Config.layer_norm_eps |
1 | 0 | 0 |
attr |
DebertaV2Config.pooler_hidden_size |
1 | 0 | 0 |
attr |
DebertaV2Config.pooler_dropout |
1 | 0 | 0 |
attr |
DebertaV2Config.pooler_hidden_act |
1 | 0 | 0 |
attr |
DebertaV2Config.legacy |
1 | 0 | 0 |
transformers.models.deberta_v2.modeling_deberta_v2 (56 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DebertaV2PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
DebertaV2ForQuestionAnswering.__init__ |
2 | 0 | 0 |
meth |
DebertaV2ForQuestionAnswering.forward |
12 | 11 | 0 |
attr |
DebertaV2ForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
DebertaV2ForQuestionAnswering.deberta |
1 | 0 | 0 |
attr |
DebertaV2ForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
DebertaV2ForTokenClassification.__init__ |
2 | 0 | 0 |
meth |
DebertaV2ForTokenClassification.forward |
11 | 10 | 0 |
attr |
DebertaV2ForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
DebertaV2ForTokenClassification.deberta |
1 | 0 | 0 |
attr |
DebertaV2ForTokenClassification.dropout |
1 | 0 | 0 |
attr |
DebertaV2ForTokenClassification.classifier |
1 | 0 | 0 |
meth |
DebertaV2ForMultipleChoice.__init__ |
2 | 0 | 0 |
meth |
DebertaV2ForMultipleChoice.get_input_embeddings |
1 | 0 | 0 |
meth |
DebertaV2ForMultipleChoice.set_input_embeddings |
2 | 0 | 0 |
meth |
DebertaV2ForMultipleChoice.forward |
11 | 10 | 0 |
attr |
DebertaV2ForMultipleChoice.num_labels |
1 | 0 | 0 |
attr |
DebertaV2ForMultipleChoice.deberta |
1 | 0 | 0 |
attr |
DebertaV2ForMultipleChoice.pooler |
1 | 0 | 0 |
attr |
DebertaV2ForMultipleChoice.classifier |
1 | 0 | 0 |
attr |
DebertaV2ForMultipleChoice.dropout |
1 | 0 | 0 |
meth |
DebertaV2ForMaskedLM.__init__ |
2 | 0 | 0 |
meth |
DebertaV2ForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
DebertaV2ForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
meth |
DebertaV2ForMaskedLM.forward |
11 | 10 | 0 |
attr |
DebertaV2ForMaskedLM.legacy |
1 | 0 | 0 |
attr |
DebertaV2ForMaskedLM.deberta |
1 | 0 | 0 |
attr |
DebertaV2ForMaskedLM.cls |
1 | 0 | 0 |
attr |
DebertaV2ForMaskedLM.lm_predictions |
1 | 0 | 0 |
meth |
DebertaV2ForSequenceClassification.__init__ |
2 | 0 | 0 |
meth |
DebertaV2ForSequenceClassification.get_input_embeddings |
1 | 0 | 0 |
meth |
DebertaV2ForSequenceClassification.set_input_embeddings |
2 | 0 | 0 |
meth |
DebertaV2ForSequenceClassification.forward |
11 | 10 | 0 |
attr |
DebertaV2ForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
DebertaV2ForSequenceClassification.deberta |
1 | 0 | 0 |
attr |
DebertaV2ForSequenceClassification.pooler |
1 | 0 | 0 |
attr |
DebertaV2ForSequenceClassification.classifier |
1 | 0 | 0 |
attr |
DebertaV2ForSequenceClassification.dropout |
1 | 0 | 0 |
meth |
DebertaV2Model.__init__ |
2 | 0 | 0 |
meth |
DebertaV2Model.get_input_embeddings |
1 | 0 | 0 |
meth |
DebertaV2Model.set_input_embeddings |
2 | 0 | 0 |
meth |
DebertaV2Model.forward |
10 | 9 | 0 |
attr |
DebertaV2Model.embeddings |
1 | 0 | 0 |
attr |
DebertaV2Model.encoder |
1 | 0 | 0 |
attr |
DebertaV2Model.z_steps |
1 | 0 | 0 |
transformers.models.deberta_v2.tokenization_deberta_v2 (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DebertaV2Tokenizer.__init__ |
14 | 1 | 0 |
attr |
DebertaV2Tokenizer.do_lower_case |
1 | 0 | 0 |
attr |
DebertaV2Tokenizer.split_by_punct |
1 | 0 | 0 |
attr |
DebertaV2Tokenizer.add_prefix_space |
1 | 0 | 0 |
transformers.models.decision_transformer.configuration_decision_transformer (48 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DecisionTransformerConfig.__init__ |
25 | 0 | 0 |
attr |
DecisionTransformerConfig.add_cross_attention |
1 | 0 | 0 |
attr |
DecisionTransformerConfig.state_dim |
1 | 0 | 0 |
attr |
DecisionTransformerConfig.act_dim |
1 | 0 | 0 |
attr |
DecisionTransformerConfig.hidden_size |
1 | 0 | 0 |
attr |
DecisionTransformerConfig.max_ep_len |
1 | 0 | 0 |
attr |
DecisionTransformerConfig.action_tanh |
1 | 0 | 0 |
attr |
DecisionTransformerConfig.vocab_size |
1 | 0 | 0 |
attr |
DecisionTransformerConfig.n_positions |
1 | 0 | 0 |
attr |
DecisionTransformerConfig.n_layer |
1 | 0 | 0 |
attr |
DecisionTransformerConfig.n_head |
1 | 0 | 0 |
attr |
DecisionTransformerConfig.n_inner |
1 | 0 | 0 |
attr |
DecisionTransformerConfig.activation_function |
1 | 0 | 0 |
attr |
DecisionTransformerConfig.resid_pdrop |
1 | 0 | 0 |
attr |
DecisionTransformerConfig.embd_pdrop |
1 | 0 | 0 |
attr |
DecisionTransformerConfig.attn_pdrop |
1 | 0 | 0 |
attr |
DecisionTransformerConfig.layer_norm_epsilon |
1 | 0 | 0 |
attr |
DecisionTransformerConfig.initializer_range |
1 | 0 | 0 |
attr |
DecisionTransformerConfig.scale_attn_weights |
1 | 0 | 0 |
attr |
DecisionTransformerConfig.use_cache |
1 | 0 | 0 |
attr |
DecisionTransformerConfig.scale_attn_by_inverse_layer_idx |
1 | 0 | 0 |
attr |
DecisionTransformerConfig.reorder_and_upcast_attn |
1 | 0 | 0 |
attr |
DecisionTransformerConfig.bos_token_id |
1 | 0 | 0 |
attr |
DecisionTransformerConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.decision_transformer.modeling_decision_transformer (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DecisionTransformerGPT2Model.__init__ |
2 | 0 | 0 |
meth |
DecisionTransformerGPT2Model.get_input_embeddings |
1 | 0 | 0 |
meth |
DecisionTransformerGPT2Model.set_input_embeddings |
2 | 0 | 0 |
attr |
DecisionTransformerGPT2Model.embed_dim |
1 | 0 | 0 |
attr |
DecisionTransformerGPT2Model.wte |
1 | 0 | 0 |
attr |
DecisionTransformerGPT2Model.wpe |
1 | 0 | 0 |
attr |
DecisionTransformerGPT2Model.drop |
1 | 0 | 0 |
attr |
DecisionTransformerGPT2Model.h |
1 | 0 | 0 |
attr |
DecisionTransformerGPT2Model.ln_f |
1 | 0 | 0 |
attr |
DecisionTransformerGPT2Model.gradient_checkpointing |
1 | 0 | 0 |
meth |
DecisionTransformerGPT2PreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
DecisionTransformerGPT2PreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
DecisionTransformerModel.__init__ |
2 | 0 | 0 |
meth |
DecisionTransformerModel.forward |
11 | 10 | 0 |
attr |
DecisionTransformerModel.hidden_size |
1 | 0 | 0 |
attr |
DecisionTransformerModel.encoder |
1 | 0 | 0 |
attr |
DecisionTransformerModel.embed_timestep |
1 | 0 | 0 |
attr |
DecisionTransformerModel.embed_return |
1 | 0 | 0 |
attr |
DecisionTransformerModel.embed_state |
1 | 0 | 0 |
attr |
DecisionTransformerModel.embed_action |
1 | 0 | 0 |
attr |
DecisionTransformerModel.embed_ln |
1 | 0 | 0 |
attr |
DecisionTransformerModel.predict_state |
1 | 0 | 0 |
attr |
DecisionTransformerModel.predict_action |
1 | 0 | 0 |
attr |
DecisionTransformerModel.predict_return |
1 | 0 | 0 |
transformers.models.deepseek_v2.configuration_deepseek_v2 (37 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeepseekV2Config.__init__ |
36 | 34 | 0 |
attr |
DeepseekV2Config.first_k_dense_replace |
1 | 0 | 0 |
attr |
DeepseekV2Config.kv_lora_rank |
1 | 0 | 0 |
attr |
DeepseekV2Config.q_lora_rank |
1 | 0 | 0 |
attr |
DeepseekV2Config.n_group |
1 | 0 | 0 |
attr |
DeepseekV2Config.n_routed_experts |
1 | 0 | 0 |
attr |
DeepseekV2Config.n_shared_experts |
1 | 0 | 0 |
attr |
DeepseekV2Config.qk_nope_head_dim |
1 | 0 | 0 |
attr |
DeepseekV2Config.qk_rope_head_dim |
1 | 0 | 0 |
attr |
DeepseekV2Config.routed_scaling_factor |
1 | 0 | 0 |
attr |
DeepseekV2Config.topk_group |
1 | 0 | 0 |
attr |
DeepseekV2Config.topk_method |
1 | 0 | 0 |
attr |
DeepseekV2Config.norm_topk_prob |
1 | 0 | 0 |
attr |
DeepseekV2Config.v_head_dim |
1 | 0 | 0 |
attr |
DeepseekV2Config.num_experts_per_tok |
1 | 0 | 0 |
attr |
DeepseekV2Config.moe_intermediate_size |
1 | 0 | 0 |
attr |
DeepseekV2Config.vocab_size |
1 | 0 | 0 |
attr |
DeepseekV2Config.max_position_embeddings |
1 | 0 | 0 |
attr |
DeepseekV2Config.hidden_size |
1 | 0 | 0 |
attr |
DeepseekV2Config.intermediate_size |
1 | 0 | 0 |
attr |
DeepseekV2Config.num_hidden_layers |
1 | 0 | 0 |
attr |
DeepseekV2Config.num_attention_heads |
1 | 0 | 0 |
attr |
DeepseekV2Config.num_key_value_heads |
1 | 0 | 0 |
attr |
DeepseekV2Config.hidden_act |
1 | 0 | 0 |
attr |
DeepseekV2Config.initializer_range |
1 | 0 | 0 |
attr |
DeepseekV2Config.rms_norm_eps |
1 | 0 | 0 |
attr |
DeepseekV2Config.use_cache |
1 | 0 | 0 |
attr |
DeepseekV2Config.attention_bias |
1 | 0 | 0 |
attr |
DeepseekV2Config.attention_dropout |
1 | 0 | 0 |
attr |
DeepseekV2Config.mlp_bias |
1 | 0 | 0 |
attr |
DeepseekV2Config.head_dim |
1 | 0 | 0 |
attr |
DeepseekV2Config.rope_parameters |
1 | 0 | 0 |
attr |
DeepseekV2Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
DeepseekV2Config.pad_token_id |
1 | 0 | 0 |
attr |
DeepseekV2Config.bos_token_id |
1 | 0 | 0 |
attr |
DeepseekV2Config.eos_token_id |
1 | 0 | 0 |
transformers.models.deepseek_v2.modeling_deepseek_v2 (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeepseekV2PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
DeepseekV2ForCausalLM.__init__ |
2 | 0 | 0 |
attr |
DeepseekV2ForCausalLM.model |
1 | 0 | 0 |
attr |
DeepseekV2ForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
DeepseekV2ForCausalLM.lm_head |
1 | 0 | 0 |
meth |
DeepseekV2Model.__init__ |
2 | 1 | 0 |
attr |
DeepseekV2Model.padding_idx |
1 | 0 | 0 |
attr |
DeepseekV2Model.vocab_size |
1 | 0 | 0 |
attr |
DeepseekV2Model.embed_tokens |
1 | 0 | 0 |
attr |
DeepseekV2Model.layers |
1 | 0 | 0 |
attr |
DeepseekV2Model.norm |
1 | 0 | 0 |
attr |
DeepseekV2Model.rotary_emb |
1 | 0 | 0 |
attr |
DeepseekV2Model.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.deepseek_v2.modular_deepseek_v2 (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeepseekV2PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
DeepseekV2Config.__init__ |
36 | 34 | 0 |
attr |
DeepseekV2Config.first_k_dense_replace |
1 | 0 | 0 |
attr |
DeepseekV2Config.kv_lora_rank |
1 | 0 | 0 |
attr |
DeepseekV2Config.q_lora_rank |
1 | 0 | 0 |
attr |
DeepseekV2Config.n_group |
1 | 0 | 0 |
attr |
DeepseekV2Config.n_routed_experts |
1 | 0 | 0 |
attr |
DeepseekV2Config.n_shared_experts |
1 | 0 | 0 |
attr |
DeepseekV2Config.qk_nope_head_dim |
1 | 0 | 0 |
attr |
DeepseekV2Config.qk_rope_head_dim |
1 | 0 | 0 |
attr |
DeepseekV2Config.routed_scaling_factor |
1 | 0 | 0 |
attr |
DeepseekV2Config.topk_group |
1 | 0 | 0 |
attr |
DeepseekV2Config.topk_method |
1 | 0 | 0 |
attr |
DeepseekV2Config.norm_topk_prob |
1 | 0 | 0 |
attr |
DeepseekV2Config.v_head_dim |
1 | 0 | 0 |
attr |
DeepseekV2Config.num_experts_per_tok |
1 | 0 | 0 |
attr |
DeepseekV2Config.moe_intermediate_size |
1 | 0 | 0 |
attr |
DeepseekV2Config.head_dim |
1 | 0 | 0 |
transformers.models.deepseek_v3.configuration_deepseek_v3 (40 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeepseekV3Config.__init__ |
36 | 34 | 0 |
meth |
DeepseekV3Config.convert_rope_params_to_dict |
3 | 1 | 0 |
attr |
DeepseekV3Config.vocab_size |
1 | 0 | 0 |
attr |
DeepseekV3Config.max_position_embeddings |
1 | 0 | 0 |
attr |
DeepseekV3Config.hidden_size |
1 | 0 | 0 |
attr |
DeepseekV3Config.intermediate_size |
1 | 0 | 0 |
attr |
DeepseekV3Config.moe_intermediate_size |
1 | 0 | 0 |
attr |
DeepseekV3Config.num_hidden_layers |
1 | 0 | 0 |
attr |
DeepseekV3Config.num_attention_heads |
1 | 0 | 0 |
attr |
DeepseekV3Config.n_shared_experts |
1 | 0 | 0 |
attr |
DeepseekV3Config.n_routed_experts |
1 | 0 | 0 |
attr |
DeepseekV3Config.routed_scaling_factor |
1 | 0 | 0 |
attr |
DeepseekV3Config.kv_lora_rank |
1 | 0 | 0 |
attr |
DeepseekV3Config.q_lora_rank |
1 | 0 | 0 |
attr |
DeepseekV3Config.qk_rope_head_dim |
1 | 0 | 0 |
attr |
DeepseekV3Config.v_head_dim |
1 | 0 | 0 |
attr |
DeepseekV3Config.qk_nope_head_dim |
1 | 0 | 0 |
attr |
DeepseekV3Config.qk_head_dim |
1 | 0 | 0 |
attr |
DeepseekV3Config.head_dim |
1 | 0 | 0 |
attr |
DeepseekV3Config.n_group |
1 | 0 | 0 |
attr |
DeepseekV3Config.topk_group |
1 | 0 | 0 |
attr |
DeepseekV3Config.num_experts_per_tok |
1 | 0 | 0 |
attr |
DeepseekV3Config.first_k_dense_replace |
1 | 0 | 0 |
attr |
DeepseekV3Config.norm_topk_prob |
1 | 0 | 0 |
attr |
DeepseekV3Config.rope_interleave |
1 | 0 | 0 |
attr |
DeepseekV3Config.num_key_value_heads |
1 | 0 | 0 |
attr |
DeepseekV3Config.hidden_act |
1 | 0 | 0 |
attr |
DeepseekV3Config.initializer_range |
1 | 0 | 0 |
attr |
DeepseekV3Config.rms_norm_eps |
1 | 0 | 0 |
attr |
DeepseekV3Config.pretraining_tp |
1 | 0 | 0 |
attr |
DeepseekV3Config.use_cache |
1 | 0 | 0 |
attr |
DeepseekV3Config.attention_bias |
1 | 0 | 0 |
attr |
DeepseekV3Config.attention_dropout |
1 | 0 | 0 |
attr |
DeepseekV3Config.rope_parameters |
1 | 0 | 0 |
attr |
DeepseekV3Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
DeepseekV3Config.pad_token_id |
1 | 0 | 0 |
attr |
DeepseekV3Config.bos_token_id |
1 | 0 | 0 |
attr |
DeepseekV3Config.eos_token_id |
1 | 0 | 0 |
transformers.models.deepseek_v3.modeling_deepseek_v3 (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeepseekV3PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
DeepseekV3ForCausalLM.__init__ |
2 | 0 | 0 |
attr |
DeepseekV3ForCausalLM.model |
1 | 0 | 0 |
attr |
DeepseekV3ForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
DeepseekV3ForCausalLM.lm_head |
1 | 0 | 0 |
meth |
DeepseekV3Model.__init__ |
2 | 1 | 0 |
attr |
DeepseekV3Model.padding_idx |
1 | 0 | 0 |
attr |
DeepseekV3Model.vocab_size |
1 | 0 | 0 |
attr |
DeepseekV3Model.embed_tokens |
1 | 0 | 0 |
attr |
DeepseekV3Model.layers |
1 | 0 | 0 |
attr |
DeepseekV3Model.norm |
1 | 0 | 0 |
attr |
DeepseekV3Model.rotary_emb |
1 | 0 | 0 |
attr |
DeepseekV3Model.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.deepseek_v3.modular_deepseek_v3 (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeepseekV3PreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.deepseek_vl.configuration_deepseek_vl (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeepseekVLConfig.__init__ |
6 | 4 | 0 |
attr |
DeepseekVLConfig.text_config |
1 | 0 | 0 |
attr |
DeepseekVLConfig.vision_config |
1 | 0 | 0 |
attr |
DeepseekVLConfig.image_token_id |
1 | 0 | 0 |
attr |
DeepseekVLConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.deepseek_vl.image_processing_deepseek_vl (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeepseekVLImageProcessor.__init__ |
13 | 12 | 0 |
meth |
DeepseekVLImageProcessor.resize |
7 | 6 | 0 |
attr |
DeepseekVLImageProcessor.do_resize |
1 | 0 | 0 |
attr |
DeepseekVLImageProcessor.size |
1 | 0 | 0 |
attr |
DeepseekVLImageProcessor.resample |
1 | 0 | 0 |
attr |
DeepseekVLImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
DeepseekVLImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
DeepseekVLImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
DeepseekVLImageProcessor.image_mean |
1 | 0 | 0 |
attr |
DeepseekVLImageProcessor.image_std |
1 | 0 | 0 |
attr |
DeepseekVLImageProcessor.do_convert_rgb |
1 | 0 | 0 |
attr |
DeepseekVLImageProcessor.do_pad |
1 | 0 | 0 |
attr |
DeepseekVLImageProcessor.min_size |
1 | 0 | 0 |
attr |
DeepseekVLImageProcessor.background_color |
1 | 0 | 0 |
transformers.models.deepseek_vl.image_processing_deepseek_vl_fast (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeepseekVLImageProcessorFast.__init__ |
2 | 1 | 0 |
meth |
DeepseekVLImageProcessorFast.resize |
7 | 6 | 0 |
meth |
DeepseekVLImageProcessorFast._preprocess |
15 | 14 | 0 |
attr |
DeepseekVLImageProcessorFast.background_color |
1 | 0 | 0 |
transformers.models.deepseek_vl.modeling_deepseek_vl (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeepseekVLForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
DeepseekVLForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
DeepseekVLForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
DeepseekVLForConditionalGeneration.prepare_inputs_for_generation |
10 | 0 | 0 |
attr |
DeepseekVLForConditionalGeneration.model |
1 | 0 | 0 |
attr |
DeepseekVLForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
DeepseekVLModel.__init__ |
2 | 0 | 0 |
meth |
DeepseekVLModel.get_input_embeddings |
1 | 0 | 0 |
meth |
DeepseekVLModel.set_input_embeddings |
2 | 0 | 0 |
meth |
DeepseekVLModel.get_placeholder_mask |
4 | 3 | 0 |
meth |
DeepseekVLModel.forward |
11 | 10 | 0 |
attr |
DeepseekVLModel.vision_model |
1 | 0 | 0 |
attr |
DeepseekVLModel.aligner |
1 | 0 | 0 |
attr |
DeepseekVLModel.language_model |
1 | 0 | 0 |
attr |
DeepseekVLModel.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.deepseek_vl.modular_deepseek_vl (39 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeepseekVLImageProcessor.__init__ |
2 | 0 | 0 |
meth |
DeepseekVLImageProcessor.postprocess |
1 | 0 | 0 |
meth |
DeepseekVLImageProcessor.unnormalize |
1 | 0 | 0 |
meth |
DeepseekVLImageProcessorFast.__init__ |
2 | 0 | 0 |
meth |
DeepseekVLImageProcessorFast.postprocess |
1 | 0 | 0 |
meth |
DeepseekVLConfig.__init__ |
6 | 4 | 0 |
attr |
DeepseekVLConfig.text_config |
1 | 0 | 0 |
attr |
DeepseekVLConfig.vision_config |
1 | 0 | 0 |
attr |
DeepseekVLConfig.image_token_id |
1 | 0 | 0 |
attr |
DeepseekVLConfig.tie_word_embeddings |
1 | 0 | 0 |
meth |
DeepseekVLPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
DeepseekVLForConditionalGeneration.prepare_embeddings_for_image_generation |
1 | 0 | 0 |
meth |
DeepseekVLForConditionalGeneration.decode_image_tokens |
1 | 0 | 0 |
meth |
DeepseekVLForConditionalGeneration.generate |
1 | 0 | 0 |
meth |
DeepseekVLProcessor.__init__ |
5 | 0 | 0 |
meth |
DeepseekVLProcessor.batch_decode |
3 | 0 | 0 |
meth |
DeepseekVLProcessor.decode |
3 | 0 | 0 |
prop |
DeepseekVLProcessor.model_input_names |
1 | 0 | 0 |
attr |
DeepseekVLProcessor.image_token |
1 | 0 | 0 |
attr |
DeepseekVLProcessor.num_image_tokens |
1 | 0 | 0 |
meth |
DeepseekVLModel.__init__ |
2 | 0 | 0 |
attr |
DeepseekVLModel.config |
1 | 0 | 0 |
attr |
DeepseekVLModel.vision_model |
1 | 0 | 0 |
attr |
DeepseekVLModel.aligner |
1 | 0 | 0 |
attr |
DeepseekVLModel.language_model |
1 | 0 | 0 |
attr |
DeepseekVLModel.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.deepseek_vl.processing_deepseek_vl (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeepseekVLProcessor.__init__ |
5 | 0 | 0 |
meth |
DeepseekVLProcessor.batch_decode |
3 | 0 | 0 |
meth |
DeepseekVLProcessor.decode |
3 | 0 | 0 |
prop |
DeepseekVLProcessor.model_input_names |
1 | 0 | 0 |
attr |
DeepseekVLProcessor.image_token |
1 | 0 | 0 |
attr |
DeepseekVLProcessor.num_image_tokens |
1 | 0 | 0 |
transformers.models.deepseek_vl_hybrid.configuration_deepseek_vl_hybrid (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeepseekVLHybridConfig.__init__ |
7 | 5 | 0 |
attr |
DeepseekVLHybridConfig.high_res_vision_config |
1 | 0 | 0 |
attr |
DeepseekVLHybridConfig.text_config |
1 | 0 | 0 |
attr |
DeepseekVLHybridConfig.vision_config |
1 | 0 | 0 |
attr |
DeepseekVLHybridConfig.image_token_id |
1 | 0 | 0 |
attr |
DeepseekVLHybridConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.deepseek_vl_hybrid.image_processing_deepseek_vl_hybrid (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeepseekVLHybridImageProcessor.__init__ |
17 | 16 | 0 |
meth |
DeepseekVLHybridImageProcessor.resize |
7 | 6 | 0 |
attr |
DeepseekVLHybridImageProcessor.high_res_size |
1 | 0 | 0 |
attr |
DeepseekVLHybridImageProcessor.high_res_image_mean |
1 | 0 | 0 |
attr |
DeepseekVLHybridImageProcessor.high_res_image_std |
1 | 0 | 0 |
attr |
DeepseekVLHybridImageProcessor.resample |
1 | 0 | 0 |
attr |
DeepseekVLHybridImageProcessor.high_res_resample |
1 | 0 | 0 |
attr |
DeepseekVLHybridImageProcessor.do_resize |
1 | 0 | 0 |
attr |
DeepseekVLHybridImageProcessor.size |
1 | 0 | 0 |
attr |
DeepseekVLHybridImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
DeepseekVLHybridImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
DeepseekVLHybridImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
DeepseekVLHybridImageProcessor.image_mean |
1 | 0 | 0 |
attr |
DeepseekVLHybridImageProcessor.image_std |
1 | 0 | 0 |
attr |
DeepseekVLHybridImageProcessor.do_convert_rgb |
1 | 0 | 0 |
attr |
DeepseekVLHybridImageProcessor.do_pad |
1 | 0 | 0 |
attr |
DeepseekVLHybridImageProcessor.min_size |
1 | 0 | 0 |
attr |
DeepseekVLHybridImageProcessor.background_color |
1 | 0 | 0 |
attr |
DeepseekVLHybridImageProcessor.high_res_background_color |
1 | 0 | 0 |
transformers.models.deepseek_vl_hybrid.image_processing_deepseek_vl_hybrid_fast (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeepseekVLHybridImageProcessorFast.__init__ |
2 | 1 | 0 |
meth |
DeepseekVLHybridImageProcessorFast.resize |
7 | 6 | 0 |
meth |
DeepseekVLHybridImageProcessorFast._preprocess |
19 | 18 | 0 |
meth |
DeepseekVLHybridImageProcessorFast._further_process_kwargs |
10 | 9 | 0 |
attr |
DeepseekVLHybridImageProcessorFast.background_color |
1 | 0 | 0 |
attr |
DeepseekVLHybridImageProcessorFast.high_res_background_color |
1 | 0 | 0 |
transformers.models.deepseek_vl_hybrid.modeling_deepseek_vl_hybrid (38 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeepseekVLHybridPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
DeepseekVLHybridModel.__init__ |
2 | 0 | 0 |
meth |
DeepseekVLHybridModel.get_input_embeddings |
1 | 0 | 0 |
meth |
DeepseekVLHybridModel.set_input_embeddings |
2 | 0 | 0 |
meth |
DeepseekVLHybridModel.get_placeholder_mask |
4 | 3 | 0 |
meth |
DeepseekVLHybridModel.forward |
12 | 11 | 0 |
meth |
DeepseekVLHybridModel.get_low_res_image_features |
3 | 2 | 0 |
meth |
DeepseekVLHybridModel.get_high_res_image_features |
4 | 3 | 0 |
attr |
DeepseekVLHybridModel.output_size |
1 | 0 | 0 |
attr |
DeepseekVLHybridModel.global_attn_index |
1 | 0 | 0 |
attr |
DeepseekVLHybridModel.high_res_vision_model |
1 | 0 | 0 |
attr |
DeepseekVLHybridModel.high_res_vision_neck |
1 | 0 | 0 |
attr |
DeepseekVLHybridModel.high_res_vision_proj |
1 | 0 | 0 |
attr |
DeepseekVLHybridModel.high_res_vision_alpha |
1 | 0 | 0 |
attr |
DeepseekVLHybridModel.vision_model |
1 | 0 | 0 |
attr |
DeepseekVLHybridModel.aligner |
1 | 0 | 0 |
attr |
DeepseekVLHybridModel.language_model |
1 | 0 | 0 |
attr |
DeepseekVLHybridModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
DeepseekVLHybridForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
DeepseekVLHybridForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
DeepseekVLHybridForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
DeepseekVLHybridForConditionalGeneration.prepare_inputs_for_generation |
11 | 0 | 0 |
attr |
DeepseekVLHybridForConditionalGeneration.model |
1 | 0 | 0 |
attr |
DeepseekVLHybridForConditionalGeneration.lm_head |
1 | 0 | 0 |
transformers.models.deepseek_vl_hybrid.modular_deepseek_vl_hybrid (40 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeepseekVLHybridPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
DeepseekVLHybridModel.__init__ |
2 | 0 | 0 |
meth |
DeepseekVLHybridModel.get_low_res_image_features |
3 | 2 | 0 |
meth |
DeepseekVLHybridModel.get_high_res_image_features |
4 | 3 | 0 |
meth |
DeepseekVLHybridModel.forward |
12 | 11 | 0 |
attr |
DeepseekVLHybridModel.output_size |
1 | 0 | 0 |
attr |
DeepseekVLHybridModel.global_attn_index |
1 | 0 | 0 |
attr |
DeepseekVLHybridModel.high_res_vision_model |
1 | 0 | 0 |
attr |
DeepseekVLHybridModel.high_res_vision_neck |
1 | 0 | 0 |
attr |
DeepseekVLHybridModel.high_res_vision_proj |
1 | 0 | 0 |
attr |
DeepseekVLHybridModel.high_res_vision_alpha |
1 | 0 | 0 |
meth |
DeepseekVLHybridImageProcessor.__init__ |
17 | 16 | 0 |
meth |
DeepseekVLHybridImageProcessor.preprocess |
20 | 19 | 0 |
attr |
DeepseekVLHybridImageProcessor.high_res_size |
1 | 0 | 0 |
attr |
DeepseekVLHybridImageProcessor.high_res_image_mean |
1 | 0 | 0 |
attr |
DeepseekVLHybridImageProcessor.high_res_image_std |
1 | 0 | 0 |
attr |
DeepseekVLHybridImageProcessor.resample |
1 | 0 | 0 |
attr |
DeepseekVLHybridImageProcessor.high_res_resample |
1 | 0 | 0 |
attr |
DeepseekVLHybridImageProcessor.high_res_background_color |
1 | 0 | 0 |
meth |
DeepseekVLHybridImageProcessorFast.__init__ |
2 | 1 | 0 |
meth |
DeepseekVLHybridImageProcessorFast._further_process_kwargs |
10 | 9 | 0 |
meth |
DeepseekVLHybridImageProcessorFast._preprocess |
19 | 18 | 0 |
attr |
DeepseekVLHybridImageProcessorFast.background_color |
1 | 0 | 0 |
attr |
DeepseekVLHybridImageProcessorFast.high_res_background_color |
1 | 0 | 0 |
meth |
DeepseekVLHybridConfig.__init__ |
7 | 5 | 0 |
attr |
DeepseekVLHybridConfig.high_res_vision_config |
1 | 0 | 0 |
meth |
DeepseekVLHybridForConditionalGeneration.prepare_inputs_for_generation |
11 | 0 | 0 |
transformers.models.deepseek_vl_hybrid.processing_deepseek_vl_hybrid (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeepseekVLHybridProcessor.__init__ |
5 | 0 | 0 |
meth |
DeepseekVLHybridProcessor.batch_decode |
3 | 0 | 0 |
meth |
DeepseekVLHybridProcessor.decode |
3 | 0 | 0 |
prop |
DeepseekVLHybridProcessor.model_input_names |
1 | 0 | 0 |
attr |
DeepseekVLHybridProcessor.image_token |
1 | 0 | 0 |
attr |
DeepseekVLHybridProcessor.num_image_tokens |
1 | 0 | 0 |
transformers.models.deformable_detr.configuration_deformable_detr (80 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeformableDetrConfig.__init__ |
42 | 0 | 0 |
attr |
DeformableDetrConfig.backbone_config |
1 | 0 | 0 |
attr |
DeformableDetrConfig.num_channels |
1 | 0 | 0 |
attr |
DeformableDetrConfig.num_queries |
1 | 0 | 0 |
attr |
DeformableDetrConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
DeformableDetrConfig.d_model |
1 | 0 | 0 |
attr |
DeformableDetrConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
DeformableDetrConfig.encoder_layers |
1 | 0 | 0 |
attr |
DeformableDetrConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
DeformableDetrConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
DeformableDetrConfig.decoder_layers |
1 | 0 | 0 |
attr |
DeformableDetrConfig.decoder_attention_heads |
1 | 0 | 0 |
attr |
DeformableDetrConfig.dropout |
1 | 0 | 0 |
attr |
DeformableDetrConfig.attention_dropout |
1 | 0 | 0 |
attr |
DeformableDetrConfig.activation_dropout |
1 | 0 | 0 |
attr |
DeformableDetrConfig.activation_function |
1 | 0 | 0 |
attr |
DeformableDetrConfig.init_std |
1 | 0 | 0 |
attr |
DeformableDetrConfig.init_xavier_std |
1 | 0 | 0 |
attr |
DeformableDetrConfig.encoder_layerdrop |
1 | 0 | 0 |
attr |
DeformableDetrConfig.auxiliary_loss |
1 | 0 | 0 |
attr |
DeformableDetrConfig.position_embedding_type |
1 | 0 | 0 |
attr |
DeformableDetrConfig.dilation |
1 | 0 | 0 |
attr |
DeformableDetrConfig.num_feature_levels |
1 | 0 | 0 |
attr |
DeformableDetrConfig.encoder_n_points |
1 | 0 | 0 |
attr |
DeformableDetrConfig.decoder_n_points |
1 | 0 | 0 |
attr |
DeformableDetrConfig.two_stage |
1 | 0 | 0 |
attr |
DeformableDetrConfig.two_stage_num_proposals |
1 | 0 | 0 |
attr |
DeformableDetrConfig.with_box_refine |
1 | 0 | 0 |
attr |
DeformableDetrConfig.class_cost |
1 | 0 | 0 |
attr |
DeformableDetrConfig.bbox_cost |
1 | 0 | 0 |
attr |
DeformableDetrConfig.giou_cost |
1 | 0 | 0 |
attr |
DeformableDetrConfig.mask_loss_coefficient |
1 | 0 | 0 |
attr |
DeformableDetrConfig.dice_loss_coefficient |
1 | 0 | 0 |
attr |
DeformableDetrConfig.bbox_loss_coefficient |
1 | 0 | 0 |
attr |
DeformableDetrConfig.giou_loss_coefficient |
1 | 0 | 0 |
attr |
DeformableDetrConfig.eos_coefficient |
1 | 0 | 0 |
attr |
DeformableDetrConfig.focal_alpha |
1 | 0 | 0 |
attr |
DeformableDetrConfig.disable_custom_kernels |
1 | 0 | 0 |
attr |
DeformableDetrConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.deformable_detr.image_processing_deformable_detr (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeformableDetrImageProcessor.__init__ |
14 | 13 | 0 |
meth |
DeformableDetrImageProcessor.resize |
7 | 6 | 0 |
meth |
DeformableDetrImageProcessor.resize_annotation |
5 | 2 | 0 |
meth |
DeformableDetrImageProcessor._update_annotation_for_padded_image |
6 | 4 | 0 |
meth |
DeformableDetrImageProcessor.preprocess |
21 | 19 | 0 |
meth |
DeformableDetrImageProcessor.post_process_object_detection |
5 | 3 | 0 |
attr |
DeformableDetrImageProcessor.format |
1 | 0 | 0 |
attr |
DeformableDetrImageProcessor.do_resize |
1 | 0 | 0 |
attr |
DeformableDetrImageProcessor.size |
1 | 0 | 0 |
attr |
DeformableDetrImageProcessor.resample |
1 | 0 | 0 |
attr |
DeformableDetrImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
DeformableDetrImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
DeformableDetrImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
DeformableDetrImageProcessor.do_convert_annotations |
1 | 0 | 0 |
attr |
DeformableDetrImageProcessor.image_mean |
1 | 0 | 0 |
attr |
DeformableDetrImageProcessor.image_std |
1 | 0 | 0 |
attr |
DeformableDetrImageProcessor.do_pad |
1 | 0 | 0 |
attr |
DeformableDetrImageProcessor.pad_size |
1 | 0 | 0 |
transformers.models.deformable_detr.image_processing_deformable_detr_fast (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeformableDetrImageProcessorFast.resize |
5 | 4 | 0 |
meth |
DeformableDetrImageProcessorFast.resize_annotation |
6 | 5 | 0 |
meth |
DeformableDetrImageProcessorFast._update_annotation_for_padded_image |
6 | 4 | 0 |
meth |
DeformableDetrImageProcessorFast.pad |
6 | 5 | 0 |
meth |
DeformableDetrImageProcessorFast._preprocess |
19 | 18 | 0 |
meth |
DeformableDetrImageProcessorFast.post_process_object_detection |
5 | 3 | 0 |
attr |
DeformableDetrImageProcessorFast.size |
1 | 0 | 0 |
attr |
DeformableDetrImageProcessorFast.do_convert_annotations |
1 | 0 | 0 |
transformers.models.deformable_detr.modeling_deformable_detr (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeformableDetrForObjectDetection.__init__ |
2 | 1 | 0 |
attr |
DeformableDetrForObjectDetection.model |
1 | 0 | 0 |
attr |
DeformableDetrForObjectDetection.class_embed |
1 | 0 | 0 |
attr |
DeformableDetrForObjectDetection.bbox_embed |
1 | 0 | 0 |
meth |
DeformableDetrModel.__init__ |
2 | 1 | 0 |
meth |
DeformableDetrModel.freeze_backbone |
1 | 0 | 0 |
meth |
DeformableDetrModel.unfreeze_backbone |
1 | 0 | 0 |
meth |
DeformableDetrModel.get_valid_ratio |
3 | 0 | 0 |
meth |
DeformableDetrModel.get_proposal_pos_embed |
2 | 0 | 0 |
meth |
DeformableDetrModel.gen_encoder_output_proposals |
4 | 0 | 0 |
attr |
DeformableDetrModel.backbone |
1 | 0 | 0 |
attr |
DeformableDetrModel.encoder |
1 | 0 | 0 |
attr |
DeformableDetrModel.decoder |
1 | 0 | 0 |
attr |
DeformableDetrModel.level_embed |
1 | 0 | 0 |
attr |
DeformableDetrModel.position_embedding |
1 | 0 | 0 |
attr |
DeformableDetrModel.input_proj |
1 | 0 | 0 |
attr |
DeformableDetrModel.query_position_embeddings |
1 | 0 | 0 |
attr |
DeformableDetrModel.enc_output |
1 | 0 | 0 |
attr |
DeformableDetrModel.enc_output_norm |
1 | 0 | 0 |
attr |
DeformableDetrModel.pos_trans |
1 | 0 | 0 |
attr |
DeformableDetrModel.pos_trans_norm |
1 | 0 | 0 |
attr |
DeformableDetrModel.reference_points |
1 | 0 | 0 |
meth |
DeformableDetrPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.deformable_detr.modular_deformable_detr (35 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeformableDetrImageProcessorFast.post_process_object_detection |
5 | 3 | 0 |
meth |
DeformableDetrImageProcessorFast.post_process_instance_segmentation |
1 | 0 | 0 |
meth |
DeformableDetrImageProcessorFast.post_process_semantic_segmentation |
1 | 0 | 0 |
meth |
DeformableDetrImageProcessorFast.post_process_panoptic_segmentation |
1 | 0 | 0 |
meth |
DeformableDetrForObjectDetection.__init__ |
2 | 1 | 0 |
attr |
DeformableDetrForObjectDetection.model |
1 | 0 | 0 |
attr |
DeformableDetrForObjectDetection.class_embed |
1 | 0 | 0 |
attr |
DeformableDetrForObjectDetection.bbox_embed |
1 | 0 | 0 |
meth |
DeformableDetrModel.__init__ |
2 | 1 | 0 |
meth |
DeformableDetrModel.freeze_backbone |
1 | 0 | 0 |
meth |
DeformableDetrModel.unfreeze_backbone |
1 | 0 | 0 |
meth |
DeformableDetrModel.get_valid_ratio |
3 | 0 | 0 |
meth |
DeformableDetrModel.get_proposal_pos_embed |
2 | 0 | 0 |
meth |
DeformableDetrModel.gen_encoder_output_proposals |
4 | 0 | 0 |
attr |
DeformableDetrModel.backbone |
1 | 0 | 0 |
attr |
DeformableDetrModel.encoder |
1 | 0 | 0 |
attr |
DeformableDetrModel.decoder |
1 | 0 | 0 |
attr |
DeformableDetrModel.level_embed |
1 | 0 | 0 |
attr |
DeformableDetrModel.position_embedding |
1 | 0 | 0 |
attr |
DeformableDetrModel.input_proj |
1 | 0 | 0 |
attr |
DeformableDetrModel.query_position_embeddings |
1 | 0 | 0 |
attr |
DeformableDetrModel.enc_output |
1 | 0 | 0 |
attr |
DeformableDetrModel.enc_output_norm |
1 | 0 | 0 |
attr |
DeformableDetrModel.pos_trans |
1 | 0 | 0 |
attr |
DeformableDetrModel.pos_trans_norm |
1 | 0 | 0 |
attr |
DeformableDetrModel.reference_points |
1 | 0 | 0 |
meth |
DeformableDetrPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.deit.configuration_deit (34 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeiTConfig.__init__ |
18 | 0 | 0 |
attr |
DeiTConfig.hidden_size |
1 | 0 | 0 |
attr |
DeiTConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
DeiTConfig.num_attention_heads |
1 | 0 | 0 |
attr |
DeiTConfig.intermediate_size |
1 | 0 | 0 |
attr |
DeiTConfig.hidden_act |
1 | 0 | 0 |
attr |
DeiTConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
DeiTConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
DeiTConfig.initializer_range |
1 | 0 | 0 |
attr |
DeiTConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
DeiTConfig.image_size |
1 | 0 | 0 |
attr |
DeiTConfig.patch_size |
1 | 0 | 0 |
attr |
DeiTConfig.num_channels |
1 | 0 | 0 |
attr |
DeiTConfig.qkv_bias |
1 | 0 | 0 |
attr |
DeiTConfig.encoder_stride |
1 | 0 | 0 |
attr |
DeiTConfig.pooler_output_size |
1 | 0 | 0 |
attr |
DeiTConfig.pooler_act |
1 | 0 | 0 |
transformers.models.deit.image_processing_deit (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DeiTImageProcessor.__init__ |
12 | 11 | 0 |
meth |
DeiTImageProcessor.resize |
7 | 6 | 0 |
meth |
DeiTImageProcessor.preprocess |
15 | 14 | 0 |
attr |
DeiTImageProcessor.do_resize |
1 | 0 | 0 |
attr |
DeiTImageProcessor.size |
1 | 0 | 0 |
attr |
DeiTImageProcessor.resample |
1 | 0 | 0 |
attr |
DeiTImageProcessor.do_center_crop |
1 | 0 | 0 |
attr |
DeiTImageProcessor.crop_size |
1 | 0 | 0 |
attr |
DeiTImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
DeiTImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
DeiTImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
DeiTImageProcessor.image_mean |
1 | 0 | 0 |
attr |
DeiTImageProcessor.image_std |
1 | 0 | 0 |
transformers.models.deit.modeling_deit (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
DeiTForMaskedImageModeling.deit |
1 | 0 | 0 |
attr |
DeiTForMaskedImageModeling.decoder |
1 | 0 | 0 |
attr |
DeiTForImageClassification.num_labels |
1 | 0 | 0 |
attr |
DeiTForImageClassification.deit |
1 | 0 | 0 |
attr |
DeiTForImageClassification.classifier |
1 | 0 | 0 |
attr |
DeiTForImageClassificationWithTeacher.num_labels |
1 | 0 | 0 |
attr |
DeiTForImageClassificationWithTeacher.deit |
1 | 0 | 0 |
attr |
DeiTForImageClassificationWithTeacher.cls_classifier |
1 | 0 | 0 |
attr |
DeiTForImageClassificationWithTeacher.distillation_classifier |
1 | 0 | 0 |
attr |
DeiTModel.embeddings |
1 | 0 | 0 |
attr |
DeiTModel.encoder |
1 | 0 | 0 |
attr |
DeiTModel.layernorm |
1 | 0 | 0 |
attr |
DeiTModel.pooler |
1 | 0 | 0 |
transformers.models.depth_anything.configuration_depth_anything (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DepthAnythingConfig.__init__ |
13 | 0 | 0 |
attr |
DepthAnythingConfig.backbone_config |
1 | 0 | 0 |
attr |
DepthAnythingConfig.reassemble_hidden_size |
1 | 0 | 0 |
attr |
DepthAnythingConfig.patch_size |
1 | 0 | 0 |
attr |
DepthAnythingConfig.initializer_range |
1 | 0 | 0 |
attr |
DepthAnythingConfig.reassemble_factors |
1 | 0 | 0 |
attr |
DepthAnythingConfig.neck_hidden_sizes |
1 | 0 | 0 |
attr |
DepthAnythingConfig.fusion_hidden_size |
1 | 0 | 0 |
attr |
DepthAnythingConfig.head_in_index |
1 | 0 | 0 |
attr |
DepthAnythingConfig.head_hidden_size |
1 | 0 | 0 |
attr |
DepthAnythingConfig.depth_estimation_type |
1 | 0 | 0 |
attr |
DepthAnythingConfig.max_depth |
1 | 0 | 0 |
transformers.models.depth_anything.modeling_depth_anything (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DepthAnythingForDepthEstimation.__init__ |
2 | 0 | 0 |
meth |
DepthAnythingForDepthEstimation.forward |
7 | 6 | 0 |
attr |
DepthAnythingForDepthEstimation.backbone |
1 | 0 | 0 |
attr |
DepthAnythingForDepthEstimation.neck |
1 | 0 | 0 |
attr |
DepthAnythingForDepthEstimation.head |
1 | 0 | 0 |
transformers.models.depth_pro.configuration_depth_pro (34 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DepthProConfig.__init__ |
18 | 0 | 0 |
attr |
DepthProConfig.fusion_hidden_size |
1 | 0 | 0 |
attr |
DepthProConfig.patch_size |
1 | 0 | 0 |
attr |
DepthProConfig.initializer_range |
1 | 0 | 0 |
attr |
DepthProConfig.use_batch_norm_in_fusion_residual |
1 | 0 | 0 |
attr |
DepthProConfig.use_bias_in_fusion_residual |
1 | 0 | 0 |
attr |
DepthProConfig.use_fov_model |
1 | 0 | 0 |
attr |
DepthProConfig.num_fov_head_layers |
1 | 0 | 0 |
attr |
DepthProConfig.intermediate_hook_ids |
1 | 0 | 0 |
attr |
DepthProConfig.intermediate_feature_dims |
1 | 0 | 0 |
attr |
DepthProConfig.scaled_images_ratios |
1 | 0 | 0 |
attr |
DepthProConfig.scaled_images_overlap_ratios |
1 | 0 | 0 |
attr |
DepthProConfig.scaled_images_feature_dims |
1 | 0 | 0 |
attr |
DepthProConfig.merge_padding_value |
1 | 0 | 0 |
attr |
DepthProConfig.image_model_config |
1 | 0 | 0 |
attr |
DepthProConfig.patch_model_config |
1 | 0 | 0 |
attr |
DepthProConfig.fov_model_config |
1 | 0 | 0 |
transformers.models.depth_pro.image_processing_depth_pro (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DepthProImageProcessor.__init__ |
10 | 8 | 0 |
meth |
DepthProImageProcessor.resize |
7 | 6 | 0 |
meth |
DepthProImageProcessor._validate_input_arguments |
10 | 9 | 0 |
meth |
DepthProImageProcessor.preprocess |
13 | 12 | 0 |
attr |
DepthProImageProcessor.do_resize |
1 | 0 | 0 |
attr |
DepthProImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
DepthProImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
DepthProImageProcessor.size |
1 | 0 | 0 |
attr |
DepthProImageProcessor.resample |
1 | 0 | 0 |
attr |
DepthProImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
DepthProImageProcessor.image_mean |
1 | 0 | 0 |
attr |
DepthProImageProcessor.image_std |
1 | 0 | 0 |
transformers.models.depth_pro.image_processing_depth_pro_fast (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DepthProImageProcessorFast._preprocess |
15 | 14 | 0 |
transformers.models.depth_pro.modeling_depth_pro (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DepthProModel.__init__ |
2 | 0 | 0 |
meth |
DepthProModel.get_input_embeddings |
1 | 0 | 0 |
meth |
DepthProModel.forward |
6 | 5 | 0 |
attr |
DepthProModel.encoder |
1 | 0 | 0 |
attr |
DepthProModel.neck |
1 | 0 | 0 |
meth |
DepthProForDepthEstimation.__init__ |
3 | 0 | 0 |
meth |
DepthProForDepthEstimation.forward |
7 | 6 | 0 |
attr |
DepthProForDepthEstimation.use_fov_model |
1 | 0 | 0 |
attr |
DepthProForDepthEstimation.depth_pro |
1 | 0 | 0 |
attr |
DepthProForDepthEstimation.fusion_stage |
1 | 0 | 0 |
attr |
DepthProForDepthEstimation.head |
1 | 0 | 0 |
attr |
DepthProForDepthEstimation.fov_model |
1 | 0 | 0 |
meth |
DepthProPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.detr.configuration_detr (61 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DetrConfig.__init__ |
32 | 0 | 0 |
attr |
DetrConfig.backbone_config |
1 | 0 | 0 |
attr |
DetrConfig.num_channels |
1 | 0 | 0 |
attr |
DetrConfig.num_queries |
1 | 0 | 0 |
attr |
DetrConfig.d_model |
1 | 0 | 0 |
attr |
DetrConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
DetrConfig.encoder_layers |
1 | 0 | 0 |
attr |
DetrConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
DetrConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
DetrConfig.decoder_layers |
1 | 0 | 0 |
attr |
DetrConfig.decoder_attention_heads |
1 | 0 | 0 |
attr |
DetrConfig.dropout |
1 | 0 | 0 |
attr |
DetrConfig.attention_dropout |
1 | 0 | 0 |
attr |
DetrConfig.activation_dropout |
1 | 0 | 0 |
attr |
DetrConfig.activation_function |
1 | 0 | 0 |
attr |
DetrConfig.init_std |
1 | 0 | 0 |
attr |
DetrConfig.init_xavier_std |
1 | 0 | 0 |
attr |
DetrConfig.encoder_layerdrop |
1 | 0 | 0 |
attr |
DetrConfig.decoder_layerdrop |
1 | 0 | 0 |
attr |
DetrConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
DetrConfig.auxiliary_loss |
1 | 0 | 0 |
attr |
DetrConfig.position_embedding_type |
1 | 0 | 0 |
attr |
DetrConfig.class_cost |
1 | 0 | 0 |
attr |
DetrConfig.bbox_cost |
1 | 0 | 0 |
attr |
DetrConfig.giou_cost |
1 | 0 | 0 |
attr |
DetrConfig.mask_loss_coefficient |
1 | 0 | 0 |
attr |
DetrConfig.dice_loss_coefficient |
1 | 0 | 0 |
attr |
DetrConfig.bbox_loss_coefficient |
1 | 0 | 0 |
attr |
DetrConfig.giou_loss_coefficient |
1 | 0 | 0 |
attr |
DetrConfig.eos_coefficient |
1 | 0 | 0 |
transformers.models.detr.image_processing_detr (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DetrImageProcessor.__init__ |
14 | 13 | 0 |
meth |
DetrImageProcessor.resize |
7 | 6 | 0 |
meth |
DetrImageProcessor.resize_annotation |
5 | 2 | 0 |
meth |
DetrImageProcessor._update_annotation_for_padded_image |
6 | 4 | 0 |
meth |
DetrImageProcessor.preprocess |
21 | 19 | 0 |
meth |
DetrImageProcessor.post_process_object_detection |
4 | 2 | 0 |
meth |
DetrImageProcessor.post_process_semantic_segmentation |
3 | 1 | 0 |
meth |
DetrImageProcessor.post_process_instance_segmentation |
7 | 6 | 0 |
meth |
DetrImageProcessor.post_process_panoptic_segmentation |
7 | 6 | 0 |
attr |
DetrImageProcessor.format |
1 | 0 | 0 |
attr |
DetrImageProcessor.do_resize |
1 | 0 | 0 |
attr |
DetrImageProcessor.size |
1 | 0 | 0 |
attr |
DetrImageProcessor.resample |
1 | 0 | 0 |
attr |
DetrImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
DetrImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
DetrImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
DetrImageProcessor.do_convert_annotations |
1 | 0 | 0 |
attr |
DetrImageProcessor.image_mean |
1 | 0 | 0 |
attr |
DetrImageProcessor.image_std |
1 | 0 | 0 |
attr |
DetrImageProcessor.do_pad |
1 | 0 | 0 |
attr |
DetrImageProcessor.pad_size |
1 | 0 | 0 |
transformers.models.detr.image_processing_detr_fast (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DetrImageProcessorFast.resize |
5 | 4 | 0 |
meth |
DetrImageProcessorFast.resize_annotation |
6 | 5 | 0 |
meth |
DetrImageProcessorFast._update_annotation_for_padded_image |
6 | 4 | 0 |
meth |
DetrImageProcessorFast.pad |
6 | 5 | 0 |
meth |
DetrImageProcessorFast._preprocess |
19 | 18 | 0 |
meth |
DetrImageProcessorFast.post_process_object_detection |
4 | 2 | 0 |
meth |
DetrImageProcessorFast.post_process_semantic_segmentation |
3 | 1 | 0 |
meth |
DetrImageProcessorFast.post_process_instance_segmentation |
7 | 6 | 0 |
meth |
DetrImageProcessorFast.post_process_panoptic_segmentation |
7 | 6 | 0 |
attr |
DetrImageProcessorFast.size |
1 | 0 | 0 |
attr |
DetrImageProcessorFast.do_convert_annotations |
1 | 0 | 0 |
transformers.models.detr.modeling_detr (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DetrPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
DetrForSegmentation.__init__ |
2 | 1 | 0 |
attr |
DetrForSegmentation.detr |
1 | 0 | 0 |
attr |
DetrForSegmentation.mask_head |
1 | 0 | 0 |
attr |
DetrForSegmentation.bbox_attention |
1 | 0 | 0 |
meth |
DetrModel.__init__ |
2 | 1 | 0 |
meth |
DetrModel.freeze_backbone |
1 | 0 | 0 |
meth |
DetrModel.unfreeze_backbone |
1 | 0 | 0 |
attr |
DetrModel.backbone |
1 | 0 | 0 |
attr |
DetrModel.query_position_embeddings |
1 | 0 | 0 |
attr |
DetrModel.input_projection |
1 | 0 | 0 |
attr |
DetrModel.encoder |
1 | 0 | 0 |
attr |
DetrModel.decoder |
1 | 0 | 0 |
attr |
DetrModel.position_embedding |
1 | 0 | 0 |
meth |
DetrForObjectDetection.__init__ |
2 | 1 | 0 |
attr |
DetrForObjectDetection.model |
1 | 0 | 0 |
attr |
DetrForObjectDetection.class_labels_classifier |
1 | 0 | 0 |
attr |
DetrForObjectDetection.bbox_predictor |
1 | 0 | 0 |
transformers.models.dia.configuration_dia (48 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DiaEncoderConfig.__init__ |
14 | 12 | 0 |
attr |
DiaEncoderConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
DiaEncoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
DiaEncoderConfig.hidden_size |
1 | 0 | 0 |
attr |
DiaEncoderConfig.intermediate_size |
1 | 0 | 0 |
attr |
DiaEncoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
DiaEncoderConfig.head_dim |
1 | 0 | 0 |
attr |
DiaEncoderConfig.norm_eps |
1 | 0 | 0 |
attr |
DiaEncoderConfig.vocab_size |
1 | 0 | 0 |
attr |
DiaEncoderConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
DiaEncoderConfig.hidden_act |
1 | 0 | 0 |
attr |
DiaEncoderConfig.initializer_range |
1 | 0 | 0 |
attr |
DiaEncoderConfig.rope_parameters |
1 | 0 | 0 |
meth |
DiaDecoderConfig.__init__ |
24 | 22 | 0 |
attr |
DiaDecoderConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
DiaDecoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
DiaDecoderConfig.hidden_size |
1 | 0 | 0 |
attr |
DiaDecoderConfig.intermediate_size |
1 | 0 | 0 |
attr |
DiaDecoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
DiaDecoderConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
DiaDecoderConfig.head_dim |
1 | 0 | 0 |
attr |
DiaDecoderConfig.cross_num_key_value_heads |
1 | 0 | 0 |
attr |
DiaDecoderConfig.cross_num_attention_heads |
1 | 0 | 0 |
attr |
DiaDecoderConfig.cross_head_dim |
1 | 0 | 0 |
attr |
DiaDecoderConfig.cross_hidden_size |
1 | 0 | 0 |
attr |
DiaDecoderConfig.norm_eps |
1 | 0 | 0 |
attr |
DiaDecoderConfig.vocab_size |
1 | 0 | 0 |
attr |
DiaDecoderConfig.hidden_act |
1 | 0 | 0 |
attr |
DiaDecoderConfig.num_channels |
1 | 0 | 0 |
attr |
DiaDecoderConfig.initializer_range |
1 | 0 | 0 |
attr |
DiaDecoderConfig.use_cache |
1 | 0 | 0 |
attr |
DiaDecoderConfig.rope_parameters |
1 | 0 | 0 |
attr |
DiaDecoderConfig.pad_token_id |
1 | 0 | 0 |
attr |
DiaDecoderConfig.eos_token_id |
1 | 0 | 0 |
attr |
DiaDecoderConfig.bos_token_id |
1 | 0 | 0 |
meth |
DiaConfig.__init__ |
12 | 10 | 0 |
meth |
DiaConfig.get_text_config |
3 | 0 | 0 |
attr |
DiaConfig.encoder_config |
1 | 0 | 0 |
attr |
DiaConfig.decoder_config |
1 | 0 | 0 |
attr |
DiaConfig.norm_eps |
1 | 0 | 0 |
attr |
DiaConfig.delay_pattern |
1 | 0 | 0 |
attr |
DiaConfig.initializer_range |
1 | 0 | 0 |
attr |
DiaConfig.use_cache |
1 | 0 | 0 |
transformers.models.dia.feature_extraction_dia (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DiaFeatureExtractor.__init__ |
6 | 4 | 0 |
attr |
DiaFeatureExtractor.hop_length |
1 | 0 | 0 |
transformers.models.dia.generation_dia (9 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DiaGenerationMixin._prepare_generation_config |
3 | 3 | 1 |
meth |
DiaGenerationMixin.prepare_inputs_for_generation |
5 | 0 | 0 |
meth |
DiaGenerationMixin._main_generate_loop |
13 | 11 | 0 |
meth |
DiaGenerationMixin.generate |
13 | 12 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.models.dia.modeling_dia (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DiaModel.__init__ |
2 | 1 | 0 |
meth |
DiaModel.forward |
13 | 12 | 0 |
attr |
DiaModel.encoder |
1 | 0 | 0 |
attr |
DiaModel.decoder |
1 | 0 | 0 |
meth |
DiaPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
DiaForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
DiaForConditionalGeneration.forward |
14 | 13 | 0 |
attr |
DiaForConditionalGeneration.model |
1 | 0 | 0 |
attr |
DiaForConditionalGeneration.num_channels |
1 | 0 | 0 |
attr |
DiaForConditionalGeneration.vocab_size |
1 | 0 | 0 |
attr |
DiaForConditionalGeneration.logits_dense |
1 | 0 | 0 |
attr |
DiaForConditionalGeneration.loss_type |
1 | 0 | 0 |
transformers.models.dia.modular_dia (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DiaModel.__init__ |
2 | 1 | 0 |
meth |
DiaModel.forward |
13 | 12 | 0 |
attr |
DiaModel.encoder |
1 | 0 | 0 |
attr |
DiaModel.decoder |
1 | 0 | 0 |
meth |
DiaPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
DiaForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
DiaForConditionalGeneration.forward |
14 | 13 | 0 |
attr |
DiaForConditionalGeneration.model |
1 | 0 | 0 |
attr |
DiaForConditionalGeneration.num_channels |
1 | 0 | 0 |
attr |
DiaForConditionalGeneration.vocab_size |
1 | 0 | 0 |
attr |
DiaForConditionalGeneration.logits_dense |
1 | 0 | 0 |
attr |
DiaForConditionalGeneration.loss_type |
1 | 0 | 0 |
transformers.models.dia.processing_dia (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DiaProcessor.__init__ |
4 | 0 | 0 |
meth |
DiaProcessor.__call__ |
5 | 4 | 0 |
meth |
DiaProcessor.save_audio |
4 | 3 | 0 |
transformers.models.dia.tokenization_dia (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DiaTokenizer.__init__ |
6 | 4 | 0 |
meth |
DiaTokenizer.get_vocab |
1 | 0 | 0 |
meth |
DiaTokenizer._convert_token_to_id |
2 | 0 | 0 |
meth |
DiaTokenizer._convert_id_to_token |
2 | 0 | 0 |
prop |
DiaTokenizer.vocab_size |
1 | 0 | 0 |
attr |
DiaTokenizer.offset |
1 | 0 | 0 |
transformers.models.diffllama.configuration_diffllama (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DiffLlamaConfig.__init__ |
22 | 20 | 0 |
attr |
DiffLlamaConfig.vocab_size |
1 | 0 | 0 |
attr |
DiffLlamaConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
DiffLlamaConfig.hidden_size |
1 | 0 | 0 |
attr |
DiffLlamaConfig.intermediate_size |
1 | 0 | 0 |
attr |
DiffLlamaConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
DiffLlamaConfig.num_attention_heads |
1 | 0 | 0 |
attr |
DiffLlamaConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
DiffLlamaConfig.hidden_act |
1 | 0 | 0 |
attr |
DiffLlamaConfig.initializer_range |
1 | 0 | 0 |
attr |
DiffLlamaConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
DiffLlamaConfig.use_cache |
1 | 0 | 0 |
attr |
DiffLlamaConfig.attention_bias |
1 | 0 | 0 |
attr |
DiffLlamaConfig.attention_dropout |
1 | 0 | 0 |
attr |
DiffLlamaConfig.lambda_std_dev |
1 | 0 | 0 |
attr |
DiffLlamaConfig.head_dim |
1 | 0 | 0 |
attr |
DiffLlamaConfig.rope_parameters |
1 | 0 | 0 |
attr |
DiffLlamaConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
DiffLlamaConfig.pad_token_id |
1 | 0 | 0 |
attr |
DiffLlamaConfig.bos_token_id |
1 | 0 | 0 |
attr |
DiffLlamaConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.diffllama.modeling_diffllama (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DiffLlamaForCausalLM.__init__ |
2 | 0 | 0 |
attr |
DiffLlamaForCausalLM.model |
1 | 0 | 0 |
attr |
DiffLlamaForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
DiffLlamaForCausalLM.lm_head |
1 | 0 | 0 |
meth |
DiffLlamaPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
DiffLlamaModel.__init__ |
2 | 1 | 0 |
attr |
DiffLlamaModel.padding_idx |
1 | 0 | 0 |
attr |
DiffLlamaModel.vocab_size |
1 | 0 | 0 |
attr |
DiffLlamaModel.embed_tokens |
1 | 0 | 0 |
attr |
DiffLlamaModel.layers |
1 | 0 | 0 |
attr |
DiffLlamaModel.norm |
1 | 0 | 0 |
attr |
DiffLlamaModel.rotary_emb |
1 | 0 | 0 |
attr |
DiffLlamaModel.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.diffllama.modular_diffllama (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DiffLlamaPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.dinat.configuration_dinat (39 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DinatConfig.init |
20 | 0 | 0 |
attr |
DinatConfig.patch_size |
1 | 0 | 0 |
attr |
DinatConfig.num_channels |
1 | 0 | 0 |
attr |
DinatConfig.embed_dim |
1 | 0 | 0 |
attr |
DinatConfig.depths |
1 | 0 | 0 |
attr |
DinatConfig.num_layers |
1 | 0 | 0 |
attr |
DinatConfig.num_heads |
1 | 0 | 0 |
attr |
DinatConfig.kernel_size |
1 | 0 | 0 |
attr |
DinatConfig.dilations |
1 | 0 | 0 |
attr |
DinatConfig.mlp_ratio |
1 | 0 | 0 |
attr |
DinatConfig.qkv_bias |
1 | 0 | 0 |
attr |
DinatConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
DinatConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
DinatConfig.drop_path_rate |
1 | 0 | 0 |
attr |
DinatConfig.hidden_act |
1 | 0 | 0 |
attr |
DinatConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
DinatConfig.initializer_range |
1 | 0 | 0 |
attr |
DinatConfig.hidden_size |
1 | 0 | 0 |
attr |
DinatConfig.layer_scale_init_value |
1 | 0 | 0 |
attr |
DinatConfig.stage_names |
1 | 0 | 0 |
transformers.models.dinat.modeling_dinat (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DinatBackbone.init |
2 | 0 | 0 |
meth |
DinatBackbone.get_input_embeddings |
1 | 0 | 0 |
meth |
DinatBackbone.forward |
6 | 5 | 0 |
attr |
DinatBackbone.embeddings |
1 | 0 | 0 |
attr |
DinatBackbone.encoder |
1 | 0 | 0 |
attr |
DinatBackbone.num_features |
1 | 0 | 0 |
attr |
DinatBackbone.hidden_states_norms |
1 | 0 | 0 |
meth |
DinatModel.init |
3 | 0 | 0 |
meth |
DinatModel.get_input_embeddings |
1 | 0 | 0 |
meth |
DinatModel.forward |
6 | 5 | 0 |
attr |
DinatModel.num_levels |
1 | 0 | 0 |
attr |
DinatModel.num_features |
1 | 0 | 0 |
attr |
DinatModel.embeddings |
1 | 0 | 0 |
attr |
DinatModel.encoder |
1 | 0 | 0 |
attr |
DinatModel.layernorm |
1 | 0 | 0 |
attr |
DinatModel.pooler |
1 | 0 | 0 |
meth |
DinatForImageClassification.init |
2 | 0 | 0 |
meth |
DinatForImageClassification.forward |
7 | 6 | 0 |
attr |
DinatForImageClassification.num_labels |
1 | 0 | 0 |
attr |
DinatForImageClassification.dinat |
1 | 0 | 0 |
attr |
DinatForImageClassification.classifier |
1 | 0 | 0 |
transformers.models.dinov2.configuration_dinov2 (43 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Dinov2Config.init |
23 | 0 | 0 |
attr |
Dinov2Config.hidden_size |
1 | 0 | 0 |
attr |
Dinov2Config.num_hidden_layers |
1 | 0 | 0 |
attr |
Dinov2Config.num_attention_heads |
1 | 0 | 0 |
attr |
Dinov2Config.mlp_ratio |
1 | 0 | 0 |
attr |
Dinov2Config.hidden_act |
1 | 0 | 0 |
attr |
Dinov2Config.hidden_dropout_prob |
1 | 0 | 0 |
attr |
Dinov2Config.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
Dinov2Config.initializer_range |
1 | 0 | 0 |
attr |
Dinov2Config.layer_norm_eps |
1 | 0 | 0 |
attr |
Dinov2Config.image_size |
1 | 0 | 0 |
attr |
Dinov2Config.patch_size |
1 | 0 | 0 |
attr |
Dinov2Config.num_channels |
1 | 0 | 0 |
attr |
Dinov2Config.qkv_bias |
1 | 0 | 0 |
attr |
Dinov2Config.layerscale_value |
1 | 0 | 0 |
attr |
Dinov2Config.drop_path_rate |
1 | 0 | 0 |
attr |
Dinov2Config.use_swiglu_ffn |
1 | 0 | 0 |
attr |
Dinov2Config.stage_names |
1 | 0 | 0 |
attr |
Dinov2Config.apply_layernorm |
1 | 0 | 0 |
attr |
Dinov2Config.reshape_hidden_states |
1 | 0 | 0 |
attr |
Dinov2Config.use_mask_token |
1 | 0 | 0 |
transformers.models.dinov2.modeling_dinov2 (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Dinov2Backbone.init |
2 | 0 | 0 |
meth |
Dinov2Backbone.forward |
4 | 3 | 0 |
attr |
Dinov2Backbone.num_features |
1 | 0 | 0 |
attr |
Dinov2Backbone.embeddings |
1 | 0 | 0 |
attr |
Dinov2Backbone.encoder |
1 | 0 | 0 |
attr |
Dinov2Backbone.layernorm |
1 | 0 | 0 |
attr |
Dinov2ForImageClassification.num_labels |
1 | 0 | 0 |
attr |
Dinov2ForImageClassification.dinov2 |
1 | 0 | 0 |
attr |
Dinov2ForImageClassification.classifier |
1 | 0 | 0 |
meth |
Dinov2Model.init |
2 | 1 | 0 |
meth |
Dinov2Model.forward |
5 | 4 | 0 |
attr |
Dinov2Model.embeddings |
1 | 0 | 0 |
attr |
Dinov2Model.encoder |
1 | 0 | 0 |
attr |
Dinov2Model.layernorm |
1 | 0 | 0 |
transformers.models.dinov2_with_registers.configuration_dinov2_with_registers (43 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Dinov2WithRegistersConfig.init |
23 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.hidden_size |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.mlp_ratio |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.hidden_act |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.initializer_range |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.image_size |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.patch_size |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.num_channels |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.qkv_bias |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.layerscale_value |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.drop_path_rate |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.use_swiglu_ffn |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.num_register_tokens |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.stage_names |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.apply_layernorm |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.reshape_hidden_states |
1 | 0 | 0 |
transformers.models.dinov2_with_registers.modeling_dinov2_with_registers (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Dinov2WithRegistersModel.init |
2 | 1 | 0 |
meth |
Dinov2WithRegistersModel.forward |
5 | 4 | 0 |
attr |
Dinov2WithRegistersModel.embeddings |
1 | 0 | 0 |
attr |
Dinov2WithRegistersModel.encoder |
1 | 0 | 0 |
attr |
Dinov2WithRegistersModel.layernorm |
1 | 0 | 0 |
attr |
Dinov2WithRegistersForImageClassification.num_labels |
1 | 0 | 0 |
attr |
Dinov2WithRegistersForImageClassification.dinov2_with_registers |
1 | 0 | 0 |
attr |
Dinov2WithRegistersForImageClassification.classifier |
1 | 0 | 0 |
meth |
Dinov2WithRegistersBackbone.init |
2 | 0 | 0 |
meth |
Dinov2WithRegistersBackbone.forward |
4 | 3 | 0 |
attr |
Dinov2WithRegistersBackbone.num_features |
1 | 0 | 0 |
attr |
Dinov2WithRegistersBackbone.embeddings |
1 | 0 | 0 |
attr |
Dinov2WithRegistersBackbone.encoder |
1 | 0 | 0 |
attr |
Dinov2WithRegistersBackbone.layernorm |
1 | 0 | 0 |
attr |
Dinov2WithRegistersBackbone.num_register_tokens |
1 | 0 | 0 |
transformers.models.dinov2_with_registers.modular_dinov2_with_registers (51 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Dinov2WithRegistersBackbone.init |
2 | 0 | 0 |
meth |
Dinov2WithRegistersBackbone.forward |
4 | 3 | 0 |
attr |
Dinov2WithRegistersBackbone.num_register_tokens |
1 | 0 | 0 |
attr |
Dinov2WithRegistersBackbone.num_features |
1 | 0 | 0 |
attr |
Dinov2WithRegistersBackbone.embeddings |
1 | 0 | 0 |
attr |
Dinov2WithRegistersBackbone.encoder |
1 | 0 | 0 |
attr |
Dinov2WithRegistersBackbone.layernorm |
1 | 0 | 0 |
meth |
Dinov2WithRegistersConfig.init |
23 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.hidden_size |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.mlp_ratio |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.hidden_act |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.initializer_range |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.image_size |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.patch_size |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.num_channels |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.qkv_bias |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.layerscale_value |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.drop_path_rate |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.use_swiglu_ffn |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.num_register_tokens |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.stage_names |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.apply_layernorm |
1 | 0 | 0 |
attr |
Dinov2WithRegistersConfig.reshape_hidden_states |
1 | 0 | 0 |
transformers.models.dinov3_convnext.configuration_dinov3_convnext (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DINOv3ConvNextConfig.init |
13 | 11 | 0 |
attr |
DINOv3ConvNextConfig.num_channels |
1 | 0 | 0 |
attr |
DINOv3ConvNextConfig.hidden_sizes |
1 | 0 | 0 |
attr |
DINOv3ConvNextConfig.depths |
1 | 0 | 0 |
attr |
DINOv3ConvNextConfig.hidden_act |
1 | 0 | 0 |
attr |
DINOv3ConvNextConfig.initializer_range |
1 | 0 | 0 |
attr |
DINOv3ConvNextConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
DINOv3ConvNextConfig.layer_scale_init_value |
1 | 0 | 0 |
attr |
DINOv3ConvNextConfig.drop_path_rate |
1 | 0 | 0 |
attr |
DINOv3ConvNextConfig.image_size |
1 | 0 | 0 |
attr |
DINOv3ConvNextConfig.stage_names |
1 | 0 | 0 |
transformers.models.dinov3_convnext.modeling_dinov3_convnext (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DINOv3ConvNextPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
DINOv3ConvNextModel.init |
2 | 1 | 0 |
meth |
DINOv3ConvNextModel.forward |
4 | 3 | 0 |
attr |
DINOv3ConvNextModel.stages |
1 | 0 | 0 |
attr |
DINOv3ConvNextModel.layer_norm |
1 | 0 | 0 |
attr |
DINOv3ConvNextModel.pool |
1 | 0 | 0 |
meth |
DINOv3ConvNextBackbone.init |
2 | 1 | 0 |
meth |
DINOv3ConvNextBackbone.get_input_embeddings |
1 | 0 | 0 |
meth |
DINOv3ConvNextBackbone.forward |
4 | 3 | 0 |
attr |
DINOv3ConvNextBackbone.num_features |
1 | 0 | 0 |
attr |
DINOv3ConvNextBackbone.stages |
1 | 0 | 0 |
transformers.models.dinov3_vit.configuration_dinov3_vit (29 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DINOv3ViTConfig.init |
30 | 28 | 0 |
attr |
DINOv3ViTConfig.image_size |
1 | 0 | 0 |
attr |
DINOv3ViTConfig.patch_size |
1 | 0 | 0 |
attr |
DINOv3ViTConfig.num_channels |
1 | 0 | 0 |
attr |
DINOv3ViTConfig.hidden_size |
1 | 0 | 0 |
attr |
DINOv3ViTConfig.intermediate_size |
1 | 0 | 0 |
attr |
DINOv3ViTConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
DINOv3ViTConfig.num_attention_heads |
1 | 0 | 0 |
attr |
DINOv3ViTConfig.hidden_act |
1 | 0 | 0 |
attr |
DINOv3ViTConfig.attention_dropout |
1 | 0 | 0 |
attr |
DINOv3ViTConfig.initializer_range |
1 | 0 | 0 |
attr |
DINOv3ViTConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
DINOv3ViTConfig.layerscale_value |
1 | 0 | 0 |
attr |
DINOv3ViTConfig.drop_path_rate |
1 | 0 | 0 |
attr |
DINOv3ViTConfig.use_gated_mlp |
1 | 0 | 0 |
attr |
DINOv3ViTConfig.rope_theta |
1 | 0 | 0 |
attr |
DINOv3ViTConfig.query_bias |
1 | 0 | 0 |
attr |
DINOv3ViTConfig.key_bias |
1 | 0 | 0 |
attr |
DINOv3ViTConfig.value_bias |
1 | 0 | 0 |
attr |
DINOv3ViTConfig.proj_bias |
1 | 0 | 0 |
attr |
DINOv3ViTConfig.mlp_bias |
1 | 0 | 0 |
attr |
DINOv3ViTConfig.num_register_tokens |
1 | 0 | 0 |
attr |
DINOv3ViTConfig.pos_embed_shift |
1 | 0 | 0 |
attr |
DINOv3ViTConfig.pos_embed_jitter |
1 | 0 | 0 |
attr |
DINOv3ViTConfig.pos_embed_rescale |
1 | 0 | 0 |
attr |
DINOv3ViTConfig.apply_layernorm |
1 | 0 | 0 |
attr |
DINOv3ViTConfig.reshape_hidden_states |
1 | 0 | 0 |
attr |
DINOv3ViTConfig.stage_names |
1 | 0 | 0 |
transformers.models.dinov3_vit.image_processing_dinov3_vit_fast (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DINOv3ViTImageProcessorFast._preprocess |
15 | 14 | 0 |
transformers.models.dinov3_vit.modeling_dinov3_vit (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DINOv3ViTModel.init |
2 | 1 | 0 |
meth |
DINOv3ViTModel.get_input_embeddings |
1 | 0 | 0 |
attr |
DINOv3ViTModel.embeddings |
1 | 0 | 0 |
attr |
DINOv3ViTModel.rope_embeddings |
1 | 0 | 0 |
attr |
DINOv3ViTModel.layer |
1 | 0 | 0 |
attr |
DINOv3ViTModel.norm |
1 | 0 | 0 |
attr |
DINOv3ViTModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
DINOv3ViTPreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
DINOv3ViTBackbone.init |
2 | 0 | 0 |
meth |
DINOv3ViTBackbone.get_input_embeddings |
1 | 0 | 0 |
attr |
DINOv3ViTBackbone.embeddings |
1 | 0 | 0 |
attr |
DINOv3ViTBackbone.rope_embeddings |
1 | 0 | 0 |
attr |
DINOv3ViTBackbone.layer |
1 | 0 | 0 |
attr |
DINOv3ViTBackbone.norm |
1 | 0 | 0 |
attr |
DINOv3ViTBackbone.gradient_checkpointing |
1 | 0 | 0 |
attr |
DINOv3ViTBackbone.num_features |
1 | 0 | 0 |
transformers.models.dinov3_vit.modular_dinov3_vit (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DINOv3ViTModel.init |
2 | 1 | 0 |
meth |
DINOv3ViTModel.get_input_embeddings |
1 | 0 | 0 |
attr |
DINOv3ViTModel.config |
1 | 0 | 0 |
attr |
DINOv3ViTModel.embeddings |
1 | 0 | 0 |
attr |
DINOv3ViTModel.rope_embeddings |
1 | 0 | 0 |
attr |
DINOv3ViTModel.layer |
1 | 0 | 0 |
attr |
DINOv3ViTModel.norm |
1 | 0 | 0 |
attr |
DINOv3ViTModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
DINOv3ViTPreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
DINOv3ViTBackbone.init |
2 | 0 | 0 |
meth |
DINOv3ViTBackbone.get_input_embeddings |
1 | 0 | 0 |
attr |
DINOv3ViTBackbone.embeddings |
1 | 0 | 0 |
attr |
DINOv3ViTBackbone.rope_embeddings |
1 | 0 | 0 |
attr |
DINOv3ViTBackbone.layer |
1 | 0 | 0 |
attr |
DINOv3ViTBackbone.norm |
1 | 0 | 0 |
attr |
DINOv3ViTBackbone.gradient_checkpointing |
1 | 0 | 0 |
attr |
DINOv3ViTBackbone.num_features |
1 | 0 | 0 |
transformers.models.distilbert.configuration_distilbert (36 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DistilBertConfig.init |
19 | 0 | 0 |
attr |
DistilBertConfig.vocab_size |
1 | 0 | 0 |
attr |
DistilBertConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
DistilBertConfig.sinusoidal_pos_embds |
1 | 0 | 0 |
attr |
DistilBertConfig.n_layers |
1 | 0 | 0 |
attr |
DistilBertConfig.n_heads |
1 | 0 | 0 |
attr |
DistilBertConfig.dim |
1 | 0 | 0 |
attr |
DistilBertConfig.hidden_dim |
1 | 0 | 0 |
attr |
DistilBertConfig.dropout |
1 | 0 | 0 |
attr |
DistilBertConfig.attention_dropout |
1 | 0 | 0 |
attr |
DistilBertConfig.activation |
1 | 0 | 0 |
attr |
DistilBertConfig.initializer_range |
1 | 0 | 0 |
attr |
DistilBertConfig.qa_dropout |
1 | 0 | 0 |
attr |
DistilBertConfig.seq_classif_dropout |
1 | 0 | 0 |
attr |
DistilBertConfig.pad_token_id |
1 | 0 | 0 |
attr |
DistilBertConfig.eos_token_id |
1 | 0 | 0 |
attr |
DistilBertConfig.bos_token_id |
1 | 0 | 0 |
attr |
DistilBertConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.distilbert.modeling_distilbert (39 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DistilBertForSequenceClassification.init |
2 | 1 | 0 |
meth |
DistilBertForSequenceClassification.resize_position_embeddings |
2 | 1 | 0 |
attr |
DistilBertForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
DistilBertForSequenceClassification.distilbert |
1 | 0 | 0 |
attr |
DistilBertForSequenceClassification.pre_classifier |
1 | 0 | 0 |
attr |
DistilBertForSequenceClassification.classifier |
1 | 0 | 0 |
attr |
DistilBertForSequenceClassification.dropout |
1 | 0 | 0 |
meth |
DistilBertForMultipleChoice.init |
2 | 1 | 0 |
meth |
DistilBertForMultipleChoice.resize_position_embeddings |
2 | 1 | 0 |
attr |
DistilBertForMultipleChoice.distilbert |
1 | 0 | 0 |
attr |
DistilBertForMultipleChoice.pre_classifier |
1 | 0 | 0 |
attr |
DistilBertForMultipleChoice.classifier |
1 | 0 | 0 |
attr |
DistilBertForMultipleChoice.dropout |
1 | 0 | 0 |
meth |
DistilBertForQuestionAnswering.init |
2 | 1 | 0 |
meth |
DistilBertForQuestionAnswering.resize_position_embeddings |
2 | 1 | 0 |
attr |
DistilBertForQuestionAnswering.distilbert |
1 | 0 | 0 |
attr |
DistilBertForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
attr |
DistilBertForQuestionAnswering.dropout |
1 | 0 | 0 |
meth |
DistilBertForTokenClassification.init |
2 | 1 | 0 |
meth |
DistilBertForTokenClassification.resize_position_embeddings |
2 | 1 | 0 |
attr |
DistilBertForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
DistilBertForTokenClassification.distilbert |
1 | 0 | 0 |
attr |
DistilBertForTokenClassification.dropout |
1 | 0 | 0 |
attr |
DistilBertForTokenClassification.classifier |
1 | 0 | 0 |
meth |
DistilBertPreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
DistilBertForMaskedLM.init |
2 | 1 | 0 |
meth |
DistilBertForMaskedLM.resize_position_embeddings |
2 | 1 | 0 |
meth |
DistilBertForMaskedLM.set_output_embeddings |
2 | 1 | 0 |
attr |
DistilBertForMaskedLM.activation |
1 | 0 | 0 |
attr |
DistilBertForMaskedLM.distilbert |
1 | 0 | 0 |
attr |
DistilBertForMaskedLM.vocab_transform |
1 | 0 | 0 |
attr |
DistilBertForMaskedLM.vocab_layer_norm |
1 | 0 | 0 |
attr |
DistilBertForMaskedLM.vocab_projector |
1 | 0 | 0 |
attr |
DistilBertForMaskedLM.mlm_loss_fct |
1 | 0 | 0 |
meth |
DistilBertModel.init |
2 | 1 | 0 |
meth |
DistilBertModel.resize_position_embeddings |
2 | 1 | 0 |
meth |
DistilBertModel.set_input_embeddings |
2 | 1 | 0 |
attr |
DistilBertModel.embeddings |
1 | 0 | 0 |
attr |
DistilBertModel.transformer |
1 | 0 | 0 |
transformers.models.distilbert.tokenization_distilbert (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DistilBertTokenizer.init |
4 | 1 | 0 |
transformers.models.doge.configuration_doge (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DogeConfig.init |
30 | 28 | 0 |
attr |
DogeConfig.vocab_size |
1 | 0 | 0 |
attr |
DogeConfig.hidden_size |
1 | 0 | 0 |
attr |
DogeConfig.intermediate_size |
1 | 0 | 0 |
attr |
DogeConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
DogeConfig.hidden_dropout |
1 | 0 | 0 |
attr |
DogeConfig.hidden_act |
1 | 0 | 0 |
attr |
DogeConfig.initializer_range |
1 | 0 | 0 |
attr |
DogeConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
DogeConfig.use_cache |
1 | 0 | 0 |
attr |
DogeConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
DogeConfig.num_attention_heads |
1 | 0 | 0 |
attr |
DogeConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
DogeConfig.attention_bias |
1 | 0 | 0 |
attr |
DogeConfig.attention_dropout |
1 | 0 | 0 |
attr |
DogeConfig.mlp_bias |
1 | 0 | 0 |
attr |
DogeConfig.sliding_window |
1 | 0 | 0 |
attr |
DogeConfig.keep_window_size |
1 | 0 | 0 |
attr |
DogeConfig.is_moe |
1 | 0 | 0 |
attr |
DogeConfig.num_experts |
1 | 0 | 0 |
attr |
DogeConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
DogeConfig.norm_topk_prob |
1 | 0 | 0 |
attr |
DogeConfig.output_router_logits |
1 | 0 | 0 |
attr |
DogeConfig.router_aux_loss_coef |
1 | 0 | 0 |
attr |
DogeConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
DogeConfig.pad_token_id |
1 | 0 | 0 |
attr |
DogeConfig.bos_token_id |
1 | 0 | 0 |
attr |
DogeConfig.eos_token_id |
1 | 0 | 0 |
attr |
DogeConfig.rope_parameters |
1 | 0 | 0 |
transformers.models.doge.modeling_doge (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DogeForCausalLM.init |
2 | 0 | 0 |
attr |
DogeForCausalLM.model |
1 | 0 | 0 |
attr |
DogeForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
DogeForCausalLM.lm_head |
1 | 0 | 0 |
attr |
DogeForCausalLM.router_aux_loss_coef |
1 | 0 | 0 |
attr |
DogeForCausalLM.num_experts |
1 | 0 | 0 |
attr |
DogeForCausalLM.num_experts_per_tok |
1 | 0 | 0 |
meth |
DogePreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
DogePreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
DogeModel.init |
2 | 1 | 0 |
attr |
DogeModel.padding_idx |
1 | 0 | 0 |
attr |
DogeModel.vocab_size |
1 | 0 | 0 |
attr |
DogeModel.embed_tokens |
1 | 0 | 0 |
attr |
DogeModel.layers |
1 | 0 | 0 |
attr |
DogeModel.norm |
1 | 0 | 0 |
attr |
DogeModel.rotary_emb |
1 | 0 | 0 |
attr |
DogeModel.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.doge.modular_doge (37 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DogePreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
DogePreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
DogeForCausalLM.init |
2 | 0 | 0 |
attr |
DogeForCausalLM.model |
1 | 0 | 0 |
attr |
DogeForCausalLM.num_experts |
1 | 0 | 0 |
meth |
DogeConfig.init |
30 | 28 | 0 |
attr |
DogeConfig.vocab_size |
1 | 0 | 0 |
attr |
DogeConfig.hidden_size |
1 | 0 | 0 |
attr |
DogeConfig.intermediate_size |
1 | 0 | 0 |
attr |
DogeConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
DogeConfig.hidden_dropout |
1 | 0 | 0 |
attr |
DogeConfig.hidden_act |
1 | 0 | 0 |
attr |
DogeConfig.initializer_range |
1 | 0 | 0 |
attr |
DogeConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
DogeConfig.use_cache |
1 | 0 | 0 |
attr |
DogeConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
DogeConfig.num_attention_heads |
1 | 0 | 0 |
attr |
DogeConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
DogeConfig.attention_bias |
1 | 0 | 0 |
attr |
DogeConfig.attention_dropout |
1 | 0 | 0 |
attr |
DogeConfig.mlp_bias |
1 | 0 | 0 |
attr |
DogeConfig.sliding_window |
1 | 0 | 0 |
attr |
DogeConfig.keep_window_size |
1 | 0 | 0 |
attr |
DogeConfig.is_moe |
1 | 0 | 0 |
attr |
DogeConfig.num_experts |
1 | 0 | 0 |
attr |
DogeConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
DogeConfig.norm_topk_prob |
1 | 0 | 0 |
attr |
DogeConfig.output_router_logits |
1 | 0 | 0 |
attr |
DogeConfig.router_aux_loss_coef |
1 | 0 | 0 |
attr |
DogeConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
DogeConfig.pad_token_id |
1 | 0 | 0 |
attr |
DogeConfig.bos_token_id |
1 | 0 | 0 |
attr |
DogeConfig.eos_token_id |
1 | 0 | 0 |
attr |
DogeConfig.rope_parameters |
1 | 0 | 0 |
transformers.models.donut.configuration_donut_swin (36 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DonutSwinConfig.init |
18 | 0 | 0 |
attr |
DonutSwinConfig.image_size |
1 | 0 | 0 |
attr |
DonutSwinConfig.patch_size |
1 | 0 | 0 |
attr |
DonutSwinConfig.num_channels |
1 | 0 | 0 |
attr |
DonutSwinConfig.embed_dim |
1 | 0 | 0 |
attr |
DonutSwinConfig.depths |
1 | 0 | 0 |
attr |
DonutSwinConfig.num_layers |
1 | 0 | 0 |
attr |
DonutSwinConfig.num_heads |
1 | 0 | 0 |
attr |
DonutSwinConfig.window_size |
1 | 0 | 0 |
attr |
DonutSwinConfig.mlp_ratio |
1 | 0 | 0 |
attr |
DonutSwinConfig.qkv_bias |
1 | 0 | 0 |
attr |
DonutSwinConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
DonutSwinConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
DonutSwinConfig.drop_path_rate |
1 | 0 | 0 |
attr |
DonutSwinConfig.hidden_act |
1 | 0 | 0 |
attr |
DonutSwinConfig.use_absolute_embeddings |
1 | 0 | 0 |
attr |
DonutSwinConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
DonutSwinConfig.initializer_range |
1 | 0 | 0 |
attr |
DonutSwinConfig.hidden_size |
1 | 0 | 0 |
transformers.models.donut.image_processing_donut (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DonutImageProcessor.init |
13 | 12 | 0 |
meth |
DonutImageProcessor.thumbnail |
7 | 6 | 0 |
meth |
DonutImageProcessor.resize |
7 | 6 | 0 |
attr |
DonutImageProcessor.do_resize |
1 | 0 | 0 |
attr |
DonutImageProcessor.size |
1 | 0 | 0 |
attr |
DonutImageProcessor.resample |
1 | 0 | 0 |
attr |
DonutImageProcessor.do_thumbnail |
1 | 0 | 0 |
attr |
DonutImageProcessor.do_align_long_axis |
1 | 0 | 0 |
attr |
DonutImageProcessor.do_pad |
1 | 0 | 0 |
attr |
DonutImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
DonutImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
DonutImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
DonutImageProcessor.image_mean |
1 | 0 | 0 |
attr |
DonutImageProcessor.image_std |
1 | 0 | 0 |
transformers.models.donut.image_processing_donut_fast (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DonutImageProcessorFast.init |
2 | 1 | 0 |
meth |
DonutImageProcessorFast._preprocess |
18 | 17 | 0 |
transformers.models.donut.modeling_donut_swin (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DonutSwinPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
DonutSwinForImageClassification.init |
2 | 0 | 0 |
meth |
DonutSwinForImageClassification.forward |
8 | 7 | 0 |
attr |
DonutSwinForImageClassification.num_labels |
1 | 0 | 0 |
attr |
DonutSwinForImageClassification.donut |
1 | 0 | 0 |
attr |
DonutSwinForImageClassification.classifier |
1 | 0 | 0 |
meth |
DonutSwinModel.init |
4 | 0 | 0 |
meth |
DonutSwinModel.get_input_embeddings |
1 | 0 | 0 |
meth |
DonutSwinModel.forward |
8 | 7 | 0 |
attr |
DonutSwinModel.num_layers |
1 | 0 | 0 |
attr |
DonutSwinModel.num_features |
1 | 0 | 0 |
attr |
DonutSwinModel.embeddings |
1 | 0 | 0 |
attr |
DonutSwinModel.encoder |
1 | 0 | 0 |
attr |
DonutSwinModel.pooler |
1 | 0 | 0 |
transformers.models.donut.processing_donut (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DonutProcessor.init |
4 | 0 | 0 |
meth |
DonutProcessor.call |
4 | 3 | 0 |
meth |
DonutProcessor.token2json |
4 | 0 | 0 |
prop |
DonutProcessor.model_input_names |
1 | 0 | 0 |
transformers.models.dots1.configuration_dots1 (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Dots1Config.init |
32 | 30 | 0 |
attr |
Dots1Config.vocab_size |
1 | 0 | 0 |
attr |
Dots1Config.max_position_embeddings |
1 | 0 | 0 |
attr |
Dots1Config.hidden_size |
1 | 0 | 0 |
attr |
Dots1Config.intermediate_size |
1 | 0 | 0 |
attr |
Dots1Config.moe_intermediate_size |
1 | 0 | 0 |
attr |
Dots1Config.num_hidden_layers |
1 | 0 | 0 |
attr |
Dots1Config.num_attention_heads |
1 | 0 | 0 |
attr |
Dots1Config.n_shared_experts |
1 | 0 | 0 |
attr |
Dots1Config.n_routed_experts |
1 | 0 | 0 |
attr |
Dots1Config.num_experts_per_tok |
1 | 0 | 0 |
attr |
Dots1Config.first_k_dense_replace |
1 | 0 | 0 |
attr |
Dots1Config.norm_topk_prob |
1 | 0 | 0 |
attr |
Dots1Config.n_group |
1 | 0 | 0 |
attr |
Dots1Config.topk_group |
1 | 0 | 0 |
attr |
Dots1Config.num_key_value_heads |
1 | 0 | 0 |
attr |
Dots1Config.hidden_act |
1 | 0 | 0 |
attr |
Dots1Config.initializer_range |
1 | 0 | 0 |
attr |
Dots1Config.rms_norm_eps |
1 | 0 | 0 |
attr |
Dots1Config.use_cache |
1 | 0 | 0 |
attr |
Dots1Config.attention_bias |
1 | 0 | 0 |
attr |
Dots1Config.attention_dropout |
1 | 0 | 0 |
attr |
Dots1Config.routed_scaling_factor |
1 | 0 | 0 |
attr |
Dots1Config.sliding_window |
1 | 0 | 0 |
attr |
Dots1Config.max_window_layers |
1 | 0 | 0 |
attr |
Dots1Config.layer_types |
1 | 0 | 0 |
attr |
Dots1Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
Dots1Config.pad_token_id |
1 | 0 | 0 |
attr |
Dots1Config.bos_token_id |
1 | 0 | 0 |
attr |
Dots1Config.eos_token_id |
1 | 0 | 0 |
attr |
Dots1Config.rope_parameters |
1 | 0 | 0 |
transformers.models.dots1.modeling_dots1 (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Dots1PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Dots1Model.init |
2 | 1 | 0 |
attr |
Dots1Model.padding_idx |
1 | 0 | 0 |
attr |
Dots1Model.vocab_size |
1 | 0 | 0 |
attr |
Dots1Model.embed_tokens |
1 | 0 | 0 |
attr |
Dots1Model.layers |
1 | 0 | 0 |
attr |
Dots1Model.norm |
1 | 0 | 0 |
attr |
Dots1Model.rotary_emb |
1 | 0 | 0 |
attr |
Dots1Model.gradient_checkpointing |
1 | 0 | 0 |
attr |
Dots1Model.has_sliding_layers |
1 | 0 | 0 |
meth |
Dots1ForCausalLM.init |
2 | 0 | 0 |
attr |
Dots1ForCausalLM.model |
1 | 0 | 0 |
attr |
Dots1ForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Dots1ForCausalLM.lm_head |
1 | 0 | 0 |
transformers.models.dpr.configuration_dpr (37 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DPRConfig.init |
20 | 1 | 0 |
attr |
DPRConfig.pad_token_id |
1 | 0 | 0 |
attr |
DPRConfig.bos_token_id |
1 | 0 | 0 |
attr |
DPRConfig.eos_token_id |
1 | 0 | 0 |
attr |
DPRConfig.is_decoder |
1 | 0 | 0 |
attr |
DPRConfig.add_cross_attention |
1 | 0 | 0 |
attr |
DPRConfig.vocab_size |
1 | 0 | 0 |
attr |
DPRConfig.hidden_size |
1 | 0 | 0 |
attr |
DPRConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
DPRConfig.num_attention_heads |
1 | 0 | 0 |
attr |
DPRConfig.hidden_act |
1 | 0 | 0 |
attr |
DPRConfig.intermediate_size |
1 | 0 | 0 |
attr |
DPRConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
DPRConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
DPRConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
DPRConfig.type_vocab_size |
1 | 0 | 0 |
attr |
DPRConfig.initializer_range |
1 | 0 | 0 |
attr |
DPRConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
DPRConfig.projection_dim |
1 | 0 | 0 |
transformers.models.dpr.modeling_dpr (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DPRContextEncoder.init |
2 | 1 | 0 |
meth |
DPRContextEncoder.forward |
9 | 8 | 0 |
attr |
DPRContextEncoder.ctx_encoder |
1 | 0 | 0 |
meth |
DPRQuestionEncoder.init |
2 | 1 | 0 |
meth |
DPRQuestionEncoder.forward |
9 | 8 | 0 |
attr |
DPRQuestionEncoder.question_encoder |
1 | 0 | 0 |
meth |
DPRReader.init |
2 | 1 | 0 |
meth |
DPRReader.forward |
8 | 7 | 0 |
attr |
DPRReader.span_predictor |
1 | 0 | 0 |
transformers.models.dpr.tokenization_dpr (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DPRReaderTokenizer.init |
4 | 0 | 0 |
attr |
DPRReaderTokenizer.do_lower_case |
1 | 0 | 0 |
meth |
DPRQuestionEncoderTokenizer.init |
4 | 0 | 0 |
attr |
DPRQuestionEncoderTokenizer.do_lower_case |
1 | 0 | 0 |
meth |
DPRContextEncoderTokenizer.init |
4 | 0 | 0 |
attr |
DPRContextEncoderTokenizer.do_lower_case |
1 | 0 | 0 |
transformers.models.dpt.configuration_dpt (66 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DPTConfig.init |
34 | 0 | 0 |
attr |
DPTConfig.hidden_size |
1 | 0 | 0 |
attr |
DPTConfig.is_hybrid |
1 | 0 | 0 |
attr |
DPTConfig.backbone_config |
1 | 0 | 0 |
attr |
DPTConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
DPTConfig.num_attention_heads |
1 | 0 | 0 |
attr |
DPTConfig.intermediate_size |
1 | 0 | 0 |
attr |
DPTConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
DPTConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
DPTConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
DPTConfig.image_size |
1 | 0 | 0 |
attr |
DPTConfig.patch_size |
1 | 0 | 0 |
attr |
DPTConfig.num_channels |
1 | 0 | 0 |
attr |
DPTConfig.qkv_bias |
1 | 0 | 0 |
attr |
DPTConfig.backbone_out_indices |
1 | 0 | 0 |
attr |
DPTConfig.backbone_featmap_shape |
1 | 0 | 0 |
attr |
DPTConfig.neck_ignore_stages |
1 | 0 | 0 |
attr |
DPTConfig.hidden_act |
1 | 0 | 0 |
attr |
DPTConfig.initializer_range |
1 | 0 | 0 |
attr |
DPTConfig.readout_type |
1 | 0 | 0 |
attr |
DPTConfig.reassemble_factors |
1 | 0 | 0 |
attr |
DPTConfig.neck_hidden_sizes |
1 | 0 | 0 |
attr |
DPTConfig.fusion_hidden_size |
1 | 0 | 0 |
attr |
DPTConfig.head_in_index |
1 | 0 | 0 |
attr |
DPTConfig.use_batch_norm_in_fusion_residual |
1 | 0 | 0 |
attr |
DPTConfig.use_bias_in_fusion_residual |
1 | 0 | 0 |
attr |
DPTConfig.add_projection |
1 | 0 | 0 |
attr |
DPTConfig.use_auxiliary_head |
1 | 0 | 0 |
attr |
DPTConfig.auxiliary_loss_weight |
1 | 0 | 0 |
attr |
DPTConfig.semantic_loss_ignore_index |
1 | 0 | 0 |
attr |
DPTConfig.semantic_classifier_dropout |
1 | 0 | 0 |
attr |
DPTConfig.pooler_output_size |
1 | 0 | 0 |
attr |
DPTConfig.pooler_act |
1 | 0 | 0 |
transformers.models.dpt.image_processing_dpt (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DPTImageProcessor.init |
15 | 14 | 0 |
meth |
DPTImageProcessor.resize |
9 | 8 | 0 |
meth |
DPTImageProcessor.pad_image |
5 | 4 | 0 |
meth |
DPTImageProcessor._preprocess |
16 | 15 | 0 |
meth |
DPTImageProcessor._preprocess_segmentation_map |
9 | 8 | 0 |
meth |
DPTImageProcessor.call |
4 | 0 | 0 |
meth |
DPTImageProcessor.post_process_semantic_segmentation |
3 | 1 | 0 |
attr |
DPTImageProcessor.do_resize |
1 | 0 | 0 |
attr |
DPTImageProcessor.size |
1 | 0 | 0 |
attr |
DPTImageProcessor.keep_aspect_ratio |
1 | 0 | 0 |
attr |
DPTImageProcessor.ensure_multiple_of |
1 | 0 | 0 |
attr |
DPTImageProcessor.resample |
1 | 0 | 0 |
attr |
DPTImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
DPTImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
DPTImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
DPTImageProcessor.image_mean |
1 | 0 | 0 |
attr |
DPTImageProcessor.image_std |
1 | 0 | 0 |
attr |
DPTImageProcessor.do_pad |
1 | 0 | 0 |
attr |
DPTImageProcessor.size_divisor |
1 | 0 | 0 |
attr |
DPTImageProcessor.do_reduce_labels |
1 | 0 | 0 |
transformers.models.dpt.image_processing_dpt_fast (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DPTImageProcessorFast.__init__ |
2 | 1 | 0 |
meth |
DPTImageProcessorFast.reduce_label |
2 | 1 | 0 |
meth |
DPTImageProcessorFast._preprocess |
20 | 19 | 0 |
meth |
DPTImageProcessorFast.post_process_semantic_segmentation |
3 | 1 | 0 |
transformers.models.dpt.modeling_dpt (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DPTPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
DPTModel.__init__ |
3 | 2 | 0 |
meth |
DPTModel.get_input_embeddings |
1 | 0 | 0 |
meth |
DPTModel.forward |
4 | 3 | 0 |
attr |
DPTModel.encoder |
1 | 0 | 0 |
attr |
DPTModel.layernorm |
1 | 0 | 0 |
attr |
DPTModel.pooler |
1 | 0 | 0 |
attr |
DPTModel.embeddings |
1 | 0 | 0 |
meth |
DPTForDepthEstimation.__init__ |
2 | 0 | 0 |
meth |
DPTForDepthEstimation.forward |
5 | 4 | 0 |
attr |
DPTForDepthEstimation.backbone |
1 | 0 | 0 |
attr |
DPTForDepthEstimation.neck |
1 | 0 | 0 |
attr |
DPTForDepthEstimation.head |
1 | 0 | 0 |
attr |
DPTForDepthEstimation.dpt |
1 | 0 | 0 |
meth |
DPTForSemanticSegmentation.__init__ |
2 | 1 | 0 |
meth |
DPTForSemanticSegmentation.forward |
5 | 4 | 0 |
attr |
DPTForSemanticSegmentation.dpt |
1 | 0 | 0 |
attr |
DPTForSemanticSegmentation.neck |
1 | 0 | 0 |
attr |
DPTForSemanticSegmentation.head |
1 | 0 | 0 |
attr |
DPTForSemanticSegmentation.auxiliary_head |
1 | 0 | 0 |
transformers.models.dpt.modular_dpt (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DPTImageProcessorFast._preprocess |
20 | 19 | 0 |
transformers.models.edgetam.configuration_edgetam (80 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EdgeTamPromptEncoderConfig.__init__ |
10 | 0 | 0 |
attr |
EdgeTamPromptEncoderConfig.hidden_size |
1 | 0 | 0 |
attr |
EdgeTamPromptEncoderConfig.image_size |
1 | 0 | 0 |
attr |
EdgeTamPromptEncoderConfig.patch_size |
1 | 0 | 0 |
attr |
EdgeTamPromptEncoderConfig.mask_input_channels |
1 | 0 | 0 |
attr |
EdgeTamPromptEncoderConfig.num_point_embeddings |
1 | 0 | 0 |
attr |
EdgeTamPromptEncoderConfig.hidden_act |
1 | 0 | 0 |
attr |
EdgeTamPromptEncoderConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
EdgeTamPromptEncoderConfig.scale |
1 | 0 | 0 |
meth |
EdgeTamMaskDecoderConfig.__init__ |
14 | 0 | 0 |
attr |
EdgeTamMaskDecoderConfig.hidden_size |
1 | 0 | 0 |
attr |
EdgeTamMaskDecoderConfig.num_multimask_outputs |
1 | 0 | 0 |
attr |
EdgeTamMaskDecoderConfig.hidden_act |
1 | 0 | 0 |
attr |
EdgeTamMaskDecoderConfig.iou_head_depth |
1 | 0 | 0 |
attr |
EdgeTamMaskDecoderConfig.iou_head_hidden_dim |
1 | 0 | 0 |
attr |
EdgeTamMaskDecoderConfig.dynamic_multimask_via_stability |
1 | 0 | 0 |
attr |
EdgeTamMaskDecoderConfig.dynamic_multimask_stability_delta |
1 | 0 | 0 |
attr |
EdgeTamMaskDecoderConfig.dynamic_multimask_stability_thresh |
1 | 0 | 0 |
attr |
EdgeTamMaskDecoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
EdgeTamMaskDecoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
EdgeTamMaskDecoderConfig.mlp_dim |
1 | 0 | 0 |
attr |
EdgeTamMaskDecoderConfig.attention_downsample_rate |
1 | 0 | 0 |
meth |
EdgeTamConfig.__init__ |
6 | 0 | 0 |
attr |
EdgeTamConfig.vision_config |
1 | 0 | 0 |
attr |
EdgeTamConfig.prompt_encoder_config |
1 | 0 | 0 |
attr |
EdgeTamConfig.mask_decoder_config |
1 | 0 | 0 |
attr |
EdgeTamConfig.initializer_range |
1 | 0 | 0 |
meth |
EdgeTamVisionConfig.__init__ |
14 | 0 | 0 |
attr |
EdgeTamVisionConfig.backbone_config |
1 | 0 | 0 |
attr |
EdgeTamVisionConfig.backbone_channel_list |
1 | 0 | 0 |
attr |
EdgeTamVisionConfig.backbone_feature_sizes |
1 | 0 | 0 |
attr |
EdgeTamVisionConfig.fpn_hidden_size |
1 | 0 | 0 |
attr |
EdgeTamVisionConfig.fpn_kernel_size |
1 | 0 | 0 |
attr |
EdgeTamVisionConfig.fpn_stride |
1 | 0 | 0 |
attr |
EdgeTamVisionConfig.fpn_padding |
1 | 0 | 0 |
attr |
EdgeTamVisionConfig.fpn_top_down_levels |
1 | 0 | 0 |
attr |
EdgeTamVisionConfig.num_feature_levels |
1 | 0 | 0 |
attr |
EdgeTamVisionConfig.hidden_act |
1 | 0 | 0 |
attr |
EdgeTamVisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
EdgeTamVisionConfig.initializer_range |
1 | 0 | 0 |
transformers.models.edgetam.modeling_edgetam (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EdgeTamVisionModel.__init__ |
2 | 1 | 0 |
attr |
EdgeTamVisionModel.config |
1 | 0 | 0 |
attr |
EdgeTamVisionModel.backbone |
1 | 0 | 0 |
attr |
EdgeTamVisionModel.neck |
1 | 0 | 0 |
attr |
EdgeTamVisionModel.num_feature_levels |
1 | 0 | 0 |
meth |
EdgeTamModel.__init__ |
2 | 1 | 0 |
meth |
EdgeTamModel.get_prompt_embeddings |
5 | 4 | 0 |
attr |
EdgeTamModel._can_record_outputs |
1 | 0 | 0 |
attr |
EdgeTamModel.shared_image_embedding |
1 | 0 | 0 |
attr |
EdgeTamModel.vision_encoder |
1 | 0 | 0 |
attr |
EdgeTamModel.prompt_encoder |
1 | 0 | 0 |
attr |
EdgeTamModel.mask_decoder |
1 | 0 | 0 |
attr |
EdgeTamModel.num_feature_levels |
1 | 0 | 0 |
attr |
EdgeTamModel.backbone_feature_sizes |
1 | 0 | 0 |
attr |
EdgeTamModel.hidden_dim |
1 | 0 | 0 |
attr |
EdgeTamModel.no_memory_embedding |
1 | 0 | 0 |
meth |
EdgeTamPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.edgetam.modular_edgetam (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EdgeTamVisionConfig.__init__ |
14 | 0 | 0 |
attr |
EdgeTamVisionConfig.backbone_config |
1 | 0 | 0 |
attr |
EdgeTamVisionConfig.backbone_channel_list |
1 | 0 | 0 |
attr |
EdgeTamVisionConfig.backbone_feature_sizes |
1 | 0 | 0 |
attr |
EdgeTamVisionConfig.fpn_hidden_size |
1 | 0 | 0 |
attr |
EdgeTamVisionConfig.fpn_kernel_size |
1 | 0 | 0 |
attr |
EdgeTamVisionConfig.fpn_stride |
1 | 0 | 0 |
attr |
EdgeTamVisionConfig.fpn_padding |
1 | 0 | 0 |
attr |
EdgeTamVisionConfig.fpn_top_down_levels |
1 | 0 | 0 |
attr |
EdgeTamVisionConfig.num_feature_levels |
1 | 0 | 0 |
attr |
EdgeTamVisionConfig.hidden_act |
1 | 0 | 0 |
attr |
EdgeTamVisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
EdgeTamVisionConfig.initializer_range |
1 | 0 | 0 |
meth |
EdgeTamVisionModel.get_input_embeddings |
1 | 0 | 0 |
meth |
EdgeTamModel.get_input_embeddings |
1 | 0 | 0 |
meth |
EdgeTamPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.edgetam_video.configuration_edgetam_video (148 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EdgeTamVideoConfig.__init__ |
53 | 0 | 0 |
attr |
EdgeTamVideoConfig.vision_config |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.prompt_encoder_config |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.mask_decoder_config |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.initializer_range |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.num_maskmem |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.image_size |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.sigmoid_scale_for_mem_enc |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.sigmoid_bias_for_mem_enc |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.enable_occlusion_spatial_embedding |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.multimask_output_in_sam |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.multimask_min_pt_num |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.multimask_max_pt_num |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.multimask_output_for_tracking |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.max_object_pointers_in_encoder |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.max_cond_frame_num |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.enable_temporal_pos_encoding_for_object_pointers |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_attention_hidden_size |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_attention_num_layers |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_attention_num_attention_heads |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_attention_downsample_rate |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_attention_mlp_hidden_size |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_attention_mlp_hidden_act |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_attention_dropout |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_attention_rope_theta |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_attention_rope_feat_sizes |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_attention_rope_k_sizes |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_attention_rope_dropout |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.perceiver_resampler_num_latents |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.perceiver_resampler_num_latents_2d |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.perceiver_resampler_hidden_size |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.perceiver_resampler_mlp_intermediate_size |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.perceiver_resampler_attention_head_dim |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.perceiver_resampler_num_attention_heads |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.perceiver_resampler_num_layers |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.perceiver_resampler_hidden_dropout |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.perceiver_resampler_attention_dropout |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_encoder_hidden_size |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_encoder_output_channels |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.mask_downsampler_embed_dim |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.mask_downsampler_kernel_size |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.mask_downsampler_stride |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.mask_downsampler_padding |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.mask_downsampler_total_stride |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.mask_downsampler_hidden_act |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_fuser_num_layers |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_fuser_embed_dim |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_fuser_intermediate_dim |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_fuser_kernel_size |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_fuser_padding |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_fuser_layer_scale_init_value |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_fuser_hidden_act |
1 | 0 | 0 |
meth |
EdgeTamVideoMaskDecoderConfig.__init__ |
14 | 0 | 0 |
attr |
EdgeTamVideoMaskDecoderConfig.hidden_size |
1 | 0 | 0 |
attr |
EdgeTamVideoMaskDecoderConfig.num_multimask_outputs |
1 | 0 | 0 |
attr |
EdgeTamVideoMaskDecoderConfig.hidden_act |
1 | 0 | 0 |
attr |
EdgeTamVideoMaskDecoderConfig.iou_head_depth |
1 | 0 | 0 |
attr |
EdgeTamVideoMaskDecoderConfig.iou_head_hidden_dim |
1 | 0 | 0 |
attr |
EdgeTamVideoMaskDecoderConfig.dynamic_multimask_via_stability |
1 | 0 | 0 |
attr |
EdgeTamVideoMaskDecoderConfig.dynamic_multimask_stability_delta |
1 | 0 | 0 |
attr |
EdgeTamVideoMaskDecoderConfig.dynamic_multimask_stability_thresh |
1 | 0 | 0 |
attr |
EdgeTamVideoMaskDecoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
EdgeTamVideoMaskDecoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
EdgeTamVideoMaskDecoderConfig.mlp_dim |
1 | 0 | 0 |
attr |
EdgeTamVideoMaskDecoderConfig.attention_downsample_rate |
1 | 0 | 0 |
meth |
EdgeTamVideoPromptEncoderConfig.__init__ |
10 | 0 | 0 |
attr |
EdgeTamVideoPromptEncoderConfig.hidden_size |
1 | 0 | 0 |
attr |
EdgeTamVideoPromptEncoderConfig.image_size |
1 | 0 | 0 |
attr |
EdgeTamVideoPromptEncoderConfig.patch_size |
1 | 0 | 0 |
attr |
EdgeTamVideoPromptEncoderConfig.mask_input_channels |
1 | 0 | 0 |
attr |
EdgeTamVideoPromptEncoderConfig.num_point_embeddings |
1 | 0 | 0 |
attr |
EdgeTamVideoPromptEncoderConfig.hidden_act |
1 | 0 | 0 |
attr |
EdgeTamVideoPromptEncoderConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
EdgeTamVideoPromptEncoderConfig.scale |
1 | 0 | 0 |
transformers.models.edgetam_video.modeling_edgetam_video (56 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EdgeTamVideoModel.__init__ |
2 | 1 | 0 |
meth |
EdgeTamVideoModel.get_input_embeddings |
1 | 0 | 0 |
meth |
EdgeTamVideoModel.forward |
6 | 5 | 0 |
meth |
EdgeTamVideoModel._select_closest_cond_frames |
4 | 0 | 0 |
attr |
EdgeTamVideoModel._can_record_outputs |
1 | 0 | 0 |
attr |
EdgeTamVideoModel.shared_image_embedding |
1 | 0 | 0 |
attr |
EdgeTamVideoModel.vision_encoder |
1 | 0 | 0 |
attr |
EdgeTamVideoModel.prompt_encoder |
1 | 0 | 0 |
attr |
EdgeTamVideoModel.mask_decoder |
1 | 0 | 0 |
attr |
EdgeTamVideoModel.num_feature_levels |
1 | 0 | 0 |
attr |
EdgeTamVideoModel.backbone_feature_sizes |
1 | 0 | 0 |
attr |
EdgeTamVideoModel.hidden_dim |
1 | 0 | 0 |
attr |
EdgeTamVideoModel.no_memory_embedding |
1 | 0 | 0 |
attr |
EdgeTamVideoModel.config |
1 | 0 | 0 |
attr |
EdgeTamVideoModel.image_size |
1 | 0 | 0 |
attr |
EdgeTamVideoModel.memory_attention |
1 | 0 | 0 |
attr |
EdgeTamVideoModel.memory_encoder |
1 | 0 | 0 |
attr |
EdgeTamVideoModel.no_memory_positional_encoding |
1 | 0 | 0 |
attr |
EdgeTamVideoModel.mem_dim |
1 | 0 | 0 |
attr |
EdgeTamVideoModel.num_maskmem |
1 | 0 | 0 |
attr |
EdgeTamVideoModel.memory_temporal_positional_encoding |
1 | 0 | 0 |
attr |
EdgeTamVideoModel.no_object_pointer |
1 | 0 | 0 |
attr |
EdgeTamVideoModel.mask_downsample |
1 | 0 | 0 |
attr |
EdgeTamVideoModel.object_pointer_proj |
1 | 0 | 0 |
attr |
EdgeTamVideoModel.occlusion_spatial_embedding_parameter |
1 | 0 | 0 |
attr |
EdgeTamVideoModel.spatial_perceiver |
1 | 0 | 0 |
attr |
EdgeTamVideoModel.temporal_positional_encoding_projection_layer |
1 | 0 | 0 |
meth |
EdgeTamVideoInferenceSession.__init__ |
9 | 8 | 0 |
meth |
EdgeTamVideoInferenceSession.add_point_inputs |
4 | 3 | 0 |
meth |
EdgeTamVideoInferenceSession.remove_point_inputs |
3 | 2 | 0 |
meth |
EdgeTamVideoInferenceSession.add_mask_inputs |
4 | 3 | 0 |
meth |
EdgeTamVideoInferenceSession.remove_mask_inputs |
3 | 2 | 0 |
meth |
EdgeTamVideoInferenceSession.store_output |
6 | 5 | 0 |
meth |
EdgeTamVideoInferenceSession.get_output |
5 | 4 | 0 |
meth |
EdgeTamVideoInferenceSession.reset_tracking_data |
1 | 0 | 0 |
meth |
EdgeTamVideoInferenceSession.reset_inference_session |
1 | 0 | 0 |
attr |
EdgeTamVideoInferenceSession.processed_frames |
1 | 0 | 0 |
attr |
EdgeTamVideoInferenceSession.video_height |
1 | 0 | 0 |
attr |
EdgeTamVideoInferenceSession.video_width |
1 | 0 | 0 |
attr |
EdgeTamVideoInferenceSession.inference_device |
1 | 0 | 0 |
attr |
EdgeTamVideoInferenceSession.inference_state_device |
1 | 0 | 0 |
attr |
EdgeTamVideoInferenceSession.video_storage_device |
1 | 0 | 0 |
attr |
EdgeTamVideoInferenceSession.dtype |
1 | 0 | 0 |
attr |
EdgeTamVideoInferenceSession.max_vision_features_cache_size |
1 | 0 | 0 |
attr |
EdgeTamVideoInferenceSession.cache |
1 | 0 | 0 |
attr |
EdgeTamVideoInferenceSession.obj_ids |
1 | 0 | 0 |
attr |
EdgeTamVideoInferenceSession.point_inputs_per_obj |
1 | 0 | 0 |
attr |
EdgeTamVideoInferenceSession.mask_inputs_per_obj |
1 | 0 | 0 |
attr |
EdgeTamVideoInferenceSession.output_dict_per_obj |
1 | 0 | 0 |
attr |
EdgeTamVideoInferenceSession.frames_tracked_per_obj |
1 | 0 | 0 |
attr |
EdgeTamVideoInferenceSession.obj_with_new_inputs |
1 | 0 | 0 |
meth |
EdgeTamVideoPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.edgetam_video.modular_edgetam_video (111 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EdgeTamVideoConfig.__init__ |
53 | 0 | 0 |
attr |
EdgeTamVideoConfig.vision_config |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.prompt_encoder_config |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.mask_decoder_config |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.initializer_range |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.num_maskmem |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.image_size |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.sigmoid_scale_for_mem_enc |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.sigmoid_bias_for_mem_enc |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.enable_occlusion_spatial_embedding |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.multimask_output_in_sam |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.multimask_min_pt_num |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.multimask_max_pt_num |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.multimask_output_for_tracking |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.max_object_pointers_in_encoder |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.max_cond_frame_num |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.enable_temporal_pos_encoding_for_object_pointers |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_attention_hidden_size |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_attention_num_layers |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_attention_num_attention_heads |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_attention_downsample_rate |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_attention_mlp_hidden_size |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_attention_mlp_hidden_act |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_attention_dropout |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_attention_rope_theta |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_attention_rope_feat_sizes |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_attention_rope_k_sizes |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_attention_rope_dropout |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.perceiver_resampler_num_latents |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.perceiver_resampler_num_latents_2d |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.perceiver_resampler_hidden_size |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.perceiver_resampler_mlp_intermediate_size |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.perceiver_resampler_attention_head_dim |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.perceiver_resampler_num_attention_heads |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.perceiver_resampler_num_layers |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.perceiver_resampler_hidden_dropout |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.perceiver_resampler_attention_dropout |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_encoder_hidden_size |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_encoder_output_channels |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.mask_downsampler_embed_dim |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.mask_downsampler_kernel_size |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.mask_downsampler_stride |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.mask_downsampler_padding |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.mask_downsampler_total_stride |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.mask_downsampler_hidden_act |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_fuser_num_layers |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_fuser_embed_dim |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_fuser_intermediate_dim |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_fuser_kernel_size |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_fuser_padding |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_fuser_layer_scale_init_value |
1 | 0 | 0 |
attr |
EdgeTamVideoConfig.memory_fuser_hidden_act |
1 | 0 | 0 |
meth |
EdgeTamVideoPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
EdgeTamVideoModel.__init__ |
2 | 1 | 0 |
meth |
EdgeTamVideoModel.forward |
6 | 5 | 0 |
meth |
EdgeTamVideoModel._batch_encode_memories |
1 | 0 | 0 |
attr |
EdgeTamVideoModel._can_record_outputs |
1 | 0 | 0 |
attr |
EdgeTamVideoModel.spatial_perceiver |
1 | 0 | 0 |
transformers.models.efficientloftr.configuration_efficientloftr (33 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EfficientLoFTRConfig.__init__ |
26 | 24 | 0 |
attr |
EfficientLoFTRConfig.stage_num_blocks |
1 | 0 | 0 |
attr |
EfficientLoFTRConfig.stage_stride |
1 | 0 | 0 |
attr |
EfficientLoFTRConfig.out_features |
1 | 0 | 0 |
attr |
EfficientLoFTRConfig.stage_in_channels |
1 | 0 | 0 |
attr |
EfficientLoFTRConfig.stage_block_stride |
1 | 0 | 0 |
attr |
EfficientLoFTRConfig.stage_block_out_channels |
1 | 0 | 0 |
attr |
EfficientLoFTRConfig.stage_block_in_channels |
1 | 0 | 0 |
attr |
EfficientLoFTRConfig.fine_fusion_dims |
1 | 0 | 0 |
attr |
EfficientLoFTRConfig.hidden_size |
1 | 0 | 0 |
attr |
EfficientLoFTRConfig.activation_function |
1 | 0 | 0 |
attr |
EfficientLoFTRConfig.q_aggregation_kernel_size |
1 | 0 | 0 |
attr |
EfficientLoFTRConfig.kv_aggregation_kernel_size |
1 | 0 | 0 |
attr |
EfficientLoFTRConfig.q_aggregation_stride |
1 | 0 | 0 |
attr |
EfficientLoFTRConfig.kv_aggregation_stride |
1 | 0 | 0 |
attr |
EfficientLoFTRConfig.num_attention_layers |
1 | 0 | 0 |
attr |
EfficientLoFTRConfig.num_attention_heads |
1 | 0 | 0 |
attr |
EfficientLoFTRConfig.attention_dropout |
1 | 0 | 0 |
attr |
EfficientLoFTRConfig.attention_bias |
1 | 0 | 0 |
attr |
EfficientLoFTRConfig.intermediate_size |
1 | 0 | 0 |
attr |
EfficientLoFTRConfig.mlp_activation_function |
1 | 0 | 0 |
attr |
EfficientLoFTRConfig.coarse_matching_skip_softmax |
1 | 0 | 0 |
attr |
EfficientLoFTRConfig.coarse_matching_threshold |
1 | 0 | 0 |
attr |
EfficientLoFTRConfig.coarse_matching_temperature |
1 | 0 | 0 |
attr |
EfficientLoFTRConfig.coarse_matching_border_removal |
1 | 0 | 0 |
attr |
EfficientLoFTRConfig.fine_kernel_size |
1 | 0 | 0 |
attr |
EfficientLoFTRConfig.batch_norm_eps |
1 | 0 | 0 |
attr |
EfficientLoFTRConfig.fine_matching_slice_dim |
1 | 0 | 0 |
attr |
EfficientLoFTRConfig.fine_matching_regress_temperature |
1 | 0 | 0 |
attr |
EfficientLoFTRConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
EfficientLoFTRConfig.initializer_range |
1 | 0 | 0 |
attr |
EfficientLoFTRConfig.rope_parameters |
1 | 0 | 0 |
transformers.models.efficientloftr.image_processing_efficientloftr (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EfficientLoFTRImageProcessor.__init__ |
8 | 7 | 0 |
meth |
EfficientLoFTRImageProcessor.resize |
6 | 4 | 0 |
meth |
EfficientLoFTRImageProcessor.preprocess |
12 | 10 | 0 |
meth |
EfficientLoFTRImageProcessor._get_color |
2 | 0 | 0 |
attr |
EfficientLoFTRImageProcessor.do_resize |
1 | 0 | 0 |
attr |
EfficientLoFTRImageProcessor.size |
1 | 0 | 0 |
attr |
EfficientLoFTRImageProcessor.resample |
1 | 0 | 0 |
attr |
EfficientLoFTRImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
EfficientLoFTRImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
EfficientLoFTRImageProcessor.do_grayscale |
1 | 0 | 0 |
transformers.models.efficientloftr.image_processing_efficientloftr_fast (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EfficientLoFTRImageProcessorFast.__init__ |
2 | 1 | 0 |
meth |
EfficientLoFTRImageProcessorFast._prepare_images_structure |
3 | 2 | 0 |
meth |
EfficientLoFTRImageProcessorFast._preprocess |
11 | 10 | 0 |
meth |
EfficientLoFTRImageProcessorFast.visualize_keypoint_matching |
3 | 2 | 0 |
meth |
EfficientLoFTRImageProcessorFast._get_color |
2 | 0 | 0 |
transformers.models.efficientloftr.modeling_efficientloftr (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EfficientLoFTRModel.__init__ |
2 | 1 | 0 |
attr |
EfficientLoFTRModel.config |
1 | 0 | 0 |
attr |
EfficientLoFTRModel.backbone |
1 | 0 | 0 |
attr |
EfficientLoFTRModel.local_feature_transformer |
1 | 0 | 0 |
attr |
EfficientLoFTRModel.rotary_emb |
1 | 0 | 0 |
meth |
EfficientLoFTRForKeypointMatching.__init__ |
2 | 1 | 0 |
attr |
EfficientLoFTRForKeypointMatching.config |
1 | 0 | 0 |
attr |
EfficientLoFTRForKeypointMatching.efficientloftr |
1 | 0 | 0 |
attr |
EfficientLoFTRForKeypointMatching.refinement_layer |
1 | 0 | 0 |
transformers.models.efficientnet.configuration_efficientnet (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EfficientNetConfig.__init__ |
23 | 21 | 0 |
attr |
EfficientNetConfig.num_channels |
1 | 0 | 0 |
attr |
EfficientNetConfig.image_size |
1 | 0 | 0 |
attr |
EfficientNetConfig.width_coefficient |
1 | 0 | 0 |
attr |
EfficientNetConfig.depth_coefficient |
1 | 0 | 0 |
attr |
EfficientNetConfig.depth_divisor |
1 | 0 | 0 |
attr |
EfficientNetConfig.kernel_sizes |
1 | 0 | 0 |
attr |
EfficientNetConfig.in_channels |
1 | 0 | 0 |
attr |
EfficientNetConfig.out_channels |
1 | 0 | 0 |
attr |
EfficientNetConfig.depthwise_padding |
1 | 0 | 0 |
attr |
EfficientNetConfig.strides |
1 | 0 | 0 |
attr |
EfficientNetConfig.num_block_repeats |
1 | 0 | 0 |
attr |
EfficientNetConfig.expand_ratios |
1 | 0 | 0 |
attr |
EfficientNetConfig.squeeze_expansion_ratio |
1 | 0 | 0 |
attr |
EfficientNetConfig.hidden_act |
1 | 0 | 0 |
attr |
EfficientNetConfig.hidden_dim |
1 | 0 | 0 |
attr |
EfficientNetConfig.pooling_type |
1 | 0 | 0 |
attr |
EfficientNetConfig.initializer_range |
1 | 0 | 0 |
attr |
EfficientNetConfig.batch_norm_eps |
1 | 0 | 0 |
attr |
EfficientNetConfig.batch_norm_momentum |
1 | 0 | 0 |
attr |
EfficientNetConfig.dropout_rate |
1 | 0 | 0 |
attr |
EfficientNetConfig.drop_connect_rate |
1 | 0 | 0 |
attr |
EfficientNetConfig.num_hidden_layers |
1 | 0 | 0 |
transformers.models.efficientnet.image_processing_efficientnet (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EfficientNetImageProcessor.__init__ |
14 | 13 | 0 |
meth |
EfficientNetImageProcessor.resize |
7 | 6 | 0 |
meth |
EfficientNetImageProcessor.rescale |
7 | 5 | 0 |
meth |
EfficientNetImageProcessor.preprocess |
17 | 16 | 0 |
attr |
EfficientNetImageProcessor.do_resize |
1 | 0 | 0 |
attr |
EfficientNetImageProcessor.size |
1 | 0 | 0 |
attr |
EfficientNetImageProcessor.resample |
1 | 0 | 0 |
attr |
EfficientNetImageProcessor.do_center_crop |
1 | 0 | 0 |
attr |
EfficientNetImageProcessor.crop_size |
1 | 0 | 0 |
attr |
EfficientNetImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
EfficientNetImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
EfficientNetImageProcessor.rescale_offset |
1 | 0 | 0 |
attr |
EfficientNetImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
EfficientNetImageProcessor.image_mean |
1 | 0 | 0 |
attr |
EfficientNetImageProcessor.image_std |
1 | 0 | 0 |
attr |
EfficientNetImageProcessor.include_top |
1 | 0 | 0 |
transformers.models.efficientnet.image_processing_efficientnet_fast (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EfficientNetImageProcessorFast.__init__ |
2 | 1 | 0 |
meth |
EfficientNetImageProcessorFast.rescale |
5 | 4 | 0 |
meth |
EfficientNetImageProcessorFast._preprocess |
17 | 16 | 0 |
transformers.models.efficientnet.modeling_efficientnet (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EfficientNetModel.__init__ |
2 | 1 | 0 |
meth |
EfficientNetModel.forward |
5 | 4 | 0 |
attr |
EfficientNetModel.embeddings |
1 | 0 | 0 |
attr |
EfficientNetModel.encoder |
1 | 0 | 0 |
attr |
EfficientNetModel.pooler |
1 | 0 | 0 |
meth |
EfficientNetPreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
EfficientNetForImageClassification.__init__ |
2 | 0 | 0 |
meth |
EfficientNetForImageClassification.forward |
6 | 5 | 0 |
attr |
EfficientNetForImageClassification.num_labels |
1 | 0 | 0 |
attr |
EfficientNetForImageClassification.efficientnet |
1 | 0 | 0 |
attr |
EfficientNetForImageClassification.dropout |
1 | 0 | 0 |
attr |
EfficientNetForImageClassification.classifier |
1 | 0 | 0 |
transformers.models.electra.configuration_electra (52 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ElectraConfig.__init__ |
27 | 0 | 0 |
attr |
ElectraConfig.pad_token_id |
1 | 0 | 0 |
attr |
ElectraConfig.bos_token_id |
1 | 0 | 0 |
attr |
ElectraConfig.eos_token_id |
1 | 0 | 0 |
attr |
ElectraConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
ElectraConfig.is_decoder |
1 | 0 | 0 |
attr |
ElectraConfig.add_cross_attention |
1 | 0 | 0 |
attr |
ElectraConfig.vocab_size |
1 | 0 | 0 |
attr |
ElectraConfig.embedding_size |
1 | 0 | 0 |
attr |
ElectraConfig.hidden_size |
1 | 0 | 0 |
attr |
ElectraConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
ElectraConfig.num_attention_heads |
1 | 0 | 0 |
attr |
ElectraConfig.intermediate_size |
1 | 0 | 0 |
attr |
ElectraConfig.hidden_act |
1 | 0 | 0 |
attr |
ElectraConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
ElectraConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
ElectraConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
ElectraConfig.type_vocab_size |
1 | 0 | 0 |
attr |
ElectraConfig.initializer_range |
1 | 0 | 0 |
attr |
ElectraConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
ElectraConfig.summary_type |
1 | 0 | 0 |
attr |
ElectraConfig.summary_use_proj |
1 | 0 | 0 |
attr |
ElectraConfig.summary_activation |
1 | 0 | 0 |
attr |
ElectraConfig.summary_last_dropout |
1 | 0 | 0 |
attr |
ElectraConfig.use_cache |
1 | 0 | 0 |
attr |
ElectraConfig.classifier_dropout |
1 | 0 | 0 |
transformers.models.electra.modeling_electra (61 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ElectraForPreTraining.__init__ |
2 | 0 | 0 |
attr |
ElectraForPreTraining.electra |
1 | 0 | 0 |
attr |
ElectraForPreTraining.discriminator_predictions |
1 | 0 | 0 |
meth |
ElectraForTokenClassification.__init__ |
2 | 0 | 0 |
attr |
ElectraForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
ElectraForTokenClassification.electra |
1 | 0 | 0 |
attr |
ElectraForTokenClassification.dropout |
1 | 0 | 0 |
attr |
ElectraForTokenClassification.classifier |
1 | 0 | 0 |
meth |
ElectraForSequenceClassification.__init__ |
2 | 0 | 0 |
attr |
ElectraForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
ElectraForSequenceClassification.config |
1 | 0 | 0 |
attr |
ElectraForSequenceClassification.electra |
1 | 0 | 0 |
attr |
ElectraForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
ElectraForMultipleChoice.__init__ |
2 | 0 | 0 |
attr |
ElectraForMultipleChoice.electra |
1 | 0 | 0 |
attr |
ElectraForMultipleChoice.sequence_summary |
1 | 0 | 0 |
attr |
ElectraForMultipleChoice.classifier |
1 | 0 | 0 |
meth |
ElectraForMaskedLM.__init__ |
2 | 0 | 0 |
meth |
ElectraForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
ElectraForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
attr |
ElectraForMaskedLM.electra |
1 | 0 | 0 |
attr |
ElectraForMaskedLM.generator_predictions |
1 | 0 | 0 |
attr |
ElectraForMaskedLM.generator_lm_head |
1 | 0 | 0 |
meth |
ElectraPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
ElectraForQuestionAnswering.__init__ |
2 | 0 | 0 |
attr |
ElectraForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
ElectraForQuestionAnswering.electra |
1 | 0 | 0 |
attr |
ElectraForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
ElectraForCausalLM.__init__ |
2 | 0 | 0 |
meth |
ElectraForCausalLM.get_output_embeddings |
1 | 0 | 0 |
meth |
ElectraForCausalLM.set_output_embeddings |
2 | 0 | 0 |
attr |
ElectraForCausalLM.electra |
1 | 0 | 0 |
attr |
ElectraForCausalLM.generator_predictions |
1 | 0 | 0 |
attr |
ElectraForCausalLM.generator_lm_head |
1 | 0 | 0 |
meth |
ElectraModel.__init__ |
2 | 0 | 0 |
meth |
ElectraModel.get_input_embeddings |
1 | 0 | 0 |
meth |
ElectraModel.set_input_embeddings |
2 | 0 | 0 |
meth |
ElectraModel._create_attention_masks |
7 | 0 | 0 |
attr |
ElectraModel.embeddings |
1 | 0 | 0 |
attr |
ElectraModel.encoder |
1 | 0 | 0 |
attr |
ElectraModel.config |
1 | 0 | 0 |
attr |
ElectraModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
ElectraModel.embeddings_project |
1 | 0 | 0 |
transformers.models.emu3.configuration_emu3 (46 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Emu3TextConfig.__init__ |
21 | 17 | 0 |
attr |
Emu3TextConfig.vocab_size |
1 | 0 | 0 |
attr |
Emu3TextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Emu3TextConfig.hidden_size |
1 | 0 | 0 |
attr |
Emu3TextConfig.intermediate_size |
1 | 0 | 0 |
attr |
Emu3TextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Emu3TextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Emu3TextConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Emu3TextConfig.hidden_act |
1 | 0 | 0 |
attr |
Emu3TextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Emu3TextConfig.use_cache |
1 | 0 | 0 |
attr |
Emu3TextConfig.mlp_bias |
1 | 0 | 0 |
attr |
Emu3TextConfig.attention_bias |
1 | 0 | 0 |
attr |
Emu3TextConfig.initializer_range |
1 | 0 | 0 |
attr |
Emu3TextConfig.attention_dropout |
1 | 0 | 0 |
attr |
Emu3TextConfig.rope_parameters |
1 | 0 | 0 |
attr |
Emu3TextConfig.pad_token_id |
1 | 0 | 0 |
attr |
Emu3TextConfig.bos_token_id |
1 | 0 | 0 |
attr |
Emu3TextConfig.eos_token_id |
1 | 0 | 0 |
attr |
Emu3TextConfig.tie_word_embeddings |
1 | 0 | 0 |
meth |
Emu3VQVAEConfig.__init__ |
16 | 14 | 0 |
attr |
Emu3VQVAEConfig.codebook_size |
1 | 0 | 0 |
attr |
Emu3VQVAEConfig.embed_dim |
1 | 0 | 0 |
attr |
Emu3VQVAEConfig.latent_channels |
1 | 0 | 0 |
attr |
Emu3VQVAEConfig.double_latent |
1 | 0 | 0 |
attr |
Emu3VQVAEConfig.in_channels |
1 | 0 | 0 |
attr |
Emu3VQVAEConfig.out_channels |
1 | 0 | 0 |
attr |
Emu3VQVAEConfig.temporal_downsample_factor |
1 | 0 | 0 |
attr |
Emu3VQVAEConfig.base_channels |
1 | 0 | 0 |
attr |
Emu3VQVAEConfig.channel_multiplier |
1 | 0 | 0 |
attr |
Emu3VQVAEConfig.num_res_blocks |
1 | 0 | 0 |
attr |
Emu3VQVAEConfig.attn_resolutions |
1 | 0 | 0 |
attr |
Emu3VQVAEConfig.hidden_size |
1 | 0 | 0 |
attr |
Emu3VQVAEConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Emu3VQVAEConfig.attention_dropout |
1 | 0 | 0 |
meth |
Emu3Config.__init__ |
6 | 4 | 0 |
attr |
Emu3Config.vq_config |
1 | 0 | 0 |
attr |
Emu3Config.text_config |
1 | 0 | 0 |
attr |
Emu3Config.vocabulary_map |
1 | 0 | 0 |
attr |
Emu3Config.image_token_id |
1 | 0 | 0 |
attr |
Emu3Config.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.emu3.image_processing_emu3 (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Emu3ImageProcessor.__init__ |
14 | 13 | 0 |
meth |
Emu3ImageProcessor._preprocess |
12 | 11 | 0 |
meth |
Emu3ImageProcessor._pad_for_batching |
5 | 4 | 0 |
meth |
Emu3ImageProcessor.preprocess |
15 | 14 | 0 |
meth |
Emu3ImageProcessor.postprocess |
9 | 8 | 0 |
attr |
Emu3ImageProcessor.do_resize |
1 | 0 | 0 |
attr |
Emu3ImageProcessor.resample |
1 | 0 | 0 |
attr |
Emu3ImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
Emu3ImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
Emu3ImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
Emu3ImageProcessor.image_mean |
1 | 0 | 0 |
attr |
Emu3ImageProcessor.image_std |
1 | 0 | 0 |
attr |
Emu3ImageProcessor.min_pixels |
1 | 0 | 0 |
attr |
Emu3ImageProcessor.max_pixels |
1 | 0 | 0 |
attr |
Emu3ImageProcessor.spatial_factor |
1 | 0 | 0 |
attr |
Emu3ImageProcessor.size |
1 | 0 | 0 |
attr |
Emu3ImageProcessor.do_convert_rgb |
1 | 0 | 0 |
transformers.models.emu3.modeling_emu3 (54 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Emu3TextModel.__init__ |
2 | 1 | 0 |
attr |
Emu3TextModel.padding_idx |
1 | 0 | 0 |
attr |
Emu3TextModel.vocab_size |
1 | 0 | 0 |
attr |
Emu3TextModel.embed_tokens |
1 | 0 | 0 |
attr |
Emu3TextModel.layers |
1 | 0 | 0 |
attr |
Emu3TextModel.norm |
1 | 0 | 0 |
attr |
Emu3TextModel.rotary_emb |
1 | 0 | 0 |
attr |
Emu3TextModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
Emu3ForCausalLM.__init__ |
2 | 0 | 0 |
attr |
Emu3ForCausalLM.model |
1 | 0 | 0 |
attr |
Emu3ForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Emu3ForCausalLM.lm_head |
1 | 0 | 0 |
meth |
Emu3Model.__init__ |
2 | 0 | 0 |
meth |
Emu3Model.get_input_embeddings |
1 | 0 | 0 |
meth |
Emu3Model.set_input_embeddings |
2 | 0 | 0 |
meth |
Emu3Model.decode_image_tokens |
4 | 3 | 0 |
meth |
Emu3Model.get_placeholder_mask |
4 | 3 | 0 |
attr |
Emu3Model.text_model |
1 | 0 | 0 |
attr |
Emu3Model.vqmodel |
1 | 0 | 0 |
attr |
Emu3Model.vocabulary_mapping |
1 | 0 | 0 |
meth |
Emu3VQVAE._init_weights |
2 | 0 | 0 |
meth |
Emu3VQVAE.__init__ |
2 | 1 | 0 |
meth |
Emu3VQVAE.decode |
2 | 1 | 0 |
attr |
Emu3VQVAE.encoder |
1 | 0 | 0 |
attr |
Emu3VQVAE.decoder |
1 | 0 | 0 |
attr |
Emu3VQVAE.quantize |
1 | 0 | 0 |
attr |
Emu3VQVAE.vision_spatial_factor |
1 | 0 | 0 |
attr |
Emu3VQVAE.quant_conv |
1 | 0 | 0 |
attr |
Emu3VQVAE.post_quant_conv |
1 | 0 | 0 |
attr |
Emu3VQVAE.spatial_scale_factor |
1 | 0 | 0 |
meth |
Emu3ForConditionalGeneration.__init__ |
2 | 0 | 0 |
meth |
Emu3ForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Emu3ForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Emu3ForConditionalGeneration.decode_image_tokens |
2 | 0 | 0 |
meth |
Emu3ForConditionalGeneration.prepare_inputs_for_generation |
11 | 0 | 0 |
attr |
Emu3ForConditionalGeneration.model |
1 | 0 | 0 |
attr |
Emu3ForConditionalGeneration.lm_head |
1 | 0 | 0 |
transformers.models.emu3.modular_emu3 (48 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Emu3TextModel.__init__ |
2 | 1 | 0 |
attr |
Emu3TextModel.layers |
1 | 0 | 0 |
meth |
Emu3ForCausalLM.__init__ |
2 | 0 | 0 |
meth |
Emu3ForCausalLM.forward |
2 | 0 | 0 |
attr |
Emu3ForCausalLM.model |
1 | 0 | 0 |
meth |
Emu3Model.__init__ |
2 | 0 | 0 |
meth |
Emu3Model.get_input_embeddings |
1 | 0 | 0 |
meth |
Emu3Model.set_input_embeddings |
2 | 0 | 0 |
meth |
Emu3Model.decode_image_tokens |
4 | 3 | 0 |
meth |
Emu3Model.get_placeholder_mask |
4 | 3 | 0 |
attr |
Emu3Model.text_model |
1 | 0 | 0 |
attr |
Emu3Model.vqmodel |
1 | 0 | 0 |
attr |
Emu3Model.vocabulary_mapping |
1 | 0 | 0 |
meth |
Emu3VQVAE._init_weights |
2 | 0 | 0 |
meth |
Emu3VQVAE.__init__ |
2 | 1 | 0 |
meth |
Emu3VQVAE.decode |
2 | 1 | 0 |
attr |
Emu3VQVAE.encoder |
1 | 0 | 0 |
attr |
Emu3VQVAE.decoder |
1 | 0 | 0 |
attr |
Emu3VQVAE.quantize |
1 | 0 | 0 |
attr |
Emu3VQVAE.vision_spatial_factor |
1 | 0 | 0 |
attr |
Emu3VQVAE.quant_conv |
1 | 0 | 0 |
attr |
Emu3VQVAE.post_quant_conv |
1 | 0 | 0 |
attr |
Emu3VQVAE.spatial_scale_factor |
1 | 0 | 0 |
meth |
Emu3ForConditionalGeneration.__init__ |
2 | 0 | 0 |
meth |
Emu3ForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Emu3ForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Emu3ForConditionalGeneration.decode_image_tokens |
2 | 0 | 0 |
meth |
Emu3ForConditionalGeneration.prepare_inputs_for_generation |
11 | 0 | 0 |
attr |
Emu3ForConditionalGeneration.model |
1 | 0 | 0 |
attr |
Emu3ForConditionalGeneration.lm_head |
1 | 0 | 0 |
transformers.models.emu3.processing_emu3 (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Emu3Processor.__init__ |
5 | 0 | 0 |
meth |
Emu3Processor._get_num_multimodal_tokens |
3 | 0 | 0 |
meth |
Emu3Processor.calculate_generate_size |
4 | 0 | 0 |
meth |
Emu3Processor.postprocess |
3 | 1 | 0 |
meth |
Emu3Processor.post_process_multimodal_output |
5 | 0 | 0 |
attr |
Emu3Processor.image_token |
1 | 0 | 0 |
attr |
Emu3Processor.image_token_id |
1 | 0 | 0 |
attr |
Emu3Processor.image_start_token |
1 | 0 | 0 |
attr |
Emu3Processor.image_end_token |
1 | 0 | 0 |
attr |
Emu3Processor.fake_token_around_image |
1 | 0 | 0 |
attr |
Emu3Processor.eof_token |
1 | 0 | 0 |
attr |
Emu3Processor.bos_token |
1 | 0 | 0 |
attr |
Emu3Processor.downsample_ratio |
1 | 0 | 0 |
transformers.models.encodec.configuration_encodec (48 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EncodecConfig.__init__ |
25 | 0 | 0 |
attr |
EncodecConfig.target_bandwidths |
1 | 0 | 0 |
attr |
EncodecConfig.sampling_rate |
1 | 0 | 0 |
attr |
EncodecConfig.audio_channels |
1 | 0 | 0 |
attr |
EncodecConfig.normalize |
1 | 0 | 0 |
attr |
EncodecConfig.chunk_length_s |
1 | 0 | 0 |
attr |
EncodecConfig.overlap |
1 | 0 | 0 |
attr |
EncodecConfig.hidden_size |
1 | 0 | 0 |
attr |
EncodecConfig.num_filters |
1 | 0 | 0 |
attr |
EncodecConfig.num_residual_layers |
1 | 0 | 0 |
attr |
EncodecConfig.upsampling_ratios |
1 | 0 | 0 |
attr |
EncodecConfig.norm_type |
1 | 0 | 0 |
attr |
EncodecConfig.kernel_size |
1 | 0 | 0 |
attr |
EncodecConfig.last_kernel_size |
1 | 0 | 0 |
attr |
EncodecConfig.residual_kernel_size |
1 | 0 | 0 |
attr |
EncodecConfig.dilation_growth_rate |
1 | 0 | 0 |
attr |
EncodecConfig.use_causal_conv |
1 | 0 | 0 |
attr |
EncodecConfig.pad_mode |
1 | 0 | 0 |
attr |
EncodecConfig.compress |
1 | 0 | 0 |
attr |
EncodecConfig.num_lstm_layers |
1 | 0 | 0 |
attr |
EncodecConfig.trim_right_ratio |
1 | 0 | 0 |
attr |
EncodecConfig.codebook_size |
1 | 0 | 0 |
attr |
EncodecConfig.codebook_dim |
1 | 0 | 0 |
attr |
EncodecConfig.use_conv_shortcut |
1 | 0 | 0 |
transformers.models.encodec.feature_extraction_encodec (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EncodecFeatureExtractor.__init__ |
7 | 5 | 0 |
attr |
EncodecFeatureExtractor.chunk_length_s |
1 | 0 | 0 |
attr |
EncodecFeatureExtractor.overlap |
1 | 0 | 0 |
transformers.models.encodec.modeling_encodec (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EncodecPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
EncodecModel.__init__ |
2 | 1 | 0 |
meth |
EncodecModel._linear_overlap_add |
3 | 2 | 0 |
attr |
EncodecModel.encoder |
1 | 0 | 0 |
attr |
EncodecModel.decoder |
1 | 0 | 0 |
attr |
EncodecModel.quantizer |
1 | 0 | 0 |
attr |
EncodecModel.bits_per_codebook |
1 | 0 | 0 |
transformers.models.encoder_decoder.configuration_encoder_decoder (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EncoderDecoderConfig.__init__ |
4 | 0 | 0 |
meth |
EncoderDecoderConfig.from_encoder_decoder_configs |
4 | 3 | 0 |
attr |
EncoderDecoderConfig.encoder |
1 | 0 | 0 |
attr |
EncoderDecoderConfig.decoder |
1 | 0 | 0 |
attr |
EncoderDecoderConfig.is_encoder_decoder |
1 | 0 | 0 |
attr |
EncoderDecoderConfig.pad_token_id |
1 | 0 | 0 |
attr |
EncoderDecoderConfig.decoder_start_token_id |
1 | 0 | 0 |
transformers.models.encoder_decoder.modeling_encoder_decoder (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EncoderDecoderModel.__init__ |
4 | 3 | 0 |
meth |
EncoderDecoderModel._init_weights |
2 | 0 | 0 |
meth |
EncoderDecoderModel.get_input_embeddings |
1 | 0 | 0 |
meth |
EncoderDecoderModel.get_output_embeddings |
1 | 0 | 0 |
meth |
EncoderDecoderModel.set_output_embeddings |
2 | 0 | 0 |
meth |
EncoderDecoderModel.from_encoder_decoder_pretrained |
5 | 3 | 0 |
meth |
EncoderDecoderModel.forward |
13 | 12 | 0 |
meth |
EncoderDecoderModel.prepare_decoder_input_ids_from_labels |
2 | 1 | 0 |
meth |
EncoderDecoderModel.resize_token_embeddings |
3 | 0 | 0 |
attr |
EncoderDecoderModel.encoder |
1 | 0 | 0 |
attr |
EncoderDecoderModel.decoder |
1 | 0 | 0 |
attr |
EncoderDecoderModel.enc_to_dec_proj |
1 | 0 | 0 |
transformers.models.eomt.configuration_eomt (47 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EomtConfig.__init__ |
28 | 7 | 0 |
attr |
EomtConfig.mlp_ratio |
1 | 0 | 0 |
attr |
EomtConfig.attention_dropout |
1 | 0 | 0 |
attr |
EomtConfig.layerscale_value |
1 | 0 | 0 |
attr |
EomtConfig.drop_path_rate |
1 | 0 | 0 |
attr |
EomtConfig.num_upscale_blocks |
1 | 0 | 0 |
attr |
EomtConfig.use_swiglu_ffn |
1 | 0 | 0 |
attr |
EomtConfig.num_blocks |
1 | 0 | 0 |
attr |
EomtConfig.no_object_weight |
1 | 0 | 0 |
attr |
EomtConfig.class_weight |
1 | 0 | 0 |
attr |
EomtConfig.mask_weight |
1 | 0 | 0 |
attr |
EomtConfig.dice_weight |
1 | 0 | 0 |
attr |
EomtConfig.train_num_points |
1 | 0 | 0 |
attr |
EomtConfig.oversample_ratio |
1 | 0 | 0 |
attr |
EomtConfig.importance_sample_ratio |
1 | 0 | 0 |
attr |
EomtConfig.num_queries |
1 | 0 | 0 |
attr |
EomtConfig.num_register_tokens |
1 | 0 | 0 |
attr |
EomtConfig.hidden_size |
1 | 0 | 0 |
attr |
EomtConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
EomtConfig.num_attention_heads |
1 | 0 | 0 |
attr |
EomtConfig.hidden_act |
1 | 0 | 0 |
attr |
EomtConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
EomtConfig.initializer_range |
1 | 0 | 0 |
attr |
EomtConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
EomtConfig.image_size |
1 | 0 | 0 |
attr |
EomtConfig.patch_size |
1 | 0 | 0 |
attr |
EomtConfig.num_channels |
1 | 0 | 0 |
transformers.models.eomt.image_processing_eomt (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EomtImageProcessor.__init__ |
14 | 12 | 0 |
meth |
EomtImageProcessor.resize |
7 | 5 | 0 |
meth |
EomtImageProcessor.encode_inputs |
7 | 6 | 0 |
meth |
EomtImageProcessor.post_process_semantic_segmentation |
4 | 3 | 0 |
meth |
EomtImageProcessor.post_process_panoptic_segmentation |
8 | 6 | 0 |
meth |
EomtImageProcessor.post_process_instance_segmentation |
5 | 3 | 0 |
attr |
EomtImageProcessor.do_resize |
1 | 0 | 0 |
attr |
EomtImageProcessor.size |
1 | 0 | 0 |
attr |
EomtImageProcessor.resample |
1 | 0 | 0 |
attr |
EomtImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
EomtImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
EomtImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
EomtImageProcessor.do_split_image |
1 | 0 | 0 |
attr |
EomtImageProcessor.do_pad |
1 | 0 | 0 |
attr |
EomtImageProcessor.image_mean |
1 | 0 | 0 |
attr |
EomtImageProcessor.image_std |
1 | 0 | 0 |
attr |
EomtImageProcessor.ignore_index |
1 | 0 | 0 |
attr |
EomtImageProcessor.num_labels |
1 | 0 | 0 |
transformers.models.eomt.image_processing_eomt_fast (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EomtImageProcessorFast.__init__ |
2 | 1 | 0 |
meth |
EomtImageProcessorFast._preprocess |
15 | 13 | 0 |
meth |
EomtImageProcessorFast.post_process_semantic_segmentation |
4 | 3 | 0 |
meth |
EomtImageProcessorFast.post_process_panoptic_segmentation |
8 | 6 | 0 |
meth |
EomtImageProcessorFast.post_process_instance_segmentation |
5 | 3 | 0 |
transformers.models.eomt.modeling_eomt (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EomtForUniversalSegmentation.__init__ |
2 | 1 | 0 |
meth |
EomtForUniversalSegmentation.get_input_embeddings |
1 | 0 | 0 |
meth |
EomtForUniversalSegmentation.predict |
2 | 1 | 0 |
meth |
EomtForUniversalSegmentation._disable_attention_mask |
6 | 0 | 0 |
attr |
EomtForUniversalSegmentation.num_hidden_layers |
1 | 0 | 0 |
attr |
EomtForUniversalSegmentation.embeddings |
1 | 0 | 0 |
attr |
EomtForUniversalSegmentation.layernorm |
1 | 0 | 0 |
attr |
EomtForUniversalSegmentation.query |
1 | 0 | 0 |
attr |
EomtForUniversalSegmentation.layers |
1 | 0 | 0 |
attr |
EomtForUniversalSegmentation.upscale_block |
1 | 0 | 0 |
attr |
EomtForUniversalSegmentation.mask_head |
1 | 0 | 0 |
attr |
EomtForUniversalSegmentation.class_predictor |
1 | 0 | 0 |
attr |
EomtForUniversalSegmentation.grid_size |
1 | 0 | 0 |
attr |
EomtForUniversalSegmentation.criterion |
1 | 0 | 0 |
transformers.models.eomt.modular_eomt (58 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EomtForUniversalSegmentation.__init__ |
2 | 1 | 0 |
meth |
EomtForUniversalSegmentation.get_input_embeddings |
1 | 0 | 0 |
meth |
EomtForUniversalSegmentation.get_auxiliary_logits |
1 | 0 | 0 |
meth |
EomtForUniversalSegmentation.predict |
2 | 1 | 0 |
meth |
EomtForUniversalSegmentation._disable_attention_mask |
6 | 0 | 0 |
attr |
EomtForUniversalSegmentation.config |
1 | 0 | 0 |
attr |
EomtForUniversalSegmentation.num_hidden_layers |
1 | 0 | 0 |
attr |
EomtForUniversalSegmentation.embeddings |
1 | 0 | 0 |
attr |
EomtForUniversalSegmentation.layernorm |
1 | 0 | 0 |
attr |
EomtForUniversalSegmentation.query |
1 | 0 | 0 |
attr |
EomtForUniversalSegmentation.layers |
1 | 0 | 0 |
attr |
EomtForUniversalSegmentation.upscale_block |
1 | 0 | 0 |
attr |
EomtForUniversalSegmentation.mask_head |
1 | 0 | 0 |
attr |
EomtForUniversalSegmentation.class_predictor |
1 | 0 | 0 |
attr |
EomtForUniversalSegmentation.grid_size |
1 | 0 | 0 |
attr |
EomtForUniversalSegmentation.criterion |
1 | 0 | 0 |
meth |
EomtConfig.__init__ |
28 | 7 | 0 |
attr |
EomtConfig.mlp_ratio |
1 | 0 | 0 |
attr |
EomtConfig.attention_dropout |
1 | 0 | 0 |
attr |
EomtConfig.layerscale_value |
1 | 0 | 0 |
attr |
EomtConfig.drop_path_rate |
1 | 0 | 0 |
attr |
EomtConfig.num_upscale_blocks |
1 | 0 | 0 |
attr |
EomtConfig.use_swiglu_ffn |
1 | 0 | 0 |
attr |
EomtConfig.num_blocks |
1 | 0 | 0 |
attr |
EomtConfig.no_object_weight |
1 | 0 | 0 |
attr |
EomtConfig.class_weight |
1 | 0 | 0 |
attr |
EomtConfig.mask_weight |
1 | 0 | 0 |
attr |
EomtConfig.dice_weight |
1 | 0 | 0 |
attr |
EomtConfig.train_num_points |
1 | 0 | 0 |
attr |
EomtConfig.oversample_ratio |
1 | 0 | 0 |
attr |
EomtConfig.importance_sample_ratio |
1 | 0 | 0 |
attr |
EomtConfig.num_queries |
1 | 0 | 0 |
attr |
EomtConfig.num_register_tokens |
1 | 0 | 0 |
transformers.models.eomt_dinov3.configuration_eomt_dinov3 (55 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EomtDinov3Config.__init__ |
37 | 17 | 0 |
attr |
EomtDinov3Config.intermediate_size |
1 | 0 | 0 |
attr |
EomtDinov3Config.attention_dropout |
1 | 0 | 0 |
attr |
EomtDinov3Config.layerscale_value |
1 | 0 | 0 |
attr |
EomtDinov3Config.drop_path_rate |
1 | 0 | 0 |
attr |
EomtDinov3Config.num_upscale_blocks |
1 | 0 | 0 |
attr |
EomtDinov3Config.num_blocks |
1 | 0 | 0 |
attr |
EomtDinov3Config.no_object_weight |
1 | 0 | 0 |
attr |
EomtDinov3Config.class_weight |
1 | 0 | 0 |
attr |
EomtDinov3Config.mask_weight |
1 | 0 | 0 |
attr |
EomtDinov3Config.dice_weight |
1 | 0 | 0 |
attr |
EomtDinov3Config.train_num_points |
1 | 0 | 0 |
attr |
EomtDinov3Config.oversample_ratio |
1 | 0 | 0 |
attr |
EomtDinov3Config.importance_sample_ratio |
1 | 0 | 0 |
attr |
EomtDinov3Config.num_queries |
1 | 0 | 0 |
attr |
EomtDinov3Config.num_register_tokens |
1 | 0 | 0 |
attr |
EomtDinov3Config.rope_parameters |
1 | 0 | 0 |
attr |
EomtDinov3Config.query_bias |
1 | 0 | 0 |
attr |
EomtDinov3Config.key_bias |
1 | 0 | 0 |
attr |
EomtDinov3Config.value_bias |
1 | 0 | 0 |
attr |
EomtDinov3Config.proj_bias |
1 | 0 | 0 |
attr |
EomtDinov3Config.mlp_bias |
1 | 0 | 0 |
attr |
EomtDinov3Config.use_gated_mlp |
1 | 0 | 0 |
attr |
EomtDinov3Config.pos_embed_shift |
1 | 0 | 0 |
attr |
EomtDinov3Config.pos_embed_jitter |
1 | 0 | 0 |
attr |
EomtDinov3Config.pos_embed_rescale |
1 | 0 | 0 |
attr |
EomtDinov3Config.hidden_size |
1 | 0 | 0 |
attr |
EomtDinov3Config.num_hidden_layers |
1 | 0 | 0 |
attr |
EomtDinov3Config.num_attention_heads |
1 | 0 | 0 |
attr |
EomtDinov3Config.hidden_act |
1 | 0 | 0 |
attr |
EomtDinov3Config.hidden_dropout_prob |
1 | 0 | 0 |
attr |
EomtDinov3Config.initializer_range |
1 | 0 | 0 |
attr |
EomtDinov3Config.layer_norm_eps |
1 | 0 | 0 |
attr |
EomtDinov3Config.image_size |
1 | 0 | 0 |
attr |
EomtDinov3Config.patch_size |
1 | 0 | 0 |
attr |
EomtDinov3Config.num_channels |
1 | 0 | 0 |
transformers.models.eomt_dinov3.modeling_eomt_dinov3 (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EomtDinov3ForUniversalSegmentation.__init__ |
2 | 1 | 0 |
meth |
EomtDinov3ForUniversalSegmentation.get_input_embeddings |
1 | 0 | 0 |
meth |
EomtDinov3ForUniversalSegmentation.predict |
2 | 1 | 0 |
meth |
EomtDinov3ForUniversalSegmentation._disable_attention_mask |
6 | 0 | 0 |
attr |
EomtDinov3ForUniversalSegmentation.num_hidden_layers |
1 | 0 | 0 |
attr |
EomtDinov3ForUniversalSegmentation.embeddings |
1 | 0 | 0 |
attr |
EomtDinov3ForUniversalSegmentation.layernorm |
1 | 0 | 0 |
attr |
EomtDinov3ForUniversalSegmentation.query |
1 | 0 | 0 |
attr |
EomtDinov3ForUniversalSegmentation.layers |
1 | 0 | 0 |
attr |
EomtDinov3ForUniversalSegmentation.upscale_block |
1 | 0 | 0 |
attr |
EomtDinov3ForUniversalSegmentation.mask_head |
1 | 0 | 0 |
attr |
EomtDinov3ForUniversalSegmentation.class_predictor |
1 | 0 | 0 |
attr |
EomtDinov3ForUniversalSegmentation.grid_size |
1 | 0 | 0 |
attr |
EomtDinov3ForUniversalSegmentation.criterion |
1 | 0 | 0 |
attr |
EomtDinov3ForUniversalSegmentation.num_prefix_tokens |
1 | 0 | 0 |
attr |
EomtDinov3ForUniversalSegmentation.dropout |
1 | 0 | 0 |
attr |
EomtDinov3ForUniversalSegmentation.rope_embeddings |
1 | 0 | 0 |
transformers.models.eomt_dinov3.modular_eomt_dinov3 (51 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EomtDinov3Config.__init__ |
37 | 17 | 0 |
attr |
EomtDinov3Config.intermediate_size |
1 | 0 | 0 |
attr |
EomtDinov3Config.attention_dropout |
1 | 0 | 0 |
attr |
EomtDinov3Config.layerscale_value |
1 | 0 | 0 |
attr |
EomtDinov3Config.drop_path_rate |
1 | 0 | 0 |
attr |
EomtDinov3Config.num_upscale_blocks |
1 | 0 | 0 |
attr |
EomtDinov3Config.num_blocks |
1 | 0 | 0 |
attr |
EomtDinov3Config.no_object_weight |
1 | 0 | 0 |
attr |
EomtDinov3Config.class_weight |
1 | 0 | 0 |
attr |
EomtDinov3Config.mask_weight |
1 | 0 | 0 |
attr |
EomtDinov3Config.dice_weight |
1 | 0 | 0 |
attr |
EomtDinov3Config.train_num_points |
1 | 0 | 0 |
attr |
EomtDinov3Config.oversample_ratio |
1 | 0 | 0 |
attr |
EomtDinov3Config.importance_sample_ratio |
1 | 0 | 0 |
attr |
EomtDinov3Config.num_queries |
1 | 0 | 0 |
attr |
EomtDinov3Config.num_register_tokens |
1 | 0 | 0 |
attr |
EomtDinov3Config.rope_parameters |
1 | 0 | 0 |
attr |
EomtDinov3Config.query_bias |
1 | 0 | 0 |
attr |
EomtDinov3Config.key_bias |
1 | 0 | 0 |
attr |
EomtDinov3Config.value_bias |
1 | 0 | 0 |
attr |
EomtDinov3Config.proj_bias |
1 | 0 | 0 |
attr |
EomtDinov3Config.mlp_bias |
1 | 0 | 0 |
attr |
EomtDinov3Config.use_gated_mlp |
1 | 0 | 0 |
attr |
EomtDinov3Config.pos_embed_shift |
1 | 0 | 0 |
attr |
EomtDinov3Config.pos_embed_jitter |
1 | 0 | 0 |
attr |
EomtDinov3Config.pos_embed_rescale |
1 | 0 | 0 |
meth |
EomtDinov3ForUniversalSegmentation.__init__ |
2 | 1 | 0 |
attr |
EomtDinov3ForUniversalSegmentation.num_prefix_tokens |
1 | 0 | 0 |
attr |
EomtDinov3ForUniversalSegmentation.dropout |
1 | 0 | 0 |
attr |
EomtDinov3ForUniversalSegmentation.embeddings |
1 | 0 | 0 |
attr |
EomtDinov3ForUniversalSegmentation.rope_embeddings |
1 | 0 | 0 |
attr |
EomtDinov3ForUniversalSegmentation.layers |
1 | 0 | 0 |
transformers.models.ernie.configuration_ernie (46 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ErnieConfig.__init__ |
24 | 0 | 0 |
attr |
ErnieConfig.pad_token_id |
1 | 0 | 0 |
attr |
ErnieConfig.bos_token_id |
1 | 0 | 0 |
attr |
ErnieConfig.eos_token_id |
1 | 0 | 0 |
attr |
ErnieConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
ErnieConfig.is_decoder |
1 | 0 | 0 |
attr |
ErnieConfig.add_cross_attention |
1 | 0 | 0 |
attr |
ErnieConfig.vocab_size |
1 | 0 | 0 |
attr |
ErnieConfig.hidden_size |
1 | 0 | 0 |
attr |
ErnieConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
ErnieConfig.num_attention_heads |
1 | 0 | 0 |
attr |
ErnieConfig.hidden_act |
1 | 0 | 0 |
attr |
ErnieConfig.intermediate_size |
1 | 0 | 0 |
attr |
ErnieConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
ErnieConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
ErnieConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
ErnieConfig.type_vocab_size |
1 | 0 | 0 |
attr |
ErnieConfig.task_type_vocab_size |
1 | 0 | 0 |
attr |
ErnieConfig.use_task_id |
1 | 0 | 0 |
attr |
ErnieConfig.initializer_range |
1 | 0 | 0 |
attr |
ErnieConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
ErnieConfig.use_cache |
1 | 0 | 0 |
attr |
ErnieConfig.classifier_dropout |
1 | 0 | 0 |
transformers.models.ernie.modeling_ernie (68 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ErnieModel.__init__ |
3 | 0 | 0 |
meth |
ErnieModel.get_input_embeddings |
1 | 0 | 0 |
meth |
ErnieModel.set_input_embeddings |
2 | 0 | 0 |
meth |
ErnieModel._create_attention_masks |
7 | 0 | 0 |
attr |
ErnieModel.config |
1 | 0 | 0 |
attr |
ErnieModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
ErnieModel.embeddings |
1 | 0 | 0 |
attr |
ErnieModel.encoder |
1 | 0 | 0 |
attr |
ErnieModel.pooler |
1 | 0 | 0 |
meth |
ErniePreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
ErnieForMaskedLM.__init__ |
2 | 0 | 0 |
meth |
ErnieForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
ErnieForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
attr |
ErnieForMaskedLM.ernie |
1 | 0 | 0 |
attr |
ErnieForMaskedLM.cls |
1 | 0 | 0 |
meth |
ErnieForMultipleChoice.__init__ |
2 | 0 | 0 |
attr |
ErnieForMultipleChoice.ernie |
1 | 0 | 0 |
attr |
ErnieForMultipleChoice.dropout |
1 | 0 | 0 |
attr |
ErnieForMultipleChoice.classifier |
1 | 0 | 0 |
meth |
ErnieForTokenClassification.__init__ |
2 | 0 | 0 |
attr |
ErnieForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
ErnieForTokenClassification.ernie |
1 | 0 | 0 |
attr |
ErnieForTokenClassification.dropout |
1 | 0 | 0 |
attr |
ErnieForTokenClassification.classifier |
1 | 0 | 0 |
meth |
ErnieForPreTraining.__init__ |
2 | 0 | 0 |
meth |
ErnieForPreTraining.get_output_embeddings |
1 | 0 | 0 |
meth |
ErnieForPreTraining.set_output_embeddings |
2 | 0 | 0 |
attr |
ErnieForPreTraining.ernie |
1 | 0 | 0 |
attr |
ErnieForPreTraining.cls |
1 | 0 | 0 |
meth |
ErnieForSequenceClassification.__init__ |
2 | 0 | 0 |
attr |
ErnieForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
ErnieForSequenceClassification.config |
1 | 0 | 0 |
attr |
ErnieForSequenceClassification.ernie |
1 | 0 | 0 |
attr |
ErnieForSequenceClassification.dropout |
1 | 0 | 0 |
attr |
ErnieForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
ErnieForNextSentencePrediction.__init__ |
2 | 0 | 0 |
attr |
ErnieForNextSentencePrediction.ernie |
1 | 0 | 0 |
attr |
ErnieForNextSentencePrediction.cls |
1 | 0 | 0 |
meth |
ErnieForQuestionAnswering.__init__ |
2 | 0 | 0 |
attr |
ErnieForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
ErnieForQuestionAnswering.ernie |
1 | 0 | 0 |
attr |
ErnieForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
ErnieForCausalLM.__init__ |
2 | 0 | 0 |
meth |
ErnieForCausalLM.get_output_embeddings |
1 | 0 | 0 |
meth |
ErnieForCausalLM.set_output_embeddings |
2 | 0 | 0 |
attr |
ErnieForCausalLM.ernie |
1 | 0 | 0 |
attr |
ErnieForCausalLM.cls |
1 | 0 | 0 |
transformers.models.ernie.modular_ernie (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ErnieModel.__init__ |
3 | 0 | 0 |
meth |
ErnieModel._create_attention_masks |
7 | 0 | 0 |
attr |
ErnieModel.config |
1 | 0 | 0 |
attr |
ErnieModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
ErnieModel.embeddings |
1 | 0 | 0 |
attr |
ErnieModel.encoder |
1 | 0 | 0 |
attr |
ErnieModel.pooler |
1 | 0 | 0 |
meth |
ErniePreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.ernie4_5.configuration_ernie4_5 (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Ernie4_5Config.__init__ |
20 | 18 | 0 |
attr |
Ernie4_5Config.vocab_size |
1 | 0 | 0 |
attr |
Ernie4_5Config.max_position_embeddings |
1 | 0 | 0 |
attr |
Ernie4_5Config.hidden_size |
1 | 0 | 0 |
attr |
Ernie4_5Config.intermediate_size |
1 | 0 | 0 |
attr |
Ernie4_5Config.num_hidden_layers |
1 | 0 | 0 |
attr |
Ernie4_5Config.num_attention_heads |
1 | 0 | 0 |
attr |
Ernie4_5Config.num_key_value_heads |
1 | 0 | 0 |
attr |
Ernie4_5Config.hidden_act |
1 | 0 | 0 |
attr |
Ernie4_5Config.initializer_range |
1 | 0 | 0 |
attr |
Ernie4_5Config.rms_norm_eps |
1 | 0 | 0 |
attr |
Ernie4_5Config.use_cache |
1 | 0 | 0 |
attr |
Ernie4_5Config.use_bias |
1 | 0 | 0 |
attr |
Ernie4_5Config.head_dim |
1 | 0 | 0 |
attr |
Ernie4_5Config.rope_parameters |
1 | 0 | 0 |
attr |
Ernie4_5Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
Ernie4_5Config.pad_token_id |
1 | 0 | 0 |
attr |
Ernie4_5Config.bos_token_id |
1 | 0 | 0 |
attr |
Ernie4_5Config.eos_token_id |
1 | 0 | 0 |
transformers.models.ernie4_5.modeling_ernie4_5 (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Ernie4_5Model.__init__ |
2 | 1 | 0 |
attr |
Ernie4_5Model.padding_idx |
1 | 0 | 0 |
attr |
Ernie4_5Model.vocab_size |
1 | 0 | 0 |
attr |
Ernie4_5Model.embed_tokens |
1 | 0 | 0 |
attr |
Ernie4_5Model.layers |
1 | 0 | 0 |
attr |
Ernie4_5Model.norm |
1 | 0 | 0 |
attr |
Ernie4_5Model.rotary_emb |
1 | 0 | 0 |
attr |
Ernie4_5Model.gradient_checkpointing |
1 | 0 | 0 |
meth |
Ernie4_5ForCausalLM.__init__ |
2 | 0 | 0 |
attr |
Ernie4_5ForCausalLM.model |
1 | 0 | 0 |
attr |
Ernie4_5ForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Ernie4_5ForCausalLM.lm_head |
1 | 0 | 0 |
transformers.models.ernie4_5.modular_ernie4_5 (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
Ernie4_5PreTrainedModel |
1 | 0 | 0 |
attr |
Ernie4_5Model |
1 | 0 | 0 |
meth |
Ernie4_5ForCausalLM.forward |
2 | 0 | 0 |
transformers.models.ernie4_5_moe.configuration_ernie4_5_moe (29 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Ernie4_5_MoeConfig.__init__ |
29 | 27 | 0 |
attr |
Ernie4_5_MoeConfig.vocab_size |
1 | 0 | 0 |
attr |
Ernie4_5_MoeConfig.hidden_size |
1 | 0 | 0 |
attr |
Ernie4_5_MoeConfig.intermediate_size |
1 | 0 | 0 |
attr |
Ernie4_5_MoeConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Ernie4_5_MoeConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Ernie4_5_MoeConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Ernie4_5_MoeConfig.hidden_act |
1 | 0 | 0 |
attr |
Ernie4_5_MoeConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Ernie4_5_MoeConfig.initializer_range |
1 | 0 | 0 |
attr |
Ernie4_5_MoeConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Ernie4_5_MoeConfig.use_cache |
1 | 0 | 0 |
attr |
Ernie4_5_MoeConfig.use_bias |
1 | 0 | 0 |
attr |
Ernie4_5_MoeConfig.moe_intermediate_size |
1 | 0 | 0 |
attr |
Ernie4_5_MoeConfig.moe_k |
1 | 0 | 0 |
attr |
Ernie4_5_MoeConfig.moe_num_experts |
1 | 0 | 0 |
attr |
Ernie4_5_MoeConfig.moe_num_shared_experts |
1 | 0 | 0 |
attr |
Ernie4_5_MoeConfig.moe_layer_start_index |
1 | 0 | 0 |
attr |
Ernie4_5_MoeConfig.moe_layer_end_index |
1 | 0 | 0 |
attr |
Ernie4_5_MoeConfig.moe_layer_interval |
1 | 0 | 0 |
attr |
Ernie4_5_MoeConfig.moe_norm_min |
1 | 0 | 0 |
attr |
Ernie4_5_MoeConfig.output_router_logits |
1 | 0 | 0 |
attr |
Ernie4_5_MoeConfig.router_aux_loss_coef |
1 | 0 | 0 |
attr |
Ernie4_5_MoeConfig.rope_parameters |
1 | 0 | 0 |
attr |
Ernie4_5_MoeConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Ernie4_5_MoeConfig.pad_token_id |
1 | 0 | 0 |
attr |
Ernie4_5_MoeConfig.bos_token_id |
1 | 0 | 0 |
attr |
Ernie4_5_MoeConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.ernie4_5_moe.modeling_ernie4_5_moe (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Ernie4_5_MoeModel.__init__ |
2 | 1 | 0 |
attr |
Ernie4_5_MoeModel.padding_idx |
1 | 0 | 0 |
attr |
Ernie4_5_MoeModel.vocab_size |
1 | 0 | 0 |
attr |
Ernie4_5_MoeModel.embed_tokens |
1 | 0 | 0 |
attr |
Ernie4_5_MoeModel.layers |
1 | 0 | 0 |
attr |
Ernie4_5_MoeModel.norm |
1 | 0 | 0 |
attr |
Ernie4_5_MoeModel.rotary_emb |
1 | 0 | 0 |
attr |
Ernie4_5_MoeModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
Ernie4_5_MoePreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
Ernie4_5_MoePreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
Ernie4_5_MoeForCausalLM.__init__ |
2 | 0 | 0 |
attr |
Ernie4_5_MoeForCausalLM.model |
1 | 0 | 0 |
attr |
Ernie4_5_MoeForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Ernie4_5_MoeForCausalLM.lm_head |
1 | 0 | 0 |
attr |
Ernie4_5_MoeForCausalLM.router_aux_loss_coef |
1 | 0 | 0 |
attr |
Ernie4_5_MoeForCausalLM.num_experts |
1 | 0 | 0 |
attr |
Ernie4_5_MoeForCausalLM.num_experts_per_tok |
1 | 0 | 0 |
transformers.models.ernie4_5_moe.modular_ernie4_5_moe (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Ernie4_5_MoeModel.__init__ |
2 | 1 | 0 |
attr |
Ernie4_5_MoeModel.padding_idx |
1 | 0 | 0 |
attr |
Ernie4_5_MoeModel.vocab_size |
1 | 0 | 0 |
attr |
Ernie4_5_MoeModel.embed_tokens |
1 | 0 | 0 |
attr |
Ernie4_5_MoeModel.layers |
1 | 0 | 0 |
attr |
Ernie4_5_MoeModel.norm |
1 | 0 | 0 |
attr |
Ernie4_5_MoeModel.rotary_emb |
1 | 0 | 0 |
attr |
Ernie4_5_MoeModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
Ernie4_5_MoePreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
Ernie4_5_MoePreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
Ernie4_5_MoeForCausalLM.__init__ |
2 | 0 | 0 |
meth |
Ernie4_5_MoeForCausalLM.forward |
2 | 0 | 0 |
attr |
Ernie4_5_MoeForCausalLM.model |
1 | 0 | 0 |
attr |
Ernie4_5_MoeForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Ernie4_5_MoeForCausalLM.lm_head |
1 | 0 | 0 |
attr |
Ernie4_5_MoeForCausalLM.router_aux_loss_coef |
1 | 0 | 0 |
attr |
Ernie4_5_MoeForCausalLM.num_experts |
1 | 0 | 0 |
attr |
Ernie4_5_MoeForCausalLM.num_experts_per_tok |
1 | 0 | 0 |
transformers.models.ernie4_5_vl_moe.configuration_ernie4_5_vl_moe (103 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Ernie4_5_VL_MoeVisionConfig.__init__ |
3 | 0 | 0 |
meth |
Ernie4_5_VLMoeConfig.__init__ |
11 | 0 | 0 |
attr |
Ernie4_5_VLMoeConfig.image_start_token_id |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeConfig.image_end_token_id |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeConfig.image_token_id |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeConfig.video_start_token_id |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeConfig.video_end_token_id |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeConfig.video_token_id |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeConfig.vision_config |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeConfig.text_config |
1 | 0 | 0 |
meth |
Ernie4_5_VLMoeTextConfig.__init__ |
26 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.vocab_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.hidden_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.hidden_act |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.initializer_range |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.use_cache |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.use_bias |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.rope_parameters |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.mlp_layer_types |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.moe_intermediate_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.moe_k |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.moe_num_experts |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.moe_num_shared_experts |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.moe_norm_min |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.output_router_logits |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.router_aux_loss_coef |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.pad_token_id |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.eos_token_id |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.bos_token_id |
1 | 0 | 0 |
meth |
Ernie4_5_VL_MoeConfig.__init__ |
3 | 0 | 0 |
meth |
Ernie4_5_VLMoeVisionConfig.__init__ |
13 | 0 | 0 |
attr |
Ernie4_5_VLMoeVisionConfig.depth |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVisionConfig.hidden_act |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVisionConfig.num_heads |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVisionConfig.in_channels |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVisionConfig.patch_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVisionConfig.spatial_merge_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVisionConfig.initializer_range |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVisionConfig.temporal_merge_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVisionConfig.rms_norm_eps |
1 | 0 | 0 |
meth |
Ernie4_5_VL_MoeTextConfig.__init__ |
3 | 0 | 0 |
transformers.models.ernie4_5_vl_moe.image_processing_ernie4_5_vl_moe (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Ernie4_5_VL_MoeImageProcessor.__init__ |
3 | 0 | 0 |
meth |
Ernie4_5_VLMoeImageProcessor.__init__ |
14 | 13 | 0 |
meth |
Ernie4_5_VLMoeImageProcessor._preprocess |
16 | 15 | 0 |
meth |
Ernie4_5_VLMoeImageProcessor.preprocess |
17 | 16 | 0 |
meth |
Ernie4_5_VLMoeImageProcessor.get_number_of_image_patches |
4 | 2 | 0 |
attr |
Ernie4_5_VLMoeImageProcessor.size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeImageProcessor.do_resize |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeImageProcessor.resample |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeImageProcessor.image_mean |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeImageProcessor.image_std |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeImageProcessor.patch_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeImageProcessor.temporal_patch_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeImageProcessor.merge_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeImageProcessor.do_convert_rgb |
1 | 0 | 0 |
transformers.models.ernie4_5_vl_moe.image_processing_ernie4_5_vl_moe_fast (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Ernie4_5_VLMoeImageProcessorFast.__init__ |
2 | 1 | 0 |
meth |
Ernie4_5_VLMoeImageProcessorFast._further_process_kwargs |
3 | 2 | 0 |
meth |
Ernie4_5_VLMoeImageProcessorFast._preprocess |
15 | 14 | 0 |
meth |
Ernie4_5_VLMoeImageProcessorFast.get_number_of_image_patches |
4 | 2 | 0 |
meth |
Ernie4_5_VL_MoeImageProcessorFast.__init__ |
3 | 0 | 0 |
transformers.models.ernie4_5_vl_moe.modeling_ernie4_5_vl_moe (92 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Ernie4_5_VL_MoeVisionTransformerPretrainedModel.__init__ |
3 | 0 | 0 |
meth |
Ernie4_5_VLMoeTextModel.__init__ |
2 | 1 | 0 |
attr |
Ernie4_5_VLMoeTextModel.padding_idx |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextModel.vocab_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextModel.embed_tokens |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextModel.layers |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextModel.norm |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextModel.rotary_emb |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
Ernie4_5_VLMoeForConditionalGeneration.__init__ |
2 | 0 | 0 |
meth |
Ernie4_5_VLMoeForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Ernie4_5_VLMoeForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Ernie4_5_VLMoeForConditionalGeneration.prepare_inputs_for_generation |
12 | 0 | 0 |
meth |
Ernie4_5_VLMoeForConditionalGeneration._prepare_position_ids_for_generation |
3 | 0 | 0 |
meth |
Ernie4_5_VLMoeForConditionalGeneration._expand_inputs_for_generation |
5 | 4 | 0 |
attr |
Ernie4_5_VLMoeForConditionalGeneration.model |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeForConditionalGeneration.lm_head |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeForConditionalGeneration.router_aux_loss_coef |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeForConditionalGeneration.num_experts |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeForConditionalGeneration.num_experts_per_tok |
1 | 0 | 0 |
meth |
Ernie4_5_VL_MoeTextModel.__init__ |
3 | 0 | 0 |
meth |
Ernie4_5_VL_MoeVariableResolutionResamplerModel.__init__ |
3 | 0 | 0 |
meth |
Ernie4_5_VLMoeModel.__init__ |
2 | 1 | 0 |
meth |
Ernie4_5_VLMoeModel.get_input_embeddings |
1 | 0 | 0 |
meth |
Ernie4_5_VLMoeModel.set_input_embeddings |
2 | 0 | 0 |
meth |
Ernie4_5_VLMoeModel.get_vision_position_ids |
7 | 6 | 0 |
meth |
Ernie4_5_VLMoeModel.get_rope_index |
7 | 6 | 0 |
meth |
Ernie4_5_VLMoeModel.get_placeholder_mask |
5 | 4 | 0 |
attr |
Ernie4_5_VLMoeModel.language_model |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeModel.rope_deltas |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeModel.vision_tower |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeModel.resampler_model |
1 | 0 | 0 |
meth |
Ernie4_5_VLMoeVariableResolutionResamplerModel.__init__ |
2 | 1 | 0 |
meth |
Ernie4_5_VLMoeVariableResolutionResamplerModel._temporal_slicing |
3 | 0 | 0 |
meth |
Ernie4_5_VLMoeVariableResolutionResamplerModel.forward |
3 | 0 | 0 |
attr |
Ernie4_5_VLMoeVariableResolutionResamplerModel.config |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVariableResolutionResamplerModel.in_dim |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVariableResolutionResamplerModel.out_dim |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVariableResolutionResamplerModel.spatial_merge_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVariableResolutionResamplerModel.temporal_merge_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVariableResolutionResamplerModel.spatial_dim |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVariableResolutionResamplerModel.temporal_dim |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVariableResolutionResamplerModel.spatial_linear |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVariableResolutionResamplerModel.temporal_linear |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVariableResolutionResamplerModel.mlp |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVariableResolutionResamplerModel.after_norm |
1 | 0 | 0 |
meth |
Ernie4_5_VL_MoeModel.__init__ |
3 | 0 | 0 |
meth |
Ernie4_5_VL_MoePreTrainedModel.post_init |
1 | 0 | 0 |
meth |
Ernie4_5_VLMoePreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
Ernie4_5_VLMoePreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
Ernie4_5_VL_MoeForConditionalGeneration.__init__ |
3 | 0 | 0 |
meth |
Ernie4_5_VLMoeVisionTransformerPretrainedModel.__init__ |
2 | 1 | 0 |
meth |
Ernie4_5_VLMoeVisionTransformerPretrainedModel.rot_pos_emb |
2 | 0 | 0 |
attr |
Ernie4_5_VLMoeVisionTransformerPretrainedModel._can_record_outputs |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVisionTransformerPretrainedModel.spatial_merge_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVisionTransformerPretrainedModel.patch_embed |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVisionTransformerPretrainedModel.rotary_pos_emb |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVisionTransformerPretrainedModel.blocks |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVisionTransformerPretrainedModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVisionTransformerPretrainedModel.ln |
1 | 0 | 0 |
transformers.models.ernie4_5_vl_moe.modular_ernie4_5_vl_moe (192 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Ernie4_5_VLMoeImageProcessorFast._preprocess |
15 | 13 | 0 |
meth |
Ernie4_5_VLMoeImageProcessorFast.get_number_of_image_patches |
4 | 2 | 0 |
meth |
Ernie4_5_VL_MoeVisionTransformerPretrainedModel.__init__ |
3 | 0 | 0 |
meth |
Ernie4_5_VLMoeTextModel.__init__ |
2 | 1 | 0 |
attr |
Ernie4_5_VLMoeTextModel.rotary_emb |
1 | 0 | 0 |
meth |
Ernie4_5_VLMoeConfig.__init__ |
11 | 0 | 0 |
attr |
Ernie4_5_VLMoeConfig.image_start_token_id |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeConfig.image_end_token_id |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeConfig.image_token_id |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeConfig.video_start_token_id |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeConfig.video_end_token_id |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeConfig.video_token_id |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeConfig.vision_config |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeConfig.text_config |
1 | 0 | 0 |
meth |
Ernie4_5_VLMoeVariableResolutionResamplerModel.__init__ |
2 | 1 | 0 |
meth |
Ernie4_5_VLMoeVariableResolutionResamplerModel._temporal_slicing |
3 | 0 | 0 |
meth |
Ernie4_5_VLMoeVariableResolutionResamplerModel.forward |
3 | 0 | 0 |
attr |
Ernie4_5_VLMoeVariableResolutionResamplerModel.config |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVariableResolutionResamplerModel.in_dim |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVariableResolutionResamplerModel.out_dim |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVariableResolutionResamplerModel.spatial_merge_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVariableResolutionResamplerModel.temporal_merge_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVariableResolutionResamplerModel.spatial_dim |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVariableResolutionResamplerModel.temporal_dim |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVariableResolutionResamplerModel.spatial_linear |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVariableResolutionResamplerModel.temporal_linear |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVariableResolutionResamplerModel.mlp |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVariableResolutionResamplerModel.after_norm |
1 | 0 | 0 |
meth |
Ernie4_5_VL_MoeTextConfig.__init__ |
3 | 0 | 0 |
meth |
Ernie4_5_VL_MoeModel.__init__ |
3 | 0 | 0 |
meth |
Ernie4_5_VL_MoeImageProcessor.__init__ |
3 | 0 | 0 |
meth |
Ernie4_5_VL_MoeVisionConfig.__init__ |
3 | 0 | 0 |
meth |
Ernie4_5_VLMoeTextConfig.__init__ |
26 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.vocab_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.hidden_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.hidden_act |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.initializer_range |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.use_cache |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.use_bias |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.rope_parameters |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.mlp_layer_types |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.moe_intermediate_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.moe_k |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.moe_num_experts |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.moe_num_shared_experts |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.moe_norm_min |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.output_router_logits |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.router_aux_loss_coef |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.pad_token_id |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.eos_token_id |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeTextConfig.bos_token_id |
1 | 0 | 0 |
meth |
Ernie4_5_VLMoeImageProcessor.__init__ |
14 | 13 | 0 |
meth |
Ernie4_5_VLMoeImageProcessor._preprocess |
16 | 15 | 0 |
meth |
Ernie4_5_VLMoeImageProcessor.get_number_of_image_patches |
4 | 2 | 0 |
attr |
Ernie4_5_VLMoeImageProcessor.size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeImageProcessor.do_resize |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeImageProcessor.resample |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeImageProcessor.image_mean |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeImageProcessor.image_std |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeImageProcessor.patch_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeImageProcessor.temporal_patch_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeImageProcessor.merge_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeImageProcessor.do_convert_rgb |
1 | 0 | 0 |
meth |
Ernie4_5_VLMoeVisionConfig.__init__ |
13 | 0 | 0 |
attr |
Ernie4_5_VLMoeVisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVisionConfig.temporal_merge_size |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVisionConfig.rms_norm_eps |
1 | 0 | 0 |
meth |
Ernie4_5_VLMoeVisionTransformerPretrainedModel.__init__ |
2 | 1 | 0 |
meth |
Ernie4_5_VLMoeVisionTransformerPretrainedModel.get_dtype |
1 | 0 | 0 |
meth |
Ernie4_5_VLMoeVisionTransformerPretrainedModel.get_device |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVisionTransformerPretrainedModel._can_record_outputs |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVisionTransformerPretrainedModel.patch_embed |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVisionTransformerPretrainedModel.rotary_pos_emb |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeVisionTransformerPretrainedModel.ln |
1 | 0 | 0 |
meth |
Ernie4_5_VLMoeForConditionalGeneration.__init__ |
2 | 0 | 0 |
meth |
Ernie4_5_VLMoeForConditionalGeneration.get_video_features |
2 | 0 | 0 |
meth |
Ernie4_5_VLMoeForConditionalGeneration.get_image_features |
2 | 0 | 0 |
meth |
Ernie4_5_VLMoeForConditionalGeneration.prepare_inputs_for_generation |
12 | 0 | 0 |
attr |
Ernie4_5_VLMoeForConditionalGeneration.router_aux_loss_coef |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeForConditionalGeneration.num_experts |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeForConditionalGeneration.num_experts_per_tok |
1 | 0 | 0 |
meth |
Ernie4_5_VL_MoeTextModel.__init__ |
3 | 0 | 0 |
meth |
Ernie4_5_VL_MoeVariableResolutionResamplerModel.__init__ |
3 | 0 | 0 |
meth |
Ernie4_5_VLMoeModel.__init__ |
2 | 1 | 0 |
meth |
Ernie4_5_VLMoeModel.get_rope_index |
7 | 6 | 0 |
attr |
Ernie4_5_VLMoeModel.vision_tower |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeModel.resampler_model |
1 | 0 | 0 |
meth |
Ernie4_5_VL_MoeImageProcessorFast.__init__ |
3 | 0 | 0 |
meth |
Ernie4_5_VL_MoePreTrainedModel.post_init |
1 | 0 | 0 |
meth |
Ernie4_5_VLMoePreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
Ernie4_5_VLMoePreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
Ernie4_5_VL_MoeForConditionalGeneration.__init__ |
3 | 0 | 0 |
meth |
Ernie4_5_VL_MoeConfig.__init__ |
3 | 0 | 0 |
transformers.models.ernie4_5_vl_moe.processing_ernie4_5_vl_moe (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Ernie4_5_VLMoeProcessor.__init__ |
6 | 0 | 0 |
meth |
Ernie4_5_VLMoeProcessor.save_pretrained |
4 | 1 | 0 |
meth |
Ernie4_5_VLMoeProcessor._get_num_multimodal_tokens |
4 | 0 | 0 |
prop |
Ernie4_5_VLMoeProcessor.model_input_names |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeProcessor.image_token |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeProcessor.image_end_token |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeProcessor.image_start_token |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeProcessor.video_token |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeProcessor.video_end_token |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeProcessor.video_start_token |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeProcessor.image_token_id |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeProcessor.image_end_token_id |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeProcessor.image_start_token_id |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeProcessor.video_token_id |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeProcessor.video_end_token_id |
1 | 0 | 0 |
attr |
Ernie4_5_VLMoeProcessor.video_start_token_id |
1 | 0 | 0 |
transformers.models.ernie4_5_vl_moe.video_processing_ernie4_5_vl_moe (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Ernie4_5_VLMoeVideoProcessor.__init__ |
2 | 1 | 0 |
meth |
Ernie4_5_VLMoeVideoProcessor.get_video_processor_dict |
3 | 2 | 0 |
meth |
Ernie4_5_VLMoeVideoProcessor.save_pretrained |
4 | 2 | 0 |
meth |
Ernie4_5_VLMoeVideoProcessor._further_process_kwargs |
3 | 2 | 0 |
meth |
Ernie4_5_VLMoeVideoProcessor.sample_frames |
7 | 5 | 0 |
meth |
Ernie4_5_VLMoeVideoProcessor._convert_timestamp |
2 | 0 | 0 |
meth |
Ernie4_5_VLMoeVideoProcessor._render_image_with_timestamp |
4 | 3 | 0 |
meth |
Ernie4_5_VLMoeVideoProcessor._preprocess |
15 | 13 | 0 |
transformers.models.esm.configuration_esm (47 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EsmConfig.__init__ |
24 | 0 | 0 |
meth |
EsmConfig.to_dict |
1 | 0 | 0 |
attr |
EsmConfig.is_decoder |
1 | 0 | 0 |
attr |
EsmConfig.add_cross_attention |
1 | 0 | 0 |
attr |
EsmConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
EsmConfig.pad_token_id |
1 | 0 | 0 |
attr |
EsmConfig.mask_token_id |
1 | 0 | 0 |
attr |
EsmConfig.vocab_size |
1 | 0 | 0 |
attr |
EsmConfig.hidden_size |
1 | 0 | 0 |
attr |
EsmConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
EsmConfig.num_attention_heads |
1 | 0 | 0 |
attr |
EsmConfig.intermediate_size |
1 | 0 | 0 |
attr |
EsmConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
EsmConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
EsmConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
EsmConfig.initializer_range |
1 | 0 | 0 |
attr |
EsmConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
EsmConfig.position_embedding_type |
1 | 0 | 0 |
attr |
EsmConfig.use_cache |
1 | 0 | 0 |
attr |
EsmConfig.emb_layer_norm_before |
1 | 0 | 0 |
attr |
EsmConfig.token_dropout |
1 | 0 | 0 |
attr |
EsmConfig.is_folding_model |
1 | 0 | 0 |
attr |
EsmConfig.esmfold_config |
1 | 0 | 0 |
attr |
EsmConfig.vocab_list |
1 | 0 | 0 |
transformers.models.esm.modeling_esm (45 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EsmModel.__init__ |
3 | 0 | 0 |
meth |
EsmModel.get_input_embeddings |
1 | 0 | 0 |
meth |
EsmModel.set_input_embeddings |
2 | 0 | 0 |
meth |
EsmModel._create_attention_masks |
7 | 0 | 0 |
meth |
EsmModel.predict_contacts |
3 | 0 | 0 |
attr |
EsmModel.embeddings |
1 | 0 | 0 |
attr |
EsmModel.encoder |
1 | 0 | 0 |
attr |
EsmModel.pooler |
1 | 0 | 0 |
attr |
EsmModel.contact_head |
1 | 0 | 0 |
meth |
EsmForTokenClassification.__init__ |
2 | 0 | 0 |
attr |
EsmForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
EsmForTokenClassification.esm |
1 | 0 | 0 |
attr |
EsmForTokenClassification.dropout |
1 | 0 | 0 |
attr |
EsmForTokenClassification.classifier |
1 | 0 | 0 |
meth |
EsmForMaskedLM.__init__ |
2 | 0 | 0 |
meth |
EsmForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
EsmForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
meth |
EsmForMaskedLM.predict_contacts |
3 | 0 | 0 |
attr |
EsmForMaskedLM.esm |
1 | 0 | 0 |
attr |
EsmForMaskedLM.lm_head |
1 | 0 | 0 |
meth |
EsmPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
EsmPreTrainedModel.get_output_embeddings |
1 | 0 | 0 |
attr |
EsmPreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
EsmForSequenceClassification.__init__ |
2 | 0 | 0 |
attr |
EsmForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
EsmForSequenceClassification.esm |
1 | 0 | 0 |
attr |
EsmForSequenceClassification.classifier |
1 | 0 | 0 |
transformers.models.esm.modeling_esmfold (45 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EsmFoldPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
EsmForProteinFolding._init_weights |
2 | 0 | 0 |
meth |
EsmForProteinFolding.__init__ |
2 | 0 | 0 |
meth |
EsmForProteinFolding.forward |
8 | 7 | 0 |
meth |
EsmForProteinFolding.af2_idx_to_esm_idx |
3 | 0 | 0 |
meth |
EsmForProteinFolding.bert_mask |
5 | 0 | 0 |
meth |
EsmForProteinFolding.infer |
3 | 1 | 0 |
meth |
EsmForProteinFolding.infer_pdb |
4 | 1 | 0 |
meth |
EsmForProteinFolding.infer_pdbs |
4 | 2 | 0 |
attr |
EsmForProteinFolding.config |
1 | 0 | 0 |
attr |
EsmForProteinFolding.distogram_bins |
1 | 0 | 0 |
attr |
EsmForProteinFolding.esm |
1 | 0 | 0 |
attr |
EsmForProteinFolding.esm_feats |
1 | 0 | 0 |
attr |
EsmForProteinFolding.esm_attns |
1 | 0 | 0 |
attr |
EsmForProteinFolding.esm_layers |
1 | 0 | 0 |
attr |
EsmForProteinFolding.esm_s_combine |
1 | 0 | 0 |
attr |
EsmForProteinFolding.esm_s_mlp |
1 | 0 | 0 |
attr |
EsmForProteinFolding.n_tokens_embed |
1 | 0 | 0 |
attr |
EsmForProteinFolding.pad_idx |
1 | 0 | 0 |
attr |
EsmForProteinFolding.unk_idx |
1 | 0 | 0 |
attr |
EsmForProteinFolding.mask_idx |
1 | 0 | 0 |
attr |
EsmForProteinFolding.esm_dict_cls_idx |
1 | 0 | 0 |
attr |
EsmForProteinFolding.esm_dict_mask_idx |
1 | 0 | 0 |
attr |
EsmForProteinFolding.esm_dict_eos_idx |
1 | 0 | 0 |
attr |
EsmForProteinFolding.esm_dict_padding_idx |
1 | 0 | 0 |
attr |
EsmForProteinFolding.trunk |
1 | 0 | 0 |
attr |
EsmForProteinFolding.distogram_head |
1 | 0 | 0 |
attr |
EsmForProteinFolding.ptm_head |
1 | 0 | 0 |
attr |
EsmForProteinFolding.lm_head |
1 | 0 | 0 |
attr |
EsmForProteinFolding.lddt_bins |
1 | 0 | 0 |
attr |
EsmForProteinFolding.lddt_head |
1 | 0 | 0 |
attr |
EsmForProteinFolding.embedding |
1 | 0 | 0 |
transformers.models.esm.openfold_utils.chunk_utils (2 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ChunkSizeTuner.__init__ |
2 | 1 | 0 |
attr |
ChunkSizeTuner.max_chunk_size |
1 | 0 | 0 |
func |
chunk_layer |
8 | 8 | 2 |
transformers.models.esm.openfold_utils.loss (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
compute_tm |
7 | 6 | 0 |
func |
compute_predicted_aligned_error |
5 | 4 | 0 |
transformers.models.esm.openfold_utils.residue_constants (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
restype_num |
1 | 0 | 0 |
attr |
restype_atom37_to_rigid_group |
1 | 0 | 0 |
attr |
restype_atom37_mask |
1 | 0 | 0 |
attr |
restype_atom14_ambiguous_atoms |
1 | 0 | 0 |
attr |
chi_atom_2_one_hot |
1 | 0 | 0 |
attr |
STANDARD_ATOM_MASK |
1 | 0 | 0 |
attr |
restype_rigid_group_default_frame |
1 | 0 | 0 |
attr |
restype_atom14_mask |
1 | 0 | 0 |
attr |
restype_atom37_rigid_group_positions |
1 | 0 | 0 |
attr |
restype_atom14_to_rigid_group |
1 | 0 | 0 |
attr |
restype_atom14_rigid_group_positions |
1 | 0 | 0 |
attr |
chi_atom_1_one_hot |
1 | 0 | 0 |
attr |
chi_angles_atom_indices |
1 | 0 | 0 |
attr |
atom_type_num |
1 | 0 | 0 |
transformers.models.esm.openfold_utils.rigid_utils (3 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Rotation.__init__ |
4 | 3 | 0 |
meth |
Rotation.identity |
6 | 5 | 0 |
meth |
Rotation.__getitem__ |
2 | 2 | 1 |
meth |
Rigid.__init__ |
3 | 2 | 0 |
meth |
Rigid.__getitem__ |
2 | 2 | 1 |
transformers.models.esm.openfold_utils.tensor_utils (1 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
tensor_tree_map |
1 | 0 | 0 |
func |
tree_map |
4 | 4 | 1 |
transformers.models.esm.tokenization_esm (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EsmTokenizer.__init__ |
8 | 0 | 0 |
meth |
EsmTokenizer._tokenize |
3 | 0 | 0 |
meth |
EsmTokenizer.get_vocab |
1 | 0 | 0 |
meth |
EsmTokenizer.save_vocabulary |
3 | 0 | 0 |
attr |
EsmTokenizer.all_tokens |
1 | 0 | 0 |
attr |
EsmTokenizer.unique_no_split_tokens |
1 | 0 | 0 |
transformers.models.eurobert.configuration_eurobert (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EuroBertConfig.__init__ |
24 | 1 | 0 |
attr |
EuroBertConfig.mask_token_id |
1 | 0 | 0 |
attr |
EuroBertConfig.classifier_pooling |
1 | 0 | 0 |
attr |
EuroBertConfig.is_causal |
1 | 0 | 0 |
transformers.models.eurobert.modeling_eurobert (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EuroBertForTokenClassification.__init__ |
2 | 1 | 0 |
meth |
EuroBertForTokenClassification.get_input_embeddings |
1 | 0 | 0 |
meth |
EuroBertForTokenClassification.set_input_embeddings |
2 | 0 | 0 |
attr |
EuroBertForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
EuroBertForTokenClassification.model |
1 | 0 | 0 |
attr |
EuroBertForTokenClassification.classifier |
1 | 0 | 0 |
meth |
EuroBertForMaskedLM.__init__ |
2 | 1 | 0 |
attr |
EuroBertForMaskedLM.model |
1 | 0 | 0 |
attr |
EuroBertForMaskedLM.lm_head |
1 | 0 | 0 |
meth |
EuroBertModel.__init__ |
2 | 1 | 0 |
attr |
EuroBertModel.padding_idx |
1 | 0 | 0 |
attr |
EuroBertModel.vocab_size |
1 | 0 | 0 |
attr |
EuroBertModel.embed_tokens |
1 | 0 | 0 |
attr |
EuroBertModel.layers |
1 | 0 | 0 |
attr |
EuroBertModel.norm |
1 | 0 | 0 |
attr |
EuroBertModel.rotary_emb |
1 | 0 | 0 |
attr |
EuroBertModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
EuroBertForSequenceClassification.__init__ |
2 | 1 | 0 |
attr |
EuroBertForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
EuroBertForSequenceClassification.classifier_pooling |
1 | 0 | 0 |
attr |
EuroBertForSequenceClassification.model |
1 | 0 | 0 |
attr |
EuroBertForSequenceClassification.dense |
1 | 0 | 0 |
attr |
EuroBertForSequenceClassification.activation |
1 | 0 | 0 |
attr |
EuroBertForSequenceClassification.classifier |
1 | 0 | 0 |
transformers.models.eurobert.modular_eurobert (43 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EuroBertForTokenClassification.__init__ |
2 | 1 | 0 |
meth |
EuroBertForTokenClassification.get_input_embeddings |
1 | 0 | 0 |
meth |
EuroBertForTokenClassification.set_input_embeddings |
2 | 0 | 0 |
attr |
EuroBertForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
EuroBertForTokenClassification.model |
1 | 0 | 0 |
attr |
EuroBertForTokenClassification.classifier |
1 | 0 | 0 |
meth |
EuroBertForMaskedLM.__init__ |
2 | 1 | 0 |
attr |
EuroBertForMaskedLM.model |
1 | 0 | 0 |
attr |
EuroBertForMaskedLM.lm_head |
1 | 0 | 0 |
meth |
EuroBertForSequenceClassification.__init__ |
2 | 1 | 0 |
attr |
EuroBertForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
EuroBertForSequenceClassification.classifier_pooling |
1 | 0 | 0 |
attr |
EuroBertForSequenceClassification.model |
1 | 0 | 0 |
attr |
EuroBertForSequenceClassification.dense |
1 | 0 | 0 |
attr |
EuroBertForSequenceClassification.activation |
1 | 0 | 0 |
attr |
EuroBertForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
EuroBertConfig.__init__ |
24 | 1 | 0 |
attr |
EuroBertConfig.mask_token_id |
1 | 0 | 0 |
attr |
EuroBertConfig.classifier_pooling |
1 | 0 | 0 |
attr |
EuroBertConfig.is_causal |
1 | 0 | 0 |
transformers.models.evolla.configuration_evolla (33 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EvollaConfig.__init__ |
33 | 31 | 0 |
attr |
EvollaConfig.is_decoder |
1 | 0 | 0 |
attr |
EvollaConfig.add_cross_attention |
1 | 0 | 0 |
attr |
EvollaConfig.vocab_size |
1 | 0 | 0 |
attr |
EvollaConfig.hidden_size |
1 | 0 | 0 |
attr |
EvollaConfig.intermediate_size |
1 | 0 | 0 |
attr |
EvollaConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
EvollaConfig.num_attention_heads |
1 | 0 | 0 |
attr |
EvollaConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
EvollaConfig.hidden_act |
1 | 0 | 0 |
attr |
EvollaConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
EvollaConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
EvollaConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
EvollaConfig.attention_bias |
1 | 0 | 0 |
attr |
EvollaConfig.attention_dropout |
1 | 0 | 0 |
attr |
EvollaConfig.mlp_bias |
1 | 0 | 0 |
attr |
EvollaConfig.aligner_ffn_mult |
1 | 0 | 0 |
attr |
EvollaConfig.aligner_enable_bias |
1 | 0 | 0 |
attr |
EvollaConfig.aligner_attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
EvollaConfig.aligner_num_add_layers |
1 | 0 | 0 |
attr |
EvollaConfig.use_cache |
1 | 0 | 0 |
attr |
EvollaConfig.initializer_range |
1 | 0 | 0 |
attr |
EvollaConfig.resampler_depth |
1 | 0 | 0 |
attr |
EvollaConfig.resampler_dim_head |
1 | 0 | 0 |
attr |
EvollaConfig.resampler_heads |
1 | 0 | 0 |
attr |
EvollaConfig.resampler_num_latents |
1 | 0 | 0 |
attr |
EvollaConfig.resampler_ff_mult |
1 | 0 | 0 |
attr |
EvollaConfig.rope_parameters |
1 | 0 | 0 |
attr |
EvollaConfig.protein_encoder_config |
1 | 0 | 0 |
attr |
EvollaConfig.pad_token_id |
1 | 0 | 0 |
attr |
EvollaConfig.bos_token_id |
1 | 0 | 0 |
attr |
EvollaConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.evolla.modeling_evolla (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EvollaForProteinText2Text.__init__ |
2 | 0 | 0 |
meth |
EvollaForProteinText2Text.get_input_embeddings |
1 | 0 | 0 |
meth |
EvollaForProteinText2Text.set_input_embeddings |
2 | 0 | 0 |
meth |
EvollaForProteinText2Text.forward |
10 | 8 | 0 |
attr |
EvollaForProteinText2Text.model |
1 | 0 | 0 |
attr |
EvollaForProteinText2Text.vocab_size |
1 | 0 | 0 |
attr |
EvollaForProteinText2Text.lm_head |
1 | 0 | 0 |
meth |
EvollaPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
EvollaModel.__init__ |
2 | 1 | 0 |
meth |
EvollaModel.get_input_embeddings |
1 | 0 | 0 |
meth |
EvollaModel.set_input_embeddings |
2 | 0 | 0 |
meth |
EvollaModel.forward |
15 | 14 | 0 |
attr |
EvollaModel.padding_idx |
1 | 0 | 0 |
attr |
EvollaModel.vocab_size |
1 | 0 | 0 |
attr |
EvollaModel.embed_tokens |
1 | 0 | 0 |
attr |
EvollaModel.protein_encoder |
1 | 0 | 0 |
attr |
EvollaModel.layers |
1 | 0 | 0 |
attr |
EvollaModel.norm |
1 | 0 | 0 |
attr |
EvollaModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
EvollaModel.rotary_emb |
1 | 0 | 0 |
transformers.models.evolla.modular_evolla (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EvollaForProteinText2Text.__init__ |
2 | 0 | 0 |
meth |
EvollaForProteinText2Text.get_input_embeddings |
1 | 0 | 0 |
meth |
EvollaForProteinText2Text.set_input_embeddings |
2 | 0 | 0 |
meth |
EvollaForProteinText2Text.forward |
10 | 8 | 0 |
attr |
EvollaForProteinText2Text.model |
1 | 0 | 0 |
attr |
EvollaForProteinText2Text.vocab_size |
1 | 0 | 0 |
attr |
EvollaForProteinText2Text.lm_head |
1 | 0 | 0 |
meth |
EvollaPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
EvollaModel.__init__ |
2 | 1 | 0 |
meth |
EvollaModel.get_input_embeddings |
1 | 0 | 0 |
meth |
EvollaModel.set_input_embeddings |
2 | 0 | 0 |
meth |
EvollaModel.forward |
15 | 14 | 0 |
attr |
EvollaModel.padding_idx |
1 | 0 | 0 |
attr |
EvollaModel.vocab_size |
1 | 0 | 0 |
attr |
EvollaModel.embed_tokens |
1 | 0 | 0 |
attr |
EvollaModel.protein_encoder |
1 | 0 | 0 |
attr |
EvollaModel.layers |
1 | 0 | 0 |
attr |
EvollaModel.norm |
1 | 0 | 0 |
attr |
EvollaModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
EvollaModel.rotary_emb |
1 | 0 | 0 |
transformers.models.evolla.processing_evolla (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EvollaProcessor.__init__ |
6 | 0 | 0 |
meth |
EvollaProcessor.process_proteins |
3 | 0 | 0 |
meth |
EvollaProcessor.process_text |
3 | 1 | 0 |
meth |
EvollaProcessor.__call__ |
6 | 4 | 0 |
meth |
EvollaProcessor.batch_decode |
3 | 0 | 0 |
meth |
EvollaProcessor.decode |
3 | 0 | 0 |
meth |
EvollaProcessor.protein_batch_decode |
3 | 0 | 0 |
meth |
EvollaProcessor.protein_decode |
3 | 0 | 0 |
attr |
EvollaProcessor.protein_max_length |
1 | 0 | 0 |
attr |
EvollaProcessor.text_max_length |
1 | 0 | 0 |
transformers.models.exaone4.configuration_exaone4 (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Exaone4Config.__init__ |
22 | 20 | 0 |
attr |
Exaone4Config.vocab_size |
1 | 0 | 0 |
attr |
Exaone4Config.hidden_size |
1 | 0 | 0 |
attr |
Exaone4Config.num_hidden_layers |
1 | 0 | 0 |
attr |
Exaone4Config.num_attention_heads |
1 | 0 | 0 |
attr |
Exaone4Config.num_key_value_heads |
1 | 0 | 0 |
attr |
Exaone4Config.intermediate_size |
1 | 0 | 0 |
attr |
Exaone4Config.hidden_act |
1 | 0 | 0 |
attr |
Exaone4Config.max_position_embeddings |
1 | 0 | 0 |
attr |
Exaone4Config.initializer_range |
1 | 0 | 0 |
attr |
Exaone4Config.rms_norm_eps |
1 | 0 | 0 |
attr |
Exaone4Config.use_cache |
1 | 0 | 0 |
attr |
Exaone4Config.attention_dropout |
1 | 0 | 0 |
attr |
Exaone4Config.sliding_window |
1 | 0 | 0 |
attr |
Exaone4Config.sliding_window_pattern |
1 | 0 | 0 |
attr |
Exaone4Config.bos_token_id |
1 | 0 | 0 |
attr |
Exaone4Config.eos_token_id |
1 | 0 | 0 |
attr |
Exaone4Config.pad_token_id |
1 | 0 | 0 |
attr |
Exaone4Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
Exaone4Config.layer_types |
1 | 0 | 0 |
attr |
Exaone4Config.rope_parameters |
1 | 0 | 0 |
transformers.models.exaone4.modeling_exaone4 (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Exaone4Model.__init__ |
2 | 1 | 0 |
attr |
Exaone4Model.padding_idx |
1 | 0 | 0 |
attr |
Exaone4Model.vocab_size |
1 | 0 | 0 |
attr |
Exaone4Model.embed_tokens |
1 | 0 | 0 |
attr |
Exaone4Model.layers |
1 | 0 | 0 |
attr |
Exaone4Model.norm |
1 | 0 | 0 |
attr |
Exaone4Model.rotary_emb |
1 | 0 | 0 |
attr |
Exaone4Model.gradient_checkpointing |
1 | 0 | 0 |
meth |
Exaone4ForCausalLM.__init__ |
2 | 0 | 0 |
attr |
Exaone4ForCausalLM.model |
1 | 0 | 0 |
attr |
Exaone4ForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Exaone4ForCausalLM.lm_head |
1 | 0 | 0 |
transformers.models.exaone4.modular_exaone4 (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Exaone4Model.__init__ |
2 | 1 | 0 |
attr |
Exaone4Model.layers |
1 | 0 | 0 |
attr |
Exaone4Model.norm |
1 | 0 | 0 |
meth |
Exaone4Config.__init__ |
22 | 20 | 0 |
attr |
Exaone4Config.vocab_size |
1 | 0 | 0 |
attr |
Exaone4Config.hidden_size |
1 | 0 | 0 |
attr |
Exaone4Config.num_hidden_layers |
1 | 0 | 0 |
attr |
Exaone4Config.num_attention_heads |
1 | 0 | 0 |
attr |
Exaone4Config.num_key_value_heads |
1 | 0 | 0 |
attr |
Exaone4Config.intermediate_size |
1 | 0 | 0 |
attr |
Exaone4Config.hidden_act |
1 | 0 | 0 |
attr |
Exaone4Config.max_position_embeddings |
1 | 0 | 0 |
attr |
Exaone4Config.initializer_range |
1 | 0 | 0 |
attr |
Exaone4Config.rms_norm_eps |
1 | 0 | 0 |
attr |
Exaone4Config.use_cache |
1 | 0 | 0 |
attr |
Exaone4Config.attention_dropout |
1 | 0 | 0 |
attr |
Exaone4Config.sliding_window |
1 | 0 | 0 |
attr |
Exaone4Config.sliding_window_pattern |
1 | 0 | 0 |
attr |
Exaone4Config.bos_token_id |
1 | 0 | 0 |
attr |
Exaone4Config.eos_token_id |
1 | 0 | 0 |
attr |
Exaone4Config.pad_token_id |
1 | 0 | 0 |
attr |
Exaone4Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
Exaone4Config.layer_types |
1 | 0 | 0 |
attr |
Exaone4Config.rope_parameters |
1 | 0 | 0 |
transformers.models.exaone_moe.configuration_exaone_moe (62 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ExaoneMoeConfig.__init__ |
32 | 0 | 0 |
attr |
ExaoneMoeConfig.vocab_size |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.hidden_size |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.num_attention_heads |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.intermediate_size |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.hidden_act |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.initializer_range |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.use_cache |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.attention_dropout |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.sliding_window |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.sliding_window_pattern |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.first_k_dense_replace |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.moe_intermediate_size |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.num_experts |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.num_shared_experts |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.norm_topk_prob |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.routed_scaling_factor |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.n_group |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.topk_group |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.rope_parameters |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.layer_types |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.mlp_layer_types |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.bos_token_id |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.eos_token_id |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.pad_token_id |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.exaone_moe.modeling_exaone_moe (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ExaoneMoeForCausalLM.__init__ |
2 | 0 | 0 |
attr |
ExaoneMoeForCausalLM.model |
1 | 0 | 0 |
attr |
ExaoneMoeForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
ExaoneMoeForCausalLM.lm_head |
1 | 0 | 0 |
meth |
ExaoneMoeModel.__init__ |
2 | 1 | 0 |
attr |
ExaoneMoeModel.padding_idx |
1 | 0 | 0 |
attr |
ExaoneMoeModel.vocab_size |
1 | 0 | 0 |
attr |
ExaoneMoeModel.embed_tokens |
1 | 0 | 0 |
attr |
ExaoneMoeModel.layers |
1 | 0 | 0 |
attr |
ExaoneMoeModel.norm |
1 | 0 | 0 |
attr |
ExaoneMoeModel.rotary_emb |
1 | 0 | 0 |
attr |
ExaoneMoeModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
ExaoneMoePreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.exaone_moe.modular_exaone_moe (64 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ExaoneMoePreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
ExaoneMoeConfig.__init__ |
32 | 0 | 0 |
attr |
ExaoneMoeConfig.vocab_size |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.hidden_size |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.num_attention_heads |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.intermediate_size |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.hidden_act |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.initializer_range |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.use_cache |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.attention_dropout |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.sliding_window |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.sliding_window_pattern |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.first_k_dense_replace |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.moe_intermediate_size |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.num_experts |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.num_shared_experts |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.norm_topk_prob |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.routed_scaling_factor |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.n_group |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.topk_group |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.rope_parameters |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.layer_types |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.mlp_layer_types |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.bos_token_id |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.eos_token_id |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.pad_token_id |
1 | 0 | 0 |
attr |
ExaoneMoeConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.falcon.configuration_falcon (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FalconConfig.__init__ |
26 | 24 | 0 |
prop |
FalconConfig.head_dim |
1 | 0 | 0 |
prop |
FalconConfig.rotary |
1 | 0 | 0 |
attr |
FalconConfig.vocab_size |
1 | 0 | 0 |
attr |
FalconConfig.hidden_size |
1 | 0 | 0 |
attr |
FalconConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
FalconConfig.num_attention_heads |
1 | 0 | 0 |
attr |
FalconConfig.layer_norm_epsilon |
1 | 0 | 0 |
attr |
FalconConfig.initializer_range |
1 | 0 | 0 |
attr |
FalconConfig.use_cache |
1 | 0 | 0 |
attr |
FalconConfig.hidden_dropout |
1 | 0 | 0 |
attr |
FalconConfig.attention_dropout |
1 | 0 | 0 |
attr |
FalconConfig.bos_token_id |
1 | 0 | 0 |
attr |
FalconConfig.eos_token_id |
1 | 0 | 0 |
attr |
FalconConfig.pad_token_id |
1 | 0 | 0 |
attr |
FalconConfig.num_kv_heads |
1 | 0 | 0 |
attr |
FalconConfig.alibi |
1 | 0 | 0 |
attr |
FalconConfig.new_decoder_architecture |
1 | 0 | 0 |
attr |
FalconConfig.multi_query |
1 | 0 | 0 |
attr |
FalconConfig.parallel_attn |
1 | 0 | 0 |
attr |
FalconConfig.bias |
1 | 0 | 0 |
attr |
FalconConfig.num_ln_in_parallel_attn |
1 | 0 | 0 |
attr |
FalconConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
FalconConfig.activation |
1 | 0 | 0 |
attr |
FalconConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
FalconConfig.rope_parameters |
1 | 0 | 0 |
attr |
FalconConfig.ffn_hidden_size |
1 | 0 | 0 |
transformers.models.falcon.modeling_falcon (36 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FalconForCausalLM.__init__ |
2 | 1 | 0 |
meth |
FalconForCausalLM.set_output_embeddings |
2 | 1 | 0 |
meth |
FalconForCausalLM.forward |
14 | 13 | 0 |
attr |
FalconForCausalLM.transformer |
1 | 0 | 0 |
attr |
FalconForCausalLM.lm_head |
1 | 0 | 0 |
meth |
FalconPreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
FalconPreTrainedModel._check_and_enable_sdpa |
3 | 1 | 0 |
meth |
FalconModel.__init__ |
2 | 1 | 0 |
meth |
FalconModel.get_input_embeddings |
1 | 0 | 0 |
meth |
FalconModel.set_input_embeddings |
2 | 1 | 0 |
meth |
FalconModel.forward |
12 | 11 | 0 |
attr |
FalconModel.embed_dim |
1 | 0 | 0 |
attr |
FalconModel.num_heads |
1 | 0 | 0 |
attr |
FalconModel.use_alibi |
1 | 0 | 0 |
attr |
FalconModel.word_embeddings |
1 | 0 | 0 |
attr |
FalconModel.h |
1 | 0 | 0 |
attr |
FalconModel.ln_f |
1 | 0 | 0 |
attr |
FalconModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
FalconModel.rotary_emb |
1 | 0 | 0 |
meth |
FalconForTokenClassification.__init__ |
2 | 1 | 0 |
meth |
FalconForTokenClassification.forward |
11 | 10 | 0 |
attr |
FalconForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
FalconForTokenClassification.transformer |
1 | 0 | 0 |
attr |
FalconForTokenClassification.dropout |
1 | 0 | 0 |
attr |
FalconForTokenClassification.classifier |
1 | 0 | 0 |
meth |
FalconForSequenceClassification.__init__ |
2 | 1 | 0 |
meth |
FalconForSequenceClassification.forward |
11 | 10 | 0 |
attr |
FalconForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
FalconForSequenceClassification.transformer |
1 | 0 | 0 |
attr |
FalconForSequenceClassification.score |
1 | 0 | 0 |
meth |
FalconForQuestionAnswering.__init__ |
2 | 0 | 0 |
meth |
FalconForQuestionAnswering.forward |
10 | 9 | 0 |
attr |
FalconForQuestionAnswering.transformer |
1 | 0 | 0 |
attr |
FalconForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
transformers.models.falcon_h1.configuration_falcon_h1 (48 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FalconH1Config.__init__ |
45 | 43 | 0 |
prop |
FalconH1Config.layers_block_type |
1 | 0 | 0 |
attr |
FalconH1Config.vocab_size |
1 | 0 | 0 |
attr |
FalconH1Config.hidden_size |
1 | 0 | 0 |
attr |
FalconH1Config.intermediate_size |
1 | 0 | 0 |
attr |
FalconH1Config.num_hidden_layers |
1 | 0 | 0 |
attr |
FalconH1Config.num_attention_heads |
1 | 0 | 0 |
attr |
FalconH1Config.max_position_embeddings |
1 | 0 | 0 |
attr |
FalconH1Config.attention_dropout |
1 | 0 | 0 |
attr |
FalconH1Config.attention_bias |
1 | 0 | 0 |
attr |
FalconH1Config.mlp_bias |
1 | 0 | 0 |
attr |
FalconH1Config.num_key_value_heads |
1 | 0 | 0 |
attr |
FalconH1Config.hidden_act |
1 | 0 | 0 |
attr |
FalconH1Config.initializer_range |
1 | 0 | 0 |
attr |
FalconH1Config.rms_norm_eps |
1 | 0 | 0 |
attr |
FalconH1Config.use_cache |
1 | 0 | 0 |
attr |
FalconH1Config.num_logits_to_keep |
1 | 0 | 0 |
attr |
FalconH1Config.projectors_bias |
1 | 0 | 0 |
attr |
FalconH1Config.mamba_d_ssm |
1 | 0 | 0 |
attr |
FalconH1Config.mamba_n_heads |
1 | 0 | 0 |
attr |
FalconH1Config.mamba_d_head |
1 | 0 | 0 |
attr |
FalconH1Config.mamba_n_groups |
1 | 0 | 0 |
attr |
FalconH1Config.mamba_d_state |
1 | 0 | 0 |
attr |
FalconH1Config.mamba_d_conv |
1 | 0 | 0 |
attr |
FalconH1Config.mamba_expand |
1 | 0 | 0 |
attr |
FalconH1Config.mamba_chunk_size |
1 | 0 | 0 |
attr |
FalconH1Config.mamba_conv_bias |
1 | 0 | 0 |
attr |
FalconH1Config.mamba_proj_bias |
1 | 0 | 0 |
attr |
FalconH1Config.mamba_norm_before_gate |
1 | 0 | 0 |
attr |
FalconH1Config.mamba_rms_norm |
1 | 0 | 0 |
attr |
FalconH1Config.time_step_min |
1 | 0 | 0 |
attr |
FalconH1Config.time_step_max |
1 | 0 | 0 |
attr |
FalconH1Config.time_step_limit |
1 | 0 | 0 |
attr |
FalconH1Config.lm_head_multiplier |
1 | 0 | 0 |
attr |
FalconH1Config.embedding_multiplier |
1 | 0 | 0 |
attr |
FalconH1Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
FalconH1Config.pad_token_id |
1 | 0 | 0 |
attr |
FalconH1Config.bos_token_id |
1 | 0 | 0 |
attr |
FalconH1Config.eos_token_id |
1 | 0 | 0 |
attr |
FalconH1Config.rope_parameters |
1 | 0 | 0 |
attr |
FalconH1Config.mlp_multipliers |
1 | 0 | 0 |
attr |
FalconH1Config.attention_out_multiplier |
1 | 0 | 0 |
attr |
FalconH1Config.attention_in_multiplier |
1 | 0 | 0 |
attr |
FalconH1Config.key_multiplier |
1 | 0 | 0 |
attr |
FalconH1Config.ssm_multipliers |
1 | 0 | 0 |
attr |
FalconH1Config.ssm_in_multiplier |
1 | 0 | 0 |
attr |
FalconH1Config.ssm_out_multiplier |
1 | 0 | 0 |
transformers.models.falcon_h1.modeling_falcon_h1 (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FalconH1Model.__init__ |
2 | 1 | 0 |
meth |
FalconH1Model.forward |
11 | 10 | 0 |
meth |
FalconH1Model._update_mamba_mask |
3 | 0 | 0 |
attr |
FalconH1Model.padding_idx |
1 | 0 | 0 |
attr |
FalconH1Model.vocab_size |
1 | 0 | 0 |
attr |
FalconH1Model.embed_tokens |
1 | 0 | 0 |
attr |
FalconH1Model.layers |
1 | 0 | 0 |
attr |
FalconH1Model.final_layernorm |
1 | 0 | 0 |
attr |
FalconH1Model.rotary_emb |
1 | 0 | 0 |
attr |
FalconH1Model.embedding_multiplier |
1 | 0 | 0 |
attr |
FalconH1Model.lm_head_multiplier |
1 | 0 | 0 |
attr |
FalconH1Model.gradient_checkpointing |
1 | 0 | 0 |
meth |
FalconH1ForCausalLM.__init__ |
2 | 0 | 0 |
meth |
FalconH1ForCausalLM.forward |
13 | 12 | 0 |
meth |
FalconH1ForCausalLM.prepare_inputs_for_generation |
10 | 0 | 0 |
attr |
FalconH1ForCausalLM.model |
1 | 0 | 0 |
attr |
FalconH1ForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
FalconH1ForCausalLM.lm_head |
1 | 0 | 0 |
meth |
FalconH1PreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.falcon_h1.modular_falcon_h1 (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FalconH1Model.__init__ |
2 | 1 | 0 |
meth |
FalconH1Model.forward |
11 | 10 | 0 |
meth |
FalconH1Model._update_mamba_mask |
3 | 0 | 0 |
attr |
FalconH1Model.padding_idx |
1 | 0 | 0 |
attr |
FalconH1Model.vocab_size |
1 | 0 | 0 |
attr |
FalconH1Model.embed_tokens |
1 | 0 | 0 |
attr |
FalconH1Model.layers |
1 | 0 | 0 |
attr |
FalconH1Model.final_layernorm |
1 | 0 | 0 |
attr |
FalconH1Model.rotary_emb |
1 | 0 | 0 |
attr |
FalconH1Model.embedding_multiplier |
1 | 0 | 0 |
attr |
FalconH1Model.lm_head_multiplier |
1 | 0 | 0 |
attr |
FalconH1Model.gradient_checkpointing |
1 | 0 | 0 |
meth |
FalconH1ForCausalLM.forward |
13 | 12 | 0 |
meth |
FalconH1ForCausalLM.prepare_inputs_for_generation |
10 | 0 | 0 |
meth |
FalconH1PreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.falcon_mamba.configuration_falcon_mamba (57 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FalconMambaConfig.__init__ |
29 | 0 | 0 |
attr |
FalconMambaConfig.vocab_size |
1 | 0 | 0 |
attr |
FalconMambaConfig.hidden_size |
1 | 0 | 0 |
attr |
FalconMambaConfig.state_size |
1 | 0 | 0 |
attr |
FalconMambaConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
FalconMambaConfig.layer_norm_epsilon |
1 | 0 | 0 |
attr |
FalconMambaConfig.conv_kernel |
1 | 0 | 0 |
attr |
FalconMambaConfig.expand |
1 | 0 | 0 |
attr |
FalconMambaConfig.intermediate_size |
1 | 0 | 0 |
attr |
FalconMambaConfig.bos_token_id |
1 | 0 | 0 |
attr |
FalconMambaConfig.eos_token_id |
1 | 0 | 0 |
attr |
FalconMambaConfig.pad_token_id |
1 | 0 | 0 |
attr |
FalconMambaConfig.use_bias |
1 | 0 | 0 |
attr |
FalconMambaConfig.use_conv_bias |
1 | 0 | 0 |
attr |
FalconMambaConfig.hidden_act |
1 | 0 | 0 |
attr |
FalconMambaConfig.initializer_range |
1 | 0 | 0 |
attr |
FalconMambaConfig.time_step_rank |
1 | 0 | 0 |
attr |
FalconMambaConfig.time_step_scale |
1 | 0 | 0 |
attr |
FalconMambaConfig.time_step_min |
1 | 0 | 0 |
attr |
FalconMambaConfig.time_step_max |
1 | 0 | 0 |
attr |
FalconMambaConfig.time_step_init_scheme |
1 | 0 | 0 |
attr |
FalconMambaConfig.time_step_floor |
1 | 0 | 0 |
attr |
FalconMambaConfig.rescale_prenorm_residual |
1 | 0 | 0 |
attr |
FalconMambaConfig.residual_in_fp32 |
1 | 0 | 0 |
attr |
FalconMambaConfig.use_cache |
1 | 0 | 0 |
attr |
FalconMambaConfig.use_falcon_mambapy |
1 | 0 | 0 |
attr |
FalconMambaConfig.use_associative_scan |
1 | 0 | 0 |
attr |
FalconMambaConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
FalconMambaConfig.mixer_rms_eps |
1 | 0 | 0 |
transformers.models.falcon_mamba.modeling_falcon_mamba (33 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FalconMambaForCausalLM.__init__ |
2 | 0 | 0 |
meth |
FalconMambaForCausalLM.get_input_embeddings |
1 | 0 | 0 |
meth |
FalconMambaForCausalLM.set_input_embeddings |
2 | 0 | 0 |
meth |
FalconMambaForCausalLM._update_model_kwargs_for_generation |
5 | 4 | 0 |
meth |
FalconMambaForCausalLM.prepare_inputs_for_generation |
9 | 4 | 0 |
meth |
FalconMambaForCausalLM.forward |
12 | 11 | 0 |
attr |
FalconMambaForCausalLM.backbone |
1 | 0 | 0 |
attr |
FalconMambaForCausalLM.lm_head |
1 | 0 | 0 |
meth |
FalconMambaModel.__init__ |
2 | 0 | 0 |
meth |
FalconMambaModel.get_input_embeddings |
1 | 0 | 0 |
meth |
FalconMambaModel.set_input_embeddings |
2 | 0 | 0 |
meth |
FalconMambaModel.forward |
10 | 9 | 0 |
attr |
FalconMambaModel.embeddings |
1 | 0 | 0 |
attr |
FalconMambaModel.layers |
1 | 0 | 0 |
attr |
FalconMambaModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
FalconMambaModel.norm_f |
1 | 0 | 0 |
meth |
FalconMambaCache.__init__ |
5 | 4 | 0 |
meth |
FalconMambaCache.update_ssm_state |
3 | 2 | 0 |
meth |
FalconMambaCache.reset |
1 | 0 | 0 |
attr |
FalconMambaCache.max_batch_size |
1 | 0 | 0 |
attr |
FalconMambaCache.intermediate_size |
1 | 0 | 0 |
attr |
FalconMambaCache.ssm_state_size |
1 | 0 | 0 |
attr |
FalconMambaCache.conv_kernel_size |
1 | 0 | 0 |
meth |
FalconMambaPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.falcon_mamba.modular_falcon_mamba (41 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FalconMambaModel.__init__ |
2 | 0 | 0 |
meth |
FalconMambaModel.load_hook |
4 | 0 | 0 |
attr |
FalconMambaModel.embeddings |
1 | 0 | 0 |
attr |
FalconMambaModel.layers |
1 | 0 | 0 |
attr |
FalconMambaModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
FalconMambaModel.norm_f |
1 | 0 | 0 |
meth |
FalconMambaConfig.__init__ |
29 | 0 | 0 |
attr |
FalconMambaConfig.mixer_rms_eps |
1 | 0 | 0 |
attr |
FalconMambaConfig.intermediate_size |
1 | 0 | 0 |
transformers.models.fast_vlm.configuration_fast_vlm (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FastVlmConfig.__init__ |
10 | 0 | 0 |
attr |
FastVlmConfig.image_token_id |
1 | 0 | 0 |
attr |
FastVlmConfig.projector_hidden_act |
1 | 0 | 0 |
attr |
FastVlmConfig.vision_feature_select_strategy |
1 | 0 | 0 |
attr |
FastVlmConfig.vision_feature_layer |
1 | 0 | 0 |
attr |
FastVlmConfig.vision_config |
1 | 0 | 0 |
attr |
FastVlmConfig.text_config |
1 | 0 | 0 |
attr |
FastVlmConfig.multimodal_projector_bias |
1 | 0 | 0 |
attr |
FastVlmConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.fast_vlm.modeling_fast_vlm (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FastVlmModel.__init__ |
2 | 1 | 0 |
meth |
FastVlmModel.get_input_embeddings |
1 | 0 | 0 |
meth |
FastVlmModel.set_input_embeddings |
2 | 0 | 0 |
meth |
FastVlmModel.get_placeholder_mask |
4 | 3 | 0 |
attr |
FastVlmModel.vision_tower |
1 | 0 | 0 |
attr |
FastVlmModel.multi_modal_projector |
1 | 0 | 0 |
attr |
FastVlmModel.language_model |
1 | 0 | 0 |
meth |
FastVlmForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
FastVlmForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
FastVlmForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
FastVlmForConditionalGeneration.prepare_inputs_for_generation |
10 | 0 | 0 |
attr |
FastVlmForConditionalGeneration.model |
1 | 0 | 0 |
attr |
FastVlmForConditionalGeneration.lm_head |
1 | 0 | 0 |
transformers.models.fast_vlm.modular_fast_vlm (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FastVlmConfig.__init__ |
10 | 0 | 0 |
attr |
FastVlmConfig.image_token_id |
1 | 0 | 0 |
attr |
FastVlmConfig.projector_hidden_act |
1 | 0 | 0 |
attr |
FastVlmConfig.vision_feature_select_strategy |
1 | 0 | 0 |
attr |
FastVlmConfig.vision_feature_layer |
1 | 0 | 0 |
attr |
FastVlmConfig.vision_config |
1 | 0 | 0 |
attr |
FastVlmConfig.text_config |
1 | 0 | 0 |
attr |
FastVlmConfig.multimodal_projector_bias |
1 | 0 | 0 |
attr |
FastVlmConfig.tie_word_embeddings |
1 | 0 | 0 |
meth |
FastVlmModel.__init__ |
2 | 1 | 0 |
transformers.models.fastspeech2_conformer.configuration_fastspeech2_conformer (123 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FastSpeech2ConformerConfig.__init__ |
58 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.hidden_size |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.vocab_size |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.num_mel_bins |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.encoder_config |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.decoder_config |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.encoder_num_attention_heads |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.encoder_layers |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.duration_predictor_channels |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.duration_predictor_kernel_size |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.duration_predictor_layers |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.energy_embed_dropout |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.energy_embed_kernel_size |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.energy_predictor_channels |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.energy_predictor_dropout |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.energy_predictor_kernel_size |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.energy_predictor_layers |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.pitch_embed_dropout |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.pitch_embed_kernel_size |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.pitch_predictor_channels |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.pitch_predictor_dropout |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.pitch_predictor_kernel_size |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.pitch_predictor_layers |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.positionwise_conv_kernel_size |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.speech_decoder_postnet_units |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.speech_decoder_postnet_dropout |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.speech_decoder_postnet_kernel |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.speech_decoder_postnet_layers |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.reduction_factor |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.speaking_speed |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.stop_gradient_from_energy_predictor |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.stop_gradient_from_pitch_predictor |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.max_source_positions |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.use_cnn_in_conformer |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.use_macaron_style_in_conformer |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.use_masking |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.use_weighted_masking |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.num_speakers |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.num_languages |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.speaker_embed_dim |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.duration_predictor_dropout_rate |
1 | 0 | 0 |
attr |
FastSpeech2ConformerConfig.convolution_bias |
1 | 0 | 0 |
meth |
FastSpeech2ConformerHifiGanConfig.__init__ |
11 | 0 | 0 |
attr |
FastSpeech2ConformerHifiGanConfig.model_in_dim |
1 | 0 | 0 |
attr |
FastSpeech2ConformerHifiGanConfig.upsample_initial_channel |
1 | 0 | 0 |
attr |
FastSpeech2ConformerHifiGanConfig.upsample_rates |
1 | 0 | 0 |
attr |
FastSpeech2ConformerHifiGanConfig.upsample_kernel_sizes |
1 | 0 | 0 |
attr |
FastSpeech2ConformerHifiGanConfig.resblock_kernel_sizes |
1 | 0 | 0 |
attr |
FastSpeech2ConformerHifiGanConfig.resblock_dilation_sizes |
1 | 0 | 0 |
attr |
FastSpeech2ConformerHifiGanConfig.initializer_range |
1 | 0 | 0 |
attr |
FastSpeech2ConformerHifiGanConfig.leaky_relu_slope |
1 | 0 | 0 |
attr |
FastSpeech2ConformerHifiGanConfig.normalize_before |
1 | 0 | 0 |
meth |
FastSpeech2ConformerWithHifiGanConfig.__init__ |
4 | 2 | 0 |
attr |
FastSpeech2ConformerWithHifiGanConfig.model_config |
1 | 0 | 0 |
attr |
FastSpeech2ConformerWithHifiGanConfig.vocoder_config |
1 | 0 | 0 |
transformers.models.fastspeech2_conformer.modeling_fastspeech2_conformer (44 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FastSpeech2ConformerWithHifiGan.__init__ |
2 | 1 | 0 |
meth |
FastSpeech2ConformerWithHifiGan.forward |
14 | 13 | 0 |
attr |
FastSpeech2ConformerWithHifiGan.model |
1 | 0 | 0 |
attr |
FastSpeech2ConformerWithHifiGan.vocoder |
1 | 0 | 0 |
meth |
FastSpeech2ConformerPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
FastSpeech2ConformerPreTrainedModel._set_gradient_checkpointing |
3 | 0 | 0 |
meth |
FastSpeech2ConformerHifiGan.__init__ |
2 | 1 | 0 |
meth |
FastSpeech2ConformerHifiGan._init_weights |
2 | 0 | 0 |
meth |
FastSpeech2ConformerHifiGan.apply_weight_norm |
1 | 0 | 0 |
meth |
FastSpeech2ConformerHifiGan.remove_weight_norm |
1 | 0 | 0 |
meth |
FastSpeech2ConformerHifiGan.forward |
3 | 2 | 0 |
attr |
FastSpeech2ConformerHifiGan.num_kernels |
1 | 0 | 0 |
attr |
FastSpeech2ConformerHifiGan.num_upsamples |
1 | 0 | 0 |
attr |
FastSpeech2ConformerHifiGan.conv_pre |
1 | 0 | 0 |
attr |
FastSpeech2ConformerHifiGan.upsampler |
1 | 0 | 0 |
attr |
FastSpeech2ConformerHifiGan.resblocks |
1 | 0 | 0 |
attr |
FastSpeech2ConformerHifiGan.conv_post |
1 | 0 | 0 |
meth |
FastSpeech2ConformerModel.__init__ |
2 | 1 | 0 |
meth |
FastSpeech2ConformerModel.forward |
14 | 13 | 0 |
attr |
FastSpeech2ConformerModel.vocab_size |
1 | 0 | 0 |
attr |
FastSpeech2ConformerModel.num_mel_bins |
1 | 0 | 0 |
attr |
FastSpeech2ConformerModel.hidden_size |
1 | 0 | 0 |
attr |
FastSpeech2ConformerModel.reduction_factor |
1 | 0 | 0 |
attr |
FastSpeech2ConformerModel.stop_gradient_from_pitch_predictor |
1 | 0 | 0 |
attr |
FastSpeech2ConformerModel.stop_gradient_from_energy_predictor |
1 | 0 | 0 |
attr |
FastSpeech2ConformerModel.multilingual_model |
1 | 0 | 0 |
attr |
FastSpeech2ConformerModel.multispeaker_model |
1 | 0 | 0 |
attr |
FastSpeech2ConformerModel.speaker_embed_dim |
1 | 0 | 0 |
attr |
FastSpeech2ConformerModel.encoder |
1 | 0 | 0 |
attr |
FastSpeech2ConformerModel.duration_predictor |
1 | 0 | 0 |
attr |
FastSpeech2ConformerModel.pitch_predictor |
1 | 0 | 0 |
attr |
FastSpeech2ConformerModel.pitch_embed |
1 | 0 | 0 |
attr |
FastSpeech2ConformerModel.energy_predictor |
1 | 0 | 0 |
attr |
FastSpeech2ConformerModel.energy_embed |
1 | 0 | 0 |
attr |
FastSpeech2ConformerModel.decoder |
1 | 0 | 0 |
attr |
FastSpeech2ConformerModel.speech_decoder_postnet |
1 | 0 | 0 |
attr |
FastSpeech2ConformerModel.criterion |
1 | 0 | 0 |
attr |
FastSpeech2ConformerModel.language_id_embedding |
1 | 0 | 0 |
attr |
FastSpeech2ConformerModel.speaker_id_embedding |
1 | 0 | 0 |
attr |
FastSpeech2ConformerModel.projection |
1 | 0 | 0 |
transformers.models.fastspeech2_conformer.tokenization_fastspeech2_conformer (33 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FastSpeech2ConformerTokenizer.__init__ |
8 | 0 | 0 |
meth |
FastSpeech2ConformerTokenizer.get_vocab |
1 | 0 | 0 |
meth |
FastSpeech2ConformerTokenizer.prepare_for_tokenization |
4 | 0 | 0 |
meth |
FastSpeech2ConformerTokenizer._tokenize |
2 | 0 | 0 |
meth |
FastSpeech2ConformerTokenizer._convert_token_to_id |
2 | 0 | 0 |
meth |
FastSpeech2ConformerTokenizer._convert_id_to_token |
2 | 0 | 0 |
meth |
FastSpeech2ConformerTokenizer.decode |
3 | 0 | 0 |
meth |
FastSpeech2ConformerTokenizer.convert_tokens_to_string |
3 | 0 | 0 |
meth |
FastSpeech2ConformerTokenizer.__getstate__ |
1 | 0 | 0 |
meth |
FastSpeech2ConformerTokenizer.__setstate__ |
2 | 0 | 0 |
prop |
FastSpeech2ConformerTokenizer.vocab_size |
1 | 0 | 0 |
attr |
FastSpeech2ConformerTokenizer.g2p |
1 | 0 | 0 |
attr |
FastSpeech2ConformerTokenizer.decoder |
1 | 0 | 0 |
attr |
FastSpeech2ConformerTokenizer.should_strip_spaces |
1 | 0 | 0 |
attr |
FastSpeech2ConformerTokenizer.encoder |
1 | 0 | 0 |
transformers.models.flaubert.configuration_flaubert (74 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FlaubertConfig.__init__ |
39 | 0 | 0 |
attr |
FlaubertConfig.pre_norm |
1 | 0 | 0 |
attr |
FlaubertConfig.layerdrop |
1 | 0 | 0 |
attr |
FlaubertConfig.vocab_size |
1 | 0 | 0 |
attr |
FlaubertConfig.emb_dim |
1 | 0 | 0 |
attr |
FlaubertConfig.n_layers |
1 | 0 | 0 |
attr |
FlaubertConfig.n_heads |
1 | 0 | 0 |
attr |
FlaubertConfig.dropout |
1 | 0 | 0 |
attr |
FlaubertConfig.attention_dropout |
1 | 0 | 0 |
attr |
FlaubertConfig.gelu_activation |
1 | 0 | 0 |
attr |
FlaubertConfig.sinusoidal_embeddings |
1 | 0 | 0 |
attr |
FlaubertConfig.causal |
1 | 0 | 0 |
attr |
FlaubertConfig.asm |
1 | 0 | 0 |
attr |
FlaubertConfig.n_langs |
1 | 0 | 0 |
attr |
FlaubertConfig.use_lang_emb |
1 | 0 | 0 |
attr |
FlaubertConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
FlaubertConfig.unk_index |
1 | 0 | 0 |
attr |
FlaubertConfig.mask_index |
1 | 0 | 0 |
attr |
FlaubertConfig.is_encoder |
1 | 0 | 0 |
attr |
FlaubertConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
FlaubertConfig.embed_init_std |
1 | 0 | 0 |
attr |
FlaubertConfig.init_std |
1 | 0 | 0 |
attr |
FlaubertConfig.summary_type |
1 | 0 | 0 |
attr |
FlaubertConfig.summary_use_proj |
1 | 0 | 0 |
attr |
FlaubertConfig.summary_activation |
1 | 0 | 0 |
attr |
FlaubertConfig.summary_proj_to_labels |
1 | 0 | 0 |
attr |
FlaubertConfig.summary_first_dropout |
1 | 0 | 0 |
attr |
FlaubertConfig.start_n_top |
1 | 0 | 0 |
attr |
FlaubertConfig.end_n_top |
1 | 0 | 0 |
attr |
FlaubertConfig.mask_token_id |
1 | 0 | 0 |
attr |
FlaubertConfig.lang_id |
1 | 0 | 0 |
attr |
FlaubertConfig.pad_token_id |
1 | 0 | 0 |
attr |
FlaubertConfig.bos_token_id |
1 | 0 | 0 |
attr |
FlaubertConfig.eos_token_id |
1 | 0 | 0 |
attr |
FlaubertConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
FlaubertConfig.n_words |
1 | 0 | 0 |
transformers.models.flaubert.modeling_flaubert (75 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FlaubertModel.__init__ |
2 | 0 | 0 |
meth |
FlaubertModel.get_input_embeddings |
1 | 0 | 0 |
meth |
FlaubertModel.set_input_embeddings |
2 | 0 | 0 |
meth |
FlaubertModel.forward |
14 | 13 | 0 |
attr |
FlaubertModel.is_encoder |
1 | 0 | 0 |
attr |
FlaubertModel.is_decoder |
1 | 0 | 0 |
attr |
FlaubertModel.causal |
1 | 0 | 0 |
attr |
FlaubertModel.n_langs |
1 | 0 | 0 |
attr |
FlaubertModel.use_lang_emb |
1 | 0 | 0 |
attr |
FlaubertModel.n_words |
1 | 0 | 0 |
attr |
FlaubertModel.eos_index |
1 | 0 | 0 |
attr |
FlaubertModel.pad_index |
1 | 0 | 0 |
attr |
FlaubertModel.dim |
1 | 0 | 0 |
attr |
FlaubertModel.hidden_dim |
1 | 0 | 0 |
attr |
FlaubertModel.n_heads |
1 | 0 | 0 |
attr |
FlaubertModel.n_layers |
1 | 0 | 0 |
attr |
FlaubertModel.dropout |
1 | 0 | 0 |
attr |
FlaubertModel.attention_dropout |
1 | 0 | 0 |
attr |
FlaubertModel.position_embeddings |
1 | 0 | 0 |
attr |
FlaubertModel.embeddings |
1 | 0 | 0 |
attr |
FlaubertModel.layer_norm_emb |
1 | 0 | 0 |
attr |
FlaubertModel.attentions |
1 | 0 | 0 |
attr |
FlaubertModel.layer_norm1 |
1 | 0 | 0 |
attr |
FlaubertModel.ffns |
1 | 0 | 0 |
attr |
FlaubertModel.layer_norm2 |
1 | 0 | 0 |
attr |
FlaubertModel.layerdrop |
1 | 0 | 0 |
attr |
FlaubertModel.pre_norm |
1 | 0 | 0 |
attr |
FlaubertModel.lang_embeddings |
1 | 0 | 0 |
meth |
FlaubertPreTrainedModel._init_weights |
2 | 0 | 0 |
prop |
FlaubertPreTrainedModel.dummy_inputs |
1 | 0 | 0 |
meth |
FlaubertForQuestionAnswering.__init__ |
2 | 0 | 0 |
meth |
FlaubertForQuestionAnswering.forward |
18 | 17 | 0 |
attr |
FlaubertForQuestionAnswering.transformer |
1 | 0 | 0 |
attr |
FlaubertForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
FlaubertWithLMHeadModel.init |
2 | 0 | 0 |
meth |
FlaubertWithLMHeadModel.get_output_embeddings |
1 | 0 | 0 |
meth |
FlaubertWithLMHeadModel.set_output_embeddings |
2 | 0 | 0 |
meth |
FlaubertWithLMHeadModel.prepare_inputs_for_generation |
3 | 0 | 0 |
meth |
FlaubertWithLMHeadModel.forward |
14 | 13 | 0 |
attr |
FlaubertWithLMHeadModel.transformer |
1 | 0 | 0 |
attr |
FlaubertWithLMHeadModel.pred_layer |
1 | 0 | 0 |
meth |
FlaubertForSequenceClassification.init |
2 | 0 | 0 |
meth |
FlaubertForSequenceClassification.forward |
14 | 13 | 0 |
attr |
FlaubertForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
FlaubertForSequenceClassification.transformer |
1 | 0 | 0 |
attr |
FlaubertForSequenceClassification.sequence_summary |
1 | 0 | 0 |
meth |
FlaubertForQuestionAnsweringSimple.init |
2 | 0 | 0 |
meth |
FlaubertForQuestionAnsweringSimple.forward |
15 | 14 | 0 |
attr |
FlaubertForQuestionAnsweringSimple.transformer |
1 | 0 | 0 |
attr |
FlaubertForQuestionAnsweringSimple.qa_outputs |
1 | 0 | 0 |
meth |
FlaubertForTokenClassification.init |
2 | 0 | 0 |
meth |
FlaubertForTokenClassification.forward |
14 | 13 | 0 |
attr |
FlaubertForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
FlaubertForTokenClassification.transformer |
1 | 0 | 0 |
attr |
FlaubertForTokenClassification.dropout |
1 | 0 | 0 |
attr |
FlaubertForTokenClassification.classifier |
1 | 0 | 0 |
meth |
FlaubertForMultipleChoice.init |
4 | 0 | 0 |
meth |
FlaubertForMultipleChoice.forward |
14 | 13 | 0 |
attr |
FlaubertForMultipleChoice.transformer |
1 | 0 | 0 |
attr |
FlaubertForMultipleChoice.sequence_summary |
1 | 0 | 0 |
attr |
FlaubertForMultipleChoice.logits_proj |
1 | 0 | 0 |
transformers.models.flaubert.tokenization_flaubert (58 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FlaubertTokenizer.init |
14 | 0 | 0 |
meth |
FlaubertTokenizer.moses_punct_norm |
3 | 0 | 0 |
meth |
FlaubertTokenizer.moses_tokenize |
3 | 0 | 0 |
meth |
FlaubertTokenizer.moses_pipeline |
3 | 0 | 0 |
meth |
FlaubertTokenizer.ja_tokenize |
2 | 0 | 0 |
meth |
FlaubertTokenizer.get_vocab |
1 | 0 | 0 |
meth |
FlaubertTokenizer.bpe |
2 | 0 | 0 |
meth |
FlaubertTokenizer.preprocess_text |
2 | 0 | 0 |
meth |
FlaubertTokenizer._tokenize |
3 | 0 | 0 |
meth |
FlaubertTokenizer._convert_token_to_id |
2 | 0 | 0 |
meth |
FlaubertTokenizer._convert_id_to_token |
2 | 0 | 0 |
meth |
FlaubertTokenizer.convert_tokens_to_string |
2 | 0 | 0 |
meth |
FlaubertTokenizer.getstate |
1 | 0 | 0 |
meth |
FlaubertTokenizer.setstate |
2 | 0 | 0 |
prop |
FlaubertTokenizer.do_lower_case |
1 | 0 | 0 |
prop |
FlaubertTokenizer.vocab_size |
1 | 0 | 0 |
attr |
FlaubertTokenizer.do_lowercase_and_remove_accent |
1 | 0 | 0 |
attr |
FlaubertTokenizer.do_lowercase |
1 | 0 | 0 |
attr |
FlaubertTokenizer.sm |
1 | 0 | 0 |
attr |
FlaubertTokenizer.cache_moses_punct_normalizer |
1 | 0 | 0 |
attr |
FlaubertTokenizer.cache_moses_tokenizer |
1 | 0 | 0 |
attr |
FlaubertTokenizer.lang_with_custom_tokenizer |
1 | 0 | 0 |
attr |
FlaubertTokenizer.lang2id |
1 | 0 | 0 |
attr |
FlaubertTokenizer.id2lang |
1 | 0 | 0 |
attr |
FlaubertTokenizer.ja_word_tokenizer |
1 | 0 | 0 |
attr |
FlaubertTokenizer.zh_word_tokenizer |
1 | 0 | 0 |
attr |
FlaubertTokenizer.decoder |
1 | 0 | 0 |
attr |
FlaubertTokenizer.bpe_ranks |
1 | 0 | 0 |
attr |
FlaubertTokenizer.cache |
1 | 0 | 0 |
attr |
FlaubertTokenizer.encoder |
1 | 0 | 0 |
transformers.models.flava.configuration_flava (79 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FlavaImageConfig.init |
17 | 15 | 0 |
attr |
FlavaImageConfig.hidden_size |
1 | 0 | 0 |
attr |
FlavaImageConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
FlavaImageConfig.num_attention_heads |
1 | 0 | 0 |
attr |
FlavaImageConfig.intermediate_size |
1 | 0 | 0 |
attr |
FlavaImageConfig.hidden_act |
1 | 0 | 0 |
attr |
FlavaImageConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
FlavaImageConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
FlavaImageConfig.initializer_range |
1 | 0 | 0 |
attr |
FlavaImageConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
FlavaImageConfig.image_size |
1 | 0 | 0 |
attr |
FlavaImageConfig.patch_size |
1 | 0 | 0 |
attr |
FlavaImageConfig.num_channels |
1 | 0 | 0 |
attr |
FlavaImageConfig.qkv_bias |
1 | 0 | 0 |
attr |
FlavaImageConfig.mask_token |
1 | 0 | 0 |
attr |
FlavaImageConfig.vocab_size |
1 | 0 | 0 |
meth |
FlavaImageCodebookConfig.init |
9 | 7 | 0 |
attr |
FlavaImageCodebookConfig.num_groups |
1 | 0 | 0 |
attr |
FlavaImageCodebookConfig.input_channels |
1 | 0 | 0 |
attr |
FlavaImageCodebookConfig.num_blocks_per_group |
1 | 0 | 0 |
attr |
FlavaImageCodebookConfig.hidden_size |
1 | 0 | 0 |
attr |
FlavaImageCodebookConfig.vocab_size |
1 | 0 | 0 |
attr |
FlavaImageCodebookConfig.freeze |
1 | 0 | 0 |
attr |
FlavaImageCodebookConfig.initializer_range |
1 | 0 | 0 |
meth |
FlavaTextConfig.init |
16 | 14 | 0 |
attr |
FlavaTextConfig.vocab_size |
1 | 0 | 0 |
attr |
FlavaTextConfig.type_vocab_size |
1 | 0 | 0 |
attr |
FlavaTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
FlavaTextConfig.hidden_size |
1 | 0 | 0 |
attr |
FlavaTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
FlavaTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
FlavaTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
FlavaTextConfig.hidden_act |
1 | 0 | 0 |
attr |
FlavaTextConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
FlavaTextConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
FlavaTextConfig.initializer_range |
1 | 0 | 0 |
attr |
FlavaTextConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
FlavaTextConfig.qkv_bias |
1 | 0 | 0 |
attr |
FlavaTextConfig.pad_token_id |
1 | 0 | 0 |
meth |
FlavaMultimodalConfig.init |
13 | 11 | 0 |
attr |
FlavaMultimodalConfig.hidden_size |
1 | 0 | 0 |
attr |
FlavaMultimodalConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
FlavaMultimodalConfig.num_attention_heads |
1 | 0 | 0 |
attr |
FlavaMultimodalConfig.intermediate_size |
1 | 0 | 0 |
attr |
FlavaMultimodalConfig.hidden_act |
1 | 0 | 0 |
attr |
FlavaMultimodalConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
FlavaMultimodalConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
FlavaMultimodalConfig.initializer_range |
1 | 0 | 0 |
attr |
FlavaMultimodalConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
FlavaMultimodalConfig.qkv_bias |
1 | 0 | 0 |
attr |
FlavaMultimodalConfig.use_cls_token |
1 | 0 | 0 |
meth |
FlavaConfig.init |
23 | 21 | 0 |
attr |
FlavaConfig.text_config |
1 | 0 | 0 |
attr |
FlavaConfig.image_config |
1 | 0 | 0 |
attr |
FlavaConfig.multimodal_config |
1 | 0 | 0 |
attr |
FlavaConfig.image_codebook_config |
1 | 0 | 0 |
attr |
FlavaConfig.projection_dim |
1 | 0 | 0 |
attr |
FlavaConfig.init_codebook |
1 | 0 | 0 |
attr |
FlavaConfig.hidden_size |
1 | 0 | 0 |
attr |
FlavaConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
FlavaConfig.initializer_range |
1 | 0 | 0 |
attr |
FlavaConfig.logit_scale_init_value |
1 | 0 | 0 |
attr |
FlavaConfig.initializer_factor |
1 | 0 | 0 |
attr |
FlavaConfig.ce_ignore_index |
1 | 0 | 0 |
attr |
FlavaConfig.mim_weight |
1 | 0 | 0 |
attr |
FlavaConfig.mlm_weight |
1 | 0 | 0 |
attr |
FlavaConfig.global_contrastive_weight |
1 | 0 | 0 |
attr |
FlavaConfig.itm_weight |
1 | 0 | 0 |
attr |
FlavaConfig.mmm_image_weight |
1 | 0 | 0 |
attr |
FlavaConfig.mmm_text_weight |
1 | 0 | 0 |
attr |
FlavaConfig.global_backprop_contrastive |
1 | 0 | 0 |
attr |
FlavaConfig.skip_unmasked_multimodal_encoder |
1 | 0 | 0 |
attr |
FlavaConfig.return_loss |
1 | 0 | 0 |
attr |
FlavaConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.flava.image_processing_flava (39 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FlavaImageProcessor.init |
31 | 30 | 0 |
meth |
FlavaImageProcessor.from_dict |
3 | 1 | 0 |
meth |
FlavaImageProcessor.masking_generator |
7 | 1 | 0 |
meth |
FlavaImageProcessor.resize |
7 | 6 | 0 |
attr |
FlavaImageProcessor.do_resize |
1 | 0 | 0 |
attr |
FlavaImageProcessor.size |
1 | 0 | 0 |
attr |
FlavaImageProcessor.resample |
1 | 0 | 0 |
attr |
FlavaImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
FlavaImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
FlavaImageProcessor.do_center_crop |
1 | 0 | 0 |
attr |
FlavaImageProcessor.crop_size |
1 | 0 | 0 |
attr |
FlavaImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
FlavaImageProcessor.image_mean |
1 | 0 | 0 |
attr |
FlavaImageProcessor.image_std |
1 | 0 | 0 |
attr |
FlavaImageProcessor.return_image_mask |
1 | 0 | 0 |
attr |
FlavaImageProcessor.input_size_patches |
1 | 0 | 0 |
attr |
FlavaImageProcessor.total_mask_patches |
1 | 0 | 0 |
attr |
FlavaImageProcessor.mask_group_min_patches |
1 | 0 | 0 |
attr |
FlavaImageProcessor.mask_group_max_patches |
1 | 0 | 0 |
attr |
FlavaImageProcessor.mask_group_min_aspect_ratio |
1 | 0 | 0 |
attr |
FlavaImageProcessor.mask_group_max_aspect_ratio |
1 | 0 | 0 |
attr |
FlavaImageProcessor.return_codebook_pixels |
1 | 0 | 0 |
attr |
FlavaImageProcessor.codebook_do_resize |
1 | 0 | 0 |
attr |
FlavaImageProcessor.codebook_size |
1 | 0 | 0 |
attr |
FlavaImageProcessor.codebook_resample |
1 | 0 | 0 |
attr |
FlavaImageProcessor.codebook_do_center_crop |
1 | 0 | 0 |
attr |
FlavaImageProcessor.codebook_crop_size |
1 | 0 | 0 |
attr |
FlavaImageProcessor.codebook_do_rescale |
1 | 0 | 0 |
attr |
FlavaImageProcessor.codebook_rescale_factor |
1 | 0 | 0 |
attr |
FlavaImageProcessor.codebook_do_map_pixels |
1 | 0 | 0 |
attr |
FlavaImageProcessor.codebook_do_normalize |
1 | 0 | 0 |
attr |
FlavaImageProcessor.codebook_image_mean |
1 | 0 | 0 |
attr |
FlavaImageProcessor.codebook_image_std |
1 | 0 | 0 |
transformers.models.flava.image_processing_flava_fast (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FlavaImageProcessorFast.init |
2 | 1 | 0 |
meth |
FlavaImageProcessorFast.from_dict |
3 | 1 | 0 |
meth |
FlavaImageProcessorFast.masking_generator |
7 | 1 | 0 |
meth |
FlavaImageProcessorFast._further_process_kwargs |
13 | 12 | 0 |
meth |
FlavaImageProcessorFast._preprocess |
34 | 33 | 0 |
transformers.models.flava.modeling_flava (65 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FlavaTextModel.init |
3 | 2 | 0 |
meth |
FlavaTextModel.set_input_embeddings |
2 | 1 | 0 |
meth |
FlavaTextModel.forward |
9 | 8 | 0 |
attr |
FlavaTextModel.embeddings |
1 | 0 | 0 |
attr |
FlavaTextModel.encoder |
1 | 0 | 0 |
attr |
FlavaTextModel.layernorm |
1 | 0 | 0 |
attr |
FlavaTextModel.pooler |
1 | 0 | 0 |
meth |
FlavaMultimodalModel.init |
3 | 1 | 0 |
meth |
FlavaMultimodalModel.forward |
7 | 6 | 0 |
attr |
FlavaMultimodalModel.use_cls_token |
1 | 0 | 0 |
attr |
FlavaMultimodalModel.encoder |
1 | 0 | 0 |
attr |
FlavaMultimodalModel.layernorm |
1 | 0 | 0 |
attr |
FlavaMultimodalModel.pooler |
1 | 0 | 0 |
attr |
FlavaMultimodalModel.cls_token |
1 | 0 | 0 |
meth |
FlavaImageModel.init |
3 | 2 | 0 |
meth |
FlavaImageModel.set_input_embeddings |
2 | 1 | 0 |
meth |
FlavaImageModel.forward |
9 | 8 | 0 |
attr |
FlavaImageModel.embeddings |
1 | 0 | 0 |
attr |
FlavaImageModel.encoder |
1 | 0 | 0 |
attr |
FlavaImageModel.layernorm |
1 | 0 | 0 |
attr |
FlavaImageModel.pooler |
1 | 0 | 0 |
meth |
FlavaForPreTraining.init |
3 | 2 | 0 |
meth |
FlavaForPreTraining._resize_to_2d |
2 | 1 | 0 |
meth |
FlavaForPreTraining.forward |
19 | 18 | 0 |
attr |
FlavaForPreTraining.flava |
1 | 0 | 0 |
attr |
FlavaForPreTraining.image_codebook |
1 | 0 | 0 |
attr |
FlavaForPreTraining.mim_head |
1 | 0 | 0 |
attr |
FlavaForPreTraining.mlm_head |
1 | 0 | 0 |
attr |
FlavaForPreTraining.itm_head |
1 | 0 | 0 |
attr |
FlavaForPreTraining.mmm_image_head |
1 | 0 | 0 |
attr |
FlavaForPreTraining.mmm_text_head |
1 | 0 | 0 |
attr |
FlavaForPreTraining.global_contrastive_head |
1 | 0 | 0 |
attr |
FlavaForPreTraining.image_vocab_size |
1 | 0 | 0 |
attr |
FlavaForPreTraining.text_vocab_size |
1 | 0 | 0 |
attr |
FlavaForPreTraining.mlm_weight |
1 | 0 | 0 |
attr |
FlavaForPreTraining.mim_weight |
1 | 0 | 0 |
attr |
FlavaForPreTraining.global_contrastive_weight |
1 | 0 | 0 |
attr |
FlavaForPreTraining.ce_ignore_index |
1 | 0 | 0 |
attr |
FlavaForPreTraining.itm_weight |
1 | 0 | 0 |
attr |
FlavaForPreTraining.mmm_image_weight |
1 | 0 | 0 |
attr |
FlavaForPreTraining.mmm_text_weight |
1 | 0 | 0 |
attr |
FlavaForPreTraining.skip_unmasked_multimodal_encoder |
1 | 0 | 0 |
meth |
FlavaImageCodebook.init |
3 | 2 | 1 |
meth |
FlavaImageCodebook.forward |
3 | 2 | 0 |
attr |
FlavaImageCodebook.num_groups |
1 | 0 | 0 |
attr |
FlavaImageCodebook.input_channels |
1 | 0 | 0 |
attr |
FlavaImageCodebook.num_blocks_per_group |
1 | 0 | 0 |
attr |
FlavaImageCodebook.hidden_size |
1 | 0 | 0 |
attr |
FlavaImageCodebook.vocab_size |
1 | 0 | 0 |
attr |
FlavaImageCodebook.blocks |
1 | 0 | 0 |
meth |
FlavaModel.init |
2 | 1 | 0 |
meth |
FlavaModel.forward |
13 | 12 | 0 |
attr |
FlavaModel.projection_dim |
1 | 0 | 0 |
attr |
FlavaModel.text_hidden_size |
1 | 0 | 0 |
attr |
FlavaModel.image_hidden_size |
1 | 0 | 0 |
attr |
FlavaModel.mm_hidden_size |
1 | 0 | 0 |
attr |
FlavaModel.text_model |
1 | 0 | 0 |
attr |
FlavaModel.image_model |
1 | 0 | 0 |
attr |
FlavaModel.multimodal_model |
1 | 0 | 0 |
attr |
FlavaModel.image_projection |
1 | 0 | 0 |
attr |
FlavaModel.text_projection |
1 | 0 | 0 |
attr |
FlavaModel.logit_scale |
1 | 0 | 0 |
attr |
FlavaModel.image_to_mm_projection |
1 | 0 | 0 |
attr |
FlavaModel.text_to_mm_projection |
1 | 0 | 0 |
transformers.models.flava.processing_flava (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FlavaProcessor.init |
4 | 0 | 0 |
transformers.models.flex_olmo.configuration_flex_olmo (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FlexOlmoConfig.init |
25 | 23 | 0 |
attr |
FlexOlmoConfig.vocab_size |
1 | 0 | 0 |
attr |
FlexOlmoConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
FlexOlmoConfig.hidden_size |
1 | 0 | 0 |
attr |
FlexOlmoConfig.intermediate_size |
1 | 0 | 0 |
attr |
FlexOlmoConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
FlexOlmoConfig.num_attention_heads |
1 | 0 | 0 |
attr |
FlexOlmoConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
FlexOlmoConfig.hidden_act |
1 | 0 | 0 |
attr |
FlexOlmoConfig.initializer_range |
1 | 0 | 0 |
attr |
FlexOlmoConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
FlexOlmoConfig.use_cache |
1 | 0 | 0 |
attr |
FlexOlmoConfig.attention_bias |
1 | 0 | 0 |
attr |
FlexOlmoConfig.attention_dropout |
1 | 0 | 0 |
attr |
FlexOlmoConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
FlexOlmoConfig.num_experts |
1 | 0 | 0 |
attr |
FlexOlmoConfig.output_router_logits |
1 | 0 | 0 |
attr |
FlexOlmoConfig.router_aux_loss_coef |
1 | 0 | 0 |
attr |
FlexOlmoConfig.norm_topk_prob |
1 | 0 | 0 |
attr |
FlexOlmoConfig.rope_parameters |
1 | 0 | 0 |
attr |
FlexOlmoConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
FlexOlmoConfig.pad_token_id |
1 | 0 | 0 |
attr |
FlexOlmoConfig.bos_token_id |
1 | 0 | 0 |
attr |
FlexOlmoConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.flex_olmo.modeling_flex_olmo (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FlexOlmoModel.init |
2 | 1 | 0 |
attr |
FlexOlmoModel.padding_idx |
1 | 0 | 0 |
attr |
FlexOlmoModel.vocab_size |
1 | 0 | 0 |
attr |
FlexOlmoModel.embed_tokens |
1 | 0 | 0 |
attr |
FlexOlmoModel.layers |
1 | 0 | 0 |
attr |
FlexOlmoModel.norm |
1 | 0 | 0 |
attr |
FlexOlmoModel.rotary_emb |
1 | 0 | 0 |
attr |
FlexOlmoModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
FlexOlmoPreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
FlexOlmoPreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
FlexOlmoForCausalLM.init |
2 | 0 | 0 |
attr |
FlexOlmoForCausalLM.model |
1 | 0 | 0 |
attr |
FlexOlmoForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
FlexOlmoForCausalLM.lm_head |
1 | 0 | 0 |
attr |
FlexOlmoForCausalLM.router_aux_loss_coef |
1 | 0 | 0 |
attr |
FlexOlmoForCausalLM.num_experts |
1 | 0 | 0 |
attr |
FlexOlmoForCausalLM.num_experts_per_tok |
1 | 0 | 0 |
transformers.models.flex_olmo.modular_flex_olmo (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FlexOlmoConfig.init |
25 | 23 | 0 |
attr |
FlexOlmoConfig.vocab_size |
1 | 0 | 0 |
attr |
FlexOlmoConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
FlexOlmoConfig.hidden_size |
1 | 0 | 0 |
attr |
FlexOlmoConfig.intermediate_size |
1 | 0 | 0 |
attr |
FlexOlmoConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
FlexOlmoConfig.num_attention_heads |
1 | 0 | 0 |
attr |
FlexOlmoConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
FlexOlmoConfig.hidden_act |
1 | 0 | 0 |
attr |
FlexOlmoConfig.initializer_range |
1 | 0 | 0 |
attr |
FlexOlmoConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
FlexOlmoConfig.use_cache |
1 | 0 | 0 |
attr |
FlexOlmoConfig.attention_bias |
1 | 0 | 0 |
attr |
FlexOlmoConfig.attention_dropout |
1 | 0 | 0 |
attr |
FlexOlmoConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
FlexOlmoConfig.num_experts |
1 | 0 | 0 |
attr |
FlexOlmoConfig.output_router_logits |
1 | 0 | 0 |
attr |
FlexOlmoConfig.router_aux_loss_coef |
1 | 0 | 0 |
attr |
FlexOlmoConfig.norm_topk_prob |
1 | 0 | 0 |
attr |
FlexOlmoConfig.rope_parameters |
1 | 0 | 0 |
attr |
FlexOlmoConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
FlexOlmoConfig.pad_token_id |
1 | 0 | 0 |
attr |
FlexOlmoConfig.bos_token_id |
1 | 0 | 0 |
attr |
FlexOlmoConfig.eos_token_id |
1 | 0 | 0 |
attr |
FlexOlmoPreTrainedModel._can_record_outputs |
1 | 0 | 0 |
transformers.models.florence2.configuration_florence2 (49 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Florence2VisionConfig.init |
20 | 0 | 0 |
attr |
Florence2VisionConfig.in_channels |
1 | 0 | 0 |
attr |
Florence2VisionConfig.depths |
1 | 0 | 0 |
attr |
Florence2VisionConfig.patch_size |
1 | 0 | 0 |
attr |
Florence2VisionConfig.patch_stride |
1 | 0 | 0 |
attr |
Florence2VisionConfig.patch_padding |
1 | 0 | 0 |
attr |
Florence2VisionConfig.patch_prenorm |
1 | 0 | 0 |
attr |
Florence2VisionConfig.embed_dim |
1 | 0 | 0 |
attr |
Florence2VisionConfig.num_heads |
1 | 0 | 0 |
attr |
Florence2VisionConfig.num_groups |
1 | 0 | 0 |
attr |
Florence2VisionConfig.window_size |
1 | 0 | 0 |
attr |
Florence2VisionConfig.drop_path_rate |
1 | 0 | 0 |
attr |
Florence2VisionConfig.mlp_ratio |
1 | 0 | 0 |
attr |
Florence2VisionConfig.qkv_bias |
1 | 0 | 0 |
attr |
Florence2VisionConfig.projection_dim |
1 | 0 | 0 |
attr |
Florence2VisionConfig.max_temporal_embeddings |
1 | 0 | 0 |
attr |
Florence2VisionConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Florence2VisionConfig.initializer_range |
1 | 0 | 0 |
attr |
Florence2VisionConfig.activation_function |
1 | 0 | 0 |
meth |
Florence2Config.init |
7 | 0 | 0 |
attr |
Florence2Config.text_config |
1 | 0 | 0 |
attr |
Florence2Config.vision_config |
1 | 0 | 0 |
attr |
Florence2Config.image_token_id |
1 | 0 | 0 |
attr |
Florence2Config.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.florence2.modeling_florence2 (40 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Florence2VisionBackbone.init |
2 | 1 | 0 |
attr |
Florence2VisionBackbone.config |
1 | 0 | 0 |
attr |
Florence2VisionBackbone.embed_dim |
1 | 0 | 0 |
attr |
Florence2VisionBackbone.num_heads |
1 | 0 | 0 |
attr |
Florence2VisionBackbone.num_groups |
1 | 0 | 0 |
attr |
Florence2VisionBackbone.num_stages |
1 | 0 | 0 |
attr |
Florence2VisionBackbone.convs |
1 | 0 | 0 |
attr |
Florence2VisionBackbone.blocks |
1 | 0 | 0 |
meth |
Florence2PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Florence2Model.init |
2 | 1 | 0 |
meth |
Florence2Model.get_input_embeddings |
1 | 0 | 0 |
meth |
Florence2Model.set_input_embeddings |
2 | 0 | 0 |
meth |
Florence2Model.get_placeholder_mask |
4 | 3 | 0 |
meth |
Florence2Model.forward |
16 | 15 | 0 |
meth |
Florence2Model.get_encoder |
2 | 0 | 0 |
attr |
Florence2Model.vision_tower |
1 | 0 | 0 |
attr |
Florence2Model.multi_modal_projector |
1 | 0 | 0 |
attr |
Florence2Model.language_model |
1 | 0 | 0 |
meth |
Florence2ForConditionalGeneration.init |
2 | 1 | 0 |
meth |
Florence2ForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Florence2ForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Florence2ForConditionalGeneration.prepare_inputs_for_generation |
10 | 0 | 0 |
meth |
Florence2ForConditionalGeneration.get_placeholder_mask |
4 | 3 | 0 |
meth |
Florence2ForConditionalGeneration._prepare_encoder_decoder_kwargs_for_generation |
5 | 3 | 0 |
attr |
Florence2ForConditionalGeneration.model |
1 | 0 | 0 |
attr |
Florence2ForConditionalGeneration.lm_head |
1 | 0 | 0 |
transformers.models.florence2.modular_florence2 (98 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Florence2Config.init |
7 | 0 | 0 |
attr |
Florence2Config.text_config |
1 | 0 | 0 |
attr |
Florence2Config.vision_config |
1 | 0 | 0 |
attr |
Florence2Config.image_token_id |
1 | 0 | 0 |
attr |
Florence2Config.tie_word_embeddings |
1 | 0 | 0 |
meth |
Florence2VisionConfig.init |
20 | 0 | 0 |
attr |
Florence2VisionConfig.in_channels |
1 | 0 | 0 |
attr |
Florence2VisionConfig.depths |
1 | 0 | 0 |
attr |
Florence2VisionConfig.patch_size |
1 | 0 | 0 |
attr |
Florence2VisionConfig.patch_stride |
1 | 0 | 0 |
attr |
Florence2VisionConfig.patch_padding |
1 | 0 | 0 |
attr |
Florence2VisionConfig.patch_prenorm |
1 | 0 | 0 |
attr |
Florence2VisionConfig.embed_dim |
1 | 0 | 0 |
attr |
Florence2VisionConfig.num_heads |
1 | 0 | 0 |
attr |
Florence2VisionConfig.num_groups |
1 | 0 | 0 |
attr |
Florence2VisionConfig.window_size |
1 | 0 | 0 |
attr |
Florence2VisionConfig.drop_path_rate |
1 | 0 | 0 |
attr |
Florence2VisionConfig.mlp_ratio |
1 | 0 | 0 |
attr |
Florence2VisionConfig.qkv_bias |
1 | 0 | 0 |
attr |
Florence2VisionConfig.projection_dim |
1 | 0 | 0 |
attr |
Florence2VisionConfig.max_temporal_embeddings |
1 | 0 | 0 |
attr |
Florence2VisionConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Florence2VisionConfig.initializer_range |
1 | 0 | 0 |
attr |
Florence2VisionConfig.activation_function |
1 | 0 | 0 |
meth |
Florence2VisionBackbone.init |
2 | 1 | 0 |
attr |
Florence2VisionBackbone.config |
1 | 0 | 0 |
attr |
Florence2VisionBackbone.embed_dim |
1 | 0 | 0 |
attr |
Florence2VisionBackbone.num_heads |
1 | 0 | 0 |
attr |
Florence2VisionBackbone.num_groups |
1 | 0 | 0 |
attr |
Florence2VisionBackbone.num_stages |
1 | 0 | 0 |
attr |
Florence2VisionBackbone.convs |
1 | 0 | 0 |
attr |
Florence2VisionBackbone.blocks |
1 | 0 | 0 |
meth |
Florence2PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Florence2Processor.init |
6 | 2 | 0 |
meth |
Florence2Processor.batch_decode |
3 | 0 | 0 |
meth |
Florence2Processor.decode |
3 | 0 | 0 |
meth |
Florence2Processor._get_num_multimodal_tokens |
3 | 0 | 0 |
meth |
Florence2Processor.post_process_image_text_to_text |
4 | 0 | 0 |
meth |
Florence2Processor.post_process_generation |
5 | 1 | 0 |
prop |
Florence2Processor.model_input_names |
1 | 0 | 0 |
attr |
Florence2Processor.tasks_answer_post_processing_type |
1 | 0 | 0 |
attr |
Florence2Processor.task_prompts_without_inputs |
1 | 0 | 0 |
attr |
Florence2Processor.task_prompts_with_input |
1 | 0 | 0 |
attr |
Florence2Processor.num_image_tokens |
1 | 0 | 0 |
attr |
Florence2Processor.num_additional_image_tokens |
1 | 0 | 0 |
attr |
Florence2Processor.post_processor_config |
1 | 0 | 0 |
attr |
Florence2Processor.post_processor |
1 | 0 | 0 |
attr |
Florence2Processor.image_token |
1 | 0 | 0 |
attr |
Florence2Processor.image_token_id |
1 | 0 | 0 |
meth |
Florence2Model.init |
2 | 1 | 0 |
meth |
Florence2Model.get_encoder |
2 | 0 | 0 |
meth |
Florence2Model.forward |
16 | 15 | 0 |
attr |
Florence2Model.vision_tower |
1 | 0 | 0 |
meth |
Florence2ForConditionalGeneration.get_placeholder_mask |
4 | 3 | 0 |
meth |
Florence2ForConditionalGeneration._prepare_encoder_decoder_kwargs_for_generation |
5 | 3 | 0 |
transformers.models.florence2.processing_florence2 (31 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Florence2Processor.init |
6 | 2 | 0 |
meth |
Florence2Processor.batch_decode |
3 | 0 | 0 |
meth |
Florence2Processor.decode |
3 | 0 | 0 |
meth |
Florence2Processor._get_num_multimodal_tokens |
3 | 0 | 0 |
meth |
Florence2Processor.post_process_image_text_to_text |
4 | 0 | 0 |
meth |
Florence2Processor.post_process_generation |
5 | 1 | 0 |
prop |
Florence2Processor.model_input_names |
1 | 0 | 0 |
attr |
Florence2Processor.tasks_answer_post_processing_type |
1 | 0 | 0 |
attr |
Florence2Processor.task_prompts_without_inputs |
1 | 0 | 0 |
attr |
Florence2Processor.task_prompts_with_input |
1 | 0 | 0 |
attr |
Florence2Processor.num_image_tokens |
1 | 0 | 0 |
attr |
Florence2Processor.num_additional_image_tokens |
1 | 0 | 0 |
attr |
Florence2Processor.post_processor_config |
1 | 0 | 0 |
attr |
Florence2Processor.post_processor |
1 | 0 | 0 |
attr |
Florence2Processor.image_token |
1 | 0 | 0 |
attr |
Florence2Processor.image_token_id |
1 | 0 | 0 |
transformers.models.fnet.configuration_fnet (34 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FNetConfig.init |
18 | 0 | 0 |
attr |
FNetConfig.pad_token_id |
1 | 0 | 0 |
attr |
FNetConfig.bos_token_id |
1 | 0 | 0 |
attr |
FNetConfig.eos_token_id |
1 | 0 | 0 |
attr |
FNetConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
FNetConfig.vocab_size |
1 | 0 | 0 |
attr |
FNetConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
FNetConfig.hidden_size |
1 | 0 | 0 |
attr |
FNetConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
FNetConfig.intermediate_size |
1 | 0 | 0 |
attr |
FNetConfig.hidden_act |
1 | 0 | 0 |
attr |
FNetConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
FNetConfig.initializer_range |
1 | 0 | 0 |
attr |
FNetConfig.type_vocab_size |
1 | 0 | 0 |
attr |
FNetConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
FNetConfig.use_tpu_fourier_optimizations |
1 | 0 | 0 |
attr |
FNetConfig.tpu_short_seq_length |
1 | 0 | 0 |
transformers.models.fnet.modeling_fnet (70 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FNetForNextSentencePrediction.init |
2 | 0 | 0 |
meth |
FNetForNextSentencePrediction.forward |
9 | 8 | 0 |
attr |
FNetForNextSentencePrediction.fnet |
1 | 0 | 0 |
attr |
FNetForNextSentencePrediction.cls |
1 | 0 | 0 |
meth |
FNetModel.init |
3 | 0 | 0 |
meth |
FNetModel.get_input_embeddings |
1 | 0 | 0 |
meth |
FNetModel.set_input_embeddings |
2 | 0 | 0 |
meth |
FNetModel.forward |
8 | 7 | 0 |
attr |
FNetModel.embeddings |
1 | 0 | 0 |
attr |
FNetModel.encoder |
1 | 0 | 0 |
attr |
FNetModel.pooler |
1 | 0 | 0 |
meth |
FNetForMultipleChoice.init |
2 | 0 | 0 |
meth |
FNetForMultipleChoice.forward |
9 | 8 | 0 |
attr |
FNetForMultipleChoice.fnet |
1 | 0 | 0 |
attr |
FNetForMultipleChoice.dropout |
1 | 0 | 0 |
attr |
FNetForMultipleChoice.classifier |
1 | 0 | 0 |
meth |
FNetForSequenceClassification.init |
2 | 0 | 0 |
meth |
FNetForSequenceClassification.forward |
9 | 8 | 0 |
attr |
FNetForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
FNetForSequenceClassification.fnet |
1 | 0 | 0 |
attr |
FNetForSequenceClassification.dropout |
1 | 0 | 0 |
attr |
FNetForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
FNetLayer.init |
2 | 0 | 0 |
meth |
FNetLayer.forward |
2 | 0 | 0 |
meth |
FNetLayer.feed_forward_chunk |
2 | 0 | 0 |
attr |
FNetLayer.chunk_size_feed_forward |
1 | 0 | 0 |
attr |
FNetLayer.seq_len_dim |
1 | 0 | 0 |
attr |
FNetLayer.fourier |
1 | 0 | 0 |
attr |
FNetLayer.intermediate |
1 | 0 | 0 |
attr |
FNetLayer.output |
1 | 0 | 0 |
meth |
FNetForTokenClassification.init |
2 | 0 | 0 |
meth |
FNetForTokenClassification.forward |
9 | 8 | 0 |
attr |
FNetForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
FNetForTokenClassification.fnet |
1 | 0 | 0 |
attr |
FNetForTokenClassification.dropout |
1 | 0 | 0 |
attr |
FNetForTokenClassification.classifier |
1 | 0 | 0 |
meth |
FNetForMaskedLM.init |
2 | 0 | 0 |
meth |
FNetForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
FNetForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
meth |
FNetForMaskedLM.forward |
9 | 8 | 0 |
attr |
FNetForMaskedLM.fnet |
1 | 0 | 0 |
attr |
FNetForMaskedLM.cls |
1 | 0 | 0 |
meth |
FNetPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
FNetForQuestionAnswering.init |
2 | 0 | 0 |
meth |
FNetForQuestionAnswering.forward |
10 | 9 | 0 |
attr |
FNetForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
FNetForQuestionAnswering.fnet |
1 | 0 | 0 |
attr |
FNetForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
FNetForPreTraining.init |
2 | 0 | 0 |
meth |
FNetForPreTraining.get_output_embeddings |
1 | 0 | 0 |
meth |
FNetForPreTraining.set_output_embeddings |
2 | 0 | 0 |
meth |
FNetForPreTraining.forward |
10 | 9 | 0 |
attr |
FNetForPreTraining.fnet |
1 | 0 | 0 |
attr |
FNetForPreTraining.cls |
1 | 0 | 0 |
transformers.models.focalnet.configuration_focalnet (47 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FocalNetConfig.init |
25 | 0 | 0 |
attr |
FocalNetConfig.image_size |
1 | 0 | 0 |
attr |
FocalNetConfig.patch_size |
1 | 0 | 0 |
attr |
FocalNetConfig.num_channels |
1 | 0 | 0 |
attr |
FocalNetConfig.embed_dim |
1 | 0 | 0 |
attr |
FocalNetConfig.use_conv_embed |
1 | 0 | 0 |
attr |
FocalNetConfig.hidden_sizes |
1 | 0 | 0 |
attr |
FocalNetConfig.depths |
1 | 0 | 0 |
attr |
FocalNetConfig.focal_levels |
1 | 0 | 0 |
attr |
FocalNetConfig.focal_windows |
1 | 0 | 0 |
attr |
FocalNetConfig.hidden_act |
1 | 0 | 0 |
attr |
FocalNetConfig.mlp_ratio |
1 | 0 | 0 |
attr |
FocalNetConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
FocalNetConfig.drop_path_rate |
1 | 0 | 0 |
attr |
FocalNetConfig.use_layerscale |
1 | 0 | 0 |
attr |
FocalNetConfig.layerscale_value |
1 | 0 | 0 |
attr |
FocalNetConfig.use_post_layernorm |
1 | 0 | 0 |
attr |
FocalNetConfig.use_post_layernorm_in_modulation |
1 | 0 | 0 |
attr |
FocalNetConfig.normalize_modulator |
1 | 0 | 0 |
attr |
FocalNetConfig.initializer_range |
1 | 0 | 0 |
attr |
FocalNetConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
FocalNetConfig.encoder_stride |
1 | 0 | 0 |
attr |
FocalNetConfig.stage_names |
1 | 0 | 0 |
transformers.models.focalnet.modeling_focalnet (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FocalNetPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
FocalNetForMaskedImageModeling.init |
2 | 0 | 0 |
meth |
FocalNetForMaskedImageModeling.forward |
6 | 5 | 0 |
attr |
FocalNetForMaskedImageModeling.focalnet |
1 | 0 | 0 |
attr |
FocalNetForMaskedImageModeling.num_stages |
1 | 0 | 0 |
attr |
FocalNetForMaskedImageModeling.decoder |
1 | 0 | 0 |
meth |
FocalNetForImageClassification.init |
2 | 0 | 0 |
meth |
FocalNetForImageClassification.forward |
6 | 5 | 0 |
attr |
FocalNetForImageClassification.num_labels |
1 | 0 | 0 |
attr |
FocalNetForImageClassification.focalnet |
1 | 0 | 0 |
attr |
FocalNetForImageClassification.classifier |
1 | 0 | 0 |
meth |
FocalNetModel.init |
4 | 0 | 0 |
meth |
FocalNetModel.get_input_embeddings |
1 | 0 | 0 |
meth |
FocalNetModel.forward |
6 | 5 | 0 |
attr |
FocalNetModel.num_stages |
1 | 0 | 0 |
attr |
FocalNetModel.num_features |
1 | 0 | 0 |
attr |
FocalNetModel.embeddings |
1 | 0 | 0 |
attr |
FocalNetModel.encoder |
1 | 0 | 0 |
attr |
FocalNetModel.layernorm |
1 | 0 | 0 |
attr |
FocalNetModel.pooler |
1 | 0 | 0 |
meth |
FocalNetBackbone.init |
2 | 1 | 0 |
meth |
FocalNetBackbone.forward |
5 | 4 | 0 |
attr |
FocalNetBackbone.num_features |
1 | 0 | 0 |
attr |
FocalNetBackbone.focalnet |
1 | 0 | 0 |
transformers.models.fsmt.configuration_fsmt (59 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FSMTConfig.init |
33 | 0 | 0 |
attr |
FSMTConfig.langs |
1 | 0 | 0 |
attr |
FSMTConfig.src_vocab_size |
1 | 0 | 0 |
attr |
FSMTConfig.tgt_vocab_size |
1 | 0 | 0 |
attr |
FSMTConfig.d_model |
1 | 0 | 0 |
attr |
FSMTConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
FSMTConfig.encoder_layers |
1 | 0 | 0 |
attr |
FSMTConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
FSMTConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
FSMTConfig.encoder_layerdrop |
1 | 0 | 0 |
attr |
FSMTConfig.decoder_layerdrop |
1 | 0 | 0 |
attr |
FSMTConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
FSMTConfig.decoder_layers |
1 | 0 | 0 |
attr |
FSMTConfig.decoder_attention_heads |
1 | 0 | 0 |
attr |
FSMTConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
FSMTConfig.init_std |
1 | 0 | 0 |
attr |
FSMTConfig.activation_function |
1 | 0 | 0 |
attr |
FSMTConfig.scale_embedding |
1 | 0 | 0 |
attr |
FSMTConfig.attention_dropout |
1 | 0 | 0 |
attr |
FSMTConfig.activation_dropout |
1 | 0 | 0 |
attr |
FSMTConfig.dropout |
1 | 0 | 0 |
attr |
FSMTConfig.use_cache |
1 | 0 | 0 |
attr |
FSMTConfig.pad_token_id |
1 | 0 | 0 |
attr |
FSMTConfig.bos_token_id |
1 | 0 | 0 |
attr |
FSMTConfig.eos_token_id |
1 | 0 | 0 |
attr |
FSMTConfig.decoder_start_token_id |
1 | 0 | 0 |
attr |
FSMTConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.fsmt.modeling_fsmt (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FSMTModel.__init__ |
2 | 1 | 0 |
meth |
FSMTModel.forward |
15 | 14 | 0 |
meth |
FSMTModel.get_input_embeddings |
1 | 0 | 0 |
meth |
FSMTModel.set_input_embeddings |
2 | 0 | 0 |
meth |
FSMTModel.get_output_embeddings |
1 | 0 | 0 |
meth |
FSMTModel.set_output_embeddings |
2 | 0 | 0 |
attr |
FSMTModel.encoder |
1 | 0 | 0 |
attr |
FSMTModel.decoder |
1 | 0 | 0 |
meth |
PretrainedFSMTModel._init_weights |
2 | 0 | 0 |
prop |
PretrainedFSMTModel.dummy_inputs |
1 | 0 | 0 |
meth |
FSMTForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
FSMTForConditionalGeneration.forward |
16 | 15 | 0 |
meth |
FSMTForConditionalGeneration.prepare_decoder_input_ids_from_labels |
2 | 1 | 0 |
meth |
FSMTForConditionalGeneration.get_output_embeddings |
1 | 0 | 0 |
meth |
FSMTForConditionalGeneration.set_output_embeddings |
2 | 0 | 0 |
attr |
FSMTForConditionalGeneration.model |
1 | 0 | 0 |
transformers.models.fsmt.tokenization_fsmt (54 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FSMTTokenizer.__init__ |
11 | 0 | 0 |
meth |
FSMTTokenizer.moses_punct_norm |
3 | 0 | 0 |
meth |
FSMTTokenizer.moses_tokenize |
3 | 0 | 0 |
meth |
FSMTTokenizer.moses_detokenize |
3 | 0 | 0 |
meth |
FSMTTokenizer.moses_pipeline |
3 | 0 | 0 |
meth |
FSMTTokenizer.get_src_vocab |
1 | 0 | 0 |
meth |
FSMTTokenizer.get_tgt_vocab |
1 | 0 | 0 |
meth |
FSMTTokenizer.bpe |
2 | 0 | 0 |
meth |
FSMTTokenizer._tokenize |
4 | 0 | 0 |
meth |
FSMTTokenizer._convert_token_to_id |
2 | 0 | 0 |
meth |
FSMTTokenizer._convert_id_to_token |
2 | 0 | 0 |
meth |
FSMTTokenizer.convert_tokens_to_string |
2 | 0 | 0 |
meth |
FSMTTokenizer.__getstate__ |
1 | 0 | 0 |
meth |
FSMTTokenizer.__setstate__ |
2 | 0 | 0 |
prop |
FSMTTokenizer.src_vocab_size |
1 | 0 | 0 |
prop |
FSMTTokenizer.tgt_vocab_size |
1 | 0 | 0 |
attr |
FSMTTokenizer.sm |
1 | 0 | 0 |
attr |
FSMTTokenizer.src_vocab_file |
1 | 0 | 0 |
attr |
FSMTTokenizer.tgt_vocab_file |
1 | 0 | 0 |
attr |
FSMTTokenizer.merges_file |
1 | 0 | 0 |
attr |
FSMTTokenizer.do_lower_case |
1 | 0 | 0 |
attr |
FSMTTokenizer.cache_moses_punct_normalizer |
1 | 0 | 0 |
attr |
FSMTTokenizer.cache_moses_tokenizer |
1 | 0 | 0 |
attr |
FSMTTokenizer.cache_moses_detokenizer |
1 | 0 | 0 |
attr |
FSMTTokenizer.bpe_ranks |
1 | 0 | 0 |
attr |
FSMTTokenizer.cache |
1 | 0 | 0 |
attr |
FSMTTokenizer.encoder |
1 | 0 | 0 |
attr |
FSMTTokenizer.decoder |
1 | 0 | 0 |
transformers.models.funnel.configuration_funnel (50 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FunnelConfig.__init__ |
24 | 0 | 0 |
prop |
FunnelConfig.num_hidden_layers |
2 | 0 | 0 |
prop |
FunnelConfig.num_blocks |
2 | 0 | 0 |
attr |
FunnelConfig.pad_token_id |
1 | 0 | 0 |
attr |
FunnelConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
FunnelConfig.vocab_size |
1 | 0 | 0 |
attr |
FunnelConfig.block_sizes |
1 | 0 | 0 |
attr |
FunnelConfig.block_repeats |
1 | 0 | 0 |
attr |
FunnelConfig.num_decoder_layers |
1 | 0 | 0 |
attr |
FunnelConfig.d_model |
1 | 0 | 0 |
attr |
FunnelConfig.n_head |
1 | 0 | 0 |
attr |
FunnelConfig.d_head |
1 | 0 | 0 |
attr |
FunnelConfig.d_inner |
1 | 0 | 0 |
attr |
FunnelConfig.hidden_act |
1 | 0 | 0 |
attr |
FunnelConfig.hidden_dropout |
1 | 0 | 0 |
attr |
FunnelConfig.attention_dropout |
1 | 0 | 0 |
attr |
FunnelConfig.activation_dropout |
1 | 0 | 0 |
attr |
FunnelConfig.initializer_range |
1 | 0 | 0 |
attr |
FunnelConfig.initializer_std |
1 | 0 | 0 |
attr |
FunnelConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
FunnelConfig.pooling_type |
1 | 0 | 0 |
attr |
FunnelConfig.attention_type |
1 | 0 | 0 |
attr |
FunnelConfig.separate_cls |
1 | 0 | 0 |
attr |
FunnelConfig.truncate_seq |
1 | 0 | 0 |
attr |
FunnelConfig.pool_q_only |
1 | 0 | 0 |
transformers.models.funnel.modeling_funnel (31 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FunnelForPreTraining.forward |
10 | 9 | 0 |
attr |
FunnelForPreTraining.funnel |
1 | 0 | 0 |
attr |
FunnelForPreTraining.discriminator_predictions |
1 | 0 | 0 |
meth |
FunnelPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
FunnelForMultipleChoice.forward |
10 | 9 | 0 |
attr |
FunnelForMultipleChoice.funnel |
1 | 0 | 0 |
attr |
FunnelForMultipleChoice.classifier |
1 | 0 | 0 |
meth |
FunnelForQuestionAnswering.forward |
11 | 10 | 0 |
attr |
FunnelForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
FunnelForQuestionAnswering.funnel |
1 | 0 | 0 |
attr |
FunnelForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
FunnelModel.forward |
9 | 8 | 0 |
attr |
FunnelModel.embeddings |
1 | 0 | 0 |
attr |
FunnelModel.encoder |
1 | 0 | 0 |
attr |
FunnelModel.decoder |
1 | 0 | 0 |
meth |
FunnelForSequenceClassification.forward |
10 | 9 | 0 |
attr |
FunnelForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
FunnelForSequenceClassification.funnel |
1 | 0 | 0 |
attr |
FunnelForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
FunnelBaseModel.forward |
10 | 9 | 0 |
attr |
FunnelBaseModel.embeddings |
1 | 0 | 0 |
attr |
FunnelBaseModel.encoder |
1 | 0 | 0 |
meth |
FunnelForMaskedLM.forward |
10 | 9 | 0 |
attr |
FunnelForMaskedLM.funnel |
1 | 0 | 0 |
attr |
FunnelForMaskedLM.lm_head |
1 | 0 | 0 |
meth |
FunnelForTokenClassification.forward |
10 | 9 | 0 |
attr |
FunnelForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
FunnelForTokenClassification.funnel |
1 | 0 | 0 |
attr |
FunnelForTokenClassification.dropout |
1 | 0 | 0 |
attr |
FunnelForTokenClassification.classifier |
1 | 0 | 0 |
transformers.models.funnel.tokenization_funnel (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FunnelTokenizer.__init__ |
15 | 13 | 0 |
attr |
FunnelTokenizer.do_lower_case |
1 | 0 | 0 |
attr |
FunnelTokenizer.tokenize_chinese_chars |
1 | 0 | 0 |
attr |
FunnelTokenizer.strip_accents |
1 | 0 | 0 |
attr |
FunnelTokenizer.clean_text |
1 | 0 | 0 |
attr |
FunnelTokenizer.wordpieces_prefix |
1 | 0 | 0 |
transformers.models.fuyu.configuration_fuyu (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FuyuConfig.__init__ |
25 | 23 | 0 |
attr |
FuyuConfig.text_config |
1 | 0 | 0 |
attr |
FuyuConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
FuyuConfig.image_size |
1 | 0 | 0 |
attr |
FuyuConfig.patch_size |
1 | 0 | 0 |
attr |
FuyuConfig.num_channels |
1 | 0 | 0 |
attr |
FuyuConfig.hidden_size |
1 | 0 | 0 |
attr |
FuyuConfig.intermediate_size |
1 | 0 | 0 |
attr |
FuyuConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
FuyuConfig.num_attention_heads |
1 | 0 | 0 |
attr |
FuyuConfig.hidden_act |
1 | 0 | 0 |
attr |
FuyuConfig.initializer_range |
1 | 0 | 0 |
attr |
FuyuConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
FuyuConfig.use_cache |
1 | 0 | 0 |
attr |
FuyuConfig.qk_layernorm |
1 | 0 | 0 |
attr |
FuyuConfig.hidden_dropout |
1 | 0 | 0 |
attr |
FuyuConfig.attention_dropout |
1 | 0 | 0 |
attr |
FuyuConfig.image_token_id |
1 | 0 | 0 |
attr |
FuyuConfig.rope_parameters |
1 | 0 | 0 |
attr |
FuyuConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
FuyuConfig.pad_token_id |
1 | 0 | 0 |
attr |
FuyuConfig.bos_token_id |
1 | 0 | 0 |
attr |
FuyuConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.fuyu.image_processing_fuyu (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FuyuImageProcessor.__init__ |
14 | 12 | 0 |
meth |
FuyuImageProcessor.resize |
7 | 6 | 0 |
meth |
FuyuImageProcessor.preprocess |
17 | 15 | 0 |
attr |
FuyuImageProcessor.do_resize |
1 | 0 | 0 |
attr |
FuyuImageProcessor.size |
1 | 0 | 0 |
attr |
FuyuImageProcessor.resample |
1 | 0 | 0 |
attr |
FuyuImageProcessor.do_pad |
1 | 0 | 0 |
attr |
FuyuImageProcessor.padding_value |
1 | 0 | 0 |
attr |
FuyuImageProcessor.padding_mode |
1 | 0 | 0 |
attr |
FuyuImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
FuyuImageProcessor.image_mean |
1 | 0 | 0 |
attr |
FuyuImageProcessor.image_std |
1 | 0 | 0 |
attr |
FuyuImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
FuyuImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
FuyuImageProcessor.patch_size |
1 | 0 | 0 |
transformers.models.fuyu.image_processing_fuyu_fast (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FuyuImageProcessorFast.resize |
6 | 5 | 0 |
meth |
FuyuImageProcessorFast._preprocess |
16 | 15 | 0 |
meth |
FuyuImageProcessorFast._further_process_kwargs |
3 | 2 | 0 |
transformers.models.fuyu.modeling_fuyu (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FuyuForCausalLM.__init__ |
2 | 1 | 0 |
meth |
FuyuForCausalLM.get_input_embeddings |
1 | 0 | 0 |
meth |
FuyuForCausalLM.set_input_embeddings |
2 | 0 | 0 |
meth |
FuyuForCausalLM.forward |
15 | 14 | 0 |
meth |
FuyuForCausalLM.prepare_inputs_for_generation |
10 | 0 | 0 |
attr |
FuyuForCausalLM.model |
1 | 0 | 0 |
attr |
FuyuForCausalLM.lm_head |
1 | 0 | 0 |
meth |
FuyuModel.__init__ |
2 | 1 | 0 |
meth |
FuyuModel.get_input_embeddings |
1 | 0 | 0 |
meth |
FuyuModel.set_input_embeddings |
2 | 0 | 0 |
meth |
FuyuModel.get_placeholder_mask |
4 | 3 | 0 |
meth |
FuyuModel.forward |
13 | 12 | 0 |
attr |
FuyuModel.padding_idx |
1 | 0 | 0 |
attr |
FuyuModel.vocab_size |
1 | 0 | 0 |
attr |
FuyuModel.language_model |
1 | 0 | 0 |
attr |
FuyuModel.vision_embed_tokens |
1 | 0 | 0 |
attr |
FuyuModel.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.fuyu.processing_fuyu (37 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FuyuProcessor._load_tokenizer_from_pretrained |
5 | 0 | 0 |
meth |
FuyuProcessor.__init__ |
4 | 0 | 0 |
meth |
FuyuProcessor._left_pad_inputs_with_attention_mask |
3 | 2 | 0 |
meth |
FuyuProcessor.get_sample_encoding |
8 | 0 | 0 |
meth |
FuyuProcessor._get_num_multimodal_tokens |
3 | 0 | 0 |
meth |
FuyuProcessor.post_process_box_coordinates |
3 | 0 | 0 |
meth |
FuyuProcessor.post_process_image_text_to_text |
4 | 0 | 0 |
prop |
FuyuProcessor.model_input_names |
1 | 0 | 0 |
attr |
FuyuProcessor.image_processor |
1 | 0 | 0 |
attr |
FuyuProcessor.tokenizer |
1 | 0 | 0 |
attr |
FuyuProcessor.max_tokens_to_generate |
1 | 0 | 0 |
attr |
FuyuProcessor.max_position_embeddings |
1 | 0 | 0 |
attr |
FuyuProcessor.pad_token_id |
1 | 0 | 0 |
attr |
FuyuProcessor.dummy_image_index |
1 | 0 | 0 |
attr |
FuyuProcessor.image_token_id |
1 | 0 | 0 |
attr |
FuyuProcessor.image_newline_id |
1 | 0 | 0 |
transformers.models.gemma.configuration_gemma (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GemmaConfig.__init__ |
22 | 20 | 0 |
attr |
GemmaConfig.vocab_size |
1 | 0 | 0 |
attr |
GemmaConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
GemmaConfig.hidden_size |
1 | 0 | 0 |
attr |
GemmaConfig.intermediate_size |
1 | 0 | 0 |
attr |
GemmaConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
GemmaConfig.num_attention_heads |
1 | 0 | 0 |
attr |
GemmaConfig.head_dim |
1 | 0 | 0 |
attr |
GemmaConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
GemmaConfig.hidden_act |
1 | 0 | 0 |
attr |
GemmaConfig.initializer_range |
1 | 0 | 0 |
attr |
GemmaConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
GemmaConfig.use_cache |
1 | 0 | 0 |
attr |
GemmaConfig.attention_bias |
1 | 0 | 0 |
attr |
GemmaConfig.attention_dropout |
1 | 0 | 0 |
attr |
GemmaConfig.use_bidirectional_attention |
1 | 0 | 0 |
attr |
GemmaConfig.pad_token_id |
1 | 0 | 0 |
attr |
GemmaConfig.bos_token_id |
1 | 0 | 0 |
attr |
GemmaConfig.eos_token_id |
1 | 0 | 0 |
attr |
GemmaConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
GemmaConfig.rope_parameters |
1 | 0 | 0 |
transformers.models.gemma.modeling_gemma (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GemmaModel.__init__ |
2 | 1 | 0 |
attr |
GemmaModel.padding_idx |
1 | 0 | 0 |
attr |
GemmaModel.vocab_size |
1 | 0 | 0 |
attr |
GemmaModel.embed_tokens |
1 | 0 | 0 |
attr |
GemmaModel.layers |
1 | 0 | 0 |
attr |
GemmaModel.norm |
1 | 0 | 0 |
attr |
GemmaModel.rotary_emb |
1 | 0 | 0 |
attr |
GemmaModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
GemmaPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
GemmaForCausalLM.__init__ |
2 | 0 | 0 |
attr |
GemmaForCausalLM.model |
1 | 0 | 0 |
attr |
GemmaForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
GemmaForCausalLM.lm_head |
1 | 0 | 0 |
transformers.models.gemma.modular_gemma (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GemmaPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
GemmaForCausalLM.forward |
2 | 0 | 0 |
meth |
GemmaConfig.__init__ |
22 | 20 | 0 |
attr |
GemmaConfig.vocab_size |
1 | 0 | 0 |
attr |
GemmaConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
GemmaConfig.hidden_size |
1 | 0 | 0 |
attr |
GemmaConfig.intermediate_size |
1 | 0 | 0 |
attr |
GemmaConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
GemmaConfig.num_attention_heads |
1 | 0 | 0 |
attr |
GemmaConfig.head_dim |
1 | 0 | 0 |
attr |
GemmaConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
GemmaConfig.hidden_act |
1 | 0 | 0 |
attr |
GemmaConfig.initializer_range |
1 | 0 | 0 |
attr |
GemmaConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
GemmaConfig.use_cache |
1 | 0 | 0 |
attr |
GemmaConfig.attention_bias |
1 | 0 | 0 |
attr |
GemmaConfig.attention_dropout |
1 | 0 | 0 |
attr |
GemmaConfig.use_bidirectional_attention |
1 | 0 | 0 |
attr |
GemmaConfig.pad_token_id |
1 | 0 | 0 |
attr |
GemmaConfig.bos_token_id |
1 | 0 | 0 |
attr |
GemmaConfig.eos_token_id |
1 | 0 | 0 |
attr |
GemmaConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
GemmaConfig.rope_parameters |
1 | 0 | 0 |
transformers.models.gemma.tokenization_gemma (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GemmaTokenizer.__init__ |
9 | 7 | 0 |
transformers.models.gemma2.configuration_gemma2 (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Gemma2Config.__init__ |
27 | 25 | 0 |
attr |
Gemma2Config.pad_token_id |
1 | 0 | 0 |
attr |
Gemma2Config.bos_token_id |
1 | 0 | 0 |
attr |
Gemma2Config.eos_token_id |
1 | 0 | 0 |
attr |
Gemma2Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
Gemma2Config.vocab_size |
1 | 0 | 0 |
attr |
Gemma2Config.max_position_embeddings |
1 | 0 | 0 |
attr |
Gemma2Config.hidden_size |
1 | 0 | 0 |
attr |
Gemma2Config.intermediate_size |
1 | 0 | 0 |
attr |
Gemma2Config.num_hidden_layers |
1 | 0 | 0 |
attr |
Gemma2Config.num_attention_heads |
1 | 0 | 0 |
attr |
Gemma2Config.head_dim |
1 | 0 | 0 |
attr |
Gemma2Config.num_key_value_heads |
1 | 0 | 0 |
attr |
Gemma2Config.initializer_range |
1 | 0 | 0 |
attr |
Gemma2Config.rms_norm_eps |
1 | 0 | 0 |
attr |
Gemma2Config.use_cache |
1 | 0 | 0 |
attr |
Gemma2Config.attention_bias |
1 | 0 | 0 |
attr |
Gemma2Config.attention_dropout |
1 | 0 | 0 |
attr |
Gemma2Config.hidden_activation |
1 | 0 | 0 |
attr |
Gemma2Config.query_pre_attn_scalar |
1 | 0 | 0 |
attr |
Gemma2Config.sliding_window |
1 | 0 | 0 |
attr |
Gemma2Config.final_logit_softcapping |
1 | 0 | 0 |
attr |
Gemma2Config.attn_logit_softcapping |
1 | 0 | 0 |
attr |
Gemma2Config.layer_types |
1 | 0 | 0 |
attr |
Gemma2Config.use_bidirectional_attention |
1 | 0 | 0 |
attr |
Gemma2Config.rope_parameters |
1 | 0 | 0 |
transformers.models.gemma2.modeling_gemma2 (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Gemma2PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Gemma2Model.__init__ |
2 | 1 | 0 |
attr |
Gemma2Model.padding_idx |
1 | 0 | 0 |
attr |
Gemma2Model.vocab_size |
1 | 0 | 0 |
attr |
Gemma2Model.embed_tokens |
1 | 0 | 0 |
attr |
Gemma2Model.layers |
1 | 0 | 0 |
attr |
Gemma2Model.norm |
1 | 0 | 0 |
attr |
Gemma2Model.rotary_emb |
1 | 0 | 0 |
attr |
Gemma2Model.gradient_checkpointing |
1 | 0 | 0 |
meth |
Gemma2ForCausalLM.__init__ |
2 | 0 | 0 |
attr |
Gemma2ForCausalLM.model |
1 | 0 | 0 |
attr |
Gemma2ForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Gemma2ForCausalLM.lm_head |
1 | 0 | 0 |
transformers.models.gemma2.modular_gemma2 (33 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Gemma2Config.__init__ |
27 | 25 | 0 |
attr |
Gemma2Config.pad_token_id |
1 | 0 | 0 |
attr |
Gemma2Config.bos_token_id |
1 | 0 | 0 |
attr |
Gemma2Config.eos_token_id |
1 | 0 | 0 |
attr |
Gemma2Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
Gemma2Config.vocab_size |
1 | 0 | 0 |
attr |
Gemma2Config.max_position_embeddings |
1 | 0 | 0 |
attr |
Gemma2Config.hidden_size |
1 | 0 | 0 |
attr |
Gemma2Config.intermediate_size |
1 | 0 | 0 |
attr |
Gemma2Config.num_hidden_layers |
1 | 0 | 0 |
attr |
Gemma2Config.num_attention_heads |
1 | 0 | 0 |
attr |
Gemma2Config.head_dim |
1 | 0 | 0 |
attr |
Gemma2Config.num_key_value_heads |
1 | 0 | 0 |
attr |
Gemma2Config.initializer_range |
1 | 0 | 0 |
attr |
Gemma2Config.rms_norm_eps |
1 | 0 | 0 |
attr |
Gemma2Config.use_cache |
1 | 0 | 0 |
attr |
Gemma2Config.attention_bias |
1 | 0 | 0 |
attr |
Gemma2Config.attention_dropout |
1 | 0 | 0 |
attr |
Gemma2Config.hidden_activation |
1 | 0 | 0 |
attr |
Gemma2Config.query_pre_attn_scalar |
1 | 0 | 0 |
attr |
Gemma2Config.sliding_window |
1 | 0 | 0 |
attr |
Gemma2Config.final_logit_softcapping |
1 | 0 | 0 |
attr |
Gemma2Config.attn_logit_softcapping |
1 | 0 | 0 |
attr |
Gemma2Config.layer_types |
1 | 0 | 0 |
attr |
Gemma2Config.use_bidirectional_attention |
1 | 0 | 0 |
attr |
Gemma2Config.rope_parameters |
1 | 0 | 0 |
meth |
Gemma2Model.__init__ |
2 | 1 | 0 |
attr |
Gemma2Model.layers |
1 | 0 | 0 |
attr |
Gemma2Model.rotary_emb |
1 | 0 | 0 |
meth |
Gemma2ForCausalLM.__init__ |
2 | 0 | 0 |
attr |
Gemma2ForCausalLM.model |
1 | 0 | 0 |
transformers.models.gemma3.configuration_gemma3 (40 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Gemma3TextConfig.__init__ |
27 | 25 | 0 |
meth |
Gemma3TextConfig.convert_rope_params_to_dict |
3 | 0 | 0 |
attr |
Gemma3TextConfig.pad_token_id |
1 | 0 | 0 |
attr |
Gemma3TextConfig.bos_token_id |
1 | 0 | 0 |
attr |
Gemma3TextConfig.eos_token_id |
1 | 0 | 0 |
attr |
Gemma3TextConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Gemma3TextConfig.vocab_size |
1 | 0 | 0 |
attr |
Gemma3TextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Gemma3TextConfig.hidden_size |
1 | 0 | 0 |
attr |
Gemma3TextConfig.intermediate_size |
1 | 0 | 0 |
attr |
Gemma3TextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Gemma3TextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Gemma3TextConfig.head_dim |
1 | 0 | 0 |
attr |
Gemma3TextConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Gemma3TextConfig.initializer_range |
1 | 0 | 0 |
attr |
Gemma3TextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Gemma3TextConfig.use_cache |
1 | 0 | 0 |
attr |
Gemma3TextConfig.attention_bias |
1 | 0 | 0 |
attr |
Gemma3TextConfig.attention_dropout |
1 | 0 | 0 |
attr |
Gemma3TextConfig.hidden_activation |
1 | 0 | 0 |
attr |
Gemma3TextConfig.query_pre_attn_scalar |
1 | 0 | 0 |
attr |
Gemma3TextConfig.sliding_window |
1 | 0 | 0 |
attr |
Gemma3TextConfig.final_logit_softcapping |
1 | 0 | 0 |
attr |
Gemma3TextConfig.attn_logit_softcapping |
1 | 0 | 0 |
attr |
Gemma3TextConfig.layer_types |
1 | 0 | 0 |
attr |
Gemma3TextConfig.use_bidirectional_attention |
1 | 0 | 0 |
attr |
Gemma3TextConfig.rope_parameters |
1 | 0 | 0 |
meth |
Gemma3Config.__init__ |
10 | 8 | 0 |
attr |
Gemma3Config.text_config |
1 | 0 | 0 |
attr |
Gemma3Config.vision_config |
1 | 0 | 0 |
attr |
Gemma3Config.mm_tokens_per_image |
1 | 0 | 0 |
attr |
Gemma3Config.boi_token_index |
1 | 0 | 0 |
attr |
Gemma3Config.eoi_token_index |
1 | 0 | 0 |
attr |
Gemma3Config.image_token_index |
1 | 0 | 0 |
attr |
Gemma3Config.initializer_range |
1 | 0 | 0 |
attr |
Gemma3Config.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.gemma3.image_processing_gemma3 (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Gemma3ImageProcessor.__init__ |
15 | 14 | 0 |
meth |
Gemma3ImageProcessor.pan_and_scan |
7 | 6 | 0 |
meth |
Gemma3ImageProcessor._process_images_for_pan_and_scan |
8 | 7 | 0 |
attr |
Gemma3ImageProcessor.do_resize |
1 | 0 | 0 |
attr |
Gemma3ImageProcessor.size |
1 | 0 | 0 |
attr |
Gemma3ImageProcessor.resample |
1 | 0 | 0 |
attr |
Gemma3ImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
Gemma3ImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
Gemma3ImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
Gemma3ImageProcessor.image_mean |
1 | 0 | 0 |
attr |
Gemma3ImageProcessor.image_std |
1 | 0 | 0 |
attr |
Gemma3ImageProcessor.do_convert_rgb |
1 | 0 | 0 |
attr |
Gemma3ImageProcessor.do_pan_and_scan |
1 | 0 | 0 |
attr |
Gemma3ImageProcessor.pan_and_scan_min_crop_size |
1 | 0 | 0 |
attr |
Gemma3ImageProcessor.pan_and_scan_max_num_crops |
1 | 0 | 0 |
attr |
Gemma3ImageProcessor.pan_and_scan_min_ratio_to_activate |
1 | 0 | 0 |
transformers.models.gemma3.image_processing_gemma3_fast (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Gemma3ImageProcessorFast.__init__ |
2 | 1 | 0 |
meth |
Gemma3ImageProcessorFast.pan_and_scan_batched |
5 | 4 | 0 |
meth |
Gemma3ImageProcessorFast._process_images_for_pan_and_scan |
6 | 5 | 0 |
meth |
Gemma3ImageProcessorFast._preprocess |
17 | 16 | 0 |
transformers.models.gemma3.modeling_gemma3 (53 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Gemma3ForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
Gemma3ForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Gemma3ForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Gemma3ForConditionalGeneration.get_image_features |
3 | 2 | 0 |
meth |
Gemma3ForConditionalGeneration.prepare_inputs_for_generation |
14 | 0 | 0 |
meth |
Gemma3ForConditionalGeneration.create_masks_for_generate |
10 | 9 | 0 |
attr |
Gemma3ForConditionalGeneration.model |
1 | 0 | 0 |
attr |
Gemma3ForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
Gemma3TextModel.__init__ |
2 | 1 | 0 |
attr |
Gemma3TextModel.padding_idx |
1 | 0 | 0 |
attr |
Gemma3TextModel.vocab_size |
1 | 0 | 0 |
attr |
Gemma3TextModel.embed_tokens |
1 | 0 | 0 |
attr |
Gemma3TextModel.layers |
1 | 0 | 0 |
attr |
Gemma3TextModel.norm |
1 | 0 | 0 |
attr |
Gemma3TextModel.rotary_emb |
1 | 0 | 0 |
attr |
Gemma3TextModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
Gemma3Model.__init__ |
2 | 1 | 0 |
meth |
Gemma3Model.get_input_embeddings |
1 | 0 | 0 |
meth |
Gemma3Model.set_input_embeddings |
2 | 0 | 0 |
meth |
Gemma3Model.get_placeholder_mask |
4 | 3 | 0 |
attr |
Gemma3Model.vision_tower |
1 | 0 | 0 |
attr |
Gemma3Model.multi_modal_projector |
1 | 0 | 0 |
attr |
Gemma3Model.vocab_size |
1 | 0 | 0 |
attr |
Gemma3Model.language_model |
1 | 0 | 0 |
meth |
Gemma3ForSequenceClassification.__init__ |
2 | 0 | 0 |
meth |
Gemma3ForSequenceClassification.get_input_embeddings |
1 | 0 | 0 |
meth |
Gemma3ForSequenceClassification.set_input_embeddings |
2 | 0 | 0 |
attr |
Gemma3ForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
Gemma3ForSequenceClassification.model |
1 | 0 | 0 |
attr |
Gemma3ForSequenceClassification.score |
1 | 0 | 0 |
meth |
Gemma3ForCausalLM.__init__ |
2 | 1 | 0 |
attr |
Gemma3ForCausalLM.model |
1 | 0 | 0 |
attr |
Gemma3ForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Gemma3ForCausalLM.lm_head |
1 | 0 | 0 |
meth |
Gemma3PreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.gemma3.modular_gemma3 (69 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Gemma3TextConfig.__init__ |
27 | 25 | 0 |
meth |
Gemma3TextConfig.convert_rope_params_to_dict |
3 | 0 | 0 |
attr |
Gemma3TextConfig.pad_token_id |
1 | 0 | 0 |
attr |
Gemma3TextConfig.bos_token_id |
1 | 0 | 0 |
attr |
Gemma3TextConfig.eos_token_id |
1 | 0 | 0 |
attr |
Gemma3TextConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Gemma3TextConfig.vocab_size |
1 | 0 | 0 |
attr |
Gemma3TextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Gemma3TextConfig.hidden_size |
1 | 0 | 0 |
attr |
Gemma3TextConfig.intermediate_size |
1 | 0 | 0 |
attr |
Gemma3TextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Gemma3TextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Gemma3TextConfig.head_dim |
1 | 0 | 0 |
attr |
Gemma3TextConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Gemma3TextConfig.initializer_range |
1 | 0 | 0 |
attr |
Gemma3TextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Gemma3TextConfig.use_cache |
1 | 0 | 0 |
attr |
Gemma3TextConfig.attention_bias |
1 | 0 | 0 |
attr |
Gemma3TextConfig.attention_dropout |
1 | 0 | 0 |
attr |
Gemma3TextConfig.hidden_activation |
1 | 0 | 0 |
attr |
Gemma3TextConfig.query_pre_attn_scalar |
1 | 0 | 0 |
attr |
Gemma3TextConfig.sliding_window |
1 | 0 | 0 |
attr |
Gemma3TextConfig.final_logit_softcapping |
1 | 0 | 0 |
attr |
Gemma3TextConfig.attn_logit_softcapping |
1 | 0 | 0 |
attr |
Gemma3TextConfig.layer_types |
1 | 0 | 0 |
attr |
Gemma3TextConfig.use_bidirectional_attention |
1 | 0 | 0 |
attr |
Gemma3TextConfig.rope_parameters |
1 | 0 | 0 |
meth |
Gemma3Config.__init__ |
10 | 8 | 0 |
attr |
Gemma3Config.text_config |
1 | 0 | 0 |
attr |
Gemma3Config.vision_config |
1 | 0 | 0 |
attr |
Gemma3Config.mm_tokens_per_image |
1 | 0 | 0 |
attr |
Gemma3Config.boi_token_index |
1 | 0 | 0 |
attr |
Gemma3Config.eoi_token_index |
1 | 0 | 0 |
attr |
Gemma3Config.image_token_index |
1 | 0 | 0 |
attr |
Gemma3Config.initializer_range |
1 | 0 | 0 |
attr |
Gemma3Config.tie_word_embeddings |
1 | 0 | 0 |
meth |
Gemma3Model.__init__ |
2 | 1 | 0 |
meth |
Gemma3ForConditionalGeneration.prepare_inputs_for_generation |
14 | 0 | 0 |
meth |
Gemma3TextModel.__init__ |
2 | 1 | 0 |
attr |
Gemma3TextModel.embed_tokens |
1 | 0 | 0 |
meth |
Gemma3ForSequenceClassification.__init__ |
2 | 0 | 0 |
meth |
Gemma3ForSequenceClassification.get_input_embeddings |
1 | 0 | 0 |
meth |
Gemma3ForSequenceClassification.set_input_embeddings |
2 | 0 | 0 |
attr |
Gemma3ForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
Gemma3ForSequenceClassification.model |
1 | 0 | 0 |
attr |
Gemma3ForSequenceClassification.score |
1 | 0 | 0 |
meth |
Gemma3ForCausalLM.__init__ |
2 | 1 | 0 |
attr |
Gemma3ForCausalLM.model |
1 | 0 | 0 |
meth |
Gemma3PreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.gemma3.processing_gemma3 (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Gemma3Processor.__init__ |
6 | 1 | 0 |
meth |
Gemma3Processor._get_num_multimodal_tokens |
3 | 0 | 0 |
prop |
Gemma3Processor.model_input_names |
1 | 0 | 0 |
attr |
Gemma3Processor.image_seq_length |
1 | 0 | 0 |
attr |
Gemma3Processor.image_token_id |
1 | 0 | 0 |
attr |
Gemma3Processor.boi_token |
1 | 0 | 0 |
attr |
Gemma3Processor.image_token |
1 | 0 | 0 |
attr |
Gemma3Processor.full_image_sequence |
1 | 0 | 0 |
transformers.models.gemma3n.configuration_gemma3n (84 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Gemma3nVisionConfig.__init__ |
10 | 8 | 0 |
meth |
Gemma3nVisionConfig.from_dict |
3 | 1 | 0 |
attr |
Gemma3nVisionConfig.architecture |
1 | 0 | 0 |
attr |
Gemma3nVisionConfig.initializer_range |
1 | 0 | 0 |
attr |
Gemma3nVisionConfig.do_pooling |
1 | 0 | 0 |
attr |
Gemma3nVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
Gemma3nVisionConfig.vocab_size |
1 | 0 | 0 |
attr |
Gemma3nVisionConfig.vocab_offset |
1 | 0 | 0 |
attr |
Gemma3nVisionConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Gemma3nVisionConfig.model_args |
1 | 0 | 0 |
meth |
Gemma3nTextConfig.__init__ |
33 | 31 | 0 |
meth |
Gemma3nTextConfig.convert_rope_params_to_dict |
3 | 0 | 0 |
attr |
Gemma3nTextConfig.pad_token_id |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.bos_token_id |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.eos_token_id |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.vocab_size |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.vocab_size_per_layer_input |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.hidden_size |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.head_dim |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.initializer_range |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.use_cache |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.attention_bias |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.attention_dropout |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.hidden_activation |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.sliding_window |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.final_logit_softcapping |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.layer_types |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.hidden_size_per_layer_input |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.num_kv_shared_layers |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.altup_active_idx |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.altup_coef_clip |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.altup_correct_scale |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.altup_num_inputs |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.laurel_rank |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.activation_sparsity_pattern |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.rope_parameters |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.tie_word_embeddings |
1 | 0 | 0 |
meth |
Gemma3nConfig.__init__ |
15 | 13 | 0 |
attr |
Gemma3nConfig.text_config |
1 | 0 | 0 |
attr |
Gemma3nConfig.vision_config |
1 | 0 | 0 |
attr |
Gemma3nConfig.audio_config |
1 | 0 | 0 |
attr |
Gemma3nConfig.audio_soft_tokens_per_image |
1 | 0 | 0 |
attr |
Gemma3nConfig.vision_soft_tokens_per_image |
1 | 0 | 0 |
attr |
Gemma3nConfig.boi_token_id |
1 | 0 | 0 |
attr |
Gemma3nConfig.eoi_token_id |
1 | 0 | 0 |
attr |
Gemma3nConfig.image_token_id |
1 | 0 | 0 |
attr |
Gemma3nConfig.boa_token_id |
1 | 0 | 0 |
attr |
Gemma3nConfig.eoa_token_id |
1 | 0 | 0 |
attr |
Gemma3nConfig.audio_token_id |
1 | 0 | 0 |
attr |
Gemma3nConfig.initializer_range |
1 | 0 | 0 |
attr |
Gemma3nConfig.tie_word_embeddings |
1 | 0 | 0 |
meth |
Gemma3nAudioConfig.__init__ |
21 | 19 | 0 |
attr |
Gemma3nAudioConfig.input_feat_size |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.hidden_size |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.vocab_size |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.vocab_offset |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.gradient_clipping |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.conf_attention_chunk_size |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.conf_attention_context_left |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.conf_attention_context_right |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.conf_attention_logit_cap |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.conf_num_attention_heads |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.conf_num_hidden_layers |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.conf_conv_kernel_size |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.conf_reduction_factor |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.conf_residual_weight |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.sscp_conv_channel_size |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.sscp_conv_group_norm_eps |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.sscp_conv_kernel_size |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.sscp_conv_stride_size |
1 | 0 | 0 |
transformers.models.gemma3n.feature_extraction_gemma3n (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Gemma3nAudioFeatureExtractor.__init__ |
18 | 16 | 0 |
meth |
Gemma3nAudioFeatureExtractor.__call__ |
9 | 8 | 0 |
attr |
Gemma3nAudioFeatureExtractor.min_frequency |
1 | 0 | 0 |
attr |
Gemma3nAudioFeatureExtractor.max_frequency |
1 | 0 | 0 |
attr |
Gemma3nAudioFeatureExtractor.preemphasis |
1 | 0 | 0 |
attr |
Gemma3nAudioFeatureExtractor.preemphasis_htk_flavor |
1 | 0 | 0 |
attr |
Gemma3nAudioFeatureExtractor.fft_overdrive |
1 | 0 | 0 |
attr |
Gemma3nAudioFeatureExtractor.dither |
1 | 0 | 0 |
attr |
Gemma3nAudioFeatureExtractor.input_scale_factor |
1 | 0 | 0 |
attr |
Gemma3nAudioFeatureExtractor.frame_length |
1 | 0 | 0 |
attr |
Gemma3nAudioFeatureExtractor.hop_length |
1 | 0 | 0 |
attr |
Gemma3nAudioFeatureExtractor.mel_floor |
1 | 0 | 0 |
attr |
Gemma3nAudioFeatureExtractor.fft_length |
1 | 0 | 0 |
attr |
Gemma3nAudioFeatureExtractor.window |
1 | 0 | 0 |
attr |
Gemma3nAudioFeatureExtractor.mel_filters |
1 | 0 | 0 |
attr |
Gemma3nAudioFeatureExtractor.per_bin_mean |
1 | 0 | 0 |
attr |
Gemma3nAudioFeatureExtractor.per_bin_stddev |
1 | 0 | 0 |
transformers.models.gemma3n.modeling_gemma3n (59 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Gemma3nPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Gemma3nModel.__init__ |
2 | 1 | 0 |
meth |
Gemma3nModel.get_input_embeddings |
1 | 0 | 0 |
meth |
Gemma3nModel.set_input_embeddings |
2 | 0 | 0 |
meth |
Gemma3nModel.get_placeholder_mask |
5 | 4 | 0 |
attr |
Gemma3nModel.vision_tower |
1 | 0 | 0 |
attr |
Gemma3nModel.vocab_size |
1 | 0 | 0 |
attr |
Gemma3nModel.language_model |
1 | 0 | 0 |
attr |
Gemma3nModel.vocab_size_per_layer_input |
1 | 0 | 0 |
attr |
Gemma3nModel.audio_tower |
1 | 0 | 0 |
attr |
Gemma3nModel.embed_vision |
1 | 0 | 0 |
attr |
Gemma3nModel.embed_audio |
1 | 0 | 0 |
meth |
Gemma3nForCausalLM.__init__ |
2 | 1 | 0 |
attr |
Gemma3nForCausalLM.model |
1 | 0 | 0 |
attr |
Gemma3nForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Gemma3nForCausalLM.lm_head |
1 | 0 | 0 |
meth |
Gemma3nAudioEncoder.__init__ |
2 | 1 | 0 |
attr |
Gemma3nAudioEncoder.subsample_conv_projection |
1 | 0 | 0 |
attr |
Gemma3nAudioEncoder.conformer |
1 | 0 | 0 |
meth |
Gemma3nTextModel.__init__ |
2 | 1 | 0 |
attr |
Gemma3nTextModel.padding_idx |
1 | 0 | 0 |
attr |
Gemma3nTextModel.vocab_size |
1 | 0 | 0 |
attr |
Gemma3nTextModel.embed_tokens |
1 | 0 | 0 |
attr |
Gemma3nTextModel.layers |
1 | 0 | 0 |
attr |
Gemma3nTextModel.norm |
1 | 0 | 0 |
attr |
Gemma3nTextModel.rotary_emb |
1 | 0 | 0 |
attr |
Gemma3nTextModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
Gemma3nTextModel.hidden_size |
1 | 0 | 0 |
attr |
Gemma3nTextModel.hidden_size_per_layer_input |
1 | 0 | 0 |
attr |
Gemma3nTextModel.embed_tokens_per_layer |
1 | 0 | 0 |
attr |
Gemma3nTextModel.per_layer_model_projection |
1 | 0 | 0 |
attr |
Gemma3nTextModel.per_layer_projection_norm |
1 | 0 | 0 |
attr |
Gemma3nTextModel.altup_projections |
1 | 0 | 0 |
attr |
Gemma3nTextModel.altup_unembed_projections |
1 | 0 | 0 |
meth |
Gemma3nForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
Gemma3nForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Gemma3nForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Gemma3nForConditionalGeneration.get_image_features |
3 | 2 | 0 |
meth |
Gemma3nForConditionalGeneration.prepare_inputs_for_generation |
16 | 0 | 0 |
attr |
Gemma3nForConditionalGeneration.model |
1 | 0 | 0 |
attr |
Gemma3nForConditionalGeneration.lm_head |
1 | 0 | 0 |
transformers.models.gemma3n.modular_gemma3n (120 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Gemma3nAudioEncoder.__init__ |
2 | 1 | 0 |
attr |
Gemma3nAudioEncoder.subsample_conv_projection |
1 | 0 | 0 |
attr |
Gemma3nAudioEncoder.conformer |
1 | 0 | 0 |
meth |
Gemma3nTextModel.__init__ |
2 | 1 | 0 |
attr |
Gemma3nTextModel.hidden_size |
1 | 0 | 0 |
attr |
Gemma3nTextModel.hidden_size_per_layer_input |
1 | 0 | 0 |
attr |
Gemma3nTextModel.embed_tokens_per_layer |
1 | 0 | 0 |
attr |
Gemma3nTextModel.per_layer_model_projection |
1 | 0 | 0 |
attr |
Gemma3nTextModel.per_layer_projection_norm |
1 | 0 | 0 |
attr |
Gemma3nTextModel.layers |
1 | 0 | 0 |
attr |
Gemma3nTextModel.norm |
1 | 0 | 0 |
attr |
Gemma3nTextModel.altup_projections |
1 | 0 | 0 |
attr |
Gemma3nTextModel.altup_unembed_projections |
1 | 0 | 0 |
meth |
Gemma3nForConditionalGeneration.prepare_inputs_for_generation |
16 | 0 | 0 |
meth |
Gemma3nForConditionalGeneration.create_masks_for_generate |
2 | 0 | 0 |
meth |
Gemma3nTextConfig.__init__ |
33 | 31 | 0 |
meth |
Gemma3nTextConfig.convert_rope_params_to_dict |
3 | 0 | 0 |
attr |
Gemma3nTextConfig.pad_token_id |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.bos_token_id |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.eos_token_id |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.vocab_size |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.vocab_size_per_layer_input |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.hidden_size |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.head_dim |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.initializer_range |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.use_cache |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.attention_bias |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.attention_dropout |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.hidden_activation |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.sliding_window |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.final_logit_softcapping |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.layer_types |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.hidden_size_per_layer_input |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.num_kv_shared_layers |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.altup_active_idx |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.altup_coef_clip |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.altup_correct_scale |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.altup_num_inputs |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.laurel_rank |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.activation_sparsity_pattern |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.rope_parameters |
1 | 0 | 0 |
attr |
Gemma3nTextConfig.tie_word_embeddings |
1 | 0 | 0 |
meth |
Gemma3nPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Gemma3nConfig.__init__ |
15 | 13 | 0 |
attr |
Gemma3nConfig.text_config |
1 | 0 | 0 |
attr |
Gemma3nConfig.vision_config |
1 | 0 | 0 |
attr |
Gemma3nConfig.audio_config |
1 | 0 | 0 |
attr |
Gemma3nConfig.audio_soft_tokens_per_image |
1 | 0 | 0 |
attr |
Gemma3nConfig.vision_soft_tokens_per_image |
1 | 0 | 0 |
attr |
Gemma3nConfig.boi_token_id |
1 | 0 | 0 |
attr |
Gemma3nConfig.eoi_token_id |
1 | 0 | 0 |
attr |
Gemma3nConfig.image_token_id |
1 | 0 | 0 |
attr |
Gemma3nConfig.boa_token_id |
1 | 0 | 0 |
attr |
Gemma3nConfig.eoa_token_id |
1 | 0 | 0 |
attr |
Gemma3nConfig.audio_token_id |
1 | 0 | 0 |
attr |
Gemma3nConfig.initializer_range |
1 | 0 | 0 |
attr |
Gemma3nConfig.tie_word_embeddings |
1 | 0 | 0 |
meth |
Gemma3nModel.__init__ |
2 | 1 | 0 |
meth |
Gemma3nModel.get_placeholder_mask |
5 | 4 | 0 |
attr |
Gemma3nModel.vocab_size_per_layer_input |
1 | 0 | 0 |
attr |
Gemma3nModel.audio_tower |
1 | 0 | 0 |
attr |
Gemma3nModel.embed_vision |
1 | 0 | 0 |
attr |
Gemma3nModel.embed_audio |
1 | 0 | 0 |
meth |
Gemma3nVisionConfig.__init__ |
10 | 8 | 0 |
attr |
Gemma3nVisionConfig.architecture |
1 | 0 | 0 |
attr |
Gemma3nVisionConfig.initializer_range |
1 | 0 | 0 |
attr |
Gemma3nVisionConfig.do_pooling |
1 | 0 | 0 |
attr |
Gemma3nVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
Gemma3nVisionConfig.vocab_size |
1 | 0 | 0 |
attr |
Gemma3nVisionConfig.vocab_offset |
1 | 0 | 0 |
attr |
Gemma3nVisionConfig.rms_norm_eps |
1 | 0 | 0 |
meth |
Gemma3nAudioConfig.__init__ |
21 | 19 | 0 |
attr |
Gemma3nAudioConfig.input_feat_size |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.hidden_size |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.vocab_size |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.vocab_offset |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.gradient_clipping |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.conf_attention_chunk_size |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.conf_attention_context_left |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.conf_attention_context_right |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.conf_attention_logit_cap |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.conf_num_attention_heads |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.conf_num_hidden_layers |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.conf_conv_kernel_size |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.conf_reduction_factor |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.conf_residual_weight |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.sscp_conv_channel_size |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.sscp_conv_group_norm_eps |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.sscp_conv_kernel_size |
1 | 0 | 0 |
attr |
Gemma3nAudioConfig.sscp_conv_stride_size |
1 | 0 | 0 |
transformers.models.gemma3n.processing_gemma3n (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Gemma3nProcessor.__init__ |
8 | 2 | 0 |
prop |
Gemma3nProcessor.model_input_names |
1 | 0 | 0 |
attr |
Gemma3nProcessor.audio_seq_length |
1 | 0 | 0 |
attr |
Gemma3nProcessor.audio_token_id |
1 | 0 | 0 |
attr |
Gemma3nProcessor.boa_token |
1 | 0 | 0 |
attr |
Gemma3nProcessor.audio_token |
1 | 0 | 0 |
attr |
Gemma3nProcessor.full_audio_sequence |
1 | 0 | 0 |
attr |
Gemma3nProcessor.image_seq_length |
1 | 0 | 0 |
attr |
Gemma3nProcessor.image_token_id |
1 | 0 | 0 |
attr |
Gemma3nProcessor.boi_token |
1 | 0 | 0 |
attr |
Gemma3nProcessor.image_token |
1 | 0 | 0 |
attr |
Gemma3nProcessor.full_image_sequence |
1 | 0 | 0 |
transformers.models.git.configuration_git (62 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GitConfig.__init__ |
20 | 0 | 0 |
attr |
GitConfig.vision_config |
1 | 0 | 0 |
attr |
GitConfig.vocab_size |
1 | 0 | 0 |
attr |
GitConfig.hidden_size |
1 | 0 | 0 |
attr |
GitConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
GitConfig.num_attention_heads |
1 | 0 | 0 |
attr |
GitConfig.hidden_act |
1 | 0 | 0 |
attr |
GitConfig.intermediate_size |
1 | 0 | 0 |
attr |
GitConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
GitConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
GitConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
GitConfig.initializer_range |
1 | 0 | 0 |
attr |
GitConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
GitConfig.use_cache |
1 | 0 | 0 |
attr |
GitConfig.num_image_with_embedding |
1 | 0 | 0 |
attr |
GitConfig.bos_token_id |
1 | 0 | 0 |
attr |
GitConfig.eos_token_id |
1 | 0 | 0 |
attr |
GitConfig.pad_token_id |
1 | 0 | 0 |
attr |
GitConfig.tie_word_embeddings |
1 | 0 | 0 |
meth |
GitVisionConfig.__init__ |
13 | 0 | 0 |
attr |
GitVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
GitVisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
GitVisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
GitVisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
GitVisionConfig.num_channels |
1 | 0 | 0 |
attr |
GitVisionConfig.patch_size |
1 | 0 | 0 |
attr |
GitVisionConfig.image_size |
1 | 0 | 0 |
attr |
GitVisionConfig.initializer_range |
1 | 0 | 0 |
attr |
GitVisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
GitVisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
GitVisionConfig.hidden_act |
1 | 0 | 0 |
transformers.models.git.modeling_git (33 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GitModel.__init__ |
2 | 0 | 0 |
meth |
GitModel.get_input_embeddings |
1 | 0 | 0 |
meth |
GitModel.set_input_embeddings |
2 | 0 | 0 |
meth |
GitModel.forward |
14 | 13 | 0 |
attr |
GitModel.embeddings |
1 | 0 | 0 |
attr |
GitModel.image_encoder |
1 | 0 | 0 |
attr |
GitModel.encoder |
1 | 0 | 0 |
attr |
GitModel.visual_projection |
1 | 0 | 0 |
attr |
GitModel.img_temporal_embedding |
1 | 0 | 0 |
meth |
GitPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
GitForCausalLM.__init__ |
2 | 0 | 0 |
meth |
GitForCausalLM.get_output_embeddings |
1 | 0 | 0 |
meth |
GitForCausalLM.set_output_embeddings |
2 | 0 | 0 |
meth |
GitForCausalLM.forward |
16 | 15 | 0 |
meth |
GitForCausalLM.prepare_inputs_for_generation |
9 | 0 | 0 |
attr |
GitForCausalLM.git |
1 | 0 | 0 |
attr |
GitForCausalLM.output |
1 | 0 | 0 |
meth |
GitVisionModel.__init__ |
2 | 1 | 0 |
meth |
GitVisionModel.forward |
7 | 6 | 0 |
attr |
GitVisionModel.vision_model |
1 | 0 | 0 |
transformers.models.git.processing_git (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GitProcessor.__init__ |
3 | 0 | 0 |
transformers.models.glm.configuration_glm (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GlmConfig.__init__ |
21 | 19 | 0 |
attr |
GlmConfig.vocab_size |
1 | 0 | 0 |
attr |
GlmConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
GlmConfig.hidden_size |
1 | 0 | 0 |
attr |
GlmConfig.intermediate_size |
1 | 0 | 0 |
attr |
GlmConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
GlmConfig.num_attention_heads |
1 | 0 | 0 |
attr |
GlmConfig.head_dim |
1 | 0 | 0 |
attr |
GlmConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
GlmConfig.hidden_act |
1 | 0 | 0 |
attr |
GlmConfig.initializer_range |
1 | 0 | 0 |
attr |
GlmConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
GlmConfig.use_cache |
1 | 0 | 0 |
attr |
GlmConfig.attention_bias |
1 | 0 | 0 |
attr |
GlmConfig.attention_dropout |
1 | 0 | 0 |
attr |
GlmConfig.rope_parameters |
1 | 0 | 0 |
attr |
GlmConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
GlmConfig.pad_token_id |
1 | 0 | 0 |
attr |
GlmConfig.bos_token_id |
1 | 0 | 0 |
attr |
GlmConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.glm.modeling_glm (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GlmForCausalLM.__init__ |
2 | 0 | 0 |
attr |
GlmForCausalLM.model |
1 | 0 | 0 |
attr |
GlmForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
GlmForCausalLM.lm_head |
1 | 0 | 0 |
meth |
GlmModel.__init__ |
2 | 1 | 0 |
attr |
GlmModel.padding_idx |
1 | 0 | 0 |
attr |
GlmModel.vocab_size |
1 | 0 | 0 |
attr |
GlmModel.embed_tokens |
1 | 0 | 0 |
attr |
GlmModel.layers |
1 | 0 | 0 |
attr |
GlmModel.norm |
1 | 0 | 0 |
attr |
GlmModel.rotary_emb |
1 | 0 | 0 |
attr |
GlmModel.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.glm.modular_glm (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
GlmPreTrainedModel |
1 | 0 | 0 |
attr |
GlmModel |
1 | 0 | 0 |
transformers.models.glm4.configuration_glm4 (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Glm4Config.__init__ |
21 | 19 | 0 |
attr |
Glm4Config.vocab_size |
1 | 0 | 0 |
attr |
Glm4Config.max_position_embeddings |
1 | 0 | 0 |
attr |
Glm4Config.hidden_size |
1 | 0 | 0 |
attr |
Glm4Config.intermediate_size |
1 | 0 | 0 |
attr |
Glm4Config.num_hidden_layers |
1 | 0 | 0 |
attr |
Glm4Config.num_attention_heads |
1 | 0 | 0 |
attr |
Glm4Config.head_dim |
1 | 0 | 0 |
attr |
Glm4Config.num_key_value_heads |
1 | 0 | 0 |
attr |
Glm4Config.hidden_act |
1 | 0 | 0 |
attr |
Glm4Config.initializer_range |
1 | 0 | 0 |
attr |
Glm4Config.rms_norm_eps |
1 | 0 | 0 |
attr |
Glm4Config.use_cache |
1 | 0 | 0 |
attr |
Glm4Config.attention_bias |
1 | 0 | 0 |
attr |
Glm4Config.attention_dropout |
1 | 0 | 0 |
attr |
Glm4Config.rope_parameters |
1 | 0 | 0 |
attr |
Glm4Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
Glm4Config.pad_token_id |
1 | 0 | 0 |
attr |
Glm4Config.bos_token_id |
1 | 0 | 0 |
attr |
Glm4Config.eos_token_id |
1 | 0 | 0 |
transformers.models.glm4.modeling_glm4 (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Glm4Model.__init__ |
2 | 1 | 0 |
attr |
Glm4Model.padding_idx |
1 | 0 | 0 |
attr |
Glm4Model.vocab_size |
1 | 0 | 0 |
attr |
Glm4Model.embed_tokens |
1 | 0 | 0 |
attr |
Glm4Model.layers |
1 | 0 | 0 |
attr |
Glm4Model.norm |
1 | 0 | 0 |
attr |
Glm4Model.rotary_emb |
1 | 0 | 0 |
attr |
Glm4Model.gradient_checkpointing |
1 | 0 | 0 |
meth |
Glm4ForCausalLM.__init__ |
2 | 0 | 0 |
attr |
Glm4ForCausalLM.model |
1 | 0 | 0 |
attr |
Glm4ForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Glm4ForCausalLM.lm_head |
1 | 0 | 0 |
transformers.models.glm4.modular_glm4 (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
Glm4Model |
1 | 0 | 0 |
attr |
Glm4PreTrainedModel |
1 | 0 | 0 |
transformers.models.glm46v.configuration_glm46v (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Glm46VConfig.__init__ |
11 | 0 | 0 |
attr |
Glm46VConfig.image_token_id |
1 | 0 | 0 |
attr |
Glm46VConfig.video_token_id |
1 | 0 | 0 |
attr |
Glm46VConfig.video_start_token_id |
1 | 0 | 0 |
attr |
Glm46VConfig.video_end_token_id |
1 | 0 | 0 |
attr |
Glm46VConfig.image_start_token_id |
1 | 0 | 0 |
attr |
Glm46VConfig.image_end_token_id |
1 | 0 | 0 |
attr |
Glm46VConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Glm46VConfig.vision_config |
1 | 0 | 0 |
attr |
Glm46VConfig.text_config |
1 | 0 | 0 |
transformers.models.glm46v.image_processing_glm46v (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Glm46VImageProcessor.__init__ |
14 | 13 | 0 |
meth |
Glm46VImageProcessor._preprocess |
16 | 15 | 0 |
meth |
Glm46VImageProcessor.preprocess |
17 | 16 | 0 |
meth |
Glm46VImageProcessor.get_number_of_image_patches |
4 | 2 | 0 |
attr |
Glm46VImageProcessor.size |
1 | 0 | 0 |
attr |
Glm46VImageProcessor.do_resize |
1 | 0 | 0 |
attr |
Glm46VImageProcessor.resample |
1 | 0 | 0 |
attr |
Glm46VImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
Glm46VImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
Glm46VImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
Glm46VImageProcessor.image_mean |
1 | 0 | 0 |
attr |
Glm46VImageProcessor.image_std |
1 | 0 | 0 |
attr |
Glm46VImageProcessor.patch_size |
1 | 0 | 0 |
attr |
Glm46VImageProcessor.temporal_patch_size |
1 | 0 | 0 |
attr |
Glm46VImageProcessor.merge_size |
1 | 0 | 0 |
attr |
Glm46VImageProcessor.do_convert_rgb |
1 | 0 | 0 |
transformers.models.glm46v.image_processing_glm46v_fast (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Glm46VImageProcessorFast.__init__ |
2 | 1 | 0 |
meth |
Glm46VImageProcessorFast._further_process_kwargs |
3 | 2 | 0 |
meth |
Glm46VImageProcessorFast._preprocess |
16 | 15 | 0 |
meth |
Glm46VImageProcessorFast.get_number_of_image_patches |
4 | 2 | 0 |
transformers.models.glm46v.modeling_glm46v (36 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Glm46VForConditionalGeneration.__init__ |
2 | 0 | 0 |
meth |
Glm46VForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Glm46VForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Glm46VForConditionalGeneration.prepare_inputs_for_generation |
14 | 0 | 0 |
meth |
Glm46VForConditionalGeneration._prepare_position_ids_for_generation |
3 | 0 | 0 |
meth |
Glm46VForConditionalGeneration._expand_inputs_for_generation |
5 | 4 | 0 |
attr |
Glm46VForConditionalGeneration.model |
1 | 0 | 0 |
attr |
Glm46VForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
Glm46VModel.__init__ |
2 | 0 | 0 |
meth |
Glm46VModel.get_input_embeddings |
1 | 0 | 0 |
meth |
Glm46VModel.set_input_embeddings |
2 | 0 | 0 |
meth |
Glm46VModel.get_vision_position_ids |
7 | 6 | 0 |
meth |
Glm46VModel.get_rope_index |
7 | 6 | 0 |
meth |
Glm46VModel.get_placeholder_mask |
5 | 4 | 0 |
attr |
Glm46VModel.visual |
1 | 0 | 0 |
attr |
Glm46VModel.language_model |
1 | 0 | 0 |
attr |
Glm46VModel.rope_deltas |
1 | 0 | 0 |
transformers.models.glm46v.modular_glm46v (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Glm46VConfig.__init__ |
11 | 0 | 0 |
attr |
Glm46VConfig.image_token_id |
1 | 0 | 0 |
attr |
Glm46VConfig.video_token_id |
1 | 0 | 0 |
attr |
Glm46VConfig.video_start_token_id |
1 | 0 | 0 |
attr |
Glm46VConfig.video_end_token_id |
1 | 0 | 0 |
attr |
Glm46VConfig.image_start_token_id |
1 | 0 | 0 |
attr |
Glm46VConfig.image_end_token_id |
1 | 0 | 0 |
attr |
Glm46VConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Glm46VConfig.vision_config |
1 | 0 | 0 |
attr |
Glm46VConfig.text_config |
1 | 0 | 0 |
meth |
Glm46VVideoProcessor.sample_frames |
4 | 2 | 0 |
meth |
Glm46VProcessor.replace_frame_token_id |
2 | 0 | 0 |
meth |
Glm46VPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Glm46VModel.__init__ |
2 | 0 | 0 |
attr |
Glm46VModel.visual |
1 | 0 | 0 |
attr |
Glm46VModel.language_model |
1 | 0 | 0 |
transformers.models.glm46v.processing_glm46v (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Glm46VProcessor.__init__ |
6 | 0 | 0 |
meth |
Glm46VProcessor._get_num_multimodal_tokens |
4 | 0 | 0 |
meth |
Glm46VProcessor.post_process_image_text_to_text |
5 | 0 | 0 |
meth |
Glm46VProcessor.replace_frame_token_id |
2 | 0 | 0 |
prop |
Glm46VProcessor.model_input_names |
1 | 0 | 0 |
attr |
Glm46VProcessor.image_token |
1 | 0 | 0 |
attr |
Glm46VProcessor.video_token |
1 | 0 | 0 |
attr |
Glm46VProcessor.image_token_id |
1 | 0 | 0 |
attr |
Glm46VProcessor.video_token_id |
1 | 0 | 0 |
attr |
Glm46VProcessor.video_start_id |
1 | 0 | 0 |
attr |
Glm46VProcessor.video_end_id |
1 | 0 | 0 |
transformers.models.glm46v.video_processing_glm46v (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Glm46VVideoProcessor.__init__ |
2 | 1 | 0 |
meth |
Glm46VVideoProcessor._further_process_kwargs |
3 | 2 | 0 |
meth |
Glm46VVideoProcessor.sample_frames |
4 | 2 | 0 |
meth |
Glm46VVideoProcessor._preprocess |
16 | 14 | 0 |
transformers.models.glm4_moe.configuration_glm4_moe (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Glm4MoeConfig.__init__ |
30 | 28 | 0 |
attr |
Glm4MoeConfig.vocab_size |
1 | 0 | 0 |
attr |
Glm4MoeConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Glm4MoeConfig.hidden_size |
1 | 0 | 0 |
attr |
Glm4MoeConfig.intermediate_size |
1 | 0 | 0 |
attr |
Glm4MoeConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Glm4MoeConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Glm4MoeConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Glm4MoeConfig.hidden_act |
1 | 0 | 0 |
attr |
Glm4MoeConfig.initializer_range |
1 | 0 | 0 |
attr |
Glm4MoeConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Glm4MoeConfig.use_cache |
1 | 0 | 0 |
attr |
Glm4MoeConfig.attention_bias |
1 | 0 | 0 |
attr |
Glm4MoeConfig.attention_dropout |
1 | 0 | 0 |
attr |
Glm4MoeConfig.rope_parameters |
1 | 0 | 0 |
attr |
Glm4MoeConfig.moe_intermediate_size |
1 | 0 | 0 |
attr |
Glm4MoeConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
Glm4MoeConfig.n_group |
1 | 0 | 0 |
attr |
Glm4MoeConfig.topk_group |
1 | 0 | 0 |
attr |
Glm4MoeConfig.n_shared_experts |
1 | 0 | 0 |
attr |
Glm4MoeConfig.n_routed_experts |
1 | 0 | 0 |
attr |
Glm4MoeConfig.routed_scaling_factor |
1 | 0 | 0 |
attr |
Glm4MoeConfig.first_k_dense_replace |
1 | 0 | 0 |
attr |
Glm4MoeConfig.norm_topk_prob |
1 | 0 | 0 |
attr |
Glm4MoeConfig.use_qk_norm |
1 | 0 | 0 |
attr |
Glm4MoeConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Glm4MoeConfig.bos_token_id |
1 | 0 | 0 |
attr |
Glm4MoeConfig.eos_token_id |
1 | 0 | 0 |
attr |
Glm4MoeConfig.pad_token_id |
1 | 0 | 0 |
transformers.models.glm4_moe.modeling_glm4_moe (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Glm4MoeModel.__init__ |
2 | 1 | 0 |
attr |
Glm4MoeModel.padding_idx |
1 | 0 | 0 |
attr |
Glm4MoeModel.vocab_size |
1 | 0 | 0 |
attr |
Glm4MoeModel.embed_tokens |
1 | 0 | 0 |
attr |
Glm4MoeModel.layers |
1 | 0 | 0 |
attr |
Glm4MoeModel.norm |
1 | 0 | 0 |
attr |
Glm4MoeModel.rotary_emb |
1 | 0 | 0 |
attr |
Glm4MoeModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
Glm4MoeForCausalLM.__init__ |
2 | 0 | 0 |
attr |
Glm4MoeForCausalLM.model |
1 | 0 | 0 |
attr |
Glm4MoeForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Glm4MoeForCausalLM.lm_head |
1 | 0 | 0 |
meth |
Glm4MoePreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.glm4_moe.modular_glm4_moe (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Glm4MoeConfig.__init__ |
30 | 28 | 0 |
attr |
Glm4MoeConfig.vocab_size |
1 | 0 | 0 |
attr |
Glm4MoeConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Glm4MoeConfig.hidden_size |
1 | 0 | 0 |
attr |
Glm4MoeConfig.intermediate_size |
1 | 0 | 0 |
attr |
Glm4MoeConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Glm4MoeConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Glm4MoeConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Glm4MoeConfig.hidden_act |
1 | 0 | 0 |
attr |
Glm4MoeConfig.initializer_range |
1 | 0 | 0 |
attr |
Glm4MoeConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Glm4MoeConfig.use_cache |
1 | 0 | 0 |
attr |
Glm4MoeConfig.attention_bias |
1 | 0 | 0 |
attr |
Glm4MoeConfig.attention_dropout |
1 | 0 | 0 |
attr |
Glm4MoeConfig.rope_parameters |
1 | 0 | 0 |
attr |
Glm4MoeConfig.moe_intermediate_size |
1 | 0 | 0 |
attr |
Glm4MoeConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
Glm4MoeConfig.n_group |
1 | 0 | 0 |
attr |
Glm4MoeConfig.topk_group |
1 | 0 | 0 |
attr |
Glm4MoeConfig.n_shared_experts |
1 | 0 | 0 |
attr |
Glm4MoeConfig.n_routed_experts |
1 | 0 | 0 |
attr |
Glm4MoeConfig.routed_scaling_factor |
1 | 0 | 0 |
attr |
Glm4MoeConfig.first_k_dense_replace |
1 | 0 | 0 |
attr |
Glm4MoeConfig.norm_topk_prob |
1 | 0 | 0 |
attr |
Glm4MoeConfig.use_qk_norm |
1 | 0 | 0 |
attr |
Glm4MoeConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Glm4MoeConfig.bos_token_id |
1 | 0 | 0 |
attr |
Glm4MoeConfig.eos_token_id |
1 | 0 | 0 |
attr |
Glm4MoeConfig.pad_token_id |
1 | 0 | 0 |
transformers.models.glm4_moe_lite.configuration_glm4_moe_lite (39 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Glm4MoeLiteConfig.__init__ |
36 | 33 | 0 |
attr |
Glm4MoeLiteConfig.vocab_size |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.hidden_size |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.intermediate_size |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.mlp_layer_types |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.moe_intermediate_size |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.n_shared_experts |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.n_routed_experts |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.routed_scaling_factor |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.kv_lora_rank |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.q_lora_rank |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.qk_rope_head_dim |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.v_head_dim |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.qk_nope_head_dim |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.qk_head_dim |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.head_dim |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.n_group |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.topk_group |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.norm_topk_prob |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.rope_interleave |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.hidden_act |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.initializer_range |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.pretraining_tp |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.use_cache |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.attention_bias |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.attention_dropout |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.rope_parameters |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.pad_token_id |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.bos_token_id |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.eos_token_id |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.glm4_moe_lite.modeling_glm4_moe_lite (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Glm4MoeLiteForCausalLM.__init__ |
2 | 0 | 0 |
attr |
Glm4MoeLiteForCausalLM.model |
1 | 0 | 0 |
attr |
Glm4MoeLiteForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Glm4MoeLiteForCausalLM.lm_head |
1 | 0 | 0 |
meth |
Glm4MoeLiteModel.__init__ |
2 | 1 | 0 |
attr |
Glm4MoeLiteModel.padding_idx |
1 | 0 | 0 |
attr |
Glm4MoeLiteModel.vocab_size |
1 | 0 | 0 |
attr |
Glm4MoeLiteModel.embed_tokens |
1 | 0 | 0 |
attr |
Glm4MoeLiteModel.layers |
1 | 0 | 0 |
attr |
Glm4MoeLiteModel.norm |
1 | 0 | 0 |
attr |
Glm4MoeLiteModel.rotary_emb |
1 | 0 | 0 |
attr |
Glm4MoeLiteModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
Glm4MoeLitePreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.glm4_moe_lite.modular_glm4_moe_lite (39 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Glm4MoeLiteConfig.__init__ |
36 | 33 | 0 |
attr |
Glm4MoeLiteConfig.vocab_size |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.hidden_size |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.intermediate_size |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.mlp_layer_types |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.moe_intermediate_size |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.n_shared_experts |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.n_routed_experts |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.routed_scaling_factor |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.kv_lora_rank |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.q_lora_rank |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.qk_rope_head_dim |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.v_head_dim |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.qk_nope_head_dim |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.qk_head_dim |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.head_dim |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.n_group |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.topk_group |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.norm_topk_prob |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.rope_interleave |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.hidden_act |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.initializer_range |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.pretraining_tp |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.use_cache |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.attention_bias |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.attention_dropout |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.rope_parameters |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.pad_token_id |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.bos_token_id |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.eos_token_id |
1 | 0 | 0 |
attr |
Glm4MoeLiteConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.glm4v.configuration_glm4v (68 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Glm4vConfig.__init__ |
11 | 0 | 0 |
attr |
Glm4vConfig.image_token_id |
1 | 0 | 0 |
attr |
Glm4vConfig.video_token_id |
1 | 0 | 0 |
attr |
Glm4vConfig.video_start_token_id |
1 | 0 | 0 |
attr |
Glm4vConfig.video_end_token_id |
1 | 0 | 0 |
attr |
Glm4vConfig.image_start_token_id |
1 | 0 | 0 |
attr |
Glm4vConfig.image_end_token_id |
1 | 0 | 0 |
attr |
Glm4vConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Glm4vConfig.vision_config |
1 | 0 | 0 |
attr |
Glm4vConfig.text_config |
1 | 0 | 0 |
meth |
Glm4vVisionConfig.__init__ |
17 | 0 | 0 |
attr |
Glm4vVisionConfig.depth |
1 | 0 | 0 |
attr |
Glm4vVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
Glm4vVisionConfig.hidden_act |
1 | 0 | 0 |
attr |
Glm4vVisionConfig.num_heads |
1 | 0 | 0 |
attr |
Glm4vVisionConfig.in_channels |
1 | 0 | 0 |
attr |
Glm4vVisionConfig.image_size |
1 | 0 | 0 |
attr |
Glm4vVisionConfig.patch_size |
1 | 0 | 0 |
attr |
Glm4vVisionConfig.spatial_merge_size |
1 | 0 | 0 |
attr |
Glm4vVisionConfig.temporal_patch_size |
1 | 0 | 0 |
attr |
Glm4vVisionConfig.out_hidden_size |
1 | 0 | 0 |
attr |
Glm4vVisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
Glm4vVisionConfig.initializer_range |
1 | 0 | 0 |
attr |
Glm4vVisionConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Glm4vVisionConfig.attention_bias |
1 | 0 | 0 |
attr |
Glm4vVisionConfig.attention_dropout |
1 | 0 | 0 |
meth |
Glm4vTextConfig.__init__ |
16 | 14 | 0 |
attr |
Glm4vTextConfig.vocab_size |
1 | 0 | 0 |
attr |
Glm4vTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Glm4vTextConfig.hidden_size |
1 | 0 | 0 |
attr |
Glm4vTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
Glm4vTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Glm4vTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Glm4vTextConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Glm4vTextConfig.hidden_act |
1 | 0 | 0 |
attr |
Glm4vTextConfig.initializer_range |
1 | 0 | 0 |
attr |
Glm4vTextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Glm4vTextConfig.use_cache |
1 | 0 | 0 |
attr |
Glm4vTextConfig.attention_dropout |
1 | 0 | 0 |
attr |
Glm4vTextConfig.rope_parameters |
1 | 0 | 0 |
attr |
Glm4vTextConfig.pad_token_id |
1 | 0 | 0 |
transformers.models.glm4v.image_processing_glm4v (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Glm4vImageProcessor.init |
14 | 13 | 0 |
meth |
Glm4vImageProcessor._preprocess |
16 | 15 | 0 |
meth |
Glm4vImageProcessor.preprocess |
17 | 16 | 0 |
meth |
Glm4vImageProcessor.get_number_of_image_patches |
4 | 2 | 0 |
attr |
Glm4vImageProcessor.size |
1 | 0 | 0 |
attr |
Glm4vImageProcessor.do_resize |
1 | 0 | 0 |
attr |
Glm4vImageProcessor.resample |
1 | 0 | 0 |
attr |
Glm4vImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
Glm4vImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
Glm4vImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
Glm4vImageProcessor.image_mean |
1 | 0 | 0 |
attr |
Glm4vImageProcessor.image_std |
1 | 0 | 0 |
attr |
Glm4vImageProcessor.patch_size |
1 | 0 | 0 |
attr |
Glm4vImageProcessor.temporal_patch_size |
1 | 0 | 0 |
attr |
Glm4vImageProcessor.merge_size |
1 | 0 | 0 |
attr |
Glm4vImageProcessor.do_convert_rgb |
1 | 0 | 0 |
transformers.models.glm4v.image_processing_glm4v_fast (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Glm4vImageProcessorFast.init |
2 | 1 | 0 |
meth |
Glm4vImageProcessorFast._further_process_kwargs |
3 | 2 | 0 |
meth |
Glm4vImageProcessorFast._preprocess |
16 | 15 | 0 |
meth |
Glm4vImageProcessorFast.get_number_of_image_patches |
4 | 2 | 0 |
transformers.models.glm4v.modeling_glm4v (60 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Glm4vModel.init |
2 | 0 | 0 |
meth |
Glm4vModel.get_input_embeddings |
1 | 0 | 0 |
meth |
Glm4vModel.set_input_embeddings |
2 | 0 | 0 |
meth |
Glm4vModel.get_vision_position_ids |
7 | 6 | 0 |
meth |
Glm4vModel.get_rope_index |
7 | 6 | 0 |
meth |
Glm4vModel.get_placeholder_mask |
5 | 4 | 0 |
attr |
Glm4vModel.visual |
1 | 0 | 0 |
attr |
Glm4vModel.language_model |
1 | 0 | 0 |
attr |
Glm4vModel.rope_deltas |
1 | 0 | 0 |
meth |
Glm4vTextModel.init |
2 | 1 | 0 |
attr |
Glm4vTextModel.padding_idx |
1 | 0 | 0 |
attr |
Glm4vTextModel.vocab_size |
1 | 0 | 0 |
attr |
Glm4vTextModel.embed_tokens |
1 | 0 | 0 |
attr |
Glm4vTextModel.layers |
1 | 0 | 0 |
attr |
Glm4vTextModel.norm |
1 | 0 | 0 |
attr |
Glm4vTextModel.rotary_emb |
1 | 0 | 0 |
attr |
Glm4vTextModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
Glm4vForConditionalGeneration.init |
2 | 0 | 0 |
meth |
Glm4vForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Glm4vForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Glm4vForConditionalGeneration.prepare_inputs_for_generation |
14 | 0 | 0 |
meth |
Glm4vForConditionalGeneration._prepare_position_ids_for_generation |
3 | 0 | 0 |
meth |
Glm4vForConditionalGeneration._expand_inputs_for_generation |
5 | 4 | 0 |
attr |
Glm4vForConditionalGeneration.model |
1 | 0 | 0 |
attr |
Glm4vForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
Glm4vVisionModel.init |
2 | 1 | 0 |
meth |
Glm4vVisionModel.rot_pos_emb |
2 | 0 | 0 |
attr |
Glm4vVisionModel.spatial_merge_size |
1 | 0 | 0 |
attr |
Glm4vVisionModel.patch_size |
1 | 0 | 0 |
attr |
Glm4vVisionModel.embeddings |
1 | 0 | 0 |
attr |
Glm4vVisionModel.patch_embed |
1 | 0 | 0 |
attr |
Glm4vVisionModel.rotary_pos_emb |
1 | 0 | 0 |
attr |
Glm4vVisionModel.blocks |
1 | 0 | 0 |
attr |
Glm4vVisionModel.merger |
1 | 0 | 0 |
attr |
Glm4vVisionModel.post_conv_layernorm |
1 | 0 | 0 |
attr |
Glm4vVisionModel.downsample |
1 | 0 | 0 |
attr |
Glm4vVisionModel.post_layernorm |
1 | 0 | 0 |
attr |
Glm4vVisionModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
Glm4vPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.glm4v.modular_glm4v (119 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Glm4vVisionConfig.init |
17 | 0 | 0 |
attr |
Glm4vVisionConfig.depth |
1 | 0 | 0 |
attr |
Glm4vVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
Glm4vVisionConfig.hidden_act |
1 | 0 | 0 |
attr |
Glm4vVisionConfig.num_heads |
1 | 0 | 0 |
attr |
Glm4vVisionConfig.in_channels |
1 | 0 | 0 |
attr |
Glm4vVisionConfig.image_size |
1 | 0 | 0 |
attr |
Glm4vVisionConfig.patch_size |
1 | 0 | 0 |
attr |
Glm4vVisionConfig.spatial_merge_size |
1 | 0 | 0 |
attr |
Glm4vVisionConfig.temporal_patch_size |
1 | 0 | 0 |
attr |
Glm4vVisionConfig.out_hidden_size |
1 | 0 | 0 |
attr |
Glm4vVisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
Glm4vVisionConfig.initializer_range |
1 | 0 | 0 |
attr |
Glm4vVisionConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Glm4vVisionConfig.attention_bias |
1 | 0 | 0 |
attr |
Glm4vVisionConfig.attention_dropout |
1 | 0 | 0 |
meth |
Glm4vTextModel.init |
2 | 1 | 0 |
attr |
Glm4vTextModel.layers |
1 | 0 | 0 |
attr |
Glm4vTextModel.norm |
1 | 0 | 0 |
attr |
Glm4vTextModel.rotary_emb |
1 | 0 | 0 |
meth |
Glm4vProcessor.init |
6 | 0 | 0 |
meth |
Glm4vProcessor.replace_frame_token_id |
2 | 0 | 0 |
attr |
Glm4vProcessor.image_token |
1 | 0 | 0 |
attr |
Glm4vProcessor.video_token |
1 | 0 | 0 |
attr |
Glm4vProcessor.video_start_id |
1 | 0 | 0 |
attr |
Glm4vProcessor.video_end_id |
1 | 0 | 0 |
meth |
Glm4vVisionModel.init |
2 | 1 | 0 |
meth |
Glm4vVisionModel.rot_pos_emb |
2 | 0 | 0 |
attr |
Glm4vVisionModel.spatial_merge_size |
1 | 0 | 0 |
attr |
Glm4vVisionModel.patch_size |
1 | 0 | 0 |
attr |
Glm4vVisionModel.embeddings |
1 | 0 | 0 |
attr |
Glm4vVisionModel.patch_embed |
1 | 0 | 0 |
attr |
Glm4vVisionModel.rotary_pos_emb |
1 | 0 | 0 |
attr |
Glm4vVisionModel.blocks |
1 | 0 | 0 |
attr |
Glm4vVisionModel.merger |
1 | 0 | 0 |
attr |
Glm4vVisionModel.post_conv_layernorm |
1 | 0 | 0 |
attr |
Glm4vVisionModel.downsample |
1 | 0 | 0 |
attr |
Glm4vVisionModel.post_layernorm |
1 | 0 | 0 |
attr |
Glm4vVisionModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
Glm4vPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Glm4vModel.init |
2 | 0 | 0 |
meth |
Glm4vModel.get_placeholder_mask |
5 | 4 | 0 |
meth |
Glm4vModel.get_rope_index |
7 | 6 | 0 |
attr |
Glm4vModel.visual |
1 | 0 | 0 |
meth |
Glm4vTextConfig.init |
16 | 14 | 0 |
attr |
Glm4vTextConfig.vocab_size |
1 | 0 | 0 |
attr |
Glm4vTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Glm4vTextConfig.hidden_size |
1 | 0 | 0 |
attr |
Glm4vTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
Glm4vTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Glm4vTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Glm4vTextConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Glm4vTextConfig.hidden_act |
1 | 0 | 0 |
attr |
Glm4vTextConfig.initializer_range |
1 | 0 | 0 |
attr |
Glm4vTextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Glm4vTextConfig.use_cache |
1 | 0 | 0 |
attr |
Glm4vTextConfig.attention_dropout |
1 | 0 | 0 |
attr |
Glm4vTextConfig.rope_parameters |
1 | 0 | 0 |
attr |
Glm4vTextConfig.pad_token_id |
1 | 0 | 0 |
meth |
Glm4vForConditionalGeneration.prepare_inputs_for_generation |
14 | 0 | 0 |
meth |
Glm4vConfig.init |
11 | 0 | 0 |
attr |
Glm4vConfig.image_token_id |
1 | 0 | 0 |
attr |
Glm4vConfig.video_token_id |
1 | 0 | 0 |
attr |
Glm4vConfig.video_start_token_id |
1 | 0 | 0 |
attr |
Glm4vConfig.video_end_token_id |
1 | 0 | 0 |
attr |
Glm4vConfig.image_start_token_id |
1 | 0 | 0 |
attr |
Glm4vConfig.image_end_token_id |
1 | 0 | 0 |
attr |
Glm4vConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Glm4vConfig.vision_config |
1 | 0 | 0 |
attr |
Glm4vConfig.text_config |
1 | 0 | 0 |
transformers.models.glm4v.processing_glm4v (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Glm4vProcessor.init |
6 | 0 | 0 |
meth |
Glm4vProcessor._get_num_multimodal_tokens |
4 | 0 | 0 |
meth |
Glm4vProcessor.post_process_image_text_to_text |
5 | 0 | 0 |
meth |
Glm4vProcessor.replace_frame_token_id |
2 | 0 | 0 |
prop |
Glm4vProcessor.model_input_names |
1 | 0 | 0 |
attr |
Glm4vProcessor.image_token |
1 | 0 | 0 |
attr |
Glm4vProcessor.video_token |
1 | 0 | 0 |
attr |
Glm4vProcessor.image_token_id |
1 | 0 | 0 |
attr |
Glm4vProcessor.video_token_id |
1 | 0 | 0 |
attr |
Glm4vProcessor.video_start_id |
1 | 0 | 0 |
attr |
Glm4vProcessor.video_end_id |
1 | 0 | 0 |
transformers.models.glm4v.video_processing_glm4v (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Glm4vVideoProcessor.init |
2 | 1 | 0 |
meth |
Glm4vVideoProcessor._further_process_kwargs |
3 | 2 | 0 |
meth |
Glm4vVideoProcessor.sample_frames |
4 | 2 | 0 |
meth |
Glm4vVideoProcessor._preprocess |
16 | 14 | 0 |
transformers.models.glm4v_moe.configuration_glm4v_moe (81 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Glm4vMoeConfig.init |
11 | 0 | 0 |
attr |
Glm4vMoeConfig.image_token_id |
1 | 0 | 0 |
attr |
Glm4vMoeConfig.video_token_id |
1 | 0 | 0 |
attr |
Glm4vMoeConfig.video_start_token_id |
1 | 0 | 0 |
attr |
Glm4vMoeConfig.video_end_token_id |
1 | 0 | 0 |
attr |
Glm4vMoeConfig.image_start_token_id |
1 | 0 | 0 |
attr |
Glm4vMoeConfig.image_end_token_id |
1 | 0 | 0 |
attr |
Glm4vMoeConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Glm4vMoeConfig.vision_config |
1 | 0 | 0 |
attr |
Glm4vMoeConfig.text_config |
1 | 0 | 0 |
meth |
Glm4vMoeVisionConfig.init |
17 | 0 | 0 |
attr |
Glm4vMoeVisionConfig.depth |
1 | 0 | 0 |
attr |
Glm4vMoeVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
Glm4vMoeVisionConfig.hidden_act |
1 | 0 | 0 |
attr |
Glm4vMoeVisionConfig.num_heads |
1 | 0 | 0 |
attr |
Glm4vMoeVisionConfig.in_channels |
1 | 0 | 0 |
attr |
Glm4vMoeVisionConfig.image_size |
1 | 0 | 0 |
attr |
Glm4vMoeVisionConfig.patch_size |
1 | 0 | 0 |
attr |
Glm4vMoeVisionConfig.spatial_merge_size |
1 | 0 | 0 |
attr |
Glm4vMoeVisionConfig.temporal_patch_size |
1 | 0 | 0 |
attr |
Glm4vMoeVisionConfig.out_hidden_size |
1 | 0 | 0 |
attr |
Glm4vMoeVisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
Glm4vMoeVisionConfig.initializer_range |
1 | 0 | 0 |
attr |
Glm4vMoeVisionConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Glm4vMoeVisionConfig.attention_bias |
1 | 0 | 0 |
attr |
Glm4vMoeVisionConfig.attention_dropout |
1 | 0 | 0 |
meth |
Glm4vMoeTextConfig.init |
29 | 27 | 0 |
attr |
Glm4vMoeTextConfig.pad_token_id |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.eos_token_id |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.bos_token_id |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.vocab_size |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.hidden_size |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.hidden_act |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.initializer_range |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.use_cache |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.attention_bias |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.attention_dropout |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.rope_parameters |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.moe_intermediate_size |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.n_group |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.topk_group |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.n_shared_experts |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.n_routed_experts |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.routed_scaling_factor |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.first_k_dense_replace |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.norm_topk_prob |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.router_aux_loss_coef |
1 | 0 | 0 |
transformers.models.glm4v_moe.modeling_glm4v_moe (62 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Glm4vMoeForConditionalGeneration.init |
2 | 0 | 0 |
meth |
Glm4vMoeForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Glm4vMoeForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Glm4vMoeForConditionalGeneration.prepare_inputs_for_generation |
14 | 0 | 0 |
meth |
Glm4vMoeForConditionalGeneration._prepare_position_ids_for_generation |
3 | 0 | 0 |
meth |
Glm4vMoeForConditionalGeneration._expand_inputs_for_generation |
5 | 4 | 0 |
attr |
Glm4vMoeForConditionalGeneration.model |
1 | 0 | 0 |
attr |
Glm4vMoeForConditionalGeneration.lm_head |
1 | 0 | 0 |
attr |
Glm4vMoeForConditionalGeneration.num_experts |
1 | 0 | 0 |
attr |
Glm4vMoeForConditionalGeneration.num_experts_per_tok |
1 | 0 | 0 |
meth |
Glm4vMoeVisionModel.init |
2 | 1 | 0 |
meth |
Glm4vMoeVisionModel.rot_pos_emb |
2 | 0 | 0 |
attr |
Glm4vMoeVisionModel.spatial_merge_size |
1 | 0 | 0 |
attr |
Glm4vMoeVisionModel.patch_size |
1 | 0 | 0 |
attr |
Glm4vMoeVisionModel.embeddings |
1 | 0 | 0 |
attr |
Glm4vMoeVisionModel.patch_embed |
1 | 0 | 0 |
attr |
Glm4vMoeVisionModel.rotary_pos_emb |
1 | 0 | 0 |
attr |
Glm4vMoeVisionModel.blocks |
1 | 0 | 0 |
attr |
Glm4vMoeVisionModel.merger |
1 | 0 | 0 |
attr |
Glm4vMoeVisionModel.post_conv_layernorm |
1 | 0 | 0 |
attr |
Glm4vMoeVisionModel.downsample |
1 | 0 | 0 |
attr |
Glm4vMoeVisionModel.post_layernorm |
1 | 0 | 0 |
attr |
Glm4vMoeVisionModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
Glm4vMoeTextModel.init |
2 | 1 | 0 |
attr |
Glm4vMoeTextModel.padding_idx |
1 | 0 | 0 |
attr |
Glm4vMoeTextModel.vocab_size |
1 | 0 | 0 |
attr |
Glm4vMoeTextModel.embed_tokens |
1 | 0 | 0 |
attr |
Glm4vMoeTextModel.layers |
1 | 0 | 0 |
attr |
Glm4vMoeTextModel.norm |
1 | 0 | 0 |
attr |
Glm4vMoeTextModel.rotary_emb |
1 | 0 | 0 |
attr |
Glm4vMoeTextModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
Glm4vMoePreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Glm4vMoeModel.init |
2 | 0 | 0 |
meth |
Glm4vMoeModel.get_input_embeddings |
1 | 0 | 0 |
meth |
Glm4vMoeModel.set_input_embeddings |
2 | 0 | 0 |
meth |
Glm4vMoeModel.get_vision_position_ids |
7 | 6 | 0 |
meth |
Glm4vMoeModel.get_rope_index |
7 | 6 | 0 |
meth |
Glm4vMoeModel.get_placeholder_mask |
5 | 4 | 0 |
attr |
Glm4vMoeModel.visual |
1 | 0 | 0 |
attr |
Glm4vMoeModel.language_model |
1 | 0 | 0 |
attr |
Glm4vMoeModel.rope_deltas |
1 | 0 | 0 |
transformers.models.glm4v_moe.modular_glm4v_moe (48 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Glm4vMoeConfig.init |
11 | 0 | 0 |
meth |
Glm4vMoeForConditionalGeneration.init |
2 | 0 | 0 |
attr |
Glm4vMoeForConditionalGeneration.num_experts |
1 | 0 | 0 |
attr |
Glm4vMoeForConditionalGeneration.num_experts_per_tok |
1 | 0 | 0 |
attr |
Glm4vMoeVisionConfig |
1 | 0 | 0 |
attr |
Glm4vMoeModel |
1 | 0 | 0 |
meth |
Glm4vMoeTextConfig.init |
29 | 27 | 0 |
attr |
Glm4vMoeTextConfig.pad_token_id |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.eos_token_id |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.bos_token_id |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.vocab_size |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.hidden_size |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.hidden_act |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.initializer_range |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.use_cache |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.attention_bias |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.attention_dropout |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.rope_parameters |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.moe_intermediate_size |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.n_group |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.topk_group |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.n_shared_experts |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.n_routed_experts |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.routed_scaling_factor |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.first_k_dense_replace |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.norm_topk_prob |
1 | 0 | 0 |
attr |
Glm4vMoeTextConfig.router_aux_loss_coef |
1 | 0 | 0 |
meth |
Glm4vMoePreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.glm_image.configuration_glm_image (70 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GlmImageTextConfig.init |
19 | 17 | 0 |
attr |
GlmImageTextConfig.vocab_size |
1 | 0 | 0 |
attr |
GlmImageTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
GlmImageTextConfig.hidden_size |
1 | 0 | 0 |
attr |
GlmImageTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
GlmImageTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
GlmImageTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
GlmImageTextConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
GlmImageTextConfig.hidden_act |
1 | 0 | 0 |
attr |
GlmImageTextConfig.initializer_range |
1 | 0 | 0 |
attr |
GlmImageTextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
GlmImageTextConfig.use_cache |
1 | 0 | 0 |
attr |
GlmImageTextConfig.attention_dropout |
1 | 0 | 0 |
attr |
GlmImageTextConfig.rope_parameters |
1 | 0 | 0 |
attr |
GlmImageTextConfig.pad_token_id |
1 | 0 | 0 |
attr |
GlmImageTextConfig.vision_vocab_size |
1 | 0 | 0 |
attr |
GlmImageTextConfig.attention_bias |
1 | 0 | 0 |
attr |
GlmImageTextConfig.eos_token_id |
1 | 0 | 0 |
meth |
GlmImageVisionConfig.init |
15 | 0 | 0 |
attr |
GlmImageVisionConfig.depth |
1 | 0 | 0 |
attr |
GlmImageVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
GlmImageVisionConfig.hidden_act |
1 | 0 | 0 |
attr |
GlmImageVisionConfig.num_heads |
1 | 0 | 0 |
attr |
GlmImageVisionConfig.in_channels |
1 | 0 | 0 |
attr |
GlmImageVisionConfig.image_size |
1 | 0 | 0 |
attr |
GlmImageVisionConfig.patch_size |
1 | 0 | 0 |
attr |
GlmImageVisionConfig.spatial_merge_size |
1 | 0 | 0 |
attr |
GlmImageVisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
GlmImageVisionConfig.initializer_range |
1 | 0 | 0 |
attr |
GlmImageVisionConfig.attention_bias |
1 | 0 | 0 |
attr |
GlmImageVisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
GlmImageVisionConfig.layer_norm_eps |
1 | 0 | 0 |
meth |
GlmImageConfig.init |
9 | 1 | 0 |
attr |
GlmImageConfig.image_token_id |
1 | 0 | 0 |
attr |
GlmImageConfig.image_start_token_id |
1 | 0 | 0 |
attr |
GlmImageConfig.image_end_token_id |
1 | 0 | 0 |
attr |
GlmImageConfig.text_config |
1 | 0 | 0 |
attr |
GlmImageConfig.vision_config |
1 | 0 | 0 |
attr |
GlmImageConfig.vq_config |
1 | 0 | 0 |
attr |
GlmImageConfig.tie_word_embeddings |
1 | 0 | 0 |
meth |
GlmImageVQVAEConfig.init |
7 | 4 | 0 |
attr |
GlmImageVQVAEConfig.embed_dim |
1 | 0 | 0 |
attr |
GlmImageVQVAEConfig.num_embeddings |
1 | 0 | 0 |
attr |
GlmImageVQVAEConfig.latent_channels |
1 | 0 | 0 |
attr |
GlmImageVQVAEConfig.in_channels |
1 | 0 | 0 |
attr |
GlmImageVQVAEConfig.initializer_range |
1 | 0 | 0 |
transformers.models.glm_image.image_processing_glm_image (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GlmImageImageProcessor.init |
16 | 15 | 0 |
meth |
GlmImageImageProcessor._preprocess |
16 | 15 | 0 |
meth |
GlmImageImageProcessor.preprocess |
19 | 18 | 0 |
meth |
GlmImageImageProcessor.get_number_of_image_patches |
4 | 2 | 0 |
attr |
GlmImageImageProcessor.min_pixels |
1 | 0 | 0 |
attr |
GlmImageImageProcessor.max_pixels |
1 | 0 | 0 |
attr |
GlmImageImageProcessor.size |
1 | 0 | 0 |
attr |
GlmImageImageProcessor.do_resize |
1 | 0 | 0 |
attr |
GlmImageImageProcessor.resample |
1 | 0 | 0 |
attr |
GlmImageImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
GlmImageImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
GlmImageImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
GlmImageImageProcessor.image_mean |
1 | 0 | 0 |
attr |
GlmImageImageProcessor.image_std |
1 | 0 | 0 |
attr |
GlmImageImageProcessor.patch_size |
1 | 0 | 0 |
attr |
GlmImageImageProcessor.temporal_patch_size |
1 | 0 | 0 |
attr |
GlmImageImageProcessor.merge_size |
1 | 0 | 0 |
attr |
GlmImageImageProcessor.do_convert_rgb |
1 | 0 | 0 |
transformers.models.glm_image.image_processing_glm_image_fast (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GlmImageImageProcessorFast.init |
2 | 1 | 0 |
meth |
GlmImageImageProcessorFast._further_process_kwargs |
5 | 4 | 0 |
meth |
GlmImageImageProcessorFast._preprocess |
16 | 14 | 0 |
meth |
GlmImageImageProcessorFast.get_number_of_image_patches |
4 | 2 | 0 |
transformers.models.glm_image.modeling_glm_image (55 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GlmImageForConditionalGeneration.init |
2 | 0 | 0 |
meth |
GlmImageForConditionalGeneration.get_image_tokens |
3 | 2 | 0 |
meth |
GlmImageForConditionalGeneration.prepare_inputs_for_generation |
13 | 0 | 0 |
meth |
GlmImageForConditionalGeneration._expand_inputs_for_generation |
5 | 4 | 0 |
attr |
GlmImageForConditionalGeneration.model |
1 | 0 | 0 |
attr |
GlmImageForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
GlmImageTextModel.init |
2 | 1 | 0 |
attr |
GlmImageTextModel.padding_idx |
1 | 0 | 0 |
attr |
GlmImageTextModel.vocab_size |
1 | 0 | 0 |
attr |
GlmImageTextModel.embed_tokens |
1 | 0 | 0 |
attr |
GlmImageTextModel.layers |
1 | 0 | 0 |
attr |
GlmImageTextModel.norm |
1 | 0 | 0 |
attr |
GlmImageTextModel.rotary_emb |
1 | 0 | 0 |
attr |
GlmImageTextModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
GlmImageVQVAE.init |
2 | 1 | 0 |
meth |
GlmImageVQVAE.encode |
2 | 1 | 0 |
attr |
GlmImageVQVAE.quantize |
1 | 0 | 0 |
attr |
GlmImageVQVAE.quant_conv |
1 | 0 | 0 |
attr |
GlmImageVQVAE.post_quant_conv |
1 | 0 | 0 |
meth |
GlmImagePreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
GlmImageVisionModel.rot_pos_emb |
2 | 0 | 0 |
attr |
GlmImageVisionModel.spatial_merge_size |
1 | 0 | 0 |
attr |
GlmImageVisionModel.patch_size |
1 | 0 | 0 |
attr |
GlmImageVisionModel.embeddings |
1 | 0 | 0 |
attr |
GlmImageVisionModel.patch_embed |
1 | 0 | 0 |
attr |
GlmImageVisionModel.blocks |
1 | 0 | 0 |
attr |
GlmImageVisionModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
GlmImageVisionModel.head_dim |
1 | 0 | 0 |
meth |
GlmImageModel.init |
2 | 0 | 0 |
meth |
GlmImageModel.get_input_embeddings |
1 | 0 | 0 |
meth |
GlmImageModel.set_input_embeddings |
2 | 0 | 0 |
meth |
GlmImageModel.get_vision_position_ids |
7 | 6 | 0 |
meth |
GlmImageModel.get_rope_index |
6 | 5 | 0 |
meth |
GlmImageModel.get_placeholder_mask |
3 | 2 | 0 |
attr |
GlmImageModel.visual |
1 | 0 | 0 |
attr |
GlmImageModel.language_model |
1 | 0 | 0 |
attr |
GlmImageModel.rope_deltas |
1 | 0 | 0 |
attr |
GlmImageModel.vqmodel |
1 | 0 | 0 |
transformers.models.glm_image.modular_glm_image (92 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GlmImageTextConfig.init |
8 | 6 | 0 |
attr |
GlmImageTextConfig.vision_vocab_size |
1 | 0 | 0 |
attr |
GlmImageTextConfig.attention_bias |
1 | 0 | 0 |
attr |
GlmImageTextConfig.eos_token_id |
1 | 0 | 0 |
meth |
GlmImageForConditionalGeneration.init |
2 | 0 | 0 |
meth |
GlmImageForConditionalGeneration.get_image_tokens |
3 | 2 | 0 |
meth |
GlmImageForConditionalGeneration.prepare_inputs_for_generation |
13 | 0 | 0 |
meth |
GlmImageForConditionalGeneration._expand_inputs_for_generation |
5 | 4 | 0 |
attr |
GlmImageForConditionalGeneration.model |
1 | 0 | 0 |
attr |
GlmImageForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
GlmImageVisionModel.init |
2 | 1 | 0 |
meth |
GlmImageVisionModel.rot_pos_emb |
2 | 0 | 0 |
attr |
GlmImageVisionModel.head_dim |
1 | 0 | 0 |
meth |
GlmImageVisionConfig.init |
15 | 0 | 0 |
attr |
GlmImageVisionConfig.layer_norm_eps |
1 | 0 | 0 |
meth |
GlmImageVQVAE.init |
2 | 1 | 0 |
meth |
GlmImageVQVAE.encode |
2 | 0 | 0 |
meth |
GlmImagePreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
GlmImageModel.init |
2 | 0 | 0 |
meth |
GlmImageModel.get_rope_index |
6 | 5 | 0 |
meth |
GlmImageModel.get_video_features |
1 | 0 | 0 |
meth |
GlmImageModel.get_placeholder_mask |
3 | 2 | 0 |
attr |
GlmImageModel.visual |
1 | 0 | 0 |
attr |
GlmImageModel.language_model |
1 | 0 | 0 |
attr |
GlmImageModel.vqmodel |
1 | 0 | 0 |
attr |
GlmImageModel.rope_deltas |
1 | 0 | 0 |
meth |
GlmImageProcessor.init |
5 | 0 | 0 |
meth |
GlmImageProcessor._build_target_image_grid_thw |
6 | 5 | 0 |
attr |
GlmImageProcessor.image_token |
1 | 0 | 0 |
attr |
GlmImageProcessor.grid_bos_token |
1 | 0 | 0 |
attr |
GlmImageProcessor.grid_eos_token |
1 | 0 | 0 |
attr |
GlmImageProcessor.bos_token |
1 | 0 | 0 |
attr |
GlmImageProcessor.image_token_id |
1 | 0 | 0 |
meth |
GlmImageConfig.init |
9 | 1 | 0 |
attr |
GlmImageConfig.image_token_id |
1 | 0 | 0 |
attr |
GlmImageConfig.image_start_token_id |
1 | 0 | 0 |
attr |
GlmImageConfig.image_end_token_id |
1 | 0 | 0 |
attr |
GlmImageConfig.text_config |
1 | 0 | 0 |
attr |
GlmImageConfig.vision_config |
1 | 0 | 0 |
attr |
GlmImageConfig.vq_config |
1 | 0 | 0 |
attr |
GlmImageConfig.tie_word_embeddings |
1 | 0 | 0 |
meth |
GlmImageVQVAEConfig.init |
7 | 4 | 0 |
attr |
GlmImageVQVAEConfig.embed_dim |
1 | 0 | 0 |
attr |
GlmImageVQVAEConfig.num_embeddings |
1 | 0 | 0 |
attr |
GlmImageVQVAEConfig.latent_channels |
1 | 0 | 0 |
attr |
GlmImageVQVAEConfig.in_channels |
1 | 0 | 0 |
attr |
GlmImageVQVAEConfig.initializer_range |
1 | 0 | 0 |
transformers.models.glm_image.processing_glm_image (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GlmImageProcessor.init |
5 | 0 | 0 |
meth |
GlmImageProcessor._build_target_image_grid_thw |
6 | 5 | 0 |
attr |
GlmImageProcessor.image_token |
1 | 0 | 0 |
attr |
GlmImageProcessor.grid_bos_token |
1 | 0 | 0 |
attr |
GlmImageProcessor.grid_eos_token |
1 | 0 | 0 |
attr |
GlmImageProcessor.bos_token |
1 | 0 | 0 |
attr |
GlmImageProcessor.image_token_id |
1 | 0 | 0 |
transformers.models.glm_moe_dsa.configuration_glm_moe_dsa (36 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GlmMoeDsaConfig.init |
37 | 34 | 0 |
attr |
GlmMoeDsaConfig.vocab_size |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.hidden_size |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.intermediate_size |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.moe_intermediate_size |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.num_attention_heads |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.kv_lora_rank |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.q_lora_rank |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.qk_rope_head_dim |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.qk_nope_head_dim |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.qk_head_dim |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.v_head_dim |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.head_dim |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.n_shared_experts |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.n_routed_experts |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.routed_scaling_factor |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.n_group |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.topk_group |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.norm_topk_prob |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.mlp_layer_types |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.index_topk |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.index_head_dim |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.index_n_heads |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.hidden_act |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.initializer_range |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.use_cache |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.attention_bias |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.attention_dropout |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.rope_parameters |
1 | 0 | 0 |
transformers.models.glm_moe_dsa.modeling_glm_moe_dsa (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GlmMoeDsaModel.init |
2 | 1 | 0 |
attr |
GlmMoeDsaModel.padding_idx |
1 | 0 | 0 |
attr |
GlmMoeDsaModel.vocab_size |
1 | 0 | 0 |
attr |
GlmMoeDsaModel.embed_tokens |
1 | 0 | 0 |
attr |
GlmMoeDsaModel.layers |
1 | 0 | 0 |
attr |
GlmMoeDsaModel.norm |
1 | 0 | 0 |
attr |
GlmMoeDsaModel.rotary_emb |
1 | 0 | 0 |
attr |
GlmMoeDsaModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
GlmMoeDsaPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
GlmMoeDsaForCausalLM.init |
2 | 0 | 0 |
attr |
GlmMoeDsaForCausalLM.model |
1 | 0 | 0 |
attr |
GlmMoeDsaForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
GlmMoeDsaForCausalLM.lm_head |
1 | 0 | 0 |
transformers.models.glm_moe_dsa.modular_glm_moe_dsa (36 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GlmMoeDsaConfig.init |
37 | 34 | 0 |
attr |
GlmMoeDsaConfig.vocab_size |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.hidden_size |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.intermediate_size |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.moe_intermediate_size |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.num_attention_heads |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.kv_lora_rank |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.q_lora_rank |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.qk_rope_head_dim |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.qk_nope_head_dim |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.qk_head_dim |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.v_head_dim |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.head_dim |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.n_shared_experts |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.n_routed_experts |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.routed_scaling_factor |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.n_group |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.topk_group |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.norm_topk_prob |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.mlp_layer_types |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.index_topk |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.index_head_dim |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.index_n_heads |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.hidden_act |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.initializer_range |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.use_cache |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.attention_bias |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.attention_dropout |
1 | 0 | 0 |
attr |
GlmMoeDsaConfig.rope_parameters |
1 | 0 | 0 |
transformers.models.glm_ocr.configuration_glm_ocr (68 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GlmOcrConfig.init |
11 | 0 | 0 |
attr |
GlmOcrConfig.image_token_id |
1 | 0 | 0 |
attr |
GlmOcrConfig.video_token_id |
1 | 0 | 0 |
attr |
GlmOcrConfig.video_start_token_id |
1 | 0 | 0 |
attr |
GlmOcrConfig.video_end_token_id |
1 | 0 | 0 |
attr |
GlmOcrConfig.image_start_token_id |
1 | 0 | 0 |
attr |
GlmOcrConfig.image_end_token_id |
1 | 0 | 0 |
attr |
GlmOcrConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
GlmOcrConfig.vision_config |
1 | 0 | 0 |
attr |
GlmOcrConfig.text_config |
1 | 0 | 0 |
meth |
GlmOcrTextConfig.init |
16 | 14 | 0 |
attr |
GlmOcrTextConfig.vocab_size |
1 | 0 | 0 |
attr |
GlmOcrTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
GlmOcrTextConfig.hidden_size |
1 | 0 | 0 |
attr |
GlmOcrTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
GlmOcrTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
GlmOcrTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
GlmOcrTextConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
GlmOcrTextConfig.hidden_act |
1 | 0 | 0 |
attr |
GlmOcrTextConfig.initializer_range |
1 | 0 | 0 |
attr |
GlmOcrTextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
GlmOcrTextConfig.use_cache |
1 | 0 | 0 |
attr |
GlmOcrTextConfig.attention_dropout |
1 | 0 | 0 |
attr |
GlmOcrTextConfig.rope_parameters |
1 | 0 | 0 |
attr |
GlmOcrTextConfig.pad_token_id |
1 | 0 | 0 |
meth |
GlmOcrVisionConfig.init |
17 | 0 | 0 |
attr |
GlmOcrVisionConfig.depth |
1 | 0 | 0 |
attr |
GlmOcrVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
GlmOcrVisionConfig.hidden_act |
1 | 0 | 0 |
attr |
GlmOcrVisionConfig.num_heads |
1 | 0 | 0 |
attr |
GlmOcrVisionConfig.in_channels |
1 | 0 | 0 |
attr |
GlmOcrVisionConfig.image_size |
1 | 0 | 0 |
attr |
GlmOcrVisionConfig.patch_size |
1 | 0 | 0 |
attr |
GlmOcrVisionConfig.spatial_merge_size |
1 | 0 | 0 |
attr |
GlmOcrVisionConfig.temporal_patch_size |
1 | 0 | 0 |
attr |
GlmOcrVisionConfig.out_hidden_size |
1 | 0 | 0 |
attr |
GlmOcrVisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
GlmOcrVisionConfig.initializer_range |
1 | 0 | 0 |
attr |
GlmOcrVisionConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
GlmOcrVisionConfig.attention_bias |
1 | 0 | 0 |
attr |
GlmOcrVisionConfig.attention_dropout |
1 | 0 | 0 |
transformers.models.glm_ocr.modeling_glm_ocr (59 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GlmOcrModel.init |
2 | 0 | 0 |
meth |
GlmOcrModel.get_input_embeddings |
1 | 0 | 0 |
meth |
GlmOcrModel.set_input_embeddings |
2 | 0 | 0 |
meth |
GlmOcrModel.get_vision_position_ids |
7 | 6 | 0 |
meth |
GlmOcrModel.get_rope_index |
7 | 6 | 0 |
meth |
GlmOcrModel.get_placeholder_mask |
5 | 4 | 0 |
attr |
GlmOcrModel.visual |
1 | 0 | 0 |
attr |
GlmOcrModel.language_model |
1 | 0 | 0 |
attr |
GlmOcrModel.rope_deltas |
1 | 0 | 0 |
meth |
GlmOcrForConditionalGeneration.init |
2 | 0 | 0 |
meth |
GlmOcrForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
GlmOcrForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
GlmOcrForConditionalGeneration.prepare_inputs_for_generation |
14 | 0 | 0 |
meth |
GlmOcrForConditionalGeneration._prepare_position_ids_for_generation |
3 | 0 | 0 |
meth |
GlmOcrForConditionalGeneration._expand_inputs_for_generation |
5 | 4 | 0 |
attr |
GlmOcrForConditionalGeneration.model |
1 | 0 | 0 |
attr |
GlmOcrForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
GlmOcrTextModel.init |
2 | 1 | 0 |
attr |
GlmOcrTextModel.padding_idx |
1 | 0 | 0 |
attr |
GlmOcrTextModel.vocab_size |
1 | 0 | 0 |
attr |
GlmOcrTextModel.embed_tokens |
1 | 0 | 0 |
attr |
GlmOcrTextModel.layers |
1 | 0 | 0 |
attr |
GlmOcrTextModel.norm |
1 | 0 | 0 |
attr |
GlmOcrTextModel.rotary_emb |
1 | 0 | 0 |
attr |
GlmOcrTextModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
GlmOcrVisionModel.init |
2 | 1 | 0 |
meth |
GlmOcrVisionModel.rot_pos_emb |
2 | 0 | 0 |
meth |
GlmOcrVisionModel.forward |
4 | 3 | 0 |
attr |
GlmOcrVisionModel.spatial_merge_size |
1 | 0 | 0 |
attr |
GlmOcrVisionModel.patch_size |
1 | 0 | 0 |
attr |
GlmOcrVisionModel.patch_embed |
1 | 0 | 0 |
attr |
GlmOcrVisionModel.rotary_pos_emb |
1 | 0 | 0 |
attr |
GlmOcrVisionModel.blocks |
1 | 0 | 0 |
attr |
GlmOcrVisionModel.merger |
1 | 0 | 0 |
attr |
GlmOcrVisionModel.downsample |
1 | 0 | 0 |
attr |
GlmOcrVisionModel.post_layernorm |
1 | 0 | 0 |
attr |
GlmOcrVisionModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
GlmOcrPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.glm_ocr.modular_glm_ocr (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GlmOcrConfig.init |
11 | 0 | 0 |
meth |
GlmOcrVisionConfig.init |
10 | 0 | 0 |
attr |
GlmOcrTextModel |
1 | 0 | 0 |
meth |
GlmOcrVisionModel.init |
2 | 1 | 0 |
meth |
GlmOcrVisionModel.forward |
4 | 3 | 0 |
attr |
GlmOcrVisionModel.merger |
1 | 0 | 0 |
meth |
GlmOcrTextConfig.init |
9 | 7 | 0 |
transformers.models.glmasr.configuration_glmasr (37 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GlmAsrConfig.init |
6 | 0 | 0 |
attr |
GlmAsrConfig.audio_config |
1 | 0 | 0 |
attr |
GlmAsrConfig.text_config |
1 | 0 | 0 |
attr |
GlmAsrConfig.vocab_size |
1 | 0 | 0 |
attr |
GlmAsrConfig.hidden_size |
1 | 0 | 0 |
attr |
GlmAsrConfig.audio_token_id |
1 | 0 | 0 |
attr |
GlmAsrConfig.projector_hidden_act |
1 | 0 | 0 |
meth |
GlmAsrEncoderConfig.init |
13 | 0 | 0 |
attr |
GlmAsrEncoderConfig.hidden_size |
1 | 0 | 0 |
attr |
GlmAsrEncoderConfig.intermediate_size |
1 | 0 | 0 |
attr |
GlmAsrEncoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
GlmAsrEncoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
GlmAsrEncoderConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
GlmAsrEncoderConfig.hidden_act |
1 | 0 | 0 |
attr |
GlmAsrEncoderConfig.initializer_range |
1 | 0 | 0 |
attr |
GlmAsrEncoderConfig.head_dim |
1 | 0 | 0 |
attr |
GlmAsrEncoderConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
GlmAsrEncoderConfig.rope_parameters |
1 | 0 | 0 |
attr |
GlmAsrEncoderConfig.attention_dropout |
1 | 0 | 0 |
attr |
GlmAsrEncoderConfig.num_mel_bins |
1 | 0 | 0 |
transformers.models.glmasr.modeling_glmasr (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GlmAsrForConditionalGeneration.init |
2 | 0 | 0 |
meth |
GlmAsrForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
GlmAsrForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
GlmAsrForConditionalGeneration.get_output_embeddings |
1 | 0 | 0 |
meth |
GlmAsrForConditionalGeneration.set_output_embeddings |
2 | 0 | 0 |
meth |
GlmAsrForConditionalGeneration.set_decoder |
2 | 0 | 0 |
meth |
GlmAsrForConditionalGeneration.get_decoder |
1 | 0 | 0 |
meth |
GlmAsrForConditionalGeneration.prepare_inputs_for_generation |
3 | 0 | 0 |
attr |
GlmAsrForConditionalGeneration.vocab_size |
1 | 0 | 0 |
attr |
GlmAsrForConditionalGeneration.audio_tower |
1 | 0 | 0 |
attr |
GlmAsrForConditionalGeneration.language_model |
1 | 0 | 0 |
attr |
GlmAsrForConditionalGeneration.multi_modal_projector |
1 | 0 | 0 |
meth |
GlmAsrEncoder.init |
2 | 1 | 0 |
meth |
GlmAsrEncoder.forward |
3 | 1 | 0 |
attr |
GlmAsrEncoder.conv1 |
1 | 0 | 0 |
attr |
GlmAsrEncoder.conv2 |
1 | 0 | 0 |
attr |
GlmAsrEncoder.layers |
1 | 0 | 0 |
attr |
GlmAsrEncoder.norm |
1 | 0 | 0 |
attr |
GlmAsrEncoder.rotary_emb |
1 | 0 | 0 |
attr |
GlmAsrEncoder.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.glmasr.modular_glmasr (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GlmAsrProcessor.init |
7 | 0 | 0 |
meth |
GlmAsrEncoder.init |
2 | 1 | 0 |
meth |
GlmAsrEncoder.forward |
3 | 1 | 0 |
attr |
GlmAsrEncoder.conv1 |
1 | 0 | 0 |
attr |
GlmAsrEncoder.conv2 |
1 | 0 | 0 |
attr |
GlmAsrEncoder.layers |
1 | 0 | 0 |
attr |
GlmAsrEncoder.norm |
1 | 0 | 0 |
attr |
GlmAsrEncoder.rotary_emb |
1 | 0 | 0 |
attr |
GlmAsrEncoder.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.glmasr.processing_glmasr (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GlmAsrProcessor.init |
7 | 0 | 0 |
meth |
GlmAsrProcessor.batch_decode |
4 | 0 | 0 |
attr |
GlmAsrProcessor.audio_token |
1 | 0 | 0 |
attr |
GlmAsrProcessor.audio_token_id |
1 | 0 | 0 |
attr |
GlmAsrProcessor.default_transcription_prompt |
1 | 0 | 0 |
attr |
GlmAsrProcessor.max_audio_len |
1 | 0 | 0 |
transformers.models.glpn.configuration_glpn (38 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GLPNConfig.init |
20 | 0 | 0 |
attr |
GLPNConfig.num_channels |
1 | 0 | 0 |
attr |
GLPNConfig.num_encoder_blocks |
1 | 0 | 0 |
attr |
GLPNConfig.depths |
1 | 0 | 0 |
attr |
GLPNConfig.sr_ratios |
1 | 0 | 0 |
attr |
GLPNConfig.hidden_sizes |
1 | 0 | 0 |
attr |
GLPNConfig.patch_sizes |
1 | 0 | 0 |
attr |
GLPNConfig.strides |
1 | 0 | 0 |
attr |
GLPNConfig.mlp_ratios |
1 | 0 | 0 |
attr |
GLPNConfig.num_attention_heads |
1 | 0 | 0 |
attr |
GLPNConfig.hidden_act |
1 | 0 | 0 |
attr |
GLPNConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
GLPNConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
GLPNConfig.initializer_range |
1 | 0 | 0 |
attr |
GLPNConfig.drop_path_rate |
1 | 0 | 0 |
attr |
GLPNConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
GLPNConfig.decoder_hidden_size |
1 | 0 | 0 |
attr |
GLPNConfig.max_depth |
1 | 0 | 0 |
attr |
GLPNConfig.head_in_index |
1 | 0 | 0 |
transformers.models.glpn.image_processing_glpn (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GLPNImageProcessor.init |
7 | 5 | 0 |
meth |
GLPNImageProcessor.resize |
7 | 6 | 0 |
meth |
GLPNImageProcessor.preprocess |
10 | 9 | 0 |
attr |
GLPNImageProcessor.do_resize |
1 | 0 | 0 |
attr |
GLPNImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
GLPNImageProcessor.size_divisor |
1 | 0 | 0 |
attr |
GLPNImageProcessor.resample |
1 | 0 | 0 |
attr |
GLPNImageProcessor.rescale_factor |
1 | 0 | 0 |
transformers.models.glpn.image_processing_glpn_fast (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GLPNImageProcessorFast._validate_preprocess_kwargs |
2 | 0 | 0 |
meth |
GLPNImageProcessorFast.resize |
6 | 5 | 0 |
meth |
GLPNImageProcessorFast._preprocess |
14 | 13 | 0 |
meth |
GLPNImageProcessorFast.post_process_depth_estimation |
3 | 0 | 0 |
transformers.models.glpn.modeling_glpn (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GLPNModel.init |
2 | 0 | 0 |
meth |
GLPNModel.forward |
6 | 5 | 0 |
attr |
GLPNModel.encoder |
1 | 0 | 0 |
meth |
GLPNLayer.init |
7 | 0 | 0 |
meth |
GLPNLayer.forward |
5 | 0 | 0 |
attr |
GLPNLayer.layer_norm_1 |
1 | 0 | 0 |
attr |
GLPNLayer.attention |
1 | 0 | 0 |
attr |
GLPNLayer.drop_path |
1 | 0 | 0 |
attr |
GLPNLayer.layer_norm_2 |
1 | 0 | 0 |
attr |
GLPNLayer.mlp |
1 | 0 | 0 |
meth |
GLPNForDepthEstimation.init |
2 | 0 | 0 |
meth |
GLPNForDepthEstimation.forward |
7 | 6 | 0 |
attr |
GLPNForDepthEstimation.glpn |
1 | 0 | 0 |
attr |
GLPNForDepthEstimation.decoder |
1 | 0 | 0 |
attr |
GLPNForDepthEstimation.head |
1 | 0 | 0 |
transformers.models.got_ocr2.configuration_got_ocr2 (43 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GotOcr2Config.init |
7 | 5 | 0 |
attr |
GotOcr2Config.image_token_index |
1 | 0 | 0 |
attr |
GotOcr2Config.image_seq_length |
1 | 0 | 0 |
attr |
GotOcr2Config.text_config |
1 | 0 | 0 |
attr |
GotOcr2Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
GotOcr2Config.vision_config |
1 | 0 | 0 |
meth |
GotOcr2VisionConfig.init |
19 | 0 | 0 |
attr |
GotOcr2VisionConfig.hidden_size |
1 | 0 | 0 |
attr |
GotOcr2VisionConfig.output_channels |
1 | 0 | 0 |
attr |
GotOcr2VisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
GotOcr2VisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
GotOcr2VisionConfig.num_channels |
1 | 0 | 0 |
attr |
GotOcr2VisionConfig.image_size |
1 | 0 | 0 |
attr |
GotOcr2VisionConfig.patch_size |
1 | 0 | 0 |
attr |
GotOcr2VisionConfig.hidden_act |
1 | 0 | 0 |
attr |
GotOcr2VisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
GotOcr2VisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
GotOcr2VisionConfig.initializer_range |
1 | 0 | 0 |
attr |
GotOcr2VisionConfig.qkv_bias |
1 | 0 | 0 |
attr |
GotOcr2VisionConfig.use_abs_pos |
1 | 0 | 0 |
attr |
GotOcr2VisionConfig.use_rel_pos |
1 | 0 | 0 |
attr |
GotOcr2VisionConfig.window_size |
1 | 0 | 0 |
attr |
GotOcr2VisionConfig.global_attn_indexes |
1 | 0 | 0 |
attr |
GotOcr2VisionConfig.mlp_dim |
1 | 0 | 0 |
transformers.models.got_ocr2.image_processing_got_ocr2 (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GotOcr2ImageProcessor.init |
14 | 13 | 0 |
meth |
GotOcr2ImageProcessor.resize |
7 | 6 | 0 |
meth |
GotOcr2ImageProcessor.crop_image_to_patches |
7 | 6 | 0 |
meth |
GotOcr2ImageProcessor.get_number_of_image_patches |
4 | 2 | 0 |
attr |
GotOcr2ImageProcessor.do_resize |
1 | 0 | 0 |
attr |
GotOcr2ImageProcessor.size |
1 | 0 | 0 |
attr |
GotOcr2ImageProcessor.crop_to_patches |
1 | 0 | 0 |
attr |
GotOcr2ImageProcessor.min_patches |
1 | 0 | 0 |
attr |
GotOcr2ImageProcessor.max_patches |
1 | 0 | 0 |
attr |
GotOcr2ImageProcessor.resample |
1 | 0 | 0 |
attr |
GotOcr2ImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
GotOcr2ImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
GotOcr2ImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
GotOcr2ImageProcessor.image_mean |
1 | 0 | 0 |
attr |
GotOcr2ImageProcessor.image_std |
1 | 0 | 0 |
attr |
GotOcr2ImageProcessor.do_convert_rgb |
1 | 0 | 0 |
transformers.models.got_ocr2.image_processing_got_ocr2_fast (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GotOcr2ImageProcessorFast.init |
2 | 1 | 0 |
meth |
GotOcr2ImageProcessorFast.crop_image_to_patches |
7 | 6 | 0 |
meth |
GotOcr2ImageProcessorFast._preprocess |
18 | 17 | 0 |
meth |
GotOcr2ImageProcessorFast.get_number_of_image_patches |
4 | 2 | 0 |
transformers.models.got_ocr2.modeling_got_ocr2 (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GotOcr2ForConditionalGeneration.init |
2 | 1 | 0 |
meth |
GotOcr2ForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
GotOcr2ForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
GotOcr2ForConditionalGeneration.prepare_inputs_for_generation |
10 | 0 | 0 |
attr |
GotOcr2ForConditionalGeneration.model |
1 | 0 | 0 |
attr |
GotOcr2ForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
GotOcr2Model.init |
2 | 1 | 0 |
meth |
GotOcr2Model.get_input_embeddings |
1 | 0 | 0 |
meth |
GotOcr2Model.set_input_embeddings |
2 | 0 | 0 |
meth |
GotOcr2Model.get_placeholder_mask |
4 | 3 | 0 |
attr |
GotOcr2Model.vision_tower |
1 | 0 | 0 |
attr |
GotOcr2Model.multi_modal_projector |
1 | 0 | 0 |
attr |
GotOcr2Model.language_model |
1 | 0 | 0 |
meth |
GotOcr2PreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.got_ocr2.modular_got_ocr2 (45 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GotOcr2VisionConfig.init |
19 | 0 | 0 |
attr |
GotOcr2VisionConfig.hidden_size |
1 | 0 | 0 |
attr |
GotOcr2VisionConfig.output_channels |
1 | 0 | 0 |
attr |
GotOcr2VisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
GotOcr2VisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
GotOcr2VisionConfig.num_channels |
1 | 0 | 0 |
attr |
GotOcr2VisionConfig.image_size |
1 | 0 | 0 |
attr |
GotOcr2VisionConfig.patch_size |
1 | 0 | 0 |
attr |
GotOcr2VisionConfig.hidden_act |
1 | 0 | 0 |
attr |
GotOcr2VisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
GotOcr2VisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
GotOcr2VisionConfig.initializer_range |
1 | 0 | 0 |
attr |
GotOcr2VisionConfig.qkv_bias |
1 | 0 | 0 |
attr |
GotOcr2VisionConfig.use_abs_pos |
1 | 0 | 0 |
attr |
GotOcr2VisionConfig.use_rel_pos |
1 | 0 | 0 |
attr |
GotOcr2VisionConfig.window_size |
1 | 0 | 0 |
attr |
GotOcr2VisionConfig.global_attn_indexes |
1 | 0 | 0 |
attr |
GotOcr2VisionConfig.mlp_dim |
1 | 0 | 0 |
meth |
GotOcr2Config.init |
7 | 5 | 0 |
attr |
GotOcr2Config.image_token_index |
1 | 0 | 0 |
attr |
GotOcr2Config.image_seq_length |
1 | 0 | 0 |
attr |
GotOcr2Config.text_config |
1 | 0 | 0 |
attr |
GotOcr2Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
GotOcr2Config.vision_config |
1 | 0 | 0 |
meth |
GotOcr2Model.init |
2 | 1 | 0 |
attr |
GotOcr2Model.vision_tower |
1 | 0 | 0 |
transformers.models.got_ocr2.processing_got_ocr2 (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GotOcr2Processor.init |
5 | 0 | 0 |
meth |
GotOcr2Processor._make_list_of_inputs |
6 | 0 | 0 |
attr |
GotOcr2Processor.message_start_token |
1 | 0 | 0 |
attr |
GotOcr2Processor.message_end_token |
1 | 0 | 0 |
attr |
GotOcr2Processor.img_start_token |
1 | 0 | 0 |
attr |
GotOcr2Processor.img_end_token |
1 | 0 | 0 |
attr |
GotOcr2Processor.img_pad_token |
1 | 0 | 0 |
attr |
GotOcr2Processor.image_token |
1 | 0 | 0 |
attr |
GotOcr2Processor.image_token_id |
1 | 0 | 0 |
attr |
GotOcr2Processor.system_query |
1 | 0 | 0 |
transformers.models.gpt2.configuration_gpt2 (54 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GPT2Config.init |
28 | 0 | 0 |
attr |
GPT2Config.add_cross_attention |
1 | 0 | 0 |
attr |
GPT2Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
GPT2Config.vocab_size |
1 | 0 | 0 |
attr |
GPT2Config.n_positions |
1 | 0 | 0 |
attr |
GPT2Config.n_embd |
1 | 0 | 0 |
attr |
GPT2Config.n_layer |
1 | 0 | 0 |
attr |
GPT2Config.n_head |
1 | 0 | 0 |
attr |
GPT2Config.n_inner |
1 | 0 | 0 |
attr |
GPT2Config.activation_function |
1 | 0 | 0 |
attr |
GPT2Config.resid_pdrop |
1 | 0 | 0 |
attr |
GPT2Config.embd_pdrop |
1 | 0 | 0 |
attr |
GPT2Config.attn_pdrop |
1 | 0 | 0 |
attr |
GPT2Config.layer_norm_epsilon |
1 | 0 | 0 |
attr |
GPT2Config.initializer_range |
1 | 0 | 0 |
attr |
GPT2Config.summary_type |
1 | 0 | 0 |
attr |
GPT2Config.summary_use_proj |
1 | 0 | 0 |
attr |
GPT2Config.summary_activation |
1 | 0 | 0 |
attr |
GPT2Config.summary_first_dropout |
1 | 0 | 0 |
attr |
GPT2Config.summary_proj_to_labels |
1 | 0 | 0 |
attr |
GPT2Config.scale_attn_weights |
1 | 0 | 0 |
attr |
GPT2Config.use_cache |
1 | 0 | 0 |
attr |
GPT2Config.scale_attn_by_inverse_layer_idx |
1 | 0 | 0 |
attr |
GPT2Config.reorder_and_upcast_attn |
1 | 0 | 0 |
attr |
GPT2Config.bos_token_id |
1 | 0 | 0 |
attr |
GPT2Config.eos_token_id |
1 | 0 | 0 |
attr |
GPT2Config.pad_token_id |
1 | 0 | 0 |
transformers.models.gpt2.modeling_gpt2 (46 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GPT2LMHeadModel.init |
2 | 0 | 0 |
meth |
GPT2LMHeadModel.forward |
14 | 13 | 0 |
attr |
GPT2LMHeadModel.transformer |
1 | 0 | 0 |
attr |
GPT2LMHeadModel.lm_head |
1 | 0 | 0 |
meth |
GPT2Model.init |
2 | 0 | 0 |
meth |
GPT2Model.get_input_embeddings |
1 | 0 | 0 |
meth |
GPT2Model.set_input_embeddings |
2 | 0 | 0 |
meth |
GPT2Model.forward |
12 | 11 | 0 |
attr |
GPT2Model.embed_dim |
1 | 0 | 0 |
attr |
GPT2Model.wte |
1 | 0 | 0 |
attr |
GPT2Model.wpe |
1 | 0 | 0 |
attr |
GPT2Model.drop |
1 | 0 | 0 |
attr |
GPT2Model.h |
1 | 0 | 0 |
attr |
GPT2Model.ln_f |
1 | 0 | 0 |
attr |
GPT2Model.gradient_checkpointing |
1 | 0 | 0 |
meth |
GPT2DoubleHeadsModel.init |
2 | 0 | 0 |
meth |
GPT2DoubleHeadsModel.forward |
13 | 12 | 0 |
attr |
GPT2DoubleHeadsModel.transformer |
1 | 0 | 0 |
attr |
GPT2DoubleHeadsModel.lm_head |
1 | 0 | 0 |
attr |
GPT2DoubleHeadsModel.multiple_choice_head |
1 | 0 | 0 |
meth |
GPT2ForQuestionAnswering.init |
2 | 0 | 0 |
meth |
GPT2ForQuestionAnswering.forward |
9 | 8 | 0 |
attr |
GPT2ForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
GPT2ForQuestionAnswering.transformer |
1 | 0 | 0 |
attr |
GPT2ForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
GPT2PreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
GPT2PreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
GPT2ForTokenClassification.init |
2 | 0 | 0 |
meth |
GPT2ForTokenClassification.forward |
10 | 9 | 0 |
attr |
GPT2ForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
GPT2ForTokenClassification.transformer |
1 | 0 | 0 |
attr |
GPT2ForTokenClassification.dropout |
1 | 0 | 0 |
attr |
GPT2ForTokenClassification.classifier |
1 | 0 | 0 |
meth |
GPT2ForSequenceClassification.init |
2 | 0 | 0 |
meth |
GPT2ForSequenceClassification.forward |
10 | 9 | 0 |
attr |
GPT2ForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
GPT2ForSequenceClassification.transformer |
1 | 0 | 0 |
attr |
GPT2ForSequenceClassification.score |
1 | 0 | 0 |
transformers.models.gpt2.tokenization_gpt2 (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GPT2Tokenizer.init |
10 | 7 | 0 |
attr |
GPT2Tokenizer.add_prefix_space |
1 | 0 | 0 |
transformers.models.gpt_bigcode.configuration_gpt_bigcode (47 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GPTBigCodeConfig.init |
24 | 0 | 0 |
attr |
GPTBigCodeConfig.add_cross_attention |
1 | 0 | 0 |
attr |
GPTBigCodeConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
GPTBigCodeConfig.vocab_size |
1 | 0 | 0 |
attr |
GPTBigCodeConfig.n_positions |
1 | 0 | 0 |
attr |
GPTBigCodeConfig.n_embd |
1 | 0 | 0 |
attr |
GPTBigCodeConfig.n_layer |
1 | 0 | 0 |
attr |
GPTBigCodeConfig.n_head |
1 | 0 | 0 |
attr |
GPTBigCodeConfig.n_inner |
1 | 0 | 0 |
attr |
GPTBigCodeConfig.activation_function |
1 | 0 | 0 |
attr |
GPTBigCodeConfig.resid_pdrop |
1 | 0 | 0 |
attr |
GPTBigCodeConfig.embd_pdrop |
1 | 0 | 0 |
attr |
GPTBigCodeConfig.attn_pdrop |
1 | 0 | 0 |
attr |
GPTBigCodeConfig.layer_norm_epsilon |
1 | 0 | 0 |
attr |
GPTBigCodeConfig.initializer_range |
1 | 0 | 0 |
attr |
GPTBigCodeConfig.scale_attn_weights |
1 | 0 | 0 |
attr |
GPTBigCodeConfig.use_cache |
1 | 0 | 0 |
attr |
GPTBigCodeConfig.attention_softmax_in_fp32 |
1 | 0 | 0 |
attr |
GPTBigCodeConfig.scale_attention_softmax_in_fp32 |
1 | 0 | 0 |
attr |
GPTBigCodeConfig.multi_query |
1 | 0 | 0 |
attr |
GPTBigCodeConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
GPTBigCodeConfig.bos_token_id |
1 | 0 | 0 |
attr |
GPTBigCodeConfig.eos_token_id |
1 | 0 | 0 |
attr |
GPTBigCodeConfig.pad_token_id |
1 | 0 | 0 |
transformers.models.gpt_bigcode.modeling_gpt_bigcode (34 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GPTBigCodeForTokenClassification.init |
2 | 0 | 0 |
meth |
GPTBigCodeForTokenClassification.forward |
13 | 12 | 0 |
attr |
GPTBigCodeForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
GPTBigCodeForTokenClassification.transformer |
1 | 0 | 0 |
attr |
GPTBigCodeForTokenClassification.dropout |
1 | 0 | 0 |
attr |
GPTBigCodeForTokenClassification.classifier |
1 | 0 | 0 |
meth |
GPTBigCodeForSequenceClassification.init |
2 | 0 | 0 |
meth |
GPTBigCodeForSequenceClassification.forward |
13 | 12 | 0 |
attr |
GPTBigCodeForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
GPTBigCodeForSequenceClassification.transformer |
1 | 0 | 0 |
attr |
GPTBigCodeForSequenceClassification.score |
1 | 0 | 0 |
meth |
GPTBigCodeForCausalLM.init |
2 | 0 | 0 |
meth |
GPTBigCodeForCausalLM.forward |
17 | 16 | 0 |
attr |
GPTBigCodeForCausalLM.transformer |
1 | 0 | 0 |
attr |
GPTBigCodeForCausalLM.lm_head |
1 | 0 | 0 |
meth |
GPTBigCodePreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
GPTBigCodeModel.init |
2 | 0 | 0 |
meth |
GPTBigCodeModel.get_input_embeddings |
1 | 0 | 0 |
meth |
GPTBigCodeModel.set_input_embeddings |
2 | 0 | 0 |
meth |
GPTBigCodeModel.forward |
15 | 14 | 0 |
attr |
GPTBigCodeModel.multi_query |
1 | 0 | 0 |
attr |
GPTBigCodeModel.embed_dim |
1 | 0 | 0 |
attr |
GPTBigCodeModel.wte |
1 | 0 | 0 |
attr |
GPTBigCodeModel.wpe |
1 | 0 | 0 |
attr |
GPTBigCodeModel.drop |
1 | 0 | 0 |
attr |
GPTBigCodeModel.h |
1 | 0 | 0 |
attr |
GPTBigCodeModel.ln_f |
1 | 0 | 0 |
attr |
GPTBigCodeModel.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.gpt_neo.configuration_gpt_neo (45 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GPTNeoConfig.init |
22 | 0 | 0 |
meth |
GPTNeoConfig.expand_attention_types_params |
2 | 0 | 0 |
attr |
GPTNeoConfig.vocab_size |
1 | 0 | 0 |
attr |
GPTNeoConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
GPTNeoConfig.hidden_size |
1 | 0 | 0 |
attr |
GPTNeoConfig.num_layers |
1 | 0 | 0 |
attr |
GPTNeoConfig.num_heads |
1 | 0 | 0 |
attr |
GPTNeoConfig.intermediate_size |
1 | 0 | 0 |
attr |
GPTNeoConfig.window_size |
1 | 0 | 0 |
attr |
GPTNeoConfig.activation_function |
1 | 0 | 0 |
attr |
GPTNeoConfig.resid_dropout |
1 | 0 | 0 |
attr |
GPTNeoConfig.embed_dropout |
1 | 0 | 0 |
attr |
GPTNeoConfig.attention_dropout |
1 | 0 | 0 |
attr |
GPTNeoConfig.classifier_dropout |
1 | 0 | 0 |
attr |
GPTNeoConfig.layer_norm_epsilon |
1 | 0 | 0 |
attr |
GPTNeoConfig.initializer_range |
1 | 0 | 0 |
attr |
GPTNeoConfig.use_cache |
1 | 0 | 0 |
attr |
GPTNeoConfig.bos_token_id |
1 | 0 | 0 |
attr |
GPTNeoConfig.eos_token_id |
1 | 0 | 0 |
attr |
GPTNeoConfig.pad_token_id |
1 | 0 | 0 |
attr |
GPTNeoConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
GPTNeoConfig.attention_types |
1 | 0 | 0 |
attr |
GPTNeoConfig.attention_layers |
1 | 0 | 0 |
transformers.models.gpt_neo.modeling_gpt_neo (39 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GPTNeoPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
GPTNeoForCausalLM.init |
2 | 0 | 0 |
meth |
GPTNeoForCausalLM.forward |
15 | 14 | 0 |
attr |
GPTNeoForCausalLM.transformer |
1 | 0 | 0 |
attr |
GPTNeoForCausalLM.lm_head |
1 | 0 | 0 |
meth |
GPTNeoModel.init |
2 | 0 | 0 |
meth |
GPTNeoModel.get_input_embeddings |
1 | 0 | 0 |
meth |
GPTNeoModel.set_input_embeddings |
2 | 0 | 0 |
meth |
GPTNeoModel.forward |
13 | 12 | 0 |
attr |
GPTNeoModel.embed_dim |
1 | 0 | 0 |
attr |
GPTNeoModel.wte |
1 | 0 | 0 |
attr |
GPTNeoModel.wpe |
1 | 0 | 0 |
attr |
GPTNeoModel.drop |
1 | 0 | 0 |
attr |
GPTNeoModel.h |
1 | 0 | 0 |
attr |
GPTNeoModel.ln_f |
1 | 0 | 0 |
attr |
GPTNeoModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
GPTNeoForSequenceClassification.init |
2 | 0 | 0 |
meth |
GPTNeoForSequenceClassification.forward |
13 | 12 | 0 |
attr |
GPTNeoForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
GPTNeoForSequenceClassification.transformer |
1 | 0 | 0 |
attr |
GPTNeoForSequenceClassification.score |
1 | 0 | 0 |
meth |
GPTNeoForQuestionAnswering.init |
2 | 0 | 0 |
meth |
GPTNeoForQuestionAnswering.forward |
12 | 11 | 0 |
attr |
GPTNeoForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
GPTNeoForQuestionAnswering.transformer |
1 | 0 | 0 |
attr |
GPTNeoForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
GPTNeoForTokenClassification.init |
2 | 0 | 0 |
meth |
GPTNeoForTokenClassification.forward |
13 | 12 | 0 |
attr |
GPTNeoForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
GPTNeoForTokenClassification.transformer |
1 | 0 | 0 |
attr |
GPTNeoForTokenClassification.dropout |
1 | 0 | 0 |
attr |
GPTNeoForTokenClassification.classifier |
1 | 0 | 0 |
transformers.models.gpt_neox.configuration_gpt_neox (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GPTNeoXConfig.init |
23 | 21 | 0 |
meth |
GPTNeoXConfig.convert_rope_params_to_dict |
3 | 0 | 0 |
attr |
GPTNeoXConfig.is_decoder |
1 | 0 | 0 |
attr |
GPTNeoXConfig.bos_token_id |
1 | 0 | 0 |
attr |
GPTNeoXConfig.eos_token_id |
1 | 0 | 0 |
attr |
GPTNeoXConfig.pad_token_id |
1 | 0 | 0 |
attr |
GPTNeoXConfig.vocab_size |
1 | 0 | 0 |
attr |
GPTNeoXConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
GPTNeoXConfig.hidden_size |
1 | 0 | 0 |
attr |
GPTNeoXConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
GPTNeoXConfig.num_attention_heads |
1 | 0 | 0 |
attr |
GPTNeoXConfig.intermediate_size |
1 | 0 | 0 |
attr |
GPTNeoXConfig.hidden_act |
1 | 0 | 0 |
attr |
GPTNeoXConfig.attention_dropout |
1 | 0 | 0 |
attr |
GPTNeoXConfig.hidden_dropout |
1 | 0 | 0 |
attr |
GPTNeoXConfig.classifier_dropout |
1 | 0 | 0 |
attr |
GPTNeoXConfig.initializer_range |
1 | 0 | 0 |
attr |
GPTNeoXConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
GPTNeoXConfig.use_cache |
1 | 0 | 0 |
attr |
GPTNeoXConfig.use_parallel_residual |
1 | 0 | 0 |
attr |
GPTNeoXConfig.attention_bias |
1 | 0 | 0 |
attr |
GPTNeoXConfig.rope_parameters |
1 | 0 | 0 |
attr |
GPTNeoXConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.gpt_neox.modeling_gpt_neox (48 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GPTNeoXModel.init |
2 | 0 | 0 |
meth |
GPTNeoXModel.get_input_embeddings |
1 | 0 | 0 |
meth |
GPTNeoXModel.set_input_embeddings |
2 | 0 | 0 |
attr |
GPTNeoXModel.embed_in |
1 | 0 | 0 |
attr |
GPTNeoXModel.emb_dropout |
1 | 0 | 0 |
attr |
GPTNeoXModel.layers |
1 | 0 | 0 |
attr |
GPTNeoXModel.final_layer_norm |
1 | 0 | 0 |
attr |
GPTNeoXModel.rotary_emb |
1 | 0 | 0 |
attr |
GPTNeoXModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
GPTNeoXLayer.init |
3 | 0 | 0 |
meth |
GPTNeoXLayer.forward |
9 | 8 | 0 |
attr |
GPTNeoXLayer.use_parallel_residual |
1 | 0 | 0 |
attr |
GPTNeoXLayer.input_layernorm |
1 | 0 | 0 |
attr |
GPTNeoXLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
GPTNeoXLayer.post_attention_dropout |
1 | 0 | 0 |
attr |
GPTNeoXLayer.post_mlp_dropout |
1 | 0 | 0 |
attr |
GPTNeoXLayer.attention |
1 | 0 | 0 |
attr |
GPTNeoXLayer.mlp |
1 | 0 | 0 |
meth |
GPTNeoXForSequenceClassification.init |
2 | 0 | 0 |
meth |
GPTNeoXForSequenceClassification.forward |
9 | 8 | 0 |
attr |
GPTNeoXForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
GPTNeoXForSequenceClassification.gpt_neox |
1 | 0 | 0 |
attr |
GPTNeoXForSequenceClassification.score |
1 | 0 | 0 |
meth |
GPTNeoXForCausalLM.init |
2 | 0 | 0 |
meth |
GPTNeoXForCausalLM.get_output_embeddings |
1 | 0 | 0 |
meth |
GPTNeoXForCausalLM.set_output_embeddings |
2 | 0 | 0 |
attr |
GPTNeoXForCausalLM.gpt_neox |
1 | 0 | 0 |
attr |
GPTNeoXForCausalLM.embed_out |
1 | 0 | 0 |
meth |
GPTNeoXForQuestionAnswering.init |
2 | 0 | 0 |
meth |
GPTNeoXForQuestionAnswering.forward |
9 | 8 | 0 |
attr |
GPTNeoXForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
GPTNeoXForQuestionAnswering.gpt_neox |
1 | 0 | 0 |
attr |
GPTNeoXForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
GPTNeoXForTokenClassification.init |
2 | 0 | 0 |
meth |
GPTNeoXForTokenClassification.forward |
10 | 9 | 0 |
attr |
GPTNeoXForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
GPTNeoXForTokenClassification.gpt_neox |
1 | 0 | 0 |
attr |
GPTNeoXForTokenClassification.dropout |
1 | 0 | 0 |
attr |
GPTNeoXForTokenClassification.classifier |
1 | 0 | 0 |
transformers.models.gpt_neox.modular_gpt_neox (49 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GPTNeoXModel.init |
2 | 0 | 0 |
meth |
GPTNeoXModel.get_input_embeddings |
1 | 0 | 0 |
meth |
GPTNeoXModel.set_input_embeddings |
2 | 0 | 0 |
attr |
GPTNeoXModel.config |
1 | 0 | 0 |
attr |
GPTNeoXModel.embed_in |
1 | 0 | 0 |
attr |
GPTNeoXModel.emb_dropout |
1 | 0 | 0 |
attr |
GPTNeoXModel.layers |
1 | 0 | 0 |
attr |
GPTNeoXModel.final_layer_norm |
1 | 0 | 0 |
attr |
GPTNeoXModel.rotary_emb |
1 | 0 | 0 |
attr |
GPTNeoXModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
GPTNeoXLayer.init |
3 | 0 | 0 |
meth |
GPTNeoXLayer.forward |
9 | 8 | 0 |
attr |
GPTNeoXLayer.use_parallel_residual |
1 | 0 | 0 |
attr |
GPTNeoXLayer.input_layernorm |
1 | 0 | 0 |
attr |
GPTNeoXLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
GPTNeoXLayer.post_attention_dropout |
1 | 0 | 0 |
attr |
GPTNeoXLayer.post_mlp_dropout |
1 | 0 | 0 |
attr |
GPTNeoXLayer.attention |
1 | 0 | 0 |
attr |
GPTNeoXLayer.mlp |
1 | 0 | 0 |
meth |
GPTNeoXForSequenceClassification.init |
2 | 0 | 0 |
meth |
GPTNeoXForSequenceClassification.forward |
9 | 8 | 0 |
attr |
GPTNeoXForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
GPTNeoXForSequenceClassification.gpt_neox |
1 | 0 | 0 |
attr |
GPTNeoXForSequenceClassification.score |
1 | 0 | 0 |
meth |
GPTNeoXForCausalLM.init |
2 | 0 | 0 |
meth |
GPTNeoXForCausalLM.get_output_embeddings |
1 | 0 | 0 |
meth |
GPTNeoXForCausalLM.set_output_embeddings |
2 | 0 | 0 |
attr |
GPTNeoXForCausalLM.gpt_neox |
1 | 0 | 0 |
attr |
GPTNeoXForCausalLM.embed_out |
1 | 0 | 0 |
meth |
GPTNeoXForQuestionAnswering.init |
2 | 0 | 0 |
meth |
GPTNeoXForQuestionAnswering.forward |
9 | 8 | 0 |
attr |
GPTNeoXForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
GPTNeoXForQuestionAnswering.gpt_neox |
1 | 0 | 0 |
attr |
GPTNeoXForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
GPTNeoXForTokenClassification.init |
2 | 0 | 0 |
meth |
GPTNeoXForTokenClassification.forward |
10 | 9 | 0 |
attr |
GPTNeoXForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
GPTNeoXForTokenClassification.gpt_neox |
1 | 0 | 0 |
attr |
GPTNeoXForTokenClassification.dropout |
1 | 0 | 0 |
attr |
GPTNeoXForTokenClassification.classifier |
1 | 0 | 0 |
transformers.models.gpt_neox.tokenization_gpt_neox (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GPTNeoXTokenizer.init |
11 | 9 | 0 |
attr |
GPTNeoXTokenizer.add_prefix_space |
1 | 0 | 0 |
attr |
GPTNeoXTokenizer.trim_offsets |
1 | 0 | 0 |
transformers.models.gpt_neox_japanese.configuration_gpt_neox_japanese (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GPTNeoXJapaneseConfig.init |
20 | 18 | 0 |
meth |
GPTNeoXJapaneseConfig.convert_rope_params_to_dict |
3 | 0 | 0 |
attr |
GPTNeoXJapaneseConfig.is_decoder |
1 | 0 | 0 |
attr |
GPTNeoXJapaneseConfig.bos_token_id |
1 | 0 | 0 |
attr |
GPTNeoXJapaneseConfig.eos_token_id |
1 | 0 | 0 |
attr |
GPTNeoXJapaneseConfig.pad_token_id |
1 | 0 | 0 |
attr |
GPTNeoXJapaneseConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
GPTNeoXJapaneseConfig.vocab_size |
1 | 0 | 0 |
attr |
GPTNeoXJapaneseConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
GPTNeoXJapaneseConfig.hidden_size |
1 | 0 | 0 |
attr |
GPTNeoXJapaneseConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
GPTNeoXJapaneseConfig.num_attention_heads |
1 | 0 | 0 |
attr |
GPTNeoXJapaneseConfig.intermediate_multiple_size |
1 | 0 | 0 |
attr |
GPTNeoXJapaneseConfig.hidden_act |
1 | 0 | 0 |
attr |
GPTNeoXJapaneseConfig.initializer_range |
1 | 0 | 0 |
attr |
GPTNeoXJapaneseConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
GPTNeoXJapaneseConfig.use_cache |
1 | 0 | 0 |
attr |
GPTNeoXJapaneseConfig.attention_dropout |
1 | 0 | 0 |
attr |
GPTNeoXJapaneseConfig.hidden_dropout |
1 | 0 | 0 |
attr |
GPTNeoXJapaneseConfig.rope_parameters |
1 | 0 | 0 |
transformers.models.gpt_neox_japanese.modeling_gpt_neox_japanese (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GPTNeoXJapanesePreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
GPTNeoXJapaneseLayer.__init__ |
3 | 0 | 0 |
meth |
GPTNeoXJapaneseLayer.forward |
9 | 8 | 0 |
attr |
GPTNeoXJapaneseLayer.layer_number |
1 | 0 | 0 |
attr |
GPTNeoXJapaneseLayer.input_layernorm |
1 | 0 | 0 |
attr |
GPTNeoXJapaneseLayer.post_attention_layernorm |
1 | 0 | 0 |
attr |
GPTNeoXJapaneseLayer.attention |
1 | 0 | 0 |
attr |
GPTNeoXJapaneseLayer.mlp |
1 | 0 | 0 |
attr |
GPTNeoXJapaneseLayer.hidden_dropout |
1 | 0 | 0 |
meth |
GPTNeoXJapaneseModel.__init__ |
2 | 0 | 0 |
meth |
GPTNeoXJapaneseModel.get_input_embeddings |
1 | 0 | 0 |
meth |
GPTNeoXJapaneseModel.set_input_embeddings |
2 | 0 | 0 |
meth |
GPTNeoXJapaneseModel.forward |
12 | 11 | 0 |
attr |
GPTNeoXJapaneseModel.embed_in |
1 | 0 | 0 |
attr |
GPTNeoXJapaneseModel.layers |
1 | 0 | 0 |
attr |
GPTNeoXJapaneseModel.final_layer_norm |
1 | 0 | 0 |
attr |
GPTNeoXJapaneseModel.rotary_emb |
1 | 0 | 0 |
meth |
GPTNeoXJapaneseForCausalLM.__init__ |
2 | 0 | 0 |
meth |
GPTNeoXJapaneseForCausalLM.get_output_embeddings |
1 | 0 | 0 |
meth |
GPTNeoXJapaneseForCausalLM.set_output_embeddings |
2 | 0 | 0 |
meth |
GPTNeoXJapaneseForCausalLM.forward |
14 | 13 | 0 |
attr |
GPTNeoXJapaneseForCausalLM.gpt_neox_japanese |
1 | 0 | 0 |
attr |
GPTNeoXJapaneseForCausalLM.embed_out |
1 | 0 | 0 |
transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GPTNeoXJapaneseTokenizer.__init__ |
9 | 0 | 0 |
meth |
GPTNeoXJapaneseTokenizer.get_vocab |
1 | 0 | 0 |
meth |
GPTNeoXJapaneseTokenizer._tokenize |
2 | 0 | 0 |
meth |
GPTNeoXJapaneseTokenizer._convert_token_to_id |
2 | 0 | 0 |
meth |
GPTNeoXJapaneseTokenizer._convert_id_to_token |
2 | 0 | 0 |
meth |
GPTNeoXJapaneseTokenizer.convert_tokens_to_string |
2 | 0 | 0 |
prop |
GPTNeoXJapaneseTokenizer.vocab_size |
1 | 0 | 0 |
attr |
GPTNeoXJapaneseTokenizer.do_clean_text |
1 | 0 | 0 |
attr |
GPTNeoXJapaneseTokenizer.subword_tokenizer |
1 | 0 | 0 |
transformers.models.gpt_oss.configuration_gpt_oss (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GptOssConfig.__init__ |
26 | 24 | 0 |
attr |
GptOssConfig.vocab_size |
1 | 0 | 0 |
attr |
GptOssConfig.hidden_size |
1 | 0 | 0 |
attr |
GptOssConfig.intermediate_size |
1 | 0 | 0 |
attr |
GptOssConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
GptOssConfig.num_attention_heads |
1 | 0 | 0 |
attr |
GptOssConfig.num_local_experts |
1 | 0 | 0 |
attr |
GptOssConfig.sliding_window |
1 | 0 | 0 |
attr |
GptOssConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
GptOssConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
GptOssConfig.hidden_act |
1 | 0 | 0 |
attr |
GptOssConfig.initializer_range |
1 | 0 | 0 |
attr |
GptOssConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
GptOssConfig.attention_dropout |
1 | 0 | 0 |
attr |
GptOssConfig.head_dim |
1 | 0 | 0 |
attr |
GptOssConfig.layer_types |
1 | 0 | 0 |
attr |
GptOssConfig.attention_bias |
1 | 0 | 0 |
attr |
GptOssConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
GptOssConfig.router_aux_loss_coef |
1 | 0 | 0 |
attr |
GptOssConfig.output_router_logits |
1 | 0 | 0 |
attr |
GptOssConfig.use_cache |
1 | 0 | 0 |
attr |
GptOssConfig.rope_parameters |
1 | 0 | 0 |
attr |
GptOssConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
GptOssConfig.pad_token_id |
1 | 0 | 0 |
attr |
GptOssConfig.bos_token_id |
1 | 0 | 0 |
attr |
GptOssConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.gpt_oss.modeling_gpt_oss (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GptOssModel.__init__ |
2 | 1 | 0 |
attr |
GptOssModel.padding_idx |
1 | 0 | 0 |
attr |
GptOssModel.vocab_size |
1 | 0 | 0 |
attr |
GptOssModel.embed_tokens |
1 | 0 | 0 |
attr |
GptOssModel.layers |
1 | 0 | 0 |
attr |
GptOssModel.norm |
1 | 0 | 0 |
attr |
GptOssModel.rotary_emb |
1 | 0 | 0 |
attr |
GptOssModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
GptOssForCausalLM.__init__ |
2 | 0 | 0 |
attr |
GptOssForCausalLM.model |
1 | 0 | 0 |
attr |
GptOssForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
GptOssForCausalLM.lm_head |
1 | 0 | 0 |
attr |
GptOssForCausalLM.router_aux_loss_coef |
1 | 0 | 0 |
attr |
GptOssForCausalLM.num_experts |
1 | 0 | 0 |
attr |
GptOssForCausalLM.num_experts_per_tok |
1 | 0 | 0 |
meth |
GptOssPreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
GptOssPreTrainedModel._can_record_outputs |
1 | 0 | 0 |
transformers.models.gpt_oss.modular_gpt_oss (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GptOssPreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
GptOssPreTrainedModel._can_record_outputs |
1 | 0 | 0 |
transformers.models.gpt_sw3.tokenization_gpt_sw3 (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GPTSw3Tokenizer.__init__ |
11 | 2 | 0 |
meth |
GPTSw3Tokenizer._tokenize |
3 | 2 | 0 |
attr |
GPTSw3Tokenizer.do_lower_case |
1 | 0 | 0 |
attr |
GPTSw3Tokenizer.remove_space |
1 | 0 | 0 |
attr |
GPTSw3Tokenizer.keep_accents |
1 | 0 | 0 |
attr |
GPTSw3Tokenizer.whitespaces |
1 | 0 | 0 |
attr |
GPTSw3Tokenizer.non_printing_characters_re |
1 | 0 | 0 |
transformers.models.gptj.configuration_gptj (38 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GPTJConfig.__init__ |
20 | 0 | 0 |
attr |
GPTJConfig.vocab_size |
1 | 0 | 0 |
attr |
GPTJConfig.n_positions |
1 | 0 | 0 |
attr |
GPTJConfig.n_embd |
1 | 0 | 0 |
attr |
GPTJConfig.n_layer |
1 | 0 | 0 |
attr |
GPTJConfig.n_head |
1 | 0 | 0 |
attr |
GPTJConfig.n_inner |
1 | 0 | 0 |
attr |
GPTJConfig.rotary_dim |
1 | 0 | 0 |
attr |
GPTJConfig.activation_function |
1 | 0 | 0 |
attr |
GPTJConfig.resid_pdrop |
1 | 0 | 0 |
attr |
GPTJConfig.embd_pdrop |
1 | 0 | 0 |
attr |
GPTJConfig.attn_pdrop |
1 | 0 | 0 |
attr |
GPTJConfig.layer_norm_epsilon |
1 | 0 | 0 |
attr |
GPTJConfig.initializer_range |
1 | 0 | 0 |
attr |
GPTJConfig.use_cache |
1 | 0 | 0 |
attr |
GPTJConfig.bos_token_id |
1 | 0 | 0 |
attr |
GPTJConfig.eos_token_id |
1 | 0 | 0 |
attr |
GPTJConfig.pad_token_id |
1 | 0 | 0 |
attr |
GPTJConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.gptj.modeling_gptj (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GPTJForSequenceClassification.__init__ |
2 | 0 | 0 |
meth |
GPTJForSequenceClassification.forward |
13 | 12 | 0 |
attr |
GPTJForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
GPTJForSequenceClassification.transformer |
1 | 0 | 0 |
attr |
GPTJForSequenceClassification.score |
1 | 0 | 0 |
meth |
GPTJForQuestionAnswering.__init__ |
2 | 0 | 0 |
meth |
GPTJForQuestionAnswering.forward |
12 | 11 | 0 |
attr |
GPTJForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
GPTJForQuestionAnswering.transformer |
1 | 0 | 0 |
attr |
GPTJForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
GPTJModel.__init__ |
2 | 0 | 0 |
meth |
GPTJModel.get_input_embeddings |
1 | 0 | 0 |
meth |
GPTJModel.set_input_embeddings |
2 | 0 | 0 |
meth |
GPTJModel.forward |
13 | 12 | 0 |
attr |
GPTJModel.embed_dim |
1 | 0 | 0 |
attr |
GPTJModel.vocab_size |
1 | 0 | 0 |
attr |
GPTJModel.wte |
1 | 0 | 0 |
attr |
GPTJModel.drop |
1 | 0 | 0 |
attr |
GPTJModel.h |
1 | 0 | 0 |
attr |
GPTJModel.ln_f |
1 | 0 | 0 |
attr |
GPTJModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
GPTJForCausalLM.__init__ |
2 | 0 | 0 |
meth |
GPTJForCausalLM.forward |
15 | 14 | 0 |
attr |
GPTJForCausalLM.transformer |
1 | 0 | 0 |
attr |
GPTJForCausalLM.lm_head |
1 | 0 | 0 |
meth |
GPTJPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.granite.configuration_granite (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GraniteConfig.__init__ |
25 | 23 | 0 |
attr |
GraniteConfig.vocab_size |
1 | 0 | 0 |
attr |
GraniteConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
GraniteConfig.hidden_size |
1 | 0 | 0 |
attr |
GraniteConfig.intermediate_size |
1 | 0 | 0 |
attr |
GraniteConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
GraniteConfig.num_attention_heads |
1 | 0 | 0 |
attr |
GraniteConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
GraniteConfig.hidden_act |
1 | 0 | 0 |
attr |
GraniteConfig.initializer_range |
1 | 0 | 0 |
attr |
GraniteConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
GraniteConfig.use_cache |
1 | 0 | 0 |
attr |
GraniteConfig.attention_bias |
1 | 0 | 0 |
attr |
GraniteConfig.attention_dropout |
1 | 0 | 0 |
attr |
GraniteConfig.mlp_bias |
1 | 0 | 0 |
attr |
GraniteConfig.embedding_multiplier |
1 | 0 | 0 |
attr |
GraniteConfig.logits_scaling |
1 | 0 | 0 |
attr |
GraniteConfig.residual_multiplier |
1 | 0 | 0 |
attr |
GraniteConfig.attention_multiplier |
1 | 0 | 0 |
attr |
GraniteConfig.rope_parameters |
1 | 0 | 0 |
attr |
GraniteConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
GraniteConfig.pad_token_id |
1 | 0 | 0 |
attr |
GraniteConfig.bos_token_id |
1 | 0 | 0 |
attr |
GraniteConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.granite.modeling_granite (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GraniteForCausalLM.__init__ |
2 | 0 | 0 |
attr |
GraniteForCausalLM.model |
1 | 0 | 0 |
attr |
GraniteForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
GraniteForCausalLM.lm_head |
1 | 0 | 0 |
meth |
GraniteModel.__init__ |
2 | 1 | 0 |
attr |
GraniteModel.padding_idx |
1 | 0 | 0 |
attr |
GraniteModel.vocab_size |
1 | 0 | 0 |
attr |
GraniteModel.embed_tokens |
1 | 0 | 0 |
attr |
GraniteModel.layers |
1 | 0 | 0 |
attr |
GraniteModel.norm |
1 | 0 | 0 |
attr |
GraniteModel.rotary_emb |
1 | 0 | 0 |
attr |
GraniteModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
GraniteModel.embedding_multiplier |
1 | 0 | 0 |
transformers.models.granite.modular_granite (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GraniteModel.__init__ |
2 | 1 | 0 |
attr |
GraniteModel.embedding_multiplier |
1 | 0 | 0 |
attr |
GraniteModel.layers |
1 | 0 | 0 |
transformers.models.granite_speech.configuration_granite_speech (44 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GraniteSpeechConfig.__init__ |
10 | 0 | 0 |
attr |
GraniteSpeechConfig.text_config |
1 | 0 | 0 |
attr |
GraniteSpeechConfig.encoder_config |
1 | 0 | 0 |
attr |
GraniteSpeechConfig.projector_config |
1 | 0 | 0 |
attr |
GraniteSpeechConfig.audio_token_index |
1 | 0 | 0 |
attr |
GraniteSpeechConfig.initializer_range |
1 | 0 | 0 |
attr |
GraniteSpeechConfig.has_lora_adapter |
1 | 0 | 0 |
attr |
GraniteSpeechConfig.downsample_rate |
1 | 0 | 0 |
attr |
GraniteSpeechConfig.window_size |
1 | 0 | 0 |
meth |
GraniteSpeechEncoderConfig.__init__ |
14 | 0 | 0 |
attr |
GraniteSpeechEncoderConfig.input_dim |
1 | 0 | 0 |
attr |
GraniteSpeechEncoderConfig.num_layers |
1 | 0 | 0 |
attr |
GraniteSpeechEncoderConfig.hidden_dim |
1 | 0 | 0 |
attr |
GraniteSpeechEncoderConfig.feedforward_mult |
1 | 0 | 0 |
attr |
GraniteSpeechEncoderConfig.num_heads |
1 | 0 | 0 |
attr |
GraniteSpeechEncoderConfig.dim_head |
1 | 0 | 0 |
attr |
GraniteSpeechEncoderConfig.output_dim |
1 | 0 | 0 |
attr |
GraniteSpeechEncoderConfig.context_size |
1 | 0 | 0 |
attr |
GraniteSpeechEncoderConfig.dropout |
1 | 0 | 0 |
attr |
GraniteSpeechEncoderConfig.conv_kernel_size |
1 | 0 | 0 |
attr |
GraniteSpeechEncoderConfig.conv_expansion_factor |
1 | 0 | 0 |
attr |
GraniteSpeechEncoderConfig.max_pos_emb |
1 | 0 | 0 |
transformers.models.granite_speech.feature_extraction_granite_speech (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GraniteSpeechFeatureExtractor.__init__ |
9 | 7 | 0 |
meth |
GraniteSpeechFeatureExtractor._extract_mel_spectrograms |
3 | 1 | 0 |
attr |
GraniteSpeechFeatureExtractor.sampling_rate |
1 | 0 | 0 |
attr |
GraniteSpeechFeatureExtractor.melspec_kwargs |
1 | 0 | 0 |
attr |
GraniteSpeechFeatureExtractor.mel_filters |
1 | 0 | 0 |
attr |
GraniteSpeechFeatureExtractor.projector_window_size |
1 | 0 | 0 |
attr |
GraniteSpeechFeatureExtractor.projector_downsample_rate |
1 | 0 | 0 |
transformers.models.granite_speech.modeling_granite_speech (38 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GraniteSpeechCTCEncoder.__init__ |
2 | 1 | 0 |
attr |
GraniteSpeechCTCEncoder.input_linear |
1 | 0 | 0 |
attr |
GraniteSpeechCTCEncoder.layers |
1 | 0 | 0 |
attr |
GraniteSpeechCTCEncoder.out |
1 | 0 | 0 |
attr |
GraniteSpeechCTCEncoder.out_mid |
1 | 0 | 0 |
attr |
GraniteSpeechCTCEncoder.num_layers |
1 | 0 | 0 |
meth |
GraniteSpeechPreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
GraniteSpeechForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
GraniteSpeechForConditionalGeneration.set_decoder |
2 | 0 | 0 |
meth |
GraniteSpeechForConditionalGeneration.get_decoder |
1 | 0 | 0 |
meth |
GraniteSpeechForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
GraniteSpeechForConditionalGeneration.set_output_embeddings |
2 | 0 | 0 |
meth |
GraniteSpeechForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
GraniteSpeechForConditionalGeneration.get_output_embeddings |
1 | 0 | 0 |
meth |
GraniteSpeechForConditionalGeneration.forward |
16 | 15 | 0 |
meth |
GraniteSpeechForConditionalGeneration.prepare_inputs_for_generation |
10 | 0 | 0 |
meth |
GraniteSpeechForConditionalGeneration.generate |
3 | 1 | 0 |
meth |
GraniteSpeechForConditionalGeneration.save_pretrained |
4 | 0 | 0 |
meth |
GraniteSpeechForConditionalGeneration._get_adapter_name |
1 | 0 | 0 |
attr |
GraniteSpeechForConditionalGeneration.language_model |
1 | 0 | 0 |
attr |
GraniteSpeechForConditionalGeneration.encoder |
1 | 0 | 0 |
attr |
GraniteSpeechForConditionalGeneration.projector |
1 | 0 | 0 |
transformers.models.granite_speech.processing_granite_speech (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GraniteSpeechProcessor.__init__ |
5 | 0 | 0 |
meth |
GraniteSpeechProcessor.__call__ |
5 | 4 | 0 |
attr |
GraniteSpeechProcessor.audio_token |
1 | 0 | 0 |
transformers.models.granitemoe.configuration_granitemoe (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GraniteMoeConfig.__init__ |
28 | 26 | 0 |
attr |
GraniteMoeConfig.vocab_size |
1 | 0 | 0 |
attr |
GraniteMoeConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
GraniteMoeConfig.hidden_size |
1 | 0 | 0 |
attr |
GraniteMoeConfig.intermediate_size |
1 | 0 | 0 |
attr |
GraniteMoeConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
GraniteMoeConfig.num_attention_heads |
1 | 0 | 0 |
attr |
GraniteMoeConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
GraniteMoeConfig.hidden_act |
1 | 0 | 0 |
attr |
GraniteMoeConfig.initializer_range |
1 | 0 | 0 |
attr |
GraniteMoeConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
GraniteMoeConfig.use_cache |
1 | 0 | 0 |
attr |
GraniteMoeConfig.attention_bias |
1 | 0 | 0 |
attr |
GraniteMoeConfig.attention_dropout |
1 | 0 | 0 |
attr |
GraniteMoeConfig.embedding_multiplier |
1 | 0 | 0 |
attr |
GraniteMoeConfig.logits_scaling |
1 | 0 | 0 |
attr |
GraniteMoeConfig.residual_multiplier |
1 | 0 | 0 |
attr |
GraniteMoeConfig.attention_multiplier |
1 | 0 | 0 |
attr |
GraniteMoeConfig.num_local_experts |
1 | 0 | 0 |
attr |
GraniteMoeConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
GraniteMoeConfig.output_router_logits |
1 | 0 | 0 |
attr |
GraniteMoeConfig.router_aux_loss_coef |
1 | 0 | 0 |
attr |
GraniteMoeConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
GraniteMoeConfig.pad_token_id |
1 | 0 | 0 |
attr |
GraniteMoeConfig.bos_token_id |
1 | 0 | 0 |
attr |
GraniteMoeConfig.eos_token_id |
1 | 0 | 0 |
attr |
GraniteMoeConfig.rope_parameters |
1 | 0 | 0 |
transformers.models.granitemoe.modeling_granitemoe (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GraniteMoeForCausalLM.__init__ |
2 | 1 | 0 |
meth |
GraniteMoeForCausalLM.forward |
11 | 10 | 0 |
attr |
GraniteMoeForCausalLM.model |
1 | 0 | 0 |
attr |
GraniteMoeForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
GraniteMoeForCausalLM.lm_head |
1 | 0 | 0 |
attr |
GraniteMoeForCausalLM.router_aux_loss_coef |
1 | 0 | 0 |
attr |
GraniteMoeForCausalLM.num_experts |
1 | 0 | 0 |
attr |
GraniteMoeForCausalLM.num_experts_per_tok |
1 | 0 | 0 |
attr |
GraniteMoeForCausalLM.logits_scaling |
1 | 0 | 0 |
meth |
GraniteMoeModel.__init__ |
2 | 1 | 0 |
attr |
GraniteMoeModel.padding_idx |
1 | 0 | 0 |
attr |
GraniteMoeModel.vocab_size |
1 | 0 | 0 |
attr |
GraniteMoeModel.embed_tokens |
1 | 0 | 0 |
attr |
GraniteMoeModel.layers |
1 | 0 | 0 |
attr |
GraniteMoeModel.norm |
1 | 0 | 0 |
attr |
GraniteMoeModel.rotary_emb |
1 | 0 | 0 |
attr |
GraniteMoeModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
GraniteMoeModel.embedding_multiplier |
1 | 0 | 0 |
meth |
GraniteMoePreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.granitemoe.modular_granitemoe (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GraniteMoeForCausalLM.__init__ |
2 | 1 | 0 |
meth |
GraniteMoeForCausalLM.forward |
11 | 10 | 0 |
attr |
GraniteMoeForCausalLM.model |
1 | 0 | 0 |
attr |
GraniteMoeForCausalLM.logits_scaling |
1 | 0 | 0 |
meth |
GraniteMoeModel.__init__ |
2 | 1 | 0 |
attr |
GraniteMoeModel.layers |
1 | 0 | 0 |
attr |
GraniteMoeModel.norm |
1 | 0 | 0 |
attr |
GraniteMoeModel.embedding_multiplier |
1 | 0 | 0 |
meth |
GraniteMoePreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.granitemoehybrid.configuration_granitemoehybrid (44 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GraniteMoeHybridConfig.__init__ |
43 | 41 | 0 |
prop |
GraniteMoeHybridConfig.layers_block_type |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.vocab_size |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.hidden_size |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.intermediate_size |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.num_attention_heads |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.hidden_act |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.initializer_range |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.use_cache |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.attention_bias |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.embedding_multiplier |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.logits_scaling |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.residual_multiplier |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.attention_multiplier |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.attention_dropout |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.num_local_experts |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.output_router_logits |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.router_aux_loss_coef |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.shared_intermediate_size |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.position_embedding_type |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.rope_parameters |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.mamba_n_heads |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.mamba_d_head |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.mamba_n_groups |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.mamba_d_state |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.mamba_d_conv |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.mamba_chunk_size |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.mamba_conv_bias |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.mamba_proj_bias |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.time_step_min |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.time_step_max |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.time_step_limit |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.mamba_expand |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.layer_types |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.pad_token_id |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.bos_token_id |
1 | 0 | 0 |
attr |
GraniteMoeHybridConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.granitemoehybrid.modeling_granitemoehybrid (33 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GraniteMoeHybridForCausalLM.__init__ |
2 | 1 | 0 |
meth |
GraniteMoeHybridForCausalLM.forward |
11 | 10 | 0 |
meth |
GraniteMoeHybridForCausalLM.prepare_inputs_for_generation |
10 | 0 | 0 |
attr |
GraniteMoeHybridForCausalLM.model |
1 | 0 | 0 |
attr |
GraniteMoeHybridForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
GraniteMoeHybridForCausalLM.lm_head |
1 | 0 | 0 |
attr |
GraniteMoeHybridForCausalLM.router_aux_loss_coef |
1 | 0 | 0 |
attr |
GraniteMoeHybridForCausalLM.num_experts |
1 | 0 | 0 |
attr |
GraniteMoeHybridForCausalLM.num_experts_per_tok |
1 | 0 | 0 |
attr |
GraniteMoeHybridForCausalLM.logits_scaling |
1 | 0 | 0 |
meth |
GraniteMoeHybridPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
GraniteMoeHybridModel.__init__ |
2 | 1 | 0 |
meth |
GraniteMoeHybridModel._update_mamba_mask |
3 | 0 | 0 |
attr |
GraniteMoeHybridModel.padding_idx |
1 | 0 | 0 |
attr |
GraniteMoeHybridModel.vocab_size |
1 | 0 | 0 |
attr |
GraniteMoeHybridModel.embed_tokens |
1 | 0 | 0 |
attr |
GraniteMoeHybridModel.layers |
1 | 0 | 0 |
attr |
GraniteMoeHybridModel.norm |
1 | 0 | 0 |
attr |
GraniteMoeHybridModel.rotary_emb |
1 | 0 | 0 |
attr |
GraniteMoeHybridModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
GraniteMoeHybridModel.embedding_multiplier |
1 | 0 | 0 |
transformers.models.granitemoehybrid.modular_granitemoehybrid (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GraniteMoeHybridForCausalLM.__init__ |
2 | 1 | 0 |
meth |
GraniteMoeHybridForCausalLM.forward |
2 | 0 | 0 |
meth |
GraniteMoeHybridForCausalLM.prepare_inputs_for_generation |
10 | 0 | 0 |
attr |
GraniteMoeHybridForCausalLM.model |
1 | 0 | 0 |
meth |
GraniteMoeHybridPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
GraniteMoeHybridModel.__init__ |
2 | 1 | 0 |
meth |
GraniteMoeHybridModel._update_mamba_mask |
3 | 0 | 0 |
attr |
GraniteMoeHybridModel.layers |
1 | 0 | 0 |
attr |
GraniteMoeHybridModel.embedding_multiplier |
1 | 0 | 0 |
attr |
GraniteMoeHybridModel.rotary_emb |
1 | 0 | 0 |
transformers.models.granitemoeshared.configuration_granitemoeshared (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GraniteMoeSharedConfig.__init__ |
29 | 27 | 0 |
attr |
GraniteMoeSharedConfig.vocab_size |
1 | 0 | 0 |
attr |
GraniteMoeSharedConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
GraniteMoeSharedConfig.hidden_size |
1 | 0 | 0 |
attr |
GraniteMoeSharedConfig.intermediate_size |
1 | 0 | 0 |
attr |
GraniteMoeSharedConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
GraniteMoeSharedConfig.num_attention_heads |
1 | 0 | 0 |
attr |
GraniteMoeSharedConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
GraniteMoeSharedConfig.hidden_act |
1 | 0 | 0 |
attr |
GraniteMoeSharedConfig.initializer_range |
1 | 0 | 0 |
attr |
GraniteMoeSharedConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
GraniteMoeSharedConfig.use_cache |
1 | 0 | 0 |
attr |
GraniteMoeSharedConfig.attention_bias |
1 | 0 | 0 |
attr |
GraniteMoeSharedConfig.attention_dropout |
1 | 0 | 0 |
attr |
GraniteMoeSharedConfig.embedding_multiplier |
1 | 0 | 0 |
attr |
GraniteMoeSharedConfig.logits_scaling |
1 | 0 | 0 |
attr |
GraniteMoeSharedConfig.residual_multiplier |
1 | 0 | 0 |
attr |
GraniteMoeSharedConfig.attention_multiplier |
1 | 0 | 0 |
attr |
GraniteMoeSharedConfig.num_local_experts |
1 | 0 | 0 |
attr |
GraniteMoeSharedConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
GraniteMoeSharedConfig.output_router_logits |
1 | 0 | 0 |
attr |
GraniteMoeSharedConfig.router_aux_loss_coef |
1 | 0 | 0 |
attr |
GraniteMoeSharedConfig.shared_intermediate_size |
1 | 0 | 0 |
attr |
GraniteMoeSharedConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
GraniteMoeSharedConfig.pad_token_id |
1 | 0 | 0 |
attr |
GraniteMoeSharedConfig.bos_token_id |
1 | 0 | 0 |
attr |
GraniteMoeSharedConfig.eos_token_id |
1 | 0 | 0 |
attr |
GraniteMoeSharedConfig.position_embedding_type |
1 | 0 | 0 |
attr |
GraniteMoeSharedConfig.rope_parameters |
1 | 0 | 0 |
transformers.models.granitemoeshared.modeling_granitemoeshared (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GraniteMoeSharedForCausalLM.__init__ |
2 | 1 | 0 |
meth |
GraniteMoeSharedForCausalLM.forward |
11 | 10 | 0 |
attr |
GraniteMoeSharedForCausalLM.model |
1 | 0 | 0 |
attr |
GraniteMoeSharedForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
GraniteMoeSharedForCausalLM.lm_head |
1 | 0 | 0 |
attr |
GraniteMoeSharedForCausalLM.router_aux_loss_coef |
1 | 0 | 0 |
attr |
GraniteMoeSharedForCausalLM.num_experts |
1 | 0 | 0 |
attr |
GraniteMoeSharedForCausalLM.num_experts_per_tok |
1 | 0 | 0 |
attr |
GraniteMoeSharedForCausalLM.logits_scaling |
1 | 0 | 0 |
meth |
GraniteMoeSharedModel.__init__ |
2 | 1 | 0 |
attr |
GraniteMoeSharedModel.padding_idx |
1 | 0 | 0 |
attr |
GraniteMoeSharedModel.vocab_size |
1 | 0 | 0 |
attr |
GraniteMoeSharedModel.embed_tokens |
1 | 0 | 0 |
attr |
GraniteMoeSharedModel.layers |
1 | 0 | 0 |
attr |
GraniteMoeSharedModel.norm |
1 | 0 | 0 |
attr |
GraniteMoeSharedModel.rotary_emb |
1 | 0 | 0 |
attr |
GraniteMoeSharedModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
GraniteMoeSharedModel.embedding_multiplier |
1 | 0 | 0 |
meth |
GraniteMoeSharedPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.granitemoeshared.modular_granitemoeshared (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GraniteMoeSharedForCausalLM.__init__ |
2 | 1 | 0 |
attr |
GraniteMoeSharedForCausalLM.model |
1 | 0 | 0 |
meth |
GraniteMoeSharedModel.__init__ |
2 | 1 | 0 |
attr |
GraniteMoeSharedModel.layers |
1 | 0 | 0 |
transformers.models.grounding_dino.configuration_grounding_dino (81 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GroundingDinoConfig.__init__ |
42 | 0 | 0 |
attr |
GroundingDinoConfig.backbone_config |
1 | 0 | 0 |
attr |
GroundingDinoConfig.num_queries |
1 | 0 | 0 |
attr |
GroundingDinoConfig.d_model |
1 | 0 | 0 |
attr |
GroundingDinoConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
GroundingDinoConfig.encoder_layers |
1 | 0 | 0 |
attr |
GroundingDinoConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
GroundingDinoConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
GroundingDinoConfig.decoder_layers |
1 | 0 | 0 |
attr |
GroundingDinoConfig.decoder_attention_heads |
1 | 0 | 0 |
attr |
GroundingDinoConfig.dropout |
1 | 0 | 0 |
attr |
GroundingDinoConfig.attention_dropout |
1 | 0 | 0 |
attr |
GroundingDinoConfig.activation_dropout |
1 | 0 | 0 |
attr |
GroundingDinoConfig.activation_function |
1 | 0 | 0 |
attr |
GroundingDinoConfig.auxiliary_loss |
1 | 0 | 0 |
attr |
GroundingDinoConfig.position_embedding_type |
1 | 0 | 0 |
attr |
GroundingDinoConfig.num_feature_levels |
1 | 0 | 0 |
attr |
GroundingDinoConfig.encoder_n_points |
1 | 0 | 0 |
attr |
GroundingDinoConfig.decoder_n_points |
1 | 0 | 0 |
attr |
GroundingDinoConfig.two_stage |
1 | 0 | 0 |
attr |
GroundingDinoConfig.class_cost |
1 | 0 | 0 |
attr |
GroundingDinoConfig.bbox_cost |
1 | 0 | 0 |
attr |
GroundingDinoConfig.giou_cost |
1 | 0 | 0 |
attr |
GroundingDinoConfig.bbox_loss_coefficient |
1 | 0 | 0 |
attr |
GroundingDinoConfig.giou_loss_coefficient |
1 | 0 | 0 |
attr |
GroundingDinoConfig.focal_alpha |
1 | 0 | 0 |
attr |
GroundingDinoConfig.disable_custom_kernels |
1 | 0 | 0 |
attr |
GroundingDinoConfig.text_config |
1 | 0 | 0 |
attr |
GroundingDinoConfig.max_text_len |
1 | 0 | 0 |
attr |
GroundingDinoConfig.text_enhancer_dropout |
1 | 0 | 0 |
attr |
GroundingDinoConfig.fusion_droppath |
1 | 0 | 0 |
attr |
GroundingDinoConfig.fusion_dropout |
1 | 0 | 0 |
attr |
GroundingDinoConfig.embedding_init_target |
1 | 0 | 0 |
attr |
GroundingDinoConfig.query_dim |
1 | 0 | 0 |
attr |
GroundingDinoConfig.decoder_bbox_embed_share |
1 | 0 | 0 |
attr |
GroundingDinoConfig.two_stage_bbox_embed_share |
1 | 0 | 0 |
attr |
GroundingDinoConfig.positional_embedding_temperature |
1 | 0 | 0 |
attr |
GroundingDinoConfig.init_std |
1 | 0 | 0 |
attr |
GroundingDinoConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
GroundingDinoConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.grounding_dino.image_processing_grounding_dino (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GroundingDinoImageProcessor.__init__ |
14 | 13 | 0 |
meth |
GroundingDinoImageProcessor.resize |
7 | 6 | 0 |
meth |
GroundingDinoImageProcessor.resize_annotation |
5 | 2 | 0 |
meth |
GroundingDinoImageProcessor._update_annotation_for_padded_image |
6 | 4 | 0 |
meth |
GroundingDinoImageProcessor.preprocess |
21 | 19 | 0 |
meth |
GroundingDinoImageProcessor.post_process_object_detection |
4 | 3 | 0 |
attr |
GroundingDinoImageProcessor.format |
1 | 0 | 0 |
attr |
GroundingDinoImageProcessor.do_resize |
1 | 0 | 0 |
attr |
GroundingDinoImageProcessor.size |
1 | 0 | 0 |
attr |
GroundingDinoImageProcessor.resample |
1 | 0 | 0 |
attr |
GroundingDinoImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
GroundingDinoImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
GroundingDinoImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
GroundingDinoImageProcessor.do_convert_annotations |
1 | 0 | 0 |
attr |
GroundingDinoImageProcessor.image_mean |
1 | 0 | 0 |
attr |
GroundingDinoImageProcessor.image_std |
1 | 0 | 0 |
attr |
GroundingDinoImageProcessor.do_pad |
1 | 0 | 0 |
attr |
GroundingDinoImageProcessor.pad_size |
1 | 0 | 0 |
transformers.models.grounding_dino.image_processing_grounding_dino_fast (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GroundingDinoImageProcessorFast.resize |
5 | 4 | 0 |
meth |
GroundingDinoImageProcessorFast.resize_annotation |
6 | 5 | 0 |
meth |
GroundingDinoImageProcessorFast._update_annotation_for_padded_image |
6 | 4 | 0 |
meth |
GroundingDinoImageProcessorFast.pad |
6 | 5 | 0 |
meth |
GroundingDinoImageProcessorFast._preprocess |
19 | 18 | 0 |
meth |
GroundingDinoImageProcessorFast.post_process_object_detection |
4 | 3 | 0 |
attr |
GroundingDinoImageProcessorFast.size |
1 | 0 | 0 |
attr |
GroundingDinoImageProcessorFast.do_convert_annotations |
1 | 0 | 0 |
transformers.models.grounding_dino.modeling_grounding_dino (38 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GroundingDinoPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
GroundingDinoPreTrainedModel._set_gradient_checkpointing |
3 | 0 | 0 |
meth |
GroundingDinoForObjectDetection.__init__ |
2 | 1 | 0 |
meth |
GroundingDinoForObjectDetection.forward |
12 | 10 | 0 |
attr |
GroundingDinoForObjectDetection.model |
1 | 0 | 0 |
attr |
GroundingDinoForObjectDetection.bbox_embed |
1 | 0 | 0 |
attr |
GroundingDinoForObjectDetection.class_embed |
1 | 0 | 0 |
meth |
GroundingDinoModel.__init__ |
2 | 1 | 0 |
meth |
GroundingDinoModel.freeze_backbone |
1 | 0 | 0 |
meth |
GroundingDinoModel.unfreeze_backbone |
1 | 0 | 0 |
meth |
GroundingDinoModel.get_valid_ratio |
2 | 0 | 0 |
meth |
GroundingDinoModel.generate_encoder_output_proposals |
4 | 0 | 0 |
meth |
GroundingDinoModel.forward |
11 | 6 | 0 |
attr |
GroundingDinoModel.backbone |
1 | 0 | 0 |
attr |
GroundingDinoModel.text_backbone |
1 | 0 | 0 |
attr |
GroundingDinoModel.text_projection |
1 | 0 | 0 |
attr |
GroundingDinoModel.encoder |
1 | 0 | 0 |
attr |
GroundingDinoModel.decoder |
1 | 0 | 0 |
attr |
GroundingDinoModel.level_embed |
1 | 0 | 0 |
attr |
GroundingDinoModel.input_proj_vision |
1 | 0 | 0 |
attr |
GroundingDinoModel.query_position_embeddings |
1 | 0 | 0 |
attr |
GroundingDinoModel.enc_output |
1 | 0 | 0 |
attr |
GroundingDinoModel.enc_output_norm |
1 | 0 | 0 |
attr |
GroundingDinoModel.encoder_output_class_embed |
1 | 0 | 0 |
attr |
GroundingDinoModel.reference_points |
1 | 0 | 0 |
attr |
GroundingDinoModel.encoder_output_bbox_embed |
1 | 0 | 0 |
transformers.models.grounding_dino.modular_grounding_dino (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GroundingDinoImageProcessorFast.post_process_object_detection |
4 | 3 | 0 |
meth |
GroundingDinoImageProcessorFast.post_process_instance_segmentation |
1 | 0 | 0 |
meth |
GroundingDinoImageProcessorFast.post_process_semantic_segmentation |
1 | 0 | 0 |
meth |
GroundingDinoImageProcessorFast.post_process_panoptic_segmentation |
1 | 0 | 0 |
transformers.models.grounding_dino.processing_grounding_dino (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GroundingDinoProcessor.__init__ |
3 | 0 | 0 |
meth |
GroundingDinoProcessor._preprocess_input_text |
2 | 0 | 0 |
meth |
GroundingDinoProcessor.post_process_grounded_object_detection |
7 | 6 | 0 |
transformers.models.groupvit.configuration_groupvit (85 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GroupViTConfig.__init__ |
7 | 0 | 0 |
attr |
GroupViTConfig.text_config |
1 | 0 | 0 |
attr |
GroupViTConfig.vision_config |
1 | 0 | 0 |
attr |
GroupViTConfig.projection_dim |
1 | 0 | 0 |
attr |
GroupViTConfig.projection_intermediate_dim |
1 | 0 | 0 |
attr |
GroupViTConfig.logit_scale_init_value |
1 | 0 | 0 |
attr |
GroupViTConfig.initializer_range |
1 | 0 | 0 |
attr |
GroupViTConfig.initializer_factor |
1 | 0 | 0 |
attr |
GroupViTConfig.output_segmentation |
1 | 0 | 0 |
meth |
GroupViTVisionConfig.__init__ |
20 | 0 | 0 |
attr |
GroupViTVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
GroupViTVisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
GroupViTVisionConfig.depths |
1 | 0 | 0 |
attr |
GroupViTVisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
GroupViTVisionConfig.num_group_tokens |
1 | 0 | 0 |
attr |
GroupViTVisionConfig.num_output_groups |
1 | 0 | 0 |
attr |
GroupViTVisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
GroupViTVisionConfig.image_size |
1 | 0 | 0 |
attr |
GroupViTVisionConfig.patch_size |
1 | 0 | 0 |
attr |
GroupViTVisionConfig.num_channels |
1 | 0 | 0 |
attr |
GroupViTVisionConfig.hidden_act |
1 | 0 | 0 |
attr |
GroupViTVisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
GroupViTVisionConfig.dropout |
1 | 0 | 0 |
attr |
GroupViTVisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
GroupViTVisionConfig.initializer_range |
1 | 0 | 0 |
attr |
GroupViTVisionConfig.initializer_factor |
1 | 0 | 0 |
attr |
GroupViTVisionConfig.assign_eps |
1 | 0 | 0 |
attr |
GroupViTVisionConfig.assign_mlp_ratio |
1 | 0 | 0 |
meth |
GroupViTTextConfig.__init__ |
17 | 0 | 0 |
attr |
GroupViTTextConfig.pad_token_id |
1 | 0 | 0 |
attr |
GroupViTTextConfig.bos_token_id |
1 | 0 | 0 |
attr |
GroupViTTextConfig.eos_token_id |
1 | 0 | 0 |
attr |
GroupViTTextConfig.vocab_size |
1 | 0 | 0 |
attr |
GroupViTTextConfig.hidden_size |
1 | 0 | 0 |
attr |
GroupViTTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
GroupViTTextConfig.dropout |
1 | 0 | 0 |
attr |
GroupViTTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
GroupViTTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
GroupViTTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
GroupViTTextConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
GroupViTTextConfig.hidden_act |
1 | 0 | 0 |
attr |
GroupViTTextConfig.initializer_range |
1 | 0 | 0 |
attr |
GroupViTTextConfig.initializer_factor |
1 | 0 | 0 |
attr |
GroupViTTextConfig.attention_dropout |
1 | 0 | 0 |
transformers.models.groupvit.modeling_groupvit (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
GroupViTTextModel.__init__ |
2 | 1 | 0 |
meth |
GroupViTTextModel.set_input_embeddings |
2 | 0 | 0 |
meth |
GroupViTTextModel.forward |
8 | 7 | 0 |
attr |
GroupViTTextModel.text_model |
1 | 0 | 0 |
meth |
GroupViTPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
GroupViTVisionModel.__init__ |
2 | 1 | 0 |
meth |
GroupViTVisionModel.forward |
6 | 5 | 0 |
attr |
GroupViTVisionModel.vision_model |
1 | 0 | 0 |
meth |
GroupViTModel.__init__ |
2 | 1 | 0 |
meth |
GroupViTModel.forward |
11 | 10 | 0 |
attr |
GroupViTModel.projection_dim |
1 | 0 | 0 |
attr |
GroupViTModel.projection_intermediate_dim |
1 | 0 | 0 |
attr |
GroupViTModel.text_embed_dim |
1 | 0 | 0 |
attr |
GroupViTModel.vision_embed_dim |
1 | 0 | 0 |
attr |
GroupViTModel.text_model |
1 | 0 | 0 |
attr |
GroupViTModel.vision_model |
1 | 0 | 0 |
attr |
GroupViTModel.visual_projection |
1 | 0 | 0 |
attr |
GroupViTModel.text_projection |
1 | 0 | 0 |
attr |
GroupViTModel.logit_scale |
1 | 0 | 0 |
transformers.models.helium.configuration_helium (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HeliumConfig.__init__ |
22 | 20 | 0 |
attr |
HeliumConfig.vocab_size |
1 | 0 | 0 |
attr |
HeliumConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
HeliumConfig.hidden_size |
1 | 0 | 0 |
attr |
HeliumConfig.intermediate_size |
1 | 0 | 0 |
attr |
HeliumConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
HeliumConfig.num_attention_heads |
1 | 0 | 0 |
attr |
HeliumConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
HeliumConfig.head_dim |
1 | 0 | 0 |
attr |
HeliumConfig.hidden_act |
1 | 0 | 0 |
attr |
HeliumConfig.initializer_range |
1 | 0 | 0 |
attr |
HeliumConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
HeliumConfig.use_cache |
1 | 0 | 0 |
attr |
HeliumConfig.attention_bias |
1 | 0 | 0 |
attr |
HeliumConfig.attention_dropout |
1 | 0 | 0 |
attr |
HeliumConfig.mlp_bias |
1 | 0 | 0 |
attr |
HeliumConfig.rope_parameters |
1 | 0 | 0 |
attr |
HeliumConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
HeliumConfig.pad_token_id |
1 | 0 | 0 |
attr |
HeliumConfig.bos_token_id |
1 | 0 | 0 |
attr |
HeliumConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.helium.modeling_helium (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HeliumForCausalLM.__init__ |
2 | 0 | 0 |
attr |
HeliumForCausalLM.model |
1 | 0 | 0 |
attr |
HeliumForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
HeliumForCausalLM.lm_head |
1 | 0 | 0 |
meth |
HeliumModel.__init__ |
2 | 1 | 0 |
attr |
HeliumModel.padding_idx |
1 | 0 | 0 |
attr |
HeliumModel.vocab_size |
1 | 0 | 0 |
attr |
HeliumModel.embed_tokens |
1 | 0 | 0 |
attr |
HeliumModel.layers |
1 | 0 | 0 |
attr |
HeliumModel.norm |
1 | 0 | 0 |
attr |
HeliumModel.rotary_emb |
1 | 0 | 0 |
attr |
HeliumModel.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.helium.modular_helium (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HeliumModel.__init__ |
2 | 1 | 0 |
attr |
HeliumModel.layers |
1 | 0 | 0 |
attr |
HeliumModel.norm |
1 | 0 | 0 |
attr |
HeliumModel.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.herbert.tokenization_herbert (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HerbertTokenizer.__init__ |
11 | 9 | 0 |
transformers.models.hgnet_v2.configuration_hgnet_v2 (37 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HGNetV2Config.__init__ |
20 | 0 | 0 |
attr |
HGNetV2Config.num_channels |
1 | 0 | 0 |
attr |
HGNetV2Config.embedding_size |
1 | 0 | 0 |
attr |
HGNetV2Config.depths |
1 | 0 | 0 |
attr |
HGNetV2Config.hidden_sizes |
1 | 0 | 0 |
attr |
HGNetV2Config.hidden_act |
1 | 0 | 0 |
attr |
HGNetV2Config.stage_names |
1 | 0 | 0 |
attr |
HGNetV2Config.stem_channels |
1 | 0 | 0 |
attr |
HGNetV2Config.stage_in_channels |
1 | 0 | 0 |
attr |
HGNetV2Config.stage_mid_channels |
1 | 0 | 0 |
attr |
HGNetV2Config.stage_out_channels |
1 | 0 | 0 |
attr |
HGNetV2Config.stage_num_blocks |
1 | 0 | 0 |
attr |
HGNetV2Config.stage_downsample |
1 | 0 | 0 |
attr |
HGNetV2Config.stage_light_block |
1 | 0 | 0 |
attr |
HGNetV2Config.stage_kernel_size |
1 | 0 | 0 |
attr |
HGNetV2Config.stage_numb_of_layers |
1 | 0 | 0 |
attr |
HGNetV2Config.use_learnable_affine_block |
1 | 0 | 0 |
attr |
HGNetV2Config.initializer_range |
1 | 0 | 0 |
transformers.models.hgnet_v2.modeling_hgnet_v2 (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HGNetV2Backbone.__init__ |
2 | 1 | 0 |
meth |
HGNetV2Backbone.forward |
5 | 4 | 0 |
attr |
HGNetV2Backbone.depths |
1 | 0 | 0 |
attr |
HGNetV2Backbone.num_features |
1 | 0 | 0 |
attr |
HGNetV2Backbone.embedder |
1 | 0 | 0 |
attr |
HGNetV2Backbone.encoder |
1 | 0 | 0 |
meth |
HGNetV2PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
HGNetV2ForImageClassification.__init__ |
2 | 1 | 0 |
meth |
HGNetV2ForImageClassification.forward |
6 | 5 | 0 |
attr |
HGNetV2ForImageClassification.num_labels |
1 | 0 | 0 |
attr |
HGNetV2ForImageClassification.embedder |
1 | 0 | 0 |
attr |
HGNetV2ForImageClassification.encoder |
1 | 0 | 0 |
attr |
HGNetV2ForImageClassification.avg_pool |
1 | 0 | 0 |
attr |
HGNetV2ForImageClassification.flatten |
1 | 0 | 0 |
attr |
HGNetV2ForImageClassification.fc |
1 | 0 | 0 |
attr |
HGNetV2ForImageClassification.classifier |
1 | 0 | 0 |
transformers.models.hgnet_v2.modular_hgnet_v2 (54 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HGNetV2Backbone.__init__ |
2 | 1 | 0 |
meth |
HGNetV2Backbone.forward |
5 | 4 | 0 |
attr |
HGNetV2Backbone.depths |
1 | 0 | 0 |
attr |
HGNetV2Backbone.num_features |
1 | 0 | 0 |
attr |
HGNetV2Backbone.embedder |
1 | 0 | 0 |
attr |
HGNetV2Backbone.encoder |
1 | 0 | 0 |
meth |
HGNetV2Config.__init__ |
20 | 0 | 0 |
attr |
HGNetV2Config.num_channels |
1 | 0 | 0 |
attr |
HGNetV2Config.embedding_size |
1 | 0 | 0 |
attr |
HGNetV2Config.depths |
1 | 0 | 0 |
attr |
HGNetV2Config.hidden_sizes |
1 | 0 | 0 |
attr |
HGNetV2Config.hidden_act |
1 | 0 | 0 |
attr |
HGNetV2Config.stage_names |
1 | 0 | 0 |
attr |
HGNetV2Config.stem_channels |
1 | 0 | 0 |
attr |
HGNetV2Config.stage_in_channels |
1 | 0 | 0 |
attr |
HGNetV2Config.stage_mid_channels |
1 | 0 | 0 |
attr |
HGNetV2Config.stage_out_channels |
1 | 0 | 0 |
attr |
HGNetV2Config.stage_num_blocks |
1 | 0 | 0 |
attr |
HGNetV2Config.stage_downsample |
1 | 0 | 0 |
attr |
HGNetV2Config.stage_light_block |
1 | 0 | 0 |
attr |
HGNetV2Config.stage_kernel_size |
1 | 0 | 0 |
attr |
HGNetV2Config.stage_numb_of_layers |
1 | 0 | 0 |
attr |
HGNetV2Config.use_learnable_affine_block |
1 | 0 | 0 |
attr |
HGNetV2Config.initializer_range |
1 | 0 | 0 |
meth |
HGNetV2PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
HGNetV2ForImageClassification.__init__ |
2 | 1 | 0 |
meth |
HGNetV2ForImageClassification.forward |
6 | 5 | 0 |
attr |
HGNetV2ForImageClassification.num_labels |
1 | 0 | 0 |
attr |
HGNetV2ForImageClassification.embedder |
1 | 0 | 0 |
attr |
HGNetV2ForImageClassification.encoder |
1 | 0 | 0 |
attr |
HGNetV2ForImageClassification.avg_pool |
1 | 0 | 0 |
attr |
HGNetV2ForImageClassification.flatten |
1 | 0 | 0 |
attr |
HGNetV2ForImageClassification.fc |
1 | 0 | 0 |
attr |
HGNetV2ForImageClassification.classifier |
1 | 0 | 0 |
transformers.models.hiera.configuration_hiera (55 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HieraConfig.__init__ |
28 | 0 | 0 |
attr |
HieraConfig.embed_dim |
1 | 0 | 0 |
attr |
HieraConfig.image_size |
1 | 0 | 0 |
attr |
HieraConfig.patch_size |
1 | 0 | 0 |
attr |
HieraConfig.patch_stride |
1 | 0 | 0 |
attr |
HieraConfig.patch_padding |
1 | 0 | 0 |
attr |
HieraConfig.mlp_ratio |
1 | 0 | 0 |
attr |
HieraConfig.depths |
1 | 0 | 0 |
attr |
HieraConfig.num_heads |
1 | 0 | 0 |
attr |
HieraConfig.num_layers |
1 | 0 | 0 |
attr |
HieraConfig.embed_dim_multiplier |
1 | 0 | 0 |
attr |
HieraConfig.num_query_pool |
1 | 0 | 0 |
attr |
HieraConfig.query_stride |
1 | 0 | 0 |
attr |
HieraConfig.masked_unit_size |
1 | 0 | 0 |
attr |
HieraConfig.masked_unit_attention |
1 | 0 | 0 |
attr |
HieraConfig.drop_path_rate |
1 | 0 | 0 |
attr |
HieraConfig.num_channels |
1 | 0 | 0 |
attr |
HieraConfig.hidden_act |
1 | 0 | 0 |
attr |
HieraConfig.initializer_range |
1 | 0 | 0 |
attr |
HieraConfig.layer_norm_init |
1 | 0 | 0 |
attr |
HieraConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
HieraConfig.decoder_hidden_size |
1 | 0 | 0 |
attr |
HieraConfig.decoder_depth |
1 | 0 | 0 |
attr |
HieraConfig.decoder_num_heads |
1 | 0 | 0 |
attr |
HieraConfig.normalize_pixel_loss |
1 | 0 | 0 |
attr |
HieraConfig.mask_ratio |
1 | 0 | 0 |
attr |
HieraConfig.hidden_size |
1 | 0 | 0 |
attr |
HieraConfig.stage_names |
1 | 0 | 0 |
transformers.models.hiera.modeling_hiera (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HieraForImageClassification.forward |
8 | 6 | 0 |
attr |
HieraForImageClassification.num_labels |
1 | 0 | 0 |
attr |
HieraForImageClassification.hiera |
1 | 0 | 0 |
attr |
HieraForImageClassification.classifier |
1 | 0 | 0 |
meth |
HieraBackbone.__init__ |
2 | 1 | 0 |
meth |
HieraBackbone.get_input_embeddings |
1 | 0 | 0 |
meth |
HieraBackbone.forward |
6 | 5 | 0 |
attr |
HieraBackbone.num_features |
1 | 0 | 0 |
attr |
HieraBackbone.embeddings |
1 | 0 | 0 |
attr |
HieraBackbone.encoder |
1 | 0 | 0 |
attr |
HieraBackbone.hidden_states_norms |
1 | 0 | 0 |
meth |
HieraModel.__init__ |
4 | 3 | 0 |
meth |
HieraModel.forward |
8 | 7 | 0 |
attr |
HieraModel.num_features |
1 | 0 | 0 |
attr |
HieraModel.embeddings |
1 | 0 | 0 |
attr |
HieraModel.encoder |
1 | 0 | 0 |
attr |
HieraModel.unroll_schedule |
1 | 0 | 0 |
attr |
HieraModel.pooler |
1 | 0 | 0 |
meth |
HieraForPreTraining.forward_loss |
4 | 3 | 0 |
meth |
HieraForPreTraining.forward |
8 | 7 | 0 |
attr |
HieraForPreTraining.hiera |
1 | 0 | 0 |
attr |
HieraForPreTraining.encoder_norm |
1 | 0 | 0 |
attr |
HieraForPreTraining.multiscale_fusion |
1 | 0 | 0 |
attr |
HieraForPreTraining.decoder |
1 | 0 | 0 |
attr |
HieraForPreTraining.pred_stride |
1 | 0 | 0 |
meth |
HieraPreTrainedModel._init_weights |
2 | 1 | 0 |
transformers.models.higgs_audio_v2.configuration_higgs_audio_v2 (58 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HiggsAudioV2Config.__init__ |
30 | 0 | 0 |
attr |
HiggsAudioV2Config.vocab_size |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.max_position_embeddings |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.hidden_size |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.intermediate_size |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.num_hidden_layers |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.num_attention_heads |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.num_key_value_heads |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.hidden_act |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.initializer_range |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.rms_norm_eps |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.pretraining_tp |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.use_cache |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.attention_bias |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.attention_dropout |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.mlp_bias |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.head_dim |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.rope_parameters |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.pad_token_id |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.bos_token_id |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.eos_token_id |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.num_codebooks |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.codebook_size |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.audio_token_id |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.audio_bos_token_id |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.audio_delay_token_id |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.audio_stream_bos_id |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.audio_stream_eos_id |
1 | 0 | 0 |
transformers.models.higgs_audio_v2.generation_higgs_audio_v2 (17 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HiggsAudioV2DelayPatternLogitsProcessor.__init__ |
8 | 7 | 0 |
attr |
HiggsAudioV2DelayPatternLogitsProcessor.delay_pattern |
1 | 0 | 0 |
attr |
HiggsAudioV2DelayPatternLogitsProcessor.audio_bos_token_id |
1 | 0 | 0 |
attr |
HiggsAudioV2DelayPatternLogitsProcessor.audio_eos_token_id |
1 | 0 | 0 |
attr |
HiggsAudioV2DelayPatternLogitsProcessor.audio_stream_bos_id |
1 | 0 | 0 |
attr |
HiggsAudioV2DelayPatternLogitsProcessor.audio_stream_eos_id |
1 | 0 | 0 |
attr |
HiggsAudioV2DelayPatternLogitsProcessor.num_codebooks |
1 | 0 | 0 |
attr |
HiggsAudioV2DelayPatternLogitsProcessor.codebook_size |
1 | 0 | 0 |
attr |
HiggsAudioV2DelayPatternLogitsProcessor.bos_delay_pattern |
1 | 0 | 0 |
attr |
HiggsAudioV2DelayPatternLogitsProcessor.eos_delay_pattern |
1 | 0 | 0 |
attr |
HiggsAudioV2DelayPatternLogitsProcessor.vocab_mask_bos |
1 | 0 | 0 |
attr |
HiggsAudioV2DelayPatternLogitsProcessor.vocab_mask_eos |
1 | 0 | 0 |
meth |
HiggsAudioV2GenerationMixin._get_logits_processor |
3 | 0 | 0 |
meth |
HiggsAudioV2GenerationMixin._prepare_generation_config |
3 | 3 | 1 |
meth |
HiggsAudioV2GenerationMixin._sample |
8 | 7 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.models.higgs_audio_v2.modeling_higgs_audio_v2 (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HiggsAudioV2ForConditionalGeneration.__init__ |
3 | 2 | 0 |
meth |
HiggsAudioV2ForConditionalGeneration.prepare_inputs_for_generation |
5 | 3 | 0 |
meth |
HiggsAudioV2ForConditionalGeneration.forward |
14 | 13 | 0 |
attr |
HiggsAudioV2ForConditionalGeneration.model |
1 | 0 | 0 |
attr |
HiggsAudioV2ForConditionalGeneration.audio_lm_head |
1 | 0 | 0 |
attr |
HiggsAudioV2ForConditionalGeneration.text_lm_head |
1 | 0 | 0 |
meth |
HiggsAudioV2PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
HiggsAudioV2Model.__init__ |
2 | 1 | 0 |
meth |
HiggsAudioV2Model.get_placeholder_mask |
4 | 3 | 0 |
attr |
HiggsAudioV2Model.padding_idx |
1 | 0 | 0 |
attr |
HiggsAudioV2Model.vocab_size |
1 | 0 | 0 |
attr |
HiggsAudioV2Model.embed_tokens |
1 | 0 | 0 |
attr |
HiggsAudioV2Model.layers |
1 | 0 | 0 |
attr |
HiggsAudioV2Model.norm |
1 | 0 | 0 |
attr |
HiggsAudioV2Model.rotary_emb |
1 | 0 | 0 |
attr |
HiggsAudioV2Model.gradient_checkpointing |
1 | 0 | 0 |
attr |
HiggsAudioV2Model.embed_audio_tokens |
1 | 0 | 0 |
transformers.models.higgs_audio_v2.modular_higgs_audio_v2 (49 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HiggsAudioV2Config.__init__ |
30 | 0 | 0 |
attr |
HiggsAudioV2Config.num_codebooks |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.codebook_size |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.audio_token_id |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.audio_bos_token_id |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.audio_delay_token_id |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.audio_stream_bos_id |
1 | 0 | 0 |
attr |
HiggsAudioV2Config.audio_stream_eos_id |
1 | 0 | 0 |
meth |
HiggsAudioV2ForConditionalGeneration.__init__ |
3 | 2 | 0 |
meth |
HiggsAudioV2ForConditionalGeneration.prepare_inputs_for_generation |
5 | 3 | 0 |
meth |
HiggsAudioV2ForConditionalGeneration.forward |
14 | 13 | 0 |
attr |
HiggsAudioV2ForConditionalGeneration.model |
1 | 0 | 0 |
attr |
HiggsAudioV2ForConditionalGeneration.audio_lm_head |
1 | 0 | 0 |
attr |
HiggsAudioV2ForConditionalGeneration.text_lm_head |
1 | 0 | 0 |
meth |
HiggsAudioV2PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
HiggsAudioV2Model.__init__ |
2 | 1 | 0 |
meth |
HiggsAudioV2Model.get_placeholder_mask |
4 | 3 | 0 |
attr |
HiggsAudioV2Model.embed_audio_tokens |
1 | 0 | 0 |
transformers.models.higgs_audio_v2.processing_higgs_audio_v2 (34 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HiggsAudioV2Processor.__init__ |
11 | 0 | 0 |
meth |
HiggsAudioV2Processor.get_audio_tokens |
2 | 0 | 0 |
meth |
HiggsAudioV2Processor.__call__ |
5 | 4 | 0 |
meth |
HiggsAudioV2Processor.batch_decode |
2 | 0 | 0 |
meth |
HiggsAudioV2Processor.decode |
2 | 0 | 0 |
meth |
HiggsAudioV2Processor.build_delay_pattern |
2 | 0 | 0 |
meth |
HiggsAudioV2Processor.revert_delay_pattern |
2 | 0 | 0 |
meth |
HiggsAudioV2Processor.save_audio |
4 | 3 | 0 |
prop |
HiggsAudioV2Processor.model_input_names |
1 | 0 | 0 |
attr |
HiggsAudioV2Processor.audio_token |
1 | 0 | 0 |
attr |
HiggsAudioV2Processor.audio_bos_token |
1 | 0 | 0 |
attr |
HiggsAudioV2Processor.audio_eos_token |
1 | 0 | 0 |
attr |
HiggsAudioV2Processor.audio_delay_token |
1 | 0 | 0 |
attr |
HiggsAudioV2Processor.audio_token_id |
1 | 0 | 0 |
attr |
HiggsAudioV2Processor.audio_bos_token_id |
1 | 0 | 0 |
attr |
HiggsAudioV2Processor.audio_eos_token_id |
1 | 0 | 0 |
attr |
HiggsAudioV2Processor.audio_delay_token_id |
1 | 0 | 0 |
attr |
HiggsAudioV2Processor.audio_stream_bos_id |
1 | 0 | 0 |
attr |
HiggsAudioV2Processor.audio_stream_eos_id |
1 | 0 | 0 |
transformers.models.higgs_audio_v2_tokenizer.configuration_higgs_audio_v2_tokenizer (31 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HiggsAudioV2TokenizerConfig.__init__ |
16 | 0 | 0 |
prop |
HiggsAudioV2TokenizerConfig.semantic_downsample_factor |
1 | 0 | 0 |
attr |
HiggsAudioV2TokenizerConfig.acoustic_model_config |
1 | 0 | 0 |
attr |
HiggsAudioV2TokenizerConfig.semantic_model_config |
1 | 0 | 0 |
attr |
HiggsAudioV2TokenizerConfig.target_bandwidths |
1 | 0 | 0 |
attr |
HiggsAudioV2TokenizerConfig.sample_rate |
1 | 0 | 0 |
attr |
HiggsAudioV2TokenizerConfig.kernel_size |
1 | 0 | 0 |
attr |
HiggsAudioV2TokenizerConfig.channel_ratios |
1 | 0 | 0 |
attr |
HiggsAudioV2TokenizerConfig.strides |
1 | 0 | 0 |
attr |
HiggsAudioV2TokenizerConfig.block_dilations |
1 | 0 | 0 |
attr |
HiggsAudioV2TokenizerConfig.unit_kernel_size |
1 | 0 | 0 |
attr |
HiggsAudioV2TokenizerConfig.codebook_size |
1 | 0 | 0 |
attr |
HiggsAudioV2TokenizerConfig.initializer_range |
1 | 0 | 0 |
attr |
HiggsAudioV2TokenizerConfig.codebook_dim |
1 | 0 | 0 |
attr |
HiggsAudioV2TokenizerConfig.semantic_sample_rate |
1 | 0 | 0 |
attr |
HiggsAudioV2TokenizerConfig.downsample_factor |
1 | 0 | 0 |
transformers.models.higgs_audio_v2_tokenizer.modeling_higgs_audio_v2_tokenizer (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HiggsAudioV2TokenizerModel.__init__ |
2 | 0 | 0 |
meth |
HiggsAudioV2TokenizerModel._adjust_dac_decoder |
2 | 1 | 0 |
attr |
HiggsAudioV2TokenizerModel.config |
1 | 0 | 0 |
attr |
HiggsAudioV2TokenizerModel.pad |
1 | 0 | 0 |
attr |
HiggsAudioV2TokenizerModel.acoustic_encoder |
1 | 0 | 0 |
attr |
HiggsAudioV2TokenizerModel.acoustic_decoder |
1 | 0 | 0 |
attr |
HiggsAudioV2TokenizerModel.encoder_semantic |
1 | 0 | 0 |
attr |
HiggsAudioV2TokenizerModel.decoder_semantic |
1 | 0 | 0 |
attr |
HiggsAudioV2TokenizerModel.semantic_model |
1 | 0 | 0 |
attr |
HiggsAudioV2TokenizerModel.fc |
1 | 0 | 0 |
attr |
HiggsAudioV2TokenizerModel.fc1 |
1 | 0 | 0 |
attr |
HiggsAudioV2TokenizerModel.fc2 |
1 | 0 | 0 |
attr |
HiggsAudioV2TokenizerModel.quantizer |
1 | 0 | 0 |
meth |
HiggsAudioV2TokenizerPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
HiggsAudioV2TokenizerPreTrainedModel.apply_weight_norm |
1 | 0 | 0 |
meth |
HiggsAudioV2TokenizerPreTrainedModel.remove_weight_norm |
1 | 0 | 0 |
meth |
HiggsAudioV2TokenizerPreTrainedModel._get_conv1d_layers |
2 | 0 | 0 |
meth |
HiggsAudioV2TokenizerPreTrainedModel._get_conv1d_output_lengths |
3 | 0 | 0 |
transformers.models.higgs_audio_v2_tokenizer.modular_higgs_audio_v2_tokenizer (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HiggsAudioV2TokenizerConfig.__init__ |
16 | 0 | 0 |
prop |
HiggsAudioV2TokenizerConfig.semantic_downsample_factor |
1 | 0 | 0 |
attr |
HiggsAudioV2TokenizerConfig.semantic_sample_rate |
1 | 0 | 0 |
attr |
HiggsAudioV2TokenizerConfig.downsample_factor |
1 | 0 | 0 |
transformers.models.hubert.configuration_hubert (82 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HubertConfig.__init__ |
41 | 0 | 0 |
prop |
HubertConfig.inputs_to_logits_ratio |
1 | 0 | 0 |
attr |
HubertConfig.pad_token_id |
1 | 0 | 0 |
attr |
HubertConfig.bos_token_id |
1 | 0 | 0 |
attr |
HubertConfig.eos_token_id |
1 | 0 | 0 |
attr |
HubertConfig.hidden_size |
1 | 0 | 0 |
attr |
HubertConfig.feat_extract_norm |
1 | 0 | 0 |
attr |
HubertConfig.feat_extract_activation |
1 | 0 | 0 |
attr |
HubertConfig.conv_dim |
1 | 0 | 0 |
attr |
HubertConfig.conv_stride |
1 | 0 | 0 |
attr |
HubertConfig.conv_kernel |
1 | 0 | 0 |
attr |
HubertConfig.conv_bias |
1 | 0 | 0 |
attr |
HubertConfig.num_conv_pos_embeddings |
1 | 0 | 0 |
attr |
HubertConfig.num_conv_pos_embedding_groups |
1 | 0 | 0 |
attr |
HubertConfig.conv_pos_batch_norm |
1 | 0 | 0 |
attr |
HubertConfig.num_feat_extract_layers |
1 | 0 | 0 |
attr |
HubertConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
HubertConfig.intermediate_size |
1 | 0 | 0 |
attr |
HubertConfig.hidden_act |
1 | 0 | 0 |
attr |
HubertConfig.num_attention_heads |
1 | 0 | 0 |
attr |
HubertConfig.hidden_dropout |
1 | 0 | 0 |
attr |
HubertConfig.attention_dropout |
1 | 0 | 0 |
attr |
HubertConfig.activation_dropout |
1 | 0 | 0 |
attr |
HubertConfig.feat_proj_layer_norm |
1 | 0 | 0 |
attr |
HubertConfig.feat_proj_dropout |
1 | 0 | 0 |
attr |
HubertConfig.final_dropout |
1 | 0 | 0 |
attr |
HubertConfig.layerdrop |
1 | 0 | 0 |
attr |
HubertConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
HubertConfig.initializer_range |
1 | 0 | 0 |
attr |
HubertConfig.vocab_size |
1 | 0 | 0 |
attr |
HubertConfig.do_stable_layer_norm |
1 | 0 | 0 |
attr |
HubertConfig.use_weighted_layer_sum |
1 | 0 | 0 |
attr |
HubertConfig.classifier_proj_size |
1 | 0 | 0 |
attr |
HubertConfig.apply_spec_augment |
1 | 0 | 0 |
attr |
HubertConfig.mask_time_prob |
1 | 0 | 0 |
attr |
HubertConfig.mask_time_length |
1 | 0 | 0 |
attr |
HubertConfig.mask_time_min_masks |
1 | 0 | 0 |
attr |
HubertConfig.mask_feature_prob |
1 | 0 | 0 |
attr |
HubertConfig.mask_feature_length |
1 | 0 | 0 |
attr |
HubertConfig.mask_feature_min_masks |
1 | 0 | 0 |
attr |
HubertConfig.ctc_loss_reduction |
1 | 0 | 0 |
attr |
HubertConfig.ctc_zero_infinity |
1 | 0 | 0 |
transformers.models.hubert.modeling_hubert (31 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HubertPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
HubertPreTrainedModel._get_feat_extract_output_lengths |
2 | 1 | 0 |
meth |
HubertPreTrainedModel._get_feature_vector_attention_mask |
3 | 2 | 0 |
meth |
HubertModel.__init__ |
2 | 1 | 0 |
meth |
HubertModel._mask_hidden_states |
4 | 3 | 0 |
meth |
HubertModel.forward |
8 | 7 | 0 |
attr |
HubertModel.feature_extractor |
1 | 0 | 0 |
attr |
HubertModel.feature_projection |
1 | 0 | 0 |
attr |
HubertModel.masked_spec_embed |
1 | 0 | 0 |
attr |
HubertModel.encoder |
1 | 0 | 0 |
meth |
HubertForCTC.__init__ |
3 | 1 | 0 |
meth |
HubertForCTC.tie_weights |
2 | 0 | 0 |
meth |
HubertForCTC.freeze_feature_encoder |
1 | 0 | 0 |
meth |
HubertForCTC.freeze_base_model |
1 | 0 | 0 |
meth |
HubertForCTC.forward |
8 | 7 | 0 |
attr |
HubertForCTC.hubert |
1 | 0 | 0 |
attr |
HubertForCTC.dropout |
1 | 0 | 0 |
attr |
HubertForCTC.target_lang |
1 | 0 | 0 |
attr |
HubertForCTC.lm_head |
1 | 0 | 0 |
meth |
HubertForSequenceClassification.__init__ |
2 | 0 | 0 |
meth |
HubertForSequenceClassification.freeze_feature_encoder |
1 | 0 | 0 |
meth |
HubertForSequenceClassification.freeze_base_model |
1 | 0 | 0 |
meth |
HubertForSequenceClassification.forward |
8 | 7 | 0 |
attr |
HubertForSequenceClassification.hubert |
1 | 0 | 0 |
attr |
HubertForSequenceClassification.projector |
1 | 0 | 0 |
attr |
HubertForSequenceClassification.classifier |
1 | 0 | 0 |
attr |
HubertForSequenceClassification.layer_weights |
1 | 0 | 0 |
transformers.models.hubert.modular_hubert (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HubertPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
HubertPreTrainedModel._get_feat_extract_output_lengths |
2 | 1 | 0 |
meth |
HubertPreTrainedModel._get_feature_vector_attention_mask |
3 | 2 | 0 |
meth |
HubertModel.__init__ |
2 | 1 | 0 |
meth |
HubertModel.freeze_feature_encoder |
1 | 0 | 0 |
meth |
HubertModel.forward |
8 | 7 | 0 |
attr |
HubertModel.feature_extractor |
1 | 0 | 0 |
attr |
HubertModel.feature_projection |
1 | 0 | 0 |
attr |
HubertModel.masked_spec_embed |
1 | 0 | 0 |
attr |
HubertModel.encoder |
1 | 0 | 0 |
transformers.models.hunyuan_v1_dense.configuration_hunyuan_v1_dense (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HunYuanDenseV1Config.__init__ |
23 | 21 | 0 |
attr |
HunYuanDenseV1Config.vocab_size |
1 | 0 | 0 |
attr |
HunYuanDenseV1Config.max_position_embeddings |
1 | 0 | 0 |
attr |
HunYuanDenseV1Config.hidden_size |
1 | 0 | 0 |
attr |
HunYuanDenseV1Config.intermediate_size |
1 | 0 | 0 |
attr |
HunYuanDenseV1Config.num_hidden_layers |
1 | 0 | 0 |
attr |
HunYuanDenseV1Config.num_attention_heads |
1 | 0 | 0 |
attr |
HunYuanDenseV1Config.head_dim |
1 | 0 | 0 |
attr |
HunYuanDenseV1Config.num_key_value_heads |
1 | 0 | 0 |
attr |
HunYuanDenseV1Config.hidden_act |
1 | 0 | 0 |
attr |
HunYuanDenseV1Config.initializer_range |
1 | 0 | 0 |
attr |
HunYuanDenseV1Config.rms_norm_eps |
1 | 0 | 0 |
attr |
HunYuanDenseV1Config.pretraining_tp |
1 | 0 | 0 |
attr |
HunYuanDenseV1Config.use_cache |
1 | 0 | 0 |
attr |
HunYuanDenseV1Config.attention_bias |
1 | 0 | 0 |
attr |
HunYuanDenseV1Config.attention_dropout |
1 | 0 | 0 |
attr |
HunYuanDenseV1Config.rope_parameters |
1 | 0 | 0 |
attr |
HunYuanDenseV1Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
HunYuanDenseV1Config.pad_token_id |
1 | 0 | 0 |
attr |
HunYuanDenseV1Config.bos_token_id |
1 | 0 | 0 |
attr |
HunYuanDenseV1Config.eos_token_id |
1 | 0 | 0 |
transformers.models.hunyuan_v1_dense.modeling_hunyuan_v1_dense (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HunYuanDenseV1PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
HunYuanDenseV1ForCausalLM.__init__ |
2 | 0 | 0 |
attr |
HunYuanDenseV1ForCausalLM.model |
1 | 0 | 0 |
attr |
HunYuanDenseV1ForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
HunYuanDenseV1ForCausalLM.lm_head |
1 | 0 | 0 |
meth |
HunYuanDenseV1Model.__init__ |
2 | 1 | 0 |
attr |
HunYuanDenseV1Model.padding_idx |
1 | 0 | 0 |
attr |
HunYuanDenseV1Model.vocab_size |
1 | 0 | 0 |
attr |
HunYuanDenseV1Model.embed_tokens |
1 | 0 | 0 |
attr |
HunYuanDenseV1Model.layers |
1 | 0 | 0 |
attr |
HunYuanDenseV1Model.norm |
1 | 0 | 0 |
attr |
HunYuanDenseV1Model.rotary_emb |
1 | 0 | 0 |
attr |
HunYuanDenseV1Model.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.hunyuan_v1_dense.modular_hunyuan_v1_dense (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HunYuanDenseV1PreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.hunyuan_v1_moe.configuration_hunyuan_v1_moe (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HunYuanMoEV1Config.__init__ |
26 | 24 | 0 |
meth |
HunYuanMoEV1Config._rope_parameters_validation |
1 | 0 | 0 |
attr |
HunYuanMoEV1Config.vocab_size |
1 | 0 | 0 |
attr |
HunYuanMoEV1Config.max_position_embeddings |
1 | 0 | 0 |
attr |
HunYuanMoEV1Config.hidden_size |
1 | 0 | 0 |
attr |
HunYuanMoEV1Config.intermediate_size |
1 | 0 | 0 |
attr |
HunYuanMoEV1Config.num_hidden_layers |
1 | 0 | 0 |
attr |
HunYuanMoEV1Config.num_attention_heads |
1 | 0 | 0 |
attr |
HunYuanMoEV1Config.num_experts |
1 | 0 | 0 |
attr |
HunYuanMoEV1Config.moe_topk |
1 | 0 | 0 |
attr |
HunYuanMoEV1Config.head_dim |
1 | 0 | 0 |
attr |
HunYuanMoEV1Config.num_key_value_heads |
1 | 0 | 0 |
attr |
HunYuanMoEV1Config.hidden_act |
1 | 0 | 0 |
attr |
HunYuanMoEV1Config.initializer_range |
1 | 0 | 0 |
attr |
HunYuanMoEV1Config.rms_norm_eps |
1 | 0 | 0 |
attr |
HunYuanMoEV1Config.pretraining_tp |
1 | 0 | 0 |
attr |
HunYuanMoEV1Config.use_cache |
1 | 0 | 0 |
attr |
HunYuanMoEV1Config.attention_bias |
1 | 0 | 0 |
attr |
HunYuanMoEV1Config.attention_dropout |
1 | 0 | 0 |
attr |
HunYuanMoEV1Config.rope_parameters |
1 | 0 | 0 |
attr |
HunYuanMoEV1Config.pad_token_id |
1 | 0 | 0 |
attr |
HunYuanMoEV1Config.bos_token_id |
1 | 0 | 0 |
attr |
HunYuanMoEV1Config.eos_token_id |
1 | 0 | 0 |
attr |
HunYuanMoEV1Config.sep_token_id |
1 | 0 | 0 |
attr |
HunYuanMoEV1Config.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.hunyuan_v1_moe.modeling_hunyuan_v1_moe (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HunYuanMoEV1PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
HunYuanMoEV1Model.__init__ |
2 | 1 | 0 |
attr |
HunYuanMoEV1Model.padding_idx |
1 | 0 | 0 |
attr |
HunYuanMoEV1Model.vocab_size |
1 | 0 | 0 |
attr |
HunYuanMoEV1Model.embed_tokens |
1 | 0 | 0 |
attr |
HunYuanMoEV1Model.layers |
1 | 0 | 0 |
attr |
HunYuanMoEV1Model.norm |
1 | 0 | 0 |
attr |
HunYuanMoEV1Model.rotary_emb |
1 | 0 | 0 |
attr |
HunYuanMoEV1Model.gradient_checkpointing |
1 | 0 | 0 |
meth |
HunYuanMoEV1ForCausalLM.init |
2 | 0 | 0 |
attr |
HunYuanMoEV1ForCausalLM.model |
1 | 0 | 0 |
attr |
HunYuanMoEV1ForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
HunYuanMoEV1ForCausalLM.lm_head |
1 | 0 | 0 |
transformers.models.hunyuan_v1_moe.modular_hunyuan_v1_moe (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HunYuanMoEV1PreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.ibert.configuration_ibert (36 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
IBertConfig.init |
19 | 0 | 0 |
attr |
IBertConfig.pad_token_id |
1 | 0 | 0 |
attr |
IBertConfig.bos_token_id |
1 | 0 | 0 |
attr |
IBertConfig.eos_token_id |
1 | 0 | 0 |
attr |
IBertConfig.vocab_size |
1 | 0 | 0 |
attr |
IBertConfig.hidden_size |
1 | 0 | 0 |
attr |
IBertConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
IBertConfig.num_attention_heads |
1 | 0 | 0 |
attr |
IBertConfig.hidden_act |
1 | 0 | 0 |
attr |
IBertConfig.intermediate_size |
1 | 0 | 0 |
attr |
IBertConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
IBertConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
IBertConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
IBertConfig.type_vocab_size |
1 | 0 | 0 |
attr |
IBertConfig.initializer_range |
1 | 0 | 0 |
attr |
IBertConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
IBertConfig.quant_mode |
1 | 0 | 0 |
attr |
IBertConfig.force_dequant |
1 | 0 | 0 |
transformers.models.ibert.modeling_ibert (48 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
IBertForSequenceClassification.init |
2 | 0 | 0 |
meth |
IBertForSequenceClassification.forward |
11 | 10 | 0 |
attr |
IBertForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
IBertForSequenceClassification.ibert |
1 | 0 | 0 |
attr |
IBertForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
IBertForTokenClassification.init |
2 | 0 | 0 |
meth |
IBertForTokenClassification.forward |
11 | 10 | 0 |
attr |
IBertForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
IBertForTokenClassification.ibert |
1 | 0 | 0 |
attr |
IBertForTokenClassification.dropout |
1 | 0 | 0 |
attr |
IBertForTokenClassification.classifier |
1 | 0 | 0 |
meth |
IBertPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
IBertPreTrainedModel.resize_token_embeddings |
2 | 0 | 0 |
meth |
IBertForMaskedLM.init |
2 | 0 | 0 |
meth |
IBertForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
IBertForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
meth |
IBertForMaskedLM.forward |
11 | 10 | 0 |
attr |
IBertForMaskedLM.ibert |
1 | 0 | 0 |
attr |
IBertForMaskedLM.lm_head |
1 | 0 | 0 |
meth |
IBertForMultipleChoice.init |
2 | 0 | 0 |
meth |
IBertForMultipleChoice.forward |
11 | 10 | 0 |
attr |
IBertForMultipleChoice.ibert |
1 | 0 | 0 |
attr |
IBertForMultipleChoice.dropout |
1 | 0 | 0 |
attr |
IBertForMultipleChoice.classifier |
1 | 0 | 0 |
meth |
IBertModel.init |
3 | 0 | 0 |
meth |
IBertModel.get_input_embeddings |
1 | 0 | 0 |
meth |
IBertModel.set_input_embeddings |
2 | 0 | 0 |
meth |
IBertModel.forward |
10 | 9 | 0 |
attr |
IBertModel.quant_mode |
1 | 0 | 0 |
attr |
IBertModel.embeddings |
1 | 0 | 0 |
attr |
IBertModel.encoder |
1 | 0 | 0 |
attr |
IBertModel.pooler |
1 | 0 | 0 |
meth |
IBertForQuestionAnswering.init |
2 | 0 | 0 |
meth |
IBertForQuestionAnswering.forward |
12 | 11 | 0 |
attr |
IBertForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
IBertForQuestionAnswering.ibert |
1 | 0 | 0 |
attr |
IBertForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
transformers.models.ibert.quant_modules (178 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
QuantEmbedding.init |
12 | 0 | 0 |
meth |
QuantEmbedding.forward |
4 | 0 | 0 |
attr |
QuantEmbedding.num_ |
1 | 0 | 0 |
attr |
QuantEmbedding.dim |
1 | 0 | 0 |
attr |
QuantEmbedding.padding_idx |
1 | 0 | 0 |
attr |
QuantEmbedding.max_norm |
1 | 0 | 0 |
attr |
QuantEmbedding.norm_type |
1 | 0 | 0 |
attr |
QuantEmbedding.scale_grad_by_freq |
1 | 0 | 0 |
attr |
QuantEmbedding.sparse |
1 | 0 | 0 |
attr |
QuantEmbedding.weight |
1 | 0 | 0 |
attr |
QuantEmbedding.weight_bit |
1 | 0 | 0 |
attr |
QuantEmbedding.momentum |
1 | 0 | 0 |
attr |
QuantEmbedding.quant_mode |
1 | 0 | 0 |
attr |
QuantEmbedding.percentile_mode |
1 | 0 | 0 |
attr |
QuantEmbedding.weight_function |
1 | 0 | 0 |
meth |
IntGELU.init |
3 | 0 | 0 |
meth |
IntGELU.int_erf |
3 | 0 | 0 |
meth |
IntGELU.forward |
3 | 0 | 0 |
attr |
IntGELU.quant_mode |
1 | 0 | 0 |
attr |
IntGELU.k |
1 | 0 | 0 |
attr |
IntGELU.const |
1 | 0 | 0 |
attr |
IntGELU.coeff |
1 | 0 | 0 |
attr |
IntGELU.activation_fn |
1 | 0 | 0 |
meth |
floor_ste.forward |
3 | 0 | 0 |
meth |
floor_ste.backward |
3 | 0 | 0 |
func |
batch_frexp |
3 | 0 | 0 |
meth |
IntSoftmax.init |
4 | 0 | 0 |
meth |
IntSoftmax.int_polynomial |
3 | 0 | 0 |
meth |
IntSoftmax.int_exp |
3 | 0 | 0 |
meth |
IntSoftmax.forward |
3 | 0 | 0 |
attr |
IntSoftmax.output_bit |
1 | 0 | 0 |
attr |
IntSoftmax.max_bit |
1 | 0 | 0 |
attr |
IntSoftmax.quant_mode |
1 | 0 | 0 |
attr |
IntSoftmax.act |
1 | 0 | 0 |
attr |
IntSoftmax.x0 |
1 | 0 | 0 |
attr |
IntSoftmax.const |
1 | 0 | 0 |
attr |
IntSoftmax.coef |
1 | 0 | 0 |
func |
symmetric_linear_quantization_params |
5 | 0 | 0 |
meth |
SymmetricQuantFunction.forward |
6 | 0 | 0 |
meth |
SymmetricQuantFunction.backward |
3 | 0 | 0 |
meth |
IntLayerNorm.init |
6 | 0 | 0 |
meth |
IntLayerNorm.set_shift |
2 | 0 | 0 |
meth |
IntLayerNorm.overflow_fallback |
2 | 0 | 0 |
meth |
IntLayerNorm.forward |
3 | 0 | 0 |
attr |
IntLayerNorm.normalized_shape |
1 | 0 | 0 |
attr |
IntLayerNorm.eps |
1 | 0 | 0 |
attr |
IntLayerNorm.weight |
1 | 0 | 0 |
attr |
IntLayerNorm.bias |
1 | 0 | 0 |
attr |
IntLayerNorm.quant_mode |
1 | 0 | 0 |
attr |
IntLayerNorm.output_bit |
1 | 0 | 0 |
attr |
IntLayerNorm.max_bit |
1 | 0 | 0 |
attr |
IntLayerNorm.dim_sqrt |
1 | 0 | 0 |
attr |
IntLayerNorm.activation |
1 | 0 | 0 |
func |
get_percentile_min_max |
5 | 0 | 0 |
meth |
FixedPointMul.forward |
8 | 0 | 0 |
meth |
FixedPointMul.backward |
3 | 0 | 0 |
meth |
QuantAct.init |
6 | 0 | 0 |
meth |
QuantAct.repr |
1 | 0 | 0 |
meth |
QuantAct.forward |
7 | 0 | 0 |
attr |
QuantAct.activation_bit |
1 | 0 | 0 |
attr |
QuantAct.act_range_momentum |
1 | 0 | 0 |
attr |
QuantAct.quant_mode |
1 | 0 | 0 |
attr |
QuantAct.per_channel |
1 | 0 | 0 |
attr |
QuantAct.percentile |
1 | 0 | 0 |
attr |
QuantAct.act_function |
1 | 0 | 0 |
func |
linear_quantize |
5 | 0 | 0 |
meth |
round_ste.forward |
3 | 0 | 0 |
meth |
round_ste.backward |
3 | 0 | 0 |
meth |
QuantLinear.init |
8 | 0 | 0 |
meth |
QuantLinear.repr |
1 | 0 | 0 |
meth |
QuantLinear.forward |
3 | 0 | 0 |
attr |
QuantLinear.in_features |
1 | 0 | 0 |
attr |
QuantLinear.out_features |
1 | 0 | 0 |
attr |
QuantLinear.weight |
1 | 0 | 0 |
attr |
QuantLinear.weight_bit |
1 | 0 | 0 |
attr |
QuantLinear.quant_mode |
1 | 0 | 0 |
attr |
QuantLinear.per_channel |
1 | 0 | 0 |
attr |
QuantLinear.bias_bit |
1 | 0 | 0 |
attr |
QuantLinear.percentile_mode |
1 | 0 | 0 |
attr |
QuantLinear.weight_function |
1 | 0 | 0 |
attr |
QuantLinear.bias |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.models.idefics.configuration_idefics (58 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
IdeficsConfig.init |
30 | 0 | 0 |
attr |
IdeficsConfig.vocab_size |
1 | 0 | 0 |
attr |
IdeficsConfig.additional_vocab_size |
1 | 0 | 0 |
attr |
IdeficsConfig.hidden_size |
1 | 0 | 0 |
attr |
IdeficsConfig.intermediate_size |
1 | 0 | 0 |
attr |
IdeficsConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
IdeficsConfig.num_attention_heads |
1 | 0 | 0 |
attr |
IdeficsConfig.dropout |
1 | 0 | 0 |
attr |
IdeficsConfig.hidden_act |
1 | 0 | 0 |
attr |
IdeficsConfig.initializer_range |
1 | 0 | 0 |
attr |
IdeficsConfig.alpha_initializer |
1 | 0 | 0 |
attr |
IdeficsConfig.alphas_initializer_range |
1 | 0 | 0 |
attr |
IdeficsConfig.alpha_type |
1 | 0 | 0 |
attr |
IdeficsConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
IdeficsConfig.use_cache |
1 | 0 | 0 |
attr |
IdeficsConfig.cross_layer_interval |
1 | 0 | 0 |
attr |
IdeficsConfig.qk_layer_norms |
1 | 0 | 0 |
attr |
IdeficsConfig.freeze_vision_layers |
1 | 0 | 0 |
attr |
IdeficsConfig.freeze_text_layers |
1 | 0 | 0 |
attr |
IdeficsConfig.freeze_text_module_exceptions |
1 | 0 | 0 |
attr |
IdeficsConfig.freeze_vision_module_exceptions |
1 | 0 | 0 |
attr |
IdeficsConfig.freeze_lm_head |
1 | 0 | 0 |
attr |
IdeficsConfig.use_resampler |
1 | 0 | 0 |
attr |
IdeficsConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
IdeficsConfig.pad_token_id |
1 | 0 | 0 |
attr |
IdeficsConfig.bos_token_id |
1 | 0 | 0 |
attr |
IdeficsConfig.eos_token_id |
1 | 0 | 0 |
attr |
IdeficsConfig.perceiver_config |
1 | 0 | 0 |
attr |
IdeficsConfig.vision_config |
1 | 0 | 0 |
transformers.models.idefics.image_processing_idefics (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
IdeficsImageProcessor.init |
8 | 7 | 0 |
meth |
IdeficsImageProcessor.preprocess |
11 | 10 | 0 |
attr |
IdeficsImageProcessor.image_size |
1 | 0 | 0 |
attr |
IdeficsImageProcessor.image_num_channels |
1 | 0 | 0 |
attr |
IdeficsImageProcessor.image_mean |
1 | 0 | 0 |
attr |
IdeficsImageProcessor.image_std |
1 | 0 | 0 |
attr |
IdeficsImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
IdeficsImageProcessor.rescale_factor |
1 | 0 | 0 |
transformers.models.idefics.modeling_idefics (40 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
IdeficsModel.init |
2 | 1 | 0 |
meth |
IdeficsModel.freeze_relevant_params |
2 | 0 | 0 |
meth |
IdeficsModel.freeze_text_layers |
2 | 0 | 0 |
meth |
IdeficsModel.freeze_vision_layers |
2 | 0 | 0 |
attr |
IdeficsModel.padding_idx |
1 | 0 | 0 |
attr |
IdeficsModel.vocab_size |
1 | 0 | 0 |
attr |
IdeficsModel.embed_tokens |
1 | 0 | 0 |
attr |
IdeficsModel.image_size |
1 | 0 | 0 |
attr |
IdeficsModel.vision_config |
1 | 0 | 0 |
attr |
IdeficsModel.vision_model |
1 | 0 | 0 |
attr |
IdeficsModel.layers |
1 | 0 | 0 |
attr |
IdeficsModel.cross_layer_interval |
1 | 0 | 0 |
attr |
IdeficsModel.gated_cross_attn_layers |
1 | 0 | 0 |
attr |
IdeficsModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
IdeficsModel.norm |
1 | 0 | 0 |
attr |
IdeficsModel.perceiver_resampler |
1 | 0 | 0 |
meth |
IdeficsPreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
IdeficsPreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
IdeficsForVisionText2Text.init |
3 | 0 | 0 |
meth |
IdeficsForVisionText2Text.prepare_inputs_for_generation |
12 | 0 | 0 |
meth |
IdeficsForVisionText2Text._update_model_kwargs_for_generation |
5 | 4 | 0 |
attr |
IdeficsForVisionText2Text.model |
1 | 0 | 0 |
attr |
IdeficsForVisionText2Text.lm_head |
1 | 0 | 0 |
transformers.models.idefics.perceiver (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
IdeficsPerceiverAttention.qk_layer_norms |
1 | 0 | 0 |
attr |
IdeficsPerceiverAttention.context_layer_norm |
1 | 0 | 0 |
attr |
IdeficsPerceiverAttention.latents_layer_norm |
1 | 0 | 0 |
attr |
IdeficsPerceiverAttention.qk_scale |
1 | 0 | 0 |
attr |
IdeficsPerceiverAttention.q_proj |
1 | 0 | 0 |
attr |
IdeficsPerceiverAttention.k_proj |
1 | 0 | 0 |
attr |
IdeficsPerceiverAttention.v_proj |
1 | 0 | 0 |
attr |
IdeficsPerceiverAttention.output_proj |
1 | 0 | 0 |
attr |
IdeficsPerceiverAttention.q_layer_norm |
1 | 0 | 0 |
attr |
IdeficsPerceiverAttention.k_layer_norm |
1 | 0 | 0 |
meth |
IdeficsMLP.init |
3 | 1 | 0 |
attr |
IdeficsMLP.embed_dim |
1 | 0 | 0 |
attr |
IdeficsMLP.ln |
1 | 0 | 0 |
attr |
IdeficsMLP.fc |
1 | 0 | 0 |
attr |
IdeficsMLP.act |
1 | 0 | 0 |
attr |
IdeficsMLP.c_proj |
1 | 0 | 0 |
attr |
IdeficsPerceiverResampler.qk_layer_norms |
1 | 0 | 0 |
attr |
IdeficsPerceiverResampler.latents |
1 | 0 | 0 |
attr |
IdeficsPerceiverResampler.intermediate_dim |
1 | 0 | 0 |
attr |
IdeficsPerceiverResampler.blocks |
1 | 0 | 0 |
attr |
IdeficsPerceiverResampler.layer_norm |
1 | 0 | 0 |
transformers.models.idefics.processing_idefics (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
IdeficsProcessor.init |
6 | 0 | 0 |
prop |
IdeficsProcessor.model_input_names |
1 | 0 | 0 |
attr |
IdeficsProcessor.image_token_id |
1 | 0 | 0 |
attr |
IdeficsProcessor.default_image_dims |
1 | 0 | 0 |
attr |
IdeficsProcessor.tokenizer_was_trained_with_end_of_utterance_token |
1 | 0 | 0 |
transformers.models.idefics.vision (48 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
IdeficsVisionAttention.init |
2 | 1 | 0 |
attr |
IdeficsVisionAttention.config |
1 | 0 | 0 |
attr |
IdeficsVisionAttention.embed_dim |
1 | 0 | 0 |
attr |
IdeficsVisionAttention.num_heads |
1 | 0 | 0 |
attr |
IdeficsVisionAttention.head_dim |
1 | 0 | 0 |
attr |
IdeficsVisionAttention.scale |
1 | 0 | 0 |
attr |
IdeficsVisionAttention.dropout |
1 | 0 | 0 |
attr |
IdeficsVisionAttention.is_causal |
1 | 0 | 0 |
attr |
IdeficsVisionAttention.k_proj |
1 | 0 | 0 |
attr |
IdeficsVisionAttention.v_proj |
1 | 0 | 0 |
attr |
IdeficsVisionAttention.q_proj |
1 | 0 | 0 |
attr |
IdeficsVisionAttention.out_proj |
1 | 0 | 0 |
meth |
IdeficsVisionMLP.init |
2 | 0 | 0 |
attr |
IdeficsVisionMLP.config |
1 | 0 | 0 |
attr |
IdeficsVisionMLP.activation_fn |
1 | 0 | 0 |
attr |
IdeficsVisionMLP.fc1 |
1 | 0 | 0 |
attr |
IdeficsVisionMLP.fc2 |
1 | 0 | 0 |
func |
eager_attention_forward |
9 | 7 | 0 |
meth |
IdeficsVisionEncoderLayer.init |
2 | 1 | 0 |
attr |
IdeficsVisionEncoderLayer.embed_dim |
1 | 0 | 0 |
attr |
IdeficsVisionEncoderLayer.self_attn |
1 | 0 | 0 |
attr |
IdeficsVisionEncoderLayer.layer_norm1 |
1 | 0 | 0 |
attr |
IdeficsVisionEncoderLayer.mlp |
1 | 0 | 0 |
attr |
IdeficsVisionEncoderLayer.layer_norm2 |
1 | 0 | 0 |
meth |
IdeficsVisionEncoder.init |
2 | 1 | 0 |
meth |
IdeficsVisionEncoder.forward |
7 | 6 | 0 |
attr |
IdeficsVisionEncoder.config |
1 | 0 | 0 |
attr |
IdeficsVisionEncoder.layers |
1 | 0 | 0 |
attr |
IdeficsVisionEncoder.gradient_checkpointing |
1 | 0 | 0 |
meth |
IdeficsVisionTransformer.init |
2 | 1 | 0 |
attr |
IdeficsVisionTransformer.config |
1 | 0 | 0 |
attr |
IdeficsVisionTransformer.embeddings |
1 | 0 | 0 |
attr |
IdeficsVisionTransformer.pre_layrnorm |
1 | 0 | 0 |
attr |
IdeficsVisionTransformer.encoder |
1 | 0 | 0 |
attr |
IdeficsVisionTransformer.post_layernorm |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
IdeficsVisionEmbeddings.init |
2 | 1 | 0 |
attr |
IdeficsVisionEmbeddings.config |
1 | 0 | 0 |
attr |
IdeficsVisionEmbeddings.embed_dim |
1 | 0 | 0 |
attr |
IdeficsVisionEmbeddings.image_size |
1 | 0 | 0 |
attr |
IdeficsVisionEmbeddings.patch_size |
1 | 0 | 0 |
attr |
IdeficsVisionEmbeddings.class_embedding |
1 | 0 | 0 |
attr |
IdeficsVisionEmbeddings.patch_embedding |
1 | 0 | 0 |
attr |
IdeficsVisionEmbeddings.num_patches |
1 | 0 | 0 |
attr |
IdeficsVisionEmbeddings.num_positions |
1 | 0 | 0 |
attr |
IdeficsVisionEmbeddings.position_embedding |
1 | 0 | 0 |
transformers.models.idefics2.configuration_idefics2 (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Idefics2Config.init |
8 | 0 | 0 |
attr |
Idefics2Config.image_token_id |
1 | 0 | 0 |
attr |
Idefics2Config.use_cache |
1 | 0 | 0 |
attr |
Idefics2Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
Idefics2Config.text_config |
1 | 0 | 0 |
attr |
Idefics2Config.perceiver_config |
1 | 0 | 0 |
attr |
Idefics2Config.vision_config |
1 | 0 | 0 |
transformers.models.idefics2.image_processing_idefics2 (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Idefics2ImageProcessor.init |
13 | 12 | 0 |
meth |
Idefics2ImageProcessor.resize |
7 | 6 | 0 |
meth |
Idefics2ImageProcessor.split_image |
3 | 2 | 0 |
meth |
Idefics2ImageProcessor.preprocess |
16 | 15 | 0 |
attr |
Idefics2ImageProcessor.do_convert_rgb |
1 | 0 | 0 |
attr |
Idefics2ImageProcessor.do_resize |
1 | 0 | 0 |
attr |
Idefics2ImageProcessor.size |
1 | 0 | 0 |
attr |
Idefics2ImageProcessor.resample |
1 | 0 | 0 |
attr |
Idefics2ImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
Idefics2ImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
Idefics2ImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
Idefics2ImageProcessor.image_mean |
1 | 0 | 0 |
attr |
Idefics2ImageProcessor.image_std |
1 | 0 | 0 |
attr |
Idefics2ImageProcessor.do_pad |
1 | 0 | 0 |
attr |
Idefics2ImageProcessor.do_image_splitting |
1 | 0 | 0 |
transformers.models.idefics2.image_processing_idefics2_fast (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Idefics2ImageProcessorFast.resize |
5 | 4 | 0 |
meth |
Idefics2ImageProcessorFast._preprocess |
15 | 14 | 0 |
transformers.models.idefics2.modeling_idefics2 (35 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Idefics2ForConditionalGeneration.init |
2 | 0 | 0 |
meth |
Idefics2ForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Idefics2ForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Idefics2ForConditionalGeneration.prepare_inputs_for_generation |
12 | 0 | 0 |
attr |
Idefics2ForConditionalGeneration.model |
1 | 0 | 0 |
attr |
Idefics2ForConditionalGeneration.image_token_id |
1 | 0 | 0 |
attr |
Idefics2ForConditionalGeneration.lm_head |
1 | 0 | 0 |
attr |
Idefics2ForConditionalGeneration.vocab_size |
1 | 0 | 0 |
meth |
Idefics2Model.init |
2 | 1 | 0 |
meth |
Idefics2Model.get_input_embeddings |
1 | 0 | 0 |
meth |
Idefics2Model.set_input_embeddings |
2 | 0 | 0 |
meth |
Idefics2Model.inputs_merger |
4 | 3 | 0 |
attr |
Idefics2Model.padding_idx |
1 | 0 | 0 |
attr |
Idefics2Model.vocab_size |
1 | 0 | 0 |
attr |
Idefics2Model.vision_model |
1 | 0 | 0 |
attr |
Idefics2Model.connector |
1 | 0 | 0 |
attr |
Idefics2Model.text_model |
1 | 0 | 0 |
attr |
Idefics2Model.image_seq_len |
1 | 0 | 0 |
attr |
Idefics2Model.image_token_id |
1 | 0 | 0 |
meth |
Idefics2PreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.idefics2.processing_idefics2 (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Idefics2Processor.init |
6 | 2 | 0 |
meth |
Idefics2Processor._extract_images_from_prompts |
2 | 0 | 0 |
attr |
Idefics2Processor.end_of_utterance_token |
1 | 0 | 0 |
attr |
Idefics2Processor.image_seq_len |
1 | 0 | 0 |
attr |
Idefics2Processor.fake_image_token |
1 | 0 | 0 |
attr |
Idefics2Processor.image_token |
1 | 0 | 0 |
attr |
Idefics2Processor.image_token_id |
1 | 0 | 0 |
transformers.models.idefics3.configuration_idefics3 (39 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Idefics3VisionConfig.init |
13 | 0 | 0 |
attr |
Idefics3VisionConfig.hidden_size |
1 | 0 | 0 |
attr |
Idefics3VisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
Idefics3VisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Idefics3VisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Idefics3VisionConfig.num_channels |
1 | 0 | 0 |
attr |
Idefics3VisionConfig.patch_size |
1 | 0 | 0 |
attr |
Idefics3VisionConfig.image_size |
1 | 0 | 0 |
attr |
Idefics3VisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
Idefics3VisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
Idefics3VisionConfig.hidden_act |
1 | 0 | 0 |
attr |
Idefics3VisionConfig.initializer_range |
1 | 0 | 0 |
meth |
Idefics3Config.init |
9 | 0 | 0 |
attr |
Idefics3Config.image_token_id |
1 | 0 | 0 |
attr |
Idefics3Config.use_cache |
1 | 0 | 0 |
attr |
Idefics3Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
Idefics3Config.text_config |
1 | 0 | 0 |
attr |
Idefics3Config.scale_factor |
1 | 0 | 0 |
attr |
Idefics3Config.vision_config |
1 | 0 | 0 |
transformers.models.idefics3.image_processing_idefics3 (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Idefics3ImageProcessor.init |
14 | 13 | 0 |
meth |
Idefics3ImageProcessor.resize |
7 | 6 | 0 |
meth |
Idefics3ImageProcessor.split_image |
6 | 4 | 0 |
meth |
Idefics3ImageProcessor.resize_for_vision_encoder |
6 | 5 | 0 |
meth |
Idefics3ImageProcessor.preprocess |
18 | 17 | 0 |
meth |
Idefics3ImageProcessor.get_number_of_image_patches |
4 | 3 | 0 |
attr |
Idefics3ImageProcessor.do_convert_rgb |
1 | 0 | 0 |
attr |
Idefics3ImageProcessor.do_resize |
1 | 0 | 0 |
attr |
Idefics3ImageProcessor.size |
1 | 0 | 0 |
attr |
Idefics3ImageProcessor.resample |
1 | 0 | 0 |
attr |
Idefics3ImageProcessor.do_image_splitting |
1 | 0 | 0 |
attr |
Idefics3ImageProcessor.max_image_size |
1 | 0 | 0 |
attr |
Idefics3ImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
Idefics3ImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
Idefics3ImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
Idefics3ImageProcessor.image_mean |
1 | 0 | 0 |
attr |
Idefics3ImageProcessor.image_std |
1 | 0 | 0 |
attr |
Idefics3ImageProcessor.do_pad |
1 | 0 | 0 |
transformers.models.idefics3.image_processing_idefics3_fast (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Idefics3ImageProcessorFast.resize |
6 | 5 | 0 |
meth |
Idefics3ImageProcessorFast.split_images |
4 | 3 | 0 |
meth |
Idefics3ImageProcessorFast.resize_for_vision_encoder |
4 | 3 | 0 |
meth |
Idefics3ImageProcessorFast.pad |
5 | 4 | 0 |
meth |
Idefics3ImageProcessorFast._preprocess |
17 | 16 | 0 |
meth |
Idefics3ImageProcessorFast.to_dict |
1 | 0 | 0 |
meth |
Idefics3ImageProcessorFast.get_number_of_image_patches |
4 | 3 | 0 |
transformers.models.idefics3.modeling_idefics3 (42 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Idefics3Model.init |
2 | 1 | 0 |
meth |
Idefics3Model.get_input_embeddings |
1 | 0 | 0 |
meth |
Idefics3Model.set_input_embeddings |
2 | 0 | 0 |
meth |
Idefics3Model.inputs_merger |
4 | 3 | 0 |
attr |
Idefics3Model.padding_idx |
1 | 0 | 0 |
attr |
Idefics3Model.vocab_size |
1 | 0 | 0 |
attr |
Idefics3Model.vision_model |
1 | 0 | 0 |
attr |
Idefics3Model.connector |
1 | 0 | 0 |
attr |
Idefics3Model.text_model |
1 | 0 | 0 |
attr |
Idefics3Model.image_seq_len |
1 | 0 | 0 |
attr |
Idefics3Model.image_token_id |
1 | 0 | 0 |
meth |
Idefics3ForConditionalGeneration.init |
2 | 0 | 0 |
meth |
Idefics3ForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Idefics3ForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Idefics3ForConditionalGeneration.prepare_inputs_for_generation |
12 | 0 | 0 |
attr |
Idefics3ForConditionalGeneration.model |
1 | 0 | 0 |
attr |
Idefics3ForConditionalGeneration.image_token_id |
1 | 0 | 0 |
attr |
Idefics3ForConditionalGeneration.lm_head |
1 | 0 | 0 |
attr |
Idefics3ForConditionalGeneration.vocab_size |
1 | 0 | 0 |
meth |
Idefics3VisionTransformer.init |
2 | 1 | 0 |
meth |
Idefics3VisionTransformer.get_input_embeddings |
1 | 0 | 0 |
meth |
Idefics3VisionTransformer.set_input_embeddings |
2 | 0 | 0 |
meth |
Idefics3VisionTransformer.forward |
4 | 3 | 0 |
attr |
Idefics3VisionTransformer.embeddings |
1 | 0 | 0 |
attr |
Idefics3VisionTransformer.encoder |
1 | 0 | 0 |
attr |
Idefics3VisionTransformer.patch_size |
1 | 0 | 0 |
attr |
Idefics3VisionTransformer.post_layernorm |
1 | 0 | 0 |
transformers.models.idefics3.processing_idefics3 (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Idefics3Processor.init |
6 | 2 | 0 |
meth |
Idefics3Processor._extract_images_from_prompts |
2 | 0 | 0 |
meth |
Idefics3Processor._get_num_multimodal_tokens |
3 | 0 | 0 |
attr |
Idefics3Processor.fake_image_token |
1 | 0 | 0 |
attr |
Idefics3Processor.image_token |
1 | 0 | 0 |
attr |
Idefics3Processor.end_of_utterance_token |
1 | 0 | 0 |
attr |
Idefics3Processor.global_image_tag |
1 | 0 | 0 |
attr |
Idefics3Processor.image_seq_len |
1 | 0 | 0 |
attr |
Idefics3Processor.image_token_id |
1 | 0 | 0 |
attr |
Idefics3Processor.fake_image_token_id |
1 | 0 | 0 |
attr |
Idefics3Processor.global_image_token_id |
1 | 0 | 0 |
attr |
Idefics3Processor.row_col_ids |
1 | 0 | 0 |
transformers.models.ijepa.configuration_ijepa (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
IJepaConfig.init |
17 | 0 | 0 |
attr |
IJepaConfig.hidden_size |
1 | 0 | 0 |
attr |
IJepaConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
IJepaConfig.num_attention_heads |
1 | 0 | 0 |
attr |
IJepaConfig.intermediate_size |
1 | 0 | 0 |
attr |
IJepaConfig.hidden_act |
1 | 0 | 0 |
attr |
IJepaConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
IJepaConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
IJepaConfig.initializer_range |
1 | 0 | 0 |
attr |
IJepaConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
IJepaConfig.image_size |
1 | 0 | 0 |
attr |
IJepaConfig.patch_size |
1 | 0 | 0 |
attr |
IJepaConfig.num_channels |
1 | 0 | 0 |
attr |
IJepaConfig.qkv_bias |
1 | 0 | 0 |
attr |
IJepaConfig.pooler_output_size |
1 | 0 | 0 |
attr |
IJepaConfig.pooler_act |
1 | 0 | 0 |
transformers.models.ijepa.modeling_ijepa (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
IJepaForImageClassification.init |
2 | 1 | 0 |
attr |
IJepaForImageClassification.num_labels |
1 | 0 | 0 |
attr |
IJepaForImageClassification.ijepa |
1 | 0 | 0 |
attr |
IJepaForImageClassification.classifier |
1 | 0 | 0 |
meth |
IJepaModel.init |
4 | 3 | 0 |
attr |
IJepaModel.embeddings |
1 | 0 | 0 |
attr |
IJepaModel.encoder |
1 | 0 | 0 |
attr |
IJepaModel.layernorm |
1 | 0 | 0 |
attr |
IJepaModel.pooler |
1 | 0 | 0 |
transformers.models.ijepa.modular_ijepa (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
IJepaForImageClassification.init |
2 | 1 | 0 |
attr |
IJepaForImageClassification.ijepa |
1 | 0 | 0 |
meth |
IJepaModel.init |
4 | 3 | 0 |
attr |
IJepaModel.config |
1 | 0 | 0 |
attr |
IJepaModel.embeddings |
1 | 0 | 0 |
transformers.models.imagegpt.configuration_imagegpt (44 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ImageGPTConfig.init |
23 | 0 | 0 |
attr |
ImageGPTConfig.add_cross_attention |
1 | 0 | 0 |
attr |
ImageGPTConfig.vocab_size |
1 | 0 | 0 |
attr |
ImageGPTConfig.n_positions |
1 | 0 | 0 |
attr |
ImageGPTConfig.n_embd |
1 | 0 | 0 |
attr |
ImageGPTConfig.n_layer |
1 | 0 | 0 |
attr |
ImageGPTConfig.n_head |
1 | 0 | 0 |
attr |
ImageGPTConfig.n_inner |
1 | 0 | 0 |
attr |
ImageGPTConfig.activation_function |
1 | 0 | 0 |
attr |
ImageGPTConfig.resid_pdrop |
1 | 0 | 0 |
attr |
ImageGPTConfig.embd_pdrop |
1 | 0 | 0 |
attr |
ImageGPTConfig.attn_pdrop |
1 | 0 | 0 |
attr |
ImageGPTConfig.layer_norm_epsilon |
1 | 0 | 0 |
attr |
ImageGPTConfig.initializer_range |
1 | 0 | 0 |
attr |
ImageGPTConfig.scale_attn_weights |
1 | 0 | 0 |
attr |
ImageGPTConfig.use_cache |
1 | 0 | 0 |
attr |
ImageGPTConfig.scale_attn_by_inverse_layer_idx |
1 | 0 | 0 |
attr |
ImageGPTConfig.reorder_and_upcast_attn |
1 | 0 | 0 |
attr |
ImageGPTConfig.pad_token_id |
1 | 0 | 0 |
attr |
ImageGPTConfig.bos_token_id |
1 | 0 | 0 |
attr |
ImageGPTConfig.eos_token_id |
1 | 0 | 0 |
attr |
ImageGPTConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.imagegpt.image_processing_imagegpt (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ImageGPTImageProcessor.init |
8 | 7 | 0 |
meth |
ImageGPTImageProcessor.resize |
7 | 6 | 0 |
meth |
ImageGPTImageProcessor.to_dict |
1 | 0 | 0 |
attr |
ImageGPTImageProcessor.clusters |
1 | 0 | 0 |
attr |
ImageGPTImageProcessor.do_resize |
1 | 0 | 0 |
attr |
ImageGPTImageProcessor.size |
1 | 0 | 0 |
attr |
ImageGPTImageProcessor.resample |
1 | 0 | 0 |
attr |
ImageGPTImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
ImageGPTImageProcessor.do_color_quantize |
1 | 0 | 0 |
transformers.models.imagegpt.image_processing_imagegpt_fast (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ImageGPTImageProcessorFast.init |
3 | 2 | 0 |
meth |
ImageGPTImageProcessorFast._preprocess |
17 | 15 | 0 |
meth |
ImageGPTImageProcessorFast.to_dict |
1 | 0 | 0 |
transformers.models.imagegpt.modeling_imagegpt (20 missing, 3 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ImageGPTPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
ImageGPTForImageClassification.init |
2 | 1 | 0 |
meth |
ImageGPTForImageClassification.forward |
13 | 13 | 1 |
attr |
ImageGPTForImageClassification.num_labels |
1 | 0 | 0 |
attr |
ImageGPTForImageClassification.transformer |
1 | 0 | 0 |
attr |
ImageGPTForImageClassification.score |
1 | 0 | 0 |
meth |
ImageGPTModel.init |
2 | 1 | 0 |
meth |
ImageGPTModel.get_input_embeddings |
1 | 0 | 0 |
meth |
ImageGPTModel.set_input_embeddings |
2 | 0 | 0 |
meth |
ImageGPTModel.forward |
15 | 15 | 1 |
attr |
ImageGPTModel.embed_dim |
1 | 0 | 0 |
attr |
ImageGPTModel.wte |
1 | 0 | 0 |
attr |
ImageGPTModel.wpe |
1 | 0 | 0 |
attr |
ImageGPTModel.drop |
1 | 0 | 0 |
attr |
ImageGPTModel.h |
1 | 0 | 0 |
attr |
ImageGPTModel.ln_f |
1 | 0 | 0 |
attr |
ImageGPTModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
ImageGPTForCausalImageModeling.init |
2 | 1 | 0 |
meth |
ImageGPTForCausalImageModeling.forward |
16 | 16 | 1 |
attr |
ImageGPTForCausalImageModeling.transformer |
1 | 0 | 0 |
attr |
ImageGPTForCausalImageModeling.lm_head |
1 | 0 | 0 |
transformers.models.informer.configuration_informer (36 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
InformerConfig.init |
35 | 32 | 0 |
attr |
InformerConfig.prediction_length |
1 | 0 | 0 |
attr |
InformerConfig.context_length |
1 | 0 | 0 |
attr |
InformerConfig.distribution_output |
1 | 0 | 0 |
attr |
InformerConfig.loss |
1 | 0 | 0 |
attr |
InformerConfig.input_size |
1 | 0 | 0 |
attr |
InformerConfig.num_time_features |
1 | 0 | 0 |
attr |
InformerConfig.lags_sequence |
1 | 0 | 0 |
attr |
InformerConfig.scaling |
1 | 0 | 0 |
attr |
InformerConfig.num_dynamic_real_features |
1 | 0 | 0 |
attr |
InformerConfig.num_static_real_features |
1 | 0 | 0 |
attr |
InformerConfig.num_static_categorical_features |
1 | 0 | 0 |
attr |
InformerConfig.num_parallel_samples |
1 | 0 | 0 |
attr |
InformerConfig.feature_size |
1 | 0 | 0 |
attr |
InformerConfig.d_model |
1 | 0 | 0 |
attr |
InformerConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
InformerConfig.decoder_attention_heads |
1 | 0 | 0 |
attr |
InformerConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
InformerConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
InformerConfig.encoder_layers |
1 | 0 | 0 |
attr |
InformerConfig.decoder_layers |
1 | 0 | 0 |
attr |
InformerConfig.dropout |
1 | 0 | 0 |
attr |
InformerConfig.attention_dropout |
1 | 0 | 0 |
attr |
InformerConfig.activation_dropout |
1 | 0 | 0 |
attr |
InformerConfig.encoder_layerdrop |
1 | 0 | 0 |
attr |
InformerConfig.decoder_layerdrop |
1 | 0 | 0 |
attr |
InformerConfig.activation_function |
1 | 0 | 0 |
attr |
InformerConfig.init_std |
1 | 0 | 0 |
attr |
InformerConfig.use_cache |
1 | 0 | 0 |
attr |
InformerConfig.attention_type |
1 | 0 | 0 |
attr |
InformerConfig.sampling_factor |
1 | 0 | 0 |
attr |
InformerConfig.distil |
1 | 0 | 0 |
attr |
InformerConfig.cardinality |
1 | 0 | 0 |
attr |
InformerConfig.embedding_dimension |
1 | 0 | 0 |
transformers.models.informer.modeling_informer (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
InformerModel.init |
2 | 1 | 0 |
meth |
InformerModel.create_network_inputs |
8 | 7 | 0 |
meth |
InformerModel.forward |
17 | 16 | 0 |
attr |
InformerModel.encoder |
1 | 0 | 0 |
attr |
InformerModel.decoder |
1 | 0 | 0 |
attr |
InformerModel.scaler |
1 | 0 | 0 |
attr |
InformerModel.embedder |
1 | 0 | 0 |
meth |
InformerPreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
InformerForPrediction.init |
2 | 1 | 0 |
meth |
InformerForPrediction.output_params |
2 | 0 | 0 |
meth |
InformerForPrediction.output_distribution |
5 | 1 | 0 |
meth |
InformerForPrediction.forward |
18 | 17 | 0 |
attr |
InformerForPrediction.model |
1 | 0 | 0 |
attr |
InformerForPrediction.parameter_projection |
1 | 0 | 0 |
attr |
InformerForPrediction.target_shape |
1 | 0 | 0 |
attr |
InformerForPrediction.distribution_output |
1 | 0 | 0 |
attr |
InformerForPrediction.loss |
1 | 0 | 0 |
transformers.models.informer.modular_informer (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
InformerModel.__init__ |
2 | 1 | 0 |
meth |
InformerModel.forward |
2 | 0 | 0 |
attr |
InformerModel.encoder |
1 | 0 | 0 |
attr |
InformerModel.decoder |
1 | 0 | 0 |
attr |
InformerModel.scaler |
1 | 0 | 0 |
attr |
InformerModel.embedder |
1 | 0 | 0 |
meth |
InformerPreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
InformerForPrediction.__init__ |
2 | 1 | 0 |
meth |
InformerForPrediction.forward |
2 | 0 | 0 |
attr |
InformerForPrediction.model |
1 | 0 | 0 |
attr |
InformerForPrediction.parameter_projection |
1 | 0 | 0 |
attr |
InformerForPrediction.target_shape |
1 | 0 | 0 |
attr |
InformerForPrediction.distribution_output |
1 | 0 | 0 |
attr |
InformerForPrediction.loss |
1 | 0 | 0 |
transformers.models.instructblip.configuration_instructblip (69 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
InstructBlipVisionConfig.__init__ |
13 | 0 | 0 |
attr |
InstructBlipVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
InstructBlipVisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
InstructBlipVisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
InstructBlipVisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
InstructBlipVisionConfig.patch_size |
1 | 0 | 0 |
attr |
InstructBlipVisionConfig.image_size |
1 | 0 | 0 |
attr |
InstructBlipVisionConfig.initializer_range |
1 | 0 | 0 |
attr |
InstructBlipVisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
InstructBlipVisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
InstructBlipVisionConfig.hidden_act |
1 | 0 | 0 |
attr |
InstructBlipVisionConfig.qkv_bias |
1 | 0 | 0 |
meth |
InstructBlipConfig.__init__ |
7 | 0 | 0 |
attr |
InstructBlipConfig.text_config |
1 | 0 | 0 |
attr |
InstructBlipConfig.vision_config |
1 | 0 | 0 |
attr |
InstructBlipConfig.qformer_config |
1 | 0 | 0 |
attr |
InstructBlipConfig.num_query_tokens |
1 | 0 | 0 |
attr |
InstructBlipConfig.image_token_index |
1 | 0 | 0 |
attr |
InstructBlipConfig.use_decoder_only_language_model |
1 | 0 | 0 |
attr |
InstructBlipConfig.initializer_factor |
1 | 0 | 0 |
attr |
InstructBlipConfig.initializer_range |
1 | 0 | 0 |
meth |
InstructBlipQFormerConfig.__init__ |
16 | 0 | 0 |
attr |
InstructBlipQFormerConfig.pad_token_id |
1 | 0 | 0 |
attr |
InstructBlipQFormerConfig.vocab_size |
1 | 0 | 0 |
attr |
InstructBlipQFormerConfig.hidden_size |
1 | 0 | 0 |
attr |
InstructBlipQFormerConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
InstructBlipQFormerConfig.num_attention_heads |
1 | 0 | 0 |
attr |
InstructBlipQFormerConfig.hidden_act |
1 | 0 | 0 |
attr |
InstructBlipQFormerConfig.intermediate_size |
1 | 0 | 0 |
attr |
InstructBlipQFormerConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
InstructBlipQFormerConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
InstructBlipQFormerConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
InstructBlipQFormerConfig.initializer_range |
1 | 0 | 0 |
attr |
InstructBlipQFormerConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
InstructBlipQFormerConfig.cross_attention_frequency |
1 | 0 | 0 |
attr |
InstructBlipQFormerConfig.encoder_hidden_size |
1 | 0 | 0 |
transformers.models.instructblip.modeling_instructblip (42 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
InstructBlipQFormerModel.__init__ |
2 | 1 | 0 |
meth |
InstructBlipQFormerModel.get_input_embeddings |
1 | 0 | 0 |
meth |
InstructBlipQFormerModel.set_input_embeddings |
2 | 0 | 0 |
attr |
InstructBlipQFormerModel._can_record_outputs |
1 | 0 | 0 |
attr |
InstructBlipQFormerModel.embeddings |
1 | 0 | 0 |
attr |
InstructBlipQFormerModel.encoder |
1 | 0 | 0 |
meth |
InstructBlipForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
InstructBlipForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
InstructBlipForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
InstructBlipForConditionalGeneration.set_output_embeddings |
2 | 0 | 0 |
meth |
InstructBlipForConditionalGeneration.get_encoder |
2 | 0 | 0 |
meth |
InstructBlipForConditionalGeneration.get_decoder |
1 | 0 | 0 |
meth |
InstructBlipForConditionalGeneration._preprocess_accelerate |
1 | 0 | 0 |
meth |
InstructBlipForConditionalGeneration.get_placeholder_mask |
3 | 2 | 0 |
meth |
InstructBlipForConditionalGeneration.generate |
9 | 8 | 0 |
attr |
InstructBlipForConditionalGeneration.vision_model |
1 | 0 | 0 |
attr |
InstructBlipForConditionalGeneration.query_tokens |
1 | 0 | 0 |
attr |
InstructBlipForConditionalGeneration.qformer |
1 | 0 | 0 |
attr |
InstructBlipForConditionalGeneration.language_projection |
1 | 0 | 0 |
attr |
InstructBlipForConditionalGeneration.language_model |
1 | 0 | 0 |
meth |
InstructBlipModel.__init__ |
2 | 1 | 0 |
meth |
InstructBlipModel.get_input_embeddings |
1 | 0 | 0 |
meth |
InstructBlipModel.set_input_embeddings |
2 | 0 | 0 |
meth |
InstructBlipModel._preprocess_accelerate |
1 | 0 | 0 |
meth |
InstructBlipModel.get_placeholder_mask |
3 | 2 | 0 |
attr |
InstructBlipModel.vision_model |
1 | 0 | 0 |
attr |
InstructBlipModel.query_tokens |
1 | 0 | 0 |
attr |
InstructBlipModel.qformer |
1 | 0 | 0 |
attr |
InstructBlipModel.language_projection |
1 | 0 | 0 |
attr |
InstructBlipModel.language_model |
1 | 0 | 0 |
meth |
InstructBlipVisionModel.__init__ |
2 | 1 | 0 |
meth |
InstructBlipVisionModel.get_input_embeddings |
1 | 0 | 0 |
attr |
InstructBlipVisionModel.embeddings |
1 | 0 | 0 |
attr |
InstructBlipVisionModel.encoder |
1 | 0 | 0 |
attr |
InstructBlipVisionModel.post_layernorm |
1 | 0 | 0 |
meth |
InstructBlipPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.instructblip.processing_instructblip (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
InstructBlipProcessor.__init__ |
6 | 0 | 0 |
prop |
InstructBlipProcessor.model_input_names |
1 | 0 | 0 |
attr |
InstructBlipProcessor.num_query_tokens |
1 | 0 | 0 |
attr |
InstructBlipProcessor.image_token |
1 | 0 | 0 |
transformers.models.instructblipvideo.configuration_instructblipvideo (69 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
InstructBlipVideoConfig.__init__ |
7 | 0 | 0 |
attr |
InstructBlipVideoConfig.text_config |
1 | 0 | 0 |
attr |
InstructBlipVideoConfig.vision_config |
1 | 0 | 0 |
attr |
InstructBlipVideoConfig.qformer_config |
1 | 0 | 0 |
attr |
InstructBlipVideoConfig.num_query_tokens |
1 | 0 | 0 |
attr |
InstructBlipVideoConfig.video_token_index |
1 | 0 | 0 |
attr |
InstructBlipVideoConfig.use_decoder_only_language_model |
1 | 0 | 0 |
attr |
InstructBlipVideoConfig.initializer_factor |
1 | 0 | 0 |
attr |
InstructBlipVideoConfig.initializer_range |
1 | 0 | 0 |
meth |
InstructBlipVideoVisionConfig.__init__ |
13 | 0 | 0 |
attr |
InstructBlipVideoVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
InstructBlipVideoVisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
InstructBlipVideoVisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
InstructBlipVideoVisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
InstructBlipVideoVisionConfig.patch_size |
1 | 0 | 0 |
attr |
InstructBlipVideoVisionConfig.image_size |
1 | 0 | 0 |
attr |
InstructBlipVideoVisionConfig.initializer_range |
1 | 0 | 0 |
attr |
InstructBlipVideoVisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
InstructBlipVideoVisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
InstructBlipVideoVisionConfig.hidden_act |
1 | 0 | 0 |
attr |
InstructBlipVideoVisionConfig.qkv_bias |
1 | 0 | 0 |
meth |
InstructBlipVideoQFormerConfig.__init__ |
16 | 0 | 0 |
attr |
InstructBlipVideoQFormerConfig.pad_token_id |
1 | 0 | 0 |
attr |
InstructBlipVideoQFormerConfig.vocab_size |
1 | 0 | 0 |
attr |
InstructBlipVideoQFormerConfig.hidden_size |
1 | 0 | 0 |
attr |
InstructBlipVideoQFormerConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
InstructBlipVideoQFormerConfig.num_attention_heads |
1 | 0 | 0 |
attr |
InstructBlipVideoQFormerConfig.hidden_act |
1 | 0 | 0 |
attr |
InstructBlipVideoQFormerConfig.intermediate_size |
1 | 0 | 0 |
attr |
InstructBlipVideoQFormerConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
InstructBlipVideoQFormerConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
InstructBlipVideoQFormerConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
InstructBlipVideoQFormerConfig.initializer_range |
1 | 0 | 0 |
attr |
InstructBlipVideoQFormerConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
InstructBlipVideoQFormerConfig.cross_attention_frequency |
1 | 0 | 0 |
attr |
InstructBlipVideoQFormerConfig.encoder_hidden_size |
1 | 0 | 0 |
transformers.models.instructblipvideo.modeling_instructblipvideo (42 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
InstructBlipVideoQFormerModel.__init__ |
2 | 1 | 0 |
meth |
InstructBlipVideoQFormerModel.get_input_embeddings |
1 | 0 | 0 |
meth |
InstructBlipVideoQFormerModel.set_input_embeddings |
2 | 0 | 0 |
attr |
InstructBlipVideoQFormerModel._can_record_outputs |
1 | 0 | 0 |
attr |
InstructBlipVideoQFormerModel.embeddings |
1 | 0 | 0 |
attr |
InstructBlipVideoQFormerModel.encoder |
1 | 0 | 0 |
meth |
InstructBlipVideoModel.__init__ |
2 | 1 | 0 |
meth |
InstructBlipVideoModel.get_input_embeddings |
1 | 0 | 0 |
meth |
InstructBlipVideoModel.set_input_embeddings |
2 | 0 | 0 |
meth |
InstructBlipVideoModel._preprocess_accelerate |
1 | 0 | 0 |
meth |
InstructBlipVideoModel.get_placeholder_mask |
3 | 2 | 0 |
attr |
InstructBlipVideoModel.vision_model |
1 | 0 | 0 |
attr |
InstructBlipVideoModel.query_tokens |
1 | 0 | 0 |
attr |
InstructBlipVideoModel.qformer |
1 | 0 | 0 |
attr |
InstructBlipVideoModel.language_projection |
1 | 0 | 0 |
attr |
InstructBlipVideoModel.language_model |
1 | 0 | 0 |
meth |
InstructBlipVideoPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
InstructBlipVideoForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
InstructBlipVideoForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
InstructBlipVideoForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
InstructBlipVideoForConditionalGeneration.set_output_embeddings |
2 | 0 | 0 |
meth |
InstructBlipVideoForConditionalGeneration.get_encoder |
2 | 0 | 0 |
meth |
InstructBlipVideoForConditionalGeneration.get_decoder |
1 | 0 | 0 |
meth |
InstructBlipVideoForConditionalGeneration._preprocess_accelerate |
1 | 0 | 0 |
meth |
InstructBlipVideoForConditionalGeneration.get_placeholder_mask |
3 | 2 | 0 |
meth |
InstructBlipVideoForConditionalGeneration.generate |
9 | 8 | 0 |
attr |
InstructBlipVideoForConditionalGeneration.vision_model |
1 | 0 | 0 |
attr |
InstructBlipVideoForConditionalGeneration.query_tokens |
1 | 0 | 0 |
attr |
InstructBlipVideoForConditionalGeneration.qformer |
1 | 0 | 0 |
attr |
InstructBlipVideoForConditionalGeneration.language_projection |
1 | 0 | 0 |
attr |
InstructBlipVideoForConditionalGeneration.language_model |
1 | 0 | 0 |
meth |
InstructBlipVideoVisionModel.__init__ |
2 | 1 | 0 |
meth |
InstructBlipVideoVisionModel.get_input_embeddings |
1 | 0 | 0 |
attr |
InstructBlipVideoVisionModel.embeddings |
1 | 0 | 0 |
attr |
InstructBlipVideoVisionModel.encoder |
1 | 0 | 0 |
attr |
InstructBlipVideoVisionModel.post_layernorm |
1 | 0 | 0 |
transformers.models.instructblipvideo.modular_instructblipvideo (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
InstructBlipVideoConfig.__init__ |
7 | 0 | 0 |
attr |
InstructBlipVideoConfig.text_config |
1 | 0 | 0 |
attr |
InstructBlipVideoConfig.vision_config |
1 | 0 | 0 |
attr |
InstructBlipVideoConfig.qformer_config |
1 | 0 | 0 |
attr |
InstructBlipVideoConfig.num_query_tokens |
1 | 0 | 0 |
attr |
InstructBlipVideoConfig.video_token_index |
1 | 0 | 0 |
attr |
InstructBlipVideoConfig.use_decoder_only_language_model |
1 | 0 | 0 |
attr |
InstructBlipVideoConfig.initializer_factor |
1 | 0 | 0 |
attr |
InstructBlipVideoConfig.initializer_range |
1 | 0 | 0 |
meth |
InstructBlipVideoForConditionalGeneration.get_image_features |
2 | 0 | 0 |
meth |
InstructBlipVideoForConditionalGeneration.get_placeholder_mask |
3 | 2 | 0 |
meth |
InstructBlipVideoForConditionalGeneration.generate |
9 | 8 | 0 |
transformers.models.instructblipvideo.processing_instructblipvideo (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
InstructBlipVideoProcessor.__init__ |
6 | 0 | 0 |
meth |
InstructBlipVideoProcessor.__call__ |
18 | 17 | 0 |
prop |
InstructBlipVideoProcessor.model_input_names |
1 | 0 | 0 |
attr |
InstructBlipVideoProcessor.num_query_tokens |
1 | 0 | 0 |
attr |
InstructBlipVideoProcessor.video_token |
1 | 0 | 0 |
transformers.models.instructblipvideo.video_processing_instructblipvideo (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
InstructBlipVideoVideoProcessor._preprocess |
15 | 14 | 0 |
transformers.models.internvl.configuration_internvl (62 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
InternVLConfig.__init__ |
11 | 0 | 0 |
attr |
InternVLConfig.image_token_id |
1 | 0 | 0 |
attr |
InternVLConfig.image_seq_length |
1 | 0 | 0 |
attr |
InternVLConfig.downsample_ratio |
1 | 0 | 0 |
attr |
InternVLConfig.projector_hidden_act |
1 | 0 | 0 |
attr |
InternVLConfig.vision_feature_layer |
1 | 0 | 0 |
attr |
InternVLConfig.vision_feature_select_strategy |
1 | 0 | 0 |
attr |
InternVLConfig.text_config |
1 | 0 | 0 |
attr |
InternVLConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
InternVLConfig.vision_config |
1 | 0 | 0 |
meth |
InternVLVisionConfig.__init__ |
22 | 0 | 0 |
attr |
InternVLVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
InternVLVisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
InternVLVisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
InternVLVisionConfig.attention_bias |
1 | 0 | 0 |
attr |
InternVLVisionConfig.use_qk_norm |
1 | 0 | 0 |
attr |
InternVLVisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
InternVLVisionConfig.hidden_act |
1 | 0 | 0 |
attr |
InternVLVisionConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
InternVLVisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
InternVLVisionConfig.projection_dropout |
1 | 0 | 0 |
attr |
InternVLVisionConfig.initializer_range |
1 | 0 | 0 |
attr |
InternVLVisionConfig.norm_type |
1 | 0 | 0 |
attr |
InternVLVisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
InternVLVisionConfig.image_size |
1 | 0 | 0 |
attr |
InternVLVisionConfig.patch_size |
1 | 0 | 0 |
attr |
InternVLVisionConfig.num_channels |
1 | 0 | 0 |
attr |
InternVLVisionConfig.use_mask_token |
1 | 0 | 0 |
attr |
InternVLVisionConfig.use_absolute_position_embeddings |
1 | 0 | 0 |
attr |
InternVLVisionConfig.layer_scale_init_value |
1 | 0 | 0 |
attr |
InternVLVisionConfig.use_mean_pooling |
1 | 0 | 0 |
transformers.models.internvl.modeling_internvl (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
InternVLForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
InternVLForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
InternVLForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
InternVLForConditionalGeneration.prepare_inputs_for_generation |
10 | 0 | 0 |
attr |
InternVLForConditionalGeneration.model |
1 | 0 | 0 |
attr |
InternVLForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
InternVLModel.__init__ |
2 | 1 | 0 |
meth |
InternVLModel.get_input_embeddings |
1 | 0 | 0 |
meth |
InternVLModel.set_input_embeddings |
2 | 0 | 0 |
meth |
InternVLModel.get_placeholder_mask |
4 | 3 | 0 |
meth |
InternVLModel.pixel_shuffle |
3 | 2 | 0 |
attr |
InternVLModel.vision_tower |
1 | 0 | 0 |
attr |
InternVLModel.multi_modal_projector |
1 | 0 | 0 |
attr |
InternVLModel.language_model |
1 | 0 | 0 |
meth |
InternVLVisionPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
InternVLVisionModel.get_input_embeddings |
1 | 0 | 0 |
meth |
InternVLVisionModel.forward |
4 | 3 | 0 |
attr |
InternVLVisionModel.embeddings |
1 | 0 | 0 |
attr |
InternVLVisionModel.encoder |
1 | 0 | 0 |
attr |
InternVLVisionModel.layernorm |
1 | 0 | 0 |
transformers.models.internvl.modular_internvl (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
InternVLForConditionalGeneration.forward |
2 | 0 | 0 |
meth |
InternVLModel.pixel_shuffle |
3 | 2 | 0 |
meth |
InternVLVisionPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
InternVLVisionModel.get_input_embeddings |
1 | 0 | 0 |
meth |
InternVLVisionModel.forward |
4 | 3 | 0 |
attr |
InternVLVisionModel.embeddings |
1 | 0 | 0 |
attr |
InternVLVisionModel.encoder |
1 | 0 | 0 |
attr |
InternVLVisionModel.layernorm |
1 | 0 | 0 |
transformers.models.internvl.processing_internvl (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
InternVLProcessor.__init__ |
7 | 1 | 0 |
meth |
InternVLProcessor._insert_media_placeholders |
9 | 6 | 0 |
meth |
InternVLProcessor._get_num_multimodal_tokens |
3 | 0 | 0 |
prop |
InternVLProcessor.model_input_names |
1 | 0 | 0 |
attr |
InternVLProcessor.image_seq_length |
1 | 0 | 0 |
attr |
InternVLProcessor.start_image_token |
1 | 0 | 0 |
attr |
InternVLProcessor.end_image_token |
1 | 0 | 0 |
attr |
InternVLProcessor.start_image_token_id |
1 | 0 | 0 |
attr |
InternVLProcessor.end_image_token_id |
1 | 0 | 0 |
attr |
InternVLProcessor.image_token |
1 | 0 | 0 |
attr |
InternVLProcessor.video_token |
1 | 0 | 0 |
attr |
InternVLProcessor.image_token_id |
1 | 0 | 0 |
attr |
InternVLProcessor.image_ids |
1 | 0 | 0 |
transformers.models.internvl.video_processing_internvl (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
InternVLVideoProcessor.__init__ |
2 | 1 | 0 |
meth |
InternVLVideoProcessor.sample_frames |
6 | 4 | 0 |
meth |
InternVLVideoProcessor._preprocess |
15 | 14 | 0 |
transformers.models.jais2.configuration_jais2 (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Jais2Config.__init__ |
22 | 20 | 0 |
attr |
Jais2Config.vocab_size |
1 | 0 | 0 |
attr |
Jais2Config.max_position_embeddings |
1 | 0 | 0 |
attr |
Jais2Config.hidden_size |
1 | 0 | 0 |
attr |
Jais2Config.intermediate_size |
1 | 0 | 0 |
attr |
Jais2Config.num_hidden_layers |
1 | 0 | 0 |
attr |
Jais2Config.num_attention_heads |
1 | 0 | 0 |
attr |
Jais2Config.num_key_value_heads |
1 | 0 | 0 |
attr |
Jais2Config.hidden_act |
1 | 0 | 0 |
attr |
Jais2Config.initializer_range |
1 | 0 | 0 |
attr |
Jais2Config.use_cache |
1 | 0 | 0 |
attr |
Jais2Config.attention_bias |
1 | 0 | 0 |
attr |
Jais2Config.attention_dropout |
1 | 0 | 0 |
attr |
Jais2Config.mlp_bias |
1 | 0 | 0 |
attr |
Jais2Config.head_dim |
1 | 0 | 0 |
attr |
Jais2Config.rope_parameters |
1 | 0 | 0 |
attr |
Jais2Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
Jais2Config.pad_token_id |
1 | 0 | 0 |
attr |
Jais2Config.bos_token_id |
1 | 0 | 0 |
attr |
Jais2Config.eos_token_id |
1 | 0 | 0 |
attr |
Jais2Config.layer_norm_eps |
1 | 0 | 0 |
transformers.models.jais2.modeling_jais2 (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Jais2Model.__init__ |
2 | 1 | 0 |
attr |
Jais2Model.padding_idx |
1 | 0 | 0 |
attr |
Jais2Model.vocab_size |
1 | 0 | 0 |
attr |
Jais2Model.embed_tokens |
1 | 0 | 0 |
attr |
Jais2Model.layers |
1 | 0 | 0 |
attr |
Jais2Model.norm |
1 | 0 | 0 |
attr |
Jais2Model.rotary_emb |
1 | 0 | 0 |
attr |
Jais2Model.gradient_checkpointing |
1 | 0 | 0 |
meth |
Jais2ForCausalLM.__init__ |
2 | 0 | 0 |
attr |
Jais2ForCausalLM.model |
1 | 0 | 0 |
attr |
Jais2ForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Jais2ForCausalLM.lm_head |
1 | 0 | 0 |
transformers.models.jais2.modular_jais2 (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Jais2Model.__init__ |
2 | 1 | 0 |
attr |
Jais2Model.norm |
1 | 0 | 0 |
meth |
Jais2ForCausalLM.forward |
2 | 0 | 0 |
meth |
Jais2Config.__init__ |
22 | 20 | 0 |
attr |
Jais2Config.layer_norm_eps |
1 | 0 | 0 |
transformers.models.jamba.configuration_jamba (67 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
JambaConfig.__init__ |
33 | 0 | 0 |
meth |
JambaConfig._check_supported_offset |
4 | 3 | 0 |
prop |
JambaConfig.layers_block_type |
1 | 0 | 0 |
prop |
JambaConfig.layers_num_experts |
1 | 0 | 0 |
attr |
JambaConfig.vocab_size |
1 | 0 | 0 |
attr |
JambaConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
JambaConfig.hidden_size |
1 | 0 | 0 |
attr |
JambaConfig.intermediate_size |
1 | 0 | 0 |
attr |
JambaConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
JambaConfig.num_attention_heads |
1 | 0 | 0 |
attr |
JambaConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
JambaConfig.attention_dropout |
1 | 0 | 0 |
attr |
JambaConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
JambaConfig.hidden_act |
1 | 0 | 0 |
attr |
JambaConfig.initializer_range |
1 | 0 | 0 |
attr |
JambaConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
JambaConfig.use_cache |
1 | 0 | 0 |
attr |
JambaConfig.output_router_logits |
1 | 0 | 0 |
attr |
JambaConfig.router_aux_loss_coef |
1 | 0 | 0 |
attr |
JambaConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
JambaConfig.num_experts |
1 | 0 | 0 |
attr |
JambaConfig.expert_layer_period |
1 | 0 | 0 |
attr |
JambaConfig.expert_layer_offset |
1 | 0 | 0 |
attr |
JambaConfig.attn_layer_period |
1 | 0 | 0 |
attr |
JambaConfig.attn_layer_offset |
1 | 0 | 0 |
attr |
JambaConfig.use_mamba_kernels |
1 | 0 | 0 |
attr |
JambaConfig.mamba_d_state |
1 | 0 | 0 |
attr |
JambaConfig.mamba_d_conv |
1 | 0 | 0 |
attr |
JambaConfig.mamba_expand |
1 | 0 | 0 |
attr |
JambaConfig.mamba_dt_rank |
1 | 0 | 0 |
attr |
JambaConfig.mamba_conv_bias |
1 | 0 | 0 |
attr |
JambaConfig.mamba_proj_bias |
1 | 0 | 0 |
attr |
JambaConfig.pad_token_id |
1 | 0 | 0 |
attr |
JambaConfig.bos_token_id |
1 | 0 | 0 |
attr |
JambaConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.jamba.modeling_jamba (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
JambaForCausalLM.__init__ |
2 | 1 | 0 |
attr |
JambaForCausalLM.model |
1 | 0 | 0 |
attr |
JambaForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
JambaForCausalLM.lm_head |
1 | 0 | 0 |
attr |
JambaForCausalLM.router_aux_loss_coef |
1 | 0 | 0 |
attr |
JambaForCausalLM.num_experts |
1 | 0 | 0 |
attr |
JambaForCausalLM.num_experts_per_tok |
1 | 0 | 0 |
meth |
JambaPreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
JambaPreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
JambaModel.__init__ |
2 | 1 | 0 |
meth |
JambaModel._update_mamba_mask |
3 | 0 | 0 |
attr |
JambaModel.padding_idx |
1 | 0 | 0 |
attr |
JambaModel.vocab_size |
1 | 0 | 0 |
attr |
JambaModel.embed_tokens |
1 | 0 | 0 |
attr |
JambaModel.layers |
1 | 0 | 0 |
attr |
JambaModel.final_layernorm |
1 | 0 | 0 |
attr |
JambaModel.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.jamba.modular_jamba (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
JambaForCausalLM.__init__ |
2 | 1 | 0 |
attr |
JambaForCausalLM.num_experts |
1 | 0 | 0 |
meth |
JambaPreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
JambaPreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
JambaModel.__init__ |
2 | 1 | 0 |
meth |
JambaModel._update_mamba_mask |
3 | 0 | 0 |
attr |
JambaModel.padding_idx |
1 | 0 | 0 |
attr |
JambaModel.vocab_size |
1 | 0 | 0 |
attr |
JambaModel.embed_tokens |
1 | 0 | 0 |
attr |
JambaModel.layers |
1 | 0 | 0 |
attr |
JambaModel.final_layernorm |
1 | 0 | 0 |
attr |
JambaModel.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.janus.configuration_janus (72 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
JanusConfig.__init__ |
6 | 0 | 0 |
attr |
JanusConfig.initializer_range |
1 | 0 | 0 |
attr |
JanusConfig.image_token_id |
1 | 0 | 0 |
attr |
JanusConfig.text_config |
1 | 0 | 0 |
attr |
JanusConfig.vision_config |
1 | 0 | 0 |
attr |
JanusConfig.vq_config |
1 | 0 | 0 |
meth |
JanusVisionConfig.__init__ |
20 | 0 | 0 |
attr |
JanusVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
JanusVisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
JanusVisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
JanusVisionConfig.num_channels |
1 | 0 | 0 |
attr |
JanusVisionConfig.patch_size |
1 | 0 | 0 |
attr |
JanusVisionConfig.image_size |
1 | 0 | 0 |
attr |
JanusVisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
JanusVisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
JanusVisionConfig.hidden_act |
1 | 0 | 0 |
attr |
JanusVisionConfig.mlp_ratio |
1 | 0 | 0 |
attr |
JanusVisionConfig.attention_bias |
1 | 0 | 0 |
attr |
JanusVisionConfig.hidden_dropout_rate |
1 | 0 | 0 |
attr |
JanusVisionConfig.projection_dim |
1 | 0 | 0 |
attr |
JanusVisionConfig.projection_dropout |
1 | 0 | 0 |
attr |
JanusVisionConfig.use_qk_norm |
1 | 0 | 0 |
attr |
JanusVisionConfig.initializer_range |
1 | 0 | 0 |
attr |
JanusVisionConfig.depth |
1 | 0 | 0 |
attr |
JanusVisionConfig.num_image_tokens |
1 | 0 | 0 |
meth |
JanusVQVAEConfig.__init__ |
18 | 11 | 0 |
attr |
JanusVQVAEConfig.embed_dim |
1 | 0 | 0 |
attr |
JanusVQVAEConfig.num_embeddings |
1 | 0 | 0 |
attr |
JanusVQVAEConfig.double_latent |
1 | 0 | 0 |
attr |
JanusVQVAEConfig.latent_channels |
1 | 0 | 0 |
attr |
JanusVQVAEConfig.in_channels |
1 | 0 | 0 |
attr |
JanusVQVAEConfig.base_channels |
1 | 0 | 0 |
attr |
JanusVQVAEConfig.channel_multiplier |
1 | 0 | 0 |
attr |
JanusVQVAEConfig.num_res_blocks |
1 | 0 | 0 |
attr |
JanusVQVAEConfig.dropout |
1 | 0 | 0 |
attr |
JanusVQVAEConfig.initializer_range |
1 | 0 | 0 |
attr |
JanusVQVAEConfig.num_patches |
1 | 0 | 0 |
attr |
JanusVQVAEConfig.out_channels |
1 | 0 | 0 |
attr |
JanusVQVAEConfig.projection_dim |
1 | 0 | 0 |
attr |
JanusVQVAEConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
JanusVQVAEConfig.hidden_act |
1 | 0 | 0 |
attr |
JanusVQVAEConfig.image_token_embed_dim |
1 | 0 | 0 |
transformers.models.janus.image_processing_janus (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
JanusImageProcessor.__init__ |
13 | 12 | 0 |
meth |
JanusImageProcessor.resize |
7 | 6 | 0 |
meth |
JanusImageProcessor.postprocess |
9 | 8 | 0 |
attr |
JanusImageProcessor.do_resize |
1 | 0 | 0 |
attr |
JanusImageProcessor.size |
1 | 0 | 0 |
attr |
JanusImageProcessor.resample |
1 | 0 | 0 |
attr |
JanusImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
JanusImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
JanusImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
JanusImageProcessor.image_mean |
1 | 0 | 0 |
attr |
JanusImageProcessor.image_std |
1 | 0 | 0 |
attr |
JanusImageProcessor.do_convert_rgb |
1 | 0 | 0 |
attr |
JanusImageProcessor.do_pad |
1 | 0 | 0 |
attr |
JanusImageProcessor.min_size |
1 | 0 | 0 |
attr |
JanusImageProcessor.background_color |
1 | 0 | 0 |
transformers.models.janus.image_processing_janus_fast (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
JanusImageProcessorFast.__init__ |
2 | 1 | 0 |
meth |
JanusImageProcessorFast.resize |
7 | 6 | 0 |
meth |
JanusImageProcessorFast._preprocess |
15 | 14 | 0 |
attr |
JanusImageProcessorFast.background_color |
1 | 0 | 0 |
transformers.models.janus.modeling_janus (48 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
JanusForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
JanusForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
JanusForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
JanusForConditionalGeneration.prepare_inputs_for_generation |
10 | 0 | 0 |
meth |
JanusForConditionalGeneration.decode_image_tokens |
2 | 1 | 0 |
meth |
JanusForConditionalGeneration.generate |
5 | 3 | 0 |
attr |
JanusForConditionalGeneration.model |
1 | 0 | 0 |
attr |
JanusForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
JanusPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
JanusVisionModel.__init__ |
2 | 1 | 0 |
meth |
JanusVisionModel.get_input_embeddings |
1 | 0 | 0 |
attr |
JanusVisionModel.embeddings |
1 | 0 | 0 |
attr |
JanusVisionModel.encoder |
1 | 0 | 0 |
attr |
JanusVisionModel.post_layernorm |
1 | 0 | 0 |
meth |
JanusModel.__init__ |
2 | 1 | 0 |
meth |
JanusModel.get_input_embeddings |
1 | 0 | 0 |
meth |
JanusModel.set_input_embeddings |
2 | 0 | 0 |
meth |
JanusModel.get_placeholder_mask |
4 | 3 | 0 |
meth |
JanusModel.forward |
11 | 10 | 0 |
attr |
JanusModel.vision_model |
1 | 0 | 0 |
attr |
JanusModel.aligner |
1 | 0 | 0 |
attr |
JanusModel.vqmodel |
1 | 0 | 0 |
attr |
JanusModel.generation_embeddings |
1 | 0 | 0 |
attr |
JanusModel.generation_aligner |
1 | 0 | 0 |
attr |
JanusModel.generation_head |
1 | 0 | 0 |
attr |
JanusModel.language_model |
1 | 0 | 0 |
attr |
JanusModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
JanusVQVAE.__init__ |
2 | 1 | 0 |
meth |
JanusVQVAE.forward |
3 | 2 | 0 |
attr |
JanusVQVAE.encoder |
1 | 0 | 0 |
attr |
JanusVQVAE.quantize |
1 | 0 | 0 |
attr |
JanusVQVAE.quant_conv |
1 | 0 | 0 |
attr |
JanusVQVAE.post_quant_conv |
1 | 0 | 0 |
attr |
JanusVQVAE.decoder |
1 | 0 | 0 |
attr |
JanusVQVAE.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.janus.modular_janus (101 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
JanusVisionConfig.__init__ |
20 | 0 | 0 |
attr |
JanusVisionConfig.mlp_ratio |
1 | 0 | 0 |
attr |
JanusVisionConfig.attention_bias |
1 | 0 | 0 |
attr |
JanusVisionConfig.hidden_dropout_rate |
1 | 0 | 0 |
attr |
JanusVisionConfig.projection_dim |
1 | 0 | 0 |
attr |
JanusVisionConfig.projection_dropout |
1 | 0 | 0 |
attr |
JanusVisionConfig.use_qk_norm |
1 | 0 | 0 |
attr |
JanusVisionConfig.initializer_range |
1 | 0 | 0 |
attr |
JanusVisionConfig.depth |
1 | 0 | 0 |
attr |
JanusVisionConfig.num_image_tokens |
1 | 0 | 0 |
meth |
JanusVQVAEConfig.__init__ |
18 | 11 | 0 |
attr |
JanusVQVAEConfig.num_patches |
1 | 0 | 0 |
attr |
JanusVQVAEConfig.out_channels |
1 | 0 | 0 |
attr |
JanusVQVAEConfig.projection_dim |
1 | 0 | 0 |
attr |
JanusVQVAEConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
JanusVQVAEConfig.hidden_act |
1 | 0 | 0 |
attr |
JanusVQVAEConfig.image_token_embed_dim |
1 | 0 | 0 |
meth |
JanusConfig.__init__ |
6 | 0 | 0 |
attr |
JanusConfig.initializer_range |
1 | 0 | 0 |
attr |
JanusConfig.image_token_id |
1 | 0 | 0 |
attr |
JanusConfig.text_config |
1 | 0 | 0 |
attr |
JanusConfig.vision_config |
1 | 0 | 0 |
attr |
JanusConfig.vq_config |
1 | 0 | 0 |
meth |
JanusForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
JanusForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
JanusForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
JanusForConditionalGeneration.prepare_inputs_for_generation |
10 | 0 | 0 |
meth |
JanusForConditionalGeneration.decode_image_tokens |
2 | 1 | 0 |
meth |
JanusForConditionalGeneration.generate |
5 | 3 | 0 |
attr |
JanusForConditionalGeneration.model |
1 | 0 | 0 |
attr |
JanusForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
JanusModel.__init__ |
2 | 1 | 0 |
meth |
JanusModel.get_input_embeddings |
1 | 0 | 0 |
meth |
JanusModel.set_input_embeddings |
2 | 0 | 0 |
meth |
JanusModel.get_placeholder_mask |
4 | 3 | 0 |
meth |
JanusModel.forward |
11 | 10 | 0 |
attr |
JanusModel.vision_model |
1 | 0 | 0 |
attr |
JanusModel.aligner |
1 | 0 | 0 |
attr |
JanusModel.vqmodel |
1 | 0 | 0 |
attr |
JanusModel.generation_embeddings |
1 | 0 | 0 |
attr |
JanusModel.generation_aligner |
1 | 0 | 0 |
attr |
JanusModel.generation_head |
1 | 0 | 0 |
attr |
JanusModel.language_model |
1 | 0 | 0 |
attr |
JanusModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
JanusVQVAE.__init__ |
2 | 1 | 0 |
meth |
JanusVQVAE.forward |
3 | 2 | 0 |
attr |
JanusVQVAE.decoder |
1 | 0 | 0 |
attr |
JanusVQVAE.gradient_checkpointing |
1 | 0 | 0 |
meth |
JanusPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
JanusImageProcessor.__init__ |
13 | 11 | 0 |
meth |
JanusImageProcessor.resize |
7 | 6 | 0 |
meth |
JanusImageProcessor.postprocess |
9 | 8 | 0 |
attr |
JanusImageProcessor.do_pad |
1 | 0 | 0 |
attr |
JanusImageProcessor.min_size |
1 | 0 | 0 |
attr |
JanusImageProcessor.background_color |
1 | 0 | 0 |
meth |
JanusVisionModel.__init__ |
2 | 1 | 0 |
attr |
JanusVisionModel.encoder |
1 | 0 | 0 |
transformers.models.janus.processing_janus (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
JanusProcessor.__init__ |
6 | 0 | 0 |
meth |
JanusProcessor.postprocess |
3 | 1 | 0 |
meth |
JanusProcessor.post_process_multimodal_output |
5 | 0 | 0 |
attr |
JanusProcessor.num_image_tokens |
1 | 0 | 0 |
attr |
JanusProcessor.image_token |
1 | 0 | 0 |
attr |
JanusProcessor.image_start_token |
1 | 0 | 0 |
attr |
JanusProcessor.image_end_token |
1 | 0 | 0 |
attr |
JanusProcessor.use_default_system_prompt |
1 | 0 | 0 |
transformers.models.jetmoe.configuration_jetmoe (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
JetMoeConfig.init |
23 | 21 | 0 |
attr |
JetMoeConfig.vocab_size |
1 | 0 | 0 |
attr |
JetMoeConfig.hidden_size |
1 | 0 | 0 |
attr |
JetMoeConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
JetMoeConfig.num_attention_heads |
1 | 0 | 0 |
attr |
JetMoeConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
JetMoeConfig.kv_channels |
1 | 0 | 0 |
attr |
JetMoeConfig.intermediate_size |
1 | 0 | 0 |
attr |
JetMoeConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
JetMoeConfig.activation_function |
1 | 0 | 0 |
attr |
JetMoeConfig.num_local_experts |
1 | 0 | 0 |
attr |
JetMoeConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
JetMoeConfig.output_router_logits |
1 | 0 | 0 |
attr |
JetMoeConfig.aux_loss_coef |
1 | 0 | 0 |
attr |
JetMoeConfig.use_cache |
1 | 0 | 0 |
attr |
JetMoeConfig.initializer_range |
1 | 0 | 0 |
attr |
JetMoeConfig.attention_dropout |
1 | 0 | 0 |
attr |
JetMoeConfig.bos_token_id |
1 | 0 | 0 |
attr |
JetMoeConfig.eos_token_id |
1 | 0 | 0 |
attr |
JetMoeConfig.pad_token_id |
1 | 0 | 0 |
attr |
JetMoeConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
JetMoeConfig.rope_parameters |
1 | 0 | 0 |
attr |
JetMoeConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.jetmoe.modeling_jetmoe (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
JetMoeForCausalLM.init |
2 | 0 | 0 |
meth |
JetMoeForCausalLM.forward |
12 | 11 | 0 |
attr |
JetMoeForCausalLM.model |
1 | 0 | 0 |
attr |
JetMoeForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
JetMoeForCausalLM.aux_loss_coef |
1 | 0 | 0 |
attr |
JetMoeForCausalLM.lm_head |
1 | 0 | 0 |
attr |
JetMoeForCausalLM.tie_word_embeddings |
1 | 0 | 0 |
attr |
JetMoeForCausalLM.num_experts |
1 | 0 | 0 |
attr |
JetMoeForCausalLM.num_experts_per_tok |
1 | 0 | 0 |
meth |
JetMoeModel.init |
2 | 1 | 0 |
attr |
JetMoeModel.padding_idx |
1 | 0 | 0 |
attr |
JetMoeModel.vocab_size |
1 | 0 | 0 |
attr |
JetMoeModel.embed_tokens |
1 | 0 | 0 |
attr |
JetMoeModel.layers |
1 | 0 | 0 |
attr |
JetMoeModel.norm |
1 | 0 | 0 |
attr |
JetMoeModel.rotary_emb |
1 | 0 | 0 |
attr |
JetMoeModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
JetMoePreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
JetMoePreTrainedModel._can_record_outputs |
1 | 0 | 0 |
transformers.models.jetmoe.modular_jetmoe (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
JetMoeForCausalLM.init |
2 | 0 | 0 |
meth |
JetMoeForCausalLM.forward |
12 | 11 | 0 |
attr |
JetMoeForCausalLM.model |
1 | 0 | 0 |
attr |
JetMoeForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
JetMoeForCausalLM.aux_loss_coef |
1 | 0 | 0 |
attr |
JetMoeForCausalLM.lm_head |
1 | 0 | 0 |
attr |
JetMoeForCausalLM.tie_word_embeddings |
1 | 0 | 0 |
attr |
JetMoeForCausalLM.num_experts |
1 | 0 | 0 |
attr |
JetMoeForCausalLM.num_experts_per_tok |
1 | 0 | 0 |
meth |
JetMoeModel.init |
2 | 1 | 0 |
attr |
JetMoeModel.padding_idx |
1 | 0 | 0 |
attr |
JetMoeModel.vocab_size |
1 | 0 | 0 |
attr |
JetMoeModel.embed_tokens |
1 | 0 | 0 |
attr |
JetMoeModel.layers |
1 | 0 | 0 |
attr |
JetMoeModel.norm |
1 | 0 | 0 |
meth |
JetMoePreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
JetMoePreTrainedModel._can_record_outputs |
1 | 0 | 0 |
transformers.models.kosmos2.configuration_kosmos2 (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Kosmos2Config.init |
6 | 0 | 0 |
attr |
Kosmos2Config.text_config |
1 | 0 | 0 |
attr |
Kosmos2Config.vision_config |
1 | 0 | 0 |
attr |
Kosmos2Config.latent_query_num |
1 | 0 | 0 |
attr |
Kosmos2Config.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.kosmos2.modeling_kosmos2 (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Kosmos2Model.init |
2 | 1 | 0 |
meth |
Kosmos2Model.set_input_embeddings |
2 | 0 | 0 |
attr |
Kosmos2Model.text_model |
1 | 0 | 0 |
attr |
Kosmos2Model.vision_model |
1 | 0 | 0 |
attr |
Kosmos2Model.image_to_text_projection |
1 | 0 | 0 |
meth |
Kosmos2PreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
Kosmos2ForConditionalGeneration.init |
2 | 1 | 0 |
meth |
Kosmos2ForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Kosmos2ForConditionalGeneration.set_output_embeddings |
2 | 0 | 0 |
meth |
Kosmos2ForConditionalGeneration.generate |
8 | 6 | 0 |
attr |
Kosmos2ForConditionalGeneration.text_model |
1 | 0 | 0 |
attr |
Kosmos2ForConditionalGeneration.vision_model |
1 | 0 | 0 |
attr |
Kosmos2ForConditionalGeneration.image_to_text_projection |
1 | 0 | 0 |
transformers.models.kosmos2.processing_kosmos2 (33 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Kosmos2Processor.init |
5 | 0 | 0 |
meth |
Kosmos2Processor._check_bboxes_for_single_text |
2 | 0 | 0 |
meth |
Kosmos2Processor._preprocess_single_example |
5 | 0 | 0 |
meth |
Kosmos2Processor.post_process_generation |
3 | 0 | 0 |
meth |
Kosmos2Processor.post_process_image_text_to_text |
4 | 0 | 0 |
prop |
Kosmos2Processor.model_input_names |
1 | 0 | 0 |
attr |
Kosmos2Processor.eod_token |
1 | 0 | 0 |
attr |
Kosmos2Processor.boi_token |
1 | 0 | 0 |
attr |
Kosmos2Processor.eoi_token |
1 | 0 | 0 |
attr |
Kosmos2Processor.eoc_token |
1 | 0 | 0 |
attr |
Kosmos2Processor.eol_token |
1 | 0 | 0 |
attr |
Kosmos2Processor.bop_token |
1 | 0 | 0 |
attr |
Kosmos2Processor.eop_token |
1 | 0 | 0 |
attr |
Kosmos2Processor.boo_token |
1 | 0 | 0 |
attr |
Kosmos2Processor.eoo_token |
1 | 0 | 0 |
attr |
Kosmos2Processor.dom_token |
1 | 0 | 0 |
attr |
Kosmos2Processor.grd_token |
1 | 0 | 0 |
attr |
Kosmos2Processor.tag_tokens |
1 | 0 | 0 |
attr |
Kosmos2Processor.num_patch_index_tokens |
1 | 0 | 0 |
transformers.models.kosmos2_5.configuration_kosmos2_5 (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Kosmos2_5Config.init |
6 | 0 | 0 |
attr |
Kosmos2_5Config.text_config |
1 | 0 | 0 |
attr |
Kosmos2_5Config.vision_config |
1 | 0 | 0 |
attr |
Kosmos2_5Config.latent_query_num |
1 | 0 | 0 |
attr |
Kosmos2_5Config.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.kosmos2_5.image_processing_kosmos2_5 (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Kosmos2_5ImageProcessor.init |
6 | 5 | 0 |
meth |
Kosmos2_5ImageProcessor.extract_flattened_patches |
6 | 5 | 0 |
meth |
Kosmos2_5ImageProcessor.normalize |
5 | 4 | 0 |
meth |
Kosmos2_5ImageProcessor.preprocess |
10 | 9 | 0 |
attr |
Kosmos2_5ImageProcessor.patch_size |
1 | 0 | 0 |
attr |
Kosmos2_5ImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
Kosmos2_5ImageProcessor.do_convert_rgb |
1 | 0 | 0 |
attr |
Kosmos2_5ImageProcessor.max_patches |
1 | 0 | 0 |
transformers.models.kosmos2_5.image_processing_kosmos2_5_fast (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Kosmos2_5ImageProcessorFast.init |
2 | 1 | 0 |
meth |
Kosmos2_5ImageProcessorFast.normalize |
3 | 2 | 0 |
meth |
Kosmos2_5ImageProcessorFast._preprocess |
8 | 7 | 0 |
transformers.models.kosmos2_5.modeling_kosmos2_5 (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Kosmos2_5Model.init |
2 | 1 | 0 |
meth |
Kosmos2_5Model.set_input_embeddings |
2 | 0 | 0 |
attr |
Kosmos2_5Model.text_model |
1 | 0 | 0 |
attr |
Kosmos2_5Model.vision_model |
1 | 0 | 0 |
attr |
Kosmos2_5Model.image_to_text_projection |
1 | 0 | 0 |
meth |
Kosmos2_5ForConditionalGeneration.init |
2 | 1 | 0 |
meth |
Kosmos2_5ForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Kosmos2_5ForConditionalGeneration.set_output_embeddings |
2 | 0 | 0 |
meth |
Kosmos2_5ForConditionalGeneration.prepare_inputs_for_generation |
12 | 0 | 0 |
attr |
Kosmos2_5ForConditionalGeneration.text_model |
1 | 0 | 0 |
attr |
Kosmos2_5ForConditionalGeneration.vision_model |
1 | 0 | 0 |
attr |
Kosmos2_5ForConditionalGeneration.image_to_text_projection |
1 | 0 | 0 |
meth |
Kosmos2_5PreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.kosmos2_5.processing_kosmos2_5 (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Kosmos2_5Processor.init |
4 | 1 | 0 |
meth |
Kosmos2_5Processor.batch_decode |
3 | 0 | 0 |
meth |
Kosmos2_5Processor.decode |
3 | 0 | 0 |
prop |
Kosmos2_5Processor.model_input_names |
1 | 0 | 0 |
attr |
Kosmos2_5Processor.image_start_token |
1 | 0 | 0 |
attr |
Kosmos2_5Processor.image_end_token |
1 | 0 | 0 |
attr |
Kosmos2_5Processor.image_token |
1 | 0 | 0 |
attr |
Kosmos2_5Processor.num_image_tokens |
1 | 0 | 0 |
transformers.models.kyutai_speech_to_text.configuration_kyutai_speech_to_text (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
KyutaiSpeechToTextConfig.init |
26 | 24 | 0 |
attr |
KyutaiSpeechToTextConfig.num_codebooks |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextConfig.frame_size |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextConfig.audio_bos_token_id |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextConfig.audio_pad_token_id |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextConfig.codebook_vocab_size |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextConfig.vocab_size |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextConfig.hidden_size |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextConfig.ffn_dim |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextConfig.hidden_act |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextConfig.initializer_range |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextConfig.use_cache |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextConfig.attention_dropout |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextConfig.head_dim |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextConfig.sliding_window |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextConfig.rope_parameters |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextConfig.pad_token_id |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextConfig.bos_token_id |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextConfig.eos_token_id |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextConfig.codec_config |
1 | 0 | 0 |
transformers.models.kyutai_speech_to_text.feature_extraction_kyutai_speech_to_text (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
KyutaiSpeechToTextFeatureExtractor.init |
9 | 7 | 0 |
attr |
KyutaiSpeechToTextFeatureExtractor.chunk_length_s |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextFeatureExtractor.overlap |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextFeatureExtractor.audio_delay_seconds |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextFeatureExtractor.audio_silence_prefix_seconds |
1 | 0 | 0 |
transformers.models.kyutai_speech_to_text.modeling_kyutai_speech_to_text (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
KyutaiSpeechToTextPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
KyutaiSpeechToTextModel.init |
2 | 0 | 0 |
meth |
KyutaiSpeechToTextModel.forward |
12 | 11 | 0 |
attr |
KyutaiSpeechToTextModel.padding_idx |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextModel.vocab_size |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextModel.embed_tokens |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextModel.layers |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextModel.norm |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
KyutaiSpeechToTextForConditionalGeneration.init |
2 | 0 | 0 |
meth |
KyutaiSpeechToTextForConditionalGeneration._prepare_generation_config |
3 | 0 | 0 |
meth |
KyutaiSpeechToTextForConditionalGeneration.prepare_inputs_for_generation |
10 | 7 | 0 |
meth |
KyutaiSpeechToTextForConditionalGeneration.from_pretrained |
3 | 0 | 0 |
meth |
KyutaiSpeechToTextForConditionalGeneration.save_pretrained |
3 | 0 | 0 |
meth |
KyutaiSpeechToTextForConditionalGeneration.generate |
3 | 0 | 0 |
attr |
KyutaiSpeechToTextForConditionalGeneration.model |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextForConditionalGeneration.vocab_size |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextForConditionalGeneration.lm_head |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextForConditionalGeneration.codec_model |
1 | 0 | 0 |
transformers.models.kyutai_speech_to_text.modular_kyutai_speech_to_text (29 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
KyutaiSpeechToTextPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
KyutaiSpeechToTextModel.init |
2 | 0 | 0 |
attr |
KyutaiSpeechToTextModel.embed_tokens |
1 | 0 | 0 |
meth |
KyutaiSpeechToTextForConditionalGeneration.init |
2 | 0 | 0 |
meth |
KyutaiSpeechToTextForConditionalGeneration.forward |
2 | 0 | 0 |
meth |
KyutaiSpeechToTextForConditionalGeneration._prepare_generation_config |
3 | 0 | 0 |
meth |
KyutaiSpeechToTextForConditionalGeneration.prepare_inputs_for_generation |
10 | 7 | 0 |
meth |
KyutaiSpeechToTextForConditionalGeneration.from_pretrained |
3 | 0 | 0 |
meth |
KyutaiSpeechToTextForConditionalGeneration.save_pretrained |
3 | 0 | 0 |
meth |
KyutaiSpeechToTextForConditionalGeneration.generate |
3 | 0 | 0 |
attr |
KyutaiSpeechToTextForConditionalGeneration.codec_model |
1 | 0 | 0 |
meth |
KyutaiSpeechToTextFeatureExtractor.init |
4 | 2 | 0 |
attr |
KyutaiSpeechToTextFeatureExtractor.audio_delay_seconds |
1 | 0 | 0 |
attr |
KyutaiSpeechToTextFeatureExtractor.audio_silence_prefix_seconds |
1 | 0 | 0 |
transformers.models.kyutai_speech_to_text.processing_kyutai_speech_to_text (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
KyutaiSpeechToTextProcessor.init |
3 | 0 | 0 |
transformers.models.lasr.configuration_lasr (66 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LasrCTCConfig.init |
7 | 1 | 0 |
meth |
LasrCTCConfig.from_encoder_config |
3 | 1 | 0 |
prop |
LasrCTCConfig.inputs_to_logits_ratio |
1 | 0 | 0 |
attr |
LasrCTCConfig.vocab_size |
1 | 0 | 0 |
attr |
LasrCTCConfig.ctc_loss_reduction |
1 | 0 | 0 |
attr |
LasrCTCConfig.ctc_zero_infinity |
1 | 0 | 0 |
attr |
LasrCTCConfig.encoder_config |
1 | 0 | 0 |
attr |
LasrCTCConfig.initializer_range |
1 | 0 | 0 |
attr |
LasrCTCConfig.pad_token_id |
1 | 0 | 0 |
meth |
LasrEncoderConfig.init |
26 | 0 | 0 |
attr |
LasrEncoderConfig.rope_parameters |
1 | 0 | 0 |
attr |
LasrEncoderConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
LasrEncoderConfig.feed_forward_residual_weights |
1 | 0 | 0 |
attr |
LasrEncoderConfig.conv_residual_weights |
1 | 0 | 0 |
attr |
LasrEncoderConfig.batch_norm_momentum |
1 | 0 | 0 |
attr |
LasrEncoderConfig.hidden_size |
1 | 0 | 0 |
attr |
LasrEncoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
LasrEncoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
LasrEncoderConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
LasrEncoderConfig.intermediate_size |
1 | 0 | 0 |
attr |
LasrEncoderConfig.hidden_act |
1 | 0 | 0 |
attr |
LasrEncoderConfig.attention_bias |
1 | 0 | 0 |
attr |
LasrEncoderConfig.convolution_bias |
1 | 0 | 0 |
attr |
LasrEncoderConfig.conv_kernel_size |
1 | 0 | 0 |
attr |
LasrEncoderConfig.subsampling_conv_kernel_size |
1 | 0 | 0 |
attr |
LasrEncoderConfig.subsampling_conv_stride |
1 | 0 | 0 |
attr |
LasrEncoderConfig.subsampling_conv_channels |
1 | 0 | 0 |
attr |
LasrEncoderConfig.num_mel_bins |
1 | 0 | 0 |
attr |
LasrEncoderConfig.dropout |
1 | 0 | 0 |
attr |
LasrEncoderConfig.dropout_positions |
1 | 0 | 0 |
attr |
LasrEncoderConfig.layerdrop |
1 | 0 | 0 |
attr |
LasrEncoderConfig.activation_dropout |
1 | 0 | 0 |
attr |
LasrEncoderConfig.attention_dropout |
1 | 0 | 0 |
attr |
LasrEncoderConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
LasrEncoderConfig.initializer_range |
1 | 0 | 0 |
transformers.models.lasr.feature_extraction_lasr (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LasrFeatureExtractor.init |
8 | 0 | 0 |
meth |
LasrFeatureExtractor._torch_extract_fbank_features |
3 | 0 | 0 |
meth |
LasrFeatureExtractor.call |
13 | 12 | 0 |
attr |
LasrFeatureExtractor.hop_length |
1 | 0 | 0 |
attr |
LasrFeatureExtractor.n_fft |
1 | 0 | 0 |
attr |
LasrFeatureExtractor.win_length |
1 | 0 | 0 |
attr |
LasrFeatureExtractor.mel_filters |
1 | 0 | 0 |
transformers.models.lasr.modeling_lasr (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LasrPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
LasrPreTrainedModel._get_subsampling_output_length |
2 | 1 | 0 |
meth |
LasrPreTrainedModel._get_output_attention_mask |
3 | 2 | 0 |
meth |
LasrEncoder.init |
2 | 1 | 0 |
attr |
LasrEncoder.gradient_checkpointing |
1 | 0 | 0 |
attr |
LasrEncoder.dropout |
1 | 0 | 0 |
attr |
LasrEncoder.dropout_positions |
1 | 0 | 0 |
attr |
LasrEncoder.layerdrop |
1 | 0 | 0 |
attr |
LasrEncoder.subsampler |
1 | 0 | 0 |
attr |
LasrEncoder.rotary_emb |
1 | 0 | 0 |
attr |
LasrEncoder.layers |
1 | 0 | 0 |
attr |
LasrEncoder.out_norm |
1 | 0 | 0 |
meth |
LasrForCTC.init |
2 | 1 | 0 |
attr |
LasrForCTC.encoder |
1 | 0 | 0 |
attr |
LasrForCTC.ctc_head |
1 | 0 | 0 |
transformers.models.lasr.modular_lasr (63 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LasrCTCConfig.init |
7 | 1 | 0 |
prop |
LasrCTCConfig.inputs_to_logits_ratio |
1 | 0 | 0 |
meth |
LasrPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
LasrPreTrainedModel._get_subsampling_output_length |
2 | 1 | 0 |
meth |
LasrEncoder.init |
2 | 1 | 0 |
attr |
LasrEncoder.gradient_checkpointing |
1 | 0 | 0 |
attr |
LasrEncoder.dropout |
1 | 0 | 0 |
attr |
LasrEncoder.dropout_positions |
1 | 0 | 0 |
attr |
LasrEncoder.layerdrop |
1 | 0 | 0 |
attr |
LasrEncoder.subsampler |
1 | 0 | 0 |
attr |
LasrEncoder.rotary_emb |
1 | 0 | 0 |
attr |
LasrEncoder.layers |
1 | 0 | 0 |
attr |
LasrEncoder.out_norm |
1 | 0 | 0 |
meth |
LasrEncoderConfig.init |
26 | 0 | 0 |
attr |
LasrEncoderConfig.rope_parameters |
1 | 0 | 0 |
attr |
LasrEncoderConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
LasrEncoderConfig.feed_forward_residual_weights |
1 | 0 | 0 |
attr |
LasrEncoderConfig.conv_residual_weights |
1 | 0 | 0 |
attr |
LasrEncoderConfig.batch_norm_momentum |
1 | 0 | 0 |
meth |
LasrForCTC.generate |
2 | 0 | 0 |
meth |
LasrTokenizer.init |
10 | 0 | 0 |
meth |
LasrTokenizer._decode |
6 | 5 | 0 |
transformers.models.lasr.processing_lasr (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LasrProcessor.init |
3 | 0 | 0 |
meth |
LasrProcessor.call |
5 | 4 | 0 |
prop |
LasrProcessor.model_input_names |
1 | 0 | 0 |
transformers.models.lasr.tokenization_lasr (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LasrTokenizer.init |
10 | 0 | 0 |
meth |
LasrTokenizer.get_sentinel_tokens |
1 | 0 | 0 |
meth |
LasrTokenizer.get_sentinel_token_ids |
1 | 0 | 0 |
meth |
LasrTokenizer._decode |
6 | 5 | 0 |
transformers.models.layoutlm.configuration_layoutlm (38 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LayoutLMConfig.init |
20 | 0 | 0 |
attr |
LayoutLMConfig.pad_token_id |
1 | 0 | 0 |
attr |
LayoutLMConfig.eos_token_id |
1 | 0 | 0 |
attr |
LayoutLMConfig.bos_token_id |
1 | 0 | 0 |
attr |
LayoutLMConfig.vocab_size |
1 | 0 | 0 |
attr |
LayoutLMConfig.hidden_size |
1 | 0 | 0 |
attr |
LayoutLMConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
LayoutLMConfig.num_attention_heads |
1 | 0 | 0 |
attr |
LayoutLMConfig.hidden_act |
1 | 0 | 0 |
attr |
LayoutLMConfig.intermediate_size |
1 | 0 | 0 |
attr |
LayoutLMConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
LayoutLMConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
LayoutLMConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
LayoutLMConfig.type_vocab_size |
1 | 0 | 0 |
attr |
LayoutLMConfig.initializer_range |
1 | 0 | 0 |
attr |
LayoutLMConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
LayoutLMConfig.use_cache |
1 | 0 | 0 |
attr |
LayoutLMConfig.max_2d_position_embeddings |
1 | 0 | 0 |
attr |
LayoutLMConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.layoutlm.modeling_layoutlm (44 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LayoutLMModel.init |
2 | 0 | 0 |
meth |
LayoutLMModel.get_input_embeddings |
1 | 0 | 0 |
meth |
LayoutLMModel.set_input_embeddings |
2 | 0 | 0 |
meth |
LayoutLMModel.forward |
11 | 10 | 0 |
attr |
LayoutLMModel.embeddings |
1 | 0 | 0 |
attr |
LayoutLMModel.encoder |
1 | 0 | 0 |
attr |
LayoutLMModel.pooler |
1 | 0 | 0 |
meth |
LayoutLMForSequenceClassification.init |
2 | 0 | 0 |
meth |
LayoutLMForSequenceClassification.get_input_embeddings |
1 | 0 | 0 |
meth |
LayoutLMForSequenceClassification.forward |
12 | 11 | 0 |
attr |
LayoutLMForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
LayoutLMForSequenceClassification.layoutlm |
1 | 0 | 0 |
attr |
LayoutLMForSequenceClassification.dropout |
1 | 0 | 0 |
attr |
LayoutLMForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
LayoutLMForTokenClassification.init |
2 | 0 | 0 |
meth |
LayoutLMForTokenClassification.get_input_embeddings |
1 | 0 | 0 |
meth |
LayoutLMForTokenClassification.forward |
12 | 11 | 0 |
attr |
LayoutLMForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
LayoutLMForTokenClassification.layoutlm |
1 | 0 | 0 |
attr |
LayoutLMForTokenClassification.dropout |
1 | 0 | 0 |
attr |
LayoutLMForTokenClassification.classifier |
1 | 0 | 0 |
meth |
LayoutLMPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
LayoutLMForMaskedLM.init |
2 | 0 | 0 |
meth |
LayoutLMForMaskedLM.get_input_embeddings |
1 | 0 | 0 |
meth |
LayoutLMForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
LayoutLMForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
meth |
LayoutLMForMaskedLM.forward |
12 | 11 | 0 |
attr |
LayoutLMForMaskedLM.layoutlm |
1 | 0 | 0 |
attr |
LayoutLMForMaskedLM.cls |
1 | 0 | 0 |
meth |
LayoutLMForQuestionAnswering.init |
3 | 0 | 0 |
meth |
LayoutLMForQuestionAnswering.get_input_embeddings |
1 | 0 | 0 |
meth |
LayoutLMForQuestionAnswering.forward |
13 | 12 | 0 |
attr |
LayoutLMForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
LayoutLMForQuestionAnswering.layoutlm |
1 | 0 | 0 |
attr |
LayoutLMForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
transformers.models.layoutlmv2.configuration_layoutlmv2 (58 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LayoutLMv2Config.init |
29 | 0 | 0 |
meth |
LayoutLMv2Config.get_default_detectron2_config |
1 | 0 | 0 |
meth |
LayoutLMv2Config.get_detectron2_config |
1 | 0 | 0 |
attr |
LayoutLMv2Config.vocab_size |
1 | 0 | 0 |
attr |
LayoutLMv2Config.hidden_size |
1 | 0 | 0 |
attr |
LayoutLMv2Config.num_hidden_layers |
1 | 0 | 0 |
attr |
LayoutLMv2Config.num_attention_heads |
1 | 0 | 0 |
attr |
LayoutLMv2Config.intermediate_size |
1 | 0 | 0 |
attr |
LayoutLMv2Config.hidden_act |
1 | 0 | 0 |
attr |
LayoutLMv2Config.hidden_dropout_prob |
1 | 0 | 0 |
attr |
LayoutLMv2Config.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
LayoutLMv2Config.max_position_embeddings |
1 | 0 | 0 |
attr |
LayoutLMv2Config.type_vocab_size |
1 | 0 | 0 |
attr |
LayoutLMv2Config.initializer_range |
1 | 0 | 0 |
attr |
LayoutLMv2Config.layer_norm_eps |
1 | 0 | 0 |
attr |
LayoutLMv2Config.pad_token_id |
1 | 0 | 0 |
attr |
LayoutLMv2Config.max_2d_position_embeddings |
1 | 0 | 0 |
attr |
LayoutLMv2Config.max_rel_pos |
1 | 0 | 0 |
attr |
LayoutLMv2Config.rel_pos_bins |
1 | 0 | 0 |
attr |
LayoutLMv2Config.fast_qkv |
1 | 0 | 0 |
attr |
LayoutLMv2Config.max_rel_2d_pos |
1 | 0 | 0 |
attr |
LayoutLMv2Config.rel_2d_pos_bins |
1 | 0 | 0 |
attr |
LayoutLMv2Config.convert_sync_batchnorm |
1 | 0 | 0 |
attr |
LayoutLMv2Config.image_feature_pool_shape |
1 | 0 | 0 |
attr |
LayoutLMv2Config.coordinate_size |
1 | 0 | 0 |
attr |
LayoutLMv2Config.shape_size |
1 | 0 | 0 |
attr |
LayoutLMv2Config.has_relative_attention_bias |
1 | 0 | 0 |
attr |
LayoutLMv2Config.has_spatial_attention_bias |
1 | 0 | 0 |
attr |
LayoutLMv2Config.has_visual_segment_embedding |
1 | 0 | 0 |
attr |
LayoutLMv2Config.detectron2_config_args |
1 | 0 | 0 |
transformers.models.layoutlmv2.image_processing_layoutlmv2 (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LayoutLMv2ImageProcessor.init |
8 | 7 | 0 |
meth |
LayoutLMv2ImageProcessor.resize |
7 | 6 | 0 |
attr |
LayoutLMv2ImageProcessor.do_resize |
1 | 0 | 0 |
attr |
LayoutLMv2ImageProcessor.size |
1 | 0 | 0 |
attr |
LayoutLMv2ImageProcessor.resample |
1 | 0 | 0 |
attr |
LayoutLMv2ImageProcessor.apply_ocr |
1 | 0 | 0 |
attr |
LayoutLMv2ImageProcessor.ocr_lang |
1 | 0 | 0 |
attr |
LayoutLMv2ImageProcessor.tesseract_config |
1 | 0 | 0 |
transformers.models.layoutlmv2.image_processing_layoutlmv2_fast (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LayoutLMv2ImageProcessorFast.init |
2 | 1 | 0 |
meth |
LayoutLMv2ImageProcessorFast._preprocess |
11 | 10 | 0 |
transformers.models.layoutlmv2.modeling_layoutlmv2 (74 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LayoutLMv2ForSequenceClassification.init |
2 | 0 | 0 |
meth |
LayoutLMv2ForSequenceClassification.get_input_embeddings |
1 | 0 | 0 |
meth |
LayoutLMv2ForSequenceClassification.forward |
13 | 12 | 0 |
attr |
LayoutLMv2ForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
LayoutLMv2ForSequenceClassification.layoutlmv2 |
1 | 0 | 0 |
attr |
LayoutLMv2ForSequenceClassification.dropout |
1 | 0 | 0 |
attr |
LayoutLMv2ForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
LayoutLMv2ForQuestionAnswering.init |
3 | 0 | 0 |
meth |
LayoutLMv2ForQuestionAnswering.get_input_embeddings |
1 | 0 | 0 |
meth |
LayoutLMv2ForQuestionAnswering.forward |
14 | 13 | 0 |
attr |
LayoutLMv2ForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
LayoutLMv2ForQuestionAnswering.layoutlmv2 |
1 | 0 | 0 |
attr |
LayoutLMv2ForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
LayoutLMv2ForTokenClassification.init |
2 | 0 | 0 |
meth |
LayoutLMv2ForTokenClassification.get_input_embeddings |
1 | 0 | 0 |
meth |
LayoutLMv2ForTokenClassification.forward |
13 | 12 | 0 |
attr |
LayoutLMv2ForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
LayoutLMv2ForTokenClassification.layoutlmv2 |
1 | 0 | 0 |
attr |
LayoutLMv2ForTokenClassification.dropout |
1 | 0 | 0 |
attr |
LayoutLMv2ForTokenClassification.classifier |
1 | 0 | 0 |
meth |
LayoutLMv2PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
LayoutLMv2Layer.init |
2 | 0 | 0 |
meth |
LayoutLMv2Layer.forward |
6 | 0 | 0 |
meth |
LayoutLMv2Layer.feed_forward_chunk |
2 | 0 | 0 |
attr |
LayoutLMv2Layer.chunk_size_feed_forward |
1 | 0 | 0 |
attr |
LayoutLMv2Layer.seq_len_dim |
1 | 0 | 0 |
attr |
LayoutLMv2Layer.attention |
1 | 0 | 0 |
attr |
LayoutLMv2Layer.intermediate |
1 | 0 | 0 |
attr |
LayoutLMv2Layer.output |
1 | 0 | 0 |
meth |
LayoutLMv2Model.init |
2 | 0 | 0 |
meth |
LayoutLMv2Model.get_input_embeddings |
1 | 0 | 0 |
meth |
LayoutLMv2Model.set_input_embeddings |
2 | 0 | 0 |
meth |
LayoutLMv2Model._calc_text_embeddings |
6 | 0 | 0 |
meth |
LayoutLMv2Model._calc_img_embeddings |
4 | 0 | 0 |
meth |
LayoutLMv2Model._calc_visual_bbox |
5 | 0 | 0 |
meth |
LayoutLMv2Model._get_input_shape |
3 | 0 | 0 |
meth |
LayoutLMv2Model.forward |
12 | 11 | 0 |
attr |
LayoutLMv2Model.has_visual_segment_embedding |
1 | 0 | 0 |
attr |
LayoutLMv2Model.embeddings |
1 | 0 | 0 |
attr |
LayoutLMv2Model.visual |
1 | 0 | 0 |
attr |
LayoutLMv2Model.visual_proj |
1 | 0 | 0 |
attr |
LayoutLMv2Model.visual_LayerNorm |
1 | 0 | 0 |
attr |
LayoutLMv2Model.visual_dropout |
1 | 0 | 0 |
attr |
LayoutLMv2Model.encoder |
1 | 0 | 0 |
attr |
LayoutLMv2Model.pooler |
1 | 0 | 0 |
attr |
LayoutLMv2Model.visual_segment_embedding |
1 | 0 | 0 |
transformers.models.layoutlmv2.processing_layoutlmv2 (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LayoutLMv2Processor.init |
4 | 0 | 0 |
meth |
LayoutLMv2Processor.call |
21 | 19 | 0 |
meth |
LayoutLMv2Processor.get_overflowing_images |
3 | 0 | 0 |
prop |
LayoutLMv2Processor.model_input_names |
1 | 0 | 0 |
transformers.models.layoutlmv2.tokenization_layoutlmv2 (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LayoutLMv2Tokenizer.init |
17 | 1 | 0 |
meth |
LayoutLMv2Tokenizer.call |
21 | 20 | 0 |
meth |
LayoutLMv2Tokenizer.batch_encode_plus |
21 | 20 | 0 |
meth |
LayoutLMv2Tokenizer.tokenize |
5 | 4 | 0 |
meth |
LayoutLMv2Tokenizer.encode_plus |
21 | 20 | 0 |
meth |
LayoutLMv2Tokenizer._encode_plus |
21 | 20 | 0 |
meth |
LayoutLMv2Tokenizer.build_inputs_with_special_tokens |
3 | 0 | 0 |
attr |
LayoutLMv2Tokenizer.do_lower_case |
1 | 0 | 0 |
attr |
LayoutLMv2Tokenizer.cls_token_box |
1 | 0 | 0 |
attr |
LayoutLMv2Tokenizer.sep_token_box |
1 | 0 | 0 |
attr |
LayoutLMv2Tokenizer.pad_token_box |
1 | 0 | 0 |
attr |
LayoutLMv2Tokenizer.pad_token_label |
1 | 0 | 0 |
attr |
LayoutLMv2Tokenizer.only_label_first_subword |
1 | 0 | 0 |
transformers.models.layoutlmv3.configuration_layoutlmv3 (62 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LayoutLMv3Config.init |
32 | 0 | 0 |
attr |
LayoutLMv3Config.vocab_size |
1 | 0 | 0 |
attr |
LayoutLMv3Config.hidden_size |
1 | 0 | 0 |
attr |
LayoutLMv3Config.num_hidden_layers |
1 | 0 | 0 |
attr |
LayoutLMv3Config.num_attention_heads |
1 | 0 | 0 |
attr |
LayoutLMv3Config.intermediate_size |
1 | 0 | 0 |
attr |
LayoutLMv3Config.hidden_act |
1 | 0 | 0 |
attr |
LayoutLMv3Config.hidden_dropout_prob |
1 | 0 | 0 |
attr |
LayoutLMv3Config.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
LayoutLMv3Config.max_position_embeddings |
1 | 0 | 0 |
attr |
LayoutLMv3Config.type_vocab_size |
1 | 0 | 0 |
attr |
LayoutLMv3Config.initializer_range |
1 | 0 | 0 |
attr |
LayoutLMv3Config.layer_norm_eps |
1 | 0 | 0 |
attr |
LayoutLMv3Config.pad_token_id |
1 | 0 | 0 |
attr |
LayoutLMv3Config.bos_token_id |
1 | 0 | 0 |
attr |
LayoutLMv3Config.eos_token_id |
1 | 0 | 0 |
attr |
LayoutLMv3Config.max_2d_position_embeddings |
1 | 0 | 0 |
attr |
LayoutLMv3Config.coordinate_size |
1 | 0 | 0 |
attr |
LayoutLMv3Config.shape_size |
1 | 0 | 0 |
attr |
LayoutLMv3Config.has_relative_attention_bias |
1 | 0 | 0 |
attr |
LayoutLMv3Config.rel_pos_bins |
1 | 0 | 0 |
attr |
LayoutLMv3Config.max_rel_pos |
1 | 0 | 0 |
attr |
LayoutLMv3Config.has_spatial_attention_bias |
1 | 0 | 0 |
attr |
LayoutLMv3Config.rel_2d_pos_bins |
1 | 0 | 0 |
attr |
LayoutLMv3Config.max_rel_2d_pos |
1 | 0 | 0 |
attr |
LayoutLMv3Config.text_embed |
1 | 0 | 0 |
attr |
LayoutLMv3Config.visual_embed |
1 | 0 | 0 |
attr |
LayoutLMv3Config.input_size |
1 | 0 | 0 |
attr |
LayoutLMv3Config.num_channels |
1 | 0 | 0 |
attr |
LayoutLMv3Config.patch_size |
1 | 0 | 0 |
attr |
LayoutLMv3Config.classifier_dropout |
1 | 0 | 0 |
transformers.models.layoutlmv3.image_processing_layoutlmv3 (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LayoutLMv3ImageProcessor.init |
13 | 12 | 0 |
meth |
LayoutLMv3ImageProcessor.resize |
7 | 6 | 0 |
meth |
LayoutLMv3ImageProcessor.preprocess |
16 | 15 | 0 |
attr |
LayoutLMv3ImageProcessor.do_resize |
1 | 0 | 0 |
attr |
LayoutLMv3ImageProcessor.size |
1 | 0 | 0 |
attr |
LayoutLMv3ImageProcessor.resample |
1 | 0 | 0 |
attr |
LayoutLMv3ImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
LayoutLMv3ImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
LayoutLMv3ImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
LayoutLMv3ImageProcessor.image_mean |
1 | 0 | 0 |
attr |
LayoutLMv3ImageProcessor.image_std |
1 | 0 | 0 |
attr |
LayoutLMv3ImageProcessor.apply_ocr |
1 | 0 | 0 |
attr |
LayoutLMv3ImageProcessor.ocr_lang |
1 | 0 | 0 |
attr |
LayoutLMv3ImageProcessor.tesseract_config |
1 | 0 | 0 |
transformers.models.layoutlmv3.image_processing_layoutlmv3_fast (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LayoutLMv3ImageProcessorFast.init |
2 | 1 | 0 |
meth |
LayoutLMv3ImageProcessorFast._preprocess |
18 | 17 | 0 |
transformers.models.layoutlmv3.modeling_layoutlmv3 (55 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LayoutLMv3ForSequenceClassification.init |
2 | 0 | 0 |
meth |
LayoutLMv3ForSequenceClassification.get_input_embeddings |
1 | 0 | 0 |
meth |
LayoutLMv3ForSequenceClassification.set_input_embeddings |
2 | 0 | 0 |
meth |
LayoutLMv3ForSequenceClassification.forward |
13 | 12 | 0 |
attr |
LayoutLMv3ForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
LayoutLMv3ForSequenceClassification.layoutlmv3 |
1 | 0 | 0 |
attr |
LayoutLMv3ForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
LayoutLMv3ForTokenClassification.init |
2 | 0 | 0 |
meth |
LayoutLMv3ForTokenClassification.get_input_embeddings |
1 | 0 | 0 |
meth |
LayoutLMv3ForTokenClassification.set_input_embeddings |
2 | 0 | 0 |
meth |
LayoutLMv3ForTokenClassification.forward |
13 | 12 | 0 |
attr |
LayoutLMv3ForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
LayoutLMv3ForTokenClassification.layoutlmv3 |
1 | 0 | 0 |
attr |
LayoutLMv3ForTokenClassification.dropout |
1 | 0 | 0 |
attr |
LayoutLMv3ForTokenClassification.classifier |
1 | 0 | 0 |
meth |
LayoutLMv3PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
LayoutLMv3ForQuestionAnswering.init |
2 | 0 | 0 |
meth |
LayoutLMv3ForQuestionAnswering.get_input_embeddings |
1 | 0 | 0 |
meth |
LayoutLMv3ForQuestionAnswering.set_input_embeddings |
2 | 0 | 0 |
meth |
LayoutLMv3ForQuestionAnswering.forward |
14 | 13 | 0 |
attr |
LayoutLMv3ForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
LayoutLMv3ForQuestionAnswering.layoutlmv3 |
1 | 0 | 0 |
attr |
LayoutLMv3ForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
LayoutLMv3Model.init |
2 | 0 | 0 |
meth |
LayoutLMv3Model.get_input_embeddings |
1 | 0 | 0 |
meth |
LayoutLMv3Model.set_input_embeddings |
2 | 0 | 0 |
meth |
LayoutLMv3Model.create_visual_bbox |
3 | 0 | 0 |
meth |
LayoutLMv3Model.calculate_visual_bbox |
4 | 0 | 0 |
meth |
LayoutLMv3Model.forward_image |
2 | 0 | 0 |
meth |
LayoutLMv3Model.forward |
12 | 11 | 0 |
attr |
LayoutLMv3Model.encoder |
1 | 0 | 0 |
attr |
LayoutLMv3Model.embeddings |
1 | 0 | 0 |
attr |
LayoutLMv3Model.patch_embed |
1 | 0 | 0 |
attr |
LayoutLMv3Model.size |
1 | 0 | 0 |
attr |
LayoutLMv3Model.cls_token |
1 | 0 | 0 |
attr |
LayoutLMv3Model.pos_embed |
1 | 0 | 0 |
attr |
LayoutLMv3Model.pos_drop |
1 | 0 | 0 |
attr |
LayoutLMv3Model.LayerNorm |
1 | 0 | 0 |
attr |
LayoutLMv3Model.dropout |
1 | 0 | 0 |
attr |
LayoutLMv3Model.norm |
1 | 0 | 0 |
transformers.models.layoutlmv3.processing_layoutlmv3 (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LayoutLMv3Processor.init |
4 | 0 | 0 |
meth |
LayoutLMv3Processor.call |
21 | 19 | 0 |
meth |
LayoutLMv3Processor.get_overflowing_images |
3 | 0 | 0 |
prop |
LayoutLMv3Processor.model_input_names |
1 | 0 | 0 |
transformers.models.layoutlmv3.tokenization_layoutlmv3 (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LayoutLMv3Tokenizer.init |
18 | 2 | 0 |
meth |
LayoutLMv3Tokenizer.call |
21 | 20 | 0 |
meth |
LayoutLMv3Tokenizer.batch_encode_plus |
21 | 20 | 0 |
meth |
LayoutLMv3Tokenizer.tokenize |
5 | 4 | 0 |
meth |
LayoutLMv3Tokenizer.encode_plus |
21 | 20 | 0 |
meth |
LayoutLMv3Tokenizer._encode_plus |
21 | 20 | 0 |
meth |
LayoutLMv3Tokenizer.build_inputs_with_special_tokens |
3 | 0 | 0 |
attr |
LayoutLMv3Tokenizer.add_prefix_space |
1 | 0 | 0 |
attr |
LayoutLMv3Tokenizer.cls_token_box |
1 | 0 | 0 |
attr |
LayoutLMv3Tokenizer.sep_token_box |
1 | 0 | 0 |
attr |
LayoutLMv3Tokenizer.pad_token_box |
1 | 0 | 0 |
attr |
LayoutLMv3Tokenizer.pad_token_label |
1 | 0 | 0 |
attr |
LayoutLMv3Tokenizer.only_label_first_subword |
1 | 0 | 0 |
transformers.models.layoutxlm.configuration_layoutxlm (58 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LayoutXLMConfig.init |
29 | 0 | 0 |
meth |
LayoutXLMConfig.get_default_detectron2_config |
1 | 0 | 0 |
meth |
LayoutXLMConfig.get_detectron2_config |
1 | 0 | 0 |
attr |
LayoutXLMConfig.vocab_size |
1 | 0 | 0 |
attr |
LayoutXLMConfig.hidden_size |
1 | 0 | 0 |
attr |
LayoutXLMConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
LayoutXLMConfig.num_attention_heads |
1 | 0 | 0 |
attr |
LayoutXLMConfig.intermediate_size |
1 | 0 | 0 |
attr |
LayoutXLMConfig.hidden_act |
1 | 0 | 0 |
attr |
LayoutXLMConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
LayoutXLMConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
LayoutXLMConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
LayoutXLMConfig.type_vocab_size |
1 | 0 | 0 |
attr |
LayoutXLMConfig.initializer_range |
1 | 0 | 0 |
attr |
LayoutXLMConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
LayoutXLMConfig.pad_token_id |
1 | 0 | 0 |
attr |
LayoutXLMConfig.max_2d_position_embeddings |
1 | 0 | 0 |
attr |
LayoutXLMConfig.max_rel_pos |
1 | 0 | 0 |
attr |
LayoutXLMConfig.rel_pos_bins |
1 | 0 | 0 |
attr |
LayoutXLMConfig.fast_qkv |
1 | 0 | 0 |
attr |
LayoutXLMConfig.max_rel_2d_pos |
1 | 0 | 0 |
attr |
LayoutXLMConfig.rel_2d_pos_bins |
1 | 0 | 0 |
attr |
LayoutXLMConfig.convert_sync_batchnorm |
1 | 0 | 0 |
attr |
LayoutXLMConfig.image_feature_pool_shape |
1 | 0 | 0 |
attr |
LayoutXLMConfig.coordinate_size |
1 | 0 | 0 |
attr |
LayoutXLMConfig.shape_size |
1 | 0 | 0 |
attr |
LayoutXLMConfig.has_relative_attention_bias |
1 | 0 | 0 |
attr |
LayoutXLMConfig.has_spatial_attention_bias |
1 | 0 | 0 |
attr |
LayoutXLMConfig.has_visual_segment_embedding |
1 | 0 | 0 |
attr |
LayoutXLMConfig.detectron2_config_args |
1 | 0 | 0 |
transformers.models.layoutxlm.processing_layoutxlm (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LayoutXLMProcessor.init |
4 | 0 | 0 |
meth |
LayoutXLMProcessor.call |
21 | 19 | 0 |
meth |
LayoutXLMProcessor.get_overflowing_images |
3 | 0 | 0 |
prop |
LayoutXLMProcessor.model_input_names |
1 | 0 | 0 |
transformers.models.layoutxlm.tokenization_layoutxlm (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LayoutXLMTokenizer.init |
16 | 1 | 0 |
meth |
LayoutXLMTokenizer.encode_plus |
21 | 20 | 0 |
meth |
LayoutXLMTokenizer.batch_encode_plus |
21 | 20 | 0 |
meth |
LayoutXLMTokenizer.call |
21 | 20 | 0 |
meth |
LayoutXLMTokenizer.tokenize |
5 | 4 | 0 |
meth |
LayoutXLMTokenizer._batch_encode_plus |
21 | 20 | 0 |
meth |
LayoutXLMTokenizer._encode_plus |
21 | 20 | 0 |
attr |
LayoutXLMTokenizer.add_prefix_space |
1 | 0 | 0 |
attr |
LayoutXLMTokenizer.cls_token_box |
1 | 0 | 0 |
attr |
LayoutXLMTokenizer.sep_token_box |
1 | 0 | 0 |
attr |
LayoutXLMTokenizer.pad_token_box |
1 | 0 | 0 |
attr |
LayoutXLMTokenizer.pad_token_label |
1 | 0 | 0 |
attr |
LayoutXLMTokenizer.only_label_first_subword |
1 | 0 | 0 |
transformers.models.led.configuration_led (52 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LEDConfig.init |
28 | 2 | 0 |
attr |
LEDConfig.vocab_size |
1 | 0 | 0 |
attr |
LEDConfig.max_encoder_position_embeddings |
1 | 0 | 0 |
attr |
LEDConfig.max_decoder_position_embeddings |
1 | 0 | 0 |
attr |
LEDConfig.d_model |
1 | 0 | 0 |
attr |
LEDConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
LEDConfig.encoder_layers |
1 | 0 | 0 |
attr |
LEDConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
LEDConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
LEDConfig.decoder_layers |
1 | 0 | 0 |
attr |
LEDConfig.decoder_attention_heads |
1 | 0 | 0 |
attr |
LEDConfig.dropout |
1 | 0 | 0 |
attr |
LEDConfig.attention_dropout |
1 | 0 | 0 |
attr |
LEDConfig.activation_dropout |
1 | 0 | 0 |
attr |
LEDConfig.activation_function |
1 | 0 | 0 |
attr |
LEDConfig.init_std |
1 | 0 | 0 |
attr |
LEDConfig.encoder_layerdrop |
1 | 0 | 0 |
attr |
LEDConfig.decoder_layerdrop |
1 | 0 | 0 |
attr |
LEDConfig.classifier_dropout |
1 | 0 | 0 |
attr |
LEDConfig.use_cache |
1 | 0 | 0 |
attr |
LEDConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
LEDConfig.attention_window |
1 | 0 | 0 |
attr |
LEDConfig.pad_token_id |
1 | 0 | 0 |
attr |
LEDConfig.bos_token_id |
1 | 0 | 0 |
attr |
LEDConfig.eos_token_id |
1 | 0 | 0 |
attr |
LEDConfig.decoder_start_token_id |
1 | 0 | 0 |
attr |
LEDConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.led.modeling_led (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LEDForQuestionAnswering.init |
2 | 0 | 0 |
meth |
LEDForQuestionAnswering.forward |
16 | 15 | 0 |
attr |
LEDForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
LEDForQuestionAnswering.led |
1 | 0 | 0 |
attr |
LEDForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
LEDPreTrainedModel._init_weights |
2 | 0 | 0 |
prop |
LEDPreTrainedModel.dummy_inputs |
1 | 0 | 0 |
meth |
LEDForConditionalGeneration.init |
2 | 1 | 0 |
meth |
LEDForConditionalGeneration.forward |
17 | 16 | 0 |
meth |
LEDForConditionalGeneration.prepare_decoder_input_ids_from_labels |
2 | 1 | 0 |
attr |
LEDForConditionalGeneration.led |
1 | 0 | 0 |
attr |
LEDForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
LEDModel.init |
2 | 1 | 0 |
meth |
LEDModel.get_input_embeddings |
1 | 0 | 0 |
meth |
LEDModel.set_input_embeddings |
2 | 0 | 0 |
meth |
LEDModel.forward |
16 | 15 | 0 |
attr |
LEDModel.shared |
1 | 0 | 0 |
attr |
LEDModel.encoder |
1 | 0 | 0 |
attr |
LEDModel.decoder |
1 | 0 | 0 |
transformers.models.levit.configuration_levit (31 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LevitConfig.init |
16 | 0 | 0 |
attr |
LevitConfig.image_size |
1 | 0 | 0 |
attr |
LevitConfig.num_channels |
1 | 0 | 0 |
attr |
LevitConfig.kernel_size |
1 | 0 | 0 |
attr |
LevitConfig.stride |
1 | 0 | 0 |
attr |
LevitConfig.padding |
1 | 0 | 0 |
attr |
LevitConfig.hidden_sizes |
1 | 0 | 0 |
attr |
LevitConfig.num_attention_heads |
1 | 0 | 0 |
attr |
LevitConfig.depths |
1 | 0 | 0 |
attr |
LevitConfig.key_dim |
1 | 0 | 0 |
attr |
LevitConfig.drop_path_rate |
1 | 0 | 0 |
attr |
LevitConfig.patch_size |
1 | 0 | 0 |
attr |
LevitConfig.attention_ratio |
1 | 0 | 0 |
attr |
LevitConfig.mlp_ratio |
1 | 0 | 0 |
attr |
LevitConfig.initializer_range |
1 | 0 | 0 |
attr |
LevitConfig.down_ops |
1 | 0 | 0 |
transformers.models.levit.image_processing_levit (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LevitImageProcessor.init |
12 | 11 | 0 |
meth |
LevitImageProcessor.resize |
7 | 6 | 0 |
attr |
LevitImageProcessor.do_resize |
1 | 0 | 0 |
attr |
LevitImageProcessor.size |
1 | 0 | 0 |
attr |
LevitImageProcessor.resample |
1 | 0 | 0 |
attr |
LevitImageProcessor.do_center_crop |
1 | 0 | 0 |
attr |
LevitImageProcessor.crop_size |
1 | 0 | 0 |
attr |
LevitImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
LevitImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
LevitImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
LevitImageProcessor.image_mean |
1 | 0 | 0 |
attr |
LevitImageProcessor.image_std |
1 | 0 | 0 |
transformers.models.levit.image_processing_levit_fast (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LevitImageProcessorFast.resize |
5 | 4 | 0 |
transformers.models.levit.modeling_levit (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LevitModel.init |
2 | 0 | 0 |
meth |
LevitModel.forward |
5 | 4 | 0 |
attr |
LevitModel.patch_embeddings |
1 | 0 | 0 |
attr |
LevitModel.encoder |
1 | 0 | 0 |
meth |
LevitForImageClassificationWithTeacher.init |
2 | 0 | 0 |
meth |
LevitForImageClassificationWithTeacher.forward |
5 | 4 | 0 |
attr |
LevitForImageClassificationWithTeacher.num_labels |
1 | 0 | 0 |
attr |
LevitForImageClassificationWithTeacher.levit |
1 | 0 | 0 |
attr |
LevitForImageClassificationWithTeacher.classifier |
1 | 0 | 0 |
attr |
LevitForImageClassificationWithTeacher.classifier_distill |
1 | 0 | 0 |
meth |
LevitForImageClassification.init |
2 | 0 | 0 |
meth |
LevitForImageClassification.forward |
6 | 5 | 0 |
attr |
LevitForImageClassification.num_labels |
1 | 0 | 0 |
attr |
LevitForImageClassification.levit |
1 | 0 | 0 |
attr |
LevitForImageClassification.classifier |
1 | 0 | 0 |
meth |
LevitPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.lfm2.configuration_lfm2 (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Lfm2Config.init |
24 | 22 | 0 |
attr |
Lfm2Config.vocab_size |
1 | 0 | 0 |
attr |
Lfm2Config.hidden_size |
1 | 0 | 0 |
attr |
Lfm2Config.num_hidden_layers |
1 | 0 | 0 |
attr |
Lfm2Config.max_position_embeddings |
1 | 0 | 0 |
attr |
Lfm2Config.use_cache |
1 | 0 | 0 |
attr |
Lfm2Config.norm_eps |
1 | 0 | 0 |
attr |
Lfm2Config.initializer_range |
1 | 0 | 0 |
attr |
Lfm2Config.num_attention_heads |
1 | 0 | 0 |
attr |
Lfm2Config.num_key_value_heads |
1 | 0 | 0 |
attr |
Lfm2Config.conv_bias |
1 | 0 | 0 |
attr |
Lfm2Config.conv_L_cache |
1 | 0 | 0 |
attr |
Lfm2Config.intermediate_size |
1 | 0 | 0 |
attr |
Lfm2Config.block_multiple_of |
1 | 0 | 0 |
attr |
Lfm2Config.block_ffn_dim_multiplier |
1 | 0 | 0 |
attr |
Lfm2Config.block_auto_adjust_ff_dim |
1 | 0 | 0 |
attr |
Lfm2Config.layer_types |
1 | 0 | 0 |
attr |
Lfm2Config.rope_parameters |
1 | 0 | 0 |
attr |
Lfm2Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
Lfm2Config.pad_token_id |
1 | 0 | 0 |
attr |
Lfm2Config.bos_token_id |
1 | 0 | 0 |
attr |
Lfm2Config.eos_token_id |
1 | 0 | 0 |
transformers.models.lfm2.modeling_lfm2 (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Lfm2ForCausalLM.init |
2 | 0 | 0 |
attr |
Lfm2ForCausalLM.model |
1 | 0 | 0 |
attr |
Lfm2ForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Lfm2ForCausalLM.lm_head |
1 | 0 | 0 |
meth |
Lfm2Model.init |
2 | 1 | 0 |
attr |
Lfm2Model.padding_idx |
1 | 0 | 0 |
attr |
Lfm2Model.vocab_size |
1 | 0 | 0 |
attr |
Lfm2Model.embed_tokens |
1 | 0 | 0 |
attr |
Lfm2Model.layers |
1 | 0 | 0 |
attr |
Lfm2Model.rotary_emb |
1 | 0 | 0 |
attr |
Lfm2Model.gradient_checkpointing |
1 | 0 | 0 |
attr |
Lfm2Model.embedding_norm |
1 | 0 | 0 |
transformers.models.lfm2.modular_lfm2 (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Lfm2Model.init |
2 | 1 | 0 |
attr |
Lfm2Model.embedding_norm |
1 | 0 | 0 |
transformers.models.lfm2_moe.configuration_lfm2_moe (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Lfm2MoeConfig.init |
27 | 25 | 0 |
attr |
Lfm2MoeConfig.vocab_size |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.hidden_size |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.intermediate_size |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.initializer_range |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.use_cache |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.norm_eps |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.conv_bias |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.conv_L_cache |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.num_dense_layers |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.moe_intermediate_size |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.num_experts |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.use_expert_bias |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.routed_scaling_factor |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.norm_topk_prob |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.layer_types |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.rope_parameters |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.pad_token_id |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.bos_token_id |
1 | 0 | 0 |
attr |
Lfm2MoeConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.lfm2_moe.modeling_lfm2_moe (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Lfm2MoeForCausalLM.init |
2 | 0 | 0 |
attr |
Lfm2MoeForCausalLM.model |
1 | 0 | 0 |
attr |
Lfm2MoeForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Lfm2MoeForCausalLM.lm_head |
1 | 0 | 0 |
meth |
Lfm2MoePreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Lfm2MoeModel.init |
2 | 1 | 0 |
attr |
Lfm2MoeModel.padding_idx |
1 | 0 | 0 |
attr |
Lfm2MoeModel.vocab_size |
1 | 0 | 0 |
attr |
Lfm2MoeModel.embed_tokens |
1 | 0 | 0 |
attr |
Lfm2MoeModel.layers |
1 | 0 | 0 |
attr |
Lfm2MoeModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
Lfm2MoeModel.pos_emb |
1 | 0 | 0 |
attr |
Lfm2MoeModel.embedding_norm |
1 | 0 | 0 |
transformers.models.lfm2_moe.modular_lfm2_moe (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Lfm2MoePreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Lfm2MoeModel.init |
2 | 1 | 0 |
attr |
Lfm2MoeModel.pos_emb |
1 | 0 | 0 |
attr |
Lfm2MoeModel.embedding_norm |
1 | 0 | 0 |
transformers.models.lfm2_vl.configuration_lfm2_vl (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Lfm2VlConfig.init |
11 | 0 | 0 |
attr |
Lfm2VlConfig.image_token_id |
1 | 0 | 0 |
attr |
Lfm2VlConfig.projector_hidden_act |
1 | 0 | 0 |
attr |
Lfm2VlConfig.projector_hidden_size |
1 | 0 | 0 |
attr |
Lfm2VlConfig.projector_bias |
1 | 0 | 0 |
attr |
Lfm2VlConfig.projector_use_layernorm |
1 | 0 | 0 |
attr |
Lfm2VlConfig.downsample_factor |
1 | 0 | 0 |
attr |
Lfm2VlConfig.vision_config |
1 | 0 | 0 |
attr |
Lfm2VlConfig.text_config |
1 | 0 | 0 |
attr |
Lfm2VlConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.lfm2_vl.image_processing_lfm2_vl_fast (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Lfm2VlImageProcessorFast.init |
2 | 1 | 0 |
meth |
Lfm2VlImageProcessorFast.crop_image_to_patches |
10 | 9 | 0 |
meth |
Lfm2VlImageProcessorFast._preprocess |
25 | 24 | 0 |
attr |
Lfm2VlImageProcessorFast.max_num_patches |
1 | 0 | 0 |
transformers.models.lfm2_vl.modeling_lfm2_vl (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Lfm2VlModel.init |
2 | 1 | 0 |
meth |
Lfm2VlModel.get_input_embeddings |
1 | 0 | 0 |
meth |
Lfm2VlModel.set_input_embeddings |
2 | 0 | 0 |
meth |
Lfm2VlModel.get_placeholder_mask |
4 | 3 | 0 |
attr |
Lfm2VlModel.vision_tower |
1 | 0 | 0 |
attr |
Lfm2VlModel.multi_modal_projector |
1 | 0 | 0 |
attr |
Lfm2VlModel.language_model |
1 | 0 | 0 |
meth |
Lfm2VlForConditionalGeneration.init |
2 | 1 | 0 |
meth |
Lfm2VlForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Lfm2VlForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Lfm2VlForConditionalGeneration.prepare_inputs_for_generation |
10 | 0 | 0 |
attr |
Lfm2VlForConditionalGeneration.model |
1 | 0 | 0 |
attr |
Lfm2VlForConditionalGeneration.lm_head |
1 | 0 | 0 |
transformers.models.lfm2_vl.modular_lfm2_vl (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Lfm2VlModel.init |
2 | 1 | 0 |
meth |
Lfm2VlModel.get_placeholder_mask |
4 | 3 | 0 |
transformers.models.lfm2_vl.processing_lfm2_vl (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Lfm2VlProcessor.init |
5 | 1 | 0 |
meth |
Lfm2VlProcessor.expand_text_with_placeholders |
8 | 7 | 0 |
meth |
Lfm2VlProcessor._get_image_num_tokens |
3 | 2 | 0 |
meth |
Lfm2VlProcessor.batch_decode |
3 | 0 | 0 |
meth |
Lfm2VlProcessor.decode |
3 | 0 | 0 |
prop |
Lfm2VlProcessor.model_input_names |
1 | 0 | 0 |
attr |
Lfm2VlProcessor.image_token |
1 | 0 | 0 |
attr |
Lfm2VlProcessor.image_token_id |
1 | 0 | 0 |
attr |
Lfm2VlProcessor.image_start_token |
1 | 0 | 0 |
attr |
Lfm2VlProcessor.image_end_token |
1 | 0 | 0 |
attr |
Lfm2VlProcessor.image_thumbnail_token |
1 | 0 | 0 |
transformers.models.lightglue.configuration_lightglue (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LightGlueConfig.init |
15 | 10 | 0 |
attr |
LightGlueConfig.trust_remote_code |
1 | 0 | 0 |
attr |
LightGlueConfig.descriptor_dim |
1 | 0 | 0 |
attr |
LightGlueConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
LightGlueConfig.num_attention_heads |
1 | 0 | 0 |
attr |
LightGlueConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
LightGlueConfig.depth_confidence |
1 | 0 | 0 |
attr |
LightGlueConfig.width_confidence |
1 | 0 | 0 |
attr |
LightGlueConfig.filter_threshold |
1 | 0 | 0 |
attr |
LightGlueConfig.initializer_range |
1 | 0 | 0 |
attr |
LightGlueConfig.keypoint_detector_config |
1 | 0 | 0 |
attr |
LightGlueConfig.hidden_size |
1 | 0 | 0 |
attr |
LightGlueConfig.intermediate_size |
1 | 0 | 0 |
attr |
LightGlueConfig.hidden_act |
1 | 0 | 0 |
attr |
LightGlueConfig.attention_dropout |
1 | 0 | 0 |
attr |
LightGlueConfig.attention_bias |
1 | 0 | 0 |
transformers.models.lightglue.image_processing_lightglue (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LightGlueImageProcessor.init |
8 | 7 | 0 |
meth |
LightGlueImageProcessor.resize |
6 | 4 | 0 |
meth |
LightGlueImageProcessor.preprocess |
12 | 10 | 0 |
meth |
LightGlueImageProcessor._get_color |
2 | 0 | 0 |
attr |
LightGlueImageProcessor.do_resize |
1 | 0 | 0 |
attr |
LightGlueImageProcessor.size |
1 | 0 | 0 |
attr |
LightGlueImageProcessor.resample |
1 | 0 | 0 |
attr |
LightGlueImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
LightGlueImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
LightGlueImageProcessor.do_grayscale |
1 | 0 | 0 |
transformers.models.lightglue.image_processing_lightglue_fast (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LightGlueImageProcessorFast.init |
2 | 1 | 0 |
meth |
LightGlueImageProcessorFast._prepare_images_structure |
3 | 2 | 0 |
meth |
LightGlueImageProcessorFast._preprocess |
11 | 10 | 0 |
meth |
LightGlueImageProcessorFast.visualize_keypoint_matching |
3 | 2 | 0 |
meth |
LightGlueImageProcessorFast._get_color |
2 | 0 | 0 |
transformers.models.lightglue.modeling_lightglue (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LightGlueForKeypointMatching.init |
2 | 1 | 0 |
meth |
LightGlueForKeypointMatching._get_keypoint_matching |
5 | 0 | 0 |
meth |
LightGlueForKeypointMatching._do_layer_keypoint_pruning |
8 | 7 | 0 |
meth |
LightGlueForKeypointMatching._concat_early_stopped_outputs |
6 | 0 | 0 |
meth |
LightGlueForKeypointMatching.forward |
6 | 5 | 0 |
attr |
LightGlueForKeypointMatching.keypoint_detector |
1 | 0 | 0 |
attr |
LightGlueForKeypointMatching.keypoint_detector_descriptor_dim |
1 | 0 | 0 |
attr |
LightGlueForKeypointMatching.descriptor_dim |
1 | 0 | 0 |
attr |
LightGlueForKeypointMatching.num_layers |
1 | 0 | 0 |
attr |
LightGlueForKeypointMatching.filter_threshold |
1 | 0 | 0 |
attr |
LightGlueForKeypointMatching.depth_confidence |
1 | 0 | 0 |
attr |
LightGlueForKeypointMatching.width_confidence |
1 | 0 | 0 |
attr |
LightGlueForKeypointMatching.positional_encoder |
1 | 0 | 0 |
attr |
LightGlueForKeypointMatching.transformer_layers |
1 | 0 | 0 |
attr |
LightGlueForKeypointMatching.match_assignment_layers |
1 | 0 | 0 |
attr |
LightGlueForKeypointMatching.token_confidence |
1 | 0 | 0 |
attr |
LightGlueForKeypointMatching.input_projection |
1 | 0 | 0 |
transformers.models.lightglue.modular_lightglue (46 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LightGlueForKeypointMatching.init |
2 | 1 | 0 |
meth |
LightGlueForKeypointMatching._get_keypoint_matching |
5 | 0 | 0 |
meth |
LightGlueForKeypointMatching._do_layer_keypoint_pruning |
8 | 7 | 0 |
meth |
LightGlueForKeypointMatching._concat_early_stopped_outputs |
6 | 0 | 0 |
meth |
LightGlueForKeypointMatching.forward |
6 | 5 | 0 |
attr |
LightGlueForKeypointMatching.keypoint_detector |
1 | 0 | 0 |
attr |
LightGlueForKeypointMatching.keypoint_detector_descriptor_dim |
1 | 0 | 0 |
attr |
LightGlueForKeypointMatching.descriptor_dim |
1 | 0 | 0 |
attr |
LightGlueForKeypointMatching.num_layers |
1 | 0 | 0 |
attr |
LightGlueForKeypointMatching.filter_threshold |
1 | 0 | 0 |
attr |
LightGlueForKeypointMatching.depth_confidence |
1 | 0 | 0 |
attr |
LightGlueForKeypointMatching.width_confidence |
1 | 0 | 0 |
attr |
LightGlueForKeypointMatching.positional_encoder |
1 | 0 | 0 |
attr |
LightGlueForKeypointMatching.transformer_layers |
1 | 0 | 0 |
attr |
LightGlueForKeypointMatching.match_assignment_layers |
1 | 0 | 0 |
attr |
LightGlueForKeypointMatching.token_confidence |
1 | 0 | 0 |
attr |
LightGlueForKeypointMatching.input_projection |
1 | 0 | 0 |
meth |
LightGlueConfig.init |
15 | 10 | 0 |
attr |
LightGlueConfig.trust_remote_code |
1 | 0 | 0 |
attr |
LightGlueConfig.descriptor_dim |
1 | 0 | 0 |
attr |
LightGlueConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
LightGlueConfig.num_attention_heads |
1 | 0 | 0 |
attr |
LightGlueConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
LightGlueConfig.depth_confidence |
1 | 0 | 0 |
attr |
LightGlueConfig.width_confidence |
1 | 0 | 0 |
attr |
LightGlueConfig.filter_threshold |
1 | 0 | 0 |
attr |
LightGlueConfig.initializer_range |
1 | 0 | 0 |
attr |
LightGlueConfig.keypoint_detector_config |
1 | 0 | 0 |
attr |
LightGlueConfig.hidden_size |
1 | 0 | 0 |
attr |
LightGlueConfig.intermediate_size |
1 | 0 | 0 |
attr |
LightGlueConfig.hidden_act |
1 | 0 | 0 |
attr |
LightGlueConfig.attention_dropout |
1 | 0 | 0 |
attr |
LightGlueConfig.attention_bias |
1 | 0 | 0 |
transformers.models.lighton_ocr.configuration_lighton_ocr (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LightOnOcrConfig.init |
7 | 5 | 0 |
attr |
LightOnOcrConfig.spatial_merge_size |
1 | 0 | 0 |
attr |
LightOnOcrConfig.image_token_id |
1 | 0 | 0 |
attr |
LightOnOcrConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
LightOnOcrConfig.vision_config |
1 | 0 | 0 |
attr |
LightOnOcrConfig.text_config |
1 | 0 | 0 |
transformers.models.lighton_ocr.modeling_lighton_ocr (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LightOnOcrForConditionalGeneration.init |
2 | 1 | 0 |
meth |
LightOnOcrForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
LightOnOcrForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
LightOnOcrForConditionalGeneration.prepare_inputs_for_generation |
10 | 0 | 0 |
attr |
LightOnOcrForConditionalGeneration.model |
1 | 0 | 0 |
attr |
LightOnOcrForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
LightOnOcrModel.init |
2 | 1 | 0 |
meth |
LightOnOcrModel.get_input_embeddings |
1 | 0 | 0 |
meth |
LightOnOcrModel.set_input_embeddings |
2 | 0 | 0 |
meth |
LightOnOcrModel.get_placeholder_mask |
4 | 3 | 0 |
attr |
LightOnOcrModel.vision_encoder |
1 | 0 | 0 |
attr |
LightOnOcrModel.vision_projection |
1 | 0 | 0 |
attr |
LightOnOcrModel.language_model |
1 | 0 | 0 |
transformers.models.lighton_ocr.modular_lighton_ocr (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
LightOnOcrPreTrainedModel |
1 | 0 | 0 |
meth |
LightOnOcrProcessor.init |
7 | 2 | 0 |
meth |
LightOnOcrProcessor._get_num_multimodal_tokens |
3 | 0 | 0 |
attr |
LightOnOcrProcessor.patch_size |
1 | 0 | 0 |
attr |
LightOnOcrProcessor.spatial_merge_size |
1 | 0 | 0 |
attr |
LightOnOcrProcessor.effective_patch_size |
1 | 0 | 0 |
attr |
LightOnOcrProcessor.image_token |
1 | 0 | 0 |
attr |
LightOnOcrProcessor.image_break_token |
1 | 0 | 0 |
attr |
LightOnOcrProcessor.image_end_token |
1 | 0 | 0 |
attr |
LightOnOcrProcessor.image_token_id |
1 | 0 | 0 |
attr |
LightOnOcrProcessor.image_break_token_id |
1 | 0 | 0 |
attr |
LightOnOcrProcessor.image_end_token_id |
1 | 0 | 0 |
attr |
LightOnOcrProcessor.image_ids |
1 | 0 | 0 |
meth |
LightOnOcrConfig.init |
7 | 5 | 0 |
attr |
LightOnOcrConfig.spatial_merge_size |
1 | 0 | 0 |
attr |
LightOnOcrConfig.image_token_id |
1 | 0 | 0 |
attr |
LightOnOcrConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
LightOnOcrConfig.vision_config |
1 | 0 | 0 |
attr |
LightOnOcrConfig.text_config |
1 | 0 | 0 |
meth |
LightOnOcrModel.init |
2 | 1 | 0 |
attr |
LightOnOcrModel.vision_encoder |
1 | 0 | 0 |
attr |
LightOnOcrModel.vision_projection |
1 | 0 | 0 |
attr |
LightOnOcrModel.language_model |
1 | 0 | 0 |
transformers.models.lighton_ocr.processing_lighton_ocr (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LightOnOcrProcessor.init |
7 | 2 | 0 |
meth |
LightOnOcrProcessor._get_num_multimodal_tokens |
3 | 0 | 0 |
attr |
LightOnOcrProcessor.patch_size |
1 | 0 | 0 |
attr |
LightOnOcrProcessor.spatial_merge_size |
1 | 0 | 0 |
attr |
LightOnOcrProcessor.effective_patch_size |
1 | 0 | 0 |
attr |
LightOnOcrProcessor.image_token |
1 | 0 | 0 |
attr |
LightOnOcrProcessor.image_break_token |
1 | 0 | 0 |
attr |
LightOnOcrProcessor.image_end_token |
1 | 0 | 0 |
attr |
LightOnOcrProcessor.image_token_id |
1 | 0 | 0 |
attr |
LightOnOcrProcessor.image_break_token_id |
1 | 0 | 0 |
attr |
LightOnOcrProcessor.image_end_token_id |
1 | 0 | 0 |
attr |
LightOnOcrProcessor.image_ids |
1 | 0 | 0 |
transformers.models.lilt.configuration_lilt (38 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LiltConfig.init |
20 | 0 | 0 |
attr |
LiltConfig.pad_token_id |
1 | 0 | 0 |
attr |
LiltConfig.bos_token_id |
1 | 0 | 0 |
attr |
LiltConfig.eos_token_id |
1 | 0 | 0 |
attr |
LiltConfig.vocab_size |
1 | 0 | 0 |
attr |
LiltConfig.hidden_size |
1 | 0 | 0 |
attr |
LiltConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
LiltConfig.num_attention_heads |
1 | 0 | 0 |
attr |
LiltConfig.hidden_act |
1 | 0 | 0 |
attr |
LiltConfig.intermediate_size |
1 | 0 | 0 |
attr |
LiltConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
LiltConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
LiltConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
LiltConfig.type_vocab_size |
1 | 0 | 0 |
attr |
LiltConfig.initializer_range |
1 | 0 | 0 |
attr |
LiltConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
LiltConfig.classifier_dropout |
1 | 0 | 0 |
attr |
LiltConfig.channel_shrink_ratio |
1 | 0 | 0 |
attr |
LiltConfig.max_2d_position_embeddings |
1 | 0 | 0 |
transformers.models.lilt.modeling_lilt (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LiltForTokenClassification.init |
2 | 0 | 0 |
meth |
LiltForTokenClassification.forward |
12 | 11 | 0 |
attr |
LiltForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
LiltForTokenClassification.lilt |
1 | 0 | 0 |
attr |
LiltForTokenClassification.dropout |
1 | 0 | 0 |
attr |
LiltForTokenClassification.classifier |
1 | 0 | 0 |
meth |
LiltForQuestionAnswering.init |
2 | 0 | 0 |
meth |
LiltForQuestionAnswering.forward |
13 | 12 | 0 |
attr |
LiltForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
LiltForQuestionAnswering.lilt |
1 | 0 | 0 |
attr |
LiltForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
LiltForSequenceClassification.init |
2 | 0 | 0 |
meth |
LiltForSequenceClassification.forward |
12 | 11 | 0 |
attr |
LiltForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
LiltForSequenceClassification.lilt |
1 | 0 | 0 |
attr |
LiltForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
LiltPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
LiltModel.init |
3 | 0 | 0 |
meth |
LiltModel.get_input_embeddings |
1 | 0 | 0 |
meth |
LiltModel.set_input_embeddings |
2 | 0 | 0 |
meth |
LiltModel.forward |
11 | 10 | 0 |
attr |
LiltModel.embeddings |
1 | 0 | 0 |
attr |
LiltModel.layout_embeddings |
1 | 0 | 0 |
attr |
LiltModel.encoder |
1 | 0 | 0 |
attr |
LiltModel.pooler |
1 | 0 | 0 |
transformers.models.llama.configuration_llama (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LlamaConfig.init |
23 | 21 | 0 |
attr |
LlamaConfig.vocab_size |
1 | 0 | 0 |
attr |
LlamaConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
LlamaConfig.hidden_size |
1 | 0 | 0 |
attr |
LlamaConfig.intermediate_size |
1 | 0 | 0 |
attr |
LlamaConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
LlamaConfig.num_attention_heads |
1 | 0 | 0 |
attr |
LlamaConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
LlamaConfig.hidden_act |
1 | 0 | 0 |
attr |
LlamaConfig.initializer_range |
1 | 0 | 0 |
attr |
LlamaConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
LlamaConfig.pretraining_tp |
1 | 0 | 0 |
attr |
LlamaConfig.use_cache |
1 | 0 | 0 |
attr |
LlamaConfig.attention_bias |
1 | 0 | 0 |
attr |
LlamaConfig.attention_dropout |
1 | 0 | 0 |
attr |
LlamaConfig.mlp_bias |
1 | 0 | 0 |
attr |
LlamaConfig.head_dim |
1 | 0 | 0 |
attr |
LlamaConfig.rope_parameters |
1 | 0 | 0 |
attr |
LlamaConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
LlamaConfig.pad_token_id |
1 | 0 | 0 |
attr |
LlamaConfig.bos_token_id |
1 | 0 | 0 |
attr |
LlamaConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.llama.modeling_llama (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LlamaForCausalLM.__init__ |
2 | 0 | 0 |
attr |
LlamaForCausalLM.model |
1 | 0 | 0 |
attr |
LlamaForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
LlamaForCausalLM.lm_head |
1 | 0 | 0 |
meth |
LlamaModel.__init__ |
2 | 1 | 0 |
attr |
LlamaModel.padding_idx |
1 | 0 | 0 |
attr |
LlamaModel.vocab_size |
1 | 0 | 0 |
attr |
LlamaModel.embed_tokens |
1 | 0 | 0 |
attr |
LlamaModel.layers |
1 | 0 | 0 |
attr |
LlamaModel.norm |
1 | 0 | 0 |
attr |
LlamaModel.rotary_emb |
1 | 0 | 0 |
attr |
LlamaModel.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.llama.tokenization_llama (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LlamaTokenizer.__init__ |
11 | 2 | 0 |
attr |
LlamaTokenizer.add_prefix_space |
1 | 0 | 0 |
attr |
LlamaTokenizer.legacy |
1 | 0 | 0 |
attr |
LlamaTokenizer.use_default_system_prompt |
1 | 0 | 0 |
transformers.models.llama4.configuration_llama4 (104 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Llama4TextConfig.__init__ |
36 | 1 | 0 |
attr |
Llama4TextConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Llama4TextConfig.pad_token_id |
1 | 0 | 0 |
attr |
Llama4TextConfig.bos_token_id |
1 | 0 | 0 |
attr |
Llama4TextConfig.eos_token_id |
1 | 0 | 0 |
attr |
Llama4TextConfig.attn_temperature_tuning |
1 | 0 | 0 |
attr |
Llama4TextConfig.attn_scale |
1 | 0 | 0 |
attr |
Llama4TextConfig.floor_scale |
1 | 0 | 0 |
attr |
Llama4TextConfig.vocab_size |
1 | 0 | 0 |
attr |
Llama4TextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Llama4TextConfig.hidden_size |
1 | 0 | 0 |
attr |
Llama4TextConfig.intermediate_size |
1 | 0 | 0 |
attr |
Llama4TextConfig.intermediate_size_mlp |
1 | 0 | 0 |
attr |
Llama4TextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Llama4TextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Llama4TextConfig.attention_bias |
1 | 0 | 0 |
attr |
Llama4TextConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Llama4TextConfig.hidden_act |
1 | 0 | 0 |
attr |
Llama4TextConfig.initializer_range |
1 | 0 | 0 |
attr |
Llama4TextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Llama4TextConfig.use_cache |
1 | 0 | 0 |
attr |
Llama4TextConfig.attention_dropout |
1 | 0 | 0 |
attr |
Llama4TextConfig.head_dim |
1 | 0 | 0 |
attr |
Llama4TextConfig.use_qk_norm |
1 | 0 | 0 |
attr |
Llama4TextConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
Llama4TextConfig.num_local_experts |
1 | 0 | 0 |
attr |
Llama4TextConfig.output_router_logits |
1 | 0 | 0 |
attr |
Llama4TextConfig.router_aux_loss_coef |
1 | 0 | 0 |
attr |
Llama4TextConfig.router_jitter_noise |
1 | 0 | 0 |
attr |
Llama4TextConfig.no_rope_layers |
1 | 0 | 0 |
attr |
Llama4TextConfig.interleave_moe_layer_step |
1 | 0 | 0 |
attr |
Llama4TextConfig.moe_layers |
1 | 0 | 0 |
attr |
Llama4TextConfig.attention_chunk_size |
1 | 0 | 0 |
attr |
Llama4TextConfig.layer_types |
1 | 0 | 0 |
attr |
Llama4TextConfig.rope_parameters |
1 | 0 | 0 |
meth |
Llama4VisionConfig.__init__ |
21 | 19 | 0 |
attr |
Llama4VisionConfig.hidden_size |
1 | 0 | 0 |
attr |
Llama4VisionConfig.hidden_act |
1 | 0 | 0 |
attr |
Llama4VisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Llama4VisionConfig.num_channels |
1 | 0 | 0 |
attr |
Llama4VisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
Llama4VisionConfig.image_size |
1 | 0 | 0 |
attr |
Llama4VisionConfig.vision_output_dim |
1 | 0 | 0 |
attr |
Llama4VisionConfig.patch_size |
1 | 0 | 0 |
attr |
Llama4VisionConfig.norm_eps |
1 | 0 | 0 |
attr |
Llama4VisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Llama4VisionConfig.initializer_range |
1 | 0 | 0 |
attr |
Llama4VisionConfig.pixel_shuffle_ratio |
1 | 0 | 0 |
attr |
Llama4VisionConfig.projector_input_dim |
1 | 0 | 0 |
attr |
Llama4VisionConfig.projector_output_dim |
1 | 0 | 0 |
attr |
Llama4VisionConfig.multi_modal_projector_bias |
1 | 0 | 0 |
attr |
Llama4VisionConfig.projector_dropout |
1 | 0 | 0 |
attr |
Llama4VisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
Llama4VisionConfig.vision_feature_select_strategy |
1 | 0 | 0 |
attr |
Llama4VisionConfig.rope_parameters |
1 | 0 | 0 |
meth |
Llama4Config.__init__ |
8 | 0 | 0 |
attr |
Llama4Config.boi_token_index |
1 | 0 | 0 |
attr |
Llama4Config.eoi_token_index |
1 | 0 | 0 |
attr |
Llama4Config.image_token_index |
1 | 0 | 0 |
attr |
Llama4Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
Llama4Config.vision_config |
1 | 0 | 0 |
attr |
Llama4Config.text_config |
1 | 0 | 0 |
transformers.models.llama4.image_processing_llama4_fast (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Llama4ImageProcessorFast.__init__ |
2 | 1 | 0 |
meth |
Llama4ImageProcessorFast._preprocess |
14 | 13 | 0 |
transformers.models.llama4.modeling_llama4 (57 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Llama4TextModel.__init__ |
2 | 1 | 0 |
attr |
Llama4TextModel.padding_idx |
1 | 0 | 0 |
attr |
Llama4TextModel.vocab_size |
1 | 0 | 0 |
attr |
Llama4TextModel.embed_tokens |
1 | 0 | 0 |
attr |
Llama4TextModel.layers |
1 | 0 | 0 |
attr |
Llama4TextModel.norm |
1 | 0 | 0 |
attr |
Llama4TextModel.rotary_emb |
1 | 0 | 0 |
attr |
Llama4TextModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
Llama4PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Llama4ForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
Llama4ForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Llama4ForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Llama4ForConditionalGeneration.get_output_embeddings |
1 | 0 | 0 |
meth |
Llama4ForConditionalGeneration.set_output_embeddings |
2 | 0 | 0 |
meth |
Llama4ForConditionalGeneration.set_decoder |
2 | 0 | 0 |
meth |
Llama4ForConditionalGeneration.get_decoder |
1 | 0 | 0 |
meth |
Llama4ForConditionalGeneration.get_placeholder_mask |
4 | 3 | 0 |
meth |
Llama4ForConditionalGeneration.prepare_inputs_for_generation |
10 | 0 | 0 |
attr |
Llama4ForConditionalGeneration.vision_model |
1 | 0 | 0 |
attr |
Llama4ForConditionalGeneration.multi_modal_projector |
1 | 0 | 0 |
attr |
Llama4ForConditionalGeneration.language_model |
1 | 0 | 0 |
attr |
Llama4ForConditionalGeneration.vocab_size |
1 | 0 | 0 |
attr |
Llama4ForConditionalGeneration.pad_token_id |
1 | 0 | 0 |
meth |
Llama4ForCausalLM.__init__ |
2 | 1 | 0 |
attr |
Llama4ForCausalLM.model |
1 | 0 | 0 |
attr |
Llama4ForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Llama4ForCausalLM.lm_head |
1 | 0 | 0 |
meth |
Llama4VisionModel.__init__ |
2 | 1 | 0 |
meth |
Llama4VisionModel.get_input_embeddings |
1 | 0 | 0 |
meth |
Llama4VisionModel.forward |
7 | 6 | 0 |
attr |
Llama4VisionModel.image_size |
1 | 0 | 0 |
attr |
Llama4VisionModel.patch_size |
1 | 0 | 0 |
attr |
Llama4VisionModel.hidden_size |
1 | 0 | 0 |
attr |
Llama4VisionModel.num_channels |
1 | 0 | 0 |
attr |
Llama4VisionModel.num_patches |
1 | 0 | 0 |
attr |
Llama4VisionModel.scale |
1 | 0 | 0 |
attr |
Llama4VisionModel.patch_embedding |
1 | 0 | 0 |
attr |
Llama4VisionModel.class_embedding |
1 | 0 | 0 |
attr |
Llama4VisionModel.positional_embedding_vlm |
1 | 0 | 0 |
attr |
Llama4VisionModel.rotary_embedding |
1 | 0 | 0 |
attr |
Llama4VisionModel.layernorm_pre |
1 | 0 | 0 |
attr |
Llama4VisionModel.layernorm_post |
1 | 0 | 0 |
attr |
Llama4VisionModel.model |
1 | 0 | 0 |
attr |
Llama4VisionModel.vision_adapter |
1 | 0 | 0 |
transformers.models.llama4.processing_llama4 (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Llama4Processor.__init__ |
14 | 2 | 0 |
meth |
Llama4Processor._prompt_split_image |
3 | 0 | 0 |
attr |
Llama4Processor.downsample_ratio |
1 | 0 | 0 |
attr |
Llama4Processor.patch_size |
1 | 0 | 0 |
attr |
Llama4Processor.fake_image_token |
1 | 0 | 0 |
attr |
Llama4Processor.image_token |
1 | 0 | 0 |
attr |
Llama4Processor.image_token_id |
1 | 0 | 0 |
attr |
Llama4Processor.start_of_img_token |
1 | 0 | 0 |
attr |
Llama4Processor.end_of_img_token |
1 | 0 | 0 |
attr |
Llama4Processor.img_patch_token |
1 | 0 | 0 |
attr |
Llama4Processor.tile_token |
1 | 0 | 0 |
attr |
Llama4Processor.tile_global_token |
1 | 0 | 0 |
transformers.models.llava.configuration_llava (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LlavaConfig.__init__ |
11 | 0 | 0 |
attr |
LlavaConfig.image_token_index |
1 | 0 | 0 |
attr |
LlavaConfig.projector_hidden_act |
1 | 0 | 0 |
attr |
LlavaConfig.image_seq_length |
1 | 0 | 0 |
attr |
LlavaConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
LlavaConfig.vision_feature_select_strategy |
1 | 0 | 0 |
attr |
LlavaConfig.vision_feature_layer |
1 | 0 | 0 |
attr |
LlavaConfig.vision_config |
1 | 0 | 0 |
attr |
LlavaConfig.text_config |
1 | 0 | 0 |
attr |
LlavaConfig.multimodal_projector_bias |
1 | 0 | 0 |
transformers.models.llava.image_processing_llava (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LlavaImageProcessor.__init__ |
14 | 13 | 0 |
meth |
LlavaImageProcessor.resize |
7 | 6 | 0 |
meth |
LlavaImageProcessor.preprocess |
18 | 17 | 0 |
attr |
LlavaImageProcessor.do_pad |
1 | 0 | 0 |
attr |
LlavaImageProcessor.do_resize |
1 | 0 | 0 |
attr |
LlavaImageProcessor.size |
1 | 0 | 0 |
attr |
LlavaImageProcessor.resample |
1 | 0 | 0 |
attr |
LlavaImageProcessor.do_center_crop |
1 | 0 | 0 |
attr |
LlavaImageProcessor.crop_size |
1 | 0 | 0 |
attr |
LlavaImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
LlavaImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
LlavaImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
LlavaImageProcessor.image_mean |
1 | 0 | 0 |
attr |
LlavaImageProcessor.image_std |
1 | 0 | 0 |
attr |
LlavaImageProcessor.do_convert_rgb |
1 | 0 | 0 |
transformers.models.llava.image_processing_llava_fast (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LlavaImageProcessorFast._preprocess |
16 | 15 | 0 |
transformers.models.llava.modeling_llava (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LlavaModel.__init__ |
2 | 1 | 0 |
meth |
LlavaModel.get_input_embeddings |
1 | 0 | 0 |
meth |
LlavaModel.set_input_embeddings |
2 | 0 | 0 |
meth |
LlavaModel.get_placeholder_mask |
4 | 3 | 0 |
attr |
LlavaModel.vision_tower |
1 | 0 | 0 |
attr |
LlavaModel.multi_modal_projector |
1 | 0 | 0 |
attr |
LlavaModel.language_model |
1 | 0 | 0 |
meth |
LlavaForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
LlavaForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
LlavaForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
LlavaForConditionalGeneration.prepare_inputs_for_generation |
10 | 0 | 0 |
attr |
LlavaForConditionalGeneration.model |
1 | 0 | 0 |
attr |
LlavaForConditionalGeneration.lm_head |
1 | 0 | 0 |
transformers.models.llava.processing_llava (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LlavaProcessor.__init__ |
9 | 0 | 0 |
meth |
LlavaProcessor._get_num_multimodal_tokens |
3 | 0 | 0 |
attr |
LlavaProcessor.patch_size |
1 | 0 | 0 |
attr |
LlavaProcessor.num_additional_image_tokens |
1 | 0 | 0 |
attr |
LlavaProcessor.vision_feature_select_strategy |
1 | 0 | 0 |
attr |
LlavaProcessor.image_token |
1 | 0 | 0 |
attr |
LlavaProcessor.image_token_id |
1 | 0 | 0 |
transformers.models.llava_next.configuration_llava_next (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LlavaNextConfig.__init__ |
12 | 0 | 0 |
attr |
LlavaNextConfig.image_token_index |
1 | 0 | 0 |
attr |
LlavaNextConfig.projector_hidden_act |
1 | 0 | 0 |
attr |
LlavaNextConfig.image_seq_length |
1 | 0 | 0 |
attr |
LlavaNextConfig.multimodal_projector_bias |
1 | 0 | 0 |
attr |
LlavaNextConfig.vision_feature_select_strategy |
1 | 0 | 0 |
attr |
LlavaNextConfig.vision_feature_layer |
1 | 0 | 0 |
attr |
LlavaNextConfig.image_grid_pinpoints |
1 | 0 | 0 |
attr |
LlavaNextConfig.vision_config |
1 | 0 | 0 |
attr |
LlavaNextConfig.text_config |
1 | 0 | 0 |
attr |
LlavaNextConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.llava_next.image_processing_llava_next (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LlavaNextImageProcessor.__init__ |
15 | 14 | 0 |
meth |
LlavaNextImageProcessor.resize |
7 | 6 | 0 |
meth |
LlavaNextImageProcessor._resize_for_patching |
5 | 4 | 0 |
meth |
LlavaNextImageProcessor._get_padding_size |
3 | 2 | 0 |
meth |
LlavaNextImageProcessor.get_image_patches |
8 | 7 | 0 |
meth |
LlavaNextImageProcessor._pad_for_batching |
4 | 3 | 0 |
meth |
LlavaNextImageProcessor.preprocess |
18 | 17 | 0 |
attr |
LlavaNextImageProcessor.do_resize |
1 | 0 | 0 |
attr |
LlavaNextImageProcessor.size |
1 | 0 | 0 |
attr |
LlavaNextImageProcessor.image_grid_pinpoints |
1 | 0 | 0 |
attr |
LlavaNextImageProcessor.resample |
1 | 0 | 0 |
attr |
LlavaNextImageProcessor.do_center_crop |
1 | 0 | 0 |
attr |
LlavaNextImageProcessor.crop_size |
1 | 0 | 0 |
attr |
LlavaNextImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
LlavaNextImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
LlavaNextImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
LlavaNextImageProcessor.image_mean |
1 | 0 | 0 |
attr |
LlavaNextImageProcessor.image_std |
1 | 0 | 0 |
attr |
LlavaNextImageProcessor.do_pad |
1 | 0 | 0 |
attr |
LlavaNextImageProcessor.do_convert_rgb |
1 | 0 | 0 |
transformers.models.llava_next.image_processing_llava_next_fast (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LlavaNextImageProcessorFast.__init__ |
2 | 1 | 0 |
meth |
LlavaNextImageProcessorFast._get_padding_size |
3 | 2 | 0 |
meth |
LlavaNextImageProcessorFast._get_image_patches |
6 | 5 | 0 |
meth |
LlavaNextImageProcessorFast._preprocess |
17 | 16 | 0 |
transformers.models.llava_next.modeling_llava_next (39 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LlavaNextModel.__init__ |
2 | 1 | 0 |
meth |
LlavaNextModel.get_input_embeddings |
1 | 0 | 0 |
meth |
LlavaNextModel.set_input_embeddings |
2 | 0 | 0 |
meth |
LlavaNextModel.pack_image_features |
5 | 0 | 0 |
meth |
LlavaNextModel.get_placeholder_mask |
4 | 3 | 0 |
attr |
LlavaNextModel.vision_tower |
1 | 0 | 0 |
attr |
LlavaNextModel.multi_modal_projector |
1 | 0 | 0 |
attr |
LlavaNextModel.image_newline |
1 | 0 | 0 |
attr |
LlavaNextModel.vocab_size |
1 | 0 | 0 |
attr |
LlavaNextModel.language_model |
1 | 0 | 0 |
meth |
LlavaNextPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
LlavaNextForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
LlavaNextForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
LlavaNextForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
LlavaNextForConditionalGeneration.pack_image_features |
5 | 0 | 0 |
meth |
LlavaNextForConditionalGeneration.prepare_inputs_for_generation |
11 | 0 | 0 |
attr |
LlavaNextForConditionalGeneration.model |
1 | 0 | 0 |
attr |
LlavaNextForConditionalGeneration.lm_head |
1 | 0 | 0 |
transformers.models.llava_next.processing_llava_next (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LlavaNextProcessor.__init__ |
9 | 0 | 0 |
meth |
LlavaNextProcessor._get_unpadded_features |
7 | 0 | 0 |
meth |
LlavaNextProcessor._get_num_multimodal_tokens |
3 | 0 | 0 |
attr |
LlavaNextProcessor.patch_size |
1 | 0 | 0 |
attr |
LlavaNextProcessor.num_additional_image_tokens |
1 | 0 | 0 |
attr |
LlavaNextProcessor.vision_feature_select_strategy |
1 | 0 | 0 |
attr |
LlavaNextProcessor.image_token |
1 | 0 | 0 |
attr |
LlavaNextProcessor.image_token_id |
1 | 0 | 0 |
transformers.models.llava_next_video.configuration_llava_next_video (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LlavaNextVideoConfig.__init__ |
16 | 0 | 0 |
attr |
LlavaNextVideoConfig.video_token_index |
1 | 0 | 0 |
attr |
LlavaNextVideoConfig.spatial_pool_mode |
1 | 0 | 0 |
attr |
LlavaNextVideoConfig.spatial_pool_stride |
1 | 0 | 0 |
attr |
LlavaNextVideoConfig.image_seq_length |
1 | 0 | 0 |
attr |
LlavaNextVideoConfig.video_seq_length |
1 | 0 | 0 |
attr |
LlavaNextVideoConfig.image_token_index |
1 | 0 | 0 |
attr |
LlavaNextVideoConfig.projector_hidden_act |
1 | 0 | 0 |
attr |
LlavaNextVideoConfig.multimodal_projector_bias |
1 | 0 | 0 |
attr |
LlavaNextVideoConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
LlavaNextVideoConfig.vision_feature_select_strategy |
1 | 0 | 0 |
attr |
LlavaNextVideoConfig.vision_feature_layer |
1 | 0 | 0 |
attr |
LlavaNextVideoConfig.image_grid_pinpoints |
1 | 0 | 0 |
attr |
LlavaNextVideoConfig.vision_config |
1 | 0 | 0 |
attr |
LlavaNextVideoConfig.text_config |
1 | 0 | 0 |
transformers.models.llava_next_video.modeling_llava_next_video (41 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LlavaNextVideoPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
LlavaNextVideoForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
LlavaNextVideoForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
LlavaNextVideoForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
LlavaNextVideoForConditionalGeneration.pack_image_features |
5 | 0 | 0 |
meth |
LlavaNextVideoForConditionalGeneration.prepare_inputs_for_generation |
12 | 0 | 0 |
attr |
LlavaNextVideoForConditionalGeneration.model |
1 | 0 | 0 |
attr |
LlavaNextVideoForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
LlavaNextVideoModel.__init__ |
2 | 1 | 0 |
meth |
LlavaNextVideoModel.get_input_embeddings |
1 | 0 | 0 |
meth |
LlavaNextVideoModel.set_input_embeddings |
2 | 0 | 0 |
meth |
LlavaNextVideoModel.pack_image_features |
5 | 0 | 0 |
meth |
LlavaNextVideoModel.get_placeholder_mask |
5 | 4 | 0 |
attr |
LlavaNextVideoModel.vision_tower |
1 | 0 | 0 |
attr |
LlavaNextVideoModel.multi_modal_projector |
1 | 0 | 0 |
attr |
LlavaNextVideoModel.image_newline |
1 | 0 | 0 |
attr |
LlavaNextVideoModel.vocab_size |
1 | 0 | 0 |
attr |
LlavaNextVideoModel.language_model |
1 | 0 | 0 |
attr |
LlavaNextVideoModel.vision_resampler |
1 | 0 | 0 |
transformers.models.llava_next_video.modular_llava_next_video (46 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LlavaNextVideoForConditionalGeneration.prepare_inputs_for_generation |
12 | 0 | 0 |
meth |
LlavaNextVideoConfig.__init__ |
16 | 0 | 0 |
attr |
LlavaNextVideoConfig.video_token_index |
1 | 0 | 0 |
attr |
LlavaNextVideoConfig.spatial_pool_mode |
1 | 0 | 0 |
attr |
LlavaNextVideoConfig.spatial_pool_stride |
1 | 0 | 0 |
attr |
LlavaNextVideoConfig.image_seq_length |
1 | 0 | 0 |
attr |
LlavaNextVideoConfig.video_seq_length |
1 | 0 | 0 |
attr |
LlavaNextVideoConfig.image_token_index |
1 | 0 | 0 |
attr |
LlavaNextVideoConfig.projector_hidden_act |
1 | 0 | 0 |
attr |
LlavaNextVideoConfig.multimodal_projector_bias |
1 | 0 | 0 |
attr |
LlavaNextVideoConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
LlavaNextVideoConfig.vision_feature_select_strategy |
1 | 0 | 0 |
attr |
LlavaNextVideoConfig.vision_feature_layer |
1 | 0 | 0 |
attr |
LlavaNextVideoConfig.image_grid_pinpoints |
1 | 0 | 0 |
attr |
LlavaNextVideoConfig.vision_config |
1 | 0 | 0 |
attr |
LlavaNextVideoConfig.text_config |
1 | 0 | 0 |
meth |
LlavaNextVideoModel.__init__ |
3 | 1 | 0 |
meth |
LlavaNextVideoModel.get_placeholder_mask |
5 | 4 | 0 |
attr |
LlavaNextVideoModel.vision_resampler |
1 | 0 | 0 |
transformers.models.llava_next_video.processing_llava_next_video (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LlavaNextVideoProcessor.__init__ |
11 | 0 | 0 |
meth |
LlavaNextVideoProcessor._get_unpadded_features |
7 | 0 | 0 |
meth |
LlavaNextVideoProcessor._get_num_multimodal_tokens |
3 | 0 | 0 |
attr |
LlavaNextVideoProcessor.patch_size |
1 | 0 | 0 |
attr |
LlavaNextVideoProcessor.num_additional_image_tokens |
1 | 0 | 0 |
attr |
LlavaNextVideoProcessor.vision_feature_select_strategy |
1 | 0 | 0 |
attr |
LlavaNextVideoProcessor.image_token |
1 | 0 | 0 |
attr |
LlavaNextVideoProcessor.video_token |
1 | 0 | 0 |
attr |
LlavaNextVideoProcessor.image_token_id |
1 | 0 | 0 |
attr |
LlavaNextVideoProcessor.video_token_id |
1 | 0 | 0 |
transformers.models.llava_onevision.configuration_llava_onevision (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LlavaOnevisionConfig.__init__ |
13 | 0 | 0 |
attr |
LlavaOnevisionConfig.image_token_index |
1 | 0 | 0 |
attr |
LlavaOnevisionConfig.video_token_index |
1 | 0 | 0 |
attr |
LlavaOnevisionConfig.projector_hidden_act |
1 | 0 | 0 |
attr |
LlavaOnevisionConfig.multimodal_projector_bias |
1 | 0 | 0 |
attr |
LlavaOnevisionConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
LlavaOnevisionConfig.vision_feature_select_strategy |
1 | 0 | 0 |
attr |
LlavaOnevisionConfig.vision_feature_layer |
1 | 0 | 0 |
attr |
LlavaOnevisionConfig.vision_aspect_ratio |
1 | 0 | 0 |
attr |
LlavaOnevisionConfig.image_grid_pinpoints |
1 | 0 | 0 |
attr |
LlavaOnevisionConfig.vision_config |
1 | 0 | 0 |
attr |
LlavaOnevisionConfig.text_config |
1 | 0 | 0 |
transformers.models.llava_onevision.image_processing_llava_onevision (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LlavaOnevisionImageProcessor.__init__ |
13 | 12 | 0 |
meth |
LlavaOnevisionImageProcessor._resize_for_patching |
5 | 4 | 0 |
meth |
LlavaOnevisionImageProcessor._get_padding_size |
3 | 2 | 0 |
meth |
LlavaOnevisionImageProcessor.get_image_patches |
8 | 7 | 0 |
meth |
LlavaOnevisionImageProcessor._pad_for_batching |
4 | 3 | 0 |
meth |
LlavaOnevisionImageProcessor.preprocess |
16 | 15 | 0 |
attr |
LlavaOnevisionImageProcessor.do_resize |
1 | 0 | 0 |
attr |
LlavaOnevisionImageProcessor.size |
1 | 0 | 0 |
attr |
LlavaOnevisionImageProcessor.image_grid_pinpoints |
1 | 0 | 0 |
attr |
LlavaOnevisionImageProcessor.resample |
1 | 0 | 0 |
attr |
LlavaOnevisionImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
LlavaOnevisionImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
LlavaOnevisionImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
LlavaOnevisionImageProcessor.image_mean |
1 | 0 | 0 |
attr |
LlavaOnevisionImageProcessor.image_std |
1 | 0 | 0 |
attr |
LlavaOnevisionImageProcessor.do_pad |
1 | 0 | 0 |
attr |
LlavaOnevisionImageProcessor.do_convert_rgb |
1 | 0 | 0 |
transformers.models.llava_onevision.image_processing_llava_onevision_fast (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LlavaOnevisionImageProcessorFast.__init__ |
2 | 1 | 0 |
meth |
LlavaOnevisionImageProcessorFast._get_padding_size |
3 | 2 | 0 |
meth |
LlavaOnevisionImageProcessorFast._get_image_patches |
6 | 5 | 0 |
meth |
LlavaOnevisionImageProcessorFast._preprocess |
18 | 17 | 0 |
transformers.models.llava_onevision.modeling_llava_onevision (44 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LlavaOnevisionPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
LlavaOnevisionModel.__init__ |
2 | 0 | 0 |
meth |
LlavaOnevisionModel.get_input_embeddings |
1 | 0 | 0 |
meth |
LlavaOnevisionModel.set_input_embeddings |
2 | 0 | 0 |
meth |
LlavaOnevisionModel.pack_image_features |
5 | 0 | 0 |
meth |
LlavaOnevisionModel.get_placeholder_mask |
5 | 4 | 0 |
meth |
LlavaOnevisionModel.apply_pooling |
2 | 0 | 0 |
attr |
LlavaOnevisionModel.vision_tower |
1 | 0 | 0 |
attr |
LlavaOnevisionModel.multi_modal_projector |
1 | 0 | 0 |
attr |
LlavaOnevisionModel.image_newline |
1 | 0 | 0 |
attr |
LlavaOnevisionModel.vocab_size |
1 | 0 | 0 |
attr |
LlavaOnevisionModel.language_model |
1 | 0 | 0 |
meth |
LlavaOnevisionForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
LlavaOnevisionForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
LlavaOnevisionForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
LlavaOnevisionForConditionalGeneration.pack_image_features |
5 | 0 | 0 |
meth |
LlavaOnevisionForConditionalGeneration.prepare_inputs_for_generation |
13 | 0 | 0 |
attr |
LlavaOnevisionForConditionalGeneration.model |
1 | 0 | 0 |
attr |
LlavaOnevisionForConditionalGeneration.lm_head |
1 | 0 | 0 |
transformers.models.llava_onevision.modular_llava_onevision (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LlavaOnevisionImageProcessorFast._preprocess |
18 | 17 | 0 |
meth |
LlavaOnevisionModel.__init__ |
2 | 0 | 0 |
meth |
LlavaOnevisionModel.pack_image_features |
5 | 0 | 0 |
meth |
LlavaOnevisionModel.apply_pooling |
2 | 0 | 0 |
meth |
LlavaOnevisionForConditionalGeneration.prepare_inputs_for_generation |
13 | 0 | 0 |
transformers.models.llava_onevision.processing_llava_onevision (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LlavaOnevisionProcessor.__init__ |
11 | 0 | 0 |
meth |
LlavaOnevisionProcessor._expand_image_tokens |
7 | 6 | 0 |
meth |
LlavaOnevisionProcessor._get_unpadded_features |
7 | 0 | 0 |
meth |
LlavaOnevisionProcessor._get_num_multimodal_tokens |
4 | 0 | 0 |
attr |
LlavaOnevisionProcessor.num_image_tokens |
1 | 0 | 0 |
attr |
LlavaOnevisionProcessor.vision_feature_select_strategy |
1 | 0 | 0 |
attr |
LlavaOnevisionProcessor.image_token |
1 | 0 | 0 |
attr |
LlavaOnevisionProcessor.video_token |
1 | 0 | 0 |
attr |
LlavaOnevisionProcessor.image_token_id |
1 | 0 | 0 |
attr |
LlavaOnevisionProcessor.video_token_id |
1 | 0 | 0 |
attr |
LlavaOnevisionProcessor.vision_aspect_ratio |
1 | 0 | 0 |
transformers.models.longcat_flash.configuration_longcat_flash (35 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LongcatFlashConfig.__init__ |
33 | 31 | 0 |
meth |
LongcatFlashConfig.convert_rope_params_to_dict |
3 | 1 | 0 |
attr |
LongcatFlashConfig.vocab_size |
1 | 0 | 0 |
attr |
LongcatFlashConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
LongcatFlashConfig.hidden_size |
1 | 0 | 0 |
attr |
LongcatFlashConfig.num_layers |
1 | 0 | 0 |
attr |
LongcatFlashConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
LongcatFlashConfig.num_attention_heads |
1 | 0 | 0 |
attr |
LongcatFlashConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
LongcatFlashConfig.hidden_act |
1 | 0 | 0 |
attr |
LongcatFlashConfig.initializer_range |
1 | 0 | 0 |
attr |
LongcatFlashConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
LongcatFlashConfig.use_cache |
1 | 0 | 0 |
attr |
LongcatFlashConfig.attention_bias |
1 | 0 | 0 |
attr |
LongcatFlashConfig.attention_dropout |
1 | 0 | 0 |
attr |
LongcatFlashConfig.ffn_hidden_size |
1 | 0 | 0 |
attr |
LongcatFlashConfig.q_lora_rank |
1 | 0 | 0 |
attr |
LongcatFlashConfig.kv_lora_rank |
1 | 0 | 0 |
attr |
LongcatFlashConfig.qk_nope_head_dim |
1 | 0 | 0 |
attr |
LongcatFlashConfig.qk_rope_head_dim |
1 | 0 | 0 |
attr |
LongcatFlashConfig.v_head_dim |
1 | 0 | 0 |
attr |
LongcatFlashConfig.qk_head_dim |
1 | 0 | 0 |
attr |
LongcatFlashConfig.head_dim |
1 | 0 | 0 |
attr |
LongcatFlashConfig.moe_topk |
1 | 0 | 0 |
attr |
LongcatFlashConfig.n_routed_experts |
1 | 0 | 0 |
attr |
LongcatFlashConfig.zero_expert_num |
1 | 0 | 0 |
attr |
LongcatFlashConfig.expert_ffn_hidden_size |
1 | 0 | 0 |
attr |
LongcatFlashConfig.routed_scaling_factor |
1 | 0 | 0 |
attr |
LongcatFlashConfig.rope_parameters |
1 | 0 | 0 |
attr |
LongcatFlashConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
LongcatFlashConfig.pad_token_id |
1 | 0 | 0 |
attr |
LongcatFlashConfig.bos_token_id |
1 | 0 | 0 |
attr |
LongcatFlashConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.longcat_flash.modeling_longcat_flash (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LongcatFlashModel.__init__ |
2 | 0 | 0 |
attr |
LongcatFlashModel.padding_idx |
1 | 0 | 0 |
attr |
LongcatFlashModel.vocab_size |
1 | 0 | 0 |
attr |
LongcatFlashModel.embed_tokens |
1 | 0 | 0 |
attr |
LongcatFlashModel.layers |
1 | 0 | 0 |
attr |
LongcatFlashModel.norm |
1 | 0 | 0 |
attr |
LongcatFlashModel.rotary_emb |
1 | 0 | 0 |
attr |
LongcatFlashModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
LongcatFlashModel.head_dim |
1 | 0 | 0 |
meth |
LongcatFlashForCausalLM.__init__ |
2 | 0 | 0 |
attr |
LongcatFlashForCausalLM.model |
1 | 0 | 0 |
attr |
LongcatFlashForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
LongcatFlashForCausalLM.lm_head |
1 | 0 | 0 |
meth |
LongcatFlashPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.longcat_flash.modular_longcat_flash (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LongcatFlashModel.__init__ |
2 | 0 | 0 |
meth |
LongcatFlashModel.forward |
9 | 8 | 0 |
attr |
LongcatFlashModel.layers |
1 | 0 | 0 |
attr |
LongcatFlashModel.head_dim |
1 | 0 | 0 |
attr |
LongcatFlashModel.norm |
1 | 0 | 0 |
attr |
LongcatFlashModel.rotary_emb |
1 | 0 | 0 |
attr |
LongcatFlashModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
LongcatFlashForCausalLM.__init__ |
2 | 0 | 0 |
attr |
LongcatFlashForCausalLM.model |
1 | 0 | 0 |
meth |
LongcatFlashPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.longformer.configuration_longformer (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LongformerConfig.__init__ |
21 | 18 | 0 |
attr |
LongformerConfig.pad_token_id |
1 | 0 | 0 |
attr |
LongformerConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
LongformerConfig.attention_window |
1 | 0 | 0 |
attr |
LongformerConfig.sep_token_id |
1 | 0 | 0 |
attr |
LongformerConfig.bos_token_id |
1 | 0 | 0 |
attr |
LongformerConfig.eos_token_id |
1 | 0 | 0 |
attr |
LongformerConfig.vocab_size |
1 | 0 | 0 |
attr |
LongformerConfig.hidden_size |
1 | 0 | 0 |
attr |
LongformerConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
LongformerConfig.num_attention_heads |
1 | 0 | 0 |
attr |
LongformerConfig.hidden_act |
1 | 0 | 0 |
attr |
LongformerConfig.intermediate_size |
1 | 0 | 0 |
attr |
LongformerConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
LongformerConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
LongformerConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
LongformerConfig.type_vocab_size |
1 | 0 | 0 |
attr |
LongformerConfig.initializer_range |
1 | 0 | 0 |
attr |
LongformerConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
LongformerConfig.onnx_export |
1 | 0 | 0 |
transformers.models.longformer.modeling_longformer (102 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LongformerForMultipleChoice.__init__ |
2 | 0 | 0 |
meth |
LongformerForMultipleChoice.forward |
12 | 11 | 0 |
attr |
LongformerForMultipleChoice.longformer |
1 | 0 | 0 |
attr |
LongformerForMultipleChoice.dropout |
1 | 0 | 0 |
attr |
LongformerForMultipleChoice.classifier |
1 | 0 | 0 |
meth |
LongformerSelfAttention.__init__ |
3 | 0 | 0 |
meth |
LongformerSelfAttention.forward |
7 | 0 | 0 |
meth |
LongformerSelfAttention._pad_and_transpose_last_two_dims |
3 | 0 | 0 |
meth |
LongformerSelfAttention._pad_and_diagonalize |
2 | 0 | 0 |
meth |
LongformerSelfAttention._chunk |
4 | 1 | 0 |
meth |
LongformerSelfAttention._mask_invalid_locations |
3 | 1 | 0 |
meth |
LongformerSelfAttention._sliding_chunks_query_key_matmul |
4 | 3 | 0 |
meth |
LongformerSelfAttention._sliding_chunks_matmul_attn_probs_value |
4 | 3 | 0 |
meth |
LongformerSelfAttention._get_global_attn_indices |
2 | 0 | 0 |
meth |
LongformerSelfAttention._concat_with_global_key_attn_probs |
7 | 0 | 0 |
meth |
LongformerSelfAttention._compute_attn_output_with_global_indices |
6 | 0 | 0 |
meth |
LongformerSelfAttention._compute_global_attn_output_from_hidden |
7 | 0 | 0 |
attr |
LongformerSelfAttention.num_heads |
1 | 0 | 0 |
attr |
LongformerSelfAttention.head_dim |
1 | 0 | 0 |
attr |
LongformerSelfAttention.embed_dim |
1 | 0 | 0 |
attr |
LongformerSelfAttention.query |
1 | 0 | 0 |
attr |
LongformerSelfAttention.key |
1 | 0 | 0 |
attr |
LongformerSelfAttention.value |
1 | 0 | 0 |
attr |
LongformerSelfAttention.query_global |
1 | 0 | 0 |
attr |
LongformerSelfAttention.key_global |
1 | 0 | 0 |
attr |
LongformerSelfAttention.value_global |
1 | 0 | 0 |
attr |
LongformerSelfAttention.dropout |
1 | 0 | 0 |
attr |
LongformerSelfAttention.layer_id |
1 | 0 | 0 |
attr |
LongformerSelfAttention.one_sided_attn_window_size |
1 | 0 | 0 |
attr |
LongformerSelfAttention.config |
1 | 0 | 0 |
meth |
LongformerForTokenClassification.__init__ |
2 | 0 | 0 |
meth |
LongformerForTokenClassification.forward |
12 | 11 | 0 |
attr |
LongformerForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
LongformerForTokenClassification.longformer |
1 | 0 | 0 |
attr |
LongformerForTokenClassification.dropout |
1 | 0 | 0 |
attr |
LongformerForTokenClassification.classifier |
1 | 0 | 0 |
meth |
LongformerModel.__init__ |
3 | 0 | 0 |
meth |
LongformerModel.get_input_embeddings |
1 | 0 | 0 |
meth |
LongformerModel.set_input_embeddings |
2 | 0 | 0 |
meth |
LongformerModel._pad_to_window_size |
7 | 6 | 0 |
meth |
LongformerModel._merge_to_attention_mask |
3 | 2 | 0 |
meth |
LongformerModel.forward |
11 | 10 | 0 |
attr |
LongformerModel.embeddings |
1 | 0 | 0 |
attr |
LongformerModel.encoder |
1 | 0 | 0 |
attr |
LongformerModel.pooler |
1 | 0 | 0 |
meth |
LongformerForMaskedLM.__init__ |
2 | 0 | 0 |
meth |
LongformerForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
LongformerForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
meth |
LongformerForMaskedLM.forward |
12 | 11 | 0 |
attr |
LongformerForMaskedLM.longformer |
1 | 0 | 0 |
attr |
LongformerForMaskedLM.lm_head |
1 | 0 | 0 |
meth |
LongformerForQuestionAnswering.__init__ |
2 | 0 | 0 |
meth |
LongformerForQuestionAnswering.forward |
13 | 12 | 0 |
attr |
LongformerForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
LongformerForQuestionAnswering.longformer |
1 | 0 | 0 |
attr |
LongformerForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
LongformerForSequenceClassification.__init__ |
2 | 0 | 0 |
meth |
LongformerForSequenceClassification.forward |
12 | 11 | 0 |
attr |
LongformerForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
LongformerForSequenceClassification.longformer |
1 | 0 | 0 |
attr |
LongformerForSequenceClassification.classifier |
1 | 0 | 0 |
transformers.models.longt5.configuration_longt5 (49 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LongT5Config.__init__ |
25 | 0 | 0 |
attr |
LongT5Config.is_decoder |
1 | 0 | 0 |
attr |
LongT5Config.vocab_size |
1 | 0 | 0 |
attr |
LongT5Config.d_model |
1 | 0 | 0 |
attr |
LongT5Config.d_kv |
1 | 0 | 0 |
attr |
LongT5Config.d_ff |
1 | 0 | 0 |
attr |
LongT5Config.num_layers |
1 | 0 | 0 |
attr |
LongT5Config.num_decoder_layers |
1 | 0 | 0 |
attr |
LongT5Config.num_heads |
1 | 0 | 0 |
attr |
LongT5Config.local_radius |
1 | 0 | 0 |
attr |
LongT5Config.global_block_size |
1 | 0 | 0 |
attr |
LongT5Config.relative_attention_num_buckets |
1 | 0 | 0 |
attr |
LongT5Config.relative_attention_max_distance |
1 | 0 | 0 |
attr |
LongT5Config.dropout_rate |
1 | 0 | 0 |
attr |
LongT5Config.layer_norm_epsilon |
1 | 0 | 0 |
attr |
LongT5Config.initializer_factor |
1 | 0 | 0 |
attr |
LongT5Config.feed_forward_proj |
1 | 0 | 0 |
attr |
LongT5Config.encoder_attention_type |
1 | 0 | 0 |
attr |
LongT5Config.use_cache |
1 | 0 | 0 |
attr |
LongT5Config.pad_token_id |
1 | 0 | 0 |
attr |
LongT5Config.bos_token_id |
1 | 0 | 0 |
attr |
LongT5Config.eos_token_id |
1 | 0 | 0 |
attr |
LongT5Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
LongT5Config.dense_act_fn |
1 | 0 | 0 |
attr |
LongT5Config.is_gated_act |
1 | 0 | 0 |
transformers.models.longt5.modeling_longt5 (31 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LongT5EncoderModel.__init__ |
2 | 1 | 0 |
meth |
LongT5EncoderModel.get_input_embeddings |
1 | 0 | 0 |
meth |
LongT5EncoderModel.set_input_embeddings |
2 | 0 | 0 |
meth |
LongT5EncoderModel.forward |
8 | 7 | 0 |
attr |
LongT5EncoderModel.shared |
1 | 0 | 0 |
attr |
LongT5EncoderModel.encoder |
1 | 0 | 0 |
meth |
LongT5PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
LongT5PreTrainedModel._shift_right |
2 | 0 | 0 |
prop |
LongT5PreTrainedModel.dummy_inputs |
1 | 0 | 0 |
meth |
LongT5Model.__init__ |
2 | 1 | 0 |
meth |
LongT5Model.get_input_embeddings |
1 | 0 | 0 |
meth |
LongT5Model.set_input_embeddings |
2 | 0 | 0 |
meth |
LongT5Model.forward |
15 | 14 | 0 |
attr |
LongT5Model.shared |
1 | 0 | 0 |
attr |
LongT5Model.encoder |
1 | 0 | 0 |
attr |
LongT5Model.decoder |
1 | 0 | 0 |
meth |
LongT5ForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
LongT5ForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
LongT5ForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
LongT5ForConditionalGeneration.forward |
16 | 15 | 0 |
meth |
LongT5ForConditionalGeneration.prepare_decoder_input_ids_from_labels |
2 | 1 | 0 |
attr |
LongT5ForConditionalGeneration.model_dim |
1 | 0 | 0 |
attr |
LongT5ForConditionalGeneration.shared |
1 | 0 | 0 |
attr |
LongT5ForConditionalGeneration.encoder |
1 | 0 | 0 |
attr |
LongT5ForConditionalGeneration.decoder |
1 | 0 | 0 |
attr |
LongT5ForConditionalGeneration.lm_head |
1 | 0 | 0 |
transformers.models.luke.configuration_luke (42 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LukeConfig.__init__ |
22 | 0 | 0 |
attr |
LukeConfig.pad_token_id |
1 | 0 | 0 |
attr |
LukeConfig.bos_token_id |
1 | 0 | 0 |
attr |
LukeConfig.eos_token_id |
1 | 0 | 0 |
attr |
LukeConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
LukeConfig.vocab_size |
1 | 0 | 0 |
attr |
LukeConfig.entity_vocab_size |
1 | 0 | 0 |
attr |
LukeConfig.hidden_size |
1 | 0 | 0 |
attr |
LukeConfig.entity_emb_size |
1 | 0 | 0 |
attr |
LukeConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
LukeConfig.num_attention_heads |
1 | 0 | 0 |
attr |
LukeConfig.hidden_act |
1 | 0 | 0 |
attr |
LukeConfig.intermediate_size |
1 | 0 | 0 |
attr |
LukeConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
LukeConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
LukeConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
LukeConfig.type_vocab_size |
1 | 0 | 0 |
attr |
LukeConfig.initializer_range |
1 | 0 | 0 |
attr |
LukeConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
LukeConfig.use_entity_aware_attention |
1 | 0 | 0 |
attr |
LukeConfig.classifier_dropout |
1 | 0 | 0 |
transformers.models.luke.modeling_luke (71 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LukeForEntitySpanClassification.__init__ |
2 | 0 | 0 |
meth |
LukeForEntitySpanClassification.forward |
17 | 16 | 0 |
attr |
LukeForEntitySpanClassification.luke |
1 | 0 | 0 |
attr |
LukeForEntitySpanClassification.num_labels |
1 | 0 | 0 |
attr |
LukeForEntitySpanClassification.dropout |
1 | 0 | 0 |
attr |
LukeForEntitySpanClassification.classifier |
1 | 0 | 0 |
meth |
LukeModel.__init__ |
3 | 2 | 0 |
meth |
LukeModel.get_input_embeddings |
1 | 0 | 0 |
meth |
LukeModel.set_input_embeddings |
2 | 0 | 0 |
meth |
LukeModel.get_entity_embeddings |
1 | 0 | 0 |
meth |
LukeModel.set_entity_embeddings |
2 | 0 | 0 |
meth |
LukeModel.forward |
14 | 13 | 0 |
meth |
LukeModel.get_extended_attention_mask |
3 | 2 | 0 |
attr |
LukeModel.embeddings |
1 | 0 | 0 |
attr |
LukeModel.entity_embeddings |
1 | 0 | 0 |
attr |
LukeModel.encoder |
1 | 0 | 0 |
attr |
LukeModel.pooler |
1 | 0 | 0 |
meth |
LukeForEntityPairClassification.__init__ |
2 | 0 | 0 |
meth |
LukeForEntityPairClassification.forward |
15 | 14 | 0 |
attr |
LukeForEntityPairClassification.luke |
1 | 0 | 0 |
attr |
LukeForEntityPairClassification.num_labels |
1 | 0 | 0 |
attr |
LukeForEntityPairClassification.dropout |
1 | 0 | 0 |
attr |
LukeForEntityPairClassification.classifier |
1 | 0 | 0 |
meth |
LukeForTokenClassification.__init__ |
2 | 0 | 0 |
meth |
LukeForTokenClassification.forward |
15 | 14 | 0 |
attr |
LukeForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
LukeForTokenClassification.luke |
1 | 0 | 0 |
attr |
LukeForTokenClassification.dropout |
1 | 0 | 0 |
attr |
LukeForTokenClassification.classifier |
1 | 0 | 0 |
meth |
LukeForQuestionAnswering.__init__ |
2 | 0 | 0 |
meth |
LukeForQuestionAnswering.forward |
16 | 15 | 0 |
attr |
LukeForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
LukeForQuestionAnswering.luke |
1 | 0 | 0 |
attr |
LukeForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
LukeForSequenceClassification.__init__ |
2 | 0 | 0 |
meth |
LukeForSequenceClassification.forward |
15 | 14 | 0 |
attr |
LukeForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
LukeForSequenceClassification.luke |
1 | 0 | 0 |
attr |
LukeForSequenceClassification.dropout |
1 | 0 | 0 |
attr |
LukeForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
LukeForMaskedLM.__init__ |
2 | 0 | 0 |
meth |
LukeForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
LukeForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
meth |
LukeForMaskedLM.forward |
16 | 15 | 0 |
attr |
LukeForMaskedLM.luke |
1 | 0 | 0 |
attr |
LukeForMaskedLM.lm_head |
1 | 0 | 0 |
attr |
LukeForMaskedLM.entity_predictions |
1 | 0 | 0 |
attr |
LukeForMaskedLM.loss_fn |
1 | 0 | 0 |
meth |
LukeForMultipleChoice.__init__ |
2 | 0 | 0 |
meth |
LukeForMultipleChoice.forward |
15 | 14 | 0 |
attr |
LukeForMultipleChoice.luke |
1 | 0 | 0 |
attr |
LukeForMultipleChoice.dropout |
1 | 0 | 0 |
attr |
LukeForMultipleChoice.classifier |
1 | 0 | 0 |
meth |
LukeForEntityClassification.__init__ |
2 | 0 | 0 |
meth |
LukeForEntityClassification.forward |
15 | 14 | 0 |
attr |
LukeForEntityClassification.luke |
1 | 0 | 0 |
attr |
LukeForEntityClassification.num_labels |
1 | 0 | 0 |
attr |
LukeForEntityClassification.dropout |
1 | 0 | 0 |
attr |
LukeForEntityClassification.classifier |
1 | 0 | 0 |
meth |
LukePreTrainedModel._init_weights |
2 | 1 | 0 |
transformers.models.luke.tokenization_luke (39 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LukeTokenizer.__init__ |
23 | 3 | 0 |
meth |
LukeTokenizer._decode |
5 | 4 | 0 |
meth |
LukeTokenizer.__call__ |
25 | 24 | 0 |
meth |
LukeTokenizer._encode_plus |
25 | 24 | 0 |
meth |
LukeTokenizer._batch_encode_plus |
22 | 21 | 0 |
meth |
LukeTokenizer._check_entity_input_format |
3 | 2 | 0 |
meth |
LukeTokenizer._create_input_sequence |
8 | 7 | 0 |
meth |
LukeTokenizer.prepare_for_model |
25 | 24 | 0 |
attr |
LukeTokenizer.add_prefix_space |
1 | 0 | 0 |
attr |
LukeTokenizer.entity_unk_token_id |
1 | 0 | 0 |
attr |
LukeTokenizer.entity_pad_token_id |
1 | 0 | 0 |
attr |
LukeTokenizer.entity_mask_token_id |
1 | 0 | 0 |
attr |
LukeTokenizer.entity_mask2_token_id |
1 | 0 | 0 |
attr |
LukeTokenizer.task |
1 | 0 | 0 |
attr |
LukeTokenizer.max_mention_length |
1 | 0 | 0 |
attr |
LukeTokenizer.token_type_ids_pattern |
1 | 0 | 0 |
attr |
LukeTokenizer.special_tokens_pattern |
1 | 0 | 0 |
attr |
LukeTokenizer.token_type_ids_include_special_tokens |
1 | 0 | 0 |
attr |
LukeTokenizer.entity_vocab |
1 | 0 | 0 |
attr |
LukeTokenizer.max_entity_length |
1 | 0 | 0 |
transformers.models.lw_detr.configuration_lw_detr (100 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LwDetrViTConfig.__init__ |
21 | 1 | 0 |
attr |
LwDetrViTConfig.hidden_size |
1 | 0 | 0 |
attr |
LwDetrViTConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
LwDetrViTConfig.num_attention_heads |
1 | 0 | 0 |
attr |
LwDetrViTConfig.mlp_ratio |
1 | 0 | 0 |
attr |
LwDetrViTConfig.hidden_act |
1 | 0 | 0 |
attr |
LwDetrViTConfig.dropout_prob |
1 | 0 | 0 |
attr |
LwDetrViTConfig.initializer_range |
1 | 0 | 0 |
attr |
LwDetrViTConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
LwDetrViTConfig.image_size |
1 | 0 | 0 |
attr |
LwDetrViTConfig.pretrain_image_size |
1 | 0 | 0 |
attr |
LwDetrViTConfig.patch_size |
1 | 0 | 0 |
attr |
LwDetrViTConfig.num_channels |
1 | 0 | 0 |
attr |
LwDetrViTConfig.qkv_bias |
1 | 0 | 0 |
attr |
LwDetrViTConfig.window_block_indices |
1 | 0 | 0 |
attr |
LwDetrViTConfig.use_absolute_position_embeddings |
1 | 0 | 0 |
attr |
LwDetrViTConfig.stage_names |
1 | 0 | 0 |
attr |
LwDetrViTConfig.cae_init_values |
1 | 0 | 0 |
attr |
LwDetrViTConfig.num_windows |
1 | 0 | 0 |
attr |
LwDetrViTConfig.num_windows_side |
1 | 0 | 0 |
meth |
LwDetrConfig.__init__ |
33 | 5 | 0 |
attr |
LwDetrConfig.batch_norm_eps |
1 | 0 | 0 |
attr |
LwDetrConfig.backbone_config |
1 | 0 | 0 |
attr |
LwDetrConfig.projector_scale_factors |
1 | 0 | 0 |
attr |
LwDetrConfig.projector_in_channels |
1 | 0 | 0 |
attr |
LwDetrConfig.projector_out_channels |
1 | 0 | 0 |
attr |
LwDetrConfig.activation_function |
1 | 0 | 0 |
attr |
LwDetrConfig.hidden_expansion |
1 | 0 | 0 |
attr |
LwDetrConfig.c2f_num_blocks |
1 | 0 | 0 |
attr |
LwDetrConfig.d_model |
1 | 0 | 0 |
attr |
LwDetrConfig.dropout |
1 | 0 | 0 |
attr |
LwDetrConfig.num_queries |
1 | 0 | 0 |
attr |
LwDetrConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
LwDetrConfig.num_feature_levels |
1 | 0 | 0 |
attr |
LwDetrConfig.decoder_n_points |
1 | 0 | 0 |
attr |
LwDetrConfig.decoder_layers |
1 | 0 | 0 |
attr |
LwDetrConfig.decoder_activation_function |
1 | 0 | 0 |
attr |
LwDetrConfig.decoder_self_attention_heads |
1 | 0 | 0 |
attr |
LwDetrConfig.decoder_cross_attention_heads |
1 | 0 | 0 |
attr |
LwDetrConfig.attention_bias |
1 | 0 | 0 |
attr |
LwDetrConfig.attention_dropout |
1 | 0 | 0 |
attr |
LwDetrConfig.activation_dropout |
1 | 0 | 0 |
attr |
LwDetrConfig.init_std |
1 | 0 | 0 |
attr |
LwDetrConfig.group_detr |
1 | 0 | 0 |
attr |
LwDetrConfig.auxiliary_loss |
1 | 0 | 0 |
attr |
LwDetrConfig.class_cost |
1 | 0 | 0 |
attr |
LwDetrConfig.bbox_cost |
1 | 0 | 0 |
attr |
LwDetrConfig.giou_cost |
1 | 0 | 0 |
attr |
LwDetrConfig.dice_loss_coefficient |
1 | 0 | 0 |
attr |
LwDetrConfig.bbox_loss_coefficient |
1 | 0 | 0 |
attr |
LwDetrConfig.giou_loss_coefficient |
1 | 0 | 0 |
attr |
LwDetrConfig.eos_coefficient |
1 | 0 | 0 |
attr |
LwDetrConfig.focal_alpha |
1 | 0 | 0 |
attr |
LwDetrConfig.disable_custom_kernels |
1 | 0 | 0 |
transformers.models.lw_detr.modeling_lw_detr (34 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LwDetrModel.__init__ |
2 | 1 | 0 |
meth |
LwDetrModel.freeze_backbone |
1 | 0 | 0 |
meth |
LwDetrModel.unfreeze_backbone |
1 | 0 | 0 |
meth |
LwDetrModel.get_valid_ratio |
3 | 0 | 0 |
meth |
LwDetrModel.get_proposal_pos_embed |
2 | 0 | 0 |
meth |
LwDetrModel.gen_encoder_output_proposals |
4 | 0 | 0 |
attr |
LwDetrModel.backbone |
1 | 0 | 0 |
attr |
LwDetrModel.group_detr |
1 | 0 | 0 |
attr |
LwDetrModel.num_queries |
1 | 0 | 0 |
attr |
LwDetrModel.reference_point_embed |
1 | 0 | 0 |
attr |
LwDetrModel.query_feat |
1 | 0 | 0 |
attr |
LwDetrModel.decoder |
1 | 0 | 0 |
attr |
LwDetrModel.enc_output |
1 | 0 | 0 |
attr |
LwDetrModel.enc_output_norm |
1 | 0 | 0 |
attr |
LwDetrModel.enc_out_bbox_embed |
1 | 0 | 0 |
attr |
LwDetrModel.enc_out_class_embed |
1 | 0 | 0 |
meth |
LwDetrViTBackbone.__init__ |
2 | 0 | 0 |
attr |
LwDetrViTBackbone.embeddings |
1 | 0 | 0 |
attr |
LwDetrViTBackbone.encoder |
1 | 0 | 0 |
attr |
LwDetrViTBackbone.num_features |
1 | 0 | 0 |
meth |
LwDetrPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
LwDetrForObjectDetection.__init__ |
2 | 1 | 0 |
attr |
LwDetrForObjectDetection.model |
1 | 0 | 0 |
attr |
LwDetrForObjectDetection.class_embed |
1 | 0 | 0 |
attr |
LwDetrForObjectDetection.bbox_embed |
1 | 0 | 0 |
meth |
LwDetrViTPreTrainedModel._init_weights |
2 | 1 | 0 |
transformers.models.lw_detr.modular_lw_detr (106 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LwDetrModel.__init__ |
2 | 1 | 0 |
meth |
LwDetrModel.gen_encoder_output_proposals |
4 | 0 | 0 |
attr |
LwDetrModel.backbone |
1 | 0 | 0 |
attr |
LwDetrModel.group_detr |
1 | 0 | 0 |
attr |
LwDetrModel.num_queries |
1 | 0 | 0 |
attr |
LwDetrModel.reference_point_embed |
1 | 0 | 0 |
attr |
LwDetrModel.query_feat |
1 | 0 | 0 |
attr |
LwDetrModel.decoder |
1 | 0 | 0 |
attr |
LwDetrModel.enc_output |
1 | 0 | 0 |
attr |
LwDetrModel.enc_output_norm |
1 | 0 | 0 |
attr |
LwDetrModel.enc_out_bbox_embed |
1 | 0 | 0 |
attr |
LwDetrModel.enc_out_class_embed |
1 | 0 | 0 |
meth |
LwDetrPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
LwDetrConfig.__init__ |
33 | 5 | 0 |
attr |
LwDetrConfig.batch_norm_eps |
1 | 0 | 0 |
attr |
LwDetrConfig.backbone_config |
1 | 0 | 0 |
attr |
LwDetrConfig.projector_scale_factors |
1 | 0 | 0 |
attr |
LwDetrConfig.projector_in_channels |
1 | 0 | 0 |
attr |
LwDetrConfig.projector_out_channels |
1 | 0 | 0 |
attr |
LwDetrConfig.activation_function |
1 | 0 | 0 |
attr |
LwDetrConfig.hidden_expansion |
1 | 0 | 0 |
attr |
LwDetrConfig.c2f_num_blocks |
1 | 0 | 0 |
attr |
LwDetrConfig.d_model |
1 | 0 | 0 |
attr |
LwDetrConfig.dropout |
1 | 0 | 0 |
attr |
LwDetrConfig.num_queries |
1 | 0 | 0 |
attr |
LwDetrConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
LwDetrConfig.num_feature_levels |
1 | 0 | 0 |
attr |
LwDetrConfig.decoder_n_points |
1 | 0 | 0 |
attr |
LwDetrConfig.decoder_layers |
1 | 0 | 0 |
attr |
LwDetrConfig.decoder_activation_function |
1 | 0 | 0 |
attr |
LwDetrConfig.decoder_self_attention_heads |
1 | 0 | 0 |
attr |
LwDetrConfig.decoder_cross_attention_heads |
1 | 0 | 0 |
attr |
LwDetrConfig.attention_bias |
1 | 0 | 0 |
attr |
LwDetrConfig.attention_dropout |
1 | 0 | 0 |
attr |
LwDetrConfig.activation_dropout |
1 | 0 | 0 |
attr |
LwDetrConfig.init_std |
1 | 0 | 0 |
attr |
LwDetrConfig.group_detr |
1 | 0 | 0 |
attr |
LwDetrConfig.auxiliary_loss |
1 | 0 | 0 |
attr |
LwDetrConfig.class_cost |
1 | 0 | 0 |
attr |
LwDetrConfig.bbox_cost |
1 | 0 | 0 |
attr |
LwDetrConfig.giou_cost |
1 | 0 | 0 |
attr |
LwDetrConfig.dice_loss_coefficient |
1 | 0 | 0 |
attr |
LwDetrConfig.bbox_loss_coefficient |
1 | 0 | 0 |
attr |
LwDetrConfig.giou_loss_coefficient |
1 | 0 | 0 |
attr |
LwDetrConfig.eos_coefficient |
1 | 0 | 0 |
attr |
LwDetrConfig.focal_alpha |
1 | 0 | 0 |
attr |
LwDetrConfig.disable_custom_kernels |
1 | 0 | 0 |
meth |
LwDetrForObjectDetection.__init__ |
2 | 1 | 0 |
attr |
LwDetrForObjectDetection.model |
1 | 0 | 0 |
attr |
LwDetrForObjectDetection.class_embed |
1 | 0 | 0 |
attr |
LwDetrForObjectDetection.bbox_embed |
1 | 0 | 0 |
meth |
LwDetrViTConfig.__init__ |
21 | 1 | 0 |
attr |
LwDetrViTConfig.cae_init_values |
1 | 0 | 0 |
attr |
LwDetrViTConfig.num_windows |
1 | 0 | 0 |
attr |
LwDetrViTConfig.num_windows_side |
1 | 0 | 0 |
meth |
LwDetrViTPreTrainedModel._init_weights |
2 | 1 | 0 |
transformers.models.lxmert.configuration_lxmert (63 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LxmertConfig.__init__ |
32 | 0 | 0 |
attr |
LxmertConfig.vocab_size |
1 | 0 | 0 |
attr |
LxmertConfig.hidden_size |
1 | 0 | 0 |
attr |
LxmertConfig.num_attention_heads |
1 | 0 | 0 |
attr |
LxmertConfig.hidden_act |
1 | 0 | 0 |
attr |
LxmertConfig.intermediate_size |
1 | 0 | 0 |
attr |
LxmertConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
LxmertConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
LxmertConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
LxmertConfig.type_vocab_size |
1 | 0 | 0 |
attr |
LxmertConfig.initializer_range |
1 | 0 | 0 |
attr |
LxmertConfig.num_qa_labels |
1 | 0 | 0 |
attr |
LxmertConfig.num_object_labels |
1 | 0 | 0 |
attr |
LxmertConfig.num_attr_labels |
1 | 0 | 0 |
attr |
LxmertConfig.l_layers |
1 | 0 | 0 |
attr |
LxmertConfig.x_layers |
1 | 0 | 0 |
attr |
LxmertConfig.r_layers |
1 | 0 | 0 |
attr |
LxmertConfig.visual_feat_dim |
1 | 0 | 0 |
attr |
LxmertConfig.visual_pos_dim |
1 | 0 | 0 |
attr |
LxmertConfig.visual_loss_normalizer |
1 | 0 | 0 |
attr |
LxmertConfig.task_matched |
1 | 0 | 0 |
attr |
LxmertConfig.task_mask_lm |
1 | 0 | 0 |
attr |
LxmertConfig.task_obj_predict |
1 | 0 | 0 |
attr |
LxmertConfig.task_qa |
1 | 0 | 0 |
attr |
LxmertConfig.visual_obj_loss |
1 | 0 | 0 |
attr |
LxmertConfig.visual_attr_loss |
1 | 0 | 0 |
attr |
LxmertConfig.visual_feat_loss |
1 | 0 | 0 |
attr |
LxmertConfig.pad_token_id |
1 | 0 | 0 |
attr |
LxmertConfig.bos_token_id |
1 | 0 | 0 |
attr |
LxmertConfig.eos_token_id |
1 | 0 | 0 |
attr |
LxmertConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
LxmertConfig.num_hidden_layers |
1 | 0 | 0 |
transformers.models.lxmert.modeling_lxmert (110 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LxmertXLayer.__init__ |
2 | 0 | 0 |
meth |
LxmertXLayer.cross_att |
6 | 0 | 0 |
meth |
LxmertXLayer.self_att |
5 | 0 | 0 |
meth |
LxmertXLayer.output_fc |
3 | 0 | 0 |
meth |
LxmertXLayer.forward |
6 | 0 | 0 |
attr |
LxmertXLayer.visual_attention |
1 | 0 | 0 |
attr |
LxmertXLayer.lang_self_att |
1 | 0 | 0 |
attr |
LxmertXLayer.visn_self_att |
1 | 0 | 0 |
attr |
LxmertXLayer.lang_inter |
1 | 0 | 0 |
attr |
LxmertXLayer.lang_output |
1 | 0 | 0 |
attr |
LxmertXLayer.visn_inter |
1 | 0 | 0 |
attr |
LxmertXLayer.visn_output |
1 | 0 | 0 |
meth |
LxmertForPreTraining.__init__ |
2 | 0 | 0 |
meth |
LxmertForPreTraining._resize_bias |
3 | 1 | 0 |
meth |
LxmertForPreTraining.resize_num_qa_labels |
2 | 0 | 0 |
meth |
LxmertForPreTraining._resize_qa_labels |
2 | 0 | 0 |
meth |
LxmertForPreTraining._set_qa_logit_layer |
2 | 0 | 0 |
meth |
LxmertForPreTraining._get_resized_qa_labels |
3 | 0 | 0 |
meth |
LxmertForPreTraining.forward |
16 | 15 | 0 |
attr |
LxmertForPreTraining.num_qa_labels |
1 | 0 | 0 |
attr |
LxmertForPreTraining.visual_loss_normalizer |
1 | 0 | 0 |
attr |
LxmertForPreTraining.task_mask_lm |
1 | 0 | 0 |
attr |
LxmertForPreTraining.task_obj_predict |
1 | 0 | 0 |
attr |
LxmertForPreTraining.task_matched |
1 | 0 | 0 |
attr |
LxmertForPreTraining.task_qa |
1 | 0 | 0 |
attr |
LxmertForPreTraining.lxmert |
1 | 0 | 0 |
attr |
LxmertForPreTraining.cls |
1 | 0 | 0 |
attr |
LxmertForPreTraining.loss_fcts |
1 | 0 | 0 |
attr |
LxmertForPreTraining.visual_losses |
1 | 0 | 0 |
attr |
LxmertForPreTraining.obj_predict_head |
1 | 0 | 0 |
attr |
LxmertForPreTraining.answer_head |
1 | 0 | 0 |
meth |
LxmertModel.__init__ |
2 | 0 | 0 |
meth |
LxmertModel.get_input_embeddings |
1 | 0 | 0 |
meth |
LxmertModel.set_input_embeddings |
2 | 0 | 0 |
meth |
LxmertModel.forward |
12 | 11 | 0 |
attr |
LxmertModel.embeddings |
1 | 0 | 0 |
attr |
LxmertModel.encoder |
1 | 0 | 0 |
attr |
LxmertModel.pooler |
1 | 0 | 0 |
meth |
LxmertPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
LxmertVisualFeatureEncoder.__init__ |
2 | 0 | 0 |
meth |
LxmertVisualFeatureEncoder.forward |
3 | 0 | 0 |
attr |
LxmertVisualFeatureEncoder.visn_fc |
1 | 0 | 0 |
attr |
LxmertVisualFeatureEncoder.visn_layer_norm |
1 | 0 | 0 |
attr |
LxmertVisualFeatureEncoder.box_fc |
1 | 0 | 0 |
attr |
LxmertVisualFeatureEncoder.box_layer_norm |
1 | 0 | 0 |
attr |
LxmertVisualFeatureEncoder.dropout |
1 | 0 | 0 |
meth |
LxmertForQuestionAnswering.__init__ |
2 | 0 | 0 |
meth |
LxmertForQuestionAnswering.resize_num_qa_labels |
2 | 0 | 0 |
meth |
LxmertForQuestionAnswering._resize_qa_labels |
2 | 0 | 0 |
meth |
LxmertForQuestionAnswering._set_qa_logit_layer |
2 | 0 | 0 |
meth |
LxmertForQuestionAnswering._get_resized_qa_labels |
3 | 0 | 0 |
meth |
LxmertForQuestionAnswering.forward |
13 | 12 | 0 |
attr |
LxmertForQuestionAnswering.num_qa_labels |
1 | 0 | 0 |
attr |
LxmertForQuestionAnswering.visual_loss_normalizer |
1 | 0 | 0 |
attr |
LxmertForQuestionAnswering.lxmert |
1 | 0 | 0 |
attr |
LxmertForQuestionAnswering.answer_head |
1 | 0 | 0 |
attr |
LxmertForQuestionAnswering.loss |
1 | 0 | 0 |
meth |
LxmertEncoder.__init__ |
2 | 0 | 0 |
meth |
LxmertEncoder.forward |
7 | 0 | 0 |
attr |
LxmertEncoder.visn_fc |
1 | 0 | 0 |
attr |
LxmertEncoder.config |
1 | 0 | 0 |
attr |
LxmertEncoder.num_l_layers |
1 | 0 | 0 |
attr |
LxmertEncoder.num_x_layers |
1 | 0 | 0 |
attr |
LxmertEncoder.num_r_layers |
1 | 0 | 0 |
attr |
LxmertEncoder.layer |
1 | 0 | 0 |
attr |
LxmertEncoder.x_layers |
1 | 0 | 0 |
attr |
LxmertEncoder.r_layers |
1 | 0 | 0 |
transformers.models.m2m_100.configuration_m2m_100 (50 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
M2M100Config.__init__ |
26 | 0 | 0 |
attr |
M2M100Config.vocab_size |
1 | 0 | 0 |
attr |
M2M100Config.max_position_embeddings |
1 | 0 | 0 |
attr |
M2M100Config.d_model |
1 | 0 | 0 |
attr |
M2M100Config.encoder_ffn_dim |
1 | 0 | 0 |
attr |
M2M100Config.encoder_layers |
1 | 0 | 0 |
attr |
M2M100Config.encoder_attention_heads |
1 | 0 | 0 |
attr |
M2M100Config.decoder_ffn_dim |
1 | 0 | 0 |
attr |
M2M100Config.decoder_layers |
1 | 0 | 0 |
attr |
M2M100Config.decoder_attention_heads |
1 | 0 | 0 |
attr |
M2M100Config.dropout |
1 | 0 | 0 |
attr |
M2M100Config.attention_dropout |
1 | 0 | 0 |
attr |
M2M100Config.activation_dropout |
1 | 0 | 0 |
attr |
M2M100Config.activation_function |
1 | 0 | 0 |
attr |
M2M100Config.init_std |
1 | 0 | 0 |
attr |
M2M100Config.encoder_layerdrop |
1 | 0 | 0 |
attr |
M2M100Config.decoder_layerdrop |
1 | 0 | 0 |
attr |
M2M100Config.use_cache |
1 | 0 | 0 |
attr |
M2M100Config.num_hidden_layers |
1 | 0 | 0 |
attr |
M2M100Config.scale_embedding |
1 | 0 | 0 |
attr |
M2M100Config.pad_token_id |
1 | 0 | 0 |
attr |
M2M100Config.bos_token_id |
1 | 0 | 0 |
attr |
M2M100Config.eos_token_id |
1 | 0 | 0 |
attr |
M2M100Config.decoder_start_token_id |
1 | 0 | 0 |
attr |
M2M100Config.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.m2m_100.modeling_m2m_100 (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
M2M100PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
M2M100ForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
M2M100ForConditionalGeneration.forward |
16 | 15 | 0 |
attr |
M2M100ForConditionalGeneration.model |
1 | 0 | 0 |
attr |
M2M100ForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
M2M100Model.__init__ |
2 | 1 | 0 |
meth |
M2M100Model.get_input_embeddings |
1 | 0 | 0 |
meth |
M2M100Model.set_input_embeddings |
2 | 0 | 0 |
meth |
M2M100Model.forward |
15 | 14 | 0 |
attr |
M2M100Model.shared |
1 | 0 | 0 |
attr |
M2M100Model.encoder |
1 | 0 | 0 |
attr |
M2M100Model.decoder |
1 | 0 | 0 |
transformers.models.m2m_100.tokenization_m2m_100 (37 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
M2M100Tokenizer.__init__ |
14 | 2 | 0 |
meth |
M2M100Tokenizer._convert_token_to_id |
2 | 0 | 0 |
meth |
M2M100Tokenizer.convert_tokens_to_string |
2 | 0 | 0 |
meth |
M2M100Tokenizer.prepare_seq2seq_batch |
6 | 5 | 0 |
meth |
M2M100Tokenizer._build_translation_inputs |
5 | 2 | 0 |
meth |
M2M100Tokenizer._switch_to_input_mode |
1 | 0 | 0 |
meth |
M2M100Tokenizer._switch_to_target_mode |
1 | 0 | 0 |
attr |
M2M100Tokenizer.sp_model_kwargs |
1 | 0 | 0 |
attr |
M2M100Tokenizer.language_codes |
1 | 0 | 0 |
attr |
M2M100Tokenizer.lang_code_to_token |
1 | 0 | 0 |
attr |
M2M100Tokenizer.vocab_file |
1 | 0 | 0 |
attr |
M2M100Tokenizer.encoder |
1 | 0 | 0 |
attr |
M2M100Tokenizer.decoder |
1 | 0 | 0 |
attr |
M2M100Tokenizer.spm_file |
1 | 0 | 0 |
attr |
M2M100Tokenizer.sp_model |
1 | 0 | 0 |
attr |
M2M100Tokenizer.encoder_size |
1 | 0 | 0 |
attr |
M2M100Tokenizer.lang_token_to_id |
1 | 0 | 0 |
attr |
M2M100Tokenizer.lang_code_to_id |
1 | 0 | 0 |
attr |
M2M100Tokenizer.id_to_lang_token |
1 | 0 | 0 |
attr |
M2M100Tokenizer.tgt_lang |
1 | 0 | 0 |
attr |
M2M100Tokenizer.cur_lang_id |
1 | 0 | 0 |
attr |
M2M100Tokenizer.num_madeup_words |
1 | 0 | 0 |
transformers.models.mamba.configuration_mamba (55 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MambaConfig.__init__ |
28 | 0 | 0 |
attr |
MambaConfig.vocab_size |
1 | 0 | 0 |
attr |
MambaConfig.hidden_size |
1 | 0 | 0 |
attr |
MambaConfig.state_size |
1 | 0 | 0 |
attr |
MambaConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
MambaConfig.layer_norm_epsilon |
1 | 0 | 0 |
attr |
MambaConfig.conv_kernel |
1 | 0 | 0 |
attr |
MambaConfig.expand |
1 | 0 | 0 |
attr |
MambaConfig.intermediate_size |
1 | 0 | 0 |
attr |
MambaConfig.bos_token_id |
1 | 0 | 0 |
attr |
MambaConfig.eos_token_id |
1 | 0 | 0 |
attr |
MambaConfig.pad_token_id |
1 | 0 | 0 |
attr |
MambaConfig.use_bias |
1 | 0 | 0 |
attr |
MambaConfig.use_conv_bias |
1 | 0 | 0 |
attr |
MambaConfig.hidden_act |
1 | 0 | 0 |
attr |
MambaConfig.initializer_range |
1 | 0 | 0 |
attr |
MambaConfig.time_step_rank |
1 | 0 | 0 |
attr |
MambaConfig.time_step_scale |
1 | 0 | 0 |
attr |
MambaConfig.time_step_min |
1 | 0 | 0 |
attr |
MambaConfig.time_step_max |
1 | 0 | 0 |
attr |
MambaConfig.time_step_init_scheme |
1 | 0 | 0 |
attr |
MambaConfig.time_step_floor |
1 | 0 | 0 |
attr |
MambaConfig.rescale_prenorm_residual |
1 | 0 | 0 |
attr |
MambaConfig.residual_in_fp32 |
1 | 0 | 0 |
attr |
MambaConfig.use_cache |
1 | 0 | 0 |
attr |
MambaConfig.use_mambapy |
1 | 0 | 0 |
attr |
MambaConfig.use_associative_scan |
1 | 0 | 0 |
attr |
MambaConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.mamba.modeling_mamba (37 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MambaCache.__init__ |
5 | 4 | 0 |
meth |
MambaCache.update_ssm_state |
3 | 2 | 0 |
meth |
MambaCache.reset |
1 | 0 | 0 |
attr |
MambaCache.max_batch_size |
1 | 0 | 0 |
attr |
MambaCache.intermediate_size |
1 | 0 | 0 |
attr |
MambaCache.ssm_state_size |
1 | 0 | 0 |
attr |
MambaCache.conv_kernel_size |
1 | 0 | 0 |
meth |
MambaPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
MambaForCausalLM.__init__ |
2 | 0 | 0 |
meth |
MambaForCausalLM.get_input_embeddings |
1 | 0 | 0 |
meth |
MambaForCausalLM.set_input_embeddings |
2 | 0 | 0 |
meth |
MambaForCausalLM._update_model_kwargs_for_generation |
5 | 4 | 0 |
meth |
MambaForCausalLM.prepare_inputs_for_generation |
9 | 4 | 0 |
meth |
MambaForCausalLM.forward |
12 | 11 | 0 |
attr |
MambaForCausalLM.backbone |
1 | 0 | 0 |
attr |
MambaForCausalLM.lm_head |
1 | 0 | 0 |
meth |
MambaModel.__init__ |
2 | 0 | 0 |
meth |
MambaModel.load_hook |
4 | 0 | 0 |
meth |
MambaModel.get_input_embeddings |
1 | 0 | 0 |
meth |
MambaModel.set_input_embeddings |
2 | 0 | 0 |
meth |
MambaModel.forward |
10 | 9 | 0 |
attr |
MambaModel.embeddings |
1 | 0 | 0 |
attr |
MambaModel.layers |
1 | 0 | 0 |
attr |
MambaModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
MambaModel.norm_f |
1 | 0 | 0 |
transformers.models.mamba2.configuration_mamba2 (58 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Mamba2Config.__init__ |
30 | 0 | 0 |
attr |
Mamba2Config.vocab_size |
1 | 0 | 0 |
attr |
Mamba2Config.hidden_size |
1 | 0 | 0 |
attr |
Mamba2Config.state_size |
1 | 0 | 0 |
attr |
Mamba2Config.num_hidden_layers |
1 | 0 | 0 |
attr |
Mamba2Config.layer_norm_epsilon |
1 | 0 | 0 |
attr |
Mamba2Config.conv_kernel |
1 | 0 | 0 |
attr |
Mamba2Config.expand |
1 | 0 | 0 |
attr |
Mamba2Config.bos_token_id |
1 | 0 | 0 |
attr |
Mamba2Config.eos_token_id |
1 | 0 | 0 |
attr |
Mamba2Config.pad_token_id |
1 | 0 | 0 |
attr |
Mamba2Config.use_bias |
1 | 0 | 0 |
attr |
Mamba2Config.use_conv_bias |
1 | 0 | 0 |
attr |
Mamba2Config.hidden_act |
1 | 0 | 0 |
attr |
Mamba2Config.initializer_range |
1 | 0 | 0 |
attr |
Mamba2Config.time_step_rank |
1 | 0 | 0 |
attr |
Mamba2Config.time_step_min |
1 | 0 | 0 |
attr |
Mamba2Config.time_step_max |
1 | 0 | 0 |
attr |
Mamba2Config.time_step_floor |
1 | 0 | 0 |
attr |
Mamba2Config.rescale_prenorm_residual |
1 | 0 | 0 |
attr |
Mamba2Config.residual_in_fp32 |
1 | 0 | 0 |
attr |
Mamba2Config.use_cache |
1 | 0 | 0 |
attr |
Mamba2Config.n_groups |
1 | 0 | 0 |
attr |
Mamba2Config.num_heads |
1 | 0 | 0 |
attr |
Mamba2Config.head_dim |
1 | 0 | 0 |
attr |
Mamba2Config.rms_norm |
1 | 0 | 0 |
attr |
Mamba2Config.chunk_size |
1 | 0 | 0 |
attr |
Mamba2Config.time_step_limit |
1 | 0 | 0 |
attr |
Mamba2Config.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.mamba2.modeling_mamba2 (29 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Mamba2ForCausalLM.__init__ |
2 | 0 | 0 |
meth |
Mamba2ForCausalLM.get_input_embeddings |
1 | 0 | 0 |
meth |
Mamba2ForCausalLM.set_input_embeddings |
2 | 0 | 0 |
meth |
Mamba2ForCausalLM.prepare_inputs_for_generation |
9 | 4 | 0 |
meth |
Mamba2ForCausalLM.forward |
12 | 11 | 0 |
attr |
Mamba2ForCausalLM.backbone |
1 | 0 | 0 |
attr |
Mamba2ForCausalLM.lm_head |
1 | 0 | 0 |
meth |
Mamba2PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Mamba2Model.__init__ |
2 | 0 | 0 |
meth |
Mamba2Model.load_hook |
4 | 0 | 0 |
meth |
Mamba2Model.get_input_embeddings |
1 | 0 | 0 |
meth |
Mamba2Model.set_input_embeddings |
2 | 0 | 0 |
meth |
Mamba2Model.forward |
10 | 9 | 0 |
attr |
Mamba2Model.embeddings |
1 | 0 | 0 |
attr |
Mamba2Model.layers |
1 | 0 | 0 |
attr |
Mamba2Model.gradient_checkpointing |
1 | 0 | 0 |
attr |
Mamba2Model.norm_f |
1 | 0 | 0 |
transformers.models.marian.configuration_marian (57 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MarianConfig.__init__ |
30 | 0 | 0 |
attr |
MarianConfig.is_decoder |
1 | 0 | 0 |
attr |
MarianConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
MarianConfig.vocab_size |
1 | 0 | 0 |
attr |
MarianConfig.decoder_vocab_size |
1 | 0 | 0 |
attr |
MarianConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
MarianConfig.d_model |
1 | 0 | 0 |
attr |
MarianConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
MarianConfig.encoder_layers |
1 | 0 | 0 |
attr |
MarianConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
MarianConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
MarianConfig.decoder_layers |
1 | 0 | 0 |
attr |
MarianConfig.decoder_attention_heads |
1 | 0 | 0 |
attr |
MarianConfig.dropout |
1 | 0 | 0 |
attr |
MarianConfig.attention_dropout |
1 | 0 | 0 |
attr |
MarianConfig.activation_dropout |
1 | 0 | 0 |
attr |
MarianConfig.activation_function |
1 | 0 | 0 |
attr |
MarianConfig.init_std |
1 | 0 | 0 |
attr |
MarianConfig.encoder_layerdrop |
1 | 0 | 0 |
attr |
MarianConfig.decoder_layerdrop |
1 | 0 | 0 |
attr |
MarianConfig.use_cache |
1 | 0 | 0 |
attr |
MarianConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
MarianConfig.scale_embedding |
1 | 0 | 0 |
attr |
MarianConfig.share_encoder_decoder_embeddings |
1 | 0 | 0 |
attr |
MarianConfig.pad_token_id |
1 | 0 | 0 |
attr |
MarianConfig.eos_token_id |
1 | 0 | 0 |
attr |
MarianConfig.bos_token_id |
1 | 0 | 0 |
attr |
MarianConfig.decoder_start_token_id |
1 | 0 | 0 |
transformers.models.marian.modeling_marian (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MarianForCausalLM.__init__ |
2 | 0 | 0 |
meth |
MarianForCausalLM.get_input_embeddings |
1 | 0 | 0 |
meth |
MarianForCausalLM.set_input_embeddings |
2 | 0 | 0 |
meth |
MarianForCausalLM.forward |
15 | 14 | 0 |
attr |
MarianForCausalLM.model |
1 | 0 | 0 |
attr |
MarianForCausalLM.lm_head |
1 | 0 | 0 |
meth |
MarianMTModel.__init__ |
2 | 1 | 0 |
meth |
MarianMTModel._resize_token_embeddings |
4 | 2 | 0 |
meth |
MarianMTModel.resize_decoder_token_embeddings |
2 | 0 | 0 |
meth |
MarianMTModel.set_output_embeddings |
2 | 1 | 0 |
meth |
MarianMTModel.forward |
16 | 15 | 0 |
meth |
MarianMTModel.prepare_decoder_input_ids_from_labels |
2 | 1 | 0 |
attr |
MarianMTModel.model |
1 | 0 | 0 |
attr |
MarianMTModel.lm_head |
1 | 0 | 0 |
meth |
MarianPreTrainedModel._init_weights |
2 | 0 | 0 |
prop |
MarianPreTrainedModel.dummy_inputs |
1 | 0 | 0 |
meth |
MarianModel.__init__ |
2 | 1 | 0 |
meth |
MarianModel.get_input_embeddings |
1 | 0 | 0 |
meth |
MarianModel.set_input_embeddings |
2 | 0 | 0 |
meth |
MarianModel.get_decoder_input_embeddings |
1 | 0 | 0 |
meth |
MarianModel.set_decoder_input_embeddings |
2 | 0 | 0 |
meth |
MarianModel.forward |
15 | 14 | 0 |
attr |
MarianModel.encoder |
1 | 0 | 0 |
attr |
MarianModel.decoder |
1 | 0 | 0 |
attr |
MarianModel.shared |
1 | 0 | 0 |
transformers.models.marian.tokenization_marian (47 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MarianTokenizer.__init__ |
14 | 2 | 0 |
meth |
MarianTokenizer._setup_normalizer |
1 | 0 | 0 |
meth |
MarianTokenizer._convert_token_to_id |
2 | 0 | 0 |
meth |
MarianTokenizer.remove_language_code |
2 | 1 | 0 |
meth |
MarianTokenizer.batch_decode |
3 | 0 | 0 |
meth |
MarianTokenizer.decode |
3 | 0 | 0 |
meth |
MarianTokenizer._decode |
5 | 3 | 0 |
meth |
MarianTokenizer.build_inputs_with_special_tokens |
3 | 1 | 0 |
meth |
MarianTokenizer._switch_to_input_mode |
1 | 0 | 0 |
meth |
MarianTokenizer._switch_to_target_mode |
1 | 0 | 0 |
meth |
MarianTokenizer.get_src_vocab |
1 | 0 | 0 |
meth |
MarianTokenizer.get_tgt_vocab |
1 | 0 | 0 |
meth |
MarianTokenizer.num_special_tokens_to_add |
3 | 0 | 0 |
meth |
MarianTokenizer._special_token_mask |
2 | 0 | 0 |
attr |
MarianTokenizer.sp_model_kwargs |
1 | 0 | 0 |
attr |
MarianTokenizer.separate_vocabs |
1 | 0 | 0 |
attr |
MarianTokenizer.encoder |
1 | 0 | 0 |
attr |
MarianTokenizer.source_lang |
1 | 0 | 0 |
attr |
MarianTokenizer.target_lang |
1 | 0 | 0 |
attr |
MarianTokenizer.spm_files |
1 | 0 | 0 |
attr |
MarianTokenizer.spm_source |
1 | 0 | 0 |
attr |
MarianTokenizer.spm_target |
1 | 0 | 0 |
attr |
MarianTokenizer.current_spm |
1 | 0 | 0 |
attr |
MarianTokenizer.current_encoder |
1 | 0 | 0 |
attr |
MarianTokenizer.target_encoder |
1 | 0 | 0 |
attr |
MarianTokenizer.decoder |
1 | 0 | 0 |
transformers.models.markuplm.configuration_markuplm (48 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MarkupLMConfig.__init__ |
25 | 0 | 0 |
attr |
MarkupLMConfig.pad_token_id |
1 | 0 | 0 |
attr |
MarkupLMConfig.bos_token_id |
1 | 0 | 0 |
attr |
MarkupLMConfig.eos_token_id |
1 | 0 | 0 |
attr |
MarkupLMConfig.vocab_size |
1 | 0 | 0 |
attr |
MarkupLMConfig.hidden_size |
1 | 0 | 0 |
attr |
MarkupLMConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
MarkupLMConfig.num_attention_heads |
1 | 0 | 0 |
attr |
MarkupLMConfig.hidden_act |
1 | 0 | 0 |
attr |
MarkupLMConfig.intermediate_size |
1 | 0 | 0 |
attr |
MarkupLMConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
MarkupLMConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
MarkupLMConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
MarkupLMConfig.type_vocab_size |
1 | 0 | 0 |
attr |
MarkupLMConfig.initializer_range |
1 | 0 | 0 |
attr |
MarkupLMConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
MarkupLMConfig.use_cache |
1 | 0 | 0 |
attr |
MarkupLMConfig.classifier_dropout |
1 | 0 | 0 |
attr |
MarkupLMConfig.max_depth |
1 | 0 | 0 |
attr |
MarkupLMConfig.max_xpath_tag_unit_embeddings |
1 | 0 | 0 |
attr |
MarkupLMConfig.max_xpath_subs_unit_embeddings |
1 | 0 | 0 |
attr |
MarkupLMConfig.tag_pad_id |
1 | 0 | 0 |
attr |
MarkupLMConfig.subs_pad_id |
1 | 0 | 0 |
attr |
MarkupLMConfig.xpath_unit_hidden_size |
1 | 0 | 0 |
transformers.models.markuplm.feature_extraction_markuplm (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MarkupLMFeatureExtractor.__init__ |
2 | 0 | 0 |
meth |
MarkupLMFeatureExtractor.xpath_soup |
2 | 0 | 0 |
meth |
MarkupLMFeatureExtractor.get_three_from_single |
2 | 0 | 0 |
meth |
MarkupLMFeatureExtractor.construct_xpath |
3 | 0 | 0 |
meth |
MarkupLMFeatureExtractor.__call__ |
2 | 1 | 0 |
transformers.models.markuplm.modeling_markuplm (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MarkupLMForSequenceClassification.__init__ |
2 | 0 | 0 |
meth |
MarkupLMForSequenceClassification.forward |
13 | 12 | 0 |
attr |
MarkupLMForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
MarkupLMForSequenceClassification.markuplm |
1 | 0 | 0 |
attr |
MarkupLMForSequenceClassification.dropout |
1 | 0 | 0 |
attr |
MarkupLMForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
MarkupLMForQuestionAnswering.__init__ |
2 | 0 | 0 |
meth |
MarkupLMForQuestionAnswering.forward |
14 | 13 | 0 |
attr |
MarkupLMForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
MarkupLMForQuestionAnswering.markuplm |
1 | 0 | 0 |
attr |
MarkupLMForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
MarkupLMForTokenClassification.__init__ |
2 | 0 | 0 |
meth |
MarkupLMForTokenClassification.forward |
13 | 12 | 0 |
attr |
MarkupLMForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
MarkupLMForTokenClassification.markuplm |
1 | 0 | 0 |
attr |
MarkupLMForTokenClassification.dropout |
1 | 0 | 0 |
attr |
MarkupLMForTokenClassification.classifier |
1 | 0 | 0 |
meth |
MarkupLMModel.__init__ |
3 | 0 | 0 |
meth |
MarkupLMModel.get_input_embeddings |
1 | 0 | 0 |
meth |
MarkupLMModel.set_input_embeddings |
2 | 0 | 0 |
meth |
MarkupLMModel.forward |
12 | 11 | 0 |
attr |
MarkupLMModel.embeddings |
1 | 0 | 0 |
attr |
MarkupLMModel.encoder |
1 | 0 | 0 |
attr |
MarkupLMModel.pooler |
1 | 0 | 0 |
meth |
MarkupLMPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.markuplm.processing_markuplm (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MarkupLMProcessor.__init__ |
3 | 0 | 0 |
meth |
MarkupLMProcessor.__call__ |
21 | 15 | 0 |
transformers.models.markuplm.tokenization_markuplm (35 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MarkupLMTokenizer.__init__ |
20 | 2 | 0 |
meth |
MarkupLMTokenizer.get_xpath_seq |
2 | 0 | 0 |
meth |
MarkupLMTokenizer.__call__ |
22 | 21 | 0 |
meth |
MarkupLMTokenizer.batch_encode_plus |
21 | 20 | 0 |
meth |
MarkupLMTokenizer.tokenize |
5 | 4 | 0 |
meth |
MarkupLMTokenizer.encode_plus |
21 | 20 | 0 |
meth |
MarkupLMTokenizer._encode_plus |
21 | 20 | 0 |
attr |
MarkupLMTokenizer.tags_dict |
1 | 0 | 0 |
attr |
MarkupLMTokenizer.max_depth |
1 | 0 | 0 |
attr |
MarkupLMTokenizer.max_width |
1 | 0 | 0 |
attr |
MarkupLMTokenizer.pad_width |
1 | 0 | 0 |
attr |
MarkupLMTokenizer.unk_tag_id |
1 | 0 | 0 |
attr |
MarkupLMTokenizer.pad_tag_id |
1 | 0 | 0 |
attr |
MarkupLMTokenizer.pad_xpath_tags_seq |
1 | 0 | 0 |
attr |
MarkupLMTokenizer.pad_xpath_subs_seq |
1 | 0 | 0 |
attr |
MarkupLMTokenizer.pad_token_label |
1 | 0 | 0 |
attr |
MarkupLMTokenizer.only_label_first_subword |
1 | 0 | 0 |
transformers.models.mask2former.configuration_mask2former (31 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Mask2FormerConfig.__init__ |
30 | 28 | 0 |
attr |
Mask2FormerConfig.backbone_config |
1 | 0 | 0 |
attr |
Mask2FormerConfig.feature_size |
1 | 0 | 0 |
attr |
Mask2FormerConfig.mask_feature_size |
1 | 0 | 0 |
attr |
Mask2FormerConfig.hidden_dim |
1 | 0 | 0 |
attr |
Mask2FormerConfig.encoder_feedforward_dim |
1 | 0 | 0 |
attr |
Mask2FormerConfig.activation_function |
1 | 0 | 0 |
attr |
Mask2FormerConfig.encoder_layers |
1 | 0 | 0 |
attr |
Mask2FormerConfig.decoder_layers |
1 | 0 | 0 |
attr |
Mask2FormerConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Mask2FormerConfig.dropout |
1 | 0 | 0 |
attr |
Mask2FormerConfig.dim_feedforward |
1 | 0 | 0 |
attr |
Mask2FormerConfig.pre_norm |
1 | 0 | 0 |
attr |
Mask2FormerConfig.enforce_input_projection |
1 | 0 | 0 |
attr |
Mask2FormerConfig.common_stride |
1 | 0 | 0 |
attr |
Mask2FormerConfig.ignore_value |
1 | 0 | 0 |
attr |
Mask2FormerConfig.num_queries |
1 | 0 | 0 |
attr |
Mask2FormerConfig.no_object_weight |
1 | 0 | 0 |
attr |
Mask2FormerConfig.class_weight |
1 | 0 | 0 |
attr |
Mask2FormerConfig.mask_weight |
1 | 0 | 0 |
attr |
Mask2FormerConfig.dice_weight |
1 | 0 | 0 |
attr |
Mask2FormerConfig.train_num_points |
1 | 0 | 0 |
attr |
Mask2FormerConfig.oversample_ratio |
1 | 0 | 0 |
attr |
Mask2FormerConfig.importance_sample_ratio |
1 | 0 | 0 |
attr |
Mask2FormerConfig.init_std |
1 | 0 | 0 |
attr |
Mask2FormerConfig.init_xavier_std |
1 | 0 | 0 |
attr |
Mask2FormerConfig.use_auxiliary_loss |
1 | 0 | 0 |
attr |
Mask2FormerConfig.feature_strides |
1 | 0 | 0 |
attr |
Mask2FormerConfig.output_auxiliary_logits |
1 | 0 | 0 |
attr |
Mask2FormerConfig.num_hidden_layers |
1 | 0 | 0 |
transformers.models.mask2former.image_processing_mask2former (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Mask2FormerImageProcessor.__init__ |
15 | 13 | 0 |
meth |
Mask2FormerImageProcessor.resize |
8 | 6 | 0 |
meth |
Mask2FormerImageProcessor.convert_segmentation_map_to_binary_masks |
5 | 4 | 0 |
meth |
Mask2FormerImageProcessor.__call__ |
4 | 1 | 0 |
meth |
Mask2FormerImageProcessor._preprocess |
12 | 11 | 0 |
meth |
Mask2FormerImageProcessor.encode_inputs |
9 | 8 | 0 |
meth |
Mask2FormerImageProcessor.post_process_semantic_segmentation |
3 | 2 | 0 |
meth |
Mask2FormerImageProcessor.post_process_instance_segmentation |
8 | 7 | 0 |
meth |
Mask2FormerImageProcessor.post_process_panoptic_segmentation |
7 | 6 | 0 |
attr |
Mask2FormerImageProcessor.do_resize |
1 | 0 | 0 |
attr |
Mask2FormerImageProcessor.size |
1 | 0 | 0 |
attr |
Mask2FormerImageProcessor.resample |
1 | 0 | 0 |
attr |
Mask2FormerImageProcessor.size_divisor |
1 | 0 | 0 |
attr |
Mask2FormerImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
Mask2FormerImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
Mask2FormerImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
Mask2FormerImageProcessor.image_mean |
1 | 0 | 0 |
attr |
Mask2FormerImageProcessor.image_std |
1 | 0 | 0 |
attr |
Mask2FormerImageProcessor.ignore_index |
1 | 0 | 0 |
attr |
Mask2FormerImageProcessor.do_reduce_labels |
1 | 0 | 0 |
attr |
Mask2FormerImageProcessor.num_labels |
1 | 0 | 0 |
attr |
Mask2FormerImageProcessor.pad_size |
1 | 0 | 0 |
transformers.models.mask2former.image_processing_mask2former_fast (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Mask2FormerImageProcessorFast.reduce_label |
2 | 1 | 0 |
meth |
Mask2FormerImageProcessorFast.resize |
6 | 5 | 0 |
meth |
Mask2FormerImageProcessorFast._preprocess |
19 | 18 | 0 |
meth |
Mask2FormerImageProcessorFast.post_process_semantic_segmentation |
3 | 2 | 0 |
meth |
Mask2FormerImageProcessorFast.post_process_instance_segmentation |
8 | 7 | 0 |
meth |
Mask2FormerImageProcessorFast.post_process_panoptic_segmentation |
7 | 6 | 0 |
attr |
Mask2FormerImageProcessorFast.size |
1 | 0 | 0 |
transformers.models.mask2former.modeling_mask2former (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Mask2FormerForUniversalSegmentation.__init__ |
2 | 1 | 0 |
meth |
Mask2FormerForUniversalSegmentation.get_auxiliary_logits |
3 | 2 | 0 |
meth |
Mask2FormerForUniversalSegmentation.forward |
10 | 9 | 0 |
attr |
Mask2FormerForUniversalSegmentation.model |
1 | 0 | 0 |
attr |
Mask2FormerForUniversalSegmentation.class_predictor |
1 | 0 | 0 |
attr |
Mask2FormerForUniversalSegmentation.criterion |
1 | 0 | 0 |
meth |
Mask2FormerModel.__init__ |
2 | 1 | 0 |
meth |
Mask2FormerModel.forward |
7 | 6 | 0 |
attr |
Mask2FormerModel.pixel_level_module |
1 | 0 | 0 |
attr |
Mask2FormerModel.transformer_module |
1 | 0 | 0 |
meth |
Mask2FormerPreTrainedModel._init_weights |
2 | 1 | 0 |
transformers.models.mask2former.modular_mask2former (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Mask2FormerImageProcessorFast.post_process_semantic_segmentation |
3 | 2 | 0 |
meth |
Mask2FormerImageProcessorFast.post_process_instance_segmentation |
8 | 7 | 0 |
meth |
Mask2FormerImageProcessorFast.post_process_panoptic_segmentation |
7 | 6 | 0 |
transformers.models.maskformer.configuration_maskformer (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MaskFormerConfig.__init__ |
14 | 12 | 0 |
attr |
MaskFormerConfig.backbone_config |
1 | 0 | 0 |
attr |
MaskFormerConfig.decoder_config |
1 | 0 | 0 |
attr |
MaskFormerConfig.fpn_feature_size |
1 | 0 | 0 |
attr |
MaskFormerConfig.mask_feature_size |
1 | 0 | 0 |
attr |
MaskFormerConfig.init_std |
1 | 0 | 0 |
attr |
MaskFormerConfig.init_xavier_std |
1 | 0 | 0 |
attr |
MaskFormerConfig.cross_entropy_weight |
1 | 0 | 0 |
attr |
MaskFormerConfig.dice_weight |
1 | 0 | 0 |
attr |
MaskFormerConfig.mask_weight |
1 | 0 | 0 |
attr |
MaskFormerConfig.use_auxiliary_loss |
1 | 0 | 0 |
attr |
MaskFormerConfig.no_object_weight |
1 | 0 | 0 |
attr |
MaskFormerConfig.output_auxiliary_logits |
1 | 0 | 0 |
attr |
MaskFormerConfig.num_attention_heads |
1 | 0 | 0 |
attr |
MaskFormerConfig.num_hidden_layers |
1 | 0 | 0 |
transformers.models.maskformer.configuration_maskformer_swin (39 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MaskFormerSwinConfig.__init__ |
20 | 0 | 0 |
attr |
MaskFormerSwinConfig.image_size |
1 | 0 | 0 |
attr |
MaskFormerSwinConfig.patch_size |
1 | 0 | 0 |
attr |
MaskFormerSwinConfig.num_channels |
1 | 0 | 0 |
attr |
MaskFormerSwinConfig.embed_dim |
1 | 0 | 0 |
attr |
MaskFormerSwinConfig.depths |
1 | 0 | 0 |
attr |
MaskFormerSwinConfig.num_layers |
1 | 0 | 0 |
attr |
MaskFormerSwinConfig.num_heads |
1 | 0 | 0 |
attr |
MaskFormerSwinConfig.window_size |
1 | 0 | 0 |
attr |
MaskFormerSwinConfig.mlp_ratio |
1 | 0 | 0 |
attr |
MaskFormerSwinConfig.qkv_bias |
1 | 0 | 0 |
attr |
MaskFormerSwinConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
MaskFormerSwinConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
MaskFormerSwinConfig.drop_path_rate |
1 | 0 | 0 |
attr |
MaskFormerSwinConfig.hidden_act |
1 | 0 | 0 |
attr |
MaskFormerSwinConfig.use_absolute_embeddings |
1 | 0 | 0 |
attr |
MaskFormerSwinConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
MaskFormerSwinConfig.initializer_range |
1 | 0 | 0 |
attr |
MaskFormerSwinConfig.hidden_size |
1 | 0 | 0 |
attr |
MaskFormerSwinConfig.stage_names |
1 | 0 | 0 |
transformers.models.maskformer.image_processing_maskformer (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MaskFormerImageProcessor.__init__ |
15 | 13 | 0 |
meth |
MaskFormerImageProcessor.resize |
8 | 6 | 0 |
meth |
MaskFormerImageProcessor.convert_segmentation_map_to_binary_masks |
5 | 4 | 0 |
meth |
MaskFormerImageProcessor.__call__ |
4 | 1 | 0 |
meth |
MaskFormerImageProcessor._preprocess |
12 | 11 | 0 |
meth |
MaskFormerImageProcessor.encode_inputs |
9 | 8 | 0 |
meth |
MaskFormerImageProcessor.post_process_semantic_segmentation |
3 | 2 | 0 |
meth |
MaskFormerImageProcessor.post_process_instance_segmentation |
8 | 7 | 0 |
meth |
MaskFormerImageProcessor.post_process_panoptic_segmentation |
7 | 6 | 0 |
attr |
MaskFormerImageProcessor.do_resize |
1 | 0 | 0 |
attr |
MaskFormerImageProcessor.size |
1 | 0 | 0 |
attr |
MaskFormerImageProcessor.resample |
1 | 0 | 0 |
attr |
MaskFormerImageProcessor.size_divisor |
1 | 0 | 0 |
attr |
MaskFormerImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
MaskFormerImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
MaskFormerImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
MaskFormerImageProcessor.image_mean |
1 | 0 | 0 |
attr |
MaskFormerImageProcessor.image_std |
1 | 0 | 0 |
attr |
MaskFormerImageProcessor.ignore_index |
1 | 0 | 0 |
attr |
MaskFormerImageProcessor.do_reduce_labels |
1 | 0 | 0 |
attr |
MaskFormerImageProcessor.num_labels |
1 | 0 | 0 |
attr |
MaskFormerImageProcessor.pad_size |
1 | 0 | 0 |
transformers.models.maskformer.image_processing_maskformer_fast (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MaskFormerImageProcessorFast.reduce_label |
2 | 1 | 0 |
meth |
MaskFormerImageProcessorFast.resize |
6 | 5 | 0 |
meth |
MaskFormerImageProcessorFast._preprocess |
19 | 18 | 0 |
meth |
MaskFormerImageProcessorFast.post_process_semantic_segmentation |
3 | 2 | 0 |
meth |
MaskFormerImageProcessorFast.post_process_instance_segmentation |
8 | 7 | 0 |
meth |
MaskFormerImageProcessorFast.post_process_panoptic_segmentation |
7 | 6 | 0 |
attr |
MaskFormerImageProcessorFast.size |
1 | 0 | 0 |
transformers.models.maskformer.modeling_maskformer (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MaskFormerForInstanceSegmentation.__init__ |
2 | 1 | 0 |
meth |
MaskFormerForInstanceSegmentation.forward |
10 | 9 | 0 |
attr |
MaskFormerForInstanceSegmentation.model |
1 | 0 | 0 |
attr |
MaskFormerForInstanceSegmentation.class_predictor |
1 | 0 | 0 |
attr |
MaskFormerForInstanceSegmentation.mask_embedder |
1 | 0 | 0 |
attr |
MaskFormerForInstanceSegmentation.matcher |
1 | 0 | 0 |
attr |
MaskFormerForInstanceSegmentation.criterion |
1 | 0 | 0 |
meth |
MaskFormerModel.__init__ |
2 | 1 | 0 |
meth |
MaskFormerModel.forward |
7 | 6 | 0 |
attr |
MaskFormerModel.pixel_level_module |
1 | 0 | 0 |
attr |
MaskFormerModel.transformer_module |
1 | 0 | 0 |
meth |
MaskFormerPreTrainedModel._init_weights |
2 | 1 | 0 |
transformers.models.maskformer.modeling_maskformer_swin (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MaskFormerSwinPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
MaskFormerSwinBackbone.__init__ |
2 | 1 | 0 |
meth |
MaskFormerSwinBackbone.forward |
6 | 5 | 0 |
attr |
MaskFormerSwinBackbone.model |
1 | 0 | 0 |
attr |
MaskFormerSwinBackbone.num_features |
1 | 0 | 0 |
attr |
MaskFormerSwinBackbone.hidden_states_norms |
1 | 0 | 0 |
meth |
MaskFormerSwinModel.__init__ |
3 | 0 | 0 |
meth |
MaskFormerSwinModel.get_input_embeddings |
1 | 0 | 0 |
meth |
MaskFormerSwinModel.forward |
7 | 1 | 0 |
attr |
MaskFormerSwinModel.num_layers |
1 | 0 | 0 |
attr |
MaskFormerSwinModel.num_features |
1 | 0 | 0 |
attr |
MaskFormerSwinModel.embeddings |
1 | 0 | 0 |
attr |
MaskFormerSwinModel.encoder |
1 | 0 | 0 |
attr |
MaskFormerSwinModel.layernorm |
1 | 0 | 0 |
attr |
MaskFormerSwinModel.pooler |
1 | 0 | 0 |
transformers.models.mbart.configuration_mbart (55 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MBartConfig.__init__ |
29 | 0 | 0 |
attr |
MBartConfig.is_decoder |
1 | 0 | 0 |
attr |
MBartConfig.vocab_size |
1 | 0 | 0 |
attr |
MBartConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
MBartConfig.d_model |
1 | 0 | 0 |
attr |
MBartConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
MBartConfig.encoder_layers |
1 | 0 | 0 |
attr |
MBartConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
MBartConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
MBartConfig.decoder_layers |
1 | 0 | 0 |
attr |
MBartConfig.decoder_attention_heads |
1 | 0 | 0 |
attr |
MBartConfig.dropout |
1 | 0 | 0 |
attr |
MBartConfig.attention_dropout |
1 | 0 | 0 |
attr |
MBartConfig.activation_dropout |
1 | 0 | 0 |
attr |
MBartConfig.activation_function |
1 | 0 | 0 |
attr |
MBartConfig.init_std |
1 | 0 | 0 |
attr |
MBartConfig.encoder_layerdrop |
1 | 0 | 0 |
attr |
MBartConfig.decoder_layerdrop |
1 | 0 | 0 |
attr |
MBartConfig.classifier_dropout |
1 | 0 | 0 |
attr |
MBartConfig.use_cache |
1 | 0 | 0 |
attr |
MBartConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
MBartConfig.scale_embedding |
1 | 0 | 0 |
attr |
MBartConfig.pad_token_id |
1 | 0 | 0 |
attr |
MBartConfig.bos_token_id |
1 | 0 | 0 |
attr |
MBartConfig.eos_token_id |
1 | 0 | 0 |
attr |
MBartConfig.decoder_start_token_id |
1 | 0 | 0 |
attr |
MBartConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.mbart.modeling_mbart (35 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MBartForQuestionAnswering.__init__ |
2 | 0 | 0 |
meth |
MBartForQuestionAnswering.forward |
16 | 15 | 0 |
attr |
MBartForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
MBartForQuestionAnswering.model |
1 | 0 | 0 |
attr |
MBartForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
MBartForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
MBartForConditionalGeneration.forward |
16 | 15 | 0 |
meth |
MBartForConditionalGeneration.prepare_decoder_input_ids_from_labels |
2 | 1 | 0 |
attr |
MBartForConditionalGeneration.model |
1 | 0 | 0 |
attr |
MBartForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
MBartForCausalLM.__init__ |
2 | 0 | 0 |
meth |
MBartForCausalLM.get_input_embeddings |
1 | 0 | 0 |
meth |
MBartForCausalLM.set_input_embeddings |
2 | 0 | 0 |
meth |
MBartForCausalLM.forward |
15 | 14 | 0 |
attr |
MBartForCausalLM.model |
1 | 0 | 0 |
attr |
MBartForCausalLM.lm_head |
1 | 0 | 0 |
meth |
MBartModel.__init__ |
2 | 1 | 0 |
meth |
MBartModel.get_input_embeddings |
1 | 0 | 0 |
meth |
MBartModel.set_input_embeddings |
2 | 0 | 0 |
meth |
MBartModel.forward |
15 | 14 | 0 |
attr |
MBartModel.shared |
1 | 0 | 0 |
attr |
MBartModel.encoder |
1 | 0 | 0 |
attr |
MBartModel.decoder |
1 | 0 | 0 |
meth |
MBartForSequenceClassification.__init__ |
3 | 1 | 0 |
meth |
MBartForSequenceClassification.forward |
15 | 14 | 0 |
attr |
MBartForSequenceClassification.model |
1 | 0 | 0 |
attr |
MBartForSequenceClassification.classification_head |
1 | 0 | 0 |
meth |
MBartPreTrainedModel._init_weights |
2 | 0 | 0 |
prop |
MBartPreTrainedModel.dummy_inputs |
1 | 0 | 0 |
transformers.models.mbart.tokenization_mbart (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MBartTokenizer.__init__ |
13 | 1 | 0 |
meth |
MBartTokenizer._build_translation_inputs |
6 | 3 | 0 |
meth |
MBartTokenizer._switch_to_input_mode |
1 | 0 | 0 |
meth |
MBartTokenizer._switch_to_target_mode |
1 | 0 | 0 |
meth |
MBartTokenizer.set_src_lang_special_tokens |
2 | 1 | 0 |
attr |
MBartTokenizer.lang_code_to_id |
1 | 0 | 0 |
attr |
MBartTokenizer.fairseq_offset |
1 | 0 | 0 |
attr |
MBartTokenizer.fairseq_tokens_to_ids |
1 | 0 | 0 |
attr |
MBartTokenizer.fairseq_ids_to_tokens |
1 | 0 | 0 |
attr |
MBartTokenizer.cur_lang_code |
1 | 0 | 0 |
attr |
MBartTokenizer.tgt_lang |
1 | 0 | 0 |
transformers.models.mbart50.tokenization_mbart50 (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MBart50Tokenizer.__init__ |
12 | 2 | 0 |
meth |
MBart50Tokenizer._build_language_code_mappings |
1 | 0 | 0 |
meth |
MBart50Tokenizer._post_init |
1 | 0 | 0 |
meth |
MBart50Tokenizer.prepare_seq2seq_batch |
6 | 5 | 0 |
meth |
MBart50Tokenizer._switch_to_input_mode |
1 | 0 | 0 |
meth |
MBart50Tokenizer._switch_to_target_mode |
1 | 0 | 0 |
meth |
MBart50Tokenizer._build_translation_inputs |
6 | 3 | 0 |
attr |
MBart50Tokenizer.fairseq_offset |
1 | 0 | 0 |
attr |
MBart50Tokenizer.tgt_lang |
1 | 0 | 0 |
attr |
MBart50Tokenizer.cur_lang_code_id |
1 | 0 | 0 |
transformers.models.megatron_bert.configuration_megatron_bert (40 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MegatronBertConfig.__init__ |
21 | 0 | 0 |
attr |
MegatronBertConfig.pad_token_id |
1 | 0 | 0 |
attr |
MegatronBertConfig.bos_token_id |
1 | 0 | 0 |
attr |
MegatronBertConfig.eos_token_id |
1 | 0 | 0 |
attr |
MegatronBertConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
MegatronBertConfig.is_decoder |
1 | 0 | 0 |
attr |
MegatronBertConfig.add_cross_attention |
1 | 0 | 0 |
attr |
MegatronBertConfig.vocab_size |
1 | 0 | 0 |
attr |
MegatronBertConfig.hidden_size |
1 | 0 | 0 |
attr |
MegatronBertConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
MegatronBertConfig.num_attention_heads |
1 | 0 | 0 |
attr |
MegatronBertConfig.hidden_act |
1 | 0 | 0 |
attr |
MegatronBertConfig.intermediate_size |
1 | 0 | 0 |
attr |
MegatronBertConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
MegatronBertConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
MegatronBertConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
MegatronBertConfig.type_vocab_size |
1 | 0 | 0 |
attr |
MegatronBertConfig.initializer_range |
1 | 0 | 0 |
attr |
MegatronBertConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
MegatronBertConfig.use_cache |
1 | 0 | 0 |
transformers.models.megatron_bert.modeling_megatron_bert (68 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MegatronBertForNextSentencePrediction.__init__ |
2 | 0 | 0 |
meth |
MegatronBertForNextSentencePrediction.forward |
11 | 10 | 0 |
attr |
MegatronBertForNextSentencePrediction.bert |
1 | 0 | 0 |
attr |
MegatronBertForNextSentencePrediction.cls |
1 | 0 | 0 |
meth |
MegatronBertForMaskedLM.__init__ |
2 | 0 | 0 |
meth |
MegatronBertForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
MegatronBertForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
meth |
MegatronBertForMaskedLM.forward |
13 | 12 | 0 |
attr |
MegatronBertForMaskedLM.bert |
1 | 0 | 0 |
attr |
MegatronBertForMaskedLM.cls |
1 | 0 | 0 |
meth |
MegatronBertForQuestionAnswering.__init__ |
2 | 0 | 0 |
meth |
MegatronBertForQuestionAnswering.forward |
12 | 11 | 0 |
attr |
MegatronBertForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
MegatronBertForQuestionAnswering.bert |
1 | 0 | 0 |
attr |
MegatronBertForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
MegatronBertForCausalLM.__init__ |
2 | 0 | 0 |
meth |
MegatronBertForCausalLM.get_output_embeddings |
1 | 0 | 0 |
meth |
MegatronBertForCausalLM.set_output_embeddings |
2 | 0 | 0 |
meth |
MegatronBertForCausalLM.forward |
17 | 16 | 0 |
attr |
MegatronBertForCausalLM.bert |
1 | 0 | 0 |
attr |
MegatronBertForCausalLM.cls |
1 | 0 | 0 |
meth |
MegatronBertForMultipleChoice.__init__ |
2 | 0 | 0 |
meth |
MegatronBertForMultipleChoice.forward |
11 | 10 | 0 |
attr |
MegatronBertForMultipleChoice.bert |
1 | 0 | 0 |
attr |
MegatronBertForMultipleChoice.dropout |
1 | 0 | 0 |
attr |
MegatronBertForMultipleChoice.classifier |
1 | 0 | 0 |
meth |
MegatronBertForTokenClassification.__init__ |
2 | 0 | 0 |
meth |
MegatronBertForTokenClassification.forward |
11 | 10 | 0 |
attr |
MegatronBertForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
MegatronBertForTokenClassification.bert |
1 | 0 | 0 |
attr |
MegatronBertForTokenClassification.dropout |
1 | 0 | 0 |
attr |
MegatronBertForTokenClassification.classifier |
1 | 0 | 0 |
meth |
MegatronBertPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
MegatronBertModel.__init__ |
3 | 0 | 0 |
meth |
MegatronBertModel.get_input_embeddings |
1 | 0 | 0 |
meth |
MegatronBertModel.set_input_embeddings |
2 | 0 | 0 |
meth |
MegatronBertModel.forward |
15 | 14 | 0 |
attr |
MegatronBertModel.embeddings |
1 | 0 | 0 |
attr |
MegatronBertModel.encoder |
1 | 0 | 0 |
attr |
MegatronBertModel.pooler |
1 | 0 | 0 |
meth |
MegatronBertForPreTraining.__init__ |
3 | 0 | 0 |
meth |
MegatronBertForPreTraining.get_output_embeddings |
1 | 0 | 0 |
meth |
MegatronBertForPreTraining.set_output_embeddings |
2 | 0 | 0 |
meth |
MegatronBertForPreTraining.forward |
12 | 11 | 0 |
attr |
MegatronBertForPreTraining.bert |
1 | 0 | 0 |
attr |
MegatronBertForPreTraining.cls |
1 | 0 | 0 |
meth |
MegatronBertForSequenceClassification.__init__ |
2 | 0 | 0 |
meth |
MegatronBertForSequenceClassification.forward |
11 | 10 | 0 |
attr |
MegatronBertForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
MegatronBertForSequenceClassification.bert |
1 | 0 | 0 |
attr |
MegatronBertForSequenceClassification.dropout |
1 | 0 | 0 |
attr |
MegatronBertForSequenceClassification.classifier |
1 | 0 | 0 |
transformers.models.megatron_gpt2.checkpoint_reshaping_and_interoperability (39 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
convert_checkpoint_from_transformers_to_megatron |
2 | 0 | 0 |
func |
add_transformers_checkpoint_args |
2 | 0 | 0 |
func |
megatron_to_transformers_fix_query_key_value_ordering |
6 | 0 | 0 |
func |
add_megatron_checkpoint_args |
2 | 0 | 0 |
func |
get_element_from_dict_by_path |
3 | 0 | 0 |
func |
get_megatron_sharded_states |
5 | 0 | 0 |
func |
add_checkpointing_args |
2 | 0 | 0 |
attr |
transformers_to_megatron |
1 | 0 | 0 |
func |
convert_checkpoint_from_megatron_to_transformers |
2 | 0 | 0 |
func |
recursive_print |
4 | 0 | 0 |
func |
transformers_to_megatron_fix_query_key_value_ordering |
6 | 0 | 0 |
func |
merge_transformers_sharded_states |
3 | 0 | 0 |
func |
main |
1 | 0 | 0 |
transformers.models.metaclip_2.configuration_metaclip_2 (71 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MetaClip2TextConfig.__init__ |
17 | 0 | 0 |
attr |
MetaClip2TextConfig.pad_token_id |
1 | 0 | 0 |
attr |
MetaClip2TextConfig.bos_token_id |
1 | 0 | 0 |
attr |
MetaClip2TextConfig.eos_token_id |
1 | 0 | 0 |
attr |
MetaClip2TextConfig.vocab_size |
1 | 0 | 0 |
attr |
MetaClip2TextConfig.hidden_size |
1 | 0 | 0 |
attr |
MetaClip2TextConfig.intermediate_size |
1 | 0 | 0 |
attr |
MetaClip2TextConfig.projection_dim |
1 | 0 | 0 |
attr |
MetaClip2TextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
MetaClip2TextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
MetaClip2TextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
MetaClip2TextConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
MetaClip2TextConfig.hidden_act |
1 | 0 | 0 |
attr |
MetaClip2TextConfig.initializer_range |
1 | 0 | 0 |
attr |
MetaClip2TextConfig.initializer_factor |
1 | 0 | 0 |
attr |
MetaClip2TextConfig.attention_dropout |
1 | 0 | 0 |
meth |
MetaClip2VisionConfig.__init__ |
15 | 0 | 0 |
attr |
MetaClip2VisionConfig.hidden_size |
1 | 0 | 0 |
attr |
MetaClip2VisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
MetaClip2VisionConfig.projection_dim |
1 | 0 | 0 |
attr |
MetaClip2VisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
MetaClip2VisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
MetaClip2VisionConfig.num_channels |
1 | 0 | 0 |
attr |
MetaClip2VisionConfig.patch_size |
1 | 0 | 0 |
attr |
MetaClip2VisionConfig.image_size |
1 | 0 | 0 |
attr |
MetaClip2VisionConfig.initializer_range |
1 | 0 | 0 |
attr |
MetaClip2VisionConfig.initializer_factor |
1 | 0 | 0 |
attr |
MetaClip2VisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
MetaClip2VisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
MetaClip2VisionConfig.hidden_act |
1 | 0 | 0 |
meth |
MetaClip2Config.__init__ |
6 | 0 | 0 |
attr |
MetaClip2Config.text_config |
1 | 0 | 0 |
attr |
MetaClip2Config.vision_config |
1 | 0 | 0 |
attr |
MetaClip2Config.projection_dim |
1 | 0 | 0 |
attr |
MetaClip2Config.logit_scale_init_value |
1 | 0 | 0 |
attr |
MetaClip2Config.initializer_factor |
1 | 0 | 0 |
transformers.models.metaclip_2.modeling_metaclip_2 (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MetaClip2VisionModel.__init__ |
2 | 1 | 0 |
attr |
MetaClip2VisionModel.vision_model |
1 | 0 | 0 |
meth |
MetaClip2VisionModelWithProjection.__init__ |
2 | 1 | 0 |
attr |
MetaClip2VisionModelWithProjection.vision_model |
1 | 0 | 0 |
attr |
MetaClip2VisionModelWithProjection.visual_projection |
1 | 0 | 0 |
meth |
MetaClip2TextModel.__init__ |
2 | 1 | 0 |
meth |
MetaClip2TextModel.set_input_embeddings |
2 | 0 | 0 |
attr |
MetaClip2TextModel.text_model |
1 | 0 | 0 |
meth |
MetaClip2TextModelWithProjection.__init__ |
2 | 1 | 0 |
meth |
MetaClip2TextModelWithProjection.set_input_embeddings |
2 | 0 | 0 |
attr |
MetaClip2TextModelWithProjection.text_model |
1 | 0 | 0 |
attr |
MetaClip2TextModelWithProjection.text_projection |
1 | 0 | 0 |
meth |
MetaClip2PreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
MetaClip2ForImageClassification.num_labels |
1 | 0 | 0 |
attr |
MetaClip2ForImageClassification.vision_model |
1 | 0 | 0 |
attr |
MetaClip2ForImageClassification.classifier |
1 | 0 | 0 |
meth |
MetaClip2Model.__init__ |
2 | 1 | 0 |
attr |
MetaClip2Model.projection_dim |
1 | 0 | 0 |
attr |
MetaClip2Model.text_embed_dim |
1 | 0 | 0 |
attr |
MetaClip2Model.vision_embed_dim |
1 | 0 | 0 |
attr |
MetaClip2Model.text_model |
1 | 0 | 0 |
attr |
MetaClip2Model.vision_model |
1 | 0 | 0 |
attr |
MetaClip2Model.visual_projection |
1 | 0 | 0 |
attr |
MetaClip2Model.text_projection |
1 | 0 | 0 |
attr |
MetaClip2Model.logit_scale |
1 | 0 | 0 |
transformers.models.metaclip_2.modular_metaclip_2 (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MetaClip2TextModel.forward |
5 | 4 | 0 |
meth |
MetaClip2TextModelWithProjection.forward |
5 | 4 | 0 |
meth |
MetaClip2VisionModel.forward |
4 | 3 | 0 |
meth |
MetaClip2VisionModelWithProjection.forward |
4 | 3 | 0 |
meth |
MetaClip2Model.__init__ |
2 | 1 | 0 |
meth |
MetaClip2Model.forward |
8 | 7 | 0 |
attr |
MetaClip2Model.projection_dim |
1 | 0 | 0 |
attr |
MetaClip2Model.text_embed_dim |
1 | 0 | 0 |
attr |
MetaClip2Model.vision_embed_dim |
1 | 0 | 0 |
attr |
MetaClip2Model.text_model |
1 | 0 | 0 |
attr |
MetaClip2Model.vision_model |
1 | 0 | 0 |
attr |
MetaClip2Model.visual_projection |
1 | 0 | 0 |
attr |
MetaClip2Model.text_projection |
1 | 0 | 0 |
attr |
MetaClip2Model.logit_scale |
1 | 0 | 0 |
meth |
MetaClip2PreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.mgp_str.configuration_mgp_str (40 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MgpstrConfig.__init__ |
21 | 0 | 0 |
attr |
MgpstrConfig.image_size |
1 | 0 | 0 |
attr |
MgpstrConfig.patch_size |
1 | 0 | 0 |
attr |
MgpstrConfig.num_channels |
1 | 0 | 0 |
attr |
MgpstrConfig.max_token_length |
1 | 0 | 0 |
attr |
MgpstrConfig.num_character_labels |
1 | 0 | 0 |
attr |
MgpstrConfig.num_bpe_labels |
1 | 0 | 0 |
attr |
MgpstrConfig.num_wordpiece_labels |
1 | 0 | 0 |
attr |
MgpstrConfig.hidden_size |
1 | 0 | 0 |
attr |
MgpstrConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
MgpstrConfig.num_attention_heads |
1 | 0 | 0 |
attr |
MgpstrConfig.mlp_ratio |
1 | 0 | 0 |
attr |
MgpstrConfig.distilled |
1 | 0 | 0 |
attr |
MgpstrConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
MgpstrConfig.drop_rate |
1 | 0 | 0 |
attr |
MgpstrConfig.qkv_bias |
1 | 0 | 0 |
attr |
MgpstrConfig.attn_drop_rate |
1 | 0 | 0 |
attr |
MgpstrConfig.drop_path_rate |
1 | 0 | 0 |
attr |
MgpstrConfig.output_a3_attentions |
1 | 0 | 0 |
attr |
MgpstrConfig.initializer_range |
1 | 0 | 0 |
transformers.models.mgp_str.modeling_mgp_str (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MgpstrForSceneTextRecognition.forward |
7 | 6 | 0 |
attr |
MgpstrForSceneTextRecognition.num_labels |
1 | 0 | 0 |
attr |
MgpstrForSceneTextRecognition.mgp_str |
1 | 0 | 0 |
attr |
MgpstrForSceneTextRecognition.char_a3_module |
1 | 0 | 0 |
attr |
MgpstrForSceneTextRecognition.bpe_a3_module |
1 | 0 | 0 |
attr |
MgpstrForSceneTextRecognition.wp_a3_module |
1 | 0 | 0 |
attr |
MgpstrForSceneTextRecognition.char_head |
1 | 0 | 0 |
attr |
MgpstrForSceneTextRecognition.bpe_head |
1 | 0 | 0 |
attr |
MgpstrForSceneTextRecognition.wp_head |
1 | 0 | 0 |
meth |
MgpstrModel.__init__ |
2 | 1 | 0 |
meth |
MgpstrModel.forward |
6 | 5 | 0 |
attr |
MgpstrModel.embeddings |
1 | 0 | 0 |
attr |
MgpstrModel.encoder |
1 | 0 | 0 |
transformers.models.mgp_str.processing_mgp_str (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MgpstrProcessor.__init__ |
4 | 0 | 0 |
meth |
MgpstrProcessor.__call__ |
5 | 0 | 0 |
meth |
MgpstrProcessor.batch_decode |
2 | 0 | 0 |
meth |
MgpstrProcessor._decode_helper |
3 | 0 | 0 |
meth |
MgpstrProcessor.char_decode |
2 | 0 | 0 |
meth |
MgpstrProcessor.bpe_decode |
2 | 0 | 0 |
meth |
MgpstrProcessor.wp_decode |
2 | 0 | 0 |
prop |
MgpstrProcessor.model_input_names |
1 | 0 | 0 |
attr |
MgpstrProcessor.char_tokenizer |
1 | 0 | 0 |
attr |
MgpstrProcessor.bpe_tokenizer |
1 | 0 | 0 |
attr |
MgpstrProcessor.wp_tokenizer |
1 | 0 | 0 |
transformers.models.mgp_str.tokenization_mgp_str (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MgpstrTokenizer.__init__ |
7 | 0 | 0 |
meth |
MgpstrTokenizer.get_vocab |
1 | 0 | 0 |
meth |
MgpstrTokenizer._tokenize |
2 | 0 | 0 |
meth |
MgpstrTokenizer._convert_token_to_id |
2 | 0 | 0 |
meth |
MgpstrTokenizer._convert_id_to_token |
2 | 0 | 0 |
prop |
MgpstrTokenizer.vocab_size |
1 | 0 | 0 |
attr |
MgpstrTokenizer.decoder |
1 | 0 | 0 |
attr |
MgpstrTokenizer.vocab |
1 | 0 | 0 |
transformers.models.mimi.configuration_mimi (40 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MimiConfig.__init__ |
41 | 39 | 0 |
attr |
MimiConfig.sampling_rate |
1 | 0 | 0 |
attr |
MimiConfig.audio_channels |
1 | 0 | 0 |
attr |
MimiConfig.hidden_size |
1 | 0 | 0 |
attr |
MimiConfig.num_filters |
1 | 0 | 0 |
attr |
MimiConfig.num_residual_layers |
1 | 0 | 0 |
attr |
MimiConfig.upsampling_ratios |
1 | 0 | 0 |
attr |
MimiConfig.kernel_size |
1 | 0 | 0 |
attr |
MimiConfig.last_kernel_size |
1 | 0 | 0 |
attr |
MimiConfig.residual_kernel_size |
1 | 0 | 0 |
attr |
MimiConfig.dilation_growth_rate |
1 | 0 | 0 |
attr |
MimiConfig.use_causal_conv |
1 | 0 | 0 |
attr |
MimiConfig.pad_mode |
1 | 0 | 0 |
attr |
MimiConfig.compress |
1 | 0 | 0 |
attr |
MimiConfig.trim_right_ratio |
1 | 0 | 0 |
attr |
MimiConfig.codebook_size |
1 | 0 | 0 |
attr |
MimiConfig.codebook_dim |
1 | 0 | 0 |
attr |
MimiConfig.num_quantizers |
1 | 0 | 0 |
attr |
MimiConfig.use_conv_shortcut |
1 | 0 | 0 |
attr |
MimiConfig.vector_quantization_hidden_dimension |
1 | 0 | 0 |
attr |
MimiConfig.upsample_groups |
1 | 0 | 0 |
attr |
MimiConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
MimiConfig.intermediate_size |
1 | 0 | 0 |
attr |
MimiConfig.num_attention_heads |
1 | 0 | 0 |
attr |
MimiConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
MimiConfig.hidden_act |
1 | 0 | 0 |
attr |
MimiConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
MimiConfig.initializer_range |
1 | 0 | 0 |
attr |
MimiConfig.norm_eps |
1 | 0 | 0 |
attr |
MimiConfig.use_cache |
1 | 0 | 0 |
attr |
MimiConfig.use_streaming |
1 | 0 | 0 |
attr |
MimiConfig.sliding_window |
1 | 0 | 0 |
attr |
MimiConfig.attention_dropout |
1 | 0 | 0 |
attr |
MimiConfig.head_dim |
1 | 0 | 0 |
attr |
MimiConfig.layer_scale_initial_scale |
1 | 0 | 0 |
attr |
MimiConfig.attention_bias |
1 | 0 | 0 |
attr |
MimiConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
MimiConfig.rope_parameters |
1 | 0 | 0 |
attr |
MimiConfig.num_semantic_quantizers |
1 | 0 | 0 |
transformers.models.mimi.modeling_mimi (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MimiModel.__init__ |
2 | 1 | 0 |
meth |
MimiModel.get_audio_codes_mask |
3 | 2 | 0 |
meth |
MimiModel.forward |
9 | 8 | 0 |
attr |
MimiModel.encoder |
1 | 0 | 0 |
attr |
MimiModel.encoder_transformer |
1 | 0 | 0 |
attr |
MimiModel.downsample |
1 | 0 | 0 |
attr |
MimiModel.upsample |
1 | 0 | 0 |
attr |
MimiModel.decoder_transformer |
1 | 0 | 0 |
attr |
MimiModel.decoder |
1 | 0 | 0 |
attr |
MimiModel.quantizer |
1 | 0 | 0 |
attr |
MimiModel.bits_per_codebook |
1 | 0 | 0 |
meth |
MimiPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.minimax.configuration_minimax (34 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MiniMaxConfig.__init__ |
34 | 32 | 0 |
attr |
MiniMaxConfig.vocab_size |
1 | 0 | 0 |
attr |
MiniMaxConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
MiniMaxConfig.hidden_size |
1 | 0 | 0 |
attr |
MiniMaxConfig.intermediate_size |
1 | 0 | 0 |
attr |
MiniMaxConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
MiniMaxConfig.num_attention_heads |
1 | 0 | 0 |
attr |
MiniMaxConfig.sliding_window |
1 | 0 | 0 |
attr |
MiniMaxConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
MiniMaxConfig.hidden_act |
1 | 0 | 0 |
attr |
MiniMaxConfig.initializer_range |
1 | 0 | 0 |
attr |
MiniMaxConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
MiniMaxConfig.use_cache |
1 | 0 | 0 |
attr |
MiniMaxConfig.attention_dropout |
1 | 0 | 0 |
attr |
MiniMaxConfig.head_dim |
1 | 0 | 0 |
attr |
MiniMaxConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
MiniMaxConfig.num_local_experts |
1 | 0 | 0 |
attr |
MiniMaxConfig.output_router_logits |
1 | 0 | 0 |
attr |
MiniMaxConfig.router_aux_loss_coef |
1 | 0 | 0 |
attr |
MiniMaxConfig.router_jitter_noise |
1 | 0 | 0 |
attr |
MiniMaxConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
MiniMaxConfig.pad_token_id |
1 | 0 | 0 |
attr |
MiniMaxConfig.bos_token_id |
1 | 0 | 0 |
attr |
MiniMaxConfig.eos_token_id |
1 | 0 | 0 |
attr |
MiniMaxConfig.layer_types |
1 | 0 | 0 |
attr |
MiniMaxConfig.block_size |
1 | 0 | 0 |
attr |
MiniMaxConfig.full_attn_alpha_factor |
1 | 0 | 0 |
attr |
MiniMaxConfig.full_attn_beta_factor |
1 | 0 | 0 |
attr |
MiniMaxConfig.linear_attn_alpha_factor |
1 | 0 | 0 |
attr |
MiniMaxConfig.linear_attn_beta_factor |
1 | 0 | 0 |
attr |
MiniMaxConfig.mlp_alpha_factor |
1 | 0 | 0 |
attr |
MiniMaxConfig.mlp_beta_factor |
1 | 0 | 0 |
attr |
MiniMaxConfig.rope_parameters |
1 | 0 | 0 |
transformers.models.minimax.modeling_minimax (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MiniMaxPreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
MiniMaxPreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
MiniMaxForCausalLM.__init__ |
2 | 0 | 0 |
attr |
MiniMaxForCausalLM.model |
1 | 0 | 0 |
attr |
MiniMaxForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
MiniMaxForCausalLM.lm_head |
1 | 0 | 0 |
attr |
MiniMaxForCausalLM.router_aux_loss_coef |
1 | 0 | 0 |
attr |
MiniMaxForCausalLM.num_experts |
1 | 0 | 0 |
attr |
MiniMaxForCausalLM.num_experts_per_tok |
1 | 0 | 0 |
meth |
MiniMaxModel.__init__ |
2 | 1 | 0 |
attr |
MiniMaxModel.padding_idx |
1 | 0 | 0 |
attr |
MiniMaxModel.vocab_size |
1 | 0 | 0 |
attr |
MiniMaxModel.embed_tokens |
1 | 0 | 0 |
attr |
MiniMaxModel.layers |
1 | 0 | 0 |
attr |
MiniMaxModel.norm |
1 | 0 | 0 |
attr |
MiniMaxModel.rotary_emb |
1 | 0 | 0 |
attr |
MiniMaxModel.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.minimax.modular_minimax (39 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MiniMaxConfig.__init__ |
34 | 32 | 0 |
attr |
MiniMaxConfig.vocab_size |
1 | 0 | 0 |
attr |
MiniMaxConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
MiniMaxConfig.hidden_size |
1 | 0 | 0 |
attr |
MiniMaxConfig.intermediate_size |
1 | 0 | 0 |
attr |
MiniMaxConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
MiniMaxConfig.num_attention_heads |
1 | 0 | 0 |
attr |
MiniMaxConfig.sliding_window |
1 | 0 | 0 |
attr |
MiniMaxConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
MiniMaxConfig.hidden_act |
1 | 0 | 0 |
attr |
MiniMaxConfig.initializer_range |
1 | 0 | 0 |
attr |
MiniMaxConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
MiniMaxConfig.use_cache |
1 | 0 | 0 |
attr |
MiniMaxConfig.attention_dropout |
1 | 0 | 0 |
attr |
MiniMaxConfig.head_dim |
1 | 0 | 0 |
attr |
MiniMaxConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
MiniMaxConfig.num_local_experts |
1 | 0 | 0 |
attr |
MiniMaxConfig.output_router_logits |
1 | 0 | 0 |
attr |
MiniMaxConfig.router_aux_loss_coef |
1 | 0 | 0 |
attr |
MiniMaxConfig.router_jitter_noise |
1 | 0 | 0 |
attr |
MiniMaxConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
MiniMaxConfig.pad_token_id |
1 | 0 | 0 |
attr |
MiniMaxConfig.bos_token_id |
1 | 0 | 0 |
attr |
MiniMaxConfig.eos_token_id |
1 | 0 | 0 |
attr |
MiniMaxConfig.layer_types |
1 | 0 | 0 |
attr |
MiniMaxConfig.block_size |
1 | 0 | 0 |
attr |
MiniMaxConfig.full_attn_alpha_factor |
1 | 0 | 0 |
attr |
MiniMaxConfig.full_attn_beta_factor |
1 | 0 | 0 |
attr |
MiniMaxConfig.linear_attn_alpha_factor |
1 | 0 | 0 |
attr |
MiniMaxConfig.linear_attn_beta_factor |
1 | 0 | 0 |
attr |
MiniMaxConfig.mlp_alpha_factor |
1 | 0 | 0 |
attr |
MiniMaxConfig.mlp_beta_factor |
1 | 0 | 0 |
attr |
MiniMaxConfig.rope_parameters |
1 | 0 | 0 |
meth |
MiniMaxPreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
MiniMaxPreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
MiniMaxForCausalLM.forward |
2 | 0 | 0 |
transformers.models.minimax_m2.configuration_minimax_m2 (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MiniMaxM2Config.__init__ |
25 | 23 | 0 |
attr |
MiniMaxM2Config.vocab_size |
1 | 0 | 0 |
attr |
MiniMaxM2Config.max_position_embeddings |
1 | 0 | 0 |
attr |
MiniMaxM2Config.hidden_size |
1 | 0 | 0 |
attr |
MiniMaxM2Config.intermediate_size |
1 | 0 | 0 |
attr |
MiniMaxM2Config.num_hidden_layers |
1 | 0 | 0 |
attr |
MiniMaxM2Config.num_attention_heads |
1 | 0 | 0 |
attr |
MiniMaxM2Config.num_key_value_heads |
1 | 0 | 0 |
attr |
MiniMaxM2Config.hidden_act |
1 | 0 | 0 |
attr |
MiniMaxM2Config.initializer_range |
1 | 0 | 0 |
attr |
MiniMaxM2Config.rms_norm_eps |
1 | 0 | 0 |
attr |
MiniMaxM2Config.use_cache |
1 | 0 | 0 |
attr |
MiniMaxM2Config.attention_dropout |
1 | 0 | 0 |
attr |
MiniMaxM2Config.head_dim |
1 | 0 | 0 |
attr |
MiniMaxM2Config.rope_parameters |
1 | 0 | 0 |
attr |
MiniMaxM2Config.num_experts_per_tok |
1 | 0 | 0 |
attr |
MiniMaxM2Config.num_local_experts |
1 | 0 | 0 |
attr |
MiniMaxM2Config.output_router_logits |
1 | 0 | 0 |
attr |
MiniMaxM2Config.router_aux_loss_coef |
1 | 0 | 0 |
attr |
MiniMaxM2Config.router_jitter_noise |
1 | 0 | 0 |
attr |
MiniMaxM2Config.pad_token_id |
1 | 0 | 0 |
attr |
MiniMaxM2Config.bos_token_id |
1 | 0 | 0 |
attr |
MiniMaxM2Config.eos_token_id |
1 | 0 | 0 |
attr |
MiniMaxM2Config.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.minimax_m2.modeling_minimax_m2 (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MiniMaxM2PreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
MiniMaxM2PreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
MiniMaxM2Model.__init__ |
2 | 1 | 0 |
attr |
MiniMaxM2Model.padding_idx |
1 | 0 | 0 |
attr |
MiniMaxM2Model.vocab_size |
1 | 0 | 0 |
attr |
MiniMaxM2Model.embed_tokens |
1 | 0 | 0 |
attr |
MiniMaxM2Model.layers |
1 | 0 | 0 |
attr |
MiniMaxM2Model.norm |
1 | 0 | 0 |
attr |
MiniMaxM2Model.rotary_emb |
1 | 0 | 0 |
attr |
MiniMaxM2Model.gradient_checkpointing |
1 | 0 | 0 |
meth |
MiniMaxM2ForCausalLM.__init__ |
2 | 0 | 0 |
attr |
MiniMaxM2ForCausalLM.model |
1 | 0 | 0 |
attr |
MiniMaxM2ForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
MiniMaxM2ForCausalLM.lm_head |
1 | 0 | 0 |
attr |
MiniMaxM2ForCausalLM.router_aux_loss_coef |
1 | 0 | 0 |
attr |
MiniMaxM2ForCausalLM.num_experts |
1 | 0 | 0 |
attr |
MiniMaxM2ForCausalLM.num_experts_per_tok |
1 | 0 | 0 |
transformers.models.minimax_m2.modular_minimax_m2 (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MiniMaxM2PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
MiniMaxM2Config.__init__ |
25 | 23 | 0 |
attr |
MiniMaxM2Config.vocab_size |
1 | 0 | 0 |
attr |
MiniMaxM2Config.max_position_embeddings |
1 | 0 | 0 |
attr |
MiniMaxM2Config.hidden_size |
1 | 0 | 0 |
attr |
MiniMaxM2Config.intermediate_size |
1 | 0 | 0 |
attr |
MiniMaxM2Config.num_hidden_layers |
1 | 0 | 0 |
attr |
MiniMaxM2Config.num_attention_heads |
1 | 0 | 0 |
attr |
MiniMaxM2Config.num_key_value_heads |
1 | 0 | 0 |
attr |
MiniMaxM2Config.hidden_act |
1 | 0 | 0 |
attr |
MiniMaxM2Config.initializer_range |
1 | 0 | 0 |
attr |
MiniMaxM2Config.rms_norm_eps |
1 | 0 | 0 |
attr |
MiniMaxM2Config.use_cache |
1 | 0 | 0 |
attr |
MiniMaxM2Config.attention_dropout |
1 | 0 | 0 |
attr |
MiniMaxM2Config.head_dim |
1 | 0 | 0 |
attr |
MiniMaxM2Config.rope_parameters |
1 | 0 | 0 |
attr |
MiniMaxM2Config.num_experts_per_tok |
1 | 0 | 0 |
attr |
MiniMaxM2Config.num_local_experts |
1 | 0 | 0 |
attr |
MiniMaxM2Config.output_router_logits |
1 | 0 | 0 |
attr |
MiniMaxM2Config.router_aux_loss_coef |
1 | 0 | 0 |
attr |
MiniMaxM2Config.router_jitter_noise |
1 | 0 | 0 |
attr |
MiniMaxM2Config.pad_token_id |
1 | 0 | 0 |
attr |
MiniMaxM2Config.bos_token_id |
1 | 0 | 0 |
attr |
MiniMaxM2Config.eos_token_id |
1 | 0 | 0 |
attr |
MiniMaxM2Config.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.ministral.configuration_ministral (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MinistralConfig.__init__ |
22 | 20 | 0 |
attr |
MinistralConfig.pad_token_id |
1 | 0 | 0 |
attr |
MinistralConfig.bos_token_id |
1 | 0 | 0 |
attr |
MinistralConfig.eos_token_id |
1 | 0 | 0 |
attr |
MinistralConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
MinistralConfig.vocab_size |
1 | 0 | 0 |
attr |
MinistralConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
MinistralConfig.hidden_size |
1 | 0 | 0 |
attr |
MinistralConfig.intermediate_size |
1 | 0 | 0 |
attr |
MinistralConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
MinistralConfig.num_attention_heads |
1 | 0 | 0 |
attr |
MinistralConfig.sliding_window |
1 | 0 | 0 |
attr |
MinistralConfig.head_dim |
1 | 0 | 0 |
attr |
MinistralConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
MinistralConfig.hidden_act |
1 | 0 | 0 |
attr |
MinistralConfig.initializer_range |
1 | 0 | 0 |
attr |
MinistralConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
MinistralConfig.use_cache |
1 | 0 | 0 |
attr |
MinistralConfig.attention_dropout |
1 | 0 | 0 |
attr |
MinistralConfig.layer_types |
1 | 0 | 0 |
attr |
MinistralConfig.rope_parameters |
1 | 0 | 0 |
transformers.models.ministral.modeling_ministral (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MinistralModel.__init__ |
2 | 1 | 0 |
attr |
MinistralModel.padding_idx |
1 | 0 | 0 |
attr |
MinistralModel.vocab_size |
1 | 0 | 0 |
attr |
MinistralModel.embed_tokens |
1 | 0 | 0 |
attr |
MinistralModel.layers |
1 | 0 | 0 |
attr |
MinistralModel.norm |
1 | 0 | 0 |
attr |
MinistralModel.rotary_emb |
1 | 0 | 0 |
attr |
MinistralModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
MinistralForCausalLM.__init__ |
2 | 0 | 0 |
attr |
MinistralForCausalLM.model |
1 | 0 | 0 |
attr |
MinistralForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
MinistralForCausalLM.lm_head |
1 | 0 | 0 |
transformers.models.ministral.modular_ministral (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MinistralModel.__init__ |
2 | 1 | 0 |
meth |
MinistralConfig.__init__ |
22 | 20 | 0 |
attr |
MinistralConfig.pad_token_id |
1 | 0 | 0 |
attr |
MinistralConfig.bos_token_id |
1 | 0 | 0 |
attr |
MinistralConfig.eos_token_id |
1 | 0 | 0 |
attr |
MinistralConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
MinistralConfig.vocab_size |
1 | 0 | 0 |
attr |
MinistralConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
MinistralConfig.hidden_size |
1 | 0 | 0 |
attr |
MinistralConfig.intermediate_size |
1 | 0 | 0 |
attr |
MinistralConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
MinistralConfig.num_attention_heads |
1 | 0 | 0 |
attr |
MinistralConfig.sliding_window |
1 | 0 | 0 |
attr |
MinistralConfig.head_dim |
1 | 0 | 0 |
attr |
MinistralConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
MinistralConfig.hidden_act |
1 | 0 | 0 |
attr |
MinistralConfig.initializer_range |
1 | 0 | 0 |
attr |
MinistralConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
MinistralConfig.use_cache |
1 | 0 | 0 |
attr |
MinistralConfig.attention_dropout |
1 | 0 | 0 |
attr |
MinistralConfig.layer_types |
1 | 0 | 0 |
attr |
MinistralConfig.rope_parameters |
1 | 0 | 0 |
transformers.models.ministral3.configuration_ministral3 (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Ministral3Config.__init__ |
21 | 19 | 0 |
attr |
Ministral3Config.vocab_size |
1 | 0 | 0 |
attr |
Ministral3Config.max_position_embeddings |
1 | 0 | 0 |
attr |
Ministral3Config.hidden_size |
1 | 0 | 0 |
attr |
Ministral3Config.intermediate_size |
1 | 0 | 0 |
attr |
Ministral3Config.num_hidden_layers |
1 | 0 | 0 |
attr |
Ministral3Config.num_attention_heads |
1 | 0 | 0 |
attr |
Ministral3Config.sliding_window |
1 | 0 | 0 |
attr |
Ministral3Config.head_dim |
1 | 0 | 0 |
attr |
Ministral3Config.num_key_value_heads |
1 | 0 | 0 |
attr |
Ministral3Config.hidden_act |
1 | 0 | 0 |
attr |
Ministral3Config.initializer_range |
1 | 0 | 0 |
attr |
Ministral3Config.rms_norm_eps |
1 | 0 | 0 |
attr |
Ministral3Config.use_cache |
1 | 0 | 0 |
attr |
Ministral3Config.attention_dropout |
1 | 0 | 0 |
attr |
Ministral3Config.rope_parameters |
1 | 0 | 0 |
attr |
Ministral3Config.pad_token_id |
1 | 0 | 0 |
attr |
Ministral3Config.bos_token_id |
1 | 0 | 0 |
attr |
Ministral3Config.eos_token_id |
1 | 0 | 0 |
attr |
Ministral3Config.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.ministral3.modeling_ministral3 (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Ministral3ForCausalLM.__init__ |
2 | 0 | 0 |
attr |
Ministral3ForCausalLM.model |
1 | 0 | 0 |
attr |
Ministral3ForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Ministral3ForCausalLM.lm_head |
1 | 0 | 0 |
meth |
Ministral3Model.__init__ |
2 | 1 | 0 |
attr |
Ministral3Model.padding_idx |
1 | 0 | 0 |
attr |
Ministral3Model.vocab_size |
1 | 0 | 0 |
attr |
Ministral3Model.embed_tokens |
1 | 0 | 0 |
attr |
Ministral3Model.layers |
1 | 0 | 0 |
attr |
Ministral3Model.norm |
1 | 0 | 0 |
attr |
Ministral3Model.rotary_emb |
1 | 0 | 0 |
attr |
Ministral3Model.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.mistral.configuration_mistral (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MistralConfig.__init__ |
21 | 19 | 0 |
attr |
MistralConfig.vocab_size |
1 | 0 | 0 |
attr |
MistralConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
MistralConfig.hidden_size |
1 | 0 | 0 |
attr |
MistralConfig.intermediate_size |
1 | 0 | 0 |
attr |
MistralConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
MistralConfig.num_attention_heads |
1 | 0 | 0 |
attr |
MistralConfig.sliding_window |
1 | 0 | 0 |
attr |
MistralConfig.head_dim |
1 | 0 | 0 |
attr |
MistralConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
MistralConfig.hidden_act |
1 | 0 | 0 |
attr |
MistralConfig.initializer_range |
1 | 0 | 0 |
attr |
MistralConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
MistralConfig.use_cache |
1 | 0 | 0 |
attr |
MistralConfig.attention_dropout |
1 | 0 | 0 |
attr |
MistralConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
MistralConfig.pad_token_id |
1 | 0 | 0 |
attr |
MistralConfig.bos_token_id |
1 | 0 | 0 |
attr |
MistralConfig.eos_token_id |
1 | 0 | 0 |
attr |
MistralConfig.rope_parameters |
1 | 0 | 0 |
transformers.models.mistral.modeling_mistral (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MistralModel.__init__ |
2 | 1 | 0 |
attr |
MistralModel.padding_idx |
1 | 0 | 0 |
attr |
MistralModel.vocab_size |
1 | 0 | 0 |
attr |
MistralModel.embed_tokens |
1 | 0 | 0 |
attr |
MistralModel.layers |
1 | 0 | 0 |
attr |
MistralModel.norm |
1 | 0 | 0 |
attr |
MistralModel.rotary_emb |
1 | 0 | 0 |
attr |
MistralModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
MistralForCausalLM.__init__ |
2 | 0 | 0 |
attr |
MistralForCausalLM.model |
1 | 0 | 0 |
attr |
MistralForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
MistralForCausalLM.lm_head |
1 | 0 | 0 |
transformers.models.mistral3.configuration_mistral3 (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Mistral3Config.__init__ |
10 | 1 | 0 |
attr |
Mistral3Config.image_token_index |
1 | 0 | 0 |
attr |
Mistral3Config.projector_hidden_act |
1 | 0 | 0 |
attr |
Mistral3Config.vision_feature_layer |
1 | 0 | 0 |
attr |
Mistral3Config.vision_config |
1 | 0 | 0 |
attr |
Mistral3Config.text_config |
1 | 0 | 0 |
attr |
Mistral3Config.multimodal_projector_bias |
1 | 0 | 0 |
attr |
Mistral3Config.spatial_merge_size |
1 | 0 | 0 |
attr |
Mistral3Config.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.mistral3.modeling_mistral3 (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Mistral3ForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
Mistral3ForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Mistral3ForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Mistral3ForConditionalGeneration.prepare_inputs_for_generation |
10 | 0 | 0 |
attr |
Mistral3ForConditionalGeneration.model |
1 | 0 | 0 |
attr |
Mistral3ForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
Mistral3Model.__init__ |
2 | 1 | 0 |
meth |
Mistral3Model.get_input_embeddings |
1 | 0 | 0 |
meth |
Mistral3Model.set_input_embeddings |
2 | 0 | 0 |
meth |
Mistral3Model.get_placeholder_mask |
4 | 3 | 0 |
attr |
Mistral3Model.vision_tower |
1 | 0 | 0 |
attr |
Mistral3Model.multi_modal_projector |
1 | 0 | 0 |
attr |
Mistral3Model.language_model |
1 | 0 | 0 |
transformers.models.mixtral.configuration_mixtral (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MixtralConfig.__init__ |
26 | 24 | 0 |
attr |
MixtralConfig.vocab_size |
1 | 0 | 0 |
attr |
MixtralConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
MixtralConfig.hidden_size |
1 | 0 | 0 |
attr |
MixtralConfig.intermediate_size |
1 | 0 | 0 |
attr |
MixtralConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
MixtralConfig.num_attention_heads |
1 | 0 | 0 |
attr |
MixtralConfig.sliding_window |
1 | 0 | 0 |
attr |
MixtralConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
MixtralConfig.hidden_act |
1 | 0 | 0 |
attr |
MixtralConfig.initializer_range |
1 | 0 | 0 |
attr |
MixtralConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
MixtralConfig.use_cache |
1 | 0 | 0 |
attr |
MixtralConfig.attention_dropout |
1 | 0 | 0 |
attr |
MixtralConfig.head_dim |
1 | 0 | 0 |
attr |
MixtralConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
MixtralConfig.num_local_experts |
1 | 0 | 0 |
attr |
MixtralConfig.output_router_logits |
1 | 0 | 0 |
attr |
MixtralConfig.router_aux_loss_coef |
1 | 0 | 0 |
attr |
MixtralConfig.router_jitter_noise |
1 | 0 | 0 |
attr |
MixtralConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
MixtralConfig.pad_token_id |
1 | 0 | 0 |
attr |
MixtralConfig.bos_token_id |
1 | 0 | 0 |
attr |
MixtralConfig.eos_token_id |
1 | 0 | 0 |
attr |
MixtralConfig.rope_parameters |
1 | 0 | 0 |
transformers.models.mixtral.modeling_mixtral (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MixtralForCausalLM.__init__ |
2 | 0 | 0 |
attr |
MixtralForCausalLM.model |
1 | 0 | 0 |
attr |
MixtralForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
MixtralForCausalLM.lm_head |
1 | 0 | 0 |
attr |
MixtralForCausalLM.router_aux_loss_coef |
1 | 0 | 0 |
attr |
MixtralForCausalLM.num_experts |
1 | 0 | 0 |
attr |
MixtralForCausalLM.num_experts_per_tok |
1 | 0 | 0 |
meth |
MixtralPreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
MixtralPreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
MixtralModel.__init__ |
2 | 1 | 0 |
attr |
MixtralModel.padding_idx |
1 | 0 | 0 |
attr |
MixtralModel.vocab_size |
1 | 0 | 0 |
attr |
MixtralModel.embed_tokens |
1 | 0 | 0 |
attr |
MixtralModel.layers |
1 | 0 | 0 |
attr |
MixtralModel.norm |
1 | 0 | 0 |
attr |
MixtralModel.rotary_emb |
1 | 0 | 0 |
attr |
MixtralModel.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.mixtral.modular_mixtral (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MixtralForCausalLM.__init__ |
2 | 0 | 0 |
attr |
MixtralForCausalLM.model |
1 | 0 | 0 |
attr |
MixtralForCausalLM.router_aux_loss_coef |
1 | 0 | 0 |
attr |
MixtralForCausalLM.num_experts |
1 | 0 | 0 |
attr |
MixtralForCausalLM.num_experts_per_tok |
1 | 0 | 0 |
meth |
MixtralPreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
MixtralPreTrainedModel._can_record_outputs |
1 | 0 | 0 |
transformers.models.mlcd.configuration_mlcd (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MLCDVisionConfig.__init__ |
15 | 0 | 0 |
attr |
MLCDVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
MLCDVisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
MLCDVisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
MLCDVisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
MLCDVisionConfig.num_key_value_groups |
1 | 0 | 0 |
attr |
MLCDVisionConfig.num_channels |
1 | 0 | 0 |
attr |
MLCDVisionConfig.patch_size |
1 | 0 | 0 |
attr |
MLCDVisionConfig.image_size |
1 | 0 | 0 |
attr |
MLCDVisionConfig.initializer_range |
1 | 0 | 0 |
attr |
MLCDVisionConfig.initializer_factor |
1 | 0 | 0 |
attr |
MLCDVisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
MLCDVisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
MLCDVisionConfig.hidden_act |
1 | 0 | 0 |
transformers.models.mlcd.modeling_mlcd (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MLCDPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
MLCDVisionModel.init |
2 | 1 | 0 |
attr |
MLCDVisionModel.vision_model |
1 | 0 | 0 |
transformers.models.mlcd.modular_mlcd (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MLCDVisionConfig.__init__ |
15 | 0 | 0 |
attr |
MLCDVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
MLCDVisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
MLCDVisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
MLCDVisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
MLCDVisionConfig.num_key_value_groups |
1 | 0 | 0 |
attr |
MLCDVisionConfig.num_channels |
1 | 0 | 0 |
attr |
MLCDVisionConfig.patch_size |
1 | 0 | 0 |
attr |
MLCDVisionConfig.image_size |
1 | 0 | 0 |
attr |
MLCDVisionConfig.initializer_range |
1 | 0 | 0 |
attr |
MLCDVisionConfig.initializer_factor |
1 | 0 | 0 |
attr |
MLCDVisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
MLCDVisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
MLCDVisionConfig.hidden_act |
1 | 0 | 0 |
meth |
MLCDPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.mllama.configuration_mllama (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MllamaConfig.init |
5 | 0 | 0 |
attr |
MllamaConfig.image_token_index |
1 | 0 | 0 |
attr |
MllamaConfig.vision_config |
1 | 0 | 0 |
attr |
MllamaConfig.text_config |
1 | 0 | 0 |
transformers.models.mllama.image_processing_mllama (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MllamaImageProcessor.init |
13 | 12 | 0 |
meth |
MllamaImageProcessor.preprocess |
15 | 14 | 0 |
attr |
MllamaImageProcessor.do_convert_rgb |
1 | 0 | 0 |
attr |
MllamaImageProcessor.do_resize |
1 | 0 | 0 |
attr |
MllamaImageProcessor.size |
1 | 0 | 0 |
attr |
MllamaImageProcessor.resample |
1 | 0 | 0 |
attr |
MllamaImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
MllamaImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
MllamaImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
MllamaImageProcessor.image_mean |
1 | 0 | 0 |
attr |
MllamaImageProcessor.image_std |
1 | 0 | 0 |
attr |
MllamaImageProcessor.do_pad |
1 | 0 | 0 |
attr |
MllamaImageProcessor.max_image_tiles |
1 | 0 | 0 |
transformers.models.mllama.image_processing_mllama_fast (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MllamaImageProcessorFast.init |
2 | 1 | 0 |
meth |
MllamaImageProcessorFast._preprocess |
13 | 12 | 0 |
transformers.models.mllama.modeling_mllama (75 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MllamaVisionModel.init |
2 | 1 | 0 |
meth |
MllamaVisionModel.get_input_embeddings |
1 | 0 | 0 |
meth |
MllamaVisionModel.forward |
5 | 4 | 0 |
attr |
MllamaVisionModel.image_size |
1 | 0 | 0 |
attr |
MllamaVisionModel.patch_size |
1 | 0 | 0 |
attr |
MllamaVisionModel.max_num_tiles |
1 | 0 | 0 |
attr |
MllamaVisionModel.hidden_size |
1 | 0 | 0 |
attr |
MllamaVisionModel.num_channels |
1 | 0 | 0 |
attr |
MllamaVisionModel.intermediate_layers_indices |
1 | 0 | 0 |
attr |
MllamaVisionModel.num_patches |
1 | 0 | 0 |
attr |
MllamaVisionModel.scale |
1 | 0 | 0 |
attr |
MllamaVisionModel.patch_embedding |
1 | 0 | 0 |
attr |
MllamaVisionModel.class_embedding |
1 | 0 | 0 |
attr |
MllamaVisionModel.gated_positional_embedding |
1 | 0 | 0 |
attr |
MllamaVisionModel.pre_tile_positional_embedding |
1 | 0 | 0 |
attr |
MllamaVisionModel.post_tile_positional_embedding |
1 | 0 | 0 |
attr |
MllamaVisionModel.layernorm_pre |
1 | 0 | 0 |
attr |
MllamaVisionModel.layernorm_post |
1 | 0 | 0 |
attr |
MllamaVisionModel.transformer |
1 | 0 | 0 |
attr |
MllamaVisionModel.global_transformer |
1 | 0 | 0 |
meth |
MllamaPreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
MllamaPreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
MllamaForCausalLM.init |
2 | 0 | 0 |
attr |
MllamaForCausalLM.text_config |
1 | 0 | 0 |
attr |
MllamaForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
MllamaForCausalLM.model |
1 | 0 | 0 |
attr |
MllamaForCausalLM.lm_head |
1 | 0 | 0 |
meth |
MllamaTextModel.init |
2 | 1 | 0 |
attr |
MllamaTextModel.padding_idx |
1 | 0 | 0 |
attr |
MllamaTextModel.vocab_size |
1 | 0 | 0 |
attr |
MllamaTextModel.embed_tokens |
1 | 0 | 0 |
attr |
MllamaTextModel.cross_attention_layers |
1 | 0 | 0 |
attr |
MllamaTextModel.layers |
1 | 0 | 0 |
attr |
MllamaTextModel.norm |
1 | 0 | 0 |
attr |
MllamaTextModel.rotary_emb |
1 | 0 | 0 |
attr |
MllamaTextModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
MllamaModel.init |
2 | 1 | 0 |
meth |
MllamaModel.get_input_embeddings |
1 | 0 | 0 |
meth |
MllamaModel.set_input_embeddings |
2 | 0 | 0 |
attr |
MllamaModel.vocab_size |
1 | 0 | 0 |
attr |
MllamaModel.hidden_size |
1 | 0 | 0 |
attr |
MllamaModel.max_num_tiles |
1 | 0 | 0 |
attr |
MllamaModel.vision_output_dim |
1 | 0 | 0 |
attr |
MllamaModel.vision_model |
1 | 0 | 0 |
attr |
MllamaModel.language_model |
1 | 0 | 0 |
attr |
MllamaModel.multi_modal_projector |
1 | 0 | 0 |
meth |
MllamaForConditionalGeneration.init |
2 | 1 | 0 |
meth |
MllamaForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
MllamaForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
MllamaForConditionalGeneration.prepare_inputs_for_generation |
15 | 0 | 0 |
meth |
MllamaForConditionalGeneration._update_model_kwargs_for_generation |
5 | 0 | 0 |
attr |
MllamaForConditionalGeneration.model |
1 | 0 | 0 |
attr |
MllamaForConditionalGeneration.lm_head |
1 | 0 | 0 |
transformers.models.mllama.processing_mllama (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MllamaProcessor.init |
4 | 0 | 0 |
meth |
MllamaProcessor.post_process_image_text_to_text |
5 | 0 | 0 |
prop |
MllamaProcessor.model_input_names |
1 | 0 | 0 |
attr |
MllamaProcessor.python_token |
1 | 0 | 0 |
attr |
MllamaProcessor.python_token_id |
1 | 0 | 0 |
attr |
MllamaProcessor.bos_token |
1 | 0 | 0 |
attr |
MllamaProcessor.image_token |
1 | 0 | 0 |
attr |
MllamaProcessor.image_token_id |
1 | 0 | 0 |
transformers.models.mluke.tokenization_mluke (43 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MLukeTokenizer.init |
20 | 3 | 0 |
meth |
MLukeTokenizer._post_init |
1 | 0 | 0 |
meth |
MLukeTokenizer.get_vocab |
1 | 0 | 0 |
meth |
MLukeTokenizer._convert_token_to_id |
2 | 0 | 0 |
meth |
MLukeTokenizer._convert_id_to_token |
2 | 0 | 0 |
meth |
MLukeTokenizer.convert_tokens_to_string |
2 | 0 | 0 |
meth |
MLukeTokenizer.__call__ |
25 | 24 | 0 |
meth |
MLukeTokenizer._encode_plus |
25 | 24 | 0 |
meth |
MLukeTokenizer._batch_encode_plus |
22 | 21 | 0 |
meth |
MLukeTokenizer._check_entity_input_format |
3 | 2 | 0 |
meth |
MLukeTokenizer._create_input_sequence |
8 | 7 | 0 |
meth |
MLukeTokenizer.prepare_for_model |
25 | 24 | 0 |
prop |
MLukeTokenizer.vocab_size |
1 | 0 | 0 |
attr |
MLukeTokenizer.fairseq_tokens_to_ids |
1 | 0 | 0 |
attr |
MLukeTokenizer.fairseq_offset |
1 | 0 | 0 |
attr |
MLukeTokenizer.fairseq_ids_to_tokens |
1 | 0 | 0 |
attr |
MLukeTokenizer.entity_unk_token_id |
1 | 0 | 0 |
attr |
MLukeTokenizer.entity_pad_token_id |
1 | 0 | 0 |
attr |
MLukeTokenizer.entity_mask_token_id |
1 | 0 | 0 |
attr |
MLukeTokenizer.entity_mask2_token_id |
1 | 0 | 0 |
attr |
MLukeTokenizer.task |
1 | 0 | 0 |
attr |
MLukeTokenizer.max_mention_length |
1 | 0 | 0 |
attr |
MLukeTokenizer.entity_vocab |
1 | 0 | 0 |
attr |
MLukeTokenizer.max_entity_length |
1 | 0 | 0 |
transformers.models.mm_grounding_dino.configuration_mm_grounding_dino (77 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MMGroundingDinoConfig.init |
40 | 0 | 0 |
attr |
MMGroundingDinoConfig.backbone_config |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.num_queries |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.d_model |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.encoder_layers |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.decoder_layers |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.decoder_attention_heads |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.dropout |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.attention_dropout |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.activation_dropout |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.activation_function |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.auxiliary_loss |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.position_embedding_type |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.num_feature_levels |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.encoder_n_points |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.decoder_n_points |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.two_stage |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.class_cost |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.bbox_cost |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.giou_cost |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.bbox_loss_coefficient |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.giou_loss_coefficient |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.focal_alpha |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.disable_custom_kernels |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.text_config |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.max_text_len |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.text_enhancer_dropout |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.fusion_droppath |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.fusion_dropout |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.embedding_init_target |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.query_dim |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.positional_embedding_temperature |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.init_std |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.mm_grounding_dino.modeling_mm_grounding_dino (37 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MMGroundingDinoModel.init |
2 | 1 | 0 |
meth |
MMGroundingDinoModel.freeze_backbone |
1 | 0 | 0 |
meth |
MMGroundingDinoModel.unfreeze_backbone |
1 | 0 | 0 |
meth |
MMGroundingDinoModel.get_valid_ratio |
2 | 0 | 0 |
meth |
MMGroundingDinoModel.generate_encoder_output_proposals |
4 | 0 | 0 |
meth |
MMGroundingDinoModel.forward |
11 | 6 | 0 |
attr |
MMGroundingDinoModel.backbone |
1 | 0 | 0 |
attr |
MMGroundingDinoModel.input_proj_vision |
1 | 0 | 0 |
attr |
MMGroundingDinoModel.text_backbone |
1 | 0 | 0 |
attr |
MMGroundingDinoModel.text_projection |
1 | 0 | 0 |
attr |
MMGroundingDinoModel.encoder |
1 | 0 | 0 |
attr |
MMGroundingDinoModel.decoder |
1 | 0 | 0 |
attr |
MMGroundingDinoModel.level_embed |
1 | 0 | 0 |
attr |
MMGroundingDinoModel.enc_output |
1 | 0 | 0 |
attr |
MMGroundingDinoModel.enc_output_norm |
1 | 0 | 0 |
attr |
MMGroundingDinoModel.encoder_output_bbox_embed |
1 | 0 | 0 |
attr |
MMGroundingDinoModel.encoder_output_class_embed |
1 | 0 | 0 |
attr |
MMGroundingDinoModel.query_position_embeddings |
1 | 0 | 0 |
meth |
MMGroundingDinoForObjectDetection.init |
2 | 1 | 0 |
meth |
MMGroundingDinoForObjectDetection.forward |
12 | 10 | 0 |
attr |
MMGroundingDinoForObjectDetection.model |
1 | 0 | 0 |
attr |
MMGroundingDinoForObjectDetection.class_embed |
1 | 0 | 0 |
attr |
MMGroundingDinoForObjectDetection.bbox_embed |
1 | 0 | 0 |
meth |
MMGroundingDinoPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
MMGroundingDinoPreTrainedModel._set_gradient_checkpointing |
3 | 0 | 0 |
transformers.models.mm_grounding_dino.modular_mm_grounding_dino (96 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MMGroundingDinoConfig.init |
40 | 0 | 0 |
attr |
MMGroundingDinoConfig.backbone_config |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.num_queries |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.d_model |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.encoder_layers |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.decoder_layers |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.decoder_attention_heads |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.dropout |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.attention_dropout |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.activation_dropout |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.activation_function |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.auxiliary_loss |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.position_embedding_type |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.num_feature_levels |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.encoder_n_points |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.decoder_n_points |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.two_stage |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.class_cost |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.bbox_cost |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.giou_cost |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.bbox_loss_coefficient |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.giou_loss_coefficient |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.focal_alpha |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.disable_custom_kernels |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.text_config |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.max_text_len |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.text_enhancer_dropout |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.fusion_droppath |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.fusion_dropout |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.embedding_init_target |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.query_dim |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.positional_embedding_temperature |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.init_std |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
MMGroundingDinoConfig.tie_word_embeddings |
1 | 0 | 0 |
meth |
MMGroundingDinoModel.init |
2 | 1 | 0 |
attr |
MMGroundingDinoModel.backbone |
1 | 0 | 0 |
attr |
MMGroundingDinoModel.input_proj_vision |
1 | 0 | 0 |
attr |
MMGroundingDinoModel.text_backbone |
1 | 0 | 0 |
attr |
MMGroundingDinoModel.text_projection |
1 | 0 | 0 |
attr |
MMGroundingDinoModel.encoder |
1 | 0 | 0 |
attr |
MMGroundingDinoModel.decoder |
1 | 0 | 0 |
attr |
MMGroundingDinoModel.level_embed |
1 | 0 | 0 |
attr |
MMGroundingDinoModel.enc_output |
1 | 0 | 0 |
attr |
MMGroundingDinoModel.enc_output_norm |
1 | 0 | 0 |
attr |
MMGroundingDinoModel.encoder_output_bbox_embed |
1 | 0 | 0 |
attr |
MMGroundingDinoModel.encoder_output_class_embed |
1 | 0 | 0 |
attr |
MMGroundingDinoModel.query_position_embeddings |
1 | 0 | 0 |
meth |
MMGroundingDinoForObjectDetection.init |
2 | 1 | 0 |
attr |
MMGroundingDinoForObjectDetection.model |
1 | 0 | 0 |
attr |
MMGroundingDinoForObjectDetection.class_embed |
1 | 0 | 0 |
attr |
MMGroundingDinoForObjectDetection.bbox_embed |
1 | 0 | 0 |
meth |
MMGroundingDinoPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.mobilebert.configuration_mobilebert (51 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MobileBertConfig.init |
26 | 0 | 0 |
attr |
MobileBertConfig.pad_token_id |
1 | 0 | 0 |
attr |
MobileBertConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
MobileBertConfig.vocab_size |
1 | 0 | 0 |
attr |
MobileBertConfig.hidden_size |
1 | 0 | 0 |
attr |
MobileBertConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
MobileBertConfig.num_attention_heads |
1 | 0 | 0 |
attr |
MobileBertConfig.hidden_act |
1 | 0 | 0 |
attr |
MobileBertConfig.intermediate_size |
1 | 0 | 0 |
attr |
MobileBertConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
MobileBertConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
MobileBertConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
MobileBertConfig.type_vocab_size |
1 | 0 | 0 |
attr |
MobileBertConfig.initializer_range |
1 | 0 | 0 |
attr |
MobileBertConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
MobileBertConfig.embedding_size |
1 | 0 | 0 |
attr |
MobileBertConfig.trigram_input |
1 | 0 | 0 |
attr |
MobileBertConfig.use_bottleneck |
1 | 0 | 0 |
attr |
MobileBertConfig.intra_bottleneck_size |
1 | 0 | 0 |
attr |
MobileBertConfig.use_bottleneck_attention |
1 | 0 | 0 |
attr |
MobileBertConfig.key_query_shared_bottleneck |
1 | 0 | 0 |
attr |
MobileBertConfig.num_feedforward_networks |
1 | 0 | 0 |
attr |
MobileBertConfig.normalization_type |
1 | 0 | 0 |
attr |
MobileBertConfig.classifier_activation |
1 | 0 | 0 |
attr |
MobileBertConfig.classifier_dropout |
1 | 0 | 0 |
attr |
MobileBertConfig.true_hidden_size |
1 | 0 | 0 |
transformers.models.mobilebert.modeling_mobilebert (61 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MobileBertForNextSentencePrediction.init |
2 | 0 | 0 |
attr |
MobileBertForNextSentencePrediction.mobilebert |
1 | 0 | 0 |
attr |
MobileBertForNextSentencePrediction.cls |
1 | 0 | 0 |
meth |
MobileBertForPreTraining.init |
2 | 0 | 0 |
meth |
MobileBertForPreTraining.get_output_embeddings |
1 | 0 | 0 |
meth |
MobileBertForPreTraining.set_output_embeddings |
2 | 0 | 0 |
attr |
MobileBertForPreTraining.mobilebert |
1 | 0 | 0 |
attr |
MobileBertForPreTraining.cls |
1 | 0 | 0 |
meth |
MobileBertForQuestionAnswering.init |
2 | 0 | 0 |
attr |
MobileBertForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
MobileBertForQuestionAnswering.mobilebert |
1 | 0 | 0 |
attr |
MobileBertForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
MobileBertForMultipleChoice.init |
2 | 0 | 0 |
attr |
MobileBertForMultipleChoice.mobilebert |
1 | 0 | 0 |
attr |
MobileBertForMultipleChoice.dropout |
1 | 0 | 0 |
attr |
MobileBertForMultipleChoice.classifier |
1 | 0 | 0 |
meth |
MobileBertPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
MobileBertLayer.init |
2 | 0 | 0 |
attr |
MobileBertLayer.use_bottleneck |
1 | 0 | 0 |
attr |
MobileBertLayer.num_feedforward_networks |
1 | 0 | 0 |
attr |
MobileBertLayer.attention |
1 | 0 | 0 |
attr |
MobileBertLayer.intermediate |
1 | 0 | 0 |
attr |
MobileBertLayer.output |
1 | 0 | 0 |
attr |
MobileBertLayer.bottleneck |
1 | 0 | 0 |
attr |
MobileBertLayer.ffn |
1 | 0 | 0 |
meth |
MobileBertForSequenceClassification.init |
2 | 0 | 0 |
attr |
MobileBertForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
MobileBertForSequenceClassification.mobilebert |
1 | 0 | 0 |
attr |
MobileBertForSequenceClassification.dropout |
1 | 0 | 0 |
attr |
MobileBertForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
MobileBertModel.init |
3 | 0 | 0 |
meth |
MobileBertModel.get_input_embeddings |
1 | 0 | 0 |
meth |
MobileBertModel.set_input_embeddings |
2 | 0 | 0 |
attr |
MobileBertModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
MobileBertModel.embeddings |
1 | 0 | 0 |
attr |
MobileBertModel.encoder |
1 | 0 | 0 |
attr |
MobileBertModel.pooler |
1 | 0 | 0 |
meth |
MobileBertForTokenClassification.init |
2 | 0 | 0 |
attr |
MobileBertForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
MobileBertForTokenClassification.mobilebert |
1 | 0 | 0 |
attr |
MobileBertForTokenClassification.dropout |
1 | 0 | 0 |
attr |
MobileBertForTokenClassification.classifier |
1 | 0 | 0 |
meth |
MobileBertForMaskedLM.init |
2 | 0 | 0 |
meth |
MobileBertForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
MobileBertForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
attr |
MobileBertForMaskedLM.mobilebert |
1 | 0 | 0 |
attr |
MobileBertForMaskedLM.cls |
1 | 0 | 0 |
transformers.models.mobilenet_v1.configuration_mobilenet_v1 (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MobileNetV1Config.init |
11 | 0 | 0 |
attr |
MobileNetV1Config.num_channels |
1 | 0 | 0 |
attr |
MobileNetV1Config.image_size |
1 | 0 | 0 |
attr |
MobileNetV1Config.depth_multiplier |
1 | 0 | 0 |
attr |
MobileNetV1Config.min_depth |
1 | 0 | 0 |
attr |
MobileNetV1Config.hidden_act |
1 | 0 | 0 |
attr |
MobileNetV1Config.tf_padding |
1 | 0 | 0 |
attr |
MobileNetV1Config.classifier_dropout_prob |
1 | 0 | 0 |
attr |
MobileNetV1Config.initializer_range |
1 | 0 | 0 |
attr |
MobileNetV1Config.layer_norm_eps |
1 | 0 | 0 |
transformers.models.mobilenet_v1.image_processing_mobilenet_v1 (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MobileNetV1ImageProcessor.init |
12 | 11 | 0 |
meth |
MobileNetV1ImageProcessor.resize |
7 | 6 | 0 |
meth |
MobileNetV1ImageProcessor.preprocess |
15 | 14 | 0 |
attr |
MobileNetV1ImageProcessor.do_resize |
1 | 0 | 0 |
attr |
MobileNetV1ImageProcessor.size |
1 | 0 | 0 |
attr |
MobileNetV1ImageProcessor.resample |
1 | 0 | 0 |
attr |
MobileNetV1ImageProcessor.do_center_crop |
1 | 0 | 0 |
attr |
MobileNetV1ImageProcessor.crop_size |
1 | 0 | 0 |
attr |
MobileNetV1ImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
MobileNetV1ImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
MobileNetV1ImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
MobileNetV1ImageProcessor.image_mean |
1 | 0 | 0 |
attr |
MobileNetV1ImageProcessor.image_std |
1 | 0 | 0 |
transformers.models.mobilenet_v1.modeling_mobilenet_v1 (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MobileNetV1Model.init |
3 | 2 | 0 |
meth |
MobileNetV1Model.forward |
5 | 4 | 0 |
attr |
MobileNetV1Model.conv_stem |
1 | 0 | 0 |
attr |
MobileNetV1Model.layer |
1 | 0 | 0 |
attr |
MobileNetV1Model.pooler |
1 | 0 | 0 |
meth |
MobileNetV1ForImageClassification.forward |
6 | 5 | 0 |
attr |
MobileNetV1ForImageClassification.num_labels |
1 | 0 | 0 |
attr |
MobileNetV1ForImageClassification.mobilenet_v1 |
1 | 0 | 0 |
attr |
MobileNetV1ForImageClassification.dropout |
1 | 0 | 0 |
attr |
MobileNetV1ForImageClassification.classifier |
1 | 0 | 0 |
transformers.models.mobilenet_v2.configuration_mobilenet_v2 (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MobileNetV2Config.init |
17 | 0 | 0 |
attr |
MobileNetV2Config.num_channels |
1 | 0 | 0 |
attr |
MobileNetV2Config.image_size |
1 | 0 | 0 |
attr |
MobileNetV2Config.depth_multiplier |
1 | 0 | 0 |
attr |
MobileNetV2Config.depth_divisible_by |
1 | 0 | 0 |
attr |
MobileNetV2Config.min_depth |
1 | 0 | 0 |
attr |
MobileNetV2Config.expand_ratio |
1 | 0 | 0 |
attr |
MobileNetV2Config.output_stride |
1 | 0 | 0 |
attr |
MobileNetV2Config.first_layer_is_expansion |
1 | 0 | 0 |
attr |
MobileNetV2Config.finegrained_output |
1 | 0 | 0 |
attr |
MobileNetV2Config.hidden_act |
1 | 0 | 0 |
attr |
MobileNetV2Config.tf_padding |
1 | 0 | 0 |
attr |
MobileNetV2Config.classifier_dropout_prob |
1 | 0 | 0 |
attr |
MobileNetV2Config.initializer_range |
1 | 0 | 0 |
attr |
MobileNetV2Config.layer_norm_eps |
1 | 0 | 0 |
attr |
MobileNetV2Config.semantic_loss_ignore_index |
1 | 0 | 0 |
transformers.models.mobilenet_v2.image_processing_mobilenet_v2 (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MobileNetV2ImageProcessor.init |
13 | 12 | 0 |
meth |
MobileNetV2ImageProcessor.resize |
7 | 6 | 0 |
meth |
MobileNetV2ImageProcessor.__call__ |
4 | 0 | 0 |
meth |
MobileNetV2ImageProcessor._preprocess |
14 | 13 | 0 |
meth |
MobileNetV2ImageProcessor.preprocess |
17 | 16 | 0 |
meth |
MobileNetV2ImageProcessor.post_process_semantic_segmentation |
3 | 1 | 0 |
attr |
MobileNetV2ImageProcessor.do_resize |
1 | 0 | 0 |
attr |
MobileNetV2ImageProcessor.size |
1 | 0 | 0 |
attr |
MobileNetV2ImageProcessor.resample |
1 | 0 | 0 |
attr |
MobileNetV2ImageProcessor.do_center_crop |
1 | 0 | 0 |
attr |
MobileNetV2ImageProcessor.crop_size |
1 | 0 | 0 |
attr |
MobileNetV2ImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
MobileNetV2ImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
MobileNetV2ImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
MobileNetV2ImageProcessor.image_mean |
1 | 0 | 0 |
attr |
MobileNetV2ImageProcessor.image_std |
1 | 0 | 0 |
attr |
MobileNetV2ImageProcessor.do_reduce_labels |
1 | 0 | 0 |
transformers.models.mobilenet_v2.image_processing_mobilenet_v2_fast (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MobileNetV2ImageProcessorFast.init |
2 | 1 | 0 |
meth |
MobileNetV2ImageProcessorFast.reduce_label |
2 | 1 | 0 |
meth |
MobileNetV2ImageProcessorFast._preprocess |
16 | 15 | 0 |
meth |
MobileNetV2ImageProcessorFast.post_process_semantic_segmentation |
3 | 1 | 0 |
transformers.models.mobilenet_v2.modeling_mobilenet_v2 (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MobileNetV2ForImageClassification.forward |
6 | 5 | 0 |
attr |
MobileNetV2ForImageClassification.num_labels |
1 | 0 | 0 |
attr |
MobileNetV2ForImageClassification.mobilenet_v2 |
1 | 0 | 0 |
attr |
MobileNetV2ForImageClassification.dropout |
1 | 0 | 0 |
attr |
MobileNetV2ForImageClassification.classifier |
1 | 0 | 0 |
meth |
MobileNetV2Model.init |
3 | 2 | 0 |
meth |
MobileNetV2Model.forward |
5 | 4 | 0 |
attr |
MobileNetV2Model.conv_stem |
1 | 0 | 0 |
attr |
MobileNetV2Model.layer |
1 | 0 | 0 |
attr |
MobileNetV2Model.conv_1x1 |
1 | 0 | 0 |
attr |
MobileNetV2Model.pooler |
1 | 0 | 0 |
meth |
MobileNetV2ForSemanticSegmentation.forward |
6 | 5 | 0 |
attr |
MobileNetV2ForSemanticSegmentation.num_labels |
1 | 0 | 0 |
attr |
MobileNetV2ForSemanticSegmentation.mobilenet_v2 |
1 | 0 | 0 |
attr |
MobileNetV2ForSemanticSegmentation.segmentation_head |
1 | 0 | 0 |
transformers.models.mobilevit.configuration_mobilevit (44 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MobileViTConfig.init |
23 | 0 | 0 |
attr |
MobileViTConfig.num_channels |
1 | 0 | 0 |
attr |
MobileViTConfig.image_size |
1 | 0 | 0 |
attr |
MobileViTConfig.patch_size |
1 | 0 | 0 |
attr |
MobileViTConfig.hidden_sizes |
1 | 0 | 0 |
attr |
MobileViTConfig.neck_hidden_sizes |
1 | 0 | 0 |
attr |
MobileViTConfig.num_attention_heads |
1 | 0 | 0 |
attr |
MobileViTConfig.mlp_ratio |
1 | 0 | 0 |
attr |
MobileViTConfig.expand_ratio |
1 | 0 | 0 |
attr |
MobileViTConfig.hidden_act |
1 | 0 | 0 |
attr |
MobileViTConfig.conv_kernel_size |
1 | 0 | 0 |
attr |
MobileViTConfig.output_stride |
1 | 0 | 0 |
attr |
MobileViTConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
MobileViTConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
MobileViTConfig.classifier_dropout_prob |
1 | 0 | 0 |
attr |
MobileViTConfig.initializer_range |
1 | 0 | 0 |
attr |
MobileViTConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
MobileViTConfig.qkv_bias |
1 | 0 | 0 |
attr |
MobileViTConfig.aspp_out_channels |
1 | 0 | 0 |
attr |
MobileViTConfig.atrous_rates |
1 | 0 | 0 |
attr |
MobileViTConfig.aspp_dropout_prob |
1 | 0 | 0 |
attr |
MobileViTConfig.semantic_loss_ignore_index |
1 | 0 | 0 |
transformers.models.mobilevit.image_processing_mobilevit (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MobileViTImageProcessor.init |
11 | 10 | 0 |
meth |
MobileViTImageProcessor.resize |
7 | 6 | 0 |
meth |
MobileViTImageProcessor.__call__ |
4 | 0 | 0 |
meth |
MobileViTImageProcessor._preprocess |
12 | 11 | 0 |
meth |
MobileViTImageProcessor.post_process_semantic_segmentation |
3 | 1 | 0 |
attr |
MobileViTImageProcessor.do_resize |
1 | 0 | 0 |
attr |
MobileViTImageProcessor.size |
1 | 0 | 0 |
attr |
MobileViTImageProcessor.resample |
1 | 0 | 0 |
attr |
MobileViTImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
MobileViTImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
MobileViTImageProcessor.do_center_crop |
1 | 0 | 0 |
attr |
MobileViTImageProcessor.crop_size |
1 | 0 | 0 |
attr |
MobileViTImageProcessor.do_flip_channel_order |
1 | 0 | 0 |
attr |
MobileViTImageProcessor.do_reduce_labels |
1 | 0 | 0 |
transformers.models.mobilevit.image_processing_mobilevit_fast (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MobileViTImageProcessorFast.init |
2 | 1 | 0 |
meth |
MobileViTImageProcessorFast.reduce_label |
2 | 1 | 0 |
meth |
MobileViTImageProcessorFast._preprocess |
14 | 13 | 0 |
meth |
MobileViTImageProcessorFast.post_process_semantic_segmentation |
3 | 1 | 0 |
transformers.models.mobilevit.modeling_mobilevit (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MobileViTForImageClassification.forward |
6 | 5 | 0 |
attr |
MobileViTForImageClassification.num_labels |
1 | 0 | 0 |
attr |
MobileViTForImageClassification.mobilevit |
1 | 0 | 0 |
attr |
MobileViTForImageClassification.dropout |
1 | 0 | 0 |
attr |
MobileViTForImageClassification.classifier |
1 | 0 | 0 |
meth |
MobileViTModel.init |
3 | 2 | 0 |
meth |
MobileViTModel.forward |
5 | 4 | 0 |
attr |
MobileViTModel.expand_output |
1 | 0 | 0 |
attr |
MobileViTModel.conv_stem |
1 | 0 | 0 |
attr |
MobileViTModel.encoder |
1 | 0 | 0 |
attr |
MobileViTModel.conv_1x1_exp |
1 | 0 | 0 |
meth |
MobileViTForSemanticSegmentation.forward |
6 | 5 | 0 |
attr |
MobileViTForSemanticSegmentation.num_labels |
1 | 0 | 0 |
attr |
MobileViTForSemanticSegmentation.mobilevit |
1 | 0 | 0 |
attr |
MobileViTForSemanticSegmentation.segmentation_head |
1 | 0 | 0 |
transformers.models.mobilevitv2.configuration_mobilevitv2 (42 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MobileViTV2Config.init |
22 | 0 | 0 |
attr |
MobileViTV2Config.num_channels |
1 | 0 | 0 |
attr |
MobileViTV2Config.image_size |
1 | 0 | 0 |
attr |
MobileViTV2Config.patch_size |
1 | 0 | 0 |
attr |
MobileViTV2Config.expand_ratio |
1 | 0 | 0 |
attr |
MobileViTV2Config.hidden_act |
1 | 0 | 0 |
attr |
MobileViTV2Config.conv_kernel_size |
1 | 0 | 0 |
attr |
MobileViTV2Config.output_stride |
1 | 0 | 0 |
attr |
MobileViTV2Config.initializer_range |
1 | 0 | 0 |
attr |
MobileViTV2Config.layer_norm_eps |
1 | 0 | 0 |
attr |
MobileViTV2Config.n_attn_blocks |
1 | 0 | 0 |
attr |
MobileViTV2Config.base_attn_unit_dims |
1 | 0 | 0 |
attr |
MobileViTV2Config.width_multiplier |
1 | 0 | 0 |
attr |
MobileViTV2Config.ffn_multiplier |
1 | 0 | 0 |
attr |
MobileViTV2Config.ffn_dropout |
1 | 0 | 0 |
attr |
MobileViTV2Config.attn_dropout |
1 | 0 | 0 |
attr |
MobileViTV2Config.classifier_dropout_prob |
1 | 0 | 0 |
attr |
MobileViTV2Config.aspp_out_channels |
1 | 0 | 0 |
attr |
MobileViTV2Config.atrous_rates |
1 | 0 | 0 |
attr |
MobileViTV2Config.aspp_dropout_prob |
1 | 0 | 0 |
attr |
MobileViTV2Config.semantic_loss_ignore_index |
1 | 0 | 0 |
transformers.models.mobilevitv2.modeling_mobilevitv2 (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MobileViTV2ForImageClassification.forward |
6 | 5 | 0 |
attr |
MobileViTV2ForImageClassification.num_labels |
1 | 0 | 0 |
attr |
MobileViTV2ForImageClassification.mobilevitv2 |
1 | 0 | 0 |
attr |
MobileViTV2ForImageClassification.classifier |
1 | 0 | 0 |
meth |
MobileViTV2Model.init |
3 | 2 | 0 |
meth |
MobileViTV2Model.forward |
5 | 4 | 0 |
attr |
MobileViTV2Model.expand_output |
1 | 0 | 0 |
attr |
MobileViTV2Model.conv_stem |
1 | 0 | 0 |
attr |
MobileViTV2Model.encoder |
1 | 0 | 0 |
meth |
MobileViTV2ForSemanticSegmentation.forward |
6 | 5 | 0 |
attr |
MobileViTV2ForSemanticSegmentation.num_labels |
1 | 0 | 0 |
attr |
MobileViTV2ForSemanticSegmentation.mobilevitv2 |
1 | 0 | 0 |
attr |
MobileViTV2ForSemanticSegmentation.segmentation_head |
1 | 0 | 0 |
transformers.models.modernbert.configuration_modernbert (46 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ModernBertConfig.__setattr__ |
3 | 0 | 0 |
meth |
ModernBertConfig.init |
36 | 34 | 0 |
meth |
ModernBertConfig.convert_rope_params_to_dict |
3 | 0 | 0 |
meth |
ModernBertConfig.to_dict |
1 | 0 | 0 |
prop |
ModernBertConfig.sliding_window |
2 | 0 | 0 |
attr |
ModernBertConfig.pad_token_id |
1 | 0 | 0 |
attr |
ModernBertConfig.bos_token_id |
1 | 0 | 0 |
attr |
ModernBertConfig.eos_token_id |
1 | 0 | 0 |
attr |
ModernBertConfig.cls_token_id |
1 | 0 | 0 |
attr |
ModernBertConfig.sep_token_id |
1 | 0 | 0 |
attr |
ModernBertConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
ModernBertConfig.vocab_size |
1 | 0 | 0 |
attr |
ModernBertConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
ModernBertConfig.hidden_size |
1 | 0 | 0 |
attr |
ModernBertConfig.intermediate_size |
1 | 0 | 0 |
attr |
ModernBertConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
ModernBertConfig.num_attention_heads |
1 | 0 | 0 |
attr |
ModernBertConfig.initializer_range |
1 | 0 | 0 |
attr |
ModernBertConfig.initializer_cutoff_factor |
1 | 0 | 0 |
attr |
ModernBertConfig.norm_eps |
1 | 0 | 0 |
attr |
ModernBertConfig.norm_bias |
1 | 0 | 0 |
attr |
ModernBertConfig.attention_bias |
1 | 0 | 0 |
attr |
ModernBertConfig.attention_dropout |
1 | 0 | 0 |
attr |
ModernBertConfig.hidden_activation |
1 | 0 | 0 |
attr |
ModernBertConfig.local_attention |
1 | 0 | 0 |
attr |
ModernBertConfig.embedding_dropout |
1 | 0 | 0 |
attr |
ModernBertConfig.mlp_bias |
1 | 0 | 0 |
attr |
ModernBertConfig.mlp_dropout |
1 | 0 | 0 |
attr |
ModernBertConfig.decoder_bias |
1 | 0 | 0 |
attr |
ModernBertConfig.classifier_pooling |
1 | 0 | 0 |
attr |
ModernBertConfig.classifier_dropout |
1 | 0 | 0 |
attr |
ModernBertConfig.classifier_bias |
1 | 0 | 0 |
attr |
ModernBertConfig.classifier_activation |
1 | 0 | 0 |
attr |
ModernBertConfig.deterministic_flash_attn |
1 | 0 | 0 |
attr |
ModernBertConfig.sparse_prediction |
1 | 0 | 0 |
attr |
ModernBertConfig.sparse_pred_ignore_index |
1 | 0 | 0 |
attr |
ModernBertConfig.reference_compile |
1 | 0 | 0 |
attr |
ModernBertConfig.layer_types |
1 | 0 | 0 |
attr |
ModernBertConfig.global_attn_every_n_layers |
1 | 0 | 0 |
attr |
ModernBertConfig.rope_parameters |
1 | 0 | 0 |
transformers.models.modernbert.modeling_modernbert (41 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ModernBertForTokenClassification.__init__ |
2 | 1 | 0 |
attr |
ModernBertForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
ModernBertForTokenClassification.model |
1 | 0 | 0 |
attr |
ModernBertForTokenClassification.head |
1 | 0 | 0 |
attr |
ModernBertForTokenClassification.drop |
1 | 0 | 0 |
attr |
ModernBertForTokenClassification.classifier |
1 | 0 | 0 |
meth |
ModernBertForMultipleChoice.__init__ |
2 | 1 | 0 |
attr |
ModernBertForMultipleChoice.model |
1 | 0 | 0 |
attr |
ModernBertForMultipleChoice.head |
1 | 0 | 0 |
attr |
ModernBertForMultipleChoice.drop |
1 | 0 | 0 |
attr |
ModernBertForMultipleChoice.classifier |
1 | 0 | 0 |
meth |
ModernBertPreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
ModernBertModel.__init__ |
2 | 1 | 0 |
meth |
ModernBertModel.get_input_embeddings |
1 | 0 | 0 |
meth |
ModernBertModel.set_input_embeddings |
2 | 0 | 0 |
attr |
ModernBertModel.embeddings |
1 | 0 | 0 |
attr |
ModernBertModel.layers |
1 | 0 | 0 |
attr |
ModernBertModel.final_norm |
1 | 0 | 0 |
attr |
ModernBertModel.rotary_emb |
1 | 0 | 0 |
attr |
ModernBertModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
ModernBertForSequenceClassification.__init__ |
2 | 1 | 0 |
attr |
ModernBertForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
ModernBertForSequenceClassification.model |
1 | 0 | 0 |
attr |
ModernBertForSequenceClassification.head |
1 | 0 | 0 |
attr |
ModernBertForSequenceClassification.drop |
1 | 0 | 0 |
attr |
ModernBertForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
ModernBertForMaskedLM.__init__ |
2 | 1 | 0 |
meth |
ModernBertForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
ModernBertForMaskedLM.set_output_embeddings |
2 | 1 | 0 |
attr |
ModernBertForMaskedLM.model |
1 | 0 | 0 |
attr |
ModernBertForMaskedLM.head |
1 | 0 | 0 |
attr |
ModernBertForMaskedLM.decoder |
1 | 0 | 0 |
attr |
ModernBertForMaskedLM.sparse_prediction |
1 | 0 | 0 |
attr |
ModernBertForMaskedLM.sparse_pred_ignore_index |
1 | 0 | 0 |
meth |
ModernBertForQuestionAnswering.__init__ |
2 | 1 | 0 |
attr |
ModernBertForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
ModernBertForQuestionAnswering.model |
1 | 0 | 0 |
attr |
ModernBertForQuestionAnswering.head |
1 | 0 | 0 |
attr |
ModernBertForQuestionAnswering.drop |
1 | 0 | 0 |
attr |
ModernBertForQuestionAnswering.classifier |
1 | 0 | 0 |
transformers.models.modernbert.modular_modernbert (87 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ModernBertForTokenClassification.__init__ |
2 | 1 | 0 |
attr |
ModernBertForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
ModernBertForTokenClassification.model |
1 | 0 | 0 |
attr |
ModernBertForTokenClassification.head |
1 | 0 | 0 |
attr |
ModernBertForTokenClassification.drop |
1 | 0 | 0 |
attr |
ModernBertForTokenClassification.classifier |
1 | 0 | 0 |
meth |
ModernBertPreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
ModernBertModel.__init__ |
2 | 1 | 0 |
meth |
ModernBertModel.get_input_embeddings |
1 | 0 | 0 |
meth |
ModernBertModel.set_input_embeddings |
2 | 0 | 0 |
attr |
ModernBertModel.embeddings |
1 | 0 | 0 |
attr |
ModernBertModel.layers |
1 | 0 | 0 |
attr |
ModernBertModel.final_norm |
1 | 0 | 0 |
attr |
ModernBertModel.rotary_emb |
1 | 0 | 0 |
attr |
ModernBertModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
ModernBertForSequenceClassification.__init__ |
2 | 1 | 0 |
attr |
ModernBertForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
ModernBertForSequenceClassification.model |
1 | 0 | 0 |
attr |
ModernBertForSequenceClassification.head |
1 | 0 | 0 |
attr |
ModernBertForSequenceClassification.drop |
1 | 0 | 0 |
attr |
ModernBertForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
ModernBertForQuestionAnswering.__init__ |
2 | 1 | 0 |
attr |
ModernBertForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
ModernBertForQuestionAnswering.model |
1 | 0 | 0 |
attr |
ModernBertForQuestionAnswering.head |
1 | 0 | 0 |
attr |
ModernBertForQuestionAnswering.drop |
1 | 0 | 0 |
attr |
ModernBertForQuestionAnswering.classifier |
1 | 0 | 0 |
meth |
ModernBertForMultipleChoice.__init__ |
2 | 1 | 0 |
attr |
ModernBertForMultipleChoice.model |
1 | 0 | 0 |
attr |
ModernBertForMultipleChoice.head |
1 | 0 | 0 |
attr |
ModernBertForMultipleChoice.drop |
1 | 0 | 0 |
attr |
ModernBertForMultipleChoice.classifier |
1 | 0 | 0 |
meth |
ModernBertConfig.__setattr__ |
3 | 0 | 0 |
meth |
ModernBertConfig.__init__ |
36 | 34 | 0 |
meth |
ModernBertConfig.convert_rope_params_to_dict |
3 | 0 | 0 |
meth |
ModernBertConfig.to_dict |
1 | 0 | 0 |
prop |
ModernBertConfig.sliding_window |
2 | 0 | 0 |
attr |
ModernBertConfig.pad_token_id |
1 | 0 | 0 |
attr |
ModernBertConfig.bos_token_id |
1 | 0 | 0 |
attr |
ModernBertConfig.eos_token_id |
1 | 0 | 0 |
attr |
ModernBertConfig.cls_token_id |
1 | 0 | 0 |
attr |
ModernBertConfig.sep_token_id |
1 | 0 | 0 |
attr |
ModernBertConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
ModernBertConfig.vocab_size |
1 | 0 | 0 |
attr |
ModernBertConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
ModernBertConfig.hidden_size |
1 | 0 | 0 |
attr |
ModernBertConfig.intermediate_size |
1 | 0 | 0 |
attr |
ModernBertConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
ModernBertConfig.num_attention_heads |
1 | 0 | 0 |
attr |
ModernBertConfig.initializer_range |
1 | 0 | 0 |
attr |
ModernBertConfig.initializer_cutoff_factor |
1 | 0 | 0 |
attr |
ModernBertConfig.norm_eps |
1 | 0 | 0 |
attr |
ModernBertConfig.norm_bias |
1 | 0 | 0 |
attr |
ModernBertConfig.attention_bias |
1 | 0 | 0 |
attr |
ModernBertConfig.attention_dropout |
1 | 0 | 0 |
attr |
ModernBertConfig.hidden_activation |
1 | 0 | 0 |
attr |
ModernBertConfig.local_attention |
1 | 0 | 0 |
attr |
ModernBertConfig.embedding_dropout |
1 | 0 | 0 |
attr |
ModernBertConfig.mlp_bias |
1 | 0 | 0 |
attr |
ModernBertConfig.mlp_dropout |
1 | 0 | 0 |
attr |
ModernBertConfig.decoder_bias |
1 | 0 | 0 |
attr |
ModernBertConfig.classifier_pooling |
1 | 0 | 0 |
attr |
ModernBertConfig.classifier_dropout |
1 | 0 | 0 |
attr |
ModernBertConfig.classifier_bias |
1 | 0 | 0 |
attr |
ModernBertConfig.classifier_activation |
1 | 0 | 0 |
attr |
ModernBertConfig.deterministic_flash_attn |
1 | 0 | 0 |
attr |
ModernBertConfig.sparse_prediction |
1 | 0 | 0 |
attr |
ModernBertConfig.sparse_pred_ignore_index |
1 | 0 | 0 |
attr |
ModernBertConfig.reference_compile |
1 | 0 | 0 |
attr |
ModernBertConfig.layer_types |
1 | 0 | 0 |
attr |
ModernBertConfig.global_attn_every_n_layers |
1 | 0 | 0 |
attr |
ModernBertConfig.rope_parameters |
1 | 0 | 0 |
meth |
ModernBertForMaskedLM.__init__ |
2 | 1 | 0 |
meth |
ModernBertForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
ModernBertForMaskedLM.set_output_embeddings |
2 | 1 | 0 |
attr |
ModernBertForMaskedLM.model |
1 | 0 | 0 |
attr |
ModernBertForMaskedLM.head |
1 | 0 | 0 |
attr |
ModernBertForMaskedLM.decoder |
1 | 0 | 0 |
attr |
ModernBertForMaskedLM.sparse_prediction |
1 | 0 | 0 |
attr |
ModernBertForMaskedLM.sparse_pred_ignore_index |
1 | 0 | 0 |
transformers.models.modernbert_decoder.configuration_modernbert_decoder (37 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ModernBertDecoderConfig.__init__ |
33 | 31 | 0 |
meth |
ModernBertDecoderConfig.convert_rope_params_to_dict |
3 | 0 | 0 |
attr |
ModernBertDecoderConfig.pad_token_id |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.bos_token_id |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.eos_token_id |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.cls_token_id |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.sep_token_id |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.vocab_size |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.hidden_size |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.intermediate_size |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.initializer_range |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.initializer_cutoff_factor |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.norm_eps |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.norm_bias |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.attention_bias |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.attention_dropout |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.hidden_activation |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.embedding_dropout |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.mlp_bias |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.mlp_dropout |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.decoder_bias |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.classifier_dropout |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.classifier_bias |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.classifier_activation |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.use_cache |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.global_attn_every_n_layers |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.reference_compile |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.layer_types |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.sliding_window |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.rope_parameters |
1 | 0 | 0 |
transformers.models.modernbert_decoder.modeling_modernbert_decoder (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ModernBertDecoderForCausalLM.__init__ |
2 | 1 | 0 |
meth |
ModernBertDecoderForCausalLM.get_output_embeddings |
1 | 0 | 0 |
meth |
ModernBertDecoderForCausalLM.set_output_embeddings |
2 | 0 | 0 |
attr |
ModernBertDecoderForCausalLM.model |
1 | 0 | 0 |
attr |
ModernBertDecoderForCausalLM.lm_head |
1 | 0 | 0 |
attr |
ModernBertDecoderForCausalLM.decoder |
1 | 0 | 0 |
meth |
ModernBertDecoderModel.__init__ |
2 | 1 | 0 |
meth |
ModernBertDecoderModel.get_input_embeddings |
1 | 0 | 0 |
meth |
ModernBertDecoderModel.set_input_embeddings |
2 | 0 | 0 |
attr |
ModernBertDecoderModel.embeddings |
1 | 0 | 0 |
attr |
ModernBertDecoderModel.layers |
1 | 0 | 0 |
attr |
ModernBertDecoderModel.final_norm |
1 | 0 | 0 |
attr |
ModernBertDecoderModel.rotary_emb |
1 | 0 | 0 |
attr |
ModernBertDecoderModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
ModernBertDecoderPreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
ModernBertDecoderForSequenceClassification.__init__ |
2 | 1 | 0 |
attr |
ModernBertDecoderForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
ModernBertDecoderForSequenceClassification.model |
1 | 0 | 0 |
attr |
ModernBertDecoderForSequenceClassification.head |
1 | 0 | 0 |
attr |
ModernBertDecoderForSequenceClassification.classifier |
1 | 0 | 0 |
attr |
ModernBertDecoderForSequenceClassification.drop |
1 | 0 | 0 |
transformers.models.modernbert_decoder.modular_modernbert_decoder (62 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ModernBertDecoderForCausalLM.__init__ |
2 | 1 | 0 |
meth |
ModernBertDecoderForCausalLM.get_output_embeddings |
1 | 0 | 0 |
meth |
ModernBertDecoderForCausalLM.set_output_embeddings |
2 | 0 | 0 |
attr |
ModernBertDecoderForCausalLM.config |
1 | 0 | 0 |
attr |
ModernBertDecoderForCausalLM.model |
1 | 0 | 0 |
attr |
ModernBertDecoderForCausalLM.lm_head |
1 | 0 | 0 |
attr |
ModernBertDecoderForCausalLM.decoder |
1 | 0 | 0 |
meth |
ModernBertDecoderPreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
ModernBertDecoderForSequenceClassification.__init__ |
2 | 1 | 0 |
attr |
ModernBertDecoderForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
ModernBertDecoderForSequenceClassification.model |
1 | 0 | 0 |
attr |
ModernBertDecoderForSequenceClassification.head |
1 | 0 | 0 |
attr |
ModernBertDecoderForSequenceClassification.classifier |
1 | 0 | 0 |
attr |
ModernBertDecoderForSequenceClassification.drop |
1 | 0 | 0 |
meth |
ModernBertDecoderModel.__init__ |
2 | 1 | 0 |
meth |
ModernBertDecoderModel.get_input_embeddings |
1 | 0 | 0 |
meth |
ModernBertDecoderModel.set_input_embeddings |
2 | 0 | 0 |
attr |
ModernBertDecoderModel.config |
1 | 0 | 0 |
attr |
ModernBertDecoderModel.embeddings |
1 | 0 | 0 |
attr |
ModernBertDecoderModel.layers |
1 | 0 | 0 |
attr |
ModernBertDecoderModel.final_norm |
1 | 0 | 0 |
attr |
ModernBertDecoderModel.rotary_emb |
1 | 0 | 0 |
attr |
ModernBertDecoderModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
ModernBertDecoderConfig.__init__ |
33 | 31 | 0 |
meth |
ModernBertDecoderConfig.convert_rope_params_to_dict |
3 | 0 | 0 |
attr |
ModernBertDecoderConfig.pad_token_id |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.bos_token_id |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.eos_token_id |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.cls_token_id |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.sep_token_id |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.vocab_size |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.hidden_size |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.intermediate_size |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.initializer_range |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.initializer_cutoff_factor |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.norm_eps |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.norm_bias |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.attention_bias |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.attention_dropout |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.hidden_activation |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.embedding_dropout |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.mlp_bias |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.mlp_dropout |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.decoder_bias |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.classifier_dropout |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.classifier_bias |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.classifier_activation |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.use_cache |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.global_attn_every_n_layers |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.reference_compile |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.layer_types |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.sliding_window |
1 | 0 | 0 |
attr |
ModernBertDecoderConfig.rope_parameters |
1 | 0 | 0 |
transformers.models.modernvbert.configuration_modernvbert (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ModernVBertConfig.__init__ |
11 | 7 | 0 |
attr |
ModernVBertConfig.text_config |
1 | 0 | 0 |
attr |
ModernVBertConfig.vision_config |
1 | 0 | 0 |
attr |
ModernVBertConfig.pixel_shuffle_factor |
1 | 0 | 0 |
attr |
ModernVBertConfig.initializer_range |
1 | 0 | 0 |
attr |
ModernVBertConfig.initializer_cutoff_factor |
1 | 0 | 0 |
attr |
ModernVBertConfig.classifier_pooling |
1 | 0 | 0 |
attr |
ModernVBertConfig.classifier_dropout |
1 | 0 | 0 |
attr |
ModernVBertConfig.classifier_bias |
1 | 0 | 0 |
transformers.models.modernvbert.modeling_modernvbert (35 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ModernVBertModel.__init__ |
2 | 1 | 0 |
meth |
ModernVBertModel.get_input_embeddings |
1 | 0 | 0 |
meth |
ModernVBertModel.set_input_embeddings |
2 | 0 | 0 |
meth |
ModernVBertModel.inputs_merger |
4 | 3 | 0 |
attr |
ModernVBertModel.padding_idx |
1 | 0 | 0 |
attr |
ModernVBertModel.vocab_size |
1 | 0 | 0 |
attr |
ModernVBertModel.vision_model |
1 | 0 | 0 |
attr |
ModernVBertModel.connector |
1 | 0 | 0 |
attr |
ModernVBertModel.text_model |
1 | 0 | 0 |
attr |
ModernVBertModel.image_seq_len |
1 | 0 | 0 |
attr |
ModernVBertModel.image_token_id |
1 | 0 | 0 |
meth |
ModernVBertForTokenClassification.__init__ |
2 | 1 | 0 |
attr |
ModernVBertForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
ModernVBertForTokenClassification.model |
1 | 0 | 0 |
attr |
ModernVBertForTokenClassification.head |
1 | 0 | 0 |
attr |
ModernVBertForTokenClassification.drop |
1 | 0 | 0 |
attr |
ModernVBertForTokenClassification.classifier |
1 | 0 | 0 |
meth |
ModernVBertForMaskedLM.__init__ |
2 | 0 | 0 |
meth |
ModernVBertForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
ModernVBertForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
attr |
ModernVBertForMaskedLM.vocab_size |
1 | 0 | 0 |
attr |
ModernVBertForMaskedLM.model |
1 | 0 | 0 |
attr |
ModernVBertForMaskedLM.projection_head |
1 | 0 | 0 |
attr |
ModernVBertForMaskedLM.lm_head |
1 | 0 | 0 |
meth |
ModernVBertForSequenceClassification.__init__ |
2 | 1 | 0 |
attr |
ModernVBertForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
ModernVBertForSequenceClassification.model |
1 | 0 | 0 |
attr |
ModernVBertForSequenceClassification.head |
1 | 0 | 0 |
attr |
ModernVBertForSequenceClassification.drop |
1 | 0 | 0 |
attr |
ModernVBertForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
ModernVBertPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.modernvbert.modular_modernvbert (41 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ModernVBertModel.__init__ |
2 | 1 | 0 |
attr |
ModernVBertModel.connector |
1 | 0 | 0 |
attr |
ModernVBertModel.text_model |
1 | 0 | 0 |
attr |
ModernVBertModel.vision_model |
1 | 0 | 0 |
attr |
ModernVBertModel.image_seq_len |
1 | 0 | 0 |
meth |
ModernVBertForTokenClassification.__init__ |
2 | 1 | 0 |
attr |
ModernVBertForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
ModernVBertForTokenClassification.model |
1 | 0 | 0 |
attr |
ModernVBertForTokenClassification.head |
1 | 0 | 0 |
attr |
ModernVBertForTokenClassification.drop |
1 | 0 | 0 |
attr |
ModernVBertForTokenClassification.classifier |
1 | 0 | 0 |
meth |
ModernVBertConfig.__init__ |
11 | 7 | 0 |
attr |
ModernVBertConfig.text_config |
1 | 0 | 0 |
attr |
ModernVBertConfig.vision_config |
1 | 0 | 0 |
attr |
ModernVBertConfig.pixel_shuffle_factor |
1 | 0 | 0 |
attr |
ModernVBertConfig.initializer_range |
1 | 0 | 0 |
attr |
ModernVBertConfig.initializer_cutoff_factor |
1 | 0 | 0 |
attr |
ModernVBertConfig.classifier_pooling |
1 | 0 | 0 |
attr |
ModernVBertConfig.classifier_dropout |
1 | 0 | 0 |
attr |
ModernVBertConfig.classifier_bias |
1 | 0 | 0 |
meth |
ModernVBertForMaskedLM.__init__ |
2 | 0 | 0 |
meth |
ModernVBertForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
ModernVBertForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
attr |
ModernVBertForMaskedLM.vocab_size |
1 | 0 | 0 |
attr |
ModernVBertForMaskedLM.model |
1 | 0 | 0 |
attr |
ModernVBertForMaskedLM.projection_head |
1 | 0 | 0 |
attr |
ModernVBertForMaskedLM.lm_head |
1 | 0 | 0 |
meth |
ModernVBertForSequenceClassification.__init__ |
2 | 1 | 0 |
attr |
ModernVBertForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
ModernVBertForSequenceClassification.config |
1 | 0 | 0 |
attr |
ModernVBertForSequenceClassification.model |
1 | 0 | 0 |
attr |
ModernVBertForSequenceClassification.head |
1 | 0 | 0 |
attr |
ModernVBertForSequenceClassification.drop |
1 | 0 | 0 |
attr |
ModernVBertForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
ModernVBertPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.moonshine.configuration_moonshine (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MoonshineConfig.__init__ |
26 | 24 | 0 |
attr |
MoonshineConfig.vocab_size |
1 | 0 | 0 |
attr |
MoonshineConfig.hidden_size |
1 | 0 | 0 |
attr |
MoonshineConfig.intermediate_size |
1 | 0 | 0 |
attr |
MoonshineConfig.encoder_num_hidden_layers |
1 | 0 | 0 |
attr |
MoonshineConfig.decoder_num_hidden_layers |
1 | 0 | 0 |
attr |
MoonshineConfig.encoder_num_attention_heads |
1 | 0 | 0 |
attr |
MoonshineConfig.decoder_num_attention_heads |
1 | 0 | 0 |
attr |
MoonshineConfig.encoder_num_key_value_heads |
1 | 0 | 0 |
attr |
MoonshineConfig.decoder_num_key_value_heads |
1 | 0 | 0 |
attr |
MoonshineConfig.pad_head_dim_to_multiple_of |
1 | 0 | 0 |
attr |
MoonshineConfig.encoder_hidden_act |
1 | 0 | 0 |
attr |
MoonshineConfig.decoder_hidden_act |
1 | 0 | 0 |
attr |
MoonshineConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
MoonshineConfig.initializer_range |
1 | 0 | 0 |
attr |
MoonshineConfig.decoder_start_token_id |
1 | 0 | 0 |
attr |
MoonshineConfig.use_cache |
1 | 0 | 0 |
attr |
MoonshineConfig.is_encoder_decoder |
1 | 0 | 0 |
attr |
MoonshineConfig.attention_bias |
1 | 0 | 0 |
attr |
MoonshineConfig.attention_dropout |
1 | 0 | 0 |
attr |
MoonshineConfig.bos_token_id |
1 | 0 | 0 |
attr |
MoonshineConfig.eos_token_id |
1 | 0 | 0 |
attr |
MoonshineConfig.pad_token_id |
1 | 0 | 0 |
attr |
MoonshineConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
MoonshineConfig.rope_parameters |
1 | 0 | 0 |
transformers.models.moonshine.modeling_moonshine (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MoonshineModel.__init__ |
2 | 1 | 0 |
meth |
MoonshineModel.get_input_embeddings |
1 | 0 | 0 |
meth |
MoonshineModel.set_input_embeddings |
2 | 0 | 0 |
meth |
MoonshineModel.freeze_encoder |
1 | 0 | 0 |
meth |
MoonshineModel._mask_input_features |
1 | 0 | 0 |
attr |
MoonshineModel.encoder |
1 | 0 | 0 |
attr |
MoonshineModel.decoder |
1 | 0 | 0 |
meth |
MoonshinePreTrainedModel._get_feat_extract_output_lengths |
2 | 1 | 0 |
meth |
MoonshineForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
MoonshineForConditionalGeneration.get_output_embeddings |
1 | 0 | 0 |
meth |
MoonshineForConditionalGeneration.set_output_embeddings |
2 | 0 | 0 |
attr |
MoonshineForConditionalGeneration.model |
1 | 0 | 0 |
attr |
MoonshineForConditionalGeneration.proj_out |
1 | 0 | 0 |
transformers.models.moonshine.modular_moonshine (34 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MoonshineModel._mask_input_features |
1 | 0 | 0 |
meth |
MoonshinePreTrainedModel._get_feat_extract_output_lengths |
2 | 1 | 0 |
meth |
MoonshineForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
MoonshineForConditionalGeneration.get_output_embeddings |
1 | 0 | 0 |
meth |
MoonshineForConditionalGeneration.set_output_embeddings |
2 | 0 | 0 |
attr |
MoonshineForConditionalGeneration.model |
1 | 0 | 0 |
attr |
MoonshineForConditionalGeneration.proj_out |
1 | 0 | 0 |
meth |
MoonshineConfig.__init__ |
26 | 24 | 0 |
attr |
MoonshineConfig.vocab_size |
1 | 0 | 0 |
attr |
MoonshineConfig.hidden_size |
1 | 0 | 0 |
attr |
MoonshineConfig.intermediate_size |
1 | 0 | 0 |
attr |
MoonshineConfig.encoder_num_hidden_layers |
1 | 0 | 0 |
attr |
MoonshineConfig.decoder_num_hidden_layers |
1 | 0 | 0 |
attr |
MoonshineConfig.encoder_num_attention_heads |
1 | 0 | 0 |
attr |
MoonshineConfig.decoder_num_attention_heads |
1 | 0 | 0 |
attr |
MoonshineConfig.encoder_num_key_value_heads |
1 | 0 | 0 |
attr |
MoonshineConfig.decoder_num_key_value_heads |
1 | 0 | 0 |
attr |
MoonshineConfig.pad_head_dim_to_multiple_of |
1 | 0 | 0 |
attr |
MoonshineConfig.encoder_hidden_act |
1 | 0 | 0 |
attr |
MoonshineConfig.decoder_hidden_act |
1 | 0 | 0 |
attr |
MoonshineConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
MoonshineConfig.initializer_range |
1 | 0 | 0 |
attr |
MoonshineConfig.decoder_start_token_id |
1 | 0 | 0 |
attr |
MoonshineConfig.use_cache |
1 | 0 | 0 |
attr |
MoonshineConfig.is_encoder_decoder |
1 | 0 | 0 |
attr |
MoonshineConfig.attention_bias |
1 | 0 | 0 |
attr |
MoonshineConfig.attention_dropout |
1 | 0 | 0 |
attr |
MoonshineConfig.bos_token_id |
1 | 0 | 0 |
attr |
MoonshineConfig.eos_token_id |
1 | 0 | 0 |
attr |
MoonshineConfig.pad_token_id |
1 | 0 | 0 |
attr |
MoonshineConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
MoonshineConfig.rope_parameters |
1 | 0 | 0 |
transformers.models.moonshine_streaming.configuration_moonshine_streaming (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MoonshineStreamingConfig.__init__ |
22 | 20 | 0 |
attr |
MoonshineStreamingConfig.encoder_config |
1 | 0 | 0 |
attr |
MoonshineStreamingConfig.vocab_size |
1 | 0 | 0 |
attr |
MoonshineStreamingConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
MoonshineStreamingConfig.hidden_size |
1 | 0 | 0 |
attr |
MoonshineStreamingConfig.intermediate_size |
1 | 0 | 0 |
attr |
MoonshineStreamingConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
MoonshineStreamingConfig.num_attention_heads |
1 | 0 | 0 |
attr |
MoonshineStreamingConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
MoonshineStreamingConfig.hidden_act |
1 | 0 | 0 |
attr |
MoonshineStreamingConfig.use_cache |
1 | 0 | 0 |
attr |
MoonshineStreamingConfig.attention_bias |
1 | 0 | 0 |
attr |
MoonshineStreamingConfig.attention_dropout |
1 | 0 | 0 |
attr |
MoonshineStreamingConfig.head_dim |
1 | 0 | 0 |
attr |
MoonshineStreamingConfig.rope_parameters |
1 | 0 | 0 |
attr |
MoonshineStreamingConfig.pad_head_dim_to_multiple_of |
1 | 0 | 0 |
meth |
MoonshineStreamingEncoderConfig.__init__ |
15 | 13 | 0 |
attr |
MoonshineStreamingEncoderConfig.hidden_size |
1 | 0 | 0 |
attr |
MoonshineStreamingEncoderConfig.intermediate_size |
1 | 0 | 0 |
attr |
MoonshineStreamingEncoderConfig.hidden_act |
1 | 0 | 0 |
attr |
MoonshineStreamingEncoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
MoonshineStreamingEncoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
MoonshineStreamingEncoderConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
MoonshineStreamingEncoderConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
MoonshineStreamingEncoderConfig.attention_dropout |
1 | 0 | 0 |
attr |
MoonshineStreamingEncoderConfig.attention_bias |
1 | 0 | 0 |
attr |
MoonshineStreamingEncoderConfig.head_dim |
1 | 0 | 0 |
attr |
MoonshineStreamingEncoderConfig.sample_rate |
1 | 0 | 0 |
attr |
MoonshineStreamingEncoderConfig.frame_ms |
1 | 0 | 0 |
attr |
MoonshineStreamingEncoderConfig.sliding_windows |
1 | 0 | 0 |
transformers.models.moonshine_streaming.modeling_moonshine_streaming (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MoonshineStreamingModel.__init__ |
2 | 0 | 0 |
meth |
MoonshineStreamingModel.get_input_embeddings |
1 | 0 | 0 |
meth |
MoonshineStreamingModel.set_input_embeddings |
2 | 0 | 0 |
meth |
MoonshineStreamingModel.freeze_encoder |
1 | 0 | 0 |
meth |
MoonshineStreamingModel._mask_input_features |
1 | 0 | 0 |
attr |
MoonshineStreamingModel.encoder |
1 | 0 | 0 |
attr |
MoonshineStreamingModel.decoder |
1 | 0 | 0 |
meth |
MoonshineStreamingPreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
MoonshineStreamingForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
MoonshineStreamingForConditionalGeneration.get_output_embeddings |
1 | 0 | 0 |
meth |
MoonshineStreamingForConditionalGeneration.set_output_embeddings |
2 | 0 | 0 |
attr |
MoonshineStreamingForConditionalGeneration.model |
1 | 0 | 0 |
attr |
MoonshineStreamingForConditionalGeneration.proj_out |
1 | 0 | 0 |
transformers.models.moonshine_streaming.modular_moonshine_streaming (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MoonshineStreamingModel.__init__ |
2 | 0 | 0 |
attr |
MoonshineStreamingModel.encoder |
1 | 0 | 0 |
attr |
MoonshineStreamingModel.decoder |
1 | 0 | 0 |
meth |
MoonshineStreamingPreTrainedModel._init_weights |
2 | 1 | 0 |
transformers.models.moonshine_streaming.processing_moonshine_streaming (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MoonshineStreamingProcessor.__init__ |
3 | 0 | 0 |
meth |
MoonshineStreamingProcessor.__call__ |
4 | 3 | 0 |
meth |
MoonshineStreamingProcessor.pad |
3 | 0 | 0 |
prop |
MoonshineStreamingProcessor.model_input_names |
1 | 0 | 0 |
transformers.models.moshi.configuration_moshi (72 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MoshiConfig.__init__ |
23 | 21 | 0 |
meth |
MoshiConfig.from_audio_encoder_config |
3 | 1 | 0 |
prop |
MoshiConfig.sampling_rate |
1 | 0 | 0 |
attr |
MoshiConfig.vocab_size |
1 | 0 | 0 |
attr |
MoshiConfig.hidden_size |
1 | 0 | 0 |
attr |
MoshiConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
MoshiConfig.num_attention_heads |
1 | 0 | 0 |
attr |
MoshiConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
MoshiConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
MoshiConfig.hidden_act |
1 | 0 | 0 |
attr |
MoshiConfig.head_dim |
1 | 0 | 0 |
attr |
MoshiConfig.initializer_range |
1 | 0 | 0 |
attr |
MoshiConfig.use_cache |
1 | 0 | 0 |
attr |
MoshiConfig.sliding_window |
1 | 0 | 0 |
attr |
MoshiConfig.attention_dropout |
1 | 0 | 0 |
attr |
MoshiConfig.ffn_dim |
1 | 0 | 0 |
attr |
MoshiConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
MoshiConfig.num_codebooks |
1 | 0 | 0 |
attr |
MoshiConfig.rope_parameters |
1 | 0 | 0 |
attr |
MoshiConfig.audio_encoder_config |
1 | 0 | 0 |
attr |
MoshiConfig.audio_vocab_size |
1 | 0 | 0 |
attr |
MoshiConfig.depth_decoder_config |
1 | 0 | 0 |
attr |
MoshiConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
MoshiConfig.pad_token_id |
1 | 0 | 0 |
attr |
MoshiConfig.bos_token_id |
1 | 0 | 0 |
attr |
MoshiConfig.eos_token_id |
1 | 0 | 0 |
meth |
MoshiDepthConfig.__init__ |
23 | 0 | 0 |
attr |
MoshiDepthConfig.vocab_size |
1 | 0 | 0 |
attr |
MoshiDepthConfig.hidden_size |
1 | 0 | 0 |
attr |
MoshiDepthConfig.input_size |
1 | 0 | 0 |
attr |
MoshiDepthConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
MoshiDepthConfig.num_attention_heads |
1 | 0 | 0 |
attr |
MoshiDepthConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
MoshiDepthConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
MoshiDepthConfig.hidden_act |
1 | 0 | 0 |
attr |
MoshiDepthConfig.head_dim |
1 | 0 | 0 |
attr |
MoshiDepthConfig.initializer_range |
1 | 0 | 0 |
attr |
MoshiDepthConfig.use_cache |
1 | 0 | 0 |
attr |
MoshiDepthConfig.sliding_window |
1 | 0 | 0 |
attr |
MoshiDepthConfig.attention_dropout |
1 | 0 | 0 |
attr |
MoshiDepthConfig.ffn_dim |
1 | 0 | 0 |
attr |
MoshiDepthConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
MoshiDepthConfig.num_codebooks |
1 | 0 | 0 |
attr |
MoshiDepthConfig.audio_vocab_size |
1 | 0 | 0 |
attr |
MoshiDepthConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
MoshiDepthConfig.pad_token_id |
1 | 0 | 0 |
attr |
MoshiDepthConfig.bos_token_id |
1 | 0 | 0 |
attr |
MoshiDepthConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.moshi.modeling_moshi (62 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MoshiModel.__init__ |
2 | 1 | 0 |
meth |
MoshiModel.forward |
12 | 11 | 0 |
attr |
MoshiModel.padding_idx |
1 | 0 | 0 |
attr |
MoshiModel.vocab_size |
1 | 0 | 0 |
attr |
MoshiModel.embed_tokens |
1 | 0 | 0 |
attr |
MoshiModel.layers |
1 | 0 | 0 |
attr |
MoshiModel.norm |
1 | 0 | 0 |
attr |
MoshiModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
MoshiForCausalLM.__init__ |
2 | 0 | 0 |
meth |
MoshiForCausalLM.forward |
14 | 13 | 0 |
attr |
MoshiForCausalLM.model |
1 | 0 | 0 |
attr |
MoshiForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
MoshiForCausalLM.lm_head |
1 | 0 | 0 |
meth |
MoshiPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
MoshiForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
MoshiForConditionalGeneration.get_depth_decoder |
1 | 0 | 0 |
meth |
MoshiForConditionalGeneration.forward |
16 | 15 | 0 |
meth |
MoshiForConditionalGeneration._prepare_inputs_embeds_for_generation |
11 | 10 | 0 |
meth |
MoshiForConditionalGeneration.generate |
11 | 10 | 0 |
meth |
MoshiForConditionalGeneration.prepare_inputs_for_generation |
15 | 1 | 0 |
meth |
MoshiForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
MoshiForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
MoshiForConditionalGeneration.get_output_embeddings |
1 | 0 | 0 |
meth |
MoshiForConditionalGeneration.set_output_embeddings |
2 | 0 | 0 |
meth |
MoshiForConditionalGeneration.freeze_audio_encoder |
1 | 0 | 0 |
meth |
MoshiForConditionalGeneration.freeze_depth_decoder |
1 | 0 | 0 |
meth |
MoshiForConditionalGeneration.apply_delay_pattern_mask |
3 | 0 | 0 |
meth |
MoshiForConditionalGeneration.build_delay_pattern_mask |
5 | 4 | 0 |
meth |
MoshiForConditionalGeneration.get_unconditional_inputs |
2 | 0 | 0 |
meth |
MoshiForConditionalGeneration._check_and_maybe_initialize_inputs |
8 | 0 | 0 |
attr |
MoshiForConditionalGeneration.embed_tokens |
1 | 0 | 0 |
attr |
MoshiForConditionalGeneration.audio_encoder |
1 | 0 | 0 |
attr |
MoshiForConditionalGeneration.decoder |
1 | 0 | 0 |
attr |
MoshiForConditionalGeneration.depth_decoder |
1 | 0 | 0 |
attr |
MoshiForConditionalGeneration.num_codebooks |
1 | 0 | 0 |
transformers.models.mpnet.configuration_mpnet (34 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MPNetConfig.init |
18 | 0 | 0 |
attr |
MPNetConfig.pad_token_id |
1 | 0 | 0 |
attr |
MPNetConfig.bos_token_id |
1 | 0 | 0 |
attr |
MPNetConfig.eos_token_id |
1 | 0 | 0 |
attr |
MPNetConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
MPNetConfig.vocab_size |
1 | 0 | 0 |
attr |
MPNetConfig.hidden_size |
1 | 0 | 0 |
attr |
MPNetConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
MPNetConfig.num_attention_heads |
1 | 0 | 0 |
attr |
MPNetConfig.hidden_act |
1 | 0 | 0 |
attr |
MPNetConfig.intermediate_size |
1 | 0 | 0 |
attr |
MPNetConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
MPNetConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
MPNetConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
MPNetConfig.initializer_range |
1 | 0 | 0 |
attr |
MPNetConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
MPNetConfig.relative_attention_num_buckets |
1 | 0 | 0 |
transformers.models.mpnet.modeling_mpnet (56 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MPNetForMaskedLM.init |
2 | 0 | 0 |
meth |
MPNetForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
MPNetForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
meth |
MPNetForMaskedLM.forward |
10 | 9 | 0 |
attr |
MPNetForMaskedLM.mpnet |
1 | 0 | 0 |
attr |
MPNetForMaskedLM.lm_head |
1 | 0 | 0 |
meth |
MPNetModel.init |
3 | 0 | 0 |
meth |
MPNetModel.get_input_embeddings |
1 | 0 | 0 |
meth |
MPNetModel.set_input_embeddings |
2 | 0 | 0 |
meth |
MPNetModel.forward |
9 | 8 | 0 |
attr |
MPNetModel.embeddings |
1 | 0 | 0 |
attr |
MPNetModel.encoder |
1 | 0 | 0 |
attr |
MPNetModel.pooler |
1 | 0 | 0 |
meth |
MPNetForSequenceClassification.init |
2 | 0 | 0 |
meth |
MPNetForSequenceClassification.forward |
10 | 9 | 0 |
attr |
MPNetForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
MPNetForSequenceClassification.mpnet |
1 | 0 | 0 |
attr |
MPNetForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
MPNetPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
MPNetLayer.init |
2 | 0 | 0 |
meth |
MPNetLayer.forward |
6 | 0 | 0 |
attr |
MPNetLayer.attention |
1 | 0 | 0 |
attr |
MPNetLayer.intermediate |
1 | 0 | 0 |
attr |
MPNetLayer.output |
1 | 0 | 0 |
meth |
MPNetForTokenClassification.init |
2 | 0 | 0 |
meth |
MPNetForTokenClassification.forward |
10 | 9 | 0 |
attr |
MPNetForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
MPNetForTokenClassification.mpnet |
1 | 0 | 0 |
attr |
MPNetForTokenClassification.dropout |
1 | 0 | 0 |
attr |
MPNetForTokenClassification.classifier |
1 | 0 | 0 |
meth |
MPNetForMultipleChoice.init |
2 | 0 | 0 |
meth |
MPNetForMultipleChoice.forward |
10 | 9 | 0 |
attr |
MPNetForMultipleChoice.mpnet |
1 | 0 | 0 |
attr |
MPNetForMultipleChoice.dropout |
1 | 0 | 0 |
attr |
MPNetForMultipleChoice.classifier |
1 | 0 | 0 |
meth |
MPNetForQuestionAnswering.init |
2 | 0 | 0 |
meth |
MPNetForQuestionAnswering.forward |
11 | 10 | 0 |
attr |
MPNetForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
MPNetForQuestionAnswering.mpnet |
1 | 0 | 0 |
attr |
MPNetForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
transformers.models.mpnet.tokenization_mpnet (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MPNetTokenizer.init |
13 | 1 | 0 |
prop |
MPNetTokenizer.mask_token |
2 | 1 | 0 |
attr |
MPNetTokenizer.do_lower_case |
1 | 0 | 0 |
transformers.models.mpt.configuration_mpt (29 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MptConfig.init |
24 | 17 | 0 |
attr |
MptConfig.d_model |
1 | 0 | 0 |
attr |
MptConfig.n_heads |
1 | 0 | 0 |
attr |
MptConfig.n_layers |
1 | 0 | 0 |
attr |
MptConfig.expansion_ratio |
1 | 0 | 0 |
attr |
MptConfig.max_seq_len |
1 | 0 | 0 |
attr |
MptConfig.vocab_size |
1 | 0 | 0 |
attr |
MptConfig.resid_pdrop |
1 | 0 | 0 |
attr |
MptConfig.emb_pdrop |
1 | 0 | 0 |
attr |
MptConfig.learned_pos_emb |
1 | 0 | 0 |
attr |
MptConfig.init_device |
1 | 0 | 0 |
attr |
MptConfig.logit_scale |
1 | 0 | 0 |
attr |
MptConfig.no_bias |
1 | 0 | 0 |
attr |
MptConfig.embedding_fraction |
1 | 0 | 0 |
attr |
MptConfig.norm_type |
1 | 0 | 0 |
attr |
MptConfig.layer_norm_epsilon |
1 | 0 | 0 |
attr |
MptConfig.use_cache |
1 | 0 | 0 |
attr |
MptConfig.initializer_range |
1 | 0 | 0 |
attr |
MptConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
MptConfig.pad_token_id |
1 | 0 | 0 |
attr |
MptConfig.bos_token_id |
1 | 0 | 0 |
attr |
MptConfig.eos_token_id |
1 | 0 | 0 |
attr |
MptConfig.attn_config |
1 | 0 | 0 |
transformers.models.mpt.modeling_mpt (37 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MptForSequenceClassification.init |
2 | 1 | 0 |
meth |
MptForSequenceClassification.set_output_embeddings |
2 | 1 | 0 |
meth |
MptForSequenceClassification.forward |
11 | 10 | 0 |
attr |
MptForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
MptForSequenceClassification.transformer |
1 | 0 | 0 |
attr |
MptForSequenceClassification.score |
1 | 0 | 0 |
meth |
MptModel.init |
2 | 1 | 0 |
meth |
MptModel.get_input_embeddings |
1 | 0 | 0 |
meth |
MptModel.build_mpt_alibi_tensor |
5 | 0 | 0 |
meth |
MptModel.set_input_embeddings |
2 | 1 | 0 |
meth |
MptModel.forward |
11 | 10 | 0 |
attr |
MptModel.hidden_size |
1 | 0 | 0 |
attr |
MptModel.num_heads |
1 | 0 | 0 |
attr |
MptModel.wte |
1 | 0 | 0 |
attr |
MptModel.blocks |
1 | 0 | 0 |
attr |
MptModel.norm_f |
1 | 0 | 0 |
attr |
MptModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
MptForTokenClassification.init |
2 | 1 | 0 |
meth |
MptForTokenClassification.forward |
11 | 10 | 0 |
attr |
MptForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
MptForTokenClassification.transformer |
1 | 0 | 0 |
attr |
MptForTokenClassification.dropout |
1 | 0 | 0 |
attr |
MptForTokenClassification.classifier |
1 | 0 | 0 |
meth |
MptForCausalLM.init |
2 | 1 | 0 |
meth |
MptForCausalLM.set_output_embeddings |
2 | 1 | 0 |
meth |
MptForCausalLM.forward |
13 | 12 | 0 |
attr |
MptForCausalLM.transformer |
1 | 0 | 0 |
attr |
MptForCausalLM.lm_head |
1 | 0 | 0 |
meth |
MptForQuestionAnswering.init |
2 | 0 | 0 |
meth |
MptForQuestionAnswering.forward |
10 | 9 | 0 |
attr |
MptForQuestionAnswering.transformer |
1 | 0 | 0 |
attr |
MptForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
transformers.models.mra.configuration_mra (44 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MraConfig.init |
23 | 0 | 0 |
attr |
MraConfig.pad_token_id |
1 | 0 | 0 |
attr |
MraConfig.bos_token_id |
1 | 0 | 0 |
attr |
MraConfig.eos_token_id |
1 | 0 | 0 |
attr |
MraConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
MraConfig.add_cross_attention |
1 | 0 | 0 |
attr |
MraConfig.vocab_size |
1 | 0 | 0 |
attr |
MraConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
MraConfig.hidden_size |
1 | 0 | 0 |
attr |
MraConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
MraConfig.num_attention_heads |
1 | 0 | 0 |
attr |
MraConfig.intermediate_size |
1 | 0 | 0 |
attr |
MraConfig.hidden_act |
1 | 0 | 0 |
attr |
MraConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
MraConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
MraConfig.initializer_range |
1 | 0 | 0 |
attr |
MraConfig.type_vocab_size |
1 | 0 | 0 |
attr |
MraConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
MraConfig.block_per_row |
1 | 0 | 0 |
attr |
MraConfig.approx_mode |
1 | 0 | 0 |
attr |
MraConfig.initial_prior_first_n_blocks |
1 | 0 | 0 |
attr |
MraConfig.initial_prior_diagonal_n_blocks |
1 | 0 | 0 |
transformers.models.mra.modeling_mra (55 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MraLayer.init |
2 | 0 | 0 |
meth |
MraLayer.forward |
3 | 0 | 0 |
meth |
MraLayer.feed_forward_chunk |
2 | 0 | 0 |
attr |
MraLayer.chunk_size_feed_forward |
1 | 0 | 0 |
attr |
MraLayer.seq_len_dim |
1 | 0 | 0 |
attr |
MraLayer.attention |
1 | 0 | 0 |
attr |
MraLayer.add_cross_attention |
1 | 0 | 0 |
attr |
MraLayer.intermediate |
1 | 0 | 0 |
attr |
MraLayer.output |
1 | 0 | 0 |
meth |
MraForMultipleChoice.init |
2 | 0 | 0 |
meth |
MraForMultipleChoice.forward |
10 | 9 | 0 |
attr |
MraForMultipleChoice.mra |
1 | 0 | 0 |
attr |
MraForMultipleChoice.pre_classifier |
1 | 0 | 0 |
attr |
MraForMultipleChoice.classifier |
1 | 0 | 0 |
meth |
MraForSequenceClassification.init |
2 | 0 | 0 |
meth |
MraForSequenceClassification.forward |
10 | 9 | 0 |
attr |
MraForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
MraForSequenceClassification.mra |
1 | 0 | 0 |
attr |
MraForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
MraModel.init |
2 | 0 | 0 |
meth |
MraModel.get_input_embeddings |
1 | 0 | 0 |
meth |
MraModel.set_input_embeddings |
2 | 0 | 0 |
meth |
MraModel.forward |
9 | 8 | 0 |
attr |
MraModel.embeddings |
1 | 0 | 0 |
attr |
MraModel.encoder |
1 | 0 | 0 |
meth |
MraForTokenClassification.init |
2 | 0 | 0 |
meth |
MraForTokenClassification.forward |
10 | 9 | 0 |
attr |
MraForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
MraForTokenClassification.mra |
1 | 0 | 0 |
attr |
MraForTokenClassification.dropout |
1 | 0 | 0 |
attr |
MraForTokenClassification.classifier |
1 | 0 | 0 |
meth |
MraForMaskedLM.init |
2 | 0 | 0 |
meth |
MraForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
MraForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
meth |
MraForMaskedLM.forward |
10 | 9 | 0 |
attr |
MraForMaskedLM.mra |
1 | 0 | 0 |
attr |
MraForMaskedLM.cls |
1 | 0 | 0 |
meth |
MraPreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
MraForQuestionAnswering.init |
2 | 0 | 0 |
meth |
MraForQuestionAnswering.forward |
11 | 10 | 0 |
attr |
MraForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
MraForQuestionAnswering.mra |
1 | 0 | 0 |
attr |
MraForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
transformers.models.mt5.configuration_mt5 (48 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MT5Config.init |
24 | 0 | 0 |
attr |
MT5Config.is_decoder |
1 | 0 | 0 |
attr |
MT5Config.vocab_size |
1 | 0 | 0 |
attr |
MT5Config.d_model |
1 | 0 | 0 |
attr |
MT5Config.d_kv |
1 | 0 | 0 |
attr |
MT5Config.d_ff |
1 | 0 | 0 |
attr |
MT5Config.num_layers |
1 | 0 | 0 |
attr |
MT5Config.num_decoder_layers |
1 | 0 | 0 |
attr |
MT5Config.num_heads |
1 | 0 | 0 |
attr |
MT5Config.relative_attention_num_buckets |
1 | 0 | 0 |
attr |
MT5Config.relative_attention_max_distance |
1 | 0 | 0 |
attr |
MT5Config.dropout_rate |
1 | 0 | 0 |
attr |
MT5Config.classifier_dropout |
1 | 0 | 0 |
attr |
MT5Config.layer_norm_epsilon |
1 | 0 | 0 |
attr |
MT5Config.initializer_factor |
1 | 0 | 0 |
attr |
MT5Config.feed_forward_proj |
1 | 0 | 0 |
attr |
MT5Config.use_cache |
1 | 0 | 0 |
attr |
MT5Config.dense_act_fn |
1 | 0 | 0 |
attr |
MT5Config.is_gated_act |
1 | 0 | 0 |
attr |
MT5Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
MT5Config.tokenizer_class |
1 | 0 | 0 |
attr |
MT5Config.bos_token_id |
1 | 0 | 0 |
attr |
MT5Config.pad_token_id |
1 | 0 | 0 |
attr |
MT5Config.eos_token_id |
1 | 0 | 0 |
attr |
MT5Config.decoder_start_token_id |
1 | 0 | 0 |
transformers.models.mt5.modeling_mt5 (52 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MT5ForQuestionAnswering.init |
2 | 1 | 0 |
meth |
MT5ForQuestionAnswering.get_input_embeddings |
1 | 0 | 0 |
meth |
MT5ForQuestionAnswering.set_input_embeddings |
2 | 0 | 0 |
meth |
MT5ForQuestionAnswering.forward |
15 | 14 | 0 |
attr |
MT5ForQuestionAnswering.model_dim |
1 | 0 | 0 |
attr |
MT5ForQuestionAnswering.shared |
1 | 0 | 0 |
attr |
MT5ForQuestionAnswering.encoder |
1 | 0 | 0 |
attr |
MT5ForQuestionAnswering.decoder |
1 | 0 | 0 |
attr |
MT5ForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
MT5ForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
MT5ForTokenClassification.init |
2 | 1 | 0 |
meth |
MT5ForTokenClassification.forward |
9 | 8 | 0 |
attr |
MT5ForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
MT5ForTokenClassification.transformer |
1 | 0 | 0 |
attr |
MT5ForTokenClassification.dropout |
1 | 0 | 0 |
attr |
MT5ForTokenClassification.classifier |
1 | 0 | 0 |
meth |
MT5PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
MT5PreTrainedModel._shift_right |
2 | 0 | 0 |
prop |
MT5PreTrainedModel.dummy_inputs |
1 | 0 | 0 |
meth |
MT5Model.init |
2 | 1 | 0 |
meth |
MT5Model.get_input_embeddings |
1 | 0 | 0 |
meth |
MT5Model.set_input_embeddings |
2 | 0 | 0 |
meth |
MT5Model.forward |
15 | 14 | 0 |
attr |
MT5Model.shared |
1 | 0 | 0 |
attr |
MT5Model.encoder |
1 | 0 | 0 |
attr |
MT5Model.decoder |
1 | 0 | 0 |
meth |
MT5ForSequenceClassification.init |
2 | 1 | 0 |
meth |
MT5ForSequenceClassification.forward |
14 | 13 | 0 |
attr |
MT5ForSequenceClassification.transformer |
1 | 0 | 0 |
attr |
MT5ForSequenceClassification.classification_head |
1 | 0 | 0 |
meth |
MT5ForConditionalGeneration.init |
2 | 1 | 0 |
meth |
MT5ForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
MT5ForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
MT5ForConditionalGeneration.forward |
16 | 15 | 0 |
meth |
MT5ForConditionalGeneration.prepare_decoder_input_ids_from_labels |
2 | 1 | 0 |
attr |
MT5ForConditionalGeneration.model_dim |
1 | 0 | 0 |
attr |
MT5ForConditionalGeneration.shared |
1 | 0 | 0 |
attr |
MT5ForConditionalGeneration.encoder |
1 | 0 | 0 |
attr |
MT5ForConditionalGeneration.decoder |
1 | 0 | 0 |
attr |
MT5ForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
MT5EncoderModel.init |
2 | 1 | 0 |
meth |
MT5EncoderModel.get_input_embeddings |
1 | 0 | 0 |
meth |
MT5EncoderModel.set_input_embeddings |
2 | 0 | 0 |
meth |
MT5EncoderModel.forward |
8 | 7 | 0 |
attr |
MT5EncoderModel.shared |
1 | 0 | 0 |
attr |
MT5EncoderModel.encoder |
1 | 0 | 0 |
transformers.models.musicgen.configuration_musicgen (59 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MusicgenConfig.init |
5 | 0 | 0 |
prop |
MusicgenConfig.sampling_rate |
1 | 0 | 0 |
attr |
MusicgenConfig.text_encoder |
1 | 0 | 0 |
attr |
MusicgenConfig.audio_encoder |
1 | 0 | 0 |
attr |
MusicgenConfig.decoder |
1 | 0 | 0 |
attr |
MusicgenConfig.initializer_factor |
1 | 0 | 0 |
attr |
MusicgenConfig.tie_encoder_decoder |
1 | 0 | 0 |
meth |
MusicgenDecoderConfig.init |
25 | 0 | 0 |
attr |
MusicgenDecoderConfig.is_decoder |
1 | 0 | 0 |
attr |
MusicgenDecoderConfig.add_cross_attention |
1 | 0 | 0 |
attr |
MusicgenDecoderConfig.cross_attention_hidden_size |
1 | 0 | 0 |
attr |
MusicgenDecoderConfig.vocab_size |
1 | 0 | 0 |
attr |
MusicgenDecoderConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
MusicgenDecoderConfig.hidden_size |
1 | 0 | 0 |
attr |
MusicgenDecoderConfig.ffn_dim |
1 | 0 | 0 |
attr |
MusicgenDecoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
MusicgenDecoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
MusicgenDecoderConfig.dropout |
1 | 0 | 0 |
attr |
MusicgenDecoderConfig.attention_dropout |
1 | 0 | 0 |
attr |
MusicgenDecoderConfig.activation_dropout |
1 | 0 | 0 |
attr |
MusicgenDecoderConfig.activation_function |
1 | 0 | 0 |
attr |
MusicgenDecoderConfig.initializer_factor |
1 | 0 | 0 |
attr |
MusicgenDecoderConfig.layerdrop |
1 | 0 | 0 |
attr |
MusicgenDecoderConfig.use_cache |
1 | 0 | 0 |
attr |
MusicgenDecoderConfig.scale_embedding |
1 | 0 | 0 |
attr |
MusicgenDecoderConfig.num_codebooks |
1 | 0 | 0 |
attr |
MusicgenDecoderConfig.audio_channels |
1 | 0 | 0 |
attr |
MusicgenDecoderConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
MusicgenDecoderConfig.pad_token_id |
1 | 0 | 0 |
attr |
MusicgenDecoderConfig.bos_token_id |
1 | 0 | 0 |
attr |
MusicgenDecoderConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.musicgen.modeling_musicgen (72 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MusicgenModel.init |
2 | 1 | 0 |
meth |
MusicgenModel.get_input_embeddings |
1 | 0 | 0 |
meth |
MusicgenModel.set_input_embeddings |
2 | 0 | 0 |
meth |
MusicgenModel.forward |
13 | 12 | 0 |
attr |
MusicgenModel.decoder |
1 | 0 | 0 |
meth |
MusicgenForCausalLM.init |
2 | 1 | 0 |
meth |
MusicgenForCausalLM.get_input_embeddings |
1 | 0 | 0 |
meth |
MusicgenForCausalLM.set_input_embeddings |
2 | 0 | 0 |
meth |
MusicgenForCausalLM.get_output_embeddings |
1 | 0 | 0 |
meth |
MusicgenForCausalLM.set_output_embeddings |
2 | 0 | 0 |
meth |
MusicgenForCausalLM.forward |
14 | 13 | 0 |
meth |
MusicgenForCausalLM.prepare_inputs_for_generation |
10 | 0 | 0 |
meth |
MusicgenForCausalLM.build_delay_pattern_mask |
4 | 3 | 0 |
meth |
MusicgenForCausalLM.apply_delay_pattern_mask |
3 | 0 | 0 |
meth |
MusicgenForCausalLM.generate |
8 | 6 | 0 |
attr |
MusicgenForCausalLM.model |
1 | 0 | 0 |
attr |
MusicgenForCausalLM.num_codebooks |
1 | 0 | 0 |
attr |
MusicgenForCausalLM.lm_heads |
1 | 0 | 0 |
meth |
MusicgenPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
MusicgenForConditionalGeneration.init |
5 | 4 | 0 |
meth |
MusicgenForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
MusicgenForConditionalGeneration.get_output_embeddings |
1 | 0 | 0 |
meth |
MusicgenForConditionalGeneration.set_output_embeddings |
2 | 0 | 0 |
meth |
MusicgenForConditionalGeneration.from_sub_models_pretrained |
6 | 4 | 0 |
meth |
MusicgenForConditionalGeneration.forward |
17 | 16 | 0 |
meth |
MusicgenForConditionalGeneration.prepare_inputs_for_generation |
11 | 0 | 0 |
meth |
MusicgenForConditionalGeneration._prepare_text_encoder_kwargs_for_generation |
5 | 4 | 0 |
meth |
MusicgenForConditionalGeneration._prepare_audio_encoder_kwargs_for_generation |
4 | 1 | 0 |
meth |
MusicgenForConditionalGeneration.prepare_decoder_input_ids_from_labels |
2 | 1 | 0 |
meth |
MusicgenForConditionalGeneration.resize_token_embeddings |
3 | 0 | 0 |
meth |
MusicgenForConditionalGeneration.freeze_audio_encoder |
1 | 0 | 0 |
meth |
MusicgenForConditionalGeneration.freeze_text_encoder |
1 | 0 | 0 |
meth |
MusicgenForConditionalGeneration.generate |
8 | 6 | 0 |
meth |
MusicgenForConditionalGeneration.get_unconditional_inputs |
2 | 0 | 0 |
attr |
MusicgenForConditionalGeneration.text_encoder |
1 | 0 | 0 |
attr |
MusicgenForConditionalGeneration.audio_encoder |
1 | 0 | 0 |
attr |
MusicgenForConditionalGeneration.decoder |
1 | 0 | 0 |
attr |
MusicgenForConditionalGeneration.enc_to_dec_proj |
1 | 0 | 0 |
transformers.models.musicgen.processing_musicgen (14 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MusicgenProcessor.init |
3 | 0 | 0 |
meth |
MusicgenProcessor.get_decoder_prompt_ids |
4 | 0 | 0 |
meth |
MusicgenProcessor.call |
3 | 0 | 0 |
meth |
MusicgenProcessor.batch_decode |
3 | 0 | 0 |
meth |
MusicgenProcessor._decode_audio |
3 | 2 | 1 |
transformers.models.musicgen_melody.configuration_musicgen_melody (60 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MusicgenMelodyDecoderConfig.init |
24 | 0 | 0 |
attr |
MusicgenMelodyDecoderConfig.is_decoder |
1 | 0 | 0 |
attr |
MusicgenMelodyDecoderConfig.add_cross_attention |
1 | 0 | 0 |
attr |
MusicgenMelodyDecoderConfig.vocab_size |
1 | 0 | 0 |
attr |
MusicgenMelodyDecoderConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
MusicgenMelodyDecoderConfig.hidden_size |
1 | 0 | 0 |
attr |
MusicgenMelodyDecoderConfig.ffn_dim |
1 | 0 | 0 |
attr |
MusicgenMelodyDecoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
MusicgenMelodyDecoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
MusicgenMelodyDecoderConfig.dropout |
1 | 0 | 0 |
attr |
MusicgenMelodyDecoderConfig.attention_dropout |
1 | 0 | 0 |
attr |
MusicgenMelodyDecoderConfig.activation_dropout |
1 | 0 | 0 |
attr |
MusicgenMelodyDecoderConfig.activation_function |
1 | 0 | 0 |
attr |
MusicgenMelodyDecoderConfig.initializer_factor |
1 | 0 | 0 |
attr |
MusicgenMelodyDecoderConfig.layerdrop |
1 | 0 | 0 |
attr |
MusicgenMelodyDecoderConfig.use_cache |
1 | 0 | 0 |
attr |
MusicgenMelodyDecoderConfig.scale_embedding |
1 | 0 | 0 |
attr |
MusicgenMelodyDecoderConfig.num_codebooks |
1 | 0 | 0 |
attr |
MusicgenMelodyDecoderConfig.audio_channels |
1 | 0 | 0 |
attr |
MusicgenMelodyDecoderConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
MusicgenMelodyDecoderConfig.pad_token_id |
1 | 0 | 0 |
attr |
MusicgenMelodyDecoderConfig.bos_token_id |
1 | 0 | 0 |
attr |
MusicgenMelodyDecoderConfig.eos_token_id |
1 | 0 | 0 |
meth |
MusicgenMelodyConfig.init |
7 | 0 | 0 |
prop |
MusicgenMelodyConfig.sampling_rate |
1 | 0 | 0 |
attr |
MusicgenMelodyConfig.text_encoder |
1 | 0 | 0 |
attr |
MusicgenMelodyConfig.audio_encoder |
1 | 0 | 0 |
attr |
MusicgenMelodyConfig.decoder |
1 | 0 | 0 |
attr |
MusicgenMelodyConfig.num_chroma |
1 | 0 | 0 |
attr |
MusicgenMelodyConfig.chroma_length |
1 | 0 | 0 |
attr |
MusicgenMelodyConfig.tie_encoder_decoder |
1 | 0 | 0 |
transformers.models.musicgen_melody.feature_extraction_musicgen_melody (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MusicgenMelodyFeatureExtractor.init |
11 | 0 | 0 |
meth |
MusicgenMelodyFeatureExtractor._extract_stem_indices |
3 | 0 | 0 |
meth |
MusicgenMelodyFeatureExtractor.call |
10 | 9 | 0 |
attr |
MusicgenMelodyFeatureExtractor.n_fft |
1 | 0 | 0 |
attr |
MusicgenMelodyFeatureExtractor.hop_length |
1 | 0 | 0 |
attr |
MusicgenMelodyFeatureExtractor.chunk_length |
1 | 0 | 0 |
attr |
MusicgenMelodyFeatureExtractor.n_samples |
1 | 0 | 0 |
attr |
MusicgenMelodyFeatureExtractor.sampling_rate |
1 | 0 | 0 |
attr |
MusicgenMelodyFeatureExtractor.chroma_filters |
1 | 0 | 0 |
attr |
MusicgenMelodyFeatureExtractor.spectrogram |
1 | 0 | 0 |
attr |
MusicgenMelodyFeatureExtractor.stem_indices |
1 | 0 | 0 |
transformers.models.musicgen_melody.modeling_musicgen_melody (69 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MusicgenMelodyModel.init |
2 | 1 | 0 |
meth |
MusicgenMelodyModel.get_input_embeddings |
1 | 0 | 0 |
meth |
MusicgenMelodyModel.set_input_embeddings |
2 | 0 | 0 |
meth |
MusicgenMelodyModel.forward |
13 | 12 | 0 |
attr |
MusicgenMelodyModel.decoder |
1 | 0 | 0 |
meth |
MusicgenMelodyForCausalLM.init |
2 | 1 | 0 |
meth |
MusicgenMelodyForCausalLM.get_input_embeddings |
1 | 0 | 0 |
meth |
MusicgenMelodyForCausalLM.set_input_embeddings |
2 | 0 | 0 |
meth |
MusicgenMelodyForCausalLM.get_output_embeddings |
1 | 0 | 0 |
meth |
MusicgenMelodyForCausalLM.set_output_embeddings |
2 | 0 | 0 |
meth |
MusicgenMelodyForCausalLM.forward |
14 | 13 | 0 |
meth |
MusicgenMelodyForCausalLM.prepare_inputs_for_generation |
10 | 0 | 0 |
meth |
MusicgenMelodyForCausalLM.build_delay_pattern_mask |
4 | 3 | 0 |
meth |
MusicgenMelodyForCausalLM.apply_delay_pattern_mask |
3 | 0 | 0 |
meth |
MusicgenMelodyForCausalLM.generate |
8 | 6 | 0 |
attr |
MusicgenMelodyForCausalLM.model |
1 | 0 | 0 |
attr |
MusicgenMelodyForCausalLM.num_codebooks |
1 | 0 | 0 |
attr |
MusicgenMelodyForCausalLM.lm_heads |
1 | 0 | 0 |
meth |
MusicgenMelodyPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
MusicgenMelodyForConditionalGeneration.init |
5 | 4 | 0 |
meth |
MusicgenMelodyForConditionalGeneration._init_weights |
2 | 0 | 0 |
meth |
MusicgenMelodyForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
MusicgenMelodyForConditionalGeneration.get_output_embeddings |
1 | 0 | 0 |
meth |
MusicgenMelodyForConditionalGeneration.set_output_embeddings |
2 | 0 | 0 |
meth |
MusicgenMelodyForConditionalGeneration.from_sub_models_pretrained |
6 | 4 | 0 |
meth |
MusicgenMelodyForConditionalGeneration.forward |
16 | 15 | 0 |
meth |
MusicgenMelodyForConditionalGeneration.prepare_inputs_for_generation |
10 | 0 | 0 |
meth |
MusicgenMelodyForConditionalGeneration._prepare_encoder_hidden_states_kwargs_for_generation |
5 | 4 | 0 |
meth |
MusicgenMelodyForConditionalGeneration.prepare_decoder_input_ids_from_labels |
2 | 1 | 0 |
meth |
MusicgenMelodyForConditionalGeneration.resize_token_embeddings |
3 | 0 | 0 |
meth |
MusicgenMelodyForConditionalGeneration.freeze_audio_encoder |
1 | 0 | 0 |
meth |
MusicgenMelodyForConditionalGeneration.freeze_text_encoder |
1 | 0 | 0 |
meth |
MusicgenMelodyForConditionalGeneration.generate |
8 | 6 | 0 |
attr |
MusicgenMelodyForConditionalGeneration.text_encoder |
1 | 0 | 0 |
attr |
MusicgenMelodyForConditionalGeneration.audio_encoder |
1 | 0 | 0 |
attr |
MusicgenMelodyForConditionalGeneration.decoder |
1 | 0 | 0 |
attr |
MusicgenMelodyForConditionalGeneration.enc_to_dec_proj |
1 | 0 | 0 |
attr |
MusicgenMelodyForConditionalGeneration.audio_enc_to_dec_proj |
1 | 0 | 0 |
transformers.models.musicgen_melody.processing_musicgen_melody (17 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MusicgenMelodyProcessor.init |
3 | 0 | 0 |
meth |
MusicgenMelodyProcessor.get_decoder_prompt_ids |
4 | 0 | 0 |
meth |
MusicgenMelodyProcessor.call |
3 | 0 | 0 |
meth |
MusicgenMelodyProcessor.batch_decode |
3 | 0 | 0 |
meth |
MusicgenMelodyProcessor._decode_audio |
3 | 2 | 1 |
meth |
MusicgenMelodyProcessor.get_unconditional_inputs |
3 | 0 | 0 |
transformers.models.mvp.configuration_mvp (60 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MvpConfig.init |
31 | 0 | 0 |
attr |
MvpConfig.is_decoder |
1 | 0 | 0 |
attr |
MvpConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
MvpConfig.vocab_size |
1 | 0 | 0 |
attr |
MvpConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
MvpConfig.d_model |
1 | 0 | 0 |
attr |
MvpConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
MvpConfig.encoder_layers |
1 | 0 | 0 |
attr |
MvpConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
MvpConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
MvpConfig.decoder_layers |
1 | 0 | 0 |
attr |
MvpConfig.decoder_attention_heads |
1 | 0 | 0 |
attr |
MvpConfig.dropout |
1 | 0 | 0 |
attr |
MvpConfig.attention_dropout |
1 | 0 | 0 |
attr |
MvpConfig.activation_dropout |
1 | 0 | 0 |
attr |
MvpConfig.activation_function |
1 | 0 | 0 |
attr |
MvpConfig.init_std |
1 | 0 | 0 |
attr |
MvpConfig.encoder_layerdrop |
1 | 0 | 0 |
attr |
MvpConfig.decoder_layerdrop |
1 | 0 | 0 |
attr |
MvpConfig.classifier_dropout |
1 | 0 | 0 |
attr |
MvpConfig.use_cache |
1 | 0 | 0 |
attr |
MvpConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
MvpConfig.scale_embedding |
1 | 0 | 0 |
attr |
MvpConfig.use_prompt |
1 | 0 | 0 |
attr |
MvpConfig.prompt_length |
1 | 0 | 0 |
attr |
MvpConfig.prompt_mid_dim |
1 | 0 | 0 |
attr |
MvpConfig.pad_token_id |
1 | 0 | 0 |
attr |
MvpConfig.bos_token_id |
1 | 0 | 0 |
attr |
MvpConfig.eos_token_id |
1 | 0 | 0 |
attr |
MvpConfig.decoder_start_token_id |
1 | 0 | 0 |
transformers.models.mvp.modeling_mvp (41 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MvpForCausalLM.init |
2 | 0 | 0 |
meth |
MvpForCausalLM.get_input_embeddings |
1 | 0 | 0 |
meth |
MvpForCausalLM.set_input_embeddings |
2 | 0 | 0 |
meth |
MvpForCausalLM.set_lightweight_tuning |
1 | 0 | 0 |
meth |
MvpForCausalLM.forward |
15 | 14 | 0 |
attr |
MvpForCausalLM.model |
1 | 0 | 0 |
attr |
MvpForCausalLM.lm_head |
1 | 0 | 0 |
meth |
MvpForQuestionAnswering.init |
2 | 0 | 0 |
meth |
MvpForQuestionAnswering.set_lightweight_tuning |
1 | 0 | 0 |
meth |
MvpForQuestionAnswering.forward |
15 | 14 | 0 |
attr |
MvpForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
MvpForQuestionAnswering.model |
1 | 0 | 0 |
attr |
MvpForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
MvpForSequenceClassification.init |
3 | 1 | 0 |
meth |
MvpForSequenceClassification.set_lightweight_tuning |
1 | 0 | 0 |
meth |
MvpForSequenceClassification.forward |
14 | 13 | 0 |
attr |
MvpForSequenceClassification.model |
1 | 0 | 0 |
attr |
MvpForSequenceClassification.classification_head |
1 | 0 | 0 |
meth |
MvpPreTrainedModel._init_weights |
2 | 0 | 0 |
prop |
MvpPreTrainedModel.dummy_inputs |
1 | 0 | 0 |
meth |
MvpForConditionalGeneration.init |
2 | 1 | 0 |
meth |
MvpForConditionalGeneration.set_lightweight_tuning |
1 | 0 | 0 |
meth |
MvpForConditionalGeneration.forward |
16 | 15 | 0 |
meth |
MvpForConditionalGeneration.prepare_decoder_input_ids_from_labels |
2 | 1 | 0 |
attr |
MvpForConditionalGeneration.model |
1 | 0 | 0 |
attr |
MvpForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
MvpModel.init |
2 | 1 | 0 |
meth |
MvpModel.get_input_embeddings |
1 | 0 | 0 |
meth |
MvpModel.set_input_embeddings |
2 | 0 | 0 |
meth |
MvpModel.set_lightweight_tuning |
1 | 0 | 0 |
meth |
MvpModel.forward |
15 | 14 | 0 |
attr |
MvpModel.use_prompt |
1 | 0 | 0 |
attr |
MvpModel.shared |
1 | 0 | 0 |
attr |
MvpModel.encoder |
1 | 0 | 0 |
attr |
MvpModel.decoder |
1 | 0 | 0 |
transformers.models.myt5.tokenization_myt5 (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MyT5Tokenizer.init |
8 | 1 | 0 |
meth |
MyT5Tokenizer.get_vocab |
1 | 0 | 0 |
meth |
MyT5Tokenizer._tokenize |
3 | 2 | 0 |
meth |
MyT5Tokenizer._convert_token_to_id |
2 | 0 | 0 |
meth |
MyT5Tokenizer._convert_id_to_token |
2 | 0 | 0 |
meth |
MyT5Tokenizer.convert_tokens_to_string |
2 | 0 | 0 |
prop |
MyT5Tokenizer.vocab_size |
1 | 0 | 0 |
attr |
MyT5Tokenizer.offset |
1 | 0 | 0 |
attr |
MyT5Tokenizer.byte_maps |
1 | 0 | 0 |
attr |
MyT5Tokenizer.decompose_rewriter |
1 | 0 | 0 |
attr |
MyT5Tokenizer.merge_rewriter |
1 | 0 | 0 |
transformers.models.nanochat.configuration_nanochat (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
NanoChatConfig.init |
21 | 19 | 0 |
attr |
NanoChatConfig.vocab_size |
1 | 0 | 0 |
attr |
NanoChatConfig.hidden_size |
1 | 0 | 0 |
attr |
NanoChatConfig.intermediate_size |
1 | 0 | 0 |
attr |
NanoChatConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
NanoChatConfig.num_attention_heads |
1 | 0 | 0 |
attr |
NanoChatConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
NanoChatConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
NanoChatConfig.hidden_act |
1 | 0 | 0 |
attr |
NanoChatConfig.attention_dropout |
1 | 0 | 0 |
attr |
NanoChatConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
NanoChatConfig.initializer_range |
1 | 0 | 0 |
attr |
NanoChatConfig.use_cache |
1 | 0 | 0 |
attr |
NanoChatConfig.final_logit_softcapping |
1 | 0 | 0 |
attr |
NanoChatConfig.attention_bias |
1 | 0 | 0 |
attr |
NanoChatConfig.rope_parameters |
1 | 0 | 0 |
attr |
NanoChatConfig.pad_token_id |
1 | 0 | 0 |
attr |
NanoChatConfig.bos_token_id |
1 | 0 | 0 |
attr |
NanoChatConfig.eos_token_id |
1 | 0 | 0 |
attr |
NanoChatConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.nanochat.modeling_nanochat (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
NanoChatForCausalLM.init |
2 | 0 | 0 |
attr |
NanoChatForCausalLM.model |
1 | 0 | 0 |
attr |
NanoChatForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
NanoChatForCausalLM.lm_head |
1 | 0 | 0 |
meth |
NanoChatModel.init |
2 | 1 | 0 |
attr |
NanoChatModel.padding_idx |
1 | 0 | 0 |
attr |
NanoChatModel.vocab_size |
1 | 0 | 0 |
attr |
NanoChatModel.embed_tokens |
1 | 0 | 0 |
attr |
NanoChatModel.layers |
1 | 0 | 0 |
attr |
NanoChatModel.norm |
1 | 0 | 0 |
attr |
NanoChatModel.rotary_emb |
1 | 0 | 0 |
attr |
NanoChatModel.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.nanochat.modular_nanochat (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
NanoChatForCausalLM.forward |
2 | 1 | 0 |
meth |
NanoChatModel.init |
2 | 1 | 0 |
attr |
NanoChatModel.norm |
1 | 0 | 0 |
transformers.models.nemotron.configuration_nemotron (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
NemotronConfig.init |
22 | 20 | 0 |
attr |
NemotronConfig.vocab_size |
1 | 0 | 0 |
attr |
NemotronConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
NemotronConfig.hidden_size |
1 | 0 | 0 |
attr |
NemotronConfig.intermediate_size |
1 | 0 | 0 |
attr |
NemotronConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
NemotronConfig.num_attention_heads |
1 | 0 | 0 |
attr |
NemotronConfig.head_dim |
1 | 0 | 0 |
attr |
NemotronConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
NemotronConfig.hidden_act |
1 | 0 | 0 |
attr |
NemotronConfig.initializer_range |
1 | 0 | 0 |
attr |
NemotronConfig.norm_eps |
1 | 0 | 0 |
attr |
NemotronConfig.use_cache |
1 | 0 | 0 |
attr |
NemotronConfig.attention_bias |
1 | 0 | 0 |
attr |
NemotronConfig.attention_dropout |
1 | 0 | 0 |
attr |
NemotronConfig.mlp_bias |
1 | 0 | 0 |
attr |
NemotronConfig.rope_parameters |
1 | 0 | 0 |
attr |
NemotronConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
NemotronConfig.pad_token_id |
1 | 0 | 0 |
attr |
NemotronConfig.bos_token_id |
1 | 0 | 0 |
attr |
NemotronConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.nemotron.modeling_nemotron (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
NemotronForCausalLM.init |
2 | 0 | 0 |
meth |
NemotronForCausalLM.forward |
13 | 12 | 0 |
attr |
NemotronForCausalLM.model |
1 | 0 | 0 |
attr |
NemotronForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
NemotronForCausalLM.lm_head |
1 | 0 | 0 |
meth |
NemotronModel.init |
2 | 1 | 0 |
meth |
NemotronModel.forward |
11 | 10 | 0 |
attr |
NemotronModel.padding_idx |
1 | 0 | 0 |
attr |
NemotronModel.vocab_size |
1 | 0 | 0 |
attr |
NemotronModel.embed_tokens |
1 | 0 | 0 |
attr |
NemotronModel.layers |
1 | 0 | 0 |
attr |
NemotronModel.norm |
1 | 0 | 0 |
attr |
NemotronModel.rotary_emb |
1 | 0 | 0 |
attr |
NemotronModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
NemotronPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.nemotron_h.configuration_nemotron_h (113 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
NemotronHConfig._validate_layers_block_type |
4 | 0 | 0 |
meth |
NemotronHConfig.init |
57 | 0 | 0 |
prop |
NemotronHConfig.num_hidden_layers |
2 | 1 | 0 |
attr |
NemotronHConfig.vocab_size |
1 | 0 | 0 |
attr |
NemotronHConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
NemotronHConfig.hidden_size |
1 | 0 | 0 |
attr |
NemotronHConfig.intermediate_size |
1 | 0 | 0 |
attr |
NemotronHConfig.num_attention_heads |
1 | 0 | 0 |
attr |
NemotronHConfig.head_dim |
1 | 0 | 0 |
attr |
NemotronHConfig.sliding_window |
1 | 0 | 0 |
attr |
NemotronHConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
NemotronHConfig.attention_dropout |
1 | 0 | 0 |
attr |
NemotronHConfig.hidden_dropout |
1 | 0 | 0 |
attr |
NemotronHConfig.layers_block_type |
1 | 0 | 0 |
attr |
NemotronHConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
NemotronHConfig.mlp_hidden_act |
1 | 0 | 0 |
attr |
NemotronHConfig.attention_bias |
1 | 0 | 0 |
attr |
NemotronHConfig.mlp_bias |
1 | 0 | 0 |
attr |
NemotronHConfig.use_bias |
1 | 0 | 0 |
attr |
NemotronHConfig.initializer_range |
1 | 0 | 0 |
attr |
NemotronHConfig.layer_norm_epsilon |
1 | 0 | 0 |
attr |
NemotronHConfig.residual_in_fp32 |
1 | 0 | 0 |
attr |
NemotronHConfig.use_cache |
1 | 0 | 0 |
attr |
NemotronHConfig.num_logits_to_keep |
1 | 0 | 0 |
attr |
NemotronHConfig.use_mamba_kernels |
1 | 0 | 0 |
attr |
NemotronHConfig.n_groups |
1 | 0 | 0 |
attr |
NemotronHConfig.mamba_head_dim |
1 | 0 | 0 |
attr |
NemotronHConfig.ssm_state_size |
1 | 0 | 0 |
attr |
NemotronHConfig.mamba_num_heads |
1 | 0 | 0 |
attr |
NemotronHConfig.conv_kernel |
1 | 0 | 0 |
attr |
NemotronHConfig.expand |
1 | 0 | 0 |
attr |
NemotronHConfig.mamba_hidden_act |
1 | 0 | 0 |
attr |
NemotronHConfig.time_step_min |
1 | 0 | 0 |
attr |
NemotronHConfig.time_step_max |
1 | 0 | 0 |
attr |
NemotronHConfig.time_step_limit |
1 | 0 | 0 |
attr |
NemotronHConfig.time_step_floor |
1 | 0 | 0 |
attr |
NemotronHConfig.use_conv_bias |
1 | 0 | 0 |
attr |
NemotronHConfig.mamba_proj_bias |
1 | 0 | 0 |
attr |
NemotronHConfig.chunk_size |
1 | 0 | 0 |
attr |
NemotronHConfig.rescale_prenorm_residual |
1 | 0 | 0 |
attr |
NemotronHConfig.n_routed_experts |
1 | 0 | 0 |
attr |
NemotronHConfig.n_shared_experts |
1 | 0 | 0 |
attr |
NemotronHConfig.moe_intermediate_size |
1 | 0 | 0 |
attr |
NemotronHConfig.moe_shared_expert_intermediate_size |
1 | 0 | 0 |
attr |
NemotronHConfig.moe_latent_size |
1 | 0 | 0 |
attr |
NemotronHConfig.moe_shared_expert_overlap |
1 | 0 | 0 |
attr |
NemotronHConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
NemotronHConfig.routed_scaling_factor |
1 | 0 | 0 |
attr |
NemotronHConfig.n_group |
1 | 0 | 0 |
attr |
NemotronHConfig.topk_group |
1 | 0 | 0 |
attr |
NemotronHConfig.norm_topk_prob |
1 | 0 | 0 |
attr |
NemotronHConfig.mamba_ssm_cache_dtype |
1 | 0 | 0 |
attr |
NemotronHConfig.num_nextn_predict_layers |
1 | 0 | 0 |
attr |
NemotronHConfig.mtp_layers_block_type |
1 | 0 | 0 |
transformers.models.nemotron_h.modeling_nemotron_h (29 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
NemotronHForCausalLM.init |
2 | 0 | 0 |
meth |
NemotronHForCausalLM.forward |
11 | 10 | 0 |
meth |
NemotronHForCausalLM.prepare_inputs_for_generation |
10 | 0 | 0 |
attr |
NemotronHForCausalLM.model |
1 | 0 | 0 |
attr |
NemotronHForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
NemotronHForCausalLM.lm_head |
1 | 0 | 0 |
meth |
NemotronHModel.init |
2 | 0 | 0 |
meth |
NemotronHModel.get_input_embeddings |
1 | 0 | 0 |
meth |
NemotronHModel.set_input_embeddings |
2 | 0 | 0 |
meth |
NemotronHModel._update_mamba_mask |
3 | 0 | 0 |
attr |
NemotronHModel.embeddings |
1 | 0 | 0 |
attr |
NemotronHModel.layers |
1 | 0 | 0 |
attr |
NemotronHModel.norm_f |
1 | 0 | 0 |
meth |
NemotronHPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.nemotron_h.modular_nemotron_h (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
NemotronHForCausalLM.init |
2 | 0 | 0 |
meth |
NemotronHForCausalLM.forward |
11 | 10 | 0 |
meth |
NemotronHModel.init |
2 | 0 | 0 |
meth |
NemotronHModel.get_input_embeddings |
1 | 0 | 0 |
meth |
NemotronHModel.set_input_embeddings |
2 | 0 | 0 |
meth |
NemotronHModel._update_mamba_mask |
3 | 0 | 0 |
attr |
NemotronHModel.embeddings |
1 | 0 | 0 |
attr |
NemotronHModel.layers |
1 | 0 | 0 |
attr |
NemotronHModel.norm_f |
1 | 0 | 0 |
meth |
NemotronHPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.nllb.tokenization_nllb (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
NllbTokenizer.init |
17 | 3 | 0 |
meth |
NllbTokenizer._build_translation_inputs |
6 | 3 | 0 |
meth |
NllbTokenizer.prepare_seq2seq_batch |
11 | 10 | 0 |
meth |
NllbTokenizer._switch_to_input_mode |
1 | 0 | 0 |
meth |
NllbTokenizer._switch_to_target_mode |
1 | 0 | 0 |
meth |
NllbTokenizer.set_src_lang_special_tokens |
2 | 1 | 0 |
attr |
NllbTokenizer.legacy_behaviour |
1 | 0 | 0 |
attr |
NllbTokenizer.fairseq_offset |
1 | 0 | 0 |
attr |
NllbTokenizer.fairseq_tokens_to_ids |
1 | 0 | 0 |
attr |
NllbTokenizer.fairseq_ids_to_tokens |
1 | 0 | 0 |
attr |
NllbTokenizer.cur_lang_code |
1 | 0 | 0 |
attr |
NllbTokenizer.tgt_lang |
1 | 0 | 0 |
transformers.models.nllb_moe.configuration_nllb_moe (80 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
NllbMoeConfig.init |
41 | 0 | 0 |
attr |
NllbMoeConfig.vocab_size |
1 | 0 | 0 |
attr |
NllbMoeConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
NllbMoeConfig.d_model |
1 | 0 | 0 |
attr |
NllbMoeConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
NllbMoeConfig.encoder_layers |
1 | 0 | 0 |
attr |
NllbMoeConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
NllbMoeConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
NllbMoeConfig.decoder_layers |
1 | 0 | 0 |
attr |
NllbMoeConfig.decoder_attention_heads |
1 | 0 | 0 |
attr |
NllbMoeConfig.dropout |
1 | 0 | 0 |
attr |
NllbMoeConfig.attention_dropout |
1 | 0 | 0 |
attr |
NllbMoeConfig.activation_dropout |
1 | 0 | 0 |
attr |
NllbMoeConfig.activation_function |
1 | 0 | 0 |
attr |
NllbMoeConfig.init_std |
1 | 0 | 0 |
attr |
NllbMoeConfig.encoder_layerdrop |
1 | 0 | 0 |
attr |
NllbMoeConfig.decoder_layerdrop |
1 | 0 | 0 |
attr |
NllbMoeConfig.use_cache |
1 | 0 | 0 |
attr |
NllbMoeConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
NllbMoeConfig.scale_embedding |
1 | 0 | 0 |
attr |
NllbMoeConfig.router_z_loss_coef |
1 | 0 | 0 |
attr |
NllbMoeConfig.router_aux_loss_coef |
1 | 0 | 0 |
attr |
NllbMoeConfig.decoder_sparse_step |
1 | 0 | 0 |
attr |
NllbMoeConfig.encoder_sparse_step |
1 | 0 | 0 |
attr |
NllbMoeConfig.num_experts |
1 | 0 | 0 |
attr |
NllbMoeConfig.expert_capacity |
1 | 0 | 0 |
attr |
NllbMoeConfig.router_bias |
1 | 0 | 0 |
attr |
NllbMoeConfig.router_dtype |
1 | 0 | 0 |
attr |
NllbMoeConfig.router_ignore_padding_tokens |
1 | 0 | 0 |
attr |
NllbMoeConfig.batch_prioritized_routing |
1 | 0 | 0 |
attr |
NllbMoeConfig.second_expert_policy |
1 | 0 | 0 |
attr |
NllbMoeConfig.normalize_router_prob_before_dropping |
1 | 0 | 0 |
attr |
NllbMoeConfig.moe_eval_capacity_token_fraction |
1 | 0 | 0 |
attr |
NllbMoeConfig.moe_token_dropout |
1 | 0 | 0 |
attr |
NllbMoeConfig.output_router_logits |
1 | 0 | 0 |
attr |
NllbMoeConfig.pad_token_id |
1 | 0 | 0 |
attr |
NllbMoeConfig.bos_token_id |
1 | 0 | 0 |
attr |
NllbMoeConfig.eos_token_id |
1 | 0 | 0 |
attr |
NllbMoeConfig.decoder_start_token_id |
1 | 0 | 0 |
attr |
NllbMoeConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.nllb_moe.modeling_nllb_moe (35 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
NllbMoeTop2Router.init |
2 | 1 | 0 |
meth |
NllbMoeTop2Router._cast_classifier |
1 | 0 | 0 |
meth |
NllbMoeTop2Router.normalize_router_probabilities |
4 | 0 | 0 |
attr |
NllbMoeTop2Router.num_experts |
1 | 0 | 0 |
attr |
NllbMoeTop2Router.expert_capacity |
1 | 0 | 0 |
attr |
NllbMoeTop2Router.classifier |
1 | 0 | 0 |
attr |
NllbMoeTop2Router.router_ignore_padding_tokens |
1 | 0 | 0 |
attr |
NllbMoeTop2Router.dtype |
1 | 0 | 0 |
attr |
NllbMoeTop2Router.second_expert_policy |
1 | 0 | 0 |
attr |
NllbMoeTop2Router.normalize_router_prob_before_dropping |
1 | 0 | 0 |
attr |
NllbMoeTop2Router.batch_prioritized_routing |
1 | 0 | 0 |
attr |
NllbMoeTop2Router.moe_eval_capacity_token_fraction |
1 | 0 | 0 |
meth |
NllbMoePreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
NllbMoeModel.init |
2 | 1 | 0 |
meth |
NllbMoeModel.get_input_embeddings |
1 | 0 | 0 |
meth |
NllbMoeModel.set_input_embeddings |
2 | 0 | 0 |
attr |
NllbMoeModel.shared |
1 | 0 | 0 |
attr |
NllbMoeModel.encoder |
1 | 0 | 0 |
attr |
NllbMoeModel.decoder |
1 | 0 | 0 |
meth |
NllbMoeSparseMLP.init |
3 | 2 | 0 |
meth |
NllbMoeSparseMLP.forward |
3 | 2 | 0 |
attr |
NllbMoeSparseMLP.router |
1 | 0 | 0 |
attr |
NllbMoeSparseMLP.num_experts |
1 | 0 | 0 |
attr |
NllbMoeSparseMLP.experts |
1 | 0 | 0 |
meth |
NllbMoeForConditionalGeneration.init |
2 | 1 | 0 |
attr |
NllbMoeForConditionalGeneration.model |
1 | 0 | 0 |
attr |
NllbMoeForConditionalGeneration.lm_head |
1 | 0 | 0 |
attr |
NllbMoeForConditionalGeneration.num_experts |
1 | 0 | 0 |
attr |
NllbMoeForConditionalGeneration.router_z_loss_coef |
1 | 0 | 0 |
attr |
NllbMoeForConditionalGeneration.router_aux_loss_coef |
1 | 0 | 0 |
transformers.models.nougat.image_processing_nougat (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
NougatImageProcessor.init |
14 | 13 | 0 |
meth |
NougatImageProcessor.python_find_non_zero |
2 | 1 | 0 |
meth |
NougatImageProcessor.python_bounding_rect |
2 | 0 | 0 |
meth |
NougatImageProcessor.thumbnail |
7 | 6 | 0 |
meth |
NougatImageProcessor.resize |
7 | 6 | 0 |
attr |
NougatImageProcessor.do_crop_margin |
1 | 0 | 0 |
attr |
NougatImageProcessor.do_resize |
1 | 0 | 0 |
attr |
NougatImageProcessor.size |
1 | 0 | 0 |
attr |
NougatImageProcessor.resample |
1 | 0 | 0 |
attr |
NougatImageProcessor.do_thumbnail |
1 | 0 | 0 |
attr |
NougatImageProcessor.do_align_long_axis |
1 | 0 | 0 |
attr |
NougatImageProcessor.do_pad |
1 | 0 | 0 |
attr |
NougatImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
NougatImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
NougatImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
NougatImageProcessor.image_mean |
1 | 0 | 0 |
attr |
NougatImageProcessor.image_std |
1 | 0 | 0 |
transformers.models.nougat.image_processing_nougat_fast (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
NougatImageProcessorFast.init |
2 | 1 | 0 |
meth |
NougatImageProcessorFast.python_find_non_zero |
2 | 1 | 0 |
meth |
NougatImageProcessorFast.python_bounding_rect |
2 | 0 | 0 |
meth |
NougatImageProcessorFast.resize |
6 | 5 | 0 |
meth |
NougatImageProcessorFast._preprocess |
19 | 18 | 0 |
transformers.models.nougat.processing_nougat (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
NougatProcessor.init |
3 | 0 | 0 |
meth |
NougatProcessor.call |
35 | 32 | 0 |
meth |
NougatProcessor.post_process_generation |
3 | 0 | 0 |
transformers.models.nougat.tokenization_nougat (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
NougatTokenizer.init |
9 | 7 | 0 |
transformers.models.nystromformer.configuration_nystromformer (44 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
NystromformerConfig.init |
23 | 0 | 0 |
attr |
NystromformerConfig.add_cross_attention |
1 | 0 | 0 |
attr |
NystromformerConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
NystromformerConfig.pad_token_id |
1 | 0 | 0 |
attr |
NystromformerConfig.bos_token_id |
1 | 0 | 0 |
attr |
NystromformerConfig.eos_token_id |
1 | 0 | 0 |
attr |
NystromformerConfig.vocab_size |
1 | 0 | 0 |
attr |
NystromformerConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
NystromformerConfig.hidden_size |
1 | 0 | 0 |
attr |
NystromformerConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
NystromformerConfig.num_attention_heads |
1 | 0 | 0 |
attr |
NystromformerConfig.intermediate_size |
1 | 0 | 0 |
attr |
NystromformerConfig.hidden_act |
1 | 0 | 0 |
attr |
NystromformerConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
NystromformerConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
NystromformerConfig.initializer_range |
1 | 0 | 0 |
attr |
NystromformerConfig.type_vocab_size |
1 | 0 | 0 |
attr |
NystromformerConfig.segment_means_seq_len |
1 | 0 | 0 |
attr |
NystromformerConfig.num_landmarks |
1 | 0 | 0 |
attr |
NystromformerConfig.conv_kernel_size |
1 | 0 | 0 |
attr |
NystromformerConfig.inv_coeff_init_option |
1 | 0 | 0 |
attr |
NystromformerConfig.layer_norm_eps |
1 | 0 | 0 |
transformers.models.nystromformer.modeling_nystromformer (57 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
NystromformerForTokenClassification.init |
2 | 0 | 0 |
meth |
NystromformerForTokenClassification.forward |
11 | 10 | 0 |
attr |
NystromformerForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
NystromformerForTokenClassification.nystromformer |
1 | 0 | 0 |
attr |
NystromformerForTokenClassification.dropout |
1 | 0 | 0 |
attr |
NystromformerForTokenClassification.classifier |
1 | 0 | 0 |
meth |
NystromformerModel.init |
2 | 0 | 0 |
meth |
NystromformerModel.get_input_embeddings |
1 | 0 | 0 |
meth |
NystromformerModel.set_input_embeddings |
2 | 0 | 0 |
meth |
NystromformerModel.forward |
10 | 9 | 0 |
attr |
NystromformerModel.embeddings |
1 | 0 | 0 |
attr |
NystromformerModel.encoder |
1 | 0 | 0 |
meth |
NystromformerPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
NystromformerForSequenceClassification.init |
2 | 0 | 0 |
meth |
NystromformerForSequenceClassification.forward |
11 | 10 | 0 |
attr |
NystromformerForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
NystromformerForSequenceClassification.nystromformer |
1 | 0 | 0 |
attr |
NystromformerForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
NystromformerForQuestionAnswering.init |
2 | 0 | 0 |
meth |
NystromformerForQuestionAnswering.forward |
12 | 11 | 0 |
attr |
NystromformerForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
NystromformerForQuestionAnswering.nystromformer |
1 | 0 | 0 |
attr |
NystromformerForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
NystromformerForMultipleChoice.init |
2 | 0 | 0 |
meth |
NystromformerForMultipleChoice.forward |
11 | 10 | 0 |
attr |
NystromformerForMultipleChoice.nystromformer |
1 | 0 | 0 |
attr |
NystromformerForMultipleChoice.pre_classifier |
1 | 0 | 0 |
attr |
NystromformerForMultipleChoice.classifier |
1 | 0 | 0 |
meth |
NystromformerLayer.init |
2 | 0 | 0 |
meth |
NystromformerLayer.forward |
4 | 0 | 0 |
meth |
NystromformerLayer.feed_forward_chunk |
2 | 0 | 0 |
attr |
NystromformerLayer.chunk_size_feed_forward |
1 | 0 | 0 |
attr |
NystromformerLayer.seq_len_dim |
1 | 0 | 0 |
attr |
NystromformerLayer.attention |
1 | 0 | 0 |
attr |
NystromformerLayer.add_cross_attention |
1 | 0 | 0 |
attr |
NystromformerLayer.intermediate |
1 | 0 | 0 |
attr |
NystromformerLayer.output |
1 | 0 | 0 |
meth |
NystromformerForMaskedLM.init |
2 | 0 | 0 |
meth |
NystromformerForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
NystromformerForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
meth |
NystromformerForMaskedLM.forward |
11 | 10 | 0 |
attr |
NystromformerForMaskedLM.nystromformer |
1 | 0 | 0 |
attr |
NystromformerForMaskedLM.cls |
1 | 0 | 0 |
transformers.models.olmo.configuration_olmo (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OlmoConfig.init |
20 | 18 | 0 |
attr |
OlmoConfig.vocab_size |
1 | 0 | 0 |
attr |
OlmoConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
OlmoConfig.hidden_size |
1 | 0 | 0 |
attr |
OlmoConfig.intermediate_size |
1 | 0 | 0 |
attr |
OlmoConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
OlmoConfig.num_attention_heads |
1 | 0 | 0 |
attr |
OlmoConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
OlmoConfig.hidden_act |
1 | 0 | 0 |
attr |
OlmoConfig.initializer_range |
1 | 0 | 0 |
attr |
OlmoConfig.use_cache |
1 | 0 | 0 |
attr |
OlmoConfig.attention_bias |
1 | 0 | 0 |
attr |
OlmoConfig.attention_dropout |
1 | 0 | 0 |
attr |
OlmoConfig.clip_qkv |
1 | 0 | 0 |
attr |
OlmoConfig.rope_parameters |
1 | 0 | 0 |
attr |
OlmoConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
OlmoConfig.pad_token_id |
1 | 0 | 0 |
attr |
OlmoConfig.bos_token_id |
1 | 0 | 0 |
attr |
OlmoConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.olmo.modeling_olmo (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OlmoModel.init |
2 | 1 | 0 |
attr |
OlmoModel.padding_idx |
1 | 0 | 0 |
attr |
OlmoModel.vocab_size |
1 | 0 | 0 |
attr |
OlmoModel.embed_tokens |
1 | 0 | 0 |
attr |
OlmoModel.layers |
1 | 0 | 0 |
attr |
OlmoModel.norm |
1 | 0 | 0 |
attr |
OlmoModel.rotary_emb |
1 | 0 | 0 |
attr |
OlmoModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
OlmoForCausalLM.init |
2 | 0 | 0 |
attr |
OlmoForCausalLM.model |
1 | 0 | 0 |
attr |
OlmoForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
OlmoForCausalLM.lm_head |
1 | 0 | 0 |
transformers.models.olmo.modular_olmo (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OlmoModel.init |
2 | 1 | 0 |
attr |
OlmoModel.layers |
1 | 0 | 0 |
attr |
OlmoModel.norm |
1 | 0 | 0 |
attr |
OlmoPreTrainedModel |
1 | 0 | 0 |
transformers.models.olmo2.configuration_olmo2 (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Olmo2Config.init |
20 | 18 | 0 |
attr |
Olmo2Config.vocab_size |
1 | 0 | 0 |
attr |
Olmo2Config.max_position_embeddings |
1 | 0 | 0 |
attr |
Olmo2Config.hidden_size |
1 | 0 | 0 |
attr |
Olmo2Config.intermediate_size |
1 | 0 | 0 |
attr |
Olmo2Config.num_hidden_layers |
1 | 0 | 0 |
attr |
Olmo2Config.num_attention_heads |
1 | 0 | 0 |
attr |
Olmo2Config.num_key_value_heads |
1 | 0 | 0 |
attr |
Olmo2Config.hidden_act |
1 | 0 | 0 |
attr |
Olmo2Config.initializer_range |
1 | 0 | 0 |
attr |
Olmo2Config.use_cache |
1 | 0 | 0 |
attr |
Olmo2Config.attention_bias |
1 | 0 | 0 |
attr |
Olmo2Config.attention_dropout |
1 | 0 | 0 |
attr |
Olmo2Config.rope_parameters |
1 | 0 | 0 |
attr |
Olmo2Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
Olmo2Config.pad_token_id |
1 | 0 | 0 |
attr |
Olmo2Config.bos_token_id |
1 | 0 | 0 |
attr |
Olmo2Config.eos_token_id |
1 | 0 | 0 |
attr |
Olmo2Config.rms_norm_eps |
1 | 0 | 0 |
transformers.models.olmo2.modeling_olmo2 (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Olmo2ForCausalLM.init |
2 | 0 | 0 |
attr |
Olmo2ForCausalLM.model |
1 | 0 | 0 |
attr |
Olmo2ForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Olmo2ForCausalLM.lm_head |
1 | 0 | 0 |
meth |
Olmo2Model.init |
2 | 1 | 0 |
attr |
Olmo2Model.padding_idx |
1 | 0 | 0 |
attr |
Olmo2Model.vocab_size |
1 | 0 | 0 |
attr |
Olmo2Model.embed_tokens |
1 | 0 | 0 |
attr |
Olmo2Model.layers |
1 | 0 | 0 |
attr |
Olmo2Model.norm |
1 | 0 | 0 |
attr |
Olmo2Model.rotary_emb |
1 | 0 | 0 |
attr |
Olmo2Model.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.olmo2.modular_olmo2 (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Olmo2Config.init |
20 | 18 | 0 |
attr |
Olmo2Config.rms_norm_eps |
1 | 0 | 0 |
meth |
Olmo2Model.init |
2 | 1 | 0 |
attr |
Olmo2Model.norm |
1 | 0 | 0 |
attr |
Olmo2Model.layers |
1 | 0 | 0 |
transformers.models.olmo3.configuration_olmo3 (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Olmo3Config.init |
22 | 20 | 0 |
attr |
Olmo3Config.vocab_size |
1 | 0 | 0 |
attr |
Olmo3Config.max_position_embeddings |
1 | 0 | 0 |
attr |
Olmo3Config.hidden_size |
1 | 0 | 0 |
attr |
Olmo3Config.intermediate_size |
1 | 0 | 0 |
attr |
Olmo3Config.num_hidden_layers |
1 | 0 | 0 |
attr |
Olmo3Config.num_attention_heads |
1 | 0 | 0 |
attr |
Olmo3Config.num_key_value_heads |
1 | 0 | 0 |
attr |
Olmo3Config.hidden_act |
1 | 0 | 0 |
attr |
Olmo3Config.initializer_range |
1 | 0 | 0 |
attr |
Olmo3Config.use_cache |
1 | 0 | 0 |
attr |
Olmo3Config.attention_bias |
1 | 0 | 0 |
attr |
Olmo3Config.attention_dropout |
1 | 0 | 0 |
attr |
Olmo3Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
Olmo3Config.pad_token_id |
1 | 0 | 0 |
attr |
Olmo3Config.bos_token_id |
1 | 0 | 0 |
attr |
Olmo3Config.eos_token_id |
1 | 0 | 0 |
attr |
Olmo3Config.rms_norm_eps |
1 | 0 | 0 |
attr |
Olmo3Config.sliding_window |
1 | 0 | 0 |
attr |
Olmo3Config.layer_types |
1 | 0 | 0 |
attr |
Olmo3Config.rope_parameters |
1 | 0 | 0 |
transformers.models.olmo3.modeling_olmo3 (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Olmo3ForCausalLM.init |
2 | 0 | 0 |
attr |
Olmo3ForCausalLM.model |
1 | 0 | 0 |
attr |
Olmo3ForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Olmo3ForCausalLM.lm_head |
1 | 0 | 0 |
meth |
Olmo3Model.init |
2 | 1 | 0 |
attr |
Olmo3Model.padding_idx |
1 | 0 | 0 |
attr |
Olmo3Model.vocab_size |
1 | 0 | 0 |
attr |
Olmo3Model.embed_tokens |
1 | 0 | 0 |
attr |
Olmo3Model.layers |
1 | 0 | 0 |
attr |
Olmo3Model.norm |
1 | 0 | 0 |
attr |
Olmo3Model.rotary_emb |
1 | 0 | 0 |
attr |
Olmo3Model.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.olmo3.modular_olmo3 (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Olmo3Config.init |
22 | 20 | 0 |
attr |
Olmo3Config.vocab_size |
1 | 0 | 0 |
attr |
Olmo3Config.max_position_embeddings |
1 | 0 | 0 |
attr |
Olmo3Config.hidden_size |
1 | 0 | 0 |
attr |
Olmo3Config.intermediate_size |
1 | 0 | 0 |
attr |
Olmo3Config.num_hidden_layers |
1 | 0 | 0 |
attr |
Olmo3Config.num_attention_heads |
1 | 0 | 0 |
attr |
Olmo3Config.num_key_value_heads |
1 | 0 | 0 |
attr |
Olmo3Config.hidden_act |
1 | 0 | 0 |
attr |
Olmo3Config.initializer_range |
1 | 0 | 0 |
attr |
Olmo3Config.use_cache |
1 | 0 | 0 |
attr |
Olmo3Config.attention_bias |
1 | 0 | 0 |
attr |
Olmo3Config.attention_dropout |
1 | 0 | 0 |
attr |
Olmo3Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
Olmo3Config.pad_token_id |
1 | 0 | 0 |
attr |
Olmo3Config.bos_token_id |
1 | 0 | 0 |
attr |
Olmo3Config.eos_token_id |
1 | 0 | 0 |
attr |
Olmo3Config.rms_norm_eps |
1 | 0 | 0 |
attr |
Olmo3Config.sliding_window |
1 | 0 | 0 |
attr |
Olmo3Config.layer_types |
1 | 0 | 0 |
attr |
Olmo3Config.rope_parameters |
1 | 0 | 0 |
meth |
Olmo3Model.init |
2 | 1 | 0 |
attr |
Olmo3Model.norm |
1 | 0 | 0 |
attr |
Olmo3Model.layers |
1 | 0 | 0 |
attr |
Olmo3Model.rotary_emb |
1 | 0 | 0 |
transformers.models.olmo_hybrid.configuration_olmo_hybrid (33 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OlmoHybridConfig.init |
32 | 29 | 0 |
attr |
OlmoHybridConfig.layer_types |
1 | 0 | 0 |
attr |
OlmoHybridConfig.linear_num_key_heads |
1 | 0 | 0 |
attr |
OlmoHybridConfig.linear_num_value_heads |
1 | 0 | 0 |
attr |
OlmoHybridConfig.linear_key_head_dim |
1 | 0 | 0 |
attr |
OlmoHybridConfig.linear_value_head_dim |
1 | 0 | 0 |
attr |
OlmoHybridConfig.linear_a_log_min |
1 | 0 | 0 |
attr |
OlmoHybridConfig.linear_a_log_max |
1 | 0 | 0 |
attr |
OlmoHybridConfig.linear_dt_min |
1 | 0 | 0 |
attr |
OlmoHybridConfig.linear_dt_max |
1 | 0 | 0 |
attr |
OlmoHybridConfig.linear_dt_init_floor |
1 | 0 | 0 |
attr |
OlmoHybridConfig.linear_conv_kernel_dim |
1 | 0 | 0 |
attr |
OlmoHybridConfig.linear_allow_neg_eigval |
1 | 0 | 0 |
attr |
OlmoHybridConfig.vocab_size |
1 | 0 | 0 |
attr |
OlmoHybridConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
OlmoHybridConfig.hidden_size |
1 | 0 | 0 |
attr |
OlmoHybridConfig.intermediate_size |
1 | 0 | 0 |
attr |
OlmoHybridConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
OlmoHybridConfig.num_attention_heads |
1 | 0 | 0 |
attr |
OlmoHybridConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
OlmoHybridConfig.hidden_act |
1 | 0 | 0 |
attr |
OlmoHybridConfig.initializer_range |
1 | 0 | 0 |
attr |
OlmoHybridConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
OlmoHybridConfig.use_cache |
1 | 0 | 0 |
attr |
OlmoHybridConfig.attention_bias |
1 | 0 | 0 |
attr |
OlmoHybridConfig.attention_dropout |
1 | 0 | 0 |
attr |
OlmoHybridConfig.rope_parameters |
1 | 0 | 0 |
attr |
OlmoHybridConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
OlmoHybridConfig.pad_token_id |
1 | 0 | 0 |
attr |
OlmoHybridConfig.bos_token_id |
1 | 0 | 0 |
attr |
OlmoHybridConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.olmo_hybrid.modeling_olmo_hybrid (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OlmoHybridModel.init |
2 | 1 | 0 |
meth |
OlmoHybridModel._update_linear_attn_mask |
3 | 0 | 0 |
attr |
OlmoHybridModel.embed_tokens |
1 | 0 | 0 |
attr |
OlmoHybridModel.layers |
1 | 0 | 0 |
attr |
OlmoHybridModel.norm |
1 | 0 | 0 |
attr |
OlmoHybridModel.rotary_emb |
1 | 0 | 0 |
attr |
OlmoHybridModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
OlmoHybridForCausalLM.init |
2 | 0 | 0 |
attr |
OlmoHybridForCausalLM.model |
1 | 0 | 0 |
attr |
OlmoHybridForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
OlmoHybridForCausalLM.lm_head |
1 | 0 | 0 |
meth |
OlmoHybridPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.olmo_hybrid.modular_olmo_hybrid (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OlmoHybridModel.init |
2 | 1 | 0 |
attr |
OlmoHybridModel.layers |
1 | 0 | 0 |
attr |
OlmoHybridModel.rotary_emb |
1 | 0 | 0 |
meth |
OlmoHybridConfig.init |
32 | 29 | 0 |
attr |
OlmoHybridConfig.layer_types |
1 | 0 | 0 |
attr |
OlmoHybridConfig.linear_num_key_heads |
1 | 0 | 0 |
attr |
OlmoHybridConfig.linear_num_value_heads |
1 | 0 | 0 |
attr |
OlmoHybridConfig.linear_key_head_dim |
1 | 0 | 0 |
attr |
OlmoHybridConfig.linear_value_head_dim |
1 | 0 | 0 |
attr |
OlmoHybridConfig.linear_a_log_min |
1 | 0 | 0 |
attr |
OlmoHybridConfig.linear_a_log_max |
1 | 0 | 0 |
attr |
OlmoHybridConfig.linear_dt_min |
1 | 0 | 0 |
attr |
OlmoHybridConfig.linear_dt_max |
1 | 0 | 0 |
attr |
OlmoHybridConfig.linear_dt_init_floor |
1 | 0 | 0 |
attr |
OlmoHybridConfig.linear_conv_kernel_dim |
1 | 0 | 0 |
attr |
OlmoHybridConfig.linear_allow_neg_eigval |
1 | 0 | 0 |
meth |
OlmoHybridPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.olmoe.configuration_olmoe (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OlmoeConfig.init |
26 | 24 | 0 |
attr |
OlmoeConfig.vocab_size |
1 | 0 | 0 |
attr |
OlmoeConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
OlmoeConfig.hidden_size |
1 | 0 | 0 |
attr |
OlmoeConfig.intermediate_size |
1 | 0 | 0 |
attr |
OlmoeConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
OlmoeConfig.num_attention_heads |
1 | 0 | 0 |
attr |
OlmoeConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
OlmoeConfig.hidden_act |
1 | 0 | 0 |
attr |
OlmoeConfig.initializer_range |
1 | 0 | 0 |
attr |
OlmoeConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
OlmoeConfig.use_cache |
1 | 0 | 0 |
attr |
OlmoeConfig.attention_bias |
1 | 0 | 0 |
attr |
OlmoeConfig.attention_dropout |
1 | 0 | 0 |
attr |
OlmoeConfig.clip_qkv |
1 | 0 | 0 |
attr |
OlmoeConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
OlmoeConfig.num_experts |
1 | 0 | 0 |
attr |
OlmoeConfig.output_router_logits |
1 | 0 | 0 |
attr |
OlmoeConfig.router_aux_loss_coef |
1 | 0 | 0 |
attr |
OlmoeConfig.norm_topk_prob |
1 | 0 | 0 |
attr |
OlmoeConfig.rope_parameters |
1 | 0 | 0 |
attr |
OlmoeConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
OlmoeConfig.pad_token_id |
1 | 0 | 0 |
attr |
OlmoeConfig.bos_token_id |
1 | 0 | 0 |
attr |
OlmoeConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.olmoe.modeling_olmoe (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OlmoePreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
OlmoePreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
OlmoeModel.init |
2 | 1 | 0 |
attr |
OlmoeModel.padding_idx |
1 | 0 | 0 |
attr |
OlmoeModel.vocab_size |
1 | 0 | 0 |
attr |
OlmoeModel.embed_tokens |
1 | 0 | 0 |
attr |
OlmoeModel.layers |
1 | 0 | 0 |
attr |
OlmoeModel.norm |
1 | 0 | 0 |
attr |
OlmoeModel.rotary_emb |
1 | 0 | 0 |
attr |
OlmoeModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
OlmoeForCausalLM.init |
2 | 0 | 0 |
attr |
OlmoeForCausalLM.model |
1 | 0 | 0 |
attr |
OlmoeForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
OlmoeForCausalLM.lm_head |
1 | 0 | 0 |
attr |
OlmoeForCausalLM.router_aux_loss_coef |
1 | 0 | 0 |
attr |
OlmoeForCausalLM.num_experts |
1 | 0 | 0 |
attr |
OlmoeForCausalLM.num_experts_per_tok |
1 | 0 | 0 |
transformers.models.olmoe.modular_olmoe (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OlmoePreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
OlmoePreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
OlmoeModel.init |
2 | 1 | 0 |
attr |
OlmoeModel.embed_tokens |
1 | 0 | 0 |
attr |
OlmoeModel.layers |
1 | 0 | 0 |
attr |
OlmoeModel.norm |
1 | 0 | 0 |
attr |
OlmoeModel.rotary_emb |
1 | 0 | 0 |
meth |
OlmoeForCausalLM.init |
2 | 0 | 0 |
meth |
OlmoeForCausalLM.forward |
2 | 0 | 0 |
attr |
OlmoeForCausalLM.model |
1 | 0 | 0 |
attr |
OlmoeForCausalLM.num_experts |
1 | 0 | 0 |
transformers.models.omdet_turbo.configuration_omdet_turbo (81 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OmDetTurboConfig.init |
42 | 0 | 0 |
attr |
OmDetTurboConfig.text_config |
1 | 0 | 0 |
attr |
OmDetTurboConfig.backbone_config |
1 | 0 | 0 |
attr |
OmDetTurboConfig.apply_layernorm_after_vision_backbone |
1 | 0 | 0 |
attr |
OmDetTurboConfig.image_size |
1 | 0 | 0 |
attr |
OmDetTurboConfig.disable_custom_kernels |
1 | 0 | 0 |
attr |
OmDetTurboConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
OmDetTurboConfig.batch_norm_eps |
1 | 0 | 0 |
attr |
OmDetTurboConfig.init_std |
1 | 0 | 0 |
attr |
OmDetTurboConfig.text_projection_in_dim |
1 | 0 | 0 |
attr |
OmDetTurboConfig.text_projection_out_dim |
1 | 0 | 0 |
attr |
OmDetTurboConfig.task_encoder_hidden_dim |
1 | 0 | 0 |
attr |
OmDetTurboConfig.class_embed_dim |
1 | 0 | 0 |
attr |
OmDetTurboConfig.class_distance_type |
1 | 0 | 0 |
attr |
OmDetTurboConfig.num_queries |
1 | 0 | 0 |
attr |
OmDetTurboConfig.csp_activation |
1 | 0 | 0 |
attr |
OmDetTurboConfig.conv_norm_activation |
1 | 0 | 0 |
attr |
OmDetTurboConfig.encoder_feedforward_activation |
1 | 0 | 0 |
attr |
OmDetTurboConfig.encoder_feedforward_dropout |
1 | 0 | 0 |
attr |
OmDetTurboConfig.encoder_dropout |
1 | 0 | 0 |
attr |
OmDetTurboConfig.hidden_expansion |
1 | 0 | 0 |
attr |
OmDetTurboConfig.vision_features_channels |
1 | 0 | 0 |
attr |
OmDetTurboConfig.encoder_hidden_dim |
1 | 0 | 0 |
attr |
OmDetTurboConfig.encoder_in_channels |
1 | 0 | 0 |
attr |
OmDetTurboConfig.encoder_projection_indices |
1 | 0 | 0 |
attr |
OmDetTurboConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
OmDetTurboConfig.encoder_dim_feedforward |
1 | 0 | 0 |
attr |
OmDetTurboConfig.encoder_layers |
1 | 0 | 0 |
attr |
OmDetTurboConfig.positional_encoding_temperature |
1 | 0 | 0 |
attr |
OmDetTurboConfig.num_feature_levels |
1 | 0 | 0 |
attr |
OmDetTurboConfig.decoder_hidden_dim |
1 | 0 | 0 |
attr |
OmDetTurboConfig.decoder_num_heads |
1 | 0 | 0 |
attr |
OmDetTurboConfig.decoder_num_layers |
1 | 0 | 0 |
attr |
OmDetTurboConfig.decoder_activation |
1 | 0 | 0 |
attr |
OmDetTurboConfig.decoder_dim_feedforward |
1 | 0 | 0 |
attr |
OmDetTurboConfig.decoder_num_points |
1 | 0 | 0 |
attr |
OmDetTurboConfig.decoder_dropout |
1 | 0 | 0 |
attr |
OmDetTurboConfig.eval_size |
1 | 0 | 0 |
attr |
OmDetTurboConfig.learn_initial_query |
1 | 0 | 0 |
attr |
OmDetTurboConfig.cache_size |
1 | 0 | 0 |
transformers.models.omdet_turbo.modeling_omdet_turbo (35 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OmDetTurboForObjectDetection.init |
2 | 1 | 0 |
meth |
OmDetTurboForObjectDetection.get_input_embeddings |
1 | 0 | 0 |
meth |
OmDetTurboForObjectDetection.set_input_embeddings |
2 | 0 | 0 |
meth |
OmDetTurboForObjectDetection.resize_token_embeddings |
4 | 3 | 0 |
meth |
OmDetTurboForObjectDetection.forward |
12 | 11 | 0 |
attr |
OmDetTurboForObjectDetection.vision_backbone |
1 | 0 | 0 |
attr |
OmDetTurboForObjectDetection.language_backbone |
1 | 0 | 0 |
attr |
OmDetTurboForObjectDetection.encoder |
1 | 0 | 0 |
attr |
OmDetTurboForObjectDetection.decoder |
1 | 0 | 0 |
attr |
OmDetTurboForObjectDetection.num_queries |
1 | 0 | 0 |
attr |
OmDetTurboForObjectDetection.language_cache_class |
1 | 0 | 0 |
attr |
OmDetTurboForObjectDetection.language_cache_prompt |
1 | 0 | 0 |
attr |
OmDetTurboForObjectDetection.vocab_size |
1 | 0 | 0 |
meth |
OmDetTurboPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
OmDetTurboPreTrainedModel._set_gradient_checkpointing |
3 | 0 | 0 |
meth |
OmDetTurboPreTrainedModel._get_cache_key_at_index |
4 | 0 | 0 |
meth |
OmDetTurboPreTrainedModel.get_cached_class_embeddings |
3 | 0 | 0 |
meth |
OmDetTurboPreTrainedModel.get_cached_task_embeddings |
3 | 0 | 0 |
meth |
OmDetTurboPreTrainedModel.get_language_embedding |
6 | 0 | 0 |
transformers.models.omdet_turbo.processing_omdet_turbo (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OmDetTurboProcessor.init |
3 | 0 | 0 |
meth |
OmDetTurboProcessor.post_process_grounded_object_detection |
7 | 6 | 0 |
prop |
OmDetTurboProcessor.model_input_names |
1 | 0 | 0 |
transformers.models.oneformer.configuration_oneformer (44 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OneFormerConfig.init |
43 | 41 | 0 |
attr |
OneFormerConfig.backbone_config |
1 | 0 | 0 |
attr |
OneFormerConfig.ignore_value |
1 | 0 | 0 |
attr |
OneFormerConfig.num_queries |
1 | 0 | 0 |
attr |
OneFormerConfig.no_object_weight |
1 | 0 | 0 |
attr |
OneFormerConfig.class_weight |
1 | 0 | 0 |
attr |
OneFormerConfig.mask_weight |
1 | 0 | 0 |
attr |
OneFormerConfig.dice_weight |
1 | 0 | 0 |
attr |
OneFormerConfig.contrastive_weight |
1 | 0 | 0 |
attr |
OneFormerConfig.contrastive_temperature |
1 | 0 | 0 |
attr |
OneFormerConfig.train_num_points |
1 | 0 | 0 |
attr |
OneFormerConfig.oversample_ratio |
1 | 0 | 0 |
attr |
OneFormerConfig.importance_sample_ratio |
1 | 0 | 0 |
attr |
OneFormerConfig.init_std |
1 | 0 | 0 |
attr |
OneFormerConfig.init_xavier_std |
1 | 0 | 0 |
attr |
OneFormerConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
OneFormerConfig.is_training |
1 | 0 | 0 |
attr |
OneFormerConfig.use_auxiliary_loss |
1 | 0 | 0 |
attr |
OneFormerConfig.output_auxiliary_logits |
1 | 0 | 0 |
attr |
OneFormerConfig.strides |
1 | 0 | 0 |
attr |
OneFormerConfig.task_seq_len |
1 | 0 | 0 |
attr |
OneFormerConfig.text_encoder_width |
1 | 0 | 0 |
attr |
OneFormerConfig.text_encoder_context_length |
1 | 0 | 0 |
attr |
OneFormerConfig.text_encoder_num_layers |
1 | 0 | 0 |
attr |
OneFormerConfig.text_encoder_vocab_size |
1 | 0 | 0 |
attr |
OneFormerConfig.text_encoder_proj_layers |
1 | 0 | 0 |
attr |
OneFormerConfig.text_encoder_n_ctx |
1 | 0 | 0 |
attr |
OneFormerConfig.conv_dim |
1 | 0 | 0 |
attr |
OneFormerConfig.mask_dim |
1 | 0 | 0 |
attr |
OneFormerConfig.hidden_dim |
1 | 0 | 0 |
attr |
OneFormerConfig.encoder_feedforward_dim |
1 | 0 | 0 |
attr |
OneFormerConfig.norm |
1 | 0 | 0 |
attr |
OneFormerConfig.encoder_layers |
1 | 0 | 0 |
attr |
OneFormerConfig.decoder_layers |
1 | 0 | 0 |
attr |
OneFormerConfig.use_task_norm |
1 | 0 | 0 |
attr |
OneFormerConfig.num_attention_heads |
1 | 0 | 0 |
attr |
OneFormerConfig.dropout |
1 | 0 | 0 |
attr |
OneFormerConfig.dim_feedforward |
1 | 0 | 0 |
attr |
OneFormerConfig.pre_norm |
1 | 0 | 0 |
attr |
OneFormerConfig.enforce_input_proj |
1 | 0 | 0 |
attr |
OneFormerConfig.query_dec_layers |
1 | 0 | 0 |
attr |
OneFormerConfig.common_stride |
1 | 0 | 0 |
attr |
OneFormerConfig.num_hidden_layers |
1 | 0 | 0 |
transformers.models.oneformer.image_processing_oneformer (39 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OneFormerImageProcessor.init |
16 | 14 | 0 |
meth |
OneFormerImageProcessor.resize |
7 | 5 | 0 |
meth |
OneFormerImageProcessor.convert_segmentation_map_to_binary_masks |
5 | 4 | 0 |
meth |
OneFormerImageProcessor.call |
5 | 1 | 0 |
meth |
OneFormerImageProcessor._preprocess |
11 | 10 | 0 |
meth |
OneFormerImageProcessor.get_semantic_annotations |
3 | 0 | 0 |
meth |
OneFormerImageProcessor.get_instance_annotations |
3 | 0 | 0 |
meth |
OneFormerImageProcessor.get_panoptic_annotations |
3 | 0 | 0 |
meth |
OneFormerImageProcessor.encode_inputs |
9 | 8 | 0 |
meth |
OneFormerImageProcessor.post_process_semantic_segmentation |
3 | 2 | 0 |
meth |
OneFormerImageProcessor.post_process_instance_segmentation |
9 | 7 | 0 |
meth |
OneFormerImageProcessor.post_process_panoptic_segmentation |
7 | 6 | 0 |
attr |
OneFormerImageProcessor.do_resize |
1 | 0 | 0 |
attr |
OneFormerImageProcessor.size |
1 | 0 | 0 |
attr |
OneFormerImageProcessor.resample |
1 | 0 | 0 |
attr |
OneFormerImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
OneFormerImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
OneFormerImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
OneFormerImageProcessor.image_mean |
1 | 0 | 0 |
attr |
OneFormerImageProcessor.image_std |
1 | 0 | 0 |
attr |
OneFormerImageProcessor.ignore_index |
1 | 0 | 0 |
attr |
OneFormerImageProcessor.do_reduce_labels |
1 | 0 | 0 |
attr |
OneFormerImageProcessor.class_info_file |
1 | 0 | 0 |
attr |
OneFormerImageProcessor.repo_path |
1 | 0 | 0 |
attr |
OneFormerImageProcessor.metadata |
1 | 0 | 0 |
attr |
OneFormerImageProcessor.num_text |
1 | 0 | 0 |
attr |
OneFormerImageProcessor.num_labels |
1 | 0 | 0 |
transformers.models.oneformer.image_processing_oneformer_fast (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OneFormerImageProcessorFast.init |
2 | 1 | 0 |
meth |
OneFormerImageProcessorFast._preprocess |
18 | 17 | 0 |
meth |
OneFormerImageProcessorFast.convert_segmentation_map_to_binary_masks |
5 | 4 | 0 |
meth |
OneFormerImageProcessorFast.get_semantic_annotations |
3 | 0 | 0 |
meth |
OneFormerImageProcessorFast.get_instance_annotations |
3 | 0 | 0 |
meth |
OneFormerImageProcessorFast.get_panoptic_annotations |
3 | 0 | 0 |
meth |
OneFormerImageProcessorFast.post_process_semantic_segmentation |
3 | 2 | 0 |
meth |
OneFormerImageProcessorFast.post_process_instance_segmentation |
9 | 7 | 0 |
meth |
OneFormerImageProcessorFast.post_process_panoptic_segmentation |
7 | 6 | 0 |
attr |
OneFormerImageProcessorFast.metadata |
1 | 0 | 0 |
transformers.models.oneformer.modeling_oneformer (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OneFormerModel.init |
2 | 1 | 0 |
meth |
OneFormerModel.forward |
9 | 8 | 0 |
attr |
OneFormerModel.pixel_level_module |
1 | 0 | 0 |
attr |
OneFormerModel.transformer_module |
1 | 0 | 0 |
attr |
OneFormerModel.task_encoder |
1 | 0 | 0 |
attr |
OneFormerModel.is_training |
1 | 0 | 0 |
attr |
OneFormerModel.text_mapper |
1 | 0 | 0 |
meth |
OneFormerPreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
OneFormerForUniversalSegmentation.init |
2 | 1 | 0 |
meth |
OneFormerForUniversalSegmentation.forward |
12 | 11 | 0 |
attr |
OneFormerForUniversalSegmentation.model |
1 | 0 | 0 |
attr |
OneFormerForUniversalSegmentation.matcher |
1 | 0 | 0 |
attr |
OneFormerForUniversalSegmentation.criterion |
1 | 0 | 0 |
transformers.models.oneformer.processing_oneformer (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OneFormerProcessor.init |
6 | 2 | 0 |
meth |
OneFormerProcessor._preprocess_text |
3 | 0 | 0 |
meth |
OneFormerProcessor.call |
5 | 0 | 0 |
meth |
OneFormerProcessor.encode_inputs |
5 | 0 | 0 |
meth |
OneFormerProcessor.post_process_semantic_segmentation |
3 | 0 | 0 |
meth |
OneFormerProcessor.post_process_instance_segmentation |
3 | 0 | 0 |
meth |
OneFormerProcessor.post_process_panoptic_segmentation |
3 | 0 | 0 |
attr |
OneFormerProcessor.max_seq_length |
1 | 0 | 0 |
attr |
OneFormerProcessor.task_seq_length |
1 | 0 | 0 |
transformers.models.openai.configuration_openai (42 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OpenAIGPTConfig.init |
22 | 0 | 0 |
attr |
OpenAIGPTConfig.vocab_size |
1 | 0 | 0 |
attr |
OpenAIGPTConfig.n_positions |
1 | 0 | 0 |
attr |
OpenAIGPTConfig.n_embd |
1 | 0 | 0 |
attr |
OpenAIGPTConfig.n_layer |
1 | 0 | 0 |
attr |
OpenAIGPTConfig.n_head |
1 | 0 | 0 |
attr |
OpenAIGPTConfig.afn |
1 | 0 | 0 |
attr |
OpenAIGPTConfig.resid_pdrop |
1 | 0 | 0 |
attr |
OpenAIGPTConfig.embd_pdrop |
1 | 0 | 0 |
attr |
OpenAIGPTConfig.attn_pdrop |
1 | 0 | 0 |
attr |
OpenAIGPTConfig.layer_norm_epsilon |
1 | 0 | 0 |
attr |
OpenAIGPTConfig.initializer_range |
1 | 0 | 0 |
attr |
OpenAIGPTConfig.summary_type |
1 | 0 | 0 |
attr |
OpenAIGPTConfig.summary_use_proj |
1 | 0 | 0 |
attr |
OpenAIGPTConfig.summary_activation |
1 | 0 | 0 |
attr |
OpenAIGPTConfig.summary_first_dropout |
1 | 0 | 0 |
attr |
OpenAIGPTConfig.summary_proj_to_labels |
1 | 0 | 0 |
attr |
OpenAIGPTConfig.pad_token_id |
1 | 0 | 0 |
attr |
OpenAIGPTConfig.bos_token_id |
1 | 0 | 0 |
attr |
OpenAIGPTConfig.eos_token_id |
1 | 0 | 0 |
attr |
OpenAIGPTConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.openai.modeling_openai (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OpenAIGPTModel.init |
2 | 0 | 0 |
meth |
OpenAIGPTModel.get_input_embeddings |
1 | 0 | 0 |
meth |
OpenAIGPTModel.set_input_embeddings |
2 | 0 | 0 |
meth |
OpenAIGPTModel.forward |
10 | 9 | 0 |
attr |
OpenAIGPTModel.tokens_embed |
1 | 0 | 0 |
attr |
OpenAIGPTModel.positions_embed |
1 | 0 | 0 |
attr |
OpenAIGPTModel.drop |
1 | 0 | 0 |
attr |
OpenAIGPTModel.h |
1 | 0 | 0 |
meth |
OpenAIGPTForSequenceClassification.init |
2 | 0 | 0 |
meth |
OpenAIGPTForSequenceClassification.forward |
11 | 10 | 0 |
attr |
OpenAIGPTForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
OpenAIGPTForSequenceClassification.transformer |
1 | 0 | 0 |
attr |
OpenAIGPTForSequenceClassification.score |
1 | 0 | 0 |
meth |
OpenAIGPTDoubleHeadsModel.init |
2 | 0 | 0 |
meth |
OpenAIGPTDoubleHeadsModel.forward |
13 | 12 | 0 |
attr |
OpenAIGPTDoubleHeadsModel.transformer |
1 | 0 | 0 |
attr |
OpenAIGPTDoubleHeadsModel.lm_head |
1 | 0 | 0 |
attr |
OpenAIGPTDoubleHeadsModel.multiple_choice_head |
1 | 0 | 0 |
meth |
OpenAIGPTPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
OpenAIGPTLMHeadModel.init |
2 | 0 | 0 |
meth |
OpenAIGPTLMHeadModel.forward |
12 | 11 | 0 |
meth |
OpenAIGPTLMHeadModel.prepare_inputs_for_generation |
3 | 2 | 0 |
attr |
OpenAIGPTLMHeadModel.transformer |
1 | 0 | 0 |
attr |
OpenAIGPTLMHeadModel.lm_head |
1 | 0 | 0 |
transformers.models.openai.tokenization_openai (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OpenAIGPTTokenizer.init |
5 | 3 | 0 |
prop |
OpenAIGPTTokenizer.do_lower_case |
1 | 0 | 0 |
transformers.models.opt.configuration_opt (43 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OPTConfig.init |
23 | 0 | 0 |
attr |
OPTConfig.pad_token_id |
1 | 0 | 0 |
attr |
OPTConfig.bos_token_id |
1 | 0 | 0 |
attr |
OPTConfig.eos_token_id |
1 | 0 | 0 |
attr |
OPTConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
OPTConfig.vocab_size |
1 | 0 | 0 |
attr |
OPTConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
OPTConfig.num_attention_heads |
1 | 0 | 0 |
attr |
OPTConfig.word_embed_proj_dim |
1 | 0 | 0 |
attr |
OPTConfig.ffn_dim |
1 | 0 | 0 |
attr |
OPTConfig.hidden_size |
1 | 0 | 0 |
attr |
OPTConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
OPTConfig.dropout |
1 | 0 | 0 |
attr |
OPTConfig.attention_dropout |
1 | 0 | 0 |
attr |
OPTConfig.activation_function |
1 | 0 | 0 |
attr |
OPTConfig.init_std |
1 | 0 | 0 |
attr |
OPTConfig.layerdrop |
1 | 0 | 0 |
attr |
OPTConfig.use_cache |
1 | 0 | 0 |
attr |
OPTConfig.do_layer_norm_before |
1 | 0 | 0 |
attr |
OPTConfig.enable_bias |
1 | 0 | 0 |
attr |
OPTConfig.layer_norm_elementwise_affine |
1 | 0 | 0 |
transformers.models.opt.modeling_opt (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OPTForQuestionAnswering.init |
2 | 1 | 0 |
meth |
OPTForQuestionAnswering.forward |
13 | 12 | 0 |
meth |
OPTForQuestionAnswering.get_input_embeddings |
1 | 0 | 0 |
meth |
OPTForQuestionAnswering.set_input_embeddings |
2 | 0 | 0 |
attr |
OPTForQuestionAnswering.model |
1 | 0 | 0 |
attr |
OPTForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
OPTForSequenceClassification.init |
2 | 1 | 0 |
meth |
OPTForSequenceClassification.forward |
12 | 11 | 0 |
meth |
OPTForSequenceClassification.get_input_embeddings |
1 | 0 | 0 |
meth |
OPTForSequenceClassification.set_input_embeddings |
2 | 0 | 0 |
attr |
OPTForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
OPTForSequenceClassification.model |
1 | 0 | 0 |
attr |
OPTForSequenceClassification.score |
1 | 0 | 0 |
meth |
OPTModel.init |
2 | 1 | 0 |
meth |
OPTModel.get_input_embeddings |
1 | 0 | 0 |
meth |
OPTModel.set_input_embeddings |
2 | 0 | 0 |
attr |
OPTModel.decoder |
1 | 0 | 0 |
meth |
OPTForCausalLM.init |
2 | 0 | 0 |
meth |
OPTForCausalLM.get_input_embeddings |
1 | 0 | 0 |
meth |
OPTForCausalLM.set_input_embeddings |
2 | 0 | 0 |
attr |
OPTForCausalLM.model |
1 | 0 | 0 |
attr |
OPTForCausalLM.lm_head |
1 | 0 | 0 |
transformers.models.ovis2.configuration_ovis2 (41 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Ovis2VisionConfig.init |
19 | 11 | 0 |
attr |
Ovis2VisionConfig.hidden_size |
1 | 0 | 0 |
attr |
Ovis2VisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
Ovis2VisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Ovis2VisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Ovis2VisionConfig.num_channels |
1 | 0 | 0 |
attr |
Ovis2VisionConfig.patch_size |
1 | 0 | 0 |
attr |
Ovis2VisionConfig.image_size |
1 | 0 | 0 |
attr |
Ovis2VisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
Ovis2VisionConfig.hidden_act |
1 | 0 | 0 |
attr |
Ovis2VisionConfig.qkv_bias |
1 | 0 | 0 |
attr |
Ovis2VisionConfig.mlp_bias |
1 | 0 | 0 |
attr |
Ovis2VisionConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Ovis2VisionConfig.vocab_size |
1 | 0 | 0 |
attr |
Ovis2VisionConfig.hidden_stride |
1 | 0 | 0 |
attr |
Ovis2VisionConfig.num_visual_indicator_tokens |
1 | 0 | 0 |
attr |
Ovis2VisionConfig.tokenize_function |
1 | 0 | 0 |
attr |
Ovis2VisionConfig.initializer_range |
1 | 0 | 0 |
meth |
Ovis2Config.init |
9 | 0 | 0 |
attr |
Ovis2Config.vocab_size |
1 | 0 | 0 |
attr |
Ovis2Config.hidden_size |
1 | 0 | 0 |
attr |
Ovis2Config.image_token_id |
1 | 0 | 0 |
attr |
Ovis2Config.visual_indicator_token_ids |
1 | 0 | 0 |
attr |
Ovis2Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
Ovis2Config.vision_config |
1 | 0 | 0 |
attr |
Ovis2Config.text_config |
1 | 0 | 0 |
transformers.models.ovis2.image_processing_ovis2 (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Ovis2ImageProcessor.init |
15 | 14 | 0 |
meth |
Ovis2ImageProcessor.resize |
7 | 6 | 0 |
meth |
Ovis2ImageProcessor.crop_image_to_patches |
8 | 7 | 0 |
attr |
Ovis2ImageProcessor.do_resize |
1 | 0 | 0 |
attr |
Ovis2ImageProcessor.size |
1 | 0 | 0 |
attr |
Ovis2ImageProcessor.crop_to_patches |
1 | 0 | 0 |
attr |
Ovis2ImageProcessor.min_patches |
1 | 0 | 0 |
attr |
Ovis2ImageProcessor.max_patches |
1 | 0 | 0 |
attr |
Ovis2ImageProcessor.resample |
1 | 0 | 0 |
attr |
Ovis2ImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
Ovis2ImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
Ovis2ImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
Ovis2ImageProcessor.image_mean |
1 | 0 | 0 |
attr |
Ovis2ImageProcessor.image_std |
1 | 0 | 0 |
attr |
Ovis2ImageProcessor.do_convert_rgb |
1 | 0 | 0 |
transformers.models.ovis2.image_processing_ovis2_fast (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Ovis2ImageProcessorFast.crop_image_to_patches |
8 | 7 | 0 |
meth |
Ovis2ImageProcessorFast._preprocess |
19 | 18 | 0 |
transformers.models.ovis2.modeling_ovis2 (31 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Ovis2PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Ovis2Model.init |
2 | 1 | 0 |
meth |
Ovis2Model.get_input_embeddings |
1 | 0 | 0 |
meth |
Ovis2Model.set_input_embeddings |
2 | 0 | 0 |
meth |
Ovis2Model.get_placeholder_mask |
4 | 3 | 0 |
meth |
Ovis2Model.forward |
15 | 14 | 0 |
attr |
Ovis2Model.vision_tower |
1 | 0 | 0 |
attr |
Ovis2Model.language_model |
1 | 0 | 0 |
attr |
Ovis2Model.visual_embeddings_table |
1 | 0 | 0 |
attr |
Ovis2Model.visual_vocab_size |
1 | 0 | 0 |
attr |
Ovis2Model.vocab_size |
1 | 0 | 0 |
attr |
Ovis2Model.visual_indicator_token_ids |
1 | 0 | 0 |
meth |
Ovis2ForConditionalGeneration.init |
2 | 1 | 0 |
meth |
Ovis2ForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Ovis2ForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Ovis2ForConditionalGeneration.forward |
15 | 14 | 0 |
meth |
Ovis2ForConditionalGeneration.prepare_inputs_for_generation |
10 | 0 | 0 |
attr |
Ovis2ForConditionalGeneration.model |
1 | 0 | 0 |
attr |
Ovis2ForConditionalGeneration.lm_head |
1 | 0 | 0 |
transformers.models.ovis2.modular_ovis2 (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Ovis2PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Ovis2Model.init |
2 | 1 | 0 |
meth |
Ovis2Model.forward |
15 | 14 | 0 |
attr |
Ovis2Model.vision_tower |
1 | 0 | 0 |
attr |
Ovis2Model.visual_embeddings_table |
1 | 0 | 0 |
attr |
Ovis2Model.visual_vocab_size |
1 | 0 | 0 |
attr |
Ovis2Model.vocab_size |
1 | 0 | 0 |
attr |
Ovis2Model.visual_indicator_token_ids |
1 | 0 | 0 |
attr |
Ovis2Model.language_model |
1 | 0 | 0 |
meth |
Ovis2ForConditionalGeneration.init |
2 | 1 | 0 |
meth |
Ovis2ForConditionalGeneration.forward |
15 | 14 | 0 |
attr |
Ovis2ForConditionalGeneration.lm_head |
1 | 0 | 0 |
transformers.models.ovis2.processing_ovis2 (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Ovis2Processor.init |
7 | 0 | 0 |
meth |
Ovis2Processor._expand_image_tokens |
3 | 2 | 0 |
meth |
Ovis2Processor.batch_decode |
3 | 0 | 0 |
meth |
Ovis2Processor.decode |
3 | 0 | 0 |
prop |
Ovis2Processor.model_input_names |
1 | 0 | 0 |
attr |
Ovis2Processor.image_seq_length |
1 | 0 | 0 |
attr |
Ovis2Processor.image_token |
1 | 0 | 0 |
attr |
Ovis2Processor.image_token_id |
1 | 0 | 0 |
transformers.models.owlv2.configuration_owlv2 (69 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Owlv2Config.init |
7 | 0 | 0 |
attr |
Owlv2Config.text_config |
1 | 0 | 0 |
attr |
Owlv2Config.vision_config |
1 | 0 | 0 |
attr |
Owlv2Config.projection_dim |
1 | 0 | 0 |
attr |
Owlv2Config.logit_scale_init_value |
1 | 0 | 0 |
attr |
Owlv2Config.return_dict |
1 | 0 | 0 |
attr |
Owlv2Config.initializer_factor |
1 | 0 | 0 |
meth |
Owlv2VisionConfig.init |
14 | 0 | 0 |
attr |
Owlv2VisionConfig.hidden_size |
1 | 0 | 0 |
attr |
Owlv2VisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
Owlv2VisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Owlv2VisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Owlv2VisionConfig.num_channels |
1 | 0 | 0 |
attr |
Owlv2VisionConfig.image_size |
1 | 0 | 0 |
attr |
Owlv2VisionConfig.patch_size |
1 | 0 | 0 |
attr |
Owlv2VisionConfig.hidden_act |
1 | 0 | 0 |
attr |
Owlv2VisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
Owlv2VisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
Owlv2VisionConfig.initializer_range |
1 | 0 | 0 |
attr |
Owlv2VisionConfig.initializer_factor |
1 | 0 | 0 |
meth |
Owlv2TextConfig.init |
16 | 0 | 0 |
attr |
Owlv2TextConfig.pad_token_id |
1 | 0 | 0 |
attr |
Owlv2TextConfig.bos_token_id |
1 | 0 | 0 |
attr |
Owlv2TextConfig.eos_token_id |
1 | 0 | 0 |
attr |
Owlv2TextConfig.vocab_size |
1 | 0 | 0 |
attr |
Owlv2TextConfig.hidden_size |
1 | 0 | 0 |
attr |
Owlv2TextConfig.intermediate_size |
1 | 0 | 0 |
attr |
Owlv2TextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Owlv2TextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Owlv2TextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Owlv2TextConfig.hidden_act |
1 | 0 | 0 |
attr |
Owlv2TextConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
Owlv2TextConfig.attention_dropout |
1 | 0 | 0 |
attr |
Owlv2TextConfig.initializer_range |
1 | 0 | 0 |
attr |
Owlv2TextConfig.initializer_factor |
1 | 0 | 0 |
transformers.models.owlv2.image_processing_owlv2 (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Owlv2ImageProcessor.init |
11 | 10 | 0 |
meth |
Owlv2ImageProcessor.pad |
4 | 3 | 0 |
meth |
Owlv2ImageProcessor.resize |
8 | 6 | 0 |
meth |
Owlv2ImageProcessor.post_process_object_detection |
4 | 3 | 0 |
meth |
Owlv2ImageProcessor.post_process_image_guided_detection |
5 | 0 | 0 |
attr |
Owlv2ImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
Owlv2ImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
Owlv2ImageProcessor.do_pad |
1 | 0 | 0 |
attr |
Owlv2ImageProcessor.do_resize |
1 | 0 | 0 |
attr |
Owlv2ImageProcessor.size |
1 | 0 | 0 |
attr |
Owlv2ImageProcessor.resample |
1 | 0 | 0 |
attr |
Owlv2ImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
Owlv2ImageProcessor.image_mean |
1 | 0 | 0 |
attr |
Owlv2ImageProcessor.image_std |
1 | 0 | 0 |
transformers.models.owlv2.image_processing_owlv2_fast (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Owlv2ImageProcessorFast.post_process_object_detection |
4 | 3 | 0 |
meth |
Owlv2ImageProcessorFast.post_process_image_guided_detection |
5 | 0 | 0 |
meth |
Owlv2ImageProcessorFast.pad |
5 | 4 | 0 |
meth |
Owlv2ImageProcessorFast.resize |
6 | 4 | 0 |
meth |
Owlv2ImageProcessorFast._preprocess |
14 | 13 | 0 |
transformers.models.owlv2.modeling_owlv2 (29 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Owlv2ForObjectDetection.init |
2 | 1 | 0 |
meth |
Owlv2ForObjectDetection.forward |
9 | 8 | 0 |
attr |
Owlv2ForObjectDetection.owlv2 |
1 | 0 | 0 |
attr |
Owlv2ForObjectDetection.class_head |
1 | 0 | 0 |
attr |
Owlv2ForObjectDetection.box_head |
1 | 0 | 0 |
attr |
Owlv2ForObjectDetection.objectness_head |
1 | 0 | 0 |
attr |
Owlv2ForObjectDetection.layer_norm |
1 | 0 | 0 |
attr |
Owlv2ForObjectDetection.sigmoid |
1 | 0 | 0 |
attr |
Owlv2ForObjectDetection.num_patches_height |
1 | 0 | 0 |
attr |
Owlv2ForObjectDetection.num_patches_width |
1 | 0 | 0 |
meth |
Owlv2TextModel.init |
2 | 1 | 0 |
meth |
Owlv2TextModel.set_input_embeddings |
2 | 0 | 0 |
meth |
Owlv2TextModel.forward |
7 | 6 | 0 |
attr |
Owlv2TextModel.text_model |
1 | 0 | 0 |
meth |
Owlv2Model.init |
2 | 1 | 0 |
meth |
Owlv2Model.forward |
11 | 10 | 0 |
attr |
Owlv2Model.projection_dim |
1 | 0 | 0 |
attr |
Owlv2Model.text_embed_dim |
1 | 0 | 0 |
attr |
Owlv2Model.vision_embed_dim |
1 | 0 | 0 |
attr |
Owlv2Model.text_model |
1 | 0 | 0 |
attr |
Owlv2Model.vision_model |
1 | 0 | 0 |
attr |
Owlv2Model.visual_projection |
1 | 0 | 0 |
attr |
Owlv2Model.text_projection |
1 | 0 | 0 |
attr |
Owlv2Model.logit_scale |
1 | 0 | 0 |
meth |
Owlv2VisionModel.init |
2 | 1 | 0 |
meth |
Owlv2VisionModel.forward |
7 | 6 | 0 |
attr |
Owlv2VisionModel.vision_model |
1 | 0 | 0 |
meth |
Owlv2PreTrainedModel._init_weights |
2 | 1 | 0 |
transformers.models.owlv2.modular_owlv2 (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Owlv2ImageProcessorFast.pad |
5 | 4 | 0 |
meth |
Owlv2ImageProcessorFast.resize |
6 | 4 | 0 |
meth |
Owlv2ImageProcessorFast._preprocess |
14 | 13 | 0 |
transformers.models.owlv2.processing_owlv2 (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Owlv2Processor.init |
4 | 0 | 0 |
meth |
Owlv2Processor.post_process_grounded_object_detection |
5 | 4 | 0 |
meth |
Owlv2Processor.post_process_image_guided_detection |
5 | 4 | 0 |
transformers.models.owlvit.configuration_owlvit (69 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OwlViTTextConfig.init |
16 | 0 | 0 |
attr |
OwlViTTextConfig.pad_token_id |
1 | 0 | 0 |
attr |
OwlViTTextConfig.bos_token_id |
1 | 0 | 0 |
attr |
OwlViTTextConfig.eos_token_id |
1 | 0 | 0 |
attr |
OwlViTTextConfig.vocab_size |
1 | 0 | 0 |
attr |
OwlViTTextConfig.hidden_size |
1 | 0 | 0 |
attr |
OwlViTTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
OwlViTTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
OwlViTTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
OwlViTTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
OwlViTTextConfig.hidden_act |
1 | 0 | 0 |
attr |
OwlViTTextConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
OwlViTTextConfig.attention_dropout |
1 | 0 | 0 |
attr |
OwlViTTextConfig.initializer_range |
1 | 0 | 0 |
attr |
OwlViTTextConfig.initializer_factor |
1 | 0 | 0 |
meth |
OwlViTVisionConfig.init |
14 | 0 | 0 |
attr |
OwlViTVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
OwlViTVisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
OwlViTVisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
OwlViTVisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
OwlViTVisionConfig.num_channels |
1 | 0 | 0 |
attr |
OwlViTVisionConfig.image_size |
1 | 0 | 0 |
attr |
OwlViTVisionConfig.patch_size |
1 | 0 | 0 |
attr |
OwlViTVisionConfig.hidden_act |
1 | 0 | 0 |
attr |
OwlViTVisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
OwlViTVisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
OwlViTVisionConfig.initializer_range |
1 | 0 | 0 |
attr |
OwlViTVisionConfig.initializer_factor |
1 | 0 | 0 |
meth |
OwlViTConfig.init |
7 | 0 | 0 |
attr |
OwlViTConfig.text_config |
1 | 0 | 0 |
attr |
OwlViTConfig.vision_config |
1 | 0 | 0 |
attr |
OwlViTConfig.projection_dim |
1 | 0 | 0 |
attr |
OwlViTConfig.logit_scale_init_value |
1 | 0 | 0 |
attr |
OwlViTConfig.return_dict |
1 | 0 | 0 |
attr |
OwlViTConfig.initializer_factor |
1 | 0 | 0 |
transformers.models.owlvit.image_processing_owlvit (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OwlViTImageProcessor.init |
12 | 0 | 0 |
meth |
OwlViTImageProcessor.resize |
7 | 6 | 0 |
meth |
OwlViTImageProcessor.center_crop |
6 | 5 | 0 |
meth |
OwlViTImageProcessor.post_process_object_detection |
4 | 3 | 0 |
meth |
OwlViTImageProcessor.post_process_image_guided_detection |
5 | 0 | 0 |
attr |
OwlViTImageProcessor.do_resize |
1 | 0 | 0 |
attr |
OwlViTImageProcessor.size |
1 | 0 | 0 |
attr |
OwlViTImageProcessor.resample |
1 | 0 | 0 |
attr |
OwlViTImageProcessor.do_center_crop |
1 | 0 | 0 |
attr |
OwlViTImageProcessor.crop_size |
1 | 0 | 0 |
attr |
OwlViTImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
OwlViTImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
OwlViTImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
OwlViTImageProcessor.image_mean |
1 | 0 | 0 |
attr |
OwlViTImageProcessor.image_std |
1 | 0 | 0 |
transformers.models.owlvit.image_processing_owlvit_fast (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OwlViTImageProcessorFast.post_process_object_detection |
4 | 3 | 0 |
meth |
OwlViTImageProcessorFast.post_process_image_guided_detection |
5 | 0 | 0 |
transformers.models.owlvit.modeling_owlvit (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OwlViTTextModel.init |
2 | 1 | 0 |
meth |
OwlViTTextModel.set_input_embeddings |
2 | 0 | 0 |
meth |
OwlViTTextModel.forward |
7 | 6 | 0 |
attr |
OwlViTTextModel.text_model |
1 | 0 | 0 |
meth |
OwlViTVisionModel.init |
2 | 1 | 0 |
meth |
OwlViTVisionModel.forward |
7 | 6 | 0 |
attr |
OwlViTVisionModel.vision_model |
1 | 0 | 0 |
meth |
OwlViTModel.init |
2 | 1 | 0 |
meth |
OwlViTModel.forward |
11 | 10 | 0 |
attr |
OwlViTModel.projection_dim |
1 | 0 | 0 |
attr |
OwlViTModel.text_embed_dim |
1 | 0 | 0 |
attr |
OwlViTModel.vision_embed_dim |
1 | 0 | 0 |
attr |
OwlViTModel.text_model |
1 | 0 | 0 |
attr |
OwlViTModel.vision_model |
1 | 0 | 0 |
attr |
OwlViTModel.visual_projection |
1 | 0 | 0 |
attr |
OwlViTModel.text_projection |
1 | 0 | 0 |
attr |
OwlViTModel.logit_scale |
1 | 0 | 0 |
meth |
OwlViTPreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
OwlViTForObjectDetection.init |
2 | 1 | 0 |
meth |
OwlViTForObjectDetection.forward |
9 | 8 | 0 |
attr |
OwlViTForObjectDetection.owlvit |
1 | 0 | 0 |
attr |
OwlViTForObjectDetection.class_head |
1 | 0 | 0 |
attr |
OwlViTForObjectDetection.box_head |
1 | 0 | 0 |
attr |
OwlViTForObjectDetection.layer_norm |
1 | 0 | 0 |
attr |
OwlViTForObjectDetection.sigmoid |
1 | 0 | 0 |
attr |
OwlViTForObjectDetection.num_patches_height |
1 | 0 | 0 |
attr |
OwlViTForObjectDetection.num_patches_width |
1 | 0 | 0 |
transformers.models.owlvit.processing_owlvit (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
OwlViTProcessor.init |
4 | 0 | 0 |
meth |
OwlViTProcessor.post_process |
3 | 0 | 0 |
meth |
OwlViTProcessor.post_process_grounded_object_detection |
5 | 4 | 0 |
meth |
OwlViTProcessor.post_process_image_guided_detection |
5 | 4 | 0 |
transformers.models.paddleocr_vl.configuration_paddleocr_vl (60 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PaddleOCRTextConfig.init |
20 | 18 | 0 |
attr |
PaddleOCRTextConfig.vocab_size |
1 | 0 | 0 |
attr |
PaddleOCRTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
PaddleOCRTextConfig.hidden_size |
1 | 0 | 0 |
attr |
PaddleOCRTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
PaddleOCRTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
PaddleOCRTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
PaddleOCRTextConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
PaddleOCRTextConfig.hidden_act |
1 | 0 | 0 |
attr |
PaddleOCRTextConfig.initializer_range |
1 | 0 | 0 |
attr |
PaddleOCRTextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
PaddleOCRTextConfig.use_cache |
1 | 0 | 0 |
attr |
PaddleOCRTextConfig.use_bias |
1 | 0 | 0 |
attr |
PaddleOCRTextConfig.head_dim |
1 | 0 | 0 |
attr |
PaddleOCRTextConfig.rope_parameters |
1 | 0 | 0 |
attr |
PaddleOCRTextConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
PaddleOCRTextConfig.pad_token_id |
1 | 0 | 0 |
attr |
PaddleOCRTextConfig.bos_token_id |
1 | 0 | 0 |
attr |
PaddleOCRTextConfig.eos_token_id |
1 | 0 | 0 |
meth |
PaddleOCRVLConfig.init |
9 | 0 | 0 |
attr |
PaddleOCRVLConfig.image_token_id |
1 | 0 | 0 |
attr |
PaddleOCRVLConfig.video_token_id |
1 | 0 | 0 |
attr |
PaddleOCRVLConfig.vision_start_token_id |
1 | 0 | 0 |
attr |
PaddleOCRVLConfig.vision_end_token_id |
1 | 0 | 0 |
attr |
PaddleOCRVLConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
PaddleOCRVLConfig.vision_config |
1 | 0 | 0 |
attr |
PaddleOCRVLConfig.text_config |
1 | 0 | 0 |
meth |
PaddleOCRVisionConfig.init |
13 | 0 | 0 |
attr |
PaddleOCRVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
PaddleOCRVisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
PaddleOCRVisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
PaddleOCRVisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
PaddleOCRVisionConfig.num_channels |
1 | 0 | 0 |
attr |
PaddleOCRVisionConfig.patch_size |
1 | 0 | 0 |
attr |
PaddleOCRVisionConfig.image_size |
1 | 0 | 0 |
attr |
PaddleOCRVisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
PaddleOCRVisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
PaddleOCRVisionConfig.hidden_act |
1 | 0 | 0 |
attr |
PaddleOCRVisionConfig.spatial_merge_size |
1 | 0 | 0 |
transformers.models.paddleocr_vl.image_processing_paddleocr_vl (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PaddleOCRVLImageProcessor.__init__ |
16 | 15 | 0 |
meth |
PaddleOCRVLImageProcessor._preprocess |
16 | 15 | 0 |
meth |
PaddleOCRVLImageProcessor.preprocess |
19 | 18 | 0 |
meth |
PaddleOCRVLImageProcessor.get_number_of_image_patches |
4 | 2 | 0 |
attr |
PaddleOCRVLImageProcessor.min_pixels |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessor.max_pixels |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessor.size |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessor.do_resize |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessor.resample |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessor.image_mean |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessor.image_std |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessor.patch_size |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessor.temporal_patch_size |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessor.merge_size |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessor.do_convert_rgb |
1 | 0 | 0 |
transformers.models.paddleocr_vl.image_processing_paddleocr_vl_fast (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PaddleOCRVLImageProcessorFast.__init__ |
16 | 15 | 0 |
meth |
PaddleOCRVLImageProcessorFast._preprocess |
16 | 14 | 0 |
attr |
PaddleOCRVLImageProcessorFast.min_pixels |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessorFast.max_pixels |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessorFast.size |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessorFast.do_resize |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessorFast.resample |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessorFast.do_rescale |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessorFast.rescale_factor |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessorFast.do_normalize |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessorFast.image_mean |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessorFast.image_std |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessorFast.patch_size |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessorFast.temporal_patch_size |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessorFast.merge_size |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessorFast.do_convert_rgb |
1 | 0 | 0 |
transformers.models.paddleocr_vl.modeling_paddleocr_vl (53 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PaddleOCRVLForConditionalGeneration.__init__ |
2 | 0 | 0 |
meth |
PaddleOCRVLForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
PaddleOCRVLForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
PaddleOCRVLForConditionalGeneration.prepare_inputs_for_generation |
14 | 0 | 0 |
meth |
PaddleOCRVLForConditionalGeneration._prepare_position_ids_for_generation |
3 | 0 | 0 |
meth |
PaddleOCRVLForConditionalGeneration._expand_inputs_for_generation |
5 | 4 | 0 |
attr |
PaddleOCRVLForConditionalGeneration.model |
1 | 0 | 0 |
attr |
PaddleOCRVLForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
PaddleOCRVisionTransformer.__init__ |
2 | 1 | 0 |
attr |
PaddleOCRVisionTransformer.embeddings |
1 | 0 | 0 |
attr |
PaddleOCRVisionTransformer.encoder |
1 | 0 | 0 |
attr |
PaddleOCRVisionTransformer.post_layernorm |
1 | 0 | 0 |
meth |
PaddleOCRTextModel.__init__ |
2 | 1 | 0 |
attr |
PaddleOCRTextModel.padding_idx |
1 | 0 | 0 |
attr |
PaddleOCRTextModel.vocab_size |
1 | 0 | 0 |
attr |
PaddleOCRTextModel.embed_tokens |
1 | 0 | 0 |
attr |
PaddleOCRTextModel.layers |
1 | 0 | 0 |
attr |
PaddleOCRTextModel.norm |
1 | 0 | 0 |
attr |
PaddleOCRTextModel.rotary_emb |
1 | 0 | 0 |
attr |
PaddleOCRTextModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
PaddleOCRVisionModel.__init__ |
2 | 1 | 0 |
attr |
PaddleOCRVisionModel.vision_model |
1 | 0 | 0 |
meth |
PaddleOCRVLPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
PaddleOCRVLModel.__init__ |
2 | 1 | 0 |
meth |
PaddleOCRVLModel.get_input_embeddings |
1 | 0 | 0 |
meth |
PaddleOCRVLModel.set_input_embeddings |
2 | 0 | 0 |
meth |
PaddleOCRVLModel.get_vision_position_ids |
7 | 6 | 0 |
meth |
PaddleOCRVLModel.get_rope_index |
7 | 6 | 0 |
meth |
PaddleOCRVLModel.get_placeholder_mask |
4 | 3 | 0 |
meth |
PaddleOCRVLModel.forward |
13 | 12 | 0 |
attr |
PaddleOCRVLModel.visual |
1 | 0 | 0 |
attr |
PaddleOCRVLModel.language_model |
1 | 0 | 0 |
attr |
PaddleOCRVLModel.rope_deltas |
1 | 0 | 0 |
attr |
PaddleOCRVLModel.projector |
1 | 0 | 0 |
transformers.models.paddleocr_vl.modular_paddleocr_vl (70 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PaddleOCRVLProcessor.__init__ |
5 | 0 | 0 |
attr |
PaddleOCRVLProcessor.image_token |
1 | 0 | 0 |
attr |
PaddleOCRVLProcessor.image_token_id |
1 | 0 | 0 |
meth |
PaddleOCRVLForConditionalGeneration.get_video_features |
1 | 0 | 0 |
meth |
PaddleOCRVisionTransformer.__init__ |
2 | 1 | 0 |
attr |
PaddleOCRVisionTransformer.embeddings |
1 | 0 | 0 |
attr |
PaddleOCRVisionTransformer.encoder |
1 | 0 | 0 |
attr |
PaddleOCRVisionTransformer.post_layernorm |
1 | 0 | 0 |
meth |
PaddleOCRVLConfig.__init__ |
9 | 0 | 0 |
meth |
PaddleOCRTextModel.__init__ |
2 | 1 | 0 |
meth |
PaddleOCRVisionModel.__init__ |
2 | 1 | 0 |
attr |
PaddleOCRVisionModel.vision_model |
1 | 0 | 0 |
meth |
PaddleOCRVisionConfig.__init__ |
13 | 0 | 0 |
attr |
PaddleOCRVisionConfig.spatial_merge_size |
1 | 0 | 0 |
meth |
PaddleOCRVLPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
PaddleOCRVLImageProcessorFast.__init__ |
16 | 15 | 0 |
meth |
PaddleOCRVLImageProcessorFast._preprocess |
16 | 14 | 0 |
attr |
PaddleOCRVLImageProcessorFast.min_pixels |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessorFast.max_pixels |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessorFast.size |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessorFast.do_resize |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessorFast.resample |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessorFast.do_rescale |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessorFast.rescale_factor |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessorFast.do_normalize |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessorFast.image_mean |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessorFast.image_std |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessorFast.patch_size |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessorFast.temporal_patch_size |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessorFast.merge_size |
1 | 0 | 0 |
attr |
PaddleOCRVLImageProcessorFast.do_convert_rgb |
1 | 0 | 0 |
meth |
PaddleOCRVLImageProcessor.__init__ |
16 | 15 | 0 |
meth |
PaddleOCRVLImageProcessor._preprocess |
16 | 15 | 0 |
meth |
PaddleOCRVLModel.__init__ |
2 | 1 | 0 |
meth |
PaddleOCRVLModel.get_input_embeddings |
1 | 0 | 0 |
meth |
PaddleOCRVLModel.set_input_embeddings |
2 | 0 | 0 |
meth |
PaddleOCRVLModel.get_video_features |
1 | 0 | 0 |
meth |
PaddleOCRVLModel.get_placeholder_mask |
4 | 3 | 0 |
meth |
PaddleOCRVLModel.forward |
13 | 12 | 0 |
attr |
PaddleOCRVLModel.visual |
1 | 0 | 0 |
attr |
PaddleOCRVLModel.projector |
1 | 0 | 0 |
attr |
PaddleOCRVLModel.language_model |
1 | 0 | 0 |
attr |
PaddleOCRVLModel.rope_deltas |
1 | 0 | 0 |
transformers.models.paddleocr_vl.processing_paddleocr_vl (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PaddleOCRVLProcessor.__init__ |
5 | 0 | 0 |
attr |
PaddleOCRVLProcessor.image_token |
1 | 0 | 0 |
attr |
PaddleOCRVLProcessor.image_token_id |
1 | 0 | 0 |
transformers.models.paligemma.configuration_paligemma (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PaliGemmaConfig.__init__ |
9 | 1 | 0 |
attr |
PaliGemmaConfig.image_token_index |
1 | 0 | 0 |
attr |
PaliGemmaConfig.projection_dim |
1 | 0 | 0 |
attr |
PaliGemmaConfig.hidden_size |
1 | 0 | 0 |
attr |
PaliGemmaConfig.vision_config |
1 | 0 | 0 |
attr |
PaliGemmaConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
PaliGemmaConfig.is_encoder_decoder |
1 | 0 | 0 |
attr |
PaliGemmaConfig.text_config |
1 | 0 | 0 |
transformers.models.paligemma.modeling_paligemma (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PaliGemmaModel.__init__ |
2 | 1 | 0 |
meth |
PaliGemmaModel.get_input_embeddings |
1 | 0 | 0 |
meth |
PaliGemmaModel.set_input_embeddings |
2 | 0 | 0 |
meth |
PaliGemmaModel.get_placeholder_mask |
4 | 3 | 0 |
attr |
PaliGemmaModel.vision_tower |
1 | 0 | 0 |
attr |
PaliGemmaModel.multi_modal_projector |
1 | 0 | 0 |
attr |
PaliGemmaModel.vocab_size |
1 | 0 | 0 |
attr |
PaliGemmaModel.language_model |
1 | 0 | 0 |
attr |
PaliGemmaModel.text_config_dtype |
1 | 0 | 0 |
meth |
PaliGemmaForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
PaliGemmaForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
PaliGemmaForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
PaliGemmaForConditionalGeneration.get_image_features |
3 | 2 | 0 |
meth |
PaliGemmaForConditionalGeneration.prepare_inputs_for_generation |
14 | 0 | 0 |
meth |
PaliGemmaForConditionalGeneration.create_masks_for_generate |
10 | 9 | 0 |
attr |
PaliGemmaForConditionalGeneration.model |
1 | 0 | 0 |
attr |
PaliGemmaForConditionalGeneration.lm_head |
1 | 0 | 0 |
transformers.models.paligemma.processing_paligemma (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PaliGemmaProcessor.__init__ |
5 | 0 | 0 |
meth |
PaliGemmaProcessor._get_num_multimodal_tokens |
3 | 0 | 0 |
prop |
PaliGemmaProcessor.model_input_names |
1 | 0 | 0 |
attr |
PaliGemmaProcessor.image_seq_length |
1 | 0 | 0 |
attr |
PaliGemmaProcessor.image_token_id |
1 | 0 | 0 |
attr |
PaliGemmaProcessor.image_token |
1 | 0 | 0 |
transformers.models.parakeet.configuration_parakeet (59 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ParakeetCTCConfig.__init__ |
7 | 1 | 0 |
meth |
ParakeetCTCConfig.from_encoder_config |
3 | 1 | 0 |
attr |
ParakeetCTCConfig.vocab_size |
1 | 0 | 0 |
attr |
ParakeetCTCConfig.ctc_loss_reduction |
1 | 0 | 0 |
attr |
ParakeetCTCConfig.ctc_zero_infinity |
1 | 0 | 0 |
attr |
ParakeetCTCConfig.encoder_config |
1 | 0 | 0 |
attr |
ParakeetCTCConfig.initializer_range |
1 | 0 | 0 |
attr |
ParakeetCTCConfig.pad_token_id |
1 | 0 | 0 |
meth |
ParakeetEncoderConfig.__init__ |
23 | 0 | 0 |
attr |
ParakeetEncoderConfig.hidden_size |
1 | 0 | 0 |
attr |
ParakeetEncoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
ParakeetEncoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
ParakeetEncoderConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
ParakeetEncoderConfig.intermediate_size |
1 | 0 | 0 |
attr |
ParakeetEncoderConfig.hidden_act |
1 | 0 | 0 |
attr |
ParakeetEncoderConfig.attention_bias |
1 | 0 | 0 |
attr |
ParakeetEncoderConfig.convolution_bias |
1 | 0 | 0 |
attr |
ParakeetEncoderConfig.conv_kernel_size |
1 | 0 | 0 |
attr |
ParakeetEncoderConfig.subsampling_conv_kernel_size |
1 | 0 | 0 |
attr |
ParakeetEncoderConfig.subsampling_conv_stride |
1 | 0 | 0 |
attr |
ParakeetEncoderConfig.subsampling_factor |
1 | 0 | 0 |
attr |
ParakeetEncoderConfig.subsampling_conv_channels |
1 | 0 | 0 |
attr |
ParakeetEncoderConfig.num_mel_bins |
1 | 0 | 0 |
attr |
ParakeetEncoderConfig.dropout |
1 | 0 | 0 |
attr |
ParakeetEncoderConfig.dropout_positions |
1 | 0 | 0 |
attr |
ParakeetEncoderConfig.layerdrop |
1 | 0 | 0 |
attr |
ParakeetEncoderConfig.activation_dropout |
1 | 0 | 0 |
attr |
ParakeetEncoderConfig.attention_dropout |
1 | 0 | 0 |
attr |
ParakeetEncoderConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
ParakeetEncoderConfig.scale_input |
1 | 0 | 0 |
attr |
ParakeetEncoderConfig.initializer_range |
1 | 0 | 0 |
transformers.models.parakeet.feature_extraction_parakeet (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ParakeetFeatureExtractor.__init__ |
9 | 0 | 0 |
meth |
ParakeetFeatureExtractor._torch_extract_fbank_features |
3 | 0 | 0 |
meth |
ParakeetFeatureExtractor.__call__ |
13 | 12 | 0 |
attr |
ParakeetFeatureExtractor.hop_length |
1 | 0 | 0 |
attr |
ParakeetFeatureExtractor.n_fft |
1 | 0 | 0 |
attr |
ParakeetFeatureExtractor.win_length |
1 | 0 | 0 |
attr |
ParakeetFeatureExtractor.preemphasis |
1 | 0 | 0 |
attr |
ParakeetFeatureExtractor.mel_filters |
1 | 0 | 0 |
transformers.models.parakeet.modeling_parakeet (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ParakeetEncoder.__init__ |
2 | 1 | 0 |
attr |
ParakeetEncoder.gradient_checkpointing |
1 | 0 | 0 |
attr |
ParakeetEncoder.dropout |
1 | 0 | 0 |
attr |
ParakeetEncoder.dropout_positions |
1 | 0 | 0 |
attr |
ParakeetEncoder.layerdrop |
1 | 0 | 0 |
attr |
ParakeetEncoder.input_scale |
1 | 0 | 0 |
attr |
ParakeetEncoder.subsampling |
1 | 0 | 0 |
attr |
ParakeetEncoder.encode_positions |
1 | 0 | 0 |
attr |
ParakeetEncoder.layers |
1 | 0 | 0 |
meth |
ParakeetPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
ParakeetPreTrainedModel._get_subsampling_output_length |
2 | 1 | 0 |
meth |
ParakeetPreTrainedModel._get_output_attention_mask |
3 | 2 | 0 |
meth |
ParakeetForCTC.__init__ |
2 | 1 | 0 |
attr |
ParakeetForCTC.encoder |
1 | 0 | 0 |
attr |
ParakeetForCTC.ctc_head |
1 | 0 | 0 |
transformers.models.parakeet.modular_parakeet (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ParakeetEncoder.__init__ |
2 | 1 | 0 |
attr |
ParakeetEncoder.gradient_checkpointing |
1 | 0 | 0 |
attr |
ParakeetEncoder.dropout |
1 | 0 | 0 |
attr |
ParakeetEncoder.dropout_positions |
1 | 0 | 0 |
attr |
ParakeetEncoder.layerdrop |
1 | 0 | 0 |
attr |
ParakeetEncoder.input_scale |
1 | 0 | 0 |
attr |
ParakeetEncoder.subsampling |
1 | 0 | 0 |
attr |
ParakeetEncoder.encode_positions |
1 | 0 | 0 |
attr |
ParakeetEncoder.layers |
1 | 0 | 0 |
meth |
ParakeetPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
ParakeetPreTrainedModel._get_subsampling_output_length |
2 | 1 | 0 |
meth |
ParakeetPreTrainedModel._get_output_attention_mask |
3 | 2 | 0 |
meth |
ParakeetForCTC.__init__ |
2 | 1 | 0 |
attr |
ParakeetForCTC.encoder |
1 | 0 | 0 |
attr |
ParakeetForCTC.ctc_head |
1 | 0 | 0 |
transformers.models.parakeet.processing_parakeet (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ParakeetProcessor.__init__ |
3 | 0 | 0 |
meth |
ParakeetProcessor.__call__ |
5 | 4 | 0 |
prop |
ParakeetProcessor.model_input_names |
1 | 0 | 0 |
transformers.models.parakeet.tokenization_parakeet (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ParakeetTokenizer._decode |
6 | 5 | 0 |
transformers.models.patchtsmixer.configuration_patchtsmixer (38 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PatchTSMixerConfig.__init__ |
36 | 34 | 0 |
attr |
PatchTSMixerConfig.num_input_channels |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.context_length |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.patch_length |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.patch_stride |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.d_model |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.expansion_factor |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.num_layers |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.dropout |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.mode |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.gated_attn |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.norm_mlp |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.scaling |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.head_dropout |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.num_patches |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.mask_type |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.random_mask_ratio |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.num_forecast_mask_patches |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.mask_value |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.channel_consistent_masking |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.masked_loss |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.patch_last |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.use_positional_encoding |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.positional_encoding_type |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.prediction_length |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.prediction_channel_indices |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.num_targets |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.output_range |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.head_aggregation |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.self_attn |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.self_attn_heads |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.init_std |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.distribution_output |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.loss |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.num_parallel_samples |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.unmasked_channel_indices |
1 | 0 | 0 |
attr |
PatchTSMixerConfig.norm_eps |
1 | 0 | 0 |
transformers.models.patchtsmixer.modeling_patchtsmixer (39 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PatchTSMixerForTimeSeriesClassification.__init__ |
2 | 1 | 0 |
meth |
PatchTSMixerForTimeSeriesClassification.forward |
7 | 6 | 0 |
attr |
PatchTSMixerForTimeSeriesClassification.model |
1 | 0 | 0 |
attr |
PatchTSMixerForTimeSeriesClassification.head |
1 | 0 | 0 |
attr |
PatchTSMixerForTimeSeriesClassification.use_return_dict |
1 | 0 | 0 |
attr |
PatchTSMixerForTimeSeriesClassification.inject_scale |
1 | 0 | 0 |
meth |
PatchTSMixerForPrediction.__init__ |
2 | 1 | 0 |
meth |
PatchTSMixerForPrediction.forward |
8 | 7 | 0 |
attr |
PatchTSMixerForPrediction.loss |
1 | 0 | 0 |
attr |
PatchTSMixerForPrediction.use_return_dict |
1 | 0 | 0 |
attr |
PatchTSMixerForPrediction.prediction_channel_indices |
1 | 0 | 0 |
attr |
PatchTSMixerForPrediction.num_parallel_samples |
1 | 0 | 0 |
attr |
PatchTSMixerForPrediction.model |
1 | 0 | 0 |
attr |
PatchTSMixerForPrediction.head |
1 | 0 | 0 |
attr |
PatchTSMixerForPrediction.distribution_output |
1 | 0 | 0 |
meth |
PatchTSMixerPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
PatchTSMixerForPretraining.__init__ |
2 | 1 | 0 |
meth |
PatchTSMixerForPretraining.forward |
7 | 6 | 0 |
attr |
PatchTSMixerForPretraining.model |
1 | 0 | 0 |
attr |
PatchTSMixerForPretraining.head |
1 | 0 | 0 |
attr |
PatchTSMixerForPretraining.masked_loss |
1 | 0 | 0 |
attr |
PatchTSMixerForPretraining.use_return_dict |
1 | 0 | 0 |
meth |
PatchTSMixerForRegression.__init__ |
2 | 1 | 0 |
meth |
PatchTSMixerForRegression.forward |
7 | 6 | 0 |
attr |
PatchTSMixerForRegression.model |
1 | 0 | 0 |
attr |
PatchTSMixerForRegression.loss |
1 | 0 | 0 |
attr |
PatchTSMixerForRegression.distribution_output |
1 | 0 | 0 |
attr |
PatchTSMixerForRegression.use_return_dict |
1 | 0 | 0 |
attr |
PatchTSMixerForRegression.num_parallel_samples |
1 | 0 | 0 |
attr |
PatchTSMixerForRegression.head |
1 | 0 | 0 |
attr |
PatchTSMixerForRegression.inject_scale |
1 | 0 | 0 |
meth |
PatchTSMixerModel.__init__ |
3 | 2 | 0 |
meth |
PatchTSMixerModel.forward |
6 | 5 | 0 |
attr |
PatchTSMixerModel.use_return_dict |
1 | 0 | 0 |
attr |
PatchTSMixerModel.encoder |
1 | 0 | 0 |
attr |
PatchTSMixerModel.patching |
1 | 0 | 0 |
attr |
PatchTSMixerModel.masking |
1 | 0 | 0 |
attr |
PatchTSMixerModel.scaler |
1 | 0 | 0 |
transformers.models.patchtst.configuration_patchtst (41 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PatchTSTConfig.__init__ |
41 | 39 | 0 |
attr |
PatchTSTConfig.context_length |
1 | 0 | 0 |
attr |
PatchTSTConfig.num_input_channels |
1 | 0 | 0 |
attr |
PatchTSTConfig.loss |
1 | 0 | 0 |
attr |
PatchTSTConfig.distribution_output |
1 | 0 | 0 |
attr |
PatchTSTConfig.num_parallel_samples |
1 | 0 | 0 |
attr |
PatchTSTConfig.d_model |
1 | 0 | 0 |
attr |
PatchTSTConfig.num_attention_heads |
1 | 0 | 0 |
attr |
PatchTSTConfig.ffn_dim |
1 | 0 | 0 |
attr |
PatchTSTConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
PatchTSTConfig.attention_dropout |
1 | 0 | 0 |
attr |
PatchTSTConfig.share_embedding |
1 | 0 | 0 |
attr |
PatchTSTConfig.channel_attention |
1 | 0 | 0 |
attr |
PatchTSTConfig.norm_type |
1 | 0 | 0 |
attr |
PatchTSTConfig.norm_eps |
1 | 0 | 0 |
attr |
PatchTSTConfig.positional_dropout |
1 | 0 | 0 |
attr |
PatchTSTConfig.path_dropout |
1 | 0 | 0 |
attr |
PatchTSTConfig.ff_dropout |
1 | 0 | 0 |
attr |
PatchTSTConfig.bias |
1 | 0 | 0 |
attr |
PatchTSTConfig.activation_function |
1 | 0 | 0 |
attr |
PatchTSTConfig.pre_norm |
1 | 0 | 0 |
attr |
PatchTSTConfig.positional_encoding_type |
1 | 0 | 0 |
attr |
PatchTSTConfig.use_cls_token |
1 | 0 | 0 |
attr |
PatchTSTConfig.init_std |
1 | 0 | 0 |
attr |
PatchTSTConfig.scaling |
1 | 0 | 0 |
attr |
PatchTSTConfig.patch_length |
1 | 0 | 0 |
attr |
PatchTSTConfig.patch_stride |
1 | 0 | 0 |
attr |
PatchTSTConfig.do_mask_input |
1 | 0 | 0 |
attr |
PatchTSTConfig.mask_type |
1 | 0 | 0 |
attr |
PatchTSTConfig.random_mask_ratio |
1 | 0 | 0 |
attr |
PatchTSTConfig.num_forecast_mask_patches |
1 | 0 | 0 |
attr |
PatchTSTConfig.channel_consistent_masking |
1 | 0 | 0 |
attr |
PatchTSTConfig.unmasked_channel_indices |
1 | 0 | 0 |
attr |
PatchTSTConfig.mask_value |
1 | 0 | 0 |
attr |
PatchTSTConfig.pooling_type |
1 | 0 | 0 |
attr |
PatchTSTConfig.head_dropout |
1 | 0 | 0 |
attr |
PatchTSTConfig.share_projection |
1 | 0 | 0 |
attr |
PatchTSTConfig.prediction_length |
1 | 0 | 0 |
attr |
PatchTSTConfig.num_targets |
1 | 0 | 0 |
attr |
PatchTSTConfig.output_range |
1 | 0 | 0 |
transformers.models.patchtst.modeling_patchtst (29 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PatchTSTForRegression.__init__ |
2 | 1 | 0 |
meth |
PatchTSTForRegression.forward |
8 | 7 | 0 |
attr |
PatchTSTForRegression.model |
1 | 0 | 0 |
attr |
PatchTSTForRegression.head |
1 | 0 | 0 |
attr |
PatchTSTForRegression.distribution_output |
1 | 0 | 0 |
meth |
PatchTSTForPrediction.__init__ |
2 | 1 | 0 |
meth |
PatchTSTForPrediction.forward |
8 | 7 | 0 |
attr |
PatchTSTForPrediction.model |
1 | 0 | 0 |
attr |
PatchTSTForPrediction.head |
1 | 0 | 0 |
attr |
PatchTSTForPrediction.distribution_output |
1 | 0 | 0 |
meth |
PatchTSTForClassification.__init__ |
2 | 1 | 0 |
meth |
PatchTSTForClassification.forward |
8 | 7 | 0 |
attr |
PatchTSTForClassification.model |
1 | 0 | 0 |
attr |
PatchTSTForClassification.head |
1 | 0 | 0 |
meth |
PatchTSTForPretraining.__init__ |
2 | 1 | 0 |
meth |
PatchTSTForPretraining.forward |
7 | 6 | 0 |
attr |
PatchTSTForPretraining.model |
1 | 0 | 0 |
attr |
PatchTSTForPretraining.head |
1 | 0 | 0 |
meth |
PatchTSTPreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
PatchTSTPreTrainedModel._set_gradient_checkpointing |
3 | 0 | 0 |
meth |
PatchTSTModel.__init__ |
2 | 1 | 0 |
meth |
PatchTSTModel.forward |
8 | 7 | 0 |
attr |
PatchTSTModel.scaler |
1 | 0 | 0 |
attr |
PatchTSTModel.patchifier |
1 | 0 | 0 |
attr |
PatchTSTModel.do_mask_input |
1 | 0 | 0 |
attr |
PatchTSTModel.encoder |
1 | 0 | 0 |
attr |
PatchTSTModel.masking |
1 | 0 | 0 |
transformers.models.pe_audio.configuration_pe_audio (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PeAudioEncoderConfig.__init__ |
16 | 14 | 0 |
attr |
PeAudioEncoderConfig.hidden_size |
1 | 0 | 0 |
attr |
PeAudioEncoderConfig.intermediate_size |
1 | 0 | 0 |
attr |
PeAudioEncoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
PeAudioEncoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
PeAudioEncoderConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
PeAudioEncoderConfig.head_dim |
1 | 0 | 0 |
attr |
PeAudioEncoderConfig.hidden_act |
1 | 0 | 0 |
attr |
PeAudioEncoderConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
PeAudioEncoderConfig.initializer_range |
1 | 0 | 0 |
attr |
PeAudioEncoderConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
PeAudioEncoderConfig.rope_parameters |
1 | 0 | 0 |
attr |
PeAudioEncoderConfig.attention_bias |
1 | 0 | 0 |
attr |
PeAudioEncoderConfig.attention_dropout |
1 | 0 | 0 |
attr |
PeAudioEncoderConfig.dac_config |
1 | 0 | 0 |
meth |
PeAudioConfig.__init__ |
4 | 0 | 0 |
attr |
PeAudioConfig.text_config |
1 | 0 | 0 |
attr |
PeAudioConfig.audio_config |
1 | 0 | 0 |
transformers.models.pe_audio.feature_extraction_pe_audio (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PeAudioFeatureExtractor.__init__ |
6 | 4 | 0 |
meth |
PeAudioFeatureExtractor._reflect_pad |
2 | 0 | 0 |
attr |
PeAudioFeatureExtractor.hop_length |
1 | 0 | 0 |
transformers.models.pe_audio.modeling_pe_audio (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PeAudioModel.__init__ |
2 | 1 | 0 |
meth |
PeAudioModel.get_text_audio_embeds |
3 | 0 | 0 |
meth |
PeAudioModel.get_audio_embeds |
3 | 0 | 0 |
meth |
PeAudioModel.forward |
7 | 6 | 0 |
attr |
PeAudioModel.text_model |
1 | 0 | 0 |
attr |
PeAudioModel.audio_encoder |
1 | 0 | 0 |
attr |
PeAudioModel.text_audio_head |
1 | 0 | 0 |
attr |
PeAudioModel.audio_head |
1 | 0 | 0 |
attr |
PeAudioModel.text_audio_logit_scale |
1 | 0 | 0 |
attr |
PeAudioModel.text_audio_logit_bias |
1 | 0 | 0 |
meth |
PeAudioEncoder.__init__ |
2 | 1 | 0 |
meth |
PeAudioEncoder.forward |
4 | 3 | 0 |
attr |
PeAudioEncoder.embedder |
1 | 0 | 0 |
attr |
PeAudioEncoder.patch_embedder |
1 | 0 | 0 |
attr |
PeAudioEncoder.layers |
1 | 0 | 0 |
attr |
PeAudioEncoder.norm |
1 | 0 | 0 |
attr |
PeAudioEncoder.rotary_emb |
1 | 0 | 0 |
attr |
PeAudioEncoder.output |
1 | 0 | 0 |
attr |
PeAudioEncoder.gradient_checkpointing |
1 | 0 | 0 |
meth |
PeAudioFrameLevelModel.get_audio_embeds |
3 | 0 | 0 |
meth |
PeAudioFrameLevelModel.forward |
7 | 6 | 0 |
transformers.models.pe_audio.modular_pe_audio (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PeAudioModel.__init__ |
2 | 1 | 0 |
meth |
PeAudioModel.get_text_audio_embeds |
3 | 0 | 0 |
meth |
PeAudioModel.get_audio_embeds |
3 | 0 | 0 |
meth |
PeAudioModel.forward |
7 | 6 | 0 |
attr |
PeAudioModel.text_model |
1 | 0 | 0 |
attr |
PeAudioModel.audio_encoder |
1 | 0 | 0 |
attr |
PeAudioModel.text_audio_head |
1 | 0 | 0 |
attr |
PeAudioModel.audio_head |
1 | 0 | 0 |
attr |
PeAudioModel.text_audio_logit_scale |
1 | 0 | 0 |
attr |
PeAudioModel.text_audio_logit_bias |
1 | 0 | 0 |
meth |
PeAudioEncoder.forward |
4 | 3 | 0 |
meth |
PeAudioFrameLevelModel.get_audio_embeds |
3 | 0 | 0 |
meth |
PeAudioFrameLevelModel.forward |
7 | 6 | 0 |
transformers.models.pe_audio_video.configuration_pe_audio_video (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PeAudioVideoConfig.__init__ |
4 | 0 | 0 |
prop |
PeAudioVideoConfig.audio_config |
1 | 0 | 0 |
prop |
PeAudioVideoConfig.video_config |
1 | 0 | 0 |
attr |
PeAudioVideoConfig.text_config |
1 | 0 | 0 |
attr |
PeAudioVideoConfig.audio_video_config |
1 | 0 | 0 |
meth |
PeAudioVideoEncoderConfig.__init__ |
17 | 15 | 0 |
attr |
PeAudioVideoEncoderConfig.hidden_size |
1 | 0 | 0 |
attr |
PeAudioVideoEncoderConfig.intermediate_size |
1 | 0 | 0 |
attr |
PeAudioVideoEncoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
PeAudioVideoEncoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
PeAudioVideoEncoderConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
PeAudioVideoEncoderConfig.head_dim |
1 | 0 | 0 |
attr |
PeAudioVideoEncoderConfig.hidden_act |
1 | 0 | 0 |
attr |
PeAudioVideoEncoderConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
PeAudioVideoEncoderConfig.initializer_range |
1 | 0 | 0 |
attr |
PeAudioVideoEncoderConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
PeAudioVideoEncoderConfig.rope_parameters |
1 | 0 | 0 |
attr |
PeAudioVideoEncoderConfig.attention_bias |
1 | 0 | 0 |
attr |
PeAudioVideoEncoderConfig.attention_dropout |
1 | 0 | 0 |
attr |
PeAudioVideoEncoderConfig.audio_config |
1 | 0 | 0 |
attr |
PeAudioVideoEncoderConfig.video_config |
1 | 0 | 0 |
transformers.models.pe_audio_video.modeling_pe_audio_video (44 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PeAudioVideoModel.__init__ |
2 | 1 | 0 |
meth |
PeAudioVideoModel.get_text_audio_embeds |
3 | 0 | 0 |
meth |
PeAudioVideoModel.get_text_video_embeds |
3 | 0 | 0 |
meth |
PeAudioVideoModel.get_text_audio_video_embeds |
3 | 0 | 0 |
meth |
PeAudioVideoModel.get_audio_embeds |
3 | 0 | 0 |
meth |
PeAudioVideoModel.get_video_embeds |
3 | 0 | 0 |
meth |
PeAudioVideoModel.get_audio_video_embeds |
8 | 7 | 0 |
meth |
PeAudioVideoModel.forward |
9 | 7 | 0 |
attr |
PeAudioVideoModel.text_model |
1 | 0 | 0 |
attr |
PeAudioVideoModel.audio_model |
1 | 0 | 0 |
attr |
PeAudioVideoModel.video_model |
1 | 0 | 0 |
attr |
PeAudioVideoModel.audio_video_encoder |
1 | 0 | 0 |
attr |
PeAudioVideoModel.audio_video_head |
1 | 0 | 0 |
attr |
PeAudioVideoModel.text_audio_video_head |
1 | 0 | 0 |
attr |
PeAudioVideoModel.audio_video_logit_scale |
1 | 0 | 0 |
attr |
PeAudioVideoModel.audio_video_logit_bias |
1 | 0 | 0 |
attr |
PeAudioVideoModel.text_audio_video_logit_scale |
1 | 0 | 0 |
attr |
PeAudioVideoModel.text_audio_video_logit_bias |
1 | 0 | 0 |
attr |
PeAudioVideoModel.audio_plus_text_head |
1 | 0 | 0 |
attr |
PeAudioVideoModel.audio_plus_text_logit_scale |
1 | 0 | 0 |
attr |
PeAudioVideoModel.audio_plus_text_logit_bias |
1 | 0 | 0 |
attr |
PeAudioVideoModel.video_plus_text_head |
1 | 0 | 0 |
attr |
PeAudioVideoModel.video_plus_text_logit_scale |
1 | 0 | 0 |
attr |
PeAudioVideoModel.video_plus_text_logit_bias |
1 | 0 | 0 |
meth |
PeAudioVideoEncoder.__init__ |
2 | 1 | 0 |
meth |
PeAudioVideoEncoder.forward |
6 | 5 | 0 |
attr |
PeAudioVideoEncoder.embedder |
1 | 0 | 0 |
attr |
PeAudioVideoEncoder.patch_embedder |
1 | 0 | 0 |
attr |
PeAudioVideoEncoder.layers |
1 | 0 | 0 |
attr |
PeAudioVideoEncoder.norm |
1 | 0 | 0 |
attr |
PeAudioVideoEncoder.rotary_emb |
1 | 0 | 0 |
attr |
PeAudioVideoEncoder.output |
1 | 0 | 0 |
attr |
PeAudioVideoEncoder.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.pe_audio_video.modular_pe_audio_video (44 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PeAudioVideoModel.__init__ |
2 | 1 | 0 |
meth |
PeAudioVideoModel.get_text_audio_embeds |
3 | 0 | 0 |
meth |
PeAudioVideoModel.get_text_video_embeds |
3 | 0 | 0 |
meth |
PeAudioVideoModel.get_text_audio_video_embeds |
3 | 0 | 0 |
meth |
PeAudioVideoModel.get_audio_embeds |
3 | 0 | 0 |
meth |
PeAudioVideoModel.get_video_embeds |
3 | 0 | 0 |
meth |
PeAudioVideoModel.get_audio_video_embeds |
8 | 7 | 0 |
meth |
PeAudioVideoModel.forward |
9 | 7 | 0 |
attr |
PeAudioVideoModel.text_model |
1 | 0 | 0 |
attr |
PeAudioVideoModel.audio_model |
1 | 0 | 0 |
attr |
PeAudioVideoModel.video_model |
1 | 0 | 0 |
attr |
PeAudioVideoModel.audio_video_encoder |
1 | 0 | 0 |
attr |
PeAudioVideoModel.audio_video_head |
1 | 0 | 0 |
attr |
PeAudioVideoModel.text_audio_video_head |
1 | 0 | 0 |
attr |
PeAudioVideoModel.audio_video_logit_scale |
1 | 0 | 0 |
attr |
PeAudioVideoModel.audio_video_logit_bias |
1 | 0 | 0 |
attr |
PeAudioVideoModel.text_audio_video_logit_scale |
1 | 0 | 0 |
attr |
PeAudioVideoModel.text_audio_video_logit_bias |
1 | 0 | 0 |
attr |
PeAudioVideoModel.audio_plus_text_head |
1 | 0 | 0 |
attr |
PeAudioVideoModel.audio_plus_text_logit_scale |
1 | 0 | 0 |
attr |
PeAudioVideoModel.audio_plus_text_logit_bias |
1 | 0 | 0 |
attr |
PeAudioVideoModel.video_plus_text_head |
1 | 0 | 0 |
attr |
PeAudioVideoModel.video_plus_text_logit_scale |
1 | 0 | 0 |
attr |
PeAudioVideoModel.video_plus_text_logit_bias |
1 | 0 | 0 |
meth |
PeAudioVideoEncoder.__init__ |
2 | 1 | 0 |
meth |
PeAudioVideoEncoder.forward |
6 | 5 | 0 |
attr |
PeAudioVideoEncoder.embedder |
1 | 0 | 0 |
attr |
PeAudioVideoEncoder.patch_embedder |
1 | 0 | 0 |
attr |
PeAudioVideoEncoder.layers |
1 | 0 | 0 |
attr |
PeAudioVideoEncoder.norm |
1 | 0 | 0 |
attr |
PeAudioVideoEncoder.rotary_emb |
1 | 0 | 0 |
attr |
PeAudioVideoEncoder.output |
1 | 0 | 0 |
attr |
PeAudioVideoEncoder.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.pe_video.configuration_pe_video (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PeVideoEncoderConfig.__init__ |
16 | 14 | 0 |
attr |
PeVideoEncoderConfig.hidden_size |
1 | 0 | 0 |
attr |
PeVideoEncoderConfig.intermediate_size |
1 | 0 | 0 |
attr |
PeVideoEncoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
PeVideoEncoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
PeVideoEncoderConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
PeVideoEncoderConfig.head_dim |
1 | 0 | 0 |
attr |
PeVideoEncoderConfig.hidden_act |
1 | 0 | 0 |
attr |
PeVideoEncoderConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
PeVideoEncoderConfig.initializer_range |
1 | 0 | 0 |
attr |
PeVideoEncoderConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
PeVideoEncoderConfig.rope_parameters |
1 | 0 | 0 |
attr |
PeVideoEncoderConfig.attention_bias |
1 | 0 | 0 |
attr |
PeVideoEncoderConfig.attention_dropout |
1 | 0 | 0 |
attr |
PeVideoEncoderConfig.vision_config |
1 | 0 | 0 |
meth |
PeVideoConfig.__init__ |
4 | 0 | 0 |
attr |
PeVideoConfig.text_config |
1 | 0 | 0 |
attr |
PeVideoConfig.video_config |
1 | 0 | 0 |
transformers.models.pe_video.modeling_pe_video (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PeVideoModel.__init__ |
2 | 1 | 0 |
meth |
PeVideoModel.forward |
7 | 6 | 0 |
attr |
PeVideoModel.text_model |
1 | 0 | 0 |
attr |
PeVideoModel.video_encoder |
1 | 0 | 0 |
attr |
PeVideoModel.text_video_head |
1 | 0 | 0 |
attr |
PeVideoModel.video_head |
1 | 0 | 0 |
attr |
PeVideoModel.text_video_logit_scale |
1 | 0 | 0 |
attr |
PeVideoModel.text_video_logit_bias |
1 | 0 | 0 |
meth |
PeVideoEncoder.__init__ |
2 | 1 | 0 |
meth |
PeVideoEncoder.forward |
4 | 3 | 0 |
attr |
PeVideoEncoder.embedder |
1 | 0 | 0 |
attr |
PeVideoEncoder.patch_embedder |
1 | 0 | 0 |
attr |
PeVideoEncoder.layers |
1 | 0 | 0 |
attr |
PeVideoEncoder.norm |
1 | 0 | 0 |
attr |
PeVideoEncoder.rotary_emb |
1 | 0 | 0 |
attr |
PeVideoEncoder.output |
1 | 0 | 0 |
attr |
PeVideoEncoder.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.pe_video.modular_pe_video (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PeVideoModel.__init__ |
2 | 1 | 0 |
meth |
PeVideoModel.forward |
7 | 6 | 0 |
attr |
PeVideoModel.text_model |
1 | 0 | 0 |
attr |
PeVideoModel.video_encoder |
1 | 0 | 0 |
attr |
PeVideoModel.text_video_head |
1 | 0 | 0 |
attr |
PeVideoModel.video_head |
1 | 0 | 0 |
attr |
PeVideoModel.text_video_logit_scale |
1 | 0 | 0 |
attr |
PeVideoModel.text_video_logit_bias |
1 | 0 | 0 |
meth |
PeVideoEncoder.__init__ |
2 | 1 | 0 |
meth |
PeVideoEncoder.forward |
4 | 3 | 0 |
attr |
PeVideoEncoder.embedder |
1 | 0 | 0 |
transformers.models.pe_video.video_processing_pe_video (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PeVideoVideoProcessor.sample_frames |
5 | 3 | 0 |
transformers.models.pegasus.configuration_pegasus (52 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PegasusConfig.__init__ |
27 | 0 | 0 |
attr |
PegasusConfig.is_decoder |
1 | 0 | 0 |
attr |
PegasusConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
PegasusConfig.vocab_size |
1 | 0 | 0 |
attr |
PegasusConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
PegasusConfig.d_model |
1 | 0 | 0 |
attr |
PegasusConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
PegasusConfig.encoder_layers |
1 | 0 | 0 |
attr |
PegasusConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
PegasusConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
PegasusConfig.decoder_layers |
1 | 0 | 0 |
attr |
PegasusConfig.decoder_attention_heads |
1 | 0 | 0 |
attr |
PegasusConfig.dropout |
1 | 0 | 0 |
attr |
PegasusConfig.attention_dropout |
1 | 0 | 0 |
attr |
PegasusConfig.activation_dropout |
1 | 0 | 0 |
attr |
PegasusConfig.activation_function |
1 | 0 | 0 |
attr |
PegasusConfig.init_std |
1 | 0 | 0 |
attr |
PegasusConfig.encoder_layerdrop |
1 | 0 | 0 |
attr |
PegasusConfig.decoder_layerdrop |
1 | 0 | 0 |
attr |
PegasusConfig.use_cache |
1 | 0 | 0 |
attr |
PegasusConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
PegasusConfig.scale_embedding |
1 | 0 | 0 |
attr |
PegasusConfig.pad_token_id |
1 | 0 | 0 |
attr |
PegasusConfig.eos_token_id |
1 | 0 | 0 |
attr |
PegasusConfig.decoder_start_token_id |
1 | 0 | 0 |
attr |
PegasusConfig.forced_eos_token_id |
1 | 0 | 0 |
transformers.models.pegasus.modeling_pegasus (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PegasusPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
PegasusModel.__init__ |
2 | 1 | 0 |
meth |
PegasusModel.get_input_embeddings |
1 | 0 | 0 |
meth |
PegasusModel.set_input_embeddings |
2 | 0 | 0 |
meth |
PegasusModel.resize_position_embeddings |
2 | 1 | 0 |
meth |
PegasusModel.forward |
15 | 14 | 0 |
attr |
PegasusModel.shared |
1 | 0 | 0 |
attr |
PegasusModel.encoder |
1 | 0 | 0 |
attr |
PegasusModel.decoder |
1 | 0 | 0 |
meth |
PegasusForCausalLM.__init__ |
2 | 0 | 0 |
meth |
PegasusForCausalLM.get_input_embeddings |
1 | 0 | 0 |
meth |
PegasusForCausalLM.set_input_embeddings |
2 | 0 | 0 |
meth |
PegasusForCausalLM.resize_position_embeddings |
2 | 1 | 0 |
meth |
PegasusForCausalLM.forward |
15 | 14 | 0 |
attr |
PegasusForCausalLM.model |
1 | 0 | 0 |
attr |
PegasusForCausalLM.lm_head |
1 | 0 | 0 |
meth |
PegasusForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
PegasusForConditionalGeneration.resize_position_embeddings |
2 | 1 | 0 |
meth |
PegasusForConditionalGeneration.forward |
16 | 15 | 0 |
meth |
PegasusForConditionalGeneration.prepare_decoder_input_ids_from_labels |
2 | 1 | 0 |
attr |
PegasusForConditionalGeneration.model |
1 | 0 | 0 |
attr |
PegasusForConditionalGeneration.lm_head |
1 | 0 | 0 |
transformers.models.pegasus.tokenization_pegasus (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PegasusTokenizer.__init__ |
11 | 1 | 0 |
attr |
PegasusTokenizer.offset |
1 | 0 | 0 |
transformers.models.pegasus_x.configuration_pegasus_x (55 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PegasusXConfig.__init__ |
29 | 0 | 0 |
attr |
PegasusXConfig.vocab_size |
1 | 0 | 0 |
attr |
PegasusXConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
PegasusXConfig.d_model |
1 | 0 | 0 |
attr |
PegasusXConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
PegasusXConfig.encoder_layers |
1 | 0 | 0 |
attr |
PegasusXConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
PegasusXConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
PegasusXConfig.decoder_layers |
1 | 0 | 0 |
attr |
PegasusXConfig.decoder_attention_heads |
1 | 0 | 0 |
attr |
PegasusXConfig.dropout |
1 | 0 | 0 |
attr |
PegasusXConfig.attention_dropout |
1 | 0 | 0 |
attr |
PegasusXConfig.activation_dropout |
1 | 0 | 0 |
attr |
PegasusXConfig.activation_function |
1 | 0 | 0 |
attr |
PegasusXConfig.init_std |
1 | 0 | 0 |
attr |
PegasusXConfig.encoder_layerdrop |
1 | 0 | 0 |
attr |
PegasusXConfig.decoder_layerdrop |
1 | 0 | 0 |
attr |
PegasusXConfig.use_cache |
1 | 0 | 0 |
attr |
PegasusXConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
PegasusXConfig.scale_embedding |
1 | 0 | 0 |
attr |
PegasusXConfig.num_global_tokens |
1 | 0 | 0 |
attr |
PegasusXConfig.block_size |
1 | 0 | 0 |
attr |
PegasusXConfig.stagger_local_blocks |
1 | 0 | 0 |
attr |
PegasusXConfig.pad_token_id |
1 | 0 | 0 |
attr |
PegasusXConfig.eos_token_id |
1 | 0 | 0 |
attr |
PegasusXConfig.decoder_start_token_id |
1 | 0 | 0 |
attr |
PegasusXConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.pegasus_x.modeling_pegasus_x (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PegasusXForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
PegasusXForConditionalGeneration.resize_position_embeddings |
2 | 1 | 0 |
meth |
PegasusXForConditionalGeneration.forward |
16 | 15 | 0 |
meth |
PegasusXForConditionalGeneration.prepare_decoder_input_ids_from_labels |
2 | 1 | 0 |
attr |
PegasusXForConditionalGeneration.model |
1 | 0 | 0 |
attr |
PegasusXForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
PegasusXModel.__init__ |
2 | 1 | 0 |
meth |
PegasusXModel.get_input_embeddings |
1 | 0 | 0 |
meth |
PegasusXModel.set_input_embeddings |
2 | 0 | 0 |
meth |
PegasusXModel.resize_position_embeddings |
2 | 1 | 0 |
meth |
PegasusXModel.forward |
15 | 14 | 0 |
attr |
PegasusXModel.shared |
1 | 0 | 0 |
attr |
PegasusXModel.encoder |
1 | 0 | 0 |
attr |
PegasusXModel.decoder |
1 | 0 | 0 |
transformers.models.perceiver.configuration_perceiver (55 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PerceiverConfig.__init__ |
29 | 0 | 0 |
attr |
PerceiverConfig.num_latents |
1 | 0 | 0 |
attr |
PerceiverConfig.d_latents |
1 | 0 | 0 |
attr |
PerceiverConfig.d_model |
1 | 0 | 0 |
attr |
PerceiverConfig.num_blocks |
1 | 0 | 0 |
attr |
PerceiverConfig.num_self_attends_per_block |
1 | 0 | 0 |
attr |
PerceiverConfig.num_self_attention_heads |
1 | 0 | 0 |
attr |
PerceiverConfig.num_cross_attention_heads |
1 | 0 | 0 |
attr |
PerceiverConfig.qk_channels |
1 | 0 | 0 |
attr |
PerceiverConfig.v_channels |
1 | 0 | 0 |
attr |
PerceiverConfig.cross_attention_shape_for_attention |
1 | 0 | 0 |
attr |
PerceiverConfig.self_attention_widening_factor |
1 | 0 | 0 |
attr |
PerceiverConfig.cross_attention_widening_factor |
1 | 0 | 0 |
attr |
PerceiverConfig.hidden_act |
1 | 0 | 0 |
attr |
PerceiverConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
PerceiverConfig.initializer_range |
1 | 0 | 0 |
attr |
PerceiverConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
PerceiverConfig.use_query_residual |
1 | 0 | 0 |
attr |
PerceiverConfig.vocab_size |
1 | 0 | 0 |
attr |
PerceiverConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
PerceiverConfig.image_size |
1 | 0 | 0 |
attr |
PerceiverConfig.train_size |
1 | 0 | 0 |
attr |
PerceiverConfig.num_frames |
1 | 0 | 0 |
attr |
PerceiverConfig.audio_samples_per_frame |
1 | 0 | 0 |
attr |
PerceiverConfig.samples_per_patch |
1 | 0 | 0 |
attr |
PerceiverConfig.output_shape |
1 | 0 | 0 |
attr |
PerceiverConfig.output_num_channels |
1 | 0 | 0 |
transformers.models.perceiver.image_processing_perceiver (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PerceiverImageProcessor.__init__ |
12 | 11 | 0 |
meth |
PerceiverImageProcessor.center_crop |
7 | 6 | 0 |
meth |
PerceiverImageProcessor.resize |
7 | 6 | 0 |
attr |
PerceiverImageProcessor.do_center_crop |
1 | 0 | 0 |
attr |
PerceiverImageProcessor.crop_size |
1 | 0 | 0 |
attr |
PerceiverImageProcessor.do_resize |
1 | 0 | 0 |
attr |
PerceiverImageProcessor.size |
1 | 0 | 0 |
attr |
PerceiverImageProcessor.resample |
1 | 0 | 0 |
attr |
PerceiverImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
PerceiverImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
PerceiverImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
PerceiverImageProcessor.image_mean |
1 | 0 | 0 |
attr |
PerceiverImageProcessor.image_std |
1 | 0 | 0 |
transformers.models.perceiver.image_processing_perceiver_fast (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PerceiverImageProcessorFast.center_crop |
5 | 4 | 0 |
meth |
PerceiverImageProcessorFast._preprocess |
15 | 14 | 0 |
transformers.models.perceiver.modeling_perceiver (61 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PerceiverForSequenceClassification.__init__ |
2 | 0 | 0 |
meth |
PerceiverForSequenceClassification.forward |
9 | 8 | 0 |
attr |
PerceiverForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
PerceiverForSequenceClassification.perceiver |
1 | 0 | 0 |
meth |
PerceiverModel.__init__ |
5 | 3 | 0 |
meth |
PerceiverModel.get_input_embeddings |
1 | 0 | 0 |
meth |
PerceiverModel.set_input_embeddings |
2 | 0 | 0 |
meth |
PerceiverModel.forward |
9 | 8 | 0 |
attr |
PerceiverModel.input_preprocessor |
1 | 0 | 0 |
attr |
PerceiverModel.output_postprocessor |
1 | 0 | 0 |
attr |
PerceiverModel.embeddings |
1 | 0 | 0 |
attr |
PerceiverModel.encoder |
1 | 0 | 0 |
attr |
PerceiverModel.decoder |
1 | 0 | 0 |
meth |
PerceiverForImageClassificationFourier.__init__ |
2 | 0 | 0 |
meth |
PerceiverForImageClassificationFourier.forward |
9 | 8 | 0 |
attr |
PerceiverForImageClassificationFourier.num_labels |
1 | 0 | 0 |
attr |
PerceiverForImageClassificationFourier.perceiver |
1 | 0 | 0 |
meth |
PerceiverLayer.__init__ |
10 | 0 | 0 |
meth |
PerceiverLayer.feed_forward_chunk |
2 | 0 | 0 |
attr |
PerceiverLayer.chunk_size_feed_forward |
1 | 0 | 0 |
attr |
PerceiverLayer.seq_len_dim |
1 | 0 | 0 |
attr |
PerceiverLayer.attention |
1 | 0 | 0 |
attr |
PerceiverLayer.layernorm |
1 | 0 | 0 |
attr |
PerceiverLayer.mlp |
1 | 0 | 0 |
meth |
PerceiverPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
PerceiverForOpticalFlow.__init__ |
2 | 0 | 0 |
meth |
PerceiverForOpticalFlow.forward |
8 | 7 | 0 |
attr |
PerceiverForOpticalFlow.perceiver |
1 | 0 | 0 |
meth |
PerceiverForMaskedLM.__init__ |
2 | 1 | 0 |
meth |
PerceiverForMaskedLM.forward |
9 | 8 | 0 |
attr |
PerceiverForMaskedLM.perceiver |
1 | 0 | 0 |
attr |
PerceiverForMaskedLM.embedding_decoder |
1 | 0 | 0 |
meth |
PerceiverForMultimodalAutoencoding.__init__ |
2 | 1 | 0 |
meth |
PerceiverForMultimodalAutoencoding.forward |
9 | 8 | 0 |
attr |
PerceiverForMultimodalAutoencoding.perceiver |
1 | 0 | 0 |
meth |
PerceiverForImageClassificationLearned.__init__ |
2 | 0 | 0 |
meth |
PerceiverForImageClassificationLearned.forward |
10 | 9 | 0 |
attr |
PerceiverForImageClassificationLearned.num_labels |
1 | 0 | 0 |
attr |
PerceiverForImageClassificationLearned.perceiver |
1 | 0 | 0 |
meth |
PerceiverForImageClassificationConvProcessing.__init__ |
2 | 0 | 0 |
meth |
PerceiverForImageClassificationConvProcessing.forward |
9 | 8 | 0 |
attr |
PerceiverForImageClassificationConvProcessing.num_labels |
1 | 0 | 0 |
attr |
PerceiverForImageClassificationConvProcessing.perceiver |
1 | 0 | 0 |
transformers.models.perceiver.tokenization_perceiver (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PerceiverTokenizer.__init__ |
9 | 1 | 0 |
meth |
PerceiverTokenizer._convert_token_to_id |
2 | 0 | 0 |
meth |
PerceiverTokenizer._convert_id_to_token |
2 | 0 | 0 |
meth |
PerceiverTokenizer.convert_tokens_to_string |
2 | 0 | 0 |
prop |
PerceiverTokenizer.vocab_size |
1 | 0 | 0 |
transformers.models.perception_lm.configuration_perception_lm (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PerceptionLMConfig.__init__ |
8 | 0 | 0 |
attr |
PerceptionLMConfig.image_token_id |
1 | 0 | 0 |
attr |
PerceptionLMConfig.video_token_id |
1 | 0 | 0 |
attr |
PerceptionLMConfig.vision_config |
1 | 0 | 0 |
attr |
PerceptionLMConfig.vision_use_cls_token |
1 | 0 | 0 |
attr |
PerceptionLMConfig.text_config |
1 | 0 | 0 |
attr |
PerceptionLMConfig.projector_pooling_ratio |
1 | 0 | 0 |
transformers.models.perception_lm.image_processing_perception_lm_fast (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PerceptionLMImageProcessorFast.preprocess |
3 | 2 | 0 |
meth |
PerceptionLMImageProcessorFast._factors |
2 | 1 | 0 |
meth |
PerceptionLMImageProcessorFast._find_supported_aspect_ratios |
1 | 0 | 0 |
meth |
PerceptionLMImageProcessorFast._fit_image_to_canvas |
4 | 3 | 0 |
meth |
PerceptionLMImageProcessorFast.resize |
6 | 5 | 0 |
transformers.models.perception_lm.modeling_perception_lm (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PerceptionLMModel.__init__ |
2 | 1 | 0 |
meth |
PerceptionLMModel.get_input_embeddings |
1 | 0 | 0 |
meth |
PerceptionLMModel.set_input_embeddings |
2 | 0 | 0 |
meth |
PerceptionLMModel.get_placeholder_mask |
5 | 4 | 0 |
meth |
PerceptionLMModel.forward |
14 | 13 | 0 |
attr |
PerceptionLMModel.vision_tower |
1 | 0 | 0 |
attr |
PerceptionLMModel.multi_modal_projector |
1 | 0 | 0 |
attr |
PerceptionLMModel.language_model |
1 | 0 | 0 |
meth |
PerceptionLMForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
PerceptionLMForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
PerceptionLMForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
PerceptionLMForConditionalGeneration.forward |
15 | 14 | 0 |
meth |
PerceptionLMForConditionalGeneration.prepare_inputs_for_generation |
11 | 0 | 0 |
attr |
PerceptionLMForConditionalGeneration.model |
1 | 0 | 0 |
attr |
PerceptionLMForConditionalGeneration.lm_head |
1 | 0 | 0 |
transformers.models.perception_lm.modular_perception_lm (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PerceptionLMModel.__init__ |
2 | 1 | 0 |
meth |
PerceptionLMModel.get_placeholder_mask |
5 | 4 | 0 |
meth |
PerceptionLMModel.forward |
14 | 13 | 0 |
attr |
PerceptionLMModel.vision_tower |
1 | 0 | 0 |
attr |
PerceptionLMModel.multi_modal_projector |
1 | 0 | 0 |
attr |
PerceptionLMModel.language_model |
1 | 0 | 0 |
meth |
PerceptionLMForConditionalGeneration.prepare_inputs_for_generation |
11 | 0 | 0 |
meth |
PerceptionLMForConditionalGeneration.forward |
15 | 14 | 0 |
meth |
PerceptionLMForConditionalGeneration.get_image_features |
2 | 0 | 0 |
transformers.models.perception_lm.processing_perception_lm (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PerceptionLMProcessor.__init__ |
8 | 0 | 0 |
meth |
PerceptionLMProcessor._expand_media_tokens |
4 | 2 | 0 |
meth |
PerceptionLMProcessor._get_num_multimodal_tokens |
3 | 0 | 0 |
attr |
PerceptionLMProcessor.patch_size |
1 | 0 | 0 |
attr |
PerceptionLMProcessor.pooling_ratio |
1 | 0 | 0 |
attr |
PerceptionLMProcessor.image_token |
1 | 0 | 0 |
attr |
PerceptionLMProcessor.video_token |
1 | 0 | 0 |
attr |
PerceptionLMProcessor.image_token_id |
1 | 0 | 0 |
attr |
PerceptionLMProcessor.video_token_id |
1 | 0 | 0 |
transformers.models.persimmon.configuration_persimmon (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PersimmonConfig.__init__ |
20 | 18 | 0 |
attr |
PersimmonConfig.vocab_size |
1 | 0 | 0 |
attr |
PersimmonConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
PersimmonConfig.hidden_size |
1 | 0 | 0 |
attr |
PersimmonConfig.intermediate_size |
1 | 0 | 0 |
attr |
PersimmonConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
PersimmonConfig.num_attention_heads |
1 | 0 | 0 |
attr |
PersimmonConfig.hidden_act |
1 | 0 | 0 |
attr |
PersimmonConfig.initializer_range |
1 | 0 | 0 |
attr |
PersimmonConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
PersimmonConfig.use_cache |
1 | 0 | 0 |
attr |
PersimmonConfig.qk_layernorm |
1 | 0 | 0 |
attr |
PersimmonConfig.hidden_dropout |
1 | 0 | 0 |
attr |
PersimmonConfig.attention_dropout |
1 | 0 | 0 |
attr |
PersimmonConfig.rope_parameters |
1 | 0 | 0 |
attr |
PersimmonConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
PersimmonConfig.pad_token_id |
1 | 0 | 0 |
attr |
PersimmonConfig.bos_token_id |
1 | 0 | 0 |
attr |
PersimmonConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.persimmon.modeling_persimmon (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PersimmonForCausalLM.__init__ |
2 | 0 | 0 |
meth |
PersimmonForCausalLM.forward |
13 | 12 | 0 |
attr |
PersimmonForCausalLM.model |
1 | 0 | 0 |
attr |
PersimmonForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
PersimmonForCausalLM.lm_head |
1 | 0 | 0 |
meth |
PersimmonModel.__init__ |
2 | 1 | 0 |
attr |
PersimmonModel.padding_idx |
1 | 0 | 0 |
attr |
PersimmonModel.vocab_size |
1 | 0 | 0 |
attr |
PersimmonModel.embed_tokens |
1 | 0 | 0 |
attr |
PersimmonModel.layers |
1 | 0 | 0 |
attr |
PersimmonModel.final_layernorm |
1 | 0 | 0 |
attr |
PersimmonModel.rotary_emb |
1 | 0 | 0 |
attr |
PersimmonModel.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.phi.configuration_phi (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PhiConfig.__init__ |
22 | 20 | 0 |
attr |
PhiConfig.vocab_size |
1 | 0 | 0 |
attr |
PhiConfig.hidden_size |
1 | 0 | 0 |
attr |
PhiConfig.intermediate_size |
1 | 0 | 0 |
attr |
PhiConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
PhiConfig.num_attention_heads |
1 | 0 | 0 |
attr |
PhiConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
PhiConfig.resid_pdrop |
1 | 0 | 0 |
attr |
PhiConfig.embd_pdrop |
1 | 0 | 0 |
attr |
PhiConfig.attention_dropout |
1 | 0 | 0 |
attr |
PhiConfig.hidden_act |
1 | 0 | 0 |
attr |
PhiConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
PhiConfig.initializer_range |
1 | 0 | 0 |
attr |
PhiConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
PhiConfig.use_cache |
1 | 0 | 0 |
attr |
PhiConfig.qk_layernorm |
1 | 0 | 0 |
attr |
PhiConfig.rope_parameters |
1 | 0 | 0 |
attr |
PhiConfig.bos_token_id |
1 | 0 | 0 |
attr |
PhiConfig.eos_token_id |
1 | 0 | 0 |
attr |
PhiConfig.pad_token_id |
1 | 0 | 0 |
attr |
PhiConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.phi.modeling_phi (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PhiModel.__init__ |
2 | 1 | 0 |
attr |
PhiModel.padding_idx |
1 | 0 | 0 |
attr |
PhiModel.vocab_size |
1 | 0 | 0 |
attr |
PhiModel.embed_tokens |
1 | 0 | 0 |
attr |
PhiModel.layers |
1 | 0 | 0 |
attr |
PhiModel.rotary_emb |
1 | 0 | 0 |
attr |
PhiModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
PhiModel.embed_dropout |
1 | 0 | 0 |
attr |
PhiModel.final_layernorm |
1 | 0 | 0 |
meth |
PhiForCausalLM.__init__ |
2 | 0 | 0 |
attr |
PhiForCausalLM.model |
1 | 0 | 0 |
attr |
PhiForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
PhiForCausalLM.lm_head |
1 | 0 | 0 |
transformers.models.phi.modular_phi (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PhiModel.__init__ |
2 | 1 | 0 |
attr |
PhiModel.layers |
1 | 0 | 0 |
attr |
PhiModel.embed_dropout |
1 | 0 | 0 |
attr |
PhiModel.final_layernorm |
1 | 0 | 0 |
attr |
PhiPreTrainedModel |
1 | 0 | 0 |
meth |
PhiForCausalLM.__init__ |
2 | 0 | 0 |
attr |
PhiForCausalLM.lm_head |
1 | 0 | 0 |
transformers.models.phi3.configuration_phi3 (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Phi3Config.__init__ |
23 | 21 | 0 |
meth |
Phi3Config.convert_rope_params_to_dict |
4 | 2 | 0 |
meth |
Phi3Config.validate_rope |
2 | 1 | 0 |
attr |
Phi3Config.vocab_size |
1 | 0 | 0 |
attr |
Phi3Config.hidden_size |
1 | 0 | 0 |
attr |
Phi3Config.intermediate_size |
1 | 0 | 0 |
attr |
Phi3Config.num_hidden_layers |
1 | 0 | 0 |
attr |
Phi3Config.num_attention_heads |
1 | 0 | 0 |
attr |
Phi3Config.num_key_value_heads |
1 | 0 | 0 |
attr |
Phi3Config.resid_pdrop |
1 | 0 | 0 |
attr |
Phi3Config.embd_pdrop |
1 | 0 | 0 |
attr |
Phi3Config.attention_dropout |
1 | 0 | 0 |
attr |
Phi3Config.hidden_act |
1 | 0 | 0 |
attr |
Phi3Config.max_position_embeddings |
1 | 0 | 0 |
attr |
Phi3Config.original_max_position_embeddings |
1 | 0 | 0 |
attr |
Phi3Config.initializer_range |
1 | 0 | 0 |
attr |
Phi3Config.rms_norm_eps |
1 | 0 | 0 |
attr |
Phi3Config.use_cache |
1 | 0 | 0 |
attr |
Phi3Config.rope_parameters |
1 | 0 | 0 |
attr |
Phi3Config.sliding_window |
1 | 0 | 0 |
attr |
Phi3Config.bos_token_id |
1 | 0 | 0 |
attr |
Phi3Config.eos_token_id |
1 | 0 | 0 |
attr |
Phi3Config.pad_token_id |
1 | 0 | 0 |
attr |
Phi3Config.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.phi3.modeling_phi3 (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Phi3ForCausalLM.__init__ |
2 | 0 | 0 |
meth |
Phi3ForCausalLM.prepare_inputs_for_generation |
10 | 0 | 0 |
attr |
Phi3ForCausalLM.model |
1 | 0 | 0 |
attr |
Phi3ForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Phi3ForCausalLM.lm_head |
1 | 0 | 0 |
meth |
Phi3Model.__init__ |
2 | 1 | 0 |
attr |
Phi3Model.padding_idx |
1 | 0 | 0 |
attr |
Phi3Model.vocab_size |
1 | 0 | 0 |
attr |
Phi3Model.embed_tokens |
1 | 0 | 0 |
attr |
Phi3Model.layers |
1 | 0 | 0 |
attr |
Phi3Model.norm |
1 | 0 | 0 |
attr |
Phi3Model.rotary_emb |
1 | 0 | 0 |
attr |
Phi3Model.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.phi3.modular_phi3 (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Phi3ForCausalLM.prepare_inputs_for_generation |
10 | 0 | 0 |
attr |
Phi3Model |
1 | 0 | 0 |
transformers.models.phi4_multimodal.configuration_phi4_multimodal (80 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Phi4MultimodalConfig.__init__ |
25 | 23 | 0 |
meth |
Phi4MultimodalConfig.convert_rope_params_to_dict |
4 | 2 | 0 |
meth |
Phi4MultimodalConfig.validate_rope |
2 | 1 | 0 |
attr |
Phi4MultimodalConfig.vision_config |
1 | 0 | 0 |
attr |
Phi4MultimodalConfig.audio_config |
1 | 0 | 0 |
attr |
Phi4MultimodalConfig.vocab_size |
1 | 0 | 0 |
attr |
Phi4MultimodalConfig.hidden_size |
1 | 0 | 0 |
attr |
Phi4MultimodalConfig.intermediate_size |
1 | 0 | 0 |
attr |
Phi4MultimodalConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Phi4MultimodalConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Phi4MultimodalConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Phi4MultimodalConfig.resid_pdrop |
1 | 0 | 0 |
attr |
Phi4MultimodalConfig.embd_pdrop |
1 | 0 | 0 |
attr |
Phi4MultimodalConfig.attention_dropout |
1 | 0 | 0 |
attr |
Phi4MultimodalConfig.hidden_act |
1 | 0 | 0 |
attr |
Phi4MultimodalConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Phi4MultimodalConfig.original_max_position_embeddings |
1 | 0 | 0 |
attr |
Phi4MultimodalConfig.initializer_range |
1 | 0 | 0 |
attr |
Phi4MultimodalConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Phi4MultimodalConfig.use_cache |
1 | 0 | 0 |
attr |
Phi4MultimodalConfig.rope_parameters |
1 | 0 | 0 |
attr |
Phi4MultimodalConfig.sliding_window |
1 | 0 | 0 |
attr |
Phi4MultimodalConfig.bos_token_id |
1 | 0 | 0 |
attr |
Phi4MultimodalConfig.eos_token_id |
1 | 0 | 0 |
attr |
Phi4MultimodalConfig.pad_token_id |
1 | 0 | 0 |
attr |
Phi4MultimodalConfig.tie_word_embeddings |
1 | 0 | 0 |
meth |
Phi4MultimodalAudioConfig.__init__ |
26 | 24 | 0 |
attr |
Phi4MultimodalAudioConfig.hidden_size |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.intermediate_size |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.activation |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.chunk_size |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.left_chunk |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.num_blocks |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.dropout_rate |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.ext_pw_out_channel |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.depthwise_separable_out_channel |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.depthwise_multiplier |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.kernel_size |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.conv_activation |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.input_size |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.conv_glu_type |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.time_reduction |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.bias_max_distance |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.bias_symmetric |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.nemo_activation |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.nemo_conv_channels |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.downsample_rate |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.audio_token_id |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.initializer_range |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.feature_layer |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.nemo_final_size |
1 | 0 | 0 |
meth |
Phi4MultimodalVisionConfig.__init__ |
15 | 3 | 0 |
attr |
Phi4MultimodalVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
Phi4MultimodalVisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
Phi4MultimodalVisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Phi4MultimodalVisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Phi4MultimodalVisionConfig.num_channels |
1 | 0 | 0 |
attr |
Phi4MultimodalVisionConfig.patch_size |
1 | 0 | 0 |
attr |
Phi4MultimodalVisionConfig.image_size |
1 | 0 | 0 |
attr |
Phi4MultimodalVisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
Phi4MultimodalVisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
Phi4MultimodalVisionConfig.hidden_act |
1 | 0 | 0 |
attr |
Phi4MultimodalVisionConfig.crop_size |
1 | 0 | 0 |
attr |
Phi4MultimodalVisionConfig.image_token_id |
1 | 0 | 0 |
attr |
Phi4MultimodalVisionConfig.feature_layer |
1 | 0 | 0 |
transformers.models.phi4_multimodal.feature_extraction_phi4_multimodal (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Phi4MultimodalFeatureExtractor.__init__ |
14 | 12 | 0 |
meth |
Phi4MultimodalFeatureExtractor.__call__ |
11 | 10 | 0 |
meth |
Phi4MultimodalFeatureExtractor._compute_audio_embed_size |
2 | 0 | 0 |
attr |
Phi4MultimodalFeatureExtractor.hop_length |
1 | 0 | 0 |
attr |
Phi4MultimodalFeatureExtractor.n_fft |
1 | 0 | 0 |
attr |
Phi4MultimodalFeatureExtractor.win_length |
1 | 0 | 0 |
attr |
Phi4MultimodalFeatureExtractor.preemphasis |
1 | 0 | 0 |
attr |
Phi4MultimodalFeatureExtractor.padding_value |
1 | 0 | 0 |
attr |
Phi4MultimodalFeatureExtractor.audio_compression_rate |
1 | 0 | 0 |
attr |
Phi4MultimodalFeatureExtractor.audio_downsample_rate |
1 | 0 | 0 |
attr |
Phi4MultimodalFeatureExtractor.audio_feat_stride |
1 | 0 | 0 |
attr |
Phi4MultimodalFeatureExtractor.mel_filters |
1 | 0 | 0 |
transformers.models.phi4_multimodal.image_processing_phi4_multimodal_fast (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Phi4MultimodalImageProcessorFast.__init__ |
2 | 1 | 0 |
meth |
Phi4MultimodalImageProcessorFast.find_closest_aspect_ratio |
6 | 0 | 0 |
meth |
Phi4MultimodalImageProcessorFast.dynamic_preprocess |
7 | 0 | 0 |
meth |
Phi4MultimodalImageProcessorFast.pad_to_max_num_crops |
3 | 0 | 0 |
meth |
Phi4MultimodalImageProcessorFast.pad_mask_to_max_num_crops |
3 | 0 | 0 |
meth |
Phi4MultimodalImageProcessorFast._preprocess |
13 | 11 | 0 |
transformers.models.phi4_multimodal.modeling_phi4_multimodal (71 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Phi4MultimodalVisionModel.__init__ |
2 | 1 | 0 |
meth |
Phi4MultimodalVisionModel.forward |
4 | 3 | 0 |
attr |
Phi4MultimodalVisionModel.embeddings |
1 | 0 | 0 |
attr |
Phi4MultimodalVisionModel.encoder |
1 | 0 | 0 |
attr |
Phi4MultimodalVisionModel.post_layernorm |
1 | 0 | 0 |
attr |
Phi4MultimodalVisionModel.head |
1 | 0 | 0 |
meth |
Phi4MultimodalAudioModel.__init__ |
2 | 1 | 0 |
meth |
Phi4MultimodalAudioModel._streaming_mask |
5 | 0 | 0 |
meth |
Phi4MultimodalAudioModel.forward_embeddings |
3 | 0 | 0 |
meth |
Phi4MultimodalAudioModel.calculate_hs_mask |
4 | 0 | 0 |
meth |
Phi4MultimodalAudioModel.forward |
4 | 2 | 0 |
attr |
Phi4MultimodalAudioModel.encoder_embedding |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioModel.embed |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioModel.relative_attention_bias_layer |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioModel.encoders |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
Phi4MultimodalPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Phi4MultimodalForCausalLM.__init__ |
2 | 0 | 0 |
meth |
Phi4MultimodalForCausalLM.forward |
19 | 15 | 0 |
meth |
Phi4MultimodalForCausalLM.prepare_inputs_for_generation |
16 | 0 | 0 |
attr |
Phi4MultimodalForCausalLM.model |
1 | 0 | 0 |
attr |
Phi4MultimodalForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Phi4MultimodalForCausalLM.lm_head |
1 | 0 | 0 |
meth |
Phi4MultimodalAudioPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Phi4MultimodalModel.__init__ |
2 | 1 | 0 |
meth |
Phi4MultimodalModel.forward |
17 | 13 | 0 |
attr |
Phi4MultimodalModel.padding_idx |
1 | 0 | 0 |
attr |
Phi4MultimodalModel.vocab_size |
1 | 0 | 0 |
attr |
Phi4MultimodalModel.embed_tokens |
1 | 0 | 0 |
attr |
Phi4MultimodalModel.layers |
1 | 0 | 0 |
attr |
Phi4MultimodalModel.norm |
1 | 0 | 0 |
attr |
Phi4MultimodalModel.rotary_emb |
1 | 0 | 0 |
attr |
Phi4MultimodalModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
Phi4MultimodalModel.embed_dropout |
1 | 0 | 0 |
attr |
Phi4MultimodalModel.embed_tokens_extend |
1 | 0 | 0 |
meth |
Phi4MultimodalVisionPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.phi4_multimodal.modular_phi4_multimodal (116 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Phi4MultimodalAudioModel.__init__ |
2 | 1 | 0 |
meth |
Phi4MultimodalAudioModel._streaming_mask |
5 | 0 | 0 |
meth |
Phi4MultimodalAudioModel.forward_embeddings |
3 | 0 | 0 |
meth |
Phi4MultimodalAudioModel.calculate_hs_mask |
4 | 0 | 0 |
meth |
Phi4MultimodalAudioModel.forward |
4 | 2 | 0 |
attr |
Phi4MultimodalAudioModel.encoder_embedding |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioModel.embed |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioModel.relative_attention_bias_layer |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioModel.encoders |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
Phi4MultimodalPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Phi4MultimodalForCausalLM.init |
2 | 0 | 0 |
meth |
Phi4MultimodalForCausalLM.forward |
19 | 15 | 0 |
meth |
Phi4MultimodalForCausalLM.prepare_inputs_for_generation |
16 | 0 | 0 |
attr |
Phi4MultimodalForCausalLM.model |
1 | 0 | 0 |
attr |
Phi4MultimodalForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Phi4MultimodalForCausalLM.lm_head |
1 | 0 | 0 |
meth |
Phi4MultimodalAudioPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Phi4MultimodalVisionPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Phi4MultimodalModel.init |
2 | 1 | 0 |
meth |
Phi4MultimodalModel.forward |
17 | 13 | 0 |
attr |
Phi4MultimodalModel.padding_idx |
1 | 0 | 0 |
attr |
Phi4MultimodalModel.vocab_size |
1 | 0 | 0 |
attr |
Phi4MultimodalModel.embed_tokens |
1 | 0 | 0 |
attr |
Phi4MultimodalModel.embed_dropout |
1 | 0 | 0 |
attr |
Phi4MultimodalModel.embed_tokens_extend |
1 | 0 | 0 |
attr |
Phi4MultimodalModel.layers |
1 | 0 | 0 |
attr |
Phi4MultimodalModel.norm |
1 | 0 | 0 |
attr |
Phi4MultimodalModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
Phi4MultimodalVisionModel.init |
2 | 1 | 0 |
meth |
Phi4MultimodalVisionModel.forward |
4 | 3 | 0 |
attr |
Phi4MultimodalVisionModel.embeddings |
1 | 0 | 0 |
attr |
Phi4MultimodalVisionModel.encoder |
1 | 0 | 0 |
attr |
Phi4MultimodalVisionModel.post_layernorm |
1 | 0 | 0 |
attr |
Phi4MultimodalVisionModel.head |
1 | 0 | 0 |
meth |
Phi4MultimodalConfig.init |
25 | 23 | 0 |
attr |
Phi4MultimodalConfig.vision_config |
1 | 0 | 0 |
attr |
Phi4MultimodalConfig.audio_config |
1 | 0 | 0 |
meth |
Phi4MultimodalVisionConfig.init |
15 | 3 | 0 |
attr |
Phi4MultimodalVisionConfig.crop_size |
1 | 0 | 0 |
attr |
Phi4MultimodalVisionConfig.image_token_id |
1 | 0 | 0 |
attr |
Phi4MultimodalVisionConfig.feature_layer |
1 | 0 | 0 |
meth |
Phi4MultimodalAudioConfig.init |
26 | 24 | 0 |
attr |
Phi4MultimodalAudioConfig.hidden_size |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.intermediate_size |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.activation |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.chunk_size |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.left_chunk |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.num_blocks |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.dropout_rate |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.ext_pw_out_channel |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.depthwise_separable_out_channel |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.depthwise_multiplier |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.kernel_size |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.conv_activation |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.input_size |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.conv_glu_type |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.time_reduction |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.bias_max_distance |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.bias_symmetric |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.nemo_activation |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.nemo_conv_channels |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.downsample_rate |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.audio_token_id |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.initializer_range |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.feature_layer |
1 | 0 | 0 |
attr |
Phi4MultimodalAudioConfig.nemo_final_size |
1 | 0 | 0 |
transformers.models.phi4_multimodal.processing_phi4_multimodal (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Phi4MultimodalProcessor.init |
5 | 0 | 0 |
attr |
Phi4MultimodalProcessor.image_token |
1 | 0 | 0 |
attr |
Phi4MultimodalProcessor.image_token_id |
1 | 0 | 0 |
attr |
Phi4MultimodalProcessor.audio_token |
1 | 0 | 0 |
attr |
Phi4MultimodalProcessor.audio_token_id |
1 | 0 | 0 |
transformers.models.phimoe.configuration_phimoe (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PhimoeConfig.init |
28 | 26 | 0 |
meth |
PhimoeConfig.validate_rope |
2 | 0 | 0 |
attr |
PhimoeConfig.vocab_size |
1 | 0 | 0 |
attr |
PhimoeConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
PhimoeConfig.hidden_size |
1 | 0 | 0 |
attr |
PhimoeConfig.intermediate_size |
1 | 0 | 0 |
attr |
PhimoeConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
PhimoeConfig.num_attention_heads |
1 | 0 | 0 |
attr |
PhimoeConfig.sliding_window |
1 | 0 | 0 |
attr |
PhimoeConfig.attention_bias |
1 | 0 | 0 |
attr |
PhimoeConfig.lm_head_bias |
1 | 0 | 0 |
attr |
PhimoeConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
PhimoeConfig.hidden_act |
1 | 0 | 0 |
attr |
PhimoeConfig.initializer_range |
1 | 0 | 0 |
attr |
PhimoeConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
PhimoeConfig.use_cache |
1 | 0 | 0 |
attr |
PhimoeConfig.attention_dropout |
1 | 0 | 0 |
attr |
PhimoeConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
PhimoeConfig.num_local_experts |
1 | 0 | 0 |
attr |
PhimoeConfig.output_router_logits |
1 | 0 | 0 |
attr |
PhimoeConfig.router_aux_loss_coef |
1 | 0 | 0 |
attr |
PhimoeConfig.router_jitter_noise |
1 | 0 | 0 |
attr |
PhimoeConfig.input_jitter_noise |
1 | 0 | 0 |
attr |
PhimoeConfig.rope_parameters |
1 | 0 | 0 |
attr |
PhimoeConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
PhimoeConfig.pad_token_id |
1 | 0 | 0 |
attr |
PhimoeConfig.bos_token_id |
1 | 0 | 0 |
attr |
PhimoeConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.phimoe.modeling_phimoe (29 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PhimoePreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
PhimoePreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
PhimoeForCausalLM.init |
2 | 0 | 0 |
meth |
PhimoeForCausalLM.prepare_inputs_for_generation |
10 | 0 | 0 |
attr |
PhimoeForCausalLM.model |
1 | 0 | 0 |
attr |
PhimoeForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
PhimoeForCausalLM.lm_head |
1 | 0 | 0 |
attr |
PhimoeForCausalLM.router_aux_loss_coef |
1 | 0 | 0 |
attr |
PhimoeForCausalLM.num_experts |
1 | 0 | 0 |
attr |
PhimoeForCausalLM.num_experts_per_tok |
1 | 0 | 0 |
meth |
PhimoeModel.init |
2 | 1 | 0 |
attr |
PhimoeModel.padding_idx |
1 | 0 | 0 |
attr |
PhimoeModel.vocab_size |
1 | 0 | 0 |
attr |
PhimoeModel.embed_tokens |
1 | 0 | 0 |
attr |
PhimoeModel.layers |
1 | 0 | 0 |
attr |
PhimoeModel.norm |
1 | 0 | 0 |
attr |
PhimoeModel.rotary_emb |
1 | 0 | 0 |
attr |
PhimoeModel.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.phimoe.modular_phimoe (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
PhimoePreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
PhimoeForCausalLM.init |
2 | 0 | 0 |
meth |
PhimoeForCausalLM.prepare_inputs_for_generation |
10 | 0 | 0 |
attr |
PhimoeForCausalLM.lm_head |
1 | 0 | 0 |
meth |
PhimoeModel.init |
2 | 1 | 0 |
attr |
PhimoeModel.norm |
1 | 0 | 0 |
transformers.models.phobert.tokenization_phobert (31 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PhobertTokenizer.init |
11 | 0 | 0 |
meth |
PhobertTokenizer.get_vocab |
1 | 0 | 0 |
meth |
PhobertTokenizer.bpe |
2 | 0 | 0 |
meth |
PhobertTokenizer._tokenize |
2 | 0 | 0 |
meth |
PhobertTokenizer._convert_token_to_id |
2 | 0 | 0 |
meth |
PhobertTokenizer._convert_id_to_token |
2 | 0 | 0 |
meth |
PhobertTokenizer.convert_tokens_to_string |
2 | 0 | 0 |
meth |
PhobertTokenizer.add_from_file |
2 | 0 | 0 |
prop |
PhobertTokenizer.vocab_size |
1 | 0 | 0 |
attr |
PhobertTokenizer.vocab_file |
1 | 0 | 0 |
attr |
PhobertTokenizer.merges_file |
1 | 0 | 0 |
attr |
PhobertTokenizer.encoder |
1 | 0 | 0 |
attr |
PhobertTokenizer.decoder |
1 | 0 | 0 |
attr |
PhobertTokenizer.bpe_ranks |
1 | 0 | 0 |
attr |
PhobertTokenizer.cache |
1 | 0 | 0 |
transformers.models.pix2struct.configuration_pix2struct (92 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Pix2StructVisionConfig.init |
17 | 0 | 0 |
attr |
Pix2StructVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
Pix2StructVisionConfig.patch_embed_hidden_size |
1 | 0 | 0 |
attr |
Pix2StructVisionConfig.d_ff |
1 | 0 | 0 |
attr |
Pix2StructVisionConfig.dropout_rate |
1 | 0 | 0 |
attr |
Pix2StructVisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Pix2StructVisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Pix2StructVisionConfig.initializer_range |
1 | 0 | 0 |
attr |
Pix2StructVisionConfig.initializer_factor |
1 | 0 | 0 |
attr |
Pix2StructVisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
Pix2StructVisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
Pix2StructVisionConfig.dense_act_fn |
1 | 0 | 0 |
attr |
Pix2StructVisionConfig.seq_len |
1 | 0 | 0 |
attr |
Pix2StructVisionConfig.relative_attention_num_buckets |
1 | 0 | 0 |
attr |
Pix2StructVisionConfig.relative_attention_max_distance |
1 | 0 | 0 |
attr |
Pix2StructVisionConfig.d_kv |
1 | 0 | 0 |
meth |
Pix2StructConfig.init |
9 | 0 | 0 |
attr |
Pix2StructConfig.text_config |
1 | 0 | 0 |
attr |
Pix2StructConfig.vision_config |
1 | 0 | 0 |
attr |
Pix2StructConfig.decoder_start_token_id |
1 | 0 | 0 |
attr |
Pix2StructConfig.pad_token_id |
1 | 0 | 0 |
attr |
Pix2StructConfig.eos_token_id |
1 | 0 | 0 |
attr |
Pix2StructConfig.initializer_factor |
1 | 0 | 0 |
attr |
Pix2StructConfig.initializer_range |
1 | 0 | 0 |
attr |
Pix2StructConfig.is_vqa |
1 | 0 | 0 |
attr |
Pix2StructConfig.tie_word_embeddings |
1 | 0 | 0 |
meth |
Pix2StructTextConfig.init |
22 | 0 | 0 |
attr |
Pix2StructTextConfig.vocab_size |
1 | 0 | 0 |
attr |
Pix2StructTextConfig.hidden_size |
1 | 0 | 0 |
attr |
Pix2StructTextConfig.d_kv |
1 | 0 | 0 |
attr |
Pix2StructTextConfig.d_ff |
1 | 0 | 0 |
attr |
Pix2StructTextConfig.num_layers |
1 | 0 | 0 |
attr |
Pix2StructTextConfig.num_heads |
1 | 0 | 0 |
attr |
Pix2StructTextConfig.relative_attention_num_buckets |
1 | 0 | 0 |
attr |
Pix2StructTextConfig.relative_attention_max_distance |
1 | 0 | 0 |
attr |
Pix2StructTextConfig.dropout_rate |
1 | 0 | 0 |
attr |
Pix2StructTextConfig.layer_norm_epsilon |
1 | 0 | 0 |
attr |
Pix2StructTextConfig.initializer_factor |
1 | 0 | 0 |
attr |
Pix2StructTextConfig.use_cache |
1 | 0 | 0 |
attr |
Pix2StructTextConfig.eos_token_id |
1 | 0 | 0 |
attr |
Pix2StructTextConfig.bos_token_id |
1 | 0 | 0 |
attr |
Pix2StructTextConfig.decoder_start_token_id |
1 | 0 | 0 |
attr |
Pix2StructTextConfig.dense_act_fn |
1 | 0 | 0 |
attr |
Pix2StructTextConfig.pad_token_id |
1 | 0 | 0 |
attr |
Pix2StructTextConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Pix2StructTextConfig.is_decoder |
1 | 0 | 0 |
attr |
Pix2StructTextConfig.add_cross_attention |
1 | 0 | 0 |
transformers.models.pix2struct.image_processing_pix2struct (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Pix2StructImageProcessor.init |
7 | 6 | 0 |
meth |
Pix2StructImageProcessor.extract_flattened_patches |
6 | 5 | 0 |
meth |
Pix2StructImageProcessor.normalize |
5 | 4 | 0 |
meth |
Pix2StructImageProcessor.preprocess |
11 | 10 | 0 |
attr |
Pix2StructImageProcessor.patch_size |
1 | 0 | 0 |
attr |
Pix2StructImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
Pix2StructImageProcessor.do_convert_rgb |
1 | 0 | 0 |
attr |
Pix2StructImageProcessor.max_patches |
1 | 0 | 0 |
attr |
Pix2StructImageProcessor.is_vqa |
1 | 0 | 0 |
transformers.models.pix2struct.image_processing_pix2struct_fast (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Pix2StructImageProcessorFast._further_process_kwargs |
3 | 2 | 0 |
meth |
Pix2StructImageProcessorFast._validate_preprocess_kwargs |
2 | 0 | 0 |
meth |
Pix2StructImageProcessorFast._preprocess |
8 | 7 | 0 |
transformers.models.pix2struct.modeling_pix2struct (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Pix2StructPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Pix2StructPreTrainedModel._shift_right |
2 | 0 | 0 |
prop |
Pix2StructPreTrainedModel.dummy_inputs |
1 | 0 | 0 |
meth |
Pix2StructVisionModel.init |
2 | 1 | 0 |
meth |
Pix2StructVisionModel.get_input_embeddings |
1 | 0 | 0 |
meth |
Pix2StructVisionModel.forward |
7 | 6 | 0 |
attr |
Pix2StructVisionModel.embeddings |
1 | 0 | 0 |
attr |
Pix2StructVisionModel.encoder |
1 | 0 | 0 |
attr |
Pix2StructVisionModel.layernorm |
1 | 0 | 0 |
meth |
Pix2StructTextModel.init |
2 | 0 | 0 |
meth |
Pix2StructTextModel.set_input_embeddings |
2 | 0 | 0 |
meth |
Pix2StructTextModel.forward |
14 | 13 | 0 |
attr |
Pix2StructTextModel.embed_tokens |
1 | 0 | 0 |
attr |
Pix2StructTextModel.layer |
1 | 0 | 0 |
attr |
Pix2StructTextModel.final_layer_norm |
1 | 0 | 0 |
attr |
Pix2StructTextModel.dropout |
1 | 0 | 0 |
attr |
Pix2StructTextModel.lm_head |
1 | 0 | 0 |
attr |
Pix2StructTextModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
Pix2StructForConditionalGeneration.init |
2 | 1 | 0 |
meth |
Pix2StructForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Pix2StructForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Pix2StructForConditionalGeneration.set_output_embeddings |
2 | 0 | 0 |
meth |
Pix2StructForConditionalGeneration.forward |
15 | 14 | 0 |
attr |
Pix2StructForConditionalGeneration.encoder |
1 | 0 | 0 |
attr |
Pix2StructForConditionalGeneration.decoder |
1 | 0 | 0 |
attr |
Pix2StructForConditionalGeneration.is_vqa |
1 | 0 | 0 |
transformers.models.pix2struct.processing_pix2struct (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Pix2StructProcessor.init |
3 | 0 | 0 |
meth |
Pix2StructProcessor.call |
4 | 3 | 0 |
prop |
Pix2StructProcessor.model_input_names |
1 | 0 | 0 |
transformers.models.pixio.configuration_pixio (39 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PixioConfig.init |
21 | 0 | 0 |
attr |
PixioConfig.hidden_size |
1 | 0 | 0 |
attr |
PixioConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
PixioConfig.num_attention_heads |
1 | 0 | 0 |
attr |
PixioConfig.mlp_ratio |
1 | 0 | 0 |
attr |
PixioConfig.hidden_act |
1 | 0 | 0 |
attr |
PixioConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
PixioConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
PixioConfig.initializer_range |
1 | 0 | 0 |
attr |
PixioConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
PixioConfig.image_size |
1 | 0 | 0 |
attr |
PixioConfig.patch_size |
1 | 0 | 0 |
attr |
PixioConfig.num_channels |
1 | 0 | 0 |
attr |
PixioConfig.qkv_bias |
1 | 0 | 0 |
attr |
PixioConfig.drop_path_rate |
1 | 0 | 0 |
attr |
PixioConfig.stage_names |
1 | 0 | 0 |
attr |
PixioConfig.apply_layernorm |
1 | 0 | 0 |
attr |
PixioConfig.reshape_hidden_states |
1 | 0 | 0 |
attr |
PixioConfig.n_cls_tokens |
1 | 0 | 0 |
transformers.models.pixio.modeling_pixio (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PixioBackbone.init |
2 | 0 | 0 |
meth |
PixioBackbone.forward |
4 | 3 | 0 |
attr |
PixioBackbone.num_features |
1 | 0 | 0 |
attr |
PixioBackbone.embeddings |
1 | 0 | 0 |
attr |
PixioBackbone.encoder |
1 | 0 | 0 |
attr |
PixioBackbone.layernorm |
1 | 0 | 0 |
meth |
PixioModel.init |
2 | 1 | 0 |
meth |
PixioModel.forward |
4 | 3 | 0 |
attr |
PixioModel.embeddings |
1 | 0 | 0 |
attr |
PixioModel.encoder |
1 | 0 | 0 |
attr |
PixioModel.layernorm |
1 | 0 | 0 |
meth |
PixioPreTrainedModel._init_weights |
2 | 1 | 0 |
transformers.models.pixio.modular_pixio (29 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PixioBackbone.forward |
4 | 3 | 0 |
meth |
PixioModel.init |
2 | 1 | 0 |
meth |
PixioModel.forward |
4 | 3 | 0 |
attr |
PixioModel.config |
1 | 0 | 0 |
attr |
PixioModel.embeddings |
1 | 0 | 0 |
attr |
PixioModel.encoder |
1 | 0 | 0 |
attr |
PixioModel.layernorm |
1 | 0 | 0 |
meth |
PixioConfig.init |
21 | 0 | 0 |
attr |
PixioConfig.n_cls_tokens |
1 | 0 | 0 |
transformers.models.pixtral.configuration_pixtral (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PixtralVisionConfig.init |
13 | 11 | 0 |
attr |
PixtralVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
PixtralVisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
PixtralVisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
PixtralVisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
PixtralVisionConfig.num_channels |
1 | 0 | 0 |
attr |
PixtralVisionConfig.patch_size |
1 | 0 | 0 |
attr |
PixtralVisionConfig.image_size |
1 | 0 | 0 |
attr |
PixtralVisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
PixtralVisionConfig.hidden_act |
1 | 0 | 0 |
attr |
PixtralVisionConfig.head_dim |
1 | 0 | 0 |
attr |
PixtralVisionConfig.initializer_range |
1 | 0 | 0 |
attr |
PixtralVisionConfig.rope_parameters |
1 | 0 | 0 |
transformers.models.pixtral.image_processing_pixtral (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PixtralImageProcessor.init |
12 | 11 | 0 |
meth |
PixtralImageProcessor.resize |
8 | 7 | 0 |
meth |
PixtralImageProcessor._pad_for_batching |
5 | 4 | 0 |
meth |
PixtralImageProcessor.preprocess |
16 | 15 | 0 |
attr |
PixtralImageProcessor.do_resize |
1 | 0 | 0 |
attr |
PixtralImageProcessor.size |
1 | 0 | 0 |
attr |
PixtralImageProcessor.patch_size |
1 | 0 | 0 |
attr |
PixtralImageProcessor.resample |
1 | 0 | 0 |
attr |
PixtralImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
PixtralImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
PixtralImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
PixtralImageProcessor.image_mean |
1 | 0 | 0 |
attr |
PixtralImageProcessor.image_std |
1 | 0 | 0 |
attr |
PixtralImageProcessor.do_convert_rgb |
1 | 0 | 0 |
transformers.models.pixtral.image_processing_pixtral_fast (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PixtralImageProcessorFast.init |
2 | 1 | 0 |
meth |
PixtralImageProcessorFast.resize |
6 | 5 | 0 |
meth |
PixtralImageProcessorFast._pad_for_batching |
3 | 2 | 0 |
meth |
PixtralImageProcessorFast._preprocess |
16 | 15 | 0 |
transformers.models.pixtral.modeling_pixtral (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PixtralVisionModel.init |
2 | 0 | 0 |
meth |
PixtralVisionModel.get_input_embeddings |
1 | 0 | 0 |
meth |
PixtralVisionModel.forward |
8 | 7 | 0 |
attr |
PixtralVisionModel.patch_conv |
1 | 0 | 0 |
attr |
PixtralVisionModel.patch_size |
1 | 0 | 0 |
attr |
PixtralVisionModel.ln_pre |
1 | 0 | 0 |
attr |
PixtralVisionModel.transformer |
1 | 0 | 0 |
attr |
PixtralVisionModel.patch_positional_embedding |
1 | 0 | 0 |
transformers.models.pixtral.processing_pixtral (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PixtralProcessor.init |
10 | 2 | 0 |
meth |
PixtralProcessor._get_num_multimodal_tokens |
3 | 0 | 0 |
prop |
PixtralProcessor.model_input_names |
1 | 0 | 0 |
attr |
PixtralProcessor.patch_size |
1 | 0 | 0 |
attr |
PixtralProcessor.spatial_merge_size |
1 | 0 | 0 |
attr |
PixtralProcessor.image_token |
1 | 0 | 0 |
attr |
PixtralProcessor.image_token_id |
1 | 0 | 0 |
attr |
PixtralProcessor.image_break_token |
1 | 0 | 0 |
attr |
PixtralProcessor.image_end_token |
1 | 0 | 0 |
attr |
PixtralProcessor.image_break_token_id |
1 | 0 | 0 |
attr |
PixtralProcessor.image_end_token_id |
1 | 0 | 0 |
attr |
PixtralProcessor.image_ids |
1 | 0 | 0 |
transformers.models.plbart.configuration_plbart (54 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PLBartConfig.init |
28 | 0 | 0 |
attr |
PLBartConfig.is_decoder |
1 | 0 | 0 |
attr |
PLBartConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
PLBartConfig.vocab_size |
1 | 0 | 0 |
attr |
PLBartConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
PLBartConfig.d_model |
1 | 0 | 0 |
attr |
PLBartConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
PLBartConfig.encoder_layers |
1 | 0 | 0 |
attr |
PLBartConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
PLBartConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
PLBartConfig.decoder_layers |
1 | 0 | 0 |
attr |
PLBartConfig.decoder_attention_heads |
1 | 0 | 0 |
attr |
PLBartConfig.dropout |
1 | 0 | 0 |
attr |
PLBartConfig.attention_dropout |
1 | 0 | 0 |
attr |
PLBartConfig.activation_dropout |
1 | 0 | 0 |
attr |
PLBartConfig.activation_function |
1 | 0 | 0 |
attr |
PLBartConfig.init_std |
1 | 0 | 0 |
attr |
PLBartConfig.encoder_layerdrop |
1 | 0 | 0 |
attr |
PLBartConfig.decoder_layerdrop |
1 | 0 | 0 |
attr |
PLBartConfig.classifier_dropout |
1 | 0 | 0 |
attr |
PLBartConfig.use_cache |
1 | 0 | 0 |
attr |
PLBartConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
PLBartConfig.scale_embedding |
1 | 0 | 0 |
attr |
PLBartConfig.pad_token_id |
1 | 0 | 0 |
attr |
PLBartConfig.bos_token_id |
1 | 0 | 0 |
attr |
PLBartConfig.eos_token_id |
1 | 0 | 0 |
attr |
PLBartConfig.forced_eos_token_id |
1 | 0 | 0 |
transformers.models.plbart.modeling_plbart (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PLBartForConditionalGeneration.init |
2 | 1 | 0 |
meth |
PLBartForConditionalGeneration.forward |
16 | 15 | 0 |
meth |
PLBartForConditionalGeneration.prepare_decoder_input_ids_from_labels |
2 | 1 | 0 |
attr |
PLBartForConditionalGeneration.model |
1 | 0 | 0 |
attr |
PLBartForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
PLBartPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
PLBartForSequenceClassification.init |
3 | 1 | 0 |
meth |
PLBartForSequenceClassification.forward |
15 | 14 | 0 |
attr |
PLBartForSequenceClassification.model |
1 | 0 | 0 |
attr |
PLBartForSequenceClassification.classification_head |
1 | 0 | 0 |
meth |
PLBartModel.init |
2 | 1 | 0 |
meth |
PLBartModel.get_input_embeddings |
1 | 0 | 0 |
meth |
PLBartModel.set_input_embeddings |
2 | 0 | 0 |
meth |
PLBartModel.forward |
15 | 14 | 0 |
attr |
PLBartModel.shared |
1 | 0 | 0 |
attr |
PLBartModel.encoder |
1 | 0 | 0 |
attr |
PLBartModel.decoder |
1 | 0 | 0 |
meth |
PLBartForCausalLM.init |
2 | 0 | 0 |
meth |
PLBartForCausalLM.get_input_embeddings |
1 | 0 | 0 |
meth |
PLBartForCausalLM.set_input_embeddings |
2 | 0 | 0 |
meth |
PLBartForCausalLM.forward |
15 | 14 | 0 |
attr |
PLBartForCausalLM.model |
1 | 0 | 0 |
attr |
PLBartForCausalLM.lm_head |
1 | 0 | 0 |
transformers.models.plbart.modular_plbart (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PLBartForConditionalGeneration.init |
2 | 1 | 0 |
meth |
PLBartForConditionalGeneration.forward |
16 | 15 | 0 |
meth |
PLBartForConditionalGeneration.prepare_decoder_input_ids_from_labels |
2 | 1 | 0 |
attr |
PLBartForConditionalGeneration.model |
1 | 0 | 0 |
attr |
PLBartForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
PLBartPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
PLBartForSequenceClassification.forward |
2 | 0 | 0 |
meth |
PLBartModel.init |
2 | 1 | 0 |
meth |
PLBartModel.get_input_embeddings |
1 | 0 | 0 |
meth |
PLBartModel.set_input_embeddings |
2 | 0 | 0 |
meth |
PLBartModel.forward |
15 | 14 | 0 |
attr |
PLBartModel.shared |
1 | 0 | 0 |
attr |
PLBartModel.encoder |
1 | 0 | 0 |
attr |
PLBartModel.decoder |
1 | 0 | 0 |
meth |
PLBartForCausalLM.forward |
2 | 0 | 0 |
transformers.models.plbart.tokenization_plbart (44 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PLBartTokenizer.init |
16 | 1 | 0 |
meth |
PLBartTokenizer.get_vocab |
1 | 0 | 0 |
meth |
PLBartTokenizer._build_translation_inputs |
6 | 3 | 0 |
meth |
PLBartTokenizer._convert_token_to_id |
2 | 0 | 0 |
meth |
PLBartTokenizer._convert_id_to_token |
2 | 0 | 0 |
meth |
PLBartTokenizer.prepare_seq2seq_batch |
6 | 5 | 0 |
meth |
PLBartTokenizer._switch_to_input_mode |
1 | 0 | 0 |
meth |
PLBartTokenizer._switch_to_target_mode |
1 | 0 | 0 |
meth |
PLBartTokenizer.set_src_lang_special_tokens |
2 | 1 | 0 |
meth |
PLBartTokenizer.decode |
5 | 0 | 0 |
prop |
PLBartTokenizer.vocab_size |
1 | 0 | 0 |
attr |
PLBartTokenizer.sp_model_kwargs |
1 | 0 | 0 |
attr |
PLBartTokenizer.language_codes |
1 | 0 | 0 |
attr |
PLBartTokenizer.vocab_file |
1 | 0 | 0 |
attr |
PLBartTokenizer.lang_code_to_id |
1 | 0 | 0 |
attr |
PLBartTokenizer.id_to_lang_code |
1 | 0 | 0 |
attr |
PLBartTokenizer.fairseq_tokens_to_ids |
1 | 0 | 0 |
attr |
PLBartTokenizer.fairseq_ids_to_tokens |
1 | 0 | 0 |
attr |
PLBartTokenizer.fairseq_offset |
1 | 0 | 0 |
attr |
PLBartTokenizer.sp_model_size |
1 | 0 | 0 |
attr |
PLBartTokenizer.tgt_lang |
1 | 0 | 0 |
attr |
PLBartTokenizer.cur_lang_code_id |
1 | 0 | 0 |
transformers.models.poolformer.configuration_poolformer (34 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PoolFormerConfig.init |
18 | 0 | 0 |
attr |
PoolFormerConfig.num_channels |
1 | 0 | 0 |
attr |
PoolFormerConfig.patch_size |
1 | 0 | 0 |
attr |
PoolFormerConfig.stride |
1 | 0 | 0 |
attr |
PoolFormerConfig.padding |
1 | 0 | 0 |
attr |
PoolFormerConfig.pool_size |
1 | 0 | 0 |
attr |
PoolFormerConfig.hidden_sizes |
1 | 0 | 0 |
attr |
PoolFormerConfig.mlp_ratio |
1 | 0 | 0 |
attr |
PoolFormerConfig.depths |
1 | 0 | 0 |
attr |
PoolFormerConfig.patch_sizes |
1 | 0 | 0 |
attr |
PoolFormerConfig.strides |
1 | 0 | 0 |
attr |
PoolFormerConfig.num_encoder_blocks |
1 | 0 | 0 |
attr |
PoolFormerConfig.drop_path_rate |
1 | 0 | 0 |
attr |
PoolFormerConfig.hidden_act |
1 | 0 | 0 |
attr |
PoolFormerConfig.use_layer_scale |
1 | 0 | 0 |
attr |
PoolFormerConfig.layer_scale_init_value |
1 | 0 | 0 |
attr |
PoolFormerConfig.initializer_range |
1 | 0 | 0 |
transformers.models.poolformer.image_processing_poolformer (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PoolFormerImageProcessor.init |
13 | 12 | 0 |
meth |
PoolFormerImageProcessor.resize |
8 | 7 | 0 |
attr |
PoolFormerImageProcessor.do_resize |
1 | 0 | 0 |
attr |
PoolFormerImageProcessor.size |
1 | 0 | 0 |
attr |
PoolFormerImageProcessor.crop_pct |
1 | 0 | 0 |
attr |
PoolFormerImageProcessor.resample |
1 | 0 | 0 |
attr |
PoolFormerImageProcessor.do_center_crop |
1 | 0 | 0 |
attr |
PoolFormerImageProcessor.crop_size |
1 | 0 | 0 |
attr |
PoolFormerImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
PoolFormerImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
PoolFormerImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
PoolFormerImageProcessor.image_mean |
1 | 0 | 0 |
attr |
PoolFormerImageProcessor.image_std |
1 | 0 | 0 |
transformers.models.poolformer.image_processing_poolformer_fast (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PoolFormerImageProcessorFast.init |
2 | 1 | 0 |
meth |
PoolFormerImageProcessorFast.resize |
7 | 6 | 0 |
meth |
PoolFormerImageProcessorFast.center_crop |
4 | 3 | 0 |
meth |
PoolFormerImageProcessorFast._preprocess |
16 | 15 | 0 |
transformers.models.poolformer.modeling_poolformer (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PoolFormerForImageClassification.init |
2 | 0 | 0 |
meth |
PoolFormerForImageClassification.get_input_embeddings |
1 | 0 | 0 |
meth |
PoolFormerForImageClassification.set_input_embeddings |
2 | 0 | 0 |
meth |
PoolFormerForImageClassification.forward |
6 | 5 | 0 |
attr |
PoolFormerForImageClassification.num_labels |
1 | 0 | 0 |
attr |
PoolFormerForImageClassification.poolformer |
1 | 0 | 0 |
attr |
PoolFormerForImageClassification.norm |
1 | 0 | 0 |
attr |
PoolFormerForImageClassification.classifier |
1 | 0 | 0 |
meth |
PoolFormerModel.init |
2 | 0 | 0 |
meth |
PoolFormerModel.get_input_embeddings |
1 | 0 | 0 |
meth |
PoolFormerModel.set_input_embeddings |
2 | 0 | 0 |
meth |
PoolFormerModel.forward |
5 | 4 | 0 |
attr |
PoolFormerModel.encoder |
1 | 0 | 0 |
meth |
PoolFormerPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.pop2piano.configuration_pop2piano (47 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Pop2PianoConfig.init |
23 | 0 | 0 |
attr |
Pop2PianoConfig.is_decoder |
1 | 0 | 0 |
attr |
Pop2PianoConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Pop2PianoConfig.vocab_size |
1 | 0 | 0 |
attr |
Pop2PianoConfig.composer_vocab_size |
1 | 0 | 0 |
attr |
Pop2PianoConfig.d_model |
1 | 0 | 0 |
attr |
Pop2PianoConfig.d_kv |
1 | 0 | 0 |
attr |
Pop2PianoConfig.d_ff |
1 | 0 | 0 |
attr |
Pop2PianoConfig.num_layers |
1 | 0 | 0 |
attr |
Pop2PianoConfig.num_decoder_layers |
1 | 0 | 0 |
attr |
Pop2PianoConfig.num_heads |
1 | 0 | 0 |
attr |
Pop2PianoConfig.relative_attention_num_buckets |
1 | 0 | 0 |
attr |
Pop2PianoConfig.relative_attention_max_distance |
1 | 0 | 0 |
attr |
Pop2PianoConfig.dropout_rate |
1 | 0 | 0 |
attr |
Pop2PianoConfig.layer_norm_epsilon |
1 | 0 | 0 |
attr |
Pop2PianoConfig.initializer_factor |
1 | 0 | 0 |
attr |
Pop2PianoConfig.feed_forward_proj |
1 | 0 | 0 |
attr |
Pop2PianoConfig.use_cache |
1 | 0 | 0 |
attr |
Pop2PianoConfig.dense_act_fn |
1 | 0 | 0 |
attr |
Pop2PianoConfig.is_gated_act |
1 | 0 | 0 |
attr |
Pop2PianoConfig.hidden_size |
1 | 0 | 0 |
attr |
Pop2PianoConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Pop2PianoConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Pop2PianoConfig.pad_token_id |
1 | 0 | 0 |
attr |
Pop2PianoConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.pop2piano.feature_extraction_pop2piano (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Pop2PianoFeatureExtractor.init |
9 | 7 | 0 |
meth |
Pop2PianoFeatureExtractor.mel_spectrogram |
2 | 1 | 0 |
meth |
Pop2PianoFeatureExtractor.extract_rhythm |
2 | 1 | 0 |
meth |
Pop2PianoFeatureExtractor.interpolate_beat_times |
4 | 3 | 0 |
meth |
Pop2PianoFeatureExtractor.preprocess_mel |
3 | 2 | 0 |
meth |
Pop2PianoFeatureExtractor._pad |
3 | 1 | 0 |
meth |
Pop2PianoFeatureExtractor.pad |
5 | 4 | 0 |
meth |
Pop2PianoFeatureExtractor.call |
8 | 7 | 0 |
attr |
Pop2PianoFeatureExtractor.sampling_rate |
1 | 0 | 0 |
attr |
Pop2PianoFeatureExtractor.padding_value |
1 | 0 | 0 |
attr |
Pop2PianoFeatureExtractor.window_size |
1 | 0 | 0 |
attr |
Pop2PianoFeatureExtractor.hop_length |
1 | 0 | 0 |
attr |
Pop2PianoFeatureExtractor.min_frequency |
1 | 0 | 0 |
attr |
Pop2PianoFeatureExtractor.feature_size |
1 | 0 | 0 |
attr |
Pop2PianoFeatureExtractor.num_bars |
1 | 0 | 0 |
attr |
Pop2PianoFeatureExtractor.mel_filters |
1 | 0 | 0 |
transformers.models.pop2piano.modeling_pop2piano (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Pop2PianoPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Pop2PianoPreTrainedModel._shift_right |
2 | 0 | 0 |
meth |
Pop2PianoForConditionalGeneration.init |
2 | 1 | 0 |
meth |
Pop2PianoForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Pop2PianoForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Pop2PianoForConditionalGeneration.get_mel_conditioner_outputs |
5 | 4 | 0 |
meth |
Pop2PianoForConditionalGeneration.forward |
17 | 16 | 0 |
meth |
Pop2PianoForConditionalGeneration.generate |
6 | 0 | 0 |
meth |
Pop2PianoForConditionalGeneration.prepare_decoder_input_ids_from_labels |
2 | 1 | 0 |
attr |
Pop2PianoForConditionalGeneration.model_dim |
1 | 0 | 0 |
attr |
Pop2PianoForConditionalGeneration.shared |
1 | 0 | 0 |
attr |
Pop2PianoForConditionalGeneration.mel_conditioner |
1 | 0 | 0 |
attr |
Pop2PianoForConditionalGeneration.encoder |
1 | 0 | 0 |
attr |
Pop2PianoForConditionalGeneration.decoder |
1 | 0 | 0 |
attr |
Pop2PianoForConditionalGeneration.lm_head |
1 | 0 | 0 |
transformers.models.pop2piano.processing_pop2piano (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Pop2PianoProcessor.init |
3 | 0 | 0 |
meth |
Pop2PianoProcessor.call |
12 | 11 | 0 |
meth |
Pop2PianoProcessor.batch_decode |
4 | 3 | 0 |
meth |
Pop2PianoProcessor.save_pretrained |
3 | 0 | 0 |
meth |
Pop2PianoProcessor.from_pretrained |
3 | 0 | 0 |
transformers.models.pop2piano.tokenization_pop2piano (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Pop2PianoTokenizer.init |
9 | 0 | 0 |
meth |
Pop2PianoTokenizer.get_vocab |
1 | 0 | 0 |
meth |
Pop2PianoTokenizer._convert_token_to_id |
3 | 1 | 0 |
meth |
Pop2PianoTokenizer.relative_batch_tokens_ids_to_notes |
5 | 4 | 0 |
meth |
Pop2PianoTokenizer.relative_batch_tokens_ids_to_midi |
6 | 5 | 0 |
meth |
Pop2PianoTokenizer.relative_tokens_ids_to_notes |
4 | 3 | 0 |
meth |
Pop2PianoTokenizer.notes_to_midi |
4 | 3 | 0 |
meth |
Pop2PianoTokenizer.encode_plus |
5 | 4 | 0 |
meth |
Pop2PianoTokenizer.batch_encode_plus |
5 | 4 | 0 |
meth |
Pop2PianoTokenizer.call |
10 | 9 | 0 |
meth |
Pop2PianoTokenizer.batch_decode |
4 | 2 | 0 |
prop |
Pop2PianoTokenizer.vocab_size |
1 | 0 | 0 |
attr |
Pop2PianoTokenizer.default_velocity |
1 | 0 | 0 |
attr |
Pop2PianoTokenizer.num_bars |
1 | 0 | 0 |
attr |
Pop2PianoTokenizer.decoder |
1 | 0 | 0 |
attr |
Pop2PianoTokenizer.encoder |
1 | 0 | 0 |
transformers.models.pp_doclayout_v2.configuration_pp_doclayout_v2 (83 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PPDocLayoutV2Config.init |
43 | 0 | 0 |
attr |
PPDocLayoutV2Config.initializer_range |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.initializer_bias_prior_prob |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.layer_norm_eps |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.batch_norm_eps |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.backbone_config |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.freeze_backbone_batch_norms |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.encoder_hidden_dim |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.encoder_in_channels |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.feat_strides |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.encoder_layers |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.encoder_ffn_dim |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.encoder_attention_heads |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.dropout |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.activation_dropout |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.encode_proj_layers |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.positional_encoding_temperature |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.encoder_activation_function |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.activation_function |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.eval_size |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.normalize_before |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.hidden_expansion |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.d_model |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.num_queries |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.decoder_in_channels |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.decoder_ffn_dim |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.num_feature_levels |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.decoder_n_points |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.decoder_layers |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.decoder_attention_heads |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.decoder_activation_function |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.attention_dropout |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.num_denoising |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.label_noise_ratio |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.box_noise_scale |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.learn_initial_query |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.anchor_image_size |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.disable_custom_kernels |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.class_thresholds |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.class_order |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.reading_order_config |
1 | 0 | 0 |
transformers.models.pp_doclayout_v2.image_processing_pp_doclayout_v2_fast (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PPDocLayoutV2ImageProcessorFast._preprocess |
17 | 16 | 0 |
meth |
PPDocLayoutV2ImageProcessorFast._get_order_seqs |
2 | 0 | 0 |
meth |
PPDocLayoutV2ImageProcessorFast.extract_custom_vertices |
1 | 0 | 0 |
meth |
PPDocLayoutV2ImageProcessorFast.post_process_object_detection |
4 | 2 | 0 |
transformers.models.pp_doclayout_v2.modeling_pp_doclayout_v2 (38 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PPDocLayoutV2ForObjectDetection.init |
2 | 1 | 0 |
meth |
PPDocLayoutV2ForObjectDetection._set_aux_loss |
3 | 0 | 0 |
attr |
PPDocLayoutV2ForObjectDetection.model |
1 | 0 | 0 |
attr |
PPDocLayoutV2ForObjectDetection.reading_order |
1 | 0 | 0 |
attr |
PPDocLayoutV2ForObjectDetection.num_queries |
1 | 0 | 0 |
meth |
PPDocLayoutV2PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
PPDocLayoutV2Model.init |
2 | 1 | 0 |
meth |
PPDocLayoutV2Model.freeze_backbone |
1 | 0 | 0 |
meth |
PPDocLayoutV2Model.unfreeze_backbone |
1 | 0 | 0 |
meth |
PPDocLayoutV2Model.generate_anchors |
5 | 0 | 0 |
attr |
PPDocLayoutV2Model.backbone |
1 | 0 | 0 |
attr |
PPDocLayoutV2Model.encoder_input_proj |
1 | 0 | 0 |
attr |
PPDocLayoutV2Model.encoder |
1 | 0 | 0 |
attr |
PPDocLayoutV2Model.enc_output |
1 | 0 | 0 |
attr |
PPDocLayoutV2Model.enc_score_head |
1 | 0 | 0 |
attr |
PPDocLayoutV2Model.enc_bbox_head |
1 | 0 | 0 |
attr |
PPDocLayoutV2Model.decoder_input_proj |
1 | 0 | 0 |
attr |
PPDocLayoutV2Model.decoder |
1 | 0 | 0 |
attr |
PPDocLayoutV2Model.denoising_class_embed |
1 | 0 | 0 |
attr |
PPDocLayoutV2Model.weight_embedding |
1 | 0 | 0 |
meth |
PPDocLayoutV2ReadingOrder.init |
2 | 0 | 0 |
meth |
PPDocLayoutV2ReadingOrder.forward |
5 | 1 | 0 |
attr |
PPDocLayoutV2ReadingOrder.embeddings |
1 | 0 | 0 |
attr |
PPDocLayoutV2ReadingOrder.label_embeddings |
1 | 0 | 0 |
attr |
PPDocLayoutV2ReadingOrder.label_features_projection |
1 | 0 | 0 |
attr |
PPDocLayoutV2ReadingOrder.encoder |
1 | 0 | 0 |
attr |
PPDocLayoutV2ReadingOrder.relative_head |
1 | 0 | 0 |
transformers.models.pp_doclayout_v2.modular_pp_doclayout_v2 (108 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PPDocLayoutV2PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
PPDocLayoutV2Config.init |
43 | 0 | 0 |
attr |
PPDocLayoutV2Config.initializer_range |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.initializer_bias_prior_prob |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.layer_norm_eps |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.batch_norm_eps |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.backbone_config |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.freeze_backbone_batch_norms |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.encoder_hidden_dim |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.encoder_in_channels |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.feat_strides |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.encoder_layers |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.encoder_ffn_dim |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.encoder_attention_heads |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.dropout |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.activation_dropout |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.encode_proj_layers |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.positional_encoding_temperature |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.encoder_activation_function |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.activation_function |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.eval_size |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.normalize_before |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.hidden_expansion |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.d_model |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.num_queries |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.decoder_in_channels |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.decoder_ffn_dim |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.num_feature_levels |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.decoder_n_points |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.decoder_layers |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.decoder_attention_heads |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.decoder_activation_function |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.attention_dropout |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.num_denoising |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.label_noise_ratio |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.box_noise_scale |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.learn_initial_query |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.anchor_image_size |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.disable_custom_kernels |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.class_thresholds |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.class_order |
1 | 0 | 0 |
attr |
PPDocLayoutV2Config.reading_order_config |
1 | 0 | 0 |
meth |
PPDocLayoutV2ImageProcessorFast.extract_custom_vertices |
1 | 0 | 0 |
meth |
PPDocLayoutV2ImageProcessorFast._mask2polygon |
1 | 0 | 0 |
meth |
PPDocLayoutV2ImageProcessorFast._extract_polygon_points_by_masks |
1 | 0 | 0 |
meth |
PPDocLayoutV2ImageProcessorFast.post_process_object_detection |
4 | 2 | 0 |
meth |
PPDocLayoutV2ForObjectDetection.init |
2 | 1 | 0 |
attr |
PPDocLayoutV2ForObjectDetection.reading_order |
1 | 0 | 0 |
attr |
PPDocLayoutV2ForObjectDetection.num_queries |
1 | 0 | 0 |
attr |
PPDocLayoutV2ForObjectDetection.config |
1 | 0 | 0 |
meth |
PPDocLayoutV2Model.init |
2 | 1 | 0 |
attr |
PPDocLayoutV2Model.denoising_class_embed |
1 | 0 | 0 |
meth |
PPDocLayoutV2ReadingOrder.init |
2 | 0 | 0 |
meth |
PPDocLayoutV2ReadingOrder.forward |
5 | 1 | 0 |
attr |
PPDocLayoutV2ReadingOrder.embeddings |
1 | 0 | 0 |
attr |
PPDocLayoutV2ReadingOrder.label_embeddings |
1 | 0 | 0 |
attr |
PPDocLayoutV2ReadingOrder.label_features_projection |
1 | 0 | 0 |
attr |
PPDocLayoutV2ReadingOrder.encoder |
1 | 0 | 0 |
attr |
PPDocLayoutV2ReadingOrder.relative_head |
1 | 0 | 0 |
attr |
PPDocLayoutV2ReadingOrder.config |
1 | 0 | 0 |
transformers.models.pp_doclayout_v3.configuration_pp_doclayout_v3 (91 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PPDocLayoutV3Config.init |
47 | 0 | 0 |
attr |
PPDocLayoutV3Config.initializer_range |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.initializer_bias_prior_prob |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.layer_norm_eps |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.batch_norm_eps |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.backbone_config |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.freeze_backbone_batch_norms |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.encoder_hidden_dim |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.encoder_in_channels |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.feat_strides |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.encoder_layers |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.encoder_ffn_dim |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.encoder_attention_heads |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.dropout |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.activation_dropout |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.encode_proj_layers |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.positional_encoding_temperature |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.encoder_activation_function |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.activation_function |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.eval_size |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.normalize_before |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.hidden_expansion |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.mask_feature_channels |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.x4_feat_dim |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.d_model |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.num_queries |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.num_prototypes |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.decoder_in_channels |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.decoder_ffn_dim |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.num_feature_levels |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.decoder_n_points |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.decoder_layers |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.decoder_attention_heads |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.decoder_activation_function |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.attention_dropout |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.num_denoising |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.label_noise_ratio |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.mask_enhanced |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.box_noise_scale |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.learn_initial_query |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.anchor_image_size |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.disable_custom_kernels |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.global_pointer_head_size |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.gp_dropout_value |
1 | 0 | 0 |
transformers.models.pp_doclayout_v3.image_processing_pp_doclayout_v3_fast (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PPDocLayoutV3ImageProcessorFast._preprocess |
17 | 16 | 0 |
meth |
PPDocLayoutV3ImageProcessorFast._get_order_seqs |
2 | 0 | 0 |
meth |
PPDocLayoutV3ImageProcessorFast.extract_custom_vertices |
3 | 0 | 0 |
meth |
PPDocLayoutV3ImageProcessorFast._mask2polygon |
3 | 0 | 0 |
meth |
PPDocLayoutV3ImageProcessorFast._extract_polygon_points_by_masks |
4 | 0 | 0 |
meth |
PPDocLayoutV3ImageProcessorFast.post_process_object_detection |
4 | 2 | 0 |
transformers.models.pp_doclayout_v3.modeling_pp_doclayout_v3 (31 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PPDocLayoutV3Model.init |
2 | 1 | 0 |
meth |
PPDocLayoutV3Model.freeze_backbone |
1 | 0 | 0 |
meth |
PPDocLayoutV3Model.unfreeze_backbone |
1 | 0 | 0 |
meth |
PPDocLayoutV3Model.generate_anchors |
5 | 0 | 0 |
attr |
PPDocLayoutV3Model.backbone |
1 | 0 | 0 |
attr |
PPDocLayoutV3Model.encoder_input_proj |
1 | 0 | 0 |
attr |
PPDocLayoutV3Model.encoder |
1 | 0 | 0 |
attr |
PPDocLayoutV3Model.enc_output |
1 | 0 | 0 |
attr |
PPDocLayoutV3Model.enc_score_head |
1 | 0 | 0 |
attr |
PPDocLayoutV3Model.enc_bbox_head |
1 | 0 | 0 |
attr |
PPDocLayoutV3Model.decoder_input_proj |
1 | 0 | 0 |
attr |
PPDocLayoutV3Model.decoder |
1 | 0 | 0 |
attr |
PPDocLayoutV3Model.decoder_order_head |
1 | 0 | 0 |
attr |
PPDocLayoutV3Model.decoder_global_pointer |
1 | 0 | 0 |
attr |
PPDocLayoutV3Model.decoder_norm |
1 | 0 | 0 |
attr |
PPDocLayoutV3Model.mask_enhanced |
1 | 0 | 0 |
attr |
PPDocLayoutV3Model.mask_query_head |
1 | 0 | 0 |
attr |
PPDocLayoutV3Model.denoising_class_embed |
1 | 0 | 0 |
attr |
PPDocLayoutV3Model.weight_embedding |
1 | 0 | 0 |
meth |
PPDocLayoutV3ForObjectDetection.init |
2 | 1 | 0 |
meth |
PPDocLayoutV3ForObjectDetection._set_aux_loss |
3 | 0 | 0 |
attr |
PPDocLayoutV3ForObjectDetection.model |
1 | 0 | 0 |
attr |
PPDocLayoutV3ForObjectDetection.num_queries |
1 | 0 | 0 |
meth |
PPDocLayoutV3PreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.pp_doclayout_v3.modular_pp_doclayout_v3 (118 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PPDocLayoutV3Model.init |
2 | 1 | 0 |
attr |
PPDocLayoutV3Model.encoder_input_proj |
1 | 0 | 0 |
attr |
PPDocLayoutV3Model.decoder_order_head |
1 | 0 | 0 |
attr |
PPDocLayoutV3Model.decoder_global_pointer |
1 | 0 | 0 |
attr |
PPDocLayoutV3Model.decoder_norm |
1 | 0 | 0 |
attr |
PPDocLayoutV3Model.decoder |
1 | 0 | 0 |
attr |
PPDocLayoutV3Model.mask_enhanced |
1 | 0 | 0 |
attr |
PPDocLayoutV3Model.mask_query_head |
1 | 0 | 0 |
meth |
PPDocLayoutV3PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
PPDocLayoutV3ImageProcessorFast._preprocess |
17 | 16 | 0 |
meth |
PPDocLayoutV3ImageProcessorFast._get_order_seqs |
2 | 0 | 0 |
meth |
PPDocLayoutV3ImageProcessorFast.extract_custom_vertices |
3 | 0 | 0 |
meth |
PPDocLayoutV3ImageProcessorFast._mask2polygon |
3 | 0 | 0 |
meth |
PPDocLayoutV3ImageProcessorFast._extract_polygon_points_by_masks |
4 | 0 | 0 |
meth |
PPDocLayoutV3ImageProcessorFast.post_process_object_detection |
4 | 2 | 0 |
meth |
PPDocLayoutV3ForObjectDetection.init |
2 | 1 | 0 |
attr |
PPDocLayoutV3ForObjectDetection.num_queries |
1 | 0 | 0 |
meth |
PPDocLayoutV3Config.init |
47 | 0 | 0 |
attr |
PPDocLayoutV3Config.initializer_range |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.initializer_bias_prior_prob |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.layer_norm_eps |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.batch_norm_eps |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.backbone_config |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.freeze_backbone_batch_norms |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.encoder_hidden_dim |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.encoder_in_channels |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.feat_strides |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.encoder_layers |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.encoder_ffn_dim |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.encoder_attention_heads |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.dropout |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.activation_dropout |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.encode_proj_layers |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.positional_encoding_temperature |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.encoder_activation_function |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.activation_function |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.eval_size |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.normalize_before |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.hidden_expansion |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.mask_feature_channels |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.x4_feat_dim |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.d_model |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.num_queries |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.num_prototypes |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.decoder_in_channels |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.decoder_ffn_dim |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.num_feature_levels |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.decoder_n_points |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.decoder_layers |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.decoder_attention_heads |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.decoder_activation_function |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.attention_dropout |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.num_denoising |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.label_noise_ratio |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.mask_enhanced |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.box_noise_scale |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.learn_initial_query |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.anchor_image_size |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.disable_custom_kernels |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.global_pointer_head_size |
1 | 0 | 0 |
attr |
PPDocLayoutV3Config.gp_dropout_value |
1 | 0 | 0 |
transformers.models.prompt_depth_anything.configuration_prompt_depth_anything (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PromptDepthAnythingConfig.init |
13 | 0 | 0 |
attr |
PromptDepthAnythingConfig.backbone_config |
1 | 0 | 0 |
attr |
PromptDepthAnythingConfig.reassemble_hidden_size |
1 | 0 | 0 |
attr |
PromptDepthAnythingConfig.patch_size |
1 | 0 | 0 |
attr |
PromptDepthAnythingConfig.initializer_range |
1 | 0 | 0 |
attr |
PromptDepthAnythingConfig.reassemble_factors |
1 | 0 | 0 |
attr |
PromptDepthAnythingConfig.neck_hidden_sizes |
1 | 0 | 0 |
attr |
PromptDepthAnythingConfig.fusion_hidden_size |
1 | 0 | 0 |
attr |
PromptDepthAnythingConfig.head_in_index |
1 | 0 | 0 |
attr |
PromptDepthAnythingConfig.head_hidden_size |
1 | 0 | 0 |
attr |
PromptDepthAnythingConfig.depth_estimation_type |
1 | 0 | 0 |
attr |
PromptDepthAnythingConfig.max_depth |
1 | 0 | 0 |
transformers.models.prompt_depth_anything.image_processing_prompt_depth_anything (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PromptDepthAnythingImageProcessor.init |
15 | 13 | 0 |
meth |
PromptDepthAnythingImageProcessor.resize |
9 | 8 | 0 |
meth |
PromptDepthAnythingImageProcessor.pad_image |
5 | 4 | 0 |
attr |
PromptDepthAnythingImageProcessor.do_resize |
1 | 0 | 0 |
attr |
PromptDepthAnythingImageProcessor.size |
1 | 0 | 0 |
attr |
PromptDepthAnythingImageProcessor.keep_aspect_ratio |
1 | 0 | 0 |
attr |
PromptDepthAnythingImageProcessor.ensure_multiple_of |
1 | 0 | 0 |
attr |
PromptDepthAnythingImageProcessor.resample |
1 | 0 | 0 |
attr |
PromptDepthAnythingImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
PromptDepthAnythingImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
PromptDepthAnythingImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
PromptDepthAnythingImageProcessor.image_mean |
1 | 0 | 0 |
attr |
PromptDepthAnythingImageProcessor.image_std |
1 | 0 | 0 |
attr |
PromptDepthAnythingImageProcessor.do_pad |
1 | 0 | 0 |
attr |
PromptDepthAnythingImageProcessor.size_divisor |
1 | 0 | 0 |
attr |
PromptDepthAnythingImageProcessor.prompt_scale_to_meter |
1 | 0 | 0 |
transformers.models.prompt_depth_anything.image_processing_prompt_depth_anything_fast (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PromptDepthAnythingImageProcessorFast.init |
2 | 1 | 0 |
meth |
PromptDepthAnythingImageProcessorFast._preprocess |
17 | 16 | 0 |
transformers.models.prompt_depth_anything.modeling_prompt_depth_anything (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PromptDepthAnythingForDepthEstimation.init |
2 | 0 | 0 |
meth |
PromptDepthAnythingForDepthEstimation.forward |
8 | 7 | 0 |
attr |
PromptDepthAnythingForDepthEstimation.backbone |
1 | 0 | 0 |
attr |
PromptDepthAnythingForDepthEstimation.neck |
1 | 0 | 0 |
attr |
PromptDepthAnythingForDepthEstimation.head |
1 | 0 | 0 |
transformers.models.prompt_depth_anything.modular_prompt_depth_anything (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PromptDepthAnythingForDepthEstimation.forward |
8 | 7 | 0 |
transformers.models.prophetnet.configuration_prophetnet (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ProphetNetConfig.init |
30 | 28 | 0 |
prop |
ProphetNetConfig.num_hidden_layers |
2 | 1 | 0 |
attr |
ProphetNetConfig.vocab_size |
1 | 0 | 0 |
attr |
ProphetNetConfig.hidden_size |
1 | 0 | 0 |
attr |
ProphetNetConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
ProphetNetConfig.num_encoder_layers |
1 | 0 | 0 |
attr |
ProphetNetConfig.num_encoder_attention_heads |
1 | 0 | 0 |
attr |
ProphetNetConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
ProphetNetConfig.num_decoder_layers |
1 | 0 | 0 |
attr |
ProphetNetConfig.num_decoder_attention_heads |
1 | 0 | 0 |
attr |
ProphetNetConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
ProphetNetConfig.init_std |
1 | 0 | 0 |
attr |
ProphetNetConfig.activation_function |
1 | 0 | 0 |
attr |
ProphetNetConfig.ngram |
1 | 0 | 0 |
attr |
ProphetNetConfig.num_buckets |
1 | 0 | 0 |
attr |
ProphetNetConfig.relative_max_distance |
1 | 0 | 0 |
attr |
ProphetNetConfig.disable_ngram_loss |
1 | 0 | 0 |
attr |
ProphetNetConfig.eps |
1 | 0 | 0 |
attr |
ProphetNetConfig.attention_dropout |
1 | 0 | 0 |
attr |
ProphetNetConfig.activation_dropout |
1 | 0 | 0 |
attr |
ProphetNetConfig.dropout |
1 | 0 | 0 |
attr |
ProphetNetConfig.use_cache |
1 | 0 | 0 |
attr |
ProphetNetConfig.pad_token_id |
1 | 0 | 0 |
attr |
ProphetNetConfig.bos_token_id |
1 | 0 | 0 |
attr |
ProphetNetConfig.eos_token_id |
1 | 0 | 0 |
attr |
ProphetNetConfig.add_cross_attention |
1 | 0 | 0 |
attr |
ProphetNetConfig.decoder_start_token_id |
1 | 0 | 0 |
attr |
ProphetNetConfig.is_decoder |
1 | 0 | 0 |
attr |
ProphetNetConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.prophetnet.modeling_prophetnet (78 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ProphetNetModel.init |
2 | 1 | 0 |
meth |
ProphetNetModel.get_input_embeddings |
1 | 0 | 0 |
meth |
ProphetNetModel.set_input_embeddings |
2 | 0 | 0 |
meth |
ProphetNetModel.forward |
15 | 14 | 0 |
attr |
ProphetNetModel.word_embeddings |
1 | 0 | 0 |
attr |
ProphetNetModel.encoder |
1 | 0 | 0 |
attr |
ProphetNetModel.decoder |
1 | 0 | 0 |
meth |
ProphetNetDecoder.init |
2 | 1 | 0 |
meth |
ProphetNetDecoder.get_input_embeddings |
1 | 0 | 0 |
meth |
ProphetNetDecoder.set_input_embeddings |
2 | 0 | 0 |
meth |
ProphetNetDecoder.forward |
13 | 12 | 0 |
meth |
ProphetNetDecoder.compute_buffered_relative_buckets |
2 | 0 | 0 |
meth |
ProphetNetDecoder.prepare_attention_mask |
3 | 0 | 0 |
meth |
ProphetNetDecoder.prepare_predict_attention_mask |
3 | 0 | 0 |
attr |
ProphetNetDecoder.ngram |
1 | 0 | 0 |
attr |
ProphetNetDecoder.num_buckets |
1 | 0 | 0 |
attr |
ProphetNetDecoder.relative_max_distance |
1 | 0 | 0 |
attr |
ProphetNetDecoder.dropout |
1 | 0 | 0 |
attr |
ProphetNetDecoder.max_target_positions |
1 | 0 | 0 |
attr |
ProphetNetDecoder.word_embeddings |
1 | 0 | 0 |
attr |
ProphetNetDecoder.position_embeddings |
1 | 0 | 0 |
attr |
ProphetNetDecoder.ngram_embeddings |
1 | 0 | 0 |
attr |
ProphetNetDecoder.layers |
1 | 0 | 0 |
attr |
ProphetNetDecoder.embeddings_layer_norm |
1 | 0 | 0 |
attr |
ProphetNetDecoder.gradient_checkpointing |
1 | 0 | 0 |
meth |
ProphetNetPreTrainedModel._shift_right |
2 | 0 | 0 |
meth |
ProphetNetForCausalLM.init |
2 | 1 | 0 |
meth |
ProphetNetForCausalLM.get_input_embeddings |
1 | 0 | 0 |
meth |
ProphetNetForCausalLM.set_input_embeddings |
2 | 0 | 0 |
meth |
ProphetNetForCausalLM.forward |
13 | 12 | 0 |
meth |
ProphetNetForCausalLM._compute_loss |
4 | 0 | 0 |
meth |
ProphetNetForCausalLM.prepare_inputs_for_generation |
7 | 0 | 0 |
attr |
ProphetNetForCausalLM.prophetnet |
1 | 0 | 0 |
attr |
ProphetNetForCausalLM.padding_idx |
1 | 0 | 0 |
attr |
ProphetNetForCausalLM.disable_ngram_loss |
1 | 0 | 0 |
attr |
ProphetNetForCausalLM.lm_head |
1 | 0 | 0 |
meth |
ProphetNetForConditionalGeneration.init |
2 | 1 | 0 |
meth |
ProphetNetForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
ProphetNetForConditionalGeneration.forward |
16 | 15 | 0 |
meth |
ProphetNetForConditionalGeneration._compute_loss |
4 | 0 | 0 |
meth |
ProphetNetForConditionalGeneration.prepare_decoder_input_ids_from_labels |
2 | 1 | 0 |
meth |
ProphetNetForConditionalGeneration.get_encoder |
2 | 0 | 0 |
attr |
ProphetNetForConditionalGeneration.prophetnet |
1 | 0 | 0 |
attr |
ProphetNetForConditionalGeneration.padding_idx |
1 | 0 | 0 |
attr |
ProphetNetForConditionalGeneration.disable_ngram_loss |
1 | 0 | 0 |
attr |
ProphetNetForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
ProphetNetEncoder.init |
2 | 1 | 0 |
meth |
ProphetNetEncoder.get_input_embeddings |
1 | 0 | 0 |
meth |
ProphetNetEncoder.set_input_embeddings |
2 | 0 | 0 |
meth |
ProphetNetEncoder.forward |
8 | 7 | 0 |
attr |
ProphetNetEncoder.word_embeddings |
1 | 0 | 0 |
attr |
ProphetNetEncoder.position_embeddings |
1 | 0 | 0 |
attr |
ProphetNetEncoder.embeddings_layer_norm |
1 | 0 | 0 |
attr |
ProphetNetEncoder.layers |
1 | 0 | 0 |
attr |
ProphetNetEncoder.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.prophetnet.tokenization_prophetnet (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ProphetNetTokenizer.init |
14 | 12 | 0 |
meth |
ProphetNetTokenizer.get_vocab |
1 | 0 | 0 |
meth |
ProphetNetTokenizer._tokenize |
2 | 0 | 0 |
meth |
ProphetNetTokenizer._convert_token_to_id |
2 | 1 | 0 |
meth |
ProphetNetTokenizer._convert_id_to_token |
2 | 1 | 0 |
meth |
ProphetNetTokenizer.convert_tokens_to_string |
2 | 1 | 0 |
prop |
ProphetNetTokenizer.vocab_size |
1 | 0 | 0 |
attr |
ProphetNetTokenizer.vocab |
1 | 0 | 0 |
attr |
ProphetNetTokenizer.ids_to_tokens |
1 | 0 | 0 |
attr |
ProphetNetTokenizer.do_basic_tokenize |
1 | 0 | 0 |
attr |
ProphetNetTokenizer.wordpiece_tokenizer |
1 | 0 | 0 |
attr |
ProphetNetTokenizer.basic_tokenizer |
1 | 0 | 0 |
transformers.models.pvt.configuration_pvt (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PvtConfig.init |
20 | 18 | 0 |
attr |
PvtConfig.image_size |
1 | 0 | 0 |
attr |
PvtConfig.num_channels |
1 | 0 | 0 |
attr |
PvtConfig.num_encoder_blocks |
1 | 0 | 0 |
attr |
PvtConfig.depths |
1 | 0 | 0 |
attr |
PvtConfig.sequence_reduction_ratios |
1 | 0 | 0 |
attr |
PvtConfig.hidden_sizes |
1 | 0 | 0 |
attr |
PvtConfig.patch_sizes |
1 | 0 | 0 |
attr |
PvtConfig.strides |
1 | 0 | 0 |
attr |
PvtConfig.mlp_ratios |
1 | 0 | 0 |
attr |
PvtConfig.num_attention_heads |
1 | 0 | 0 |
attr |
PvtConfig.hidden_act |
1 | 0 | 0 |
attr |
PvtConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
PvtConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
PvtConfig.initializer_range |
1 | 0 | 0 |
attr |
PvtConfig.drop_path_rate |
1 | 0 | 0 |
attr |
PvtConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
PvtConfig.num_labels |
1 | 0 | 0 |
attr |
PvtConfig.qkv_bias |
1 | 0 | 0 |
transformers.models.pvt.image_processing_pvt (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PvtImageProcessor.init |
10 | 9 | 0 |
meth |
PvtImageProcessor.resize |
7 | 6 | 0 |
meth |
PvtImageProcessor.preprocess |
13 | 12 | 0 |
attr |
PvtImageProcessor.do_resize |
1 | 0 | 0 |
attr |
PvtImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
PvtImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
PvtImageProcessor.size |
1 | 0 | 0 |
attr |
PvtImageProcessor.resample |
1 | 0 | 0 |
attr |
PvtImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
PvtImageProcessor.image_mean |
1 | 0 | 0 |
attr |
PvtImageProcessor.image_std |
1 | 0 | 0 |
transformers.models.pvt.modeling_pvt (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PvtForImageClassification.forward |
7 | 6 | 0 |
attr |
PvtForImageClassification.num_labels |
1 | 0 | 0 |
attr |
PvtForImageClassification.pvt |
1 | 0 | 0 |
attr |
PvtForImageClassification.classifier |
1 | 0 | 0 |
meth |
PvtModel.init |
2 | 1 | 0 |
meth |
PvtModel.forward |
6 | 5 | 0 |
attr |
PvtModel.encoder |
1 | 0 | 0 |
transformers.models.pvt_v2.configuration_pvt_v2 (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PvtV2Config.init |
22 | 18 | 0 |
attr |
PvtV2Config.image_size |
1 | 0 | 0 |
attr |
PvtV2Config.num_channels |
1 | 0 | 0 |
attr |
PvtV2Config.num_encoder_blocks |
1 | 0 | 0 |
attr |
PvtV2Config.depths |
1 | 0 | 0 |
attr |
PvtV2Config.sr_ratios |
1 | 0 | 0 |
attr |
PvtV2Config.hidden_sizes |
1 | 0 | 0 |
attr |
PvtV2Config.patch_sizes |
1 | 0 | 0 |
attr |
PvtV2Config.strides |
1 | 0 | 0 |
attr |
PvtV2Config.mlp_ratios |
1 | 0 | 0 |
attr |
PvtV2Config.num_attention_heads |
1 | 0 | 0 |
attr |
PvtV2Config.hidden_act |
1 | 0 | 0 |
attr |
PvtV2Config.hidden_dropout_prob |
1 | 0 | 0 |
attr |
PvtV2Config.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
PvtV2Config.initializer_range |
1 | 0 | 0 |
attr |
PvtV2Config.drop_path_rate |
1 | 0 | 0 |
attr |
PvtV2Config.layer_norm_eps |
1 | 0 | 0 |
attr |
PvtV2Config.qkv_bias |
1 | 0 | 0 |
attr |
PvtV2Config.linear_attention |
1 | 0 | 0 |
attr |
PvtV2Config.stage_names |
1 | 0 | 0 |
transformers.models.pvt_v2.modeling_pvt_v2 (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PvtV2Model.init |
2 | 1 | 0 |
meth |
PvtV2Model.forward |
6 | 5 | 0 |
attr |
PvtV2Model.encoder |
1 | 0 | 0 |
meth |
PvtV2Backbone.init |
2 | 1 | 0 |
meth |
PvtV2Backbone.forward |
6 | 5 | 0 |
attr |
PvtV2Backbone.num_features |
1 | 0 | 0 |
meth |
PvtV2ForImageClassification.forward |
7 | 6 | 0 |
attr |
PvtV2ForImageClassification.num_labels |
1 | 0 | 0 |
attr |
PvtV2ForImageClassification.pvt_v2 |
1 | 0 | 0 |
attr |
PvtV2ForImageClassification.classifier |
1 | 0 | 0 |
transformers.models.qwen2.configuration_qwen2 (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen2Config.init |
23 | 21 | 0 |
attr |
Qwen2Config.vocab_size |
1 | 0 | 0 |
attr |
Qwen2Config.max_position_embeddings |
1 | 0 | 0 |
attr |
Qwen2Config.hidden_size |
1 | 0 | 0 |
attr |
Qwen2Config.intermediate_size |
1 | 0 | 0 |
attr |
Qwen2Config.num_hidden_layers |
1 | 0 | 0 |
attr |
Qwen2Config.num_attention_heads |
1 | 0 | 0 |
attr |
Qwen2Config.use_sliding_window |
1 | 0 | 0 |
attr |
Qwen2Config.sliding_window |
1 | 0 | 0 |
attr |
Qwen2Config.max_window_layers |
1 | 0 | 0 |
attr |
Qwen2Config.num_key_value_heads |
1 | 0 | 0 |
attr |
Qwen2Config.hidden_act |
1 | 0 | 0 |
attr |
Qwen2Config.initializer_range |
1 | 0 | 0 |
attr |
Qwen2Config.rms_norm_eps |
1 | 0 | 0 |
attr |
Qwen2Config.use_cache |
1 | 0 | 0 |
attr |
Qwen2Config.attention_dropout |
1 | 0 | 0 |
attr |
Qwen2Config.layer_types |
1 | 0 | 0 |
attr |
Qwen2Config.pad_token_id |
1 | 0 | 0 |
attr |
Qwen2Config.bos_token_id |
1 | 0 | 0 |
attr |
Qwen2Config.eos_token_id |
1 | 0 | 0 |
attr |
Qwen2Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
Qwen2Config.rope_parameters |
1 | 0 | 0 |
transformers.models.qwen2.modeling_qwen2 (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen2RMSNorm.init |
3 | 2 | 0 |
meth |
Qwen2RMSNorm.extra_repr |
1 | 0 | 0 |
attr |
Qwen2RMSNorm.weight |
1 | 0 | 0 |
attr |
Qwen2RMSNorm.variance_epsilon |
1 | 0 | 0 |
meth |
Qwen2Model.init |
2 | 1 | 0 |
attr |
Qwen2Model.padding_idx |
1 | 0 | 0 |
attr |
Qwen2Model.vocab_size |
1 | 0 | 0 |
attr |
Qwen2Model.embed_tokens |
1 | 0 | 0 |
attr |
Qwen2Model.layers |
1 | 0 | 0 |
attr |
Qwen2Model.norm |
1 | 0 | 0 |
attr |
Qwen2Model.rotary_emb |
1 | 0 | 0 |
attr |
Qwen2Model.gradient_checkpointing |
1 | 0 | 0 |
attr |
Qwen2Model.has_sliding_layers |
1 | 0 | 0 |
meth |
Qwen2ForCausalLM.init |
2 | 0 | 0 |
attr |
Qwen2ForCausalLM.model |
1 | 0 | 0 |
attr |
Qwen2ForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Qwen2ForCausalLM.lm_head |
1 | 0 | 0 |
transformers.models.qwen2.modular_qwen2 (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
Qwen2RMSNorm |
1 | 0 | 0 |
meth |
Qwen2Model.init |
2 | 1 | 0 |
attr |
Qwen2Model.has_sliding_layers |
1 | 0 | 0 |
transformers.models.qwen2.tokenization_qwen2 (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen2Tokenizer.init |
9 | 5 | 0 |
attr |
Qwen2Tokenizer.add_prefix_space |
1 | 0 | 0 |
transformers.models.qwen2_5_omni.configuration_qwen2_5_omni (122 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen2_5OmniConfig.init |
6 | 1 | 0 |
meth |
Qwen2_5OmniConfig.get_text_config |
3 | 0 | 0 |
attr |
Qwen2_5OmniConfig.thinker_config |
1 | 0 | 0 |
attr |
Qwen2_5OmniConfig.talker_config |
1 | 0 | 0 |
attr |
Qwen2_5OmniConfig.token2wav_config |
1 | 0 | 0 |
attr |
Qwen2_5OmniConfig.enable_audio_output |
1 | 0 | 0 |
meth |
Qwen2_5OmniToken2WavConfig.init |
4 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavConfig.dit_config |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavConfig.bigvgan_config |
1 | 0 | 0 |
meth |
Qwen2_5OmniTalkerConfig.init |
40 | 2 | 0 |
attr |
Qwen2_5OmniTalkerConfig.audio_token_index |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.image_token_index |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.video_token_index |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.tts_text_start_token_id |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.tts_text_end_token_id |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.tts_text_pad_token_id |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.tts_codec_start_token_id |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.tts_codec_end_token_id |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.tts_codec_pad_token_id |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.tts_codec_mask_token_id |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.vision_start_token_id |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.vision_end_token_id |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.vocab_size |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.head_dim |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.embedding_size |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.hidden_size |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.intermediate_size |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.use_sliding_window |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.sliding_window |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.max_window_layers |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.hidden_act |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.use_cache |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.attention_dropout |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.position_id_per_seconds |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.seconds_per_chunk |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.audio_start_token_id |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.audio_end_token_id |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.pad_token_id |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.initializer_range |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.spatial_merge_size |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.layer_types |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.rope_parameters |
1 | 0 | 0 |
meth |
Qwen2_5OmniThinkerConfig.init |
15 | 0 | 0 |
attr |
Qwen2_5OmniThinkerConfig.audio_token_index |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerConfig.image_token_index |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerConfig.video_token_index |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerConfig.user_token_id |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerConfig.position_id_per_seconds |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerConfig.seconds_per_chunk |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerConfig.audio_start_token_id |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerConfig.audio_end_token_id |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerConfig.initializer_range |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerConfig.vision_config |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerConfig.audio_config |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerConfig.text_config |
1 | 0 | 0 |
transformers.models.qwen2_5_omni.modeling_qwen2_5_omni (181 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen2_5OmniForConditionalGeneration.init |
2 | 0 | 0 |
meth |
Qwen2_5OmniForConditionalGeneration.enable_talker |
1 | 0 | 0 |
meth |
Qwen2_5OmniForConditionalGeneration.load_speakers |
2 | 0 | 0 |
meth |
Qwen2_5OmniForConditionalGeneration.disable_talker |
1 | 0 | 0 |
meth |
Qwen2_5OmniForConditionalGeneration.from_pretrained |
13 | 0 | 0 |
meth |
Qwen2_5OmniForConditionalGeneration.generate |
13 | 11 | 0 |
attr |
Qwen2_5OmniForConditionalGeneration.thinker |
1 | 0 | 0 |
attr |
Qwen2_5OmniForConditionalGeneration.has_talker |
1 | 0 | 0 |
attr |
Qwen2_5OmniForConditionalGeneration.speaker_map |
1 | 0 | 0 |
meth |
Qwen2_5OmniTalkerForConditionalGeneration.init |
2 | 1 | 0 |
meth |
Qwen2_5OmniTalkerForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Qwen2_5OmniTalkerForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Qwen2_5OmniTalkerForConditionalGeneration.forward |
20 | 19 | 0 |
meth |
Qwen2_5OmniTalkerForConditionalGeneration._get_initial_cache_position |
4 | 0 | 0 |
meth |
Qwen2_5OmniTalkerForConditionalGeneration.prepare_inputs_for_generation |
20 | 0 | 0 |
attr |
Qwen2_5OmniTalkerForConditionalGeneration.thinker_to_talker_proj |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerForConditionalGeneration.model |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerForConditionalGeneration.codebook_size |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerForConditionalGeneration.codec_head |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerForConditionalGeneration.codec_bos_token |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerForConditionalGeneration.codec_eos_token |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerForConditionalGeneration.codec_pad_token |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerForConditionalGeneration.codec_mask_token |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerForConditionalGeneration.text_bos_token |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerForConditionalGeneration.text_eos_token |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerForConditionalGeneration.text_pad_token |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerForConditionalGeneration.spatial_merge_size |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerForConditionalGeneration.rope_deltas |
1 | 0 | 0 |
meth |
Qwen2_5OmniPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Qwen2_5OmniToken2WavDiTModel.init |
2 | 1 | 0 |
meth |
Qwen2_5OmniToken2WavDiTModel._create_block_diff |
2 | 0 | 0 |
meth |
Qwen2_5OmniToken2WavDiTModel.forward |
10 | 0 | 0 |
meth |
Qwen2_5OmniToken2WavDiTModel.sample |
7 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavDiTModel.mel_dim |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavDiTModel.repeats |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavDiTModel.time_embed |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavDiTModel.text_embed |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavDiTModel.input_embed |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavDiTModel.rotary_embed |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavDiTModel.hidden_size |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavDiTModel.layers |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavDiTModel.block_size |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavDiTModel.num_attention_heads |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavDiTModel.transformer_blocks |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavDiTModel.norm_out |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavDiTModel.proj_out |
1 | 0 | 0 |
meth |
Qwen2_5OmniPreTrainedModelForConditionalGeneration.get_llm_pos_ids_for_vision |
7 | 6 | 0 |
meth |
Qwen2_5OmniThinkerTextModel.init |
2 | 1 | 0 |
attr |
Qwen2_5OmniThinkerTextModel.padding_idx |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerTextModel.vocab_size |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerTextModel.embed_tokens |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerTextModel.layers |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerTextModel.norm |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerTextModel.has_sliding_layers |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerTextModel.rotary_emb |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerTextModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
Qwen2_5OmniToken2WavModel.init |
2 | 1 | 0 |
meth |
Qwen2_5OmniToken2WavModel.forward |
8 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavModel.code2wav_dit_model |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavModel.code2wav_bigvgan_model |
1 | 0 | 0 |
meth |
Qwen2_5OmniThinkerForConditionalGeneration.init |
2 | 1 | 0 |
meth |
Qwen2_5OmniThinkerForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Qwen2_5OmniThinkerForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Qwen2_5OmniThinkerForConditionalGeneration.get_placeholder_mask |
5 | 4 | 0 |
meth |
Qwen2_5OmniThinkerForConditionalGeneration.prepare_inputs_for_generation |
18 | 0 | 0 |
attr |
Qwen2_5OmniThinkerForConditionalGeneration.audio_tower |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerForConditionalGeneration.visual |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerForConditionalGeneration.vocab_size |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerForConditionalGeneration.model |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerForConditionalGeneration.lm_head |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerForConditionalGeneration.spatial_merge_size |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerForConditionalGeneration.rope_deltas |
1 | 0 | 0 |
meth |
Qwen2_5OmniToken2WavBigVGANModel.init |
2 | 1 | 0 |
meth |
Qwen2_5OmniToken2WavBigVGANModel.normalize_spectrogram |
4 | 0 | 0 |
meth |
Qwen2_5OmniToken2WavBigVGANModel.amplitude_to_db |
3 | 0 | 0 |
meth |
Qwen2_5OmniToken2WavBigVGANModel.process_mel_spectrogram |
2 | 0 | 0 |
meth |
Qwen2_5OmniToken2WavBigVGANModel.forward |
3 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavBigVGANModel.num_residual_blocks |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavBigVGANModel.num_upsample_layers |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavBigVGANModel.conv_pre |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavBigVGANModel.ups |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavBigVGANModel.resblocks |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavBigVGANModel.activation_post |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavBigVGANModel.conv_post |
1 | 0 | 0 |
meth |
Qwen2_5OmniTalkerModel.init |
2 | 1 | 0 |
attr |
Qwen2_5OmniTalkerModel.padding_idx |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerModel.vocab_size |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerModel.embed_tokens |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerModel.layers |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerModel.norm |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerModel.has_sliding_layers |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerModel.rotary_emb |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerModel.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.qwen2_5_omni.modular_qwen2_5_omni (288 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen2_5OmniForConditionalGeneration.init |
2 | 0 | 0 |
meth |
Qwen2_5OmniForConditionalGeneration.enable_talker |
1 | 0 | 0 |
meth |
Qwen2_5OmniForConditionalGeneration.load_speakers |
2 | 0 | 0 |
meth |
Qwen2_5OmniForConditionalGeneration.disable_talker |
1 | 0 | 0 |
meth |
Qwen2_5OmniForConditionalGeneration.from_pretrained |
13 | 0 | 0 |
meth |
Qwen2_5OmniForConditionalGeneration.generate |
13 | 11 | 0 |
attr |
Qwen2_5OmniForConditionalGeneration.thinker |
1 | 0 | 0 |
attr |
Qwen2_5OmniForConditionalGeneration.has_talker |
1 | 0 | 0 |
attr |
Qwen2_5OmniForConditionalGeneration.speaker_map |
1 | 0 | 0 |
meth |
Qwen2_5OmniToken2WavConfig.init |
4 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavConfig.dit_config |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavConfig.bigvgan_config |
1 | 0 | 0 |
meth |
Qwen2_5OmniTalkerForConditionalGeneration.init |
2 | 1 | 0 |
meth |
Qwen2_5OmniTalkerForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Qwen2_5OmniTalkerForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Qwen2_5OmniTalkerForConditionalGeneration.forward |
20 | 19 | 0 |
meth |
Qwen2_5OmniTalkerForConditionalGeneration._get_initial_cache_position |
4 | 0 | 0 |
meth |
Qwen2_5OmniTalkerForConditionalGeneration.prepare_inputs_for_generation |
20 | 0 | 0 |
attr |
Qwen2_5OmniTalkerForConditionalGeneration.thinker_to_talker_proj |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerForConditionalGeneration.model |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerForConditionalGeneration.codebook_size |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerForConditionalGeneration.codec_head |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerForConditionalGeneration.codec_bos_token |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerForConditionalGeneration.codec_eos_token |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerForConditionalGeneration.codec_pad_token |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerForConditionalGeneration.codec_mask_token |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerForConditionalGeneration.text_bos_token |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerForConditionalGeneration.text_eos_token |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerForConditionalGeneration.text_pad_token |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerForConditionalGeneration.spatial_merge_size |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerForConditionalGeneration.rope_deltas |
1 | 0 | 0 |
meth |
Qwen2_5OmniTalkerConfig.init |
40 | 2 | 0 |
attr |
Qwen2_5OmniTalkerConfig.audio_token_index |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.image_token_index |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.video_token_index |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.tts_text_start_token_id |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.tts_text_end_token_id |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.tts_text_pad_token_id |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.tts_codec_start_token_id |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.tts_codec_end_token_id |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.tts_codec_pad_token_id |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.tts_codec_mask_token_id |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.vision_start_token_id |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.vision_end_token_id |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.vocab_size |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.head_dim |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.embedding_size |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.hidden_size |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.intermediate_size |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.use_sliding_window |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.sliding_window |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.max_window_layers |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.hidden_act |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.use_cache |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.attention_dropout |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.position_id_per_seconds |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.seconds_per_chunk |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.audio_start_token_id |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.audio_end_token_id |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.pad_token_id |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.initializer_range |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.spatial_merge_size |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.layer_types |
1 | 0 | 0 |
attr |
Qwen2_5OmniTalkerConfig.rope_parameters |
1 | 0 | 0 |
meth |
Qwen2_5OmniPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Qwen2_5OmniConfig.init |
6 | 1 | 0 |
meth |
Qwen2_5OmniConfig.get_text_config |
3 | 0 | 0 |
attr |
Qwen2_5OmniConfig.thinker_config |
1 | 0 | 0 |
attr |
Qwen2_5OmniConfig.talker_config |
1 | 0 | 0 |
attr |
Qwen2_5OmniConfig.token2wav_config |
1 | 0 | 0 |
attr |
Qwen2_5OmniConfig.enable_audio_output |
1 | 0 | 0 |
meth |
Qwen2_5OmniToken2WavDiTModel.init |
2 | 1 | 0 |
meth |
Qwen2_5OmniToken2WavDiTModel._create_block_diff |
2 | 0 | 0 |
meth |
Qwen2_5OmniToken2WavDiTModel.forward |
10 | 0 | 0 |
meth |
Qwen2_5OmniToken2WavDiTModel.sample |
7 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavDiTModel.mel_dim |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavDiTModel.repeats |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavDiTModel.time_embed |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavDiTModel.text_embed |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavDiTModel.input_embed |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavDiTModel.rotary_embed |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavDiTModel.hidden_size |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavDiTModel.layers |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavDiTModel.block_size |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavDiTModel.num_attention_heads |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavDiTModel.transformer_blocks |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavDiTModel.norm_out |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavDiTModel.proj_out |
1 | 0 | 0 |
meth |
Qwen2_5OmniPreTrainedModelForConditionalGeneration.get_llm_pos_ids_for_vision |
7 | 6 | 0 |
meth |
Qwen2_5OmniThinkerTextModel.init |
2 | 1 | 0 |
meth |
Qwen2_5OmniTalkerModel.init |
2 | 1 | 0 |
attr |
Qwen2_5OmniTalkerModel.embed_tokens |
1 | 0 | 0 |
meth |
Qwen2_5OmniToken2WavModel.init |
2 | 1 | 0 |
meth |
Qwen2_5OmniToken2WavModel.forward |
8 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavModel.code2wav_dit_model |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavModel.code2wav_bigvgan_model |
1 | 0 | 0 |
meth |
Qwen2_5OmniThinkerForConditionalGeneration.init |
2 | 1 | 0 |
meth |
Qwen2_5OmniThinkerForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Qwen2_5OmniThinkerForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Qwen2_5OmniThinkerForConditionalGeneration.get_placeholder_mask |
5 | 4 | 0 |
meth |
Qwen2_5OmniThinkerForConditionalGeneration.prepare_inputs_for_generation |
18 | 0 | 0 |
attr |
Qwen2_5OmniThinkerForConditionalGeneration.audio_tower |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerForConditionalGeneration.visual |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerForConditionalGeneration.vocab_size |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerForConditionalGeneration.model |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerForConditionalGeneration.lm_head |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerForConditionalGeneration.spatial_merge_size |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerForConditionalGeneration.rope_deltas |
1 | 0 | 0 |
meth |
Qwen2_5OmniToken2WavBigVGANModel.init |
2 | 1 | 0 |
meth |
Qwen2_5OmniToken2WavBigVGANModel.normalize_spectrogram |
4 | 0 | 0 |
meth |
Qwen2_5OmniToken2WavBigVGANModel.amplitude_to_db |
3 | 0 | 0 |
meth |
Qwen2_5OmniToken2WavBigVGANModel.process_mel_spectrogram |
2 | 0 | 0 |
meth |
Qwen2_5OmniToken2WavBigVGANModel.forward |
3 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavBigVGANModel.num_residual_blocks |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavBigVGANModel.num_upsample_layers |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavBigVGANModel.conv_pre |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavBigVGANModel.ups |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavBigVGANModel.resblocks |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavBigVGANModel.activation_post |
1 | 0 | 0 |
attr |
Qwen2_5OmniToken2WavBigVGANModel.conv_post |
1 | 0 | 0 |
meth |
Qwen2_5OmniThinkerConfig.init |
15 | 0 | 0 |
attr |
Qwen2_5OmniThinkerConfig.audio_token_index |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerConfig.image_token_index |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerConfig.video_token_index |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerConfig.user_token_id |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerConfig.position_id_per_seconds |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerConfig.seconds_per_chunk |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerConfig.audio_start_token_id |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerConfig.audio_end_token_id |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerConfig.initializer_range |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerConfig.vision_config |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerConfig.audio_config |
1 | 0 | 0 |
attr |
Qwen2_5OmniThinkerConfig.text_config |
1 | 0 | 0 |
transformers.models.qwen2_5_omni.processing_qwen2_5_omni (36 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen2_5OmniProcessor.init |
6 | 0 | 0 |
meth |
Qwen2_5OmniProcessor.replace_multimodal_special_tokens |
9 | 0 | 0 |
meth |
Qwen2_5OmniProcessor.apply_chat_template |
4 | 0 | 0 |
meth |
Qwen2_5OmniProcessor.post_process_image_text_to_text |
4 | 0 | 0 |
meth |
Qwen2_5OmniProcessor.post_process_multimodal_output |
5 | 0 | 0 |
prop |
Qwen2_5OmniProcessor.model_input_names |
1 | 0 | 0 |
attr |
Qwen2_5OmniProcessor.image_token |
1 | 0 | 0 |
attr |
Qwen2_5OmniProcessor.audio_token |
1 | 0 | 0 |
attr |
Qwen2_5OmniProcessor.video_token |
1 | 0 | 0 |
attr |
Qwen2_5OmniProcessor.vision_bos_token |
1 | 0 | 0 |
attr |
Qwen2_5OmniProcessor.vision_eos_token |
1 | 0 | 0 |
attr |
Qwen2_5OmniProcessor.audio_bos_token |
1 | 0 | 0 |
attr |
Qwen2_5OmniProcessor.audio_eos_token |
1 | 0 | 0 |
transformers.models.qwen2_5_vl.configuration_qwen2_5_vl (40 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen2_5_VLTextConfig.init |
22 | 20 | 0 |
meth |
Qwen2_5_VLTextConfig.convert_rope_params_to_dict |
3 | 1 | 0 |
attr |
Qwen2_5_VLTextConfig.vocab_size |
1 | 0 | 0 |
attr |
Qwen2_5_VLTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Qwen2_5_VLTextConfig.hidden_size |
1 | 0 | 0 |
attr |
Qwen2_5_VLTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
Qwen2_5_VLTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Qwen2_5_VLTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Qwen2_5_VLTextConfig.use_sliding_window |
1 | 0 | 0 |
attr |
Qwen2_5_VLTextConfig.sliding_window |
1 | 0 | 0 |
attr |
Qwen2_5_VLTextConfig.max_window_layers |
1 | 0 | 0 |
attr |
Qwen2_5_VLTextConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Qwen2_5_VLTextConfig.hidden_act |
1 | 0 | 0 |
attr |
Qwen2_5_VLTextConfig.initializer_range |
1 | 0 | 0 |
attr |
Qwen2_5_VLTextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Qwen2_5_VLTextConfig.use_cache |
1 | 0 | 0 |
attr |
Qwen2_5_VLTextConfig.attention_dropout |
1 | 0 | 0 |
attr |
Qwen2_5_VLTextConfig.layer_types |
1 | 0 | 0 |
attr |
Qwen2_5_VLTextConfig.rope_parameters |
1 | 0 | 0 |
attr |
Qwen2_5_VLTextConfig.bos_token_id |
1 | 0 | 0 |
attr |
Qwen2_5_VLTextConfig.eos_token_id |
1 | 0 | 0 |
attr |
Qwen2_5_VLTextConfig.pad_token_id |
1 | 0 | 0 |
meth |
Qwen2_5_VLConfig.init |
9 | 0 | 0 |
attr |
Qwen2_5_VLConfig.image_token_id |
1 | 0 | 0 |
attr |
Qwen2_5_VLConfig.video_token_id |
1 | 0 | 0 |
attr |
Qwen2_5_VLConfig.vision_start_token_id |
1 | 0 | 0 |
attr |
Qwen2_5_VLConfig.vision_end_token_id |
1 | 0 | 0 |
attr |
Qwen2_5_VLConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Qwen2_5_VLConfig.vision_config |
1 | 0 | 0 |
attr |
Qwen2_5_VLConfig.text_config |
1 | 0 | 0 |
transformers.models.qwen2_5_vl.modeling_qwen2_5_vl (48 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen2_5_VLTextModel.init |
2 | 1 | 0 |
attr |
Qwen2_5_VLTextModel.padding_idx |
1 | 0 | 0 |
attr |
Qwen2_5_VLTextModel.vocab_size |
1 | 0 | 0 |
attr |
Qwen2_5_VLTextModel.embed_tokens |
1 | 0 | 0 |
attr |
Qwen2_5_VLTextModel.layers |
1 | 0 | 0 |
attr |
Qwen2_5_VLTextModel.norm |
1 | 0 | 0 |
attr |
Qwen2_5_VLTextModel.has_sliding_layers |
1 | 0 | 0 |
attr |
Qwen2_5_VLTextModel.rotary_emb |
1 | 0 | 0 |
attr |
Qwen2_5_VLTextModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
Qwen2_5_VLForConditionalGeneration.init |
2 | 0 | 0 |
meth |
Qwen2_5_VLForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Qwen2_5_VLForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Qwen2_5_VLForConditionalGeneration.prepare_inputs_for_generation |
15 | 0 | 0 |
meth |
Qwen2_5_VLForConditionalGeneration._prepare_position_ids_for_generation |
3 | 0 | 0 |
meth |
Qwen2_5_VLForConditionalGeneration._expand_inputs_for_generation |
5 | 4 | 0 |
attr |
Qwen2_5_VLForConditionalGeneration.model |
1 | 0 | 0 |
attr |
Qwen2_5_VLForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
Qwen2_5_VLPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Qwen2_5_VLModel.init |
2 | 0 | 0 |
meth |
Qwen2_5_VLModel.get_input_embeddings |
1 | 0 | 0 |
meth |
Qwen2_5_VLModel.set_input_embeddings |
2 | 0 | 0 |
meth |
Qwen2_5_VLModel.get_vision_position_ids |
7 | 6 | 0 |
meth |
Qwen2_5_VLModel.get_rope_index |
8 | 7 | 0 |
meth |
Qwen2_5_VLModel.get_placeholder_mask |
5 | 4 | 0 |
attr |
Qwen2_5_VLModel.visual |
1 | 0 | 0 |
attr |
Qwen2_5_VLModel.language_model |
1 | 0 | 0 |
attr |
Qwen2_5_VLModel.rope_deltas |
1 | 0 | 0 |
transformers.models.qwen2_5_vl.modular_qwen2_5_vl (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen2_5_VLProcessor._get_num_multimodal_tokens |
4 | 0 | 0 |
prop |
Qwen2_5_VLProcessor.model_input_names |
1 | 0 | 0 |
meth |
Qwen2_5_VLModel.init |
2 | 0 | 0 |
meth |
Qwen2_5_VLModel.get_rope_index |
8 | 7 | 0 |
attr |
Qwen2_5_VLModel.visual |
1 | 0 | 0 |
meth |
Qwen2_5_VLPreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
Qwen2_5_VLTextModel |
1 | 0 | 0 |
meth |
Qwen2_5_VLForConditionalGeneration.prepare_inputs_for_generation |
15 | 0 | 0 |
transformers.models.qwen2_5_vl.processing_qwen2_5_vl (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen2_5_VLProcessor.init |
6 | 0 | 0 |
meth |
Qwen2_5_VLProcessor._get_num_multimodal_tokens |
4 | 0 | 0 |
meth |
Qwen2_5_VLProcessor.post_process_image_text_to_text |
5 | 0 | 0 |
prop |
Qwen2_5_VLProcessor.model_input_names |
1 | 0 | 0 |
attr |
Qwen2_5_VLProcessor.image_token |
1 | 0 | 0 |
attr |
Qwen2_5_VLProcessor.video_token |
1 | 0 | 0 |
attr |
Qwen2_5_VLProcessor.image_token_id |
1 | 0 | 0 |
attr |
Qwen2_5_VLProcessor.video_token_id |
1 | 0 | 0 |
transformers.models.qwen2_audio.configuration_qwen2_audio (37 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen2AudioConfig.init |
5 | 0 | 0 |
attr |
Qwen2AudioConfig.audio_token_index |
1 | 0 | 0 |
attr |
Qwen2AudioConfig.audio_config |
1 | 0 | 0 |
attr |
Qwen2AudioConfig.text_config |
1 | 0 | 0 |
meth |
Qwen2AudioEncoderConfig.init |
15 | 0 | 0 |
attr |
Qwen2AudioEncoderConfig.num_mel_bins |
1 | 0 | 0 |
attr |
Qwen2AudioEncoderConfig.d_model |
1 | 0 | 0 |
attr |
Qwen2AudioEncoderConfig.encoder_layers |
1 | 0 | 0 |
attr |
Qwen2AudioEncoderConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
Qwen2AudioEncoderConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
Qwen2AudioEncoderConfig.dropout |
1 | 0 | 0 |
attr |
Qwen2AudioEncoderConfig.attention_dropout |
1 | 0 | 0 |
attr |
Qwen2AudioEncoderConfig.activation_function |
1 | 0 | 0 |
attr |
Qwen2AudioEncoderConfig.activation_dropout |
1 | 0 | 0 |
attr |
Qwen2AudioEncoderConfig.encoder_layerdrop |
1 | 0 | 0 |
attr |
Qwen2AudioEncoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Qwen2AudioEncoderConfig.initializer_range |
1 | 0 | 0 |
attr |
Qwen2AudioEncoderConfig.scale_embedding |
1 | 0 | 0 |
attr |
Qwen2AudioEncoderConfig.max_source_positions |
1 | 0 | 0 |
transformers.models.qwen2_audio.modeling_qwen2_audio (50 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen2AudioEncoder.init |
2 | 1 | 0 |
meth |
Qwen2AudioEncoder._freeze_parameters |
1 | 0 | 0 |
meth |
Qwen2AudioEncoder.set_input_embeddings |
2 | 1 | 0 |
meth |
Qwen2AudioEncoder.forward |
7 | 0 | 0 |
meth |
Qwen2AudioEncoder._get_feat_extract_output_lengths |
2 | 1 | 0 |
attr |
Qwen2AudioEncoder.dropout |
1 | 0 | 0 |
attr |
Qwen2AudioEncoder.layerdrop |
1 | 0 | 0 |
attr |
Qwen2AudioEncoder.num_mel_bins |
1 | 0 | 0 |
attr |
Qwen2AudioEncoder.max_source_positions |
1 | 0 | 0 |
attr |
Qwen2AudioEncoder.embed_scale |
1 | 0 | 0 |
attr |
Qwen2AudioEncoder.conv1 |
1 | 0 | 0 |
attr |
Qwen2AudioEncoder.conv2 |
1 | 0 | 0 |
attr |
Qwen2AudioEncoder.embed_positions |
1 | 0 | 0 |
attr |
Qwen2AudioEncoder.layers |
1 | 0 | 0 |
attr |
Qwen2AudioEncoder.layer_norm |
1 | 0 | 0 |
attr |
Qwen2AudioEncoder.avg_pooler |
1 | 0 | 0 |
attr |
Qwen2AudioEncoder.gradient_checkpointing |
1 | 0 | 0 |
meth |
Qwen2AudioForConditionalGeneration.init |
2 | 1 | 0 |
meth |
Qwen2AudioForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Qwen2AudioForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Qwen2AudioForConditionalGeneration.get_output_embeddings |
1 | 0 | 0 |
meth |
Qwen2AudioForConditionalGeneration.set_output_embeddings |
2 | 0 | 0 |
meth |
Qwen2AudioForConditionalGeneration.set_decoder |
2 | 0 | 0 |
meth |
Qwen2AudioForConditionalGeneration.get_decoder |
1 | 0 | 0 |
meth |
Qwen2AudioForConditionalGeneration._merge_input_ids_with_audio_features |
7 | 0 | 0 |
meth |
Qwen2AudioForConditionalGeneration.forward |
15 | 14 | 0 |
meth |
Qwen2AudioForConditionalGeneration.prepare_inputs_for_generation |
3 | 0 | 0 |
prop |
Qwen2AudioForConditionalGeneration.padding_side |
2 | 1 | 0 |
attr |
Qwen2AudioForConditionalGeneration.audio_tower |
1 | 0 | 0 |
attr |
Qwen2AudioForConditionalGeneration.multi_modal_projector |
1 | 0 | 0 |
attr |
Qwen2AudioForConditionalGeneration.vocab_size |
1 | 0 | 0 |
attr |
Qwen2AudioForConditionalGeneration.language_model |
1 | 0 | 0 |
attr |
Qwen2AudioForConditionalGeneration.pad_token_id |
1 | 0 | 0 |
transformers.models.qwen2_audio.processing_qwen2_audio (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen2AudioProcessor.init |
7 | 0 | 0 |
prop |
Qwen2AudioProcessor.model_input_names |
1 | 0 | 0 |
prop |
Qwen2AudioProcessor.default_chat_template |
1 | 0 | 0 |
attr |
Qwen2AudioProcessor.audio_token |
1 | 0 | 0 |
attr |
Qwen2AudioProcessor.audio_token_id |
1 | 0 | 0 |
attr |
Qwen2AudioProcessor.audio_bos_token |
1 | 0 | 0 |
attr |
Qwen2AudioProcessor.audio_eos_token |
1 | 0 | 0 |
transformers.models.qwen2_moe.configuration_qwen2_moe (33 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen2MoeConfig.init |
33 | 31 | 0 |
attr |
Qwen2MoeConfig.layer_types |
1 | 0 | 0 |
attr |
Qwen2MoeConfig.vocab_size |
1 | 0 | 0 |
attr |
Qwen2MoeConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Qwen2MoeConfig.hidden_size |
1 | 0 | 0 |
attr |
Qwen2MoeConfig.intermediate_size |
1 | 0 | 0 |
attr |
Qwen2MoeConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Qwen2MoeConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Qwen2MoeConfig.use_sliding_window |
1 | 0 | 0 |
attr |
Qwen2MoeConfig.sliding_window |
1 | 0 | 0 |
attr |
Qwen2MoeConfig.max_window_layers |
1 | 0 | 0 |
attr |
Qwen2MoeConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Qwen2MoeConfig.hidden_act |
1 | 0 | 0 |
attr |
Qwen2MoeConfig.initializer_range |
1 | 0 | 0 |
attr |
Qwen2MoeConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Qwen2MoeConfig.use_cache |
1 | 0 | 0 |
attr |
Qwen2MoeConfig.attention_dropout |
1 | 0 | 0 |
attr |
Qwen2MoeConfig.decoder_sparse_step |
1 | 0 | 0 |
attr |
Qwen2MoeConfig.moe_intermediate_size |
1 | 0 | 0 |
attr |
Qwen2MoeConfig.shared_expert_intermediate_size |
1 | 0 | 0 |
attr |
Qwen2MoeConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
Qwen2MoeConfig.num_experts |
1 | 0 | 0 |
attr |
Qwen2MoeConfig.norm_topk_prob |
1 | 0 | 0 |
attr |
Qwen2MoeConfig.output_router_logits |
1 | 0 | 0 |
attr |
Qwen2MoeConfig.router_aux_loss_coef |
1 | 0 | 0 |
attr |
Qwen2MoeConfig.mlp_only_layers |
1 | 0 | 0 |
attr |
Qwen2MoeConfig.qkv_bias |
1 | 0 | 0 |
attr |
Qwen2MoeConfig.pad_token_id |
1 | 0 | 0 |
attr |
Qwen2MoeConfig.bos_token_id |
1 | 0 | 0 |
attr |
Qwen2MoeConfig.eos_token_id |
1 | 0 | 0 |
attr |
Qwen2MoeConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Qwen2MoeConfig.rope_parameters |
1 | 0 | 0 |
transformers.models.qwen2_moe.modeling_qwen2_moe (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen2MoeModel.__init__ |
2 | 1 | 0 |
attr |
Qwen2MoeModel.padding_idx |
1 | 0 | 0 |
attr |
Qwen2MoeModel.vocab_size |
1 | 0 | 0 |
attr |
Qwen2MoeModel.embed_tokens |
1 | 0 | 0 |
attr |
Qwen2MoeModel.layers |
1 | 0 | 0 |
attr |
Qwen2MoeModel.norm |
1 | 0 | 0 |
attr |
Qwen2MoeModel.rotary_emb |
1 | 0 | 0 |
attr |
Qwen2MoeModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
Qwen2MoePreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
Qwen2MoePreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
Qwen2MoeForCausalLM.__init__ |
2 | 0 | 0 |
attr |
Qwen2MoeForCausalLM.model |
1 | 0 | 0 |
attr |
Qwen2MoeForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Qwen2MoeForCausalLM.lm_head |
1 | 0 | 0 |
attr |
Qwen2MoeForCausalLM.router_aux_loss_coef |
1 | 0 | 0 |
attr |
Qwen2MoeForCausalLM.num_experts |
1 | 0 | 0 |
attr |
Qwen2MoeForCausalLM.num_experts_per_tok |
1 | 0 | 0 |
transformers.models.qwen2_moe.modular_qwen2_moe (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen2MoeModel.__init__ |
2 | 1 | 0 |
attr |
Qwen2MoeModel.layers |
1 | 0 | 0 |
attr |
Qwen2MoeModel.norm |
1 | 0 | 0 |
attr |
Qwen2MoeModel.rotary_emb |
1 | 0 | 0 |
attr |
Qwen2MoePreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
Qwen2MoeForCausalLM.__init__ |
2 | 0 | 0 |
attr |
Qwen2MoeForCausalLM.num_experts |
1 | 0 | 0 |
attr |
Qwen2MoeForCausalLM.model |
1 | 0 | 0 |
transformers.models.qwen2_vl.configuration_qwen2_vl (40 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen2VLTextConfig.__init__ |
22 | 20 | 0 |
meth |
Qwen2VLTextConfig.convert_rope_params_to_dict |
3 | 1 | 0 |
attr |
Qwen2VLTextConfig.vocab_size |
1 | 0 | 0 |
attr |
Qwen2VLTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Qwen2VLTextConfig.hidden_size |
1 | 0 | 0 |
attr |
Qwen2VLTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
Qwen2VLTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Qwen2VLTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Qwen2VLTextConfig.use_sliding_window |
1 | 0 | 0 |
attr |
Qwen2VLTextConfig.sliding_window |
1 | 0 | 0 |
attr |
Qwen2VLTextConfig.max_window_layers |
1 | 0 | 0 |
attr |
Qwen2VLTextConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Qwen2VLTextConfig.hidden_act |
1 | 0 | 0 |
attr |
Qwen2VLTextConfig.initializer_range |
1 | 0 | 0 |
attr |
Qwen2VLTextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Qwen2VLTextConfig.use_cache |
1 | 0 | 0 |
attr |
Qwen2VLTextConfig.attention_dropout |
1 | 0 | 0 |
attr |
Qwen2VLTextConfig.layer_types |
1 | 0 | 0 |
attr |
Qwen2VLTextConfig.rope_parameters |
1 | 0 | 0 |
attr |
Qwen2VLTextConfig.bos_token_id |
1 | 0 | 0 |
attr |
Qwen2VLTextConfig.eos_token_id |
1 | 0 | 0 |
attr |
Qwen2VLTextConfig.pad_token_id |
1 | 0 | 0 |
meth |
Qwen2VLConfig.__init__ |
9 | 0 | 0 |
attr |
Qwen2VLConfig.image_token_id |
1 | 0 | 0 |
attr |
Qwen2VLConfig.video_token_id |
1 | 0 | 0 |
attr |
Qwen2VLConfig.vision_start_token_id |
1 | 0 | 0 |
attr |
Qwen2VLConfig.vision_end_token_id |
1 | 0 | 0 |
attr |
Qwen2VLConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Qwen2VLConfig.vision_config |
1 | 0 | 0 |
attr |
Qwen2VLConfig.text_config |
1 | 0 | 0 |
transformers.models.qwen2_vl.image_processing_qwen2_vl (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen2VLImageProcessor.__init__ |
16 | 15 | 0 |
meth |
Qwen2VLImageProcessor._preprocess |
16 | 15 | 0 |
meth |
Qwen2VLImageProcessor.preprocess |
19 | 18 | 0 |
meth |
Qwen2VLImageProcessor.get_number_of_image_patches |
4 | 2 | 0 |
attr |
Qwen2VLImageProcessor.min_pixels |
1 | 0 | 0 |
attr |
Qwen2VLImageProcessor.max_pixels |
1 | 0 | 0 |
attr |
Qwen2VLImageProcessor.size |
1 | 0 | 0 |
attr |
Qwen2VLImageProcessor.do_resize |
1 | 0 | 0 |
attr |
Qwen2VLImageProcessor.resample |
1 | 0 | 0 |
attr |
Qwen2VLImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
Qwen2VLImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
Qwen2VLImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
Qwen2VLImageProcessor.image_mean |
1 | 0 | 0 |
attr |
Qwen2VLImageProcessor.image_std |
1 | 0 | 0 |
attr |
Qwen2VLImageProcessor.patch_size |
1 | 0 | 0 |
attr |
Qwen2VLImageProcessor.temporal_patch_size |
1 | 0 | 0 |
attr |
Qwen2VLImageProcessor.merge_size |
1 | 0 | 0 |
attr |
Qwen2VLImageProcessor.do_convert_rgb |
1 | 0 | 0 |
transformers.models.qwen2_vl.image_processing_qwen2_vl_fast (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen2VLImageProcessorFast.__init__ |
2 | 1 | 0 |
meth |
Qwen2VLImageProcessorFast._further_process_kwargs |
5 | 4 | 0 |
meth |
Qwen2VLImageProcessorFast._preprocess |
16 | 14 | 0 |
meth |
Qwen2VLImageProcessorFast.get_number_of_image_patches |
4 | 2 | 0 |
transformers.models.qwen2_vl.modeling_qwen2_vl (46 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen2VLPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Qwen2VLForConditionalGeneration.__init__ |
2 | 0 | 0 |
meth |
Qwen2VLForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Qwen2VLForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Qwen2VLForConditionalGeneration.prepare_inputs_for_generation |
14 | 0 | 0 |
meth |
Qwen2VLForConditionalGeneration._prepare_position_ids_for_generation |
3 | 0 | 0 |
meth |
Qwen2VLForConditionalGeneration._expand_inputs_for_generation |
5 | 4 | 0 |
attr |
Qwen2VLForConditionalGeneration.model |
1 | 0 | 0 |
attr |
Qwen2VLForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
Qwen2VLTextModel.__init__ |
2 | 1 | 0 |
attr |
Qwen2VLTextModel.padding_idx |
1 | 0 | 0 |
attr |
Qwen2VLTextModel.vocab_size |
1 | 0 | 0 |
attr |
Qwen2VLTextModel.embed_tokens |
1 | 0 | 0 |
attr |
Qwen2VLTextModel.layers |
1 | 0 | 0 |
attr |
Qwen2VLTextModel.norm |
1 | 0 | 0 |
attr |
Qwen2VLTextModel.has_sliding_layers |
1 | 0 | 0 |
attr |
Qwen2VLTextModel.rotary_emb |
1 | 0 | 0 |
attr |
Qwen2VLTextModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
Qwen2VLModel.__init__ |
2 | 1 | 0 |
meth |
Qwen2VLModel.get_input_embeddings |
1 | 0 | 0 |
meth |
Qwen2VLModel.set_input_embeddings |
2 | 0 | 0 |
meth |
Qwen2VLModel.get_vision_position_ids |
7 | 6 | 0 |
meth |
Qwen2VLModel.get_rope_index |
7 | 6 | 0 |
meth |
Qwen2VLModel.get_placeholder_mask |
5 | 4 | 0 |
attr |
Qwen2VLModel.visual |
1 | 0 | 0 |
attr |
Qwen2VLModel.language_model |
1 | 0 | 0 |
attr |
Qwen2VLModel.rope_deltas |
1 | 0 | 0 |
transformers.models.qwen2_vl.processing_qwen2_vl (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen2VLProcessor.__init__ |
6 | 0 | 0 |
meth |
Qwen2VLProcessor._get_num_multimodal_tokens |
4 | 0 | 0 |
meth |
Qwen2VLProcessor.post_process_image_text_to_text |
5 | 0 | 0 |
prop |
Qwen2VLProcessor.model_input_names |
1 | 0 | 0 |
attr |
Qwen2VLProcessor.image_token |
1 | 0 | 0 |
attr |
Qwen2VLProcessor.video_token |
1 | 0 | 0 |
attr |
Qwen2VLProcessor.image_token_id |
1 | 0 | 0 |
attr |
Qwen2VLProcessor.video_token_id |
1 | 0 | 0 |
transformers.models.qwen2_vl.video_processing_qwen2_vl (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen2VLVideoProcessor.__init__ |
2 | 1 | 0 |
meth |
Qwen2VLVideoProcessor._further_process_kwargs |
5 | 4 | 0 |
meth |
Qwen2VLVideoProcessor.sample_frames |
8 | 6 | 0 |
meth |
Qwen2VLVideoProcessor._preprocess |
15 | 13 | 0 |
meth |
Qwen2VLVideoProcessor.get_num_of_video_patches |
5 | 3 | 0 |
transformers.models.qwen3.configuration_qwen3 (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3Config.__init__ |
25 | 23 | 0 |
attr |
Qwen3Config.vocab_size |
1 | 0 | 0 |
attr |
Qwen3Config.max_position_embeddings |
1 | 0 | 0 |
attr |
Qwen3Config.hidden_size |
1 | 0 | 0 |
attr |
Qwen3Config.intermediate_size |
1 | 0 | 0 |
attr |
Qwen3Config.num_hidden_layers |
1 | 0 | 0 |
attr |
Qwen3Config.num_attention_heads |
1 | 0 | 0 |
attr |
Qwen3Config.use_sliding_window |
1 | 0 | 0 |
attr |
Qwen3Config.sliding_window |
1 | 0 | 0 |
attr |
Qwen3Config.max_window_layers |
1 | 0 | 0 |
attr |
Qwen3Config.num_key_value_heads |
1 | 0 | 0 |
attr |
Qwen3Config.head_dim |
1 | 0 | 0 |
attr |
Qwen3Config.hidden_act |
1 | 0 | 0 |
attr |
Qwen3Config.initializer_range |
1 | 0 | 0 |
attr |
Qwen3Config.rms_norm_eps |
1 | 0 | 0 |
attr |
Qwen3Config.use_cache |
1 | 0 | 0 |
attr |
Qwen3Config.attention_bias |
1 | 0 | 0 |
attr |
Qwen3Config.attention_dropout |
1 | 0 | 0 |
attr |
Qwen3Config.layer_types |
1 | 0 | 0 |
attr |
Qwen3Config.pad_token_id |
1 | 0 | 0 |
attr |
Qwen3Config.bos_token_id |
1 | 0 | 0 |
attr |
Qwen3Config.eos_token_id |
1 | 0 | 0 |
attr |
Qwen3Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
Qwen3Config.rope_parameters |
1 | 0 | 0 |
transformers.models.qwen3.modeling_qwen3 (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3ForCausalLM.__init__ |
2 | 0 | 0 |
attr |
Qwen3ForCausalLM.model |
1 | 0 | 0 |
attr |
Qwen3ForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Qwen3ForCausalLM.lm_head |
1 | 0 | 0 |
meth |
Qwen3Model.__init__ |
2 | 1 | 0 |
attr |
Qwen3Model.padding_idx |
1 | 0 | 0 |
attr |
Qwen3Model.vocab_size |
1 | 0 | 0 |
attr |
Qwen3Model.embed_tokens |
1 | 0 | 0 |
attr |
Qwen3Model.layers |
1 | 0 | 0 |
attr |
Qwen3Model.norm |
1 | 0 | 0 |
attr |
Qwen3Model.rotary_emb |
1 | 0 | 0 |
attr |
Qwen3Model.gradient_checkpointing |
1 | 0 | 0 |
attr |
Qwen3Model.has_sliding_layers |
1 | 0 | 0 |
transformers.models.qwen3.modular_qwen3 (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
Qwen3PreTrainedModel |
1 | 0 | 0 |
attr |
Qwen3Model |
1 | 0 | 0 |
transformers.models.qwen3_5.configuration_qwen3_5 (64 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3_5TextConfig.__init__ |
27 | 4 | 0 |
attr |
Qwen3_5TextConfig.pad_token_id |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.bos_token_id |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.eos_token_id |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.vocab_size |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.hidden_size |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.intermediate_size |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.hidden_act |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.initializer_range |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.use_cache |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.attention_bias |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.attention_dropout |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.head_dim |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.rope_parameters |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.layer_types |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.linear_conv_kernel_dim |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.linear_key_head_dim |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.linear_value_head_dim |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.linear_num_key_heads |
1 | 0 | 0 |
attr |
Qwen3_5TextConfig.linear_num_value_heads |
1 | 0 | 0 |
meth |
Qwen3_5Config.__init__ |
9 | 0 | 0 |
attr |
Qwen3_5Config.image_token_id |
1 | 0 | 0 |
attr |
Qwen3_5Config.video_token_id |
1 | 0 | 0 |
attr |
Qwen3_5Config.vision_start_token_id |
1 | 0 | 0 |
attr |
Qwen3_5Config.vision_end_token_id |
1 | 0 | 0 |
attr |
Qwen3_5Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
Qwen3_5Config.vision_config |
1 | 0 | 0 |
attr |
Qwen3_5Config.text_config |
1 | 0 | 0 |
transformers.models.qwen3_5.modeling_qwen3_5 (68 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3_5ForCausalLM.__init__ |
2 | 0 | 0 |
attr |
Qwen3_5ForCausalLM.model |
1 | 0 | 0 |
attr |
Qwen3_5ForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Qwen3_5ForCausalLM.lm_head |
1 | 0 | 0 |
meth |
Qwen3_5PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Qwen3_5ForConditionalGeneration.__init__ |
2 | 0 | 0 |
meth |
Qwen3_5ForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Qwen3_5ForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Qwen3_5ForConditionalGeneration.prepare_inputs_for_generation |
14 | 0 | 0 |
meth |
Qwen3_5ForConditionalGeneration._prepare_position_ids_for_generation |
3 | 0 | 0 |
meth |
Qwen3_5ForConditionalGeneration._expand_inputs_for_generation |
5 | 4 | 0 |
attr |
Qwen3_5ForConditionalGeneration.model |
1 | 0 | 0 |
attr |
Qwen3_5ForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
Qwen3_5Model.__init__ |
2 | 0 | 0 |
meth |
Qwen3_5Model.get_input_embeddings |
1 | 0 | 0 |
meth |
Qwen3_5Model.set_input_embeddings |
2 | 0 | 0 |
meth |
Qwen3_5Model.get_vision_position_ids |
7 | 6 | 0 |
meth |
Qwen3_5Model.get_rope_index |
7 | 6 | 0 |
meth |
Qwen3_5Model.get_placeholder_mask |
5 | 4 | 0 |
attr |
Qwen3_5Model.visual |
1 | 0 | 0 |
attr |
Qwen3_5Model.language_model |
1 | 0 | 0 |
attr |
Qwen3_5Model.rope_deltas |
1 | 0 | 0 |
meth |
Qwen3_5TextModel.__init__ |
2 | 1 | 0 |
meth |
Qwen3_5TextModel._update_linear_attn_mask |
3 | 0 | 0 |
attr |
Qwen3_5TextModel.embed_tokens |
1 | 0 | 0 |
attr |
Qwen3_5TextModel.layers |
1 | 0 | 0 |
attr |
Qwen3_5TextModel.norm |
1 | 0 | 0 |
attr |
Qwen3_5TextModel.rotary_emb |
1 | 0 | 0 |
attr |
Qwen3_5TextModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
Qwen3_5VisionModel.__init__ |
4 | 1 | 0 |
meth |
Qwen3_5VisionModel.fast_pos_embed_interpolate |
2 | 0 | 0 |
meth |
Qwen3_5VisionModel.forward |
4 | 3 | 0 |
attr |
Qwen3_5VisionModel.spatial_merge_size |
1 | 0 | 0 |
attr |
Qwen3_5VisionModel.patch_size |
1 | 0 | 0 |
attr |
Qwen3_5VisionModel.spatial_merge_unit |
1 | 0 | 0 |
attr |
Qwen3_5VisionModel.patch_embed |
1 | 0 | 0 |
attr |
Qwen3_5VisionModel.pos_embed |
1 | 0 | 0 |
attr |
Qwen3_5VisionModel.num_grid_per_side |
1 | 0 | 0 |
attr |
Qwen3_5VisionModel.rotary_pos_emb |
1 | 0 | 0 |
attr |
Qwen3_5VisionModel.blocks |
1 | 0 | 0 |
attr |
Qwen3_5VisionModel.merger |
1 | 0 | 0 |
attr |
Qwen3_5VisionModel.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.qwen3_5.modular_qwen3_5 (46 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3_5ForCausalLM.__init__ |
2 | 0 | 0 |
attr |
Qwen3_5ForCausalLM.model |
1 | 0 | 0 |
meth |
Qwen3_5ForConditionalGeneration.get_video_features |
2 | 1 | 0 |
meth |
Qwen3_5ForConditionalGeneration.get_image_features |
2 | 1 | 0 |
meth |
Qwen3_5Model.get_video_features |
2 | 1 | 0 |
meth |
Qwen3_5TextConfig.__init__ |
27 | 4 | 0 |
meth |
Qwen3_5TextModel.__init__ |
2 | 1 | 0 |
attr |
Qwen3_5TextModel.rotary_emb |
1 | 0 | 0 |
meth |
Qwen3_5VisionModel.__init__ |
4 | 1 | 0 |
meth |
Qwen3_5VisionModel.forward |
4 | 3 | 0 |
meth |
Qwen3_5PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Qwen3_5Config.__init__ |
9 | 0 | 0 |
transformers.models.qwen3_5.tokenization_qwen3_5 (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3_5Tokenizer.__init__ |
11 | 5 | 0 |
attr |
Qwen3_5Tokenizer.add_prefix_space |
1 | 0 | 0 |
transformers.models.qwen3_5_moe.configuration_qwen3_5_moe (74 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3_5MoeTextConfig.__init__ |
32 | 4 | 0 |
attr |
Qwen3_5MoeTextConfig.pad_token_id |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.bos_token_id |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.eos_token_id |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.vocab_size |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.hidden_size |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.hidden_act |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.initializer_range |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.use_cache |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.attention_bias |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.attention_dropout |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.head_dim |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.rope_parameters |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.layer_types |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.linear_conv_kernel_dim |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.linear_key_head_dim |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.linear_value_head_dim |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.linear_num_key_heads |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.linear_num_value_heads |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.moe_intermediate_size |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.shared_expert_intermediate_size |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.num_experts |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.output_router_logits |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextConfig.router_aux_loss_coef |
1 | 0 | 0 |
meth |
Qwen3_5MoeConfig.__init__ |
9 | 0 | 0 |
attr |
Qwen3_5MoeConfig.image_token_id |
1 | 0 | 0 |
attr |
Qwen3_5MoeConfig.video_token_id |
1 | 0 | 0 |
attr |
Qwen3_5MoeConfig.vision_start_token_id |
1 | 0 | 0 |
attr |
Qwen3_5MoeConfig.vision_end_token_id |
1 | 0 | 0 |
attr |
Qwen3_5MoeConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Qwen3_5MoeConfig.vision_config |
1 | 0 | 0 |
attr |
Qwen3_5MoeConfig.text_config |
1 | 0 | 0 |
transformers.models.qwen3_5_moe.modeling_qwen3_5_moe (72 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3_5MoePreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
Qwen3_5MoePreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
Qwen3_5MoeModel.__init__ |
2 | 0 | 0 |
meth |
Qwen3_5MoeModel.get_input_embeddings |
1 | 0 | 0 |
meth |
Qwen3_5MoeModel.set_input_embeddings |
2 | 0 | 0 |
meth |
Qwen3_5MoeModel.get_vision_position_ids |
7 | 6 | 0 |
meth |
Qwen3_5MoeModel.get_rope_index |
7 | 6 | 0 |
meth |
Qwen3_5MoeModel.get_placeholder_mask |
5 | 4 | 0 |
attr |
Qwen3_5MoeModel.visual |
1 | 0 | 0 |
attr |
Qwen3_5MoeModel.language_model |
1 | 0 | 0 |
attr |
Qwen3_5MoeModel.rope_deltas |
1 | 0 | 0 |
meth |
Qwen3_5MoeTextModel.__init__ |
2 | 1 | 0 |
meth |
Qwen3_5MoeTextModel._update_linear_attn_mask |
3 | 0 | 0 |
attr |
Qwen3_5MoeTextModel.embed_tokens |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextModel.layers |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextModel.norm |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextModel.rotary_emb |
1 | 0 | 0 |
attr |
Qwen3_5MoeTextModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
Qwen3_5MoeForConditionalGeneration.__init__ |
2 | 0 | 0 |
meth |
Qwen3_5MoeForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Qwen3_5MoeForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Qwen3_5MoeForConditionalGeneration.prepare_inputs_for_generation |
14 | 0 | 0 |
meth |
Qwen3_5MoeForConditionalGeneration._prepare_position_ids_for_generation |
3 | 0 | 0 |
meth |
Qwen3_5MoeForConditionalGeneration._expand_inputs_for_generation |
5 | 4 | 0 |
attr |
Qwen3_5MoeForConditionalGeneration.model |
1 | 0 | 0 |
attr |
Qwen3_5MoeForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
Qwen3_5MoeForCausalLM.__init__ |
2 | 0 | 0 |
attr |
Qwen3_5MoeForCausalLM.model |
1 | 0 | 0 |
attr |
Qwen3_5MoeForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Qwen3_5MoeForCausalLM.lm_head |
1 | 0 | 0 |
attr |
Qwen3_5MoeForCausalLM.router_aux_loss_coef |
1 | 0 | 0 |
attr |
Qwen3_5MoeForCausalLM.num_experts |
1 | 0 | 0 |
attr |
Qwen3_5MoeForCausalLM.num_experts_per_tok |
1 | 0 | 0 |
meth |
Qwen3_5MoeVisionModel.__init__ |
4 | 1 | 0 |
meth |
Qwen3_5MoeVisionModel.fast_pos_embed_interpolate |
2 | 0 | 0 |
meth |
Qwen3_5MoeVisionModel.forward |
4 | 3 | 0 |
attr |
Qwen3_5MoeVisionModel.spatial_merge_size |
1 | 0 | 0 |
attr |
Qwen3_5MoeVisionModel.patch_size |
1 | 0 | 0 |
attr |
Qwen3_5MoeVisionModel.spatial_merge_unit |
1 | 0 | 0 |
attr |
Qwen3_5MoeVisionModel.patch_embed |
1 | 0 | 0 |
attr |
Qwen3_5MoeVisionModel.pos_embed |
1 | 0 | 0 |
attr |
Qwen3_5MoeVisionModel.num_grid_per_side |
1 | 0 | 0 |
attr |
Qwen3_5MoeVisionModel.rotary_pos_emb |
1 | 0 | 0 |
attr |
Qwen3_5MoeVisionModel.blocks |
1 | 0 | 0 |
attr |
Qwen3_5MoeVisionModel.merger |
1 | 0 | 0 |
attr |
Qwen3_5MoeVisionModel.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.qwen3_5_moe.modular_qwen3_5_moe (46 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3_5MoePreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Qwen3_5MoeConfig.__init__ |
9 | 0 | 0 |
meth |
Qwen3_5MoeForCausalLM.__init__ |
2 | 0 | 0 |
attr |
Qwen3_5MoeForCausalLM.model |
1 | 0 | 0 |
meth |
Qwen3_5MoeTextConfig.__init__ |
32 | 4 | 0 |
meth |
Qwen3_5MoeForConditionalGeneration.forward |
2 | 0 | 0 |
meth |
Qwen3_5MoeForConditionalGeneration.get_video_features |
2 | 1 | 0 |
meth |
Qwen3_5MoeForConditionalGeneration.get_image_features |
2 | 1 | 0 |
transformers.models.qwen3_moe.configuration_qwen3_moe (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3MoeConfig.__init__ |
30 | 28 | 0 |
attr |
Qwen3MoeConfig.vocab_size |
1 | 0 | 0 |
attr |
Qwen3MoeConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Qwen3MoeConfig.hidden_size |
1 | 0 | 0 |
attr |
Qwen3MoeConfig.intermediate_size |
1 | 0 | 0 |
attr |
Qwen3MoeConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Qwen3MoeConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Qwen3MoeConfig.use_sliding_window |
1 | 0 | 0 |
attr |
Qwen3MoeConfig.sliding_window |
1 | 0 | 0 |
attr |
Qwen3MoeConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Qwen3MoeConfig.hidden_act |
1 | 0 | 0 |
attr |
Qwen3MoeConfig.initializer_range |
1 | 0 | 0 |
attr |
Qwen3MoeConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Qwen3MoeConfig.use_cache |
1 | 0 | 0 |
attr |
Qwen3MoeConfig.attention_bias |
1 | 0 | 0 |
attr |
Qwen3MoeConfig.attention_dropout |
1 | 0 | 0 |
attr |
Qwen3MoeConfig.rope_parameters |
1 | 0 | 0 |
attr |
Qwen3MoeConfig.decoder_sparse_step |
1 | 0 | 0 |
attr |
Qwen3MoeConfig.moe_intermediate_size |
1 | 0 | 0 |
attr |
Qwen3MoeConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
Qwen3MoeConfig.num_experts |
1 | 0 | 0 |
attr |
Qwen3MoeConfig.norm_topk_prob |
1 | 0 | 0 |
attr |
Qwen3MoeConfig.output_router_logits |
1 | 0 | 0 |
attr |
Qwen3MoeConfig.router_aux_loss_coef |
1 | 0 | 0 |
attr |
Qwen3MoeConfig.mlp_only_layers |
1 | 0 | 0 |
attr |
Qwen3MoeConfig.pad_token_id |
1 | 0 | 0 |
attr |
Qwen3MoeConfig.bos_token_id |
1 | 0 | 0 |
attr |
Qwen3MoeConfig.eos_token_id |
1 | 0 | 0 |
attr |
Qwen3MoeConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.qwen3_moe.modeling_qwen3_moe (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3MoePreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
Qwen3MoePreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
Qwen3MoeModel.__init__ |
2 | 1 | 0 |
attr |
Qwen3MoeModel.padding_idx |
1 | 0 | 0 |
attr |
Qwen3MoeModel.vocab_size |
1 | 0 | 0 |
attr |
Qwen3MoeModel.embed_tokens |
1 | 0 | 0 |
attr |
Qwen3MoeModel.layers |
1 | 0 | 0 |
attr |
Qwen3MoeModel.norm |
1 | 0 | 0 |
attr |
Qwen3MoeModel.rotary_emb |
1 | 0 | 0 |
attr |
Qwen3MoeModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
Qwen3MoeForCausalLM.__init__ |
2 | 0 | 0 |
attr |
Qwen3MoeForCausalLM.model |
1 | 0 | 0 |
attr |
Qwen3MoeForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Qwen3MoeForCausalLM.lm_head |
1 | 0 | 0 |
attr |
Qwen3MoeForCausalLM.router_aux_loss_coef |
1 | 0 | 0 |
attr |
Qwen3MoeForCausalLM.num_experts |
1 | 0 | 0 |
attr |
Qwen3MoeForCausalLM.num_experts_per_tok |
1 | 0 | 0 |
transformers.models.qwen3_moe.modular_qwen3_moe (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
Qwen3MoePreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
Qwen3MoeForCausalLM.__init__ |
2 | 0 | 0 |
attr |
Qwen3MoeForCausalLM.model |
1 | 0 | 0 |
attr |
Qwen3MoeForCausalLM.num_experts |
1 | 0 | 0 |
transformers.models.qwen3_next.configuration_qwen3_next (36 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3NextConfig.__init__ |
36 | 34 | 0 |
attr |
Qwen3NextConfig.pad_token_id |
1 | 0 | 0 |
attr |
Qwen3NextConfig.bos_token_id |
1 | 0 | 0 |
attr |
Qwen3NextConfig.eos_token_id |
1 | 0 | 0 |
attr |
Qwen3NextConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Qwen3NextConfig.vocab_size |
1 | 0 | 0 |
attr |
Qwen3NextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Qwen3NextConfig.hidden_size |
1 | 0 | 0 |
attr |
Qwen3NextConfig.intermediate_size |
1 | 0 | 0 |
attr |
Qwen3NextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Qwen3NextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Qwen3NextConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Qwen3NextConfig.hidden_act |
1 | 0 | 0 |
attr |
Qwen3NextConfig.initializer_range |
1 | 0 | 0 |
attr |
Qwen3NextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Qwen3NextConfig.use_cache |
1 | 0 | 0 |
attr |
Qwen3NextConfig.attention_bias |
1 | 0 | 0 |
attr |
Qwen3NextConfig.attention_dropout |
1 | 0 | 0 |
attr |
Qwen3NextConfig.head_dim |
1 | 0 | 0 |
attr |
Qwen3NextConfig.rope_parameters |
1 | 0 | 0 |
attr |
Qwen3NextConfig.layer_types |
1 | 0 | 0 |
attr |
Qwen3NextConfig.linear_conv_kernel_dim |
1 | 0 | 0 |
attr |
Qwen3NextConfig.linear_key_head_dim |
1 | 0 | 0 |
attr |
Qwen3NextConfig.linear_value_head_dim |
1 | 0 | 0 |
attr |
Qwen3NextConfig.linear_num_key_heads |
1 | 0 | 0 |
attr |
Qwen3NextConfig.linear_num_value_heads |
1 | 0 | 0 |
attr |
Qwen3NextConfig.decoder_sparse_step |
1 | 0 | 0 |
attr |
Qwen3NextConfig.moe_intermediate_size |
1 | 0 | 0 |
attr |
Qwen3NextConfig.shared_expert_intermediate_size |
1 | 0 | 0 |
attr |
Qwen3NextConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
Qwen3NextConfig.num_experts |
1 | 0 | 0 |
attr |
Qwen3NextConfig.norm_topk_prob |
1 | 0 | 0 |
attr |
Qwen3NextConfig.output_router_logits |
1 | 0 | 0 |
attr |
Qwen3NextConfig.router_aux_loss_coef |
1 | 0 | 0 |
attr |
Qwen3NextConfig.mlp_only_layers |
1 | 0 | 0 |
transformers.models.qwen3_next.modeling_qwen3_next (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3NextForCausalLM.__init__ |
2 | 0 | 0 |
attr |
Qwen3NextForCausalLM.model |
1 | 0 | 0 |
attr |
Qwen3NextForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Qwen3NextForCausalLM.lm_head |
1 | 0 | 0 |
attr |
Qwen3NextForCausalLM.router_aux_loss_coef |
1 | 0 | 0 |
attr |
Qwen3NextForCausalLM.num_experts |
1 | 0 | 0 |
attr |
Qwen3NextForCausalLM.num_experts_per_tok |
1 | 0 | 0 |
meth |
Qwen3NextPreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
Qwen3NextPreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
Qwen3NextModel.__init__ |
2 | 1 | 0 |
meth |
Qwen3NextModel._update_linear_attn_mask |
3 | 0 | 0 |
attr |
Qwen3NextModel.embed_tokens |
1 | 0 | 0 |
attr |
Qwen3NextModel.layers |
1 | 0 | 0 |
attr |
Qwen3NextModel.norm |
1 | 0 | 0 |
attr |
Qwen3NextModel.rotary_emb |
1 | 0 | 0 |
attr |
Qwen3NextModel.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.qwen3_next.modular_qwen3_next (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3NextForCausalLM.__init__ |
2 | 0 | 0 |
attr |
Qwen3NextForCausalLM.num_experts |
1 | 0 | 0 |
meth |
Qwen3NextPreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
Qwen3NextPreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
Qwen3NextModel.__init__ |
2 | 1 | 0 |
meth |
Qwen3NextModel._update_linear_attn_mask |
3 | 0 | 0 |
attr |
Qwen3NextModel.embed_tokens |
1 | 0 | 0 |
attr |
Qwen3NextModel.layers |
1 | 0 | 0 |
attr |
Qwen3NextModel.norm |
1 | 0 | 0 |
attr |
Qwen3NextModel.rotary_emb |
1 | 0 | 0 |
attr |
Qwen3NextModel.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.qwen3_omni_moe.configuration_qwen3_omni_moe (91 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3OmniMoeConfig.__init__ |
14 | 0 | 0 |
meth |
Qwen3OmniMoeConfig.get_text_config |
2 | 1 | 0 |
attr |
Qwen3OmniMoeConfig.thinker_config |
1 | 0 | 0 |
attr |
Qwen3OmniMoeConfig.talker_config |
1 | 0 | 0 |
attr |
Qwen3OmniMoeConfig.code2wav_config |
1 | 0 | 0 |
attr |
Qwen3OmniMoeConfig.initializer_range |
1 | 0 | 0 |
attr |
Qwen3OmniMoeConfig.enable_audio_output |
1 | 0 | 0 |
attr |
Qwen3OmniMoeConfig.im_start_token_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeConfig.im_end_token_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeConfig.tts_pad_token_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeConfig.tts_bos_token_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeConfig.tts_eos_token_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeConfig.system_token_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeConfig.user_token_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeConfig.assistant_token_id |
1 | 0 | 0 |
meth |
Qwen3OmniMoeTalkerConfig.__init__ |
20 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.num_code_groups |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.thinker_hidden_size |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.codec_eos_token_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.accept_hidden_layer |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.codec_nothink_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.codec_think_bos_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.codec_think_eos_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.codec_pad_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.codec_bos_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.audio_token_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.image_token_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.video_token_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.position_id_per_seconds |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.audio_start_token_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.vision_start_token_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.speaker_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.initializer_range |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.code_predictor_config |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.text_config |
1 | 0 | 0 |
meth |
Qwen3OmniMoeThinkerConfig.__init__ |
13 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerConfig.user_token_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerConfig.position_id_per_seconds |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerConfig.audio_start_token_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerConfig.initializer_range |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerConfig.vision_config |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerConfig.audio_config |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerConfig.text_config |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerConfig.audio_token_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerConfig.image_token_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerConfig.video_token_id |
1 | 0 | 0 |
transformers.models.qwen3_omni_moe.modeling_qwen3_omni_moe (214 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3OmniMoeTalkerForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
Qwen3OmniMoeTalkerForConditionalGeneration.forward |
21 | 1 | 0 |
meth |
Qwen3OmniMoeTalkerForConditionalGeneration.get_llm_pos_ids_for_vision |
7 | 6 | 0 |
meth |
Qwen3OmniMoeTalkerForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Qwen3OmniMoeTalkerForConditionalGeneration._update_model_kwargs_for_generation |
5 | 0 | 0 |
meth |
Qwen3OmniMoeTalkerForConditionalGeneration.prepare_inputs_for_generation |
8 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerForConditionalGeneration._can_record_outputs |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerForConditionalGeneration.model |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerForConditionalGeneration.vocab_size |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerForConditionalGeneration.router_aux_loss_coef |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerForConditionalGeneration.num_experts |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerForConditionalGeneration.num_experts_per_tok |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerForConditionalGeneration.text_projection |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerForConditionalGeneration.hidden_projection |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerForConditionalGeneration.codec_head |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerForConditionalGeneration.code_predictor |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerForConditionalGeneration.rope_deltas |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerForConditionalGeneration.spatial_merge_size |
1 | 0 | 0 |
meth |
Qwen3OmniMoeThinkerTextPreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerTextPreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
Qwen3OmniMoeTalkerCodePredictorModelForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
Qwen3OmniMoeTalkerCodePredictorModelForConditionalGeneration.forward |
11 | 1 | 0 |
meth |
Qwen3OmniMoeTalkerCodePredictorModelForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Qwen3OmniMoeTalkerCodePredictorModelForConditionalGeneration._update_model_kwargs_for_generation |
5 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerCodePredictorModelForConditionalGeneration.model |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerCodePredictorModelForConditionalGeneration.vocab_size |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerCodePredictorModelForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
Qwen3OmniMoePreTrainedModelForConditionalGeneration.get_llm_pos_ids_for_vision |
7 | 6 | 0 |
meth |
Qwen3OmniMoeCode2WavTransformerModel.__init__ |
2 | 1 | 0 |
meth |
Qwen3OmniMoeCode2WavTransformerModel.forward |
9 | 1 | 0 |
attr |
Qwen3OmniMoeCode2WavTransformerModel.layers |
1 | 0 | 0 |
attr |
Qwen3OmniMoeCode2WavTransformerModel.norm |
1 | 0 | 0 |
attr |
Qwen3OmniMoeCode2WavTransformerModel.rotary_emb |
1 | 0 | 0 |
attr |
Qwen3OmniMoeCode2WavTransformerModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
Qwen3OmniMoeCode2WavTransformerModel.has_sliding_layers |
1 | 0 | 0 |
attr |
Qwen3OmniMoeCode2WavTransformerModel.window_size |
1 | 0 | 0 |
meth |
Qwen3OmniMoeTalkerCodePredictorModel.__init__ |
2 | 1 | 0 |
meth |
Qwen3OmniMoeTalkerCodePredictorModel.get_input_embeddings |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerCodePredictorModel.padding_idx |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerCodePredictorModel.vocab_size |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerCodePredictorModel.layers |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerCodePredictorModel.norm |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerCodePredictorModel.rotary_emb |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerCodePredictorModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerCodePredictorModel.has_sliding_layers |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerCodePredictorModel.codec_embedding |
1 | 0 | 0 |
meth |
Qwen3OmniMoeCode2Wav.__init__ |
2 | 1 | 0 |
meth |
Qwen3OmniMoeCode2Wav.forward |
3 | 0 | 0 |
meth |
Qwen3OmniMoeCode2Wav.chunked_decode |
4 | 0 | 0 |
attr |
Qwen3OmniMoeCode2Wav.total_upsample |
1 | 0 | 0 |
attr |
Qwen3OmniMoeCode2Wav.pre_transformer |
1 | 0 | 0 |
attr |
Qwen3OmniMoeCode2Wav.code_embedding |
1 | 0 | 0 |
attr |
Qwen3OmniMoeCode2Wav.upsample |
1 | 0 | 0 |
attr |
Qwen3OmniMoeCode2Wav.decoder |
1 | 0 | 0 |
meth |
Qwen3OmniMoeForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
Qwen3OmniMoeForConditionalGeneration.enable_talker |
1 | 0 | 0 |
meth |
Qwen3OmniMoeForConditionalGeneration.disable_talker |
1 | 0 | 0 |
meth |
Qwen3OmniMoeForConditionalGeneration._get_talker_user_parts |
6 | 0 | 0 |
meth |
Qwen3OmniMoeForConditionalGeneration._get_talker_assistant_parts |
8 | 0 | 0 |
meth |
Qwen3OmniMoeForConditionalGeneration.generate |
14 | 12 | 0 |
attr |
Qwen3OmniMoeForConditionalGeneration.thinker |
1 | 0 | 0 |
attr |
Qwen3OmniMoeForConditionalGeneration.has_talker |
1 | 0 | 0 |
meth |
Qwen3OmniMoeTalkerModel.__init__ |
2 | 1 | 0 |
meth |
Qwen3OmniMoeTalkerModel._deepstack_process |
4 | 3 | 0 |
meth |
Qwen3OmniMoeTalkerModel.get_input_embeddings |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerModel._can_record_outputs |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerModel.padding_idx |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerModel.vocab_size |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerModel.layers |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerModel.norm |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerModel.rotary_emb |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerModel.codec_embedding |
1 | 0 | 0 |
meth |
Qwen3OmniMoeThinkerForConditionalGeneration.__init__ |
2 | 0 | 0 |
meth |
Qwen3OmniMoeThinkerForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Qwen3OmniMoeThinkerForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Qwen3OmniMoeThinkerForConditionalGeneration.get_placeholder_mask |
5 | 4 | 0 |
meth |
Qwen3OmniMoeThinkerForConditionalGeneration.forward |
21 | 2 | 0 |
meth |
Qwen3OmniMoeThinkerForConditionalGeneration.prepare_inputs_for_generation |
18 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerForConditionalGeneration._can_record_outputs |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerForConditionalGeneration.audio_tower |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerForConditionalGeneration.visual |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerForConditionalGeneration.vocab_size |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerForConditionalGeneration.model |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerForConditionalGeneration.lm_head |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerForConditionalGeneration.spatial_merge_size |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerForConditionalGeneration.rope_deltas |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerForConditionalGeneration.num_experts |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerForConditionalGeneration.num_experts_per_tok |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerForConditionalGeneration.router_aux_loss_coef |
1 | 0 | 0 |
meth |
Qwen3OmniMoeThinkerTextModel.__init__ |
2 | 1 | 0 |
meth |
Qwen3OmniMoeThinkerTextModel._deepstack_process |
4 | 3 | 0 |
attr |
Qwen3OmniMoeThinkerTextModel._can_record_outputs |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerTextModel.padding_idx |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerTextModel.vocab_size |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerTextModel.embed_tokens |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerTextModel.layers |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerTextModel.norm |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerTextModel.rotary_emb |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerTextModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
Qwen3OmniMoeCode2WavDecoderBlock.__init__ |
3 | 1 | 0 |
meth |
Qwen3OmniMoeCode2WavDecoderBlock.forward |
3 | 0 | 0 |
attr |
Qwen3OmniMoeCode2WavDecoderBlock.block |
1 | 0 | 0 |
meth |
Qwen3OmniMoePreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.qwen3_omni_moe.modular_qwen3_omni_moe (261 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3OmniMoeTalkerForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
Qwen3OmniMoeTalkerForConditionalGeneration.get_llm_pos_ids_for_vision |
7 | 6 | 0 |
meth |
Qwen3OmniMoeTalkerForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Qwen3OmniMoeTalkerForConditionalGeneration.forward |
21 | 0 | 0 |
meth |
Qwen3OmniMoeTalkerForConditionalGeneration._update_model_kwargs_for_generation |
5 | 0 | 0 |
meth |
Qwen3OmniMoeTalkerForConditionalGeneration.prepare_inputs_for_generation |
8 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerForConditionalGeneration._can_record_outputs |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerForConditionalGeneration.model |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerForConditionalGeneration.text_projection |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerForConditionalGeneration.hidden_projection |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerForConditionalGeneration.codec_head |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerForConditionalGeneration.code_predictor |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerForConditionalGeneration.rope_deltas |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerForConditionalGeneration.spatial_merge_size |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerForConditionalGeneration.vocab_size |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerForConditionalGeneration.router_aux_loss_coef |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerForConditionalGeneration.num_experts |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerForConditionalGeneration.num_experts_per_tok |
1 | 0 | 0 |
meth |
Qwen3OmniMoeThinkerConfig.__init__ |
13 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerConfig.audio_token_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerConfig.image_token_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerConfig.video_token_id |
1 | 0 | 0 |
meth |
Qwen3OmniMoeTalkerCodePredictorModel.__init__ |
2 | 1 | 0 |
meth |
Qwen3OmniMoeTalkerCodePredictorModel.get_input_embeddings |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerCodePredictorModel.layers |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerCodePredictorModel.codec_embedding |
1 | 0 | 0 |
meth |
Qwen3OmniMoeForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
Qwen3OmniMoeForConditionalGeneration.enable_talker |
1 | 0 | 0 |
meth |
Qwen3OmniMoeForConditionalGeneration.disable_talker |
1 | 0 | 0 |
meth |
Qwen3OmniMoeForConditionalGeneration._get_talker_user_parts |
6 | 0 | 0 |
meth |
Qwen3OmniMoeForConditionalGeneration._get_talker_assistant_parts |
8 | 0 | 0 |
meth |
Qwen3OmniMoeForConditionalGeneration.generate |
14 | 12 | 0 |
attr |
Qwen3OmniMoeForConditionalGeneration.thinker |
1 | 0 | 0 |
attr |
Qwen3OmniMoeForConditionalGeneration.has_talker |
1 | 0 | 0 |
meth |
Qwen3OmniMoeTalkerModel.__init__ |
2 | 1 | 0 |
meth |
Qwen3OmniMoeTalkerModel.get_input_embeddings |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerModel._can_record_outputs |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerModel.codec_embedding |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerModel.layers |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerModel.rotary_emb |
1 | 0 | 0 |
meth |
Qwen3OmniMoeThinkerTextModel.__init__ |
2 | 1 | 0 |
attr |
Qwen3OmniMoeThinkerTextModel._can_record_outputs |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerTextModel.layers |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerTextModel.rotary_emb |
1 | 0 | 0 |
meth |
Qwen3OmniMoeThinkerForConditionalGeneration.__init__ |
2 | 0 | 0 |
meth |
Qwen3OmniMoeThinkerForConditionalGeneration.forward |
21 | 2 | 0 |
attr |
Qwen3OmniMoeThinkerForConditionalGeneration._can_record_outputs |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerForConditionalGeneration.num_experts |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerForConditionalGeneration.num_experts_per_tok |
1 | 0 | 0 |
attr |
Qwen3OmniMoeThinkerForConditionalGeneration.router_aux_loss_coef |
1 | 0 | 0 |
meth |
Qwen3OmniMoeProcessor.replace_multimodal_special_tokens |
9 | 0 | 0 |
meth |
Qwen3OmniMoeProcessor.__call__ |
6 | 4 | 0 |
meth |
Qwen3OmniMoeProcessor.apply_chat_template |
4 | 0 | 0 |
meth |
Qwen3OmniMoeCode2Wav.__init__ |
2 | 1 | 0 |
meth |
Qwen3OmniMoeCode2Wav.forward |
3 | 0 | 0 |
meth |
Qwen3OmniMoeCode2Wav.chunked_decode |
4 | 0 | 0 |
attr |
Qwen3OmniMoeCode2Wav.total_upsample |
1 | 0 | 0 |
attr |
Qwen3OmniMoeCode2Wav.pre_transformer |
1 | 0 | 0 |
attr |
Qwen3OmniMoeCode2Wav.code_embedding |
1 | 0 | 0 |
attr |
Qwen3OmniMoeCode2Wav.upsample |
1 | 0 | 0 |
attr |
Qwen3OmniMoeCode2Wav.decoder |
1 | 0 | 0 |
meth |
Qwen3OmniMoeTalkerCodePredictorModelForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
Qwen3OmniMoeTalkerCodePredictorModelForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Qwen3OmniMoeTalkerCodePredictorModelForConditionalGeneration.forward |
11 | 0 | 0 |
meth |
Qwen3OmniMoeTalkerCodePredictorModelForConditionalGeneration._update_model_kwargs_for_generation |
5 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerCodePredictorModelForConditionalGeneration.model |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerCodePredictorModelForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
Qwen3OmniMoePreTrainedModelForConditionalGeneration.get_llm_pos_ids_for_vision |
7 | 6 | 0 |
meth |
Qwen3OmniMoeCode2WavTransformerModel.__init__ |
2 | 1 | 0 |
meth |
Qwen3OmniMoeCode2WavTransformerModel.forward |
9 | 0 | 0 |
attr |
Qwen3OmniMoeCode2WavTransformerModel.window_size |
1 | 0 | 0 |
attr |
Qwen3OmniMoeCode2WavTransformerModel.layers |
1 | 0 | 0 |
meth |
Qwen3OmniMoeTalkerConfig.__init__ |
20 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.num_code_groups |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.thinker_hidden_size |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.codec_eos_token_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.accept_hidden_layer |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.codec_nothink_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.codec_think_bos_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.codec_think_eos_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.codec_pad_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.codec_bos_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.audio_token_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.image_token_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.video_token_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.position_id_per_seconds |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.audio_start_token_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.vision_start_token_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.speaker_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.initializer_range |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.code_predictor_config |
1 | 0 | 0 |
attr |
Qwen3OmniMoeTalkerConfig.text_config |
1 | 0 | 0 |
meth |
Qwen3OmniMoeCode2WavDecoderBlock.__init__ |
3 | 1 | 0 |
meth |
Qwen3OmniMoeCode2WavDecoderBlock.forward |
3 | 0 | 0 |
attr |
Qwen3OmniMoeCode2WavDecoderBlock.block |
1 | 0 | 0 |
meth |
Qwen3OmniMoeConfig.__init__ |
14 | 0 | 0 |
meth |
Qwen3OmniMoeConfig.get_text_config |
2 | 1 | 0 |
attr |
Qwen3OmniMoeConfig.thinker_config |
1 | 0 | 0 |
attr |
Qwen3OmniMoeConfig.talker_config |
1 | 0 | 0 |
attr |
Qwen3OmniMoeConfig.code2wav_config |
1 | 0 | 0 |
attr |
Qwen3OmniMoeConfig.initializer_range |
1 | 0 | 0 |
attr |
Qwen3OmniMoeConfig.enable_audio_output |
1 | 0 | 0 |
attr |
Qwen3OmniMoeConfig.im_start_token_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeConfig.im_end_token_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeConfig.tts_pad_token_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeConfig.tts_bos_token_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeConfig.tts_eos_token_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeConfig.system_token_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeConfig.user_token_id |
1 | 0 | 0 |
attr |
Qwen3OmniMoeConfig.assistant_token_id |
1 | 0 | 0 |
meth |
Qwen3OmniMoePreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.qwen3_omni_moe.processing_qwen3_omni_moe (37 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3OmniMoeProcessor.__init__ |
6 | 0 | 0 |
meth |
Qwen3OmniMoeProcessor.__call__ |
6 | 5 | 0 |
meth |
Qwen3OmniMoeProcessor.replace_multimodal_special_tokens |
9 | 0 | 0 |
meth |
Qwen3OmniMoeProcessor.apply_chat_template |
4 | 0 | 0 |
meth |
Qwen3OmniMoeProcessor.post_process_image_text_to_text |
4 | 0 | 0 |
meth |
Qwen3OmniMoeProcessor.post_process_multimodal_output |
5 | 0 | 0 |
prop |
Qwen3OmniMoeProcessor.model_input_names |
1 | 0 | 0 |
attr |
Qwen3OmniMoeProcessor.image_token |
1 | 0 | 0 |
attr |
Qwen3OmniMoeProcessor.audio_token |
1 | 0 | 0 |
attr |
Qwen3OmniMoeProcessor.video_token |
1 | 0 | 0 |
attr |
Qwen3OmniMoeProcessor.vision_bos_token |
1 | 0 | 0 |
attr |
Qwen3OmniMoeProcessor.vision_eos_token |
1 | 0 | 0 |
attr |
Qwen3OmniMoeProcessor.audio_bos_token |
1 | 0 | 0 |
attr |
Qwen3OmniMoeProcessor.audio_eos_token |
1 | 0 | 0 |
transformers.models.qwen3_vl.configuration_qwen3_vl (34 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3VLTextConfig.__init__ |
18 | 16 | 0 |
attr |
Qwen3VLTextConfig.vocab_size |
1 | 0 | 0 |
attr |
Qwen3VLTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Qwen3VLTextConfig.hidden_size |
1 | 0 | 0 |
attr |
Qwen3VLTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
Qwen3VLTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Qwen3VLTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Qwen3VLTextConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Qwen3VLTextConfig.head_dim |
1 | 0 | 0 |
attr |
Qwen3VLTextConfig.hidden_act |
1 | 0 | 0 |
attr |
Qwen3VLTextConfig.initializer_range |
1 | 0 | 0 |
attr |
Qwen3VLTextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Qwen3VLTextConfig.use_cache |
1 | 0 | 0 |
attr |
Qwen3VLTextConfig.attention_bias |
1 | 0 | 0 |
attr |
Qwen3VLTextConfig.attention_dropout |
1 | 0 | 0 |
attr |
Qwen3VLTextConfig.rope_parameters |
1 | 0 | 0 |
attr |
Qwen3VLTextConfig.pad_token_id |
1 | 0 | 0 |
meth |
Qwen3VLConfig.__init__ |
9 | 0 | 0 |
attr |
Qwen3VLConfig.image_token_id |
1 | 0 | 0 |
attr |
Qwen3VLConfig.video_token_id |
1 | 0 | 0 |
attr |
Qwen3VLConfig.vision_start_token_id |
1 | 0 | 0 |
attr |
Qwen3VLConfig.vision_end_token_id |
1 | 0 | 0 |
attr |
Qwen3VLConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Qwen3VLConfig.vision_config |
1 | 0 | 0 |
attr |
Qwen3VLConfig.text_config |
1 | 0 | 0 |
transformers.models.qwen3_vl.modeling_qwen3_vl (64 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3VLTextModel.__init__ |
2 | 1 | 0 |
meth |
Qwen3VLTextModel._deepstack_process |
4 | 3 | 0 |
attr |
Qwen3VLTextModel.padding_idx |
1 | 0 | 0 |
attr |
Qwen3VLTextModel.vocab_size |
1 | 0 | 0 |
attr |
Qwen3VLTextModel.embed_tokens |
1 | 0 | 0 |
attr |
Qwen3VLTextModel.layers |
1 | 0 | 0 |
attr |
Qwen3VLTextModel.norm |
1 | 0 | 0 |
attr |
Qwen3VLTextModel.rotary_emb |
1 | 0 | 0 |
attr |
Qwen3VLTextModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
Qwen3VLModel.__init__ |
2 | 0 | 0 |
meth |
Qwen3VLModel.get_input_embeddings |
1 | 0 | 0 |
meth |
Qwen3VLModel.set_input_embeddings |
2 | 0 | 0 |
meth |
Qwen3VLModel.get_vision_position_ids |
7 | 6 | 0 |
meth |
Qwen3VLModel.get_rope_index |
7 | 6 | 0 |
meth |
Qwen3VLModel.get_placeholder_mask |
5 | 4 | 0 |
attr |
Qwen3VLModel.visual |
1 | 0 | 0 |
attr |
Qwen3VLModel.language_model |
1 | 0 | 0 |
attr |
Qwen3VLModel.rope_deltas |
1 | 0 | 0 |
meth |
Qwen3VLPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Qwen3VLForConditionalGeneration.__init__ |
2 | 0 | 0 |
meth |
Qwen3VLForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Qwen3VLForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Qwen3VLForConditionalGeneration.prepare_inputs_for_generation |
14 | 0 | 0 |
meth |
Qwen3VLForConditionalGeneration._prepare_position_ids_for_generation |
3 | 0 | 0 |
meth |
Qwen3VLForConditionalGeneration._expand_inputs_for_generation |
5 | 4 | 0 |
attr |
Qwen3VLForConditionalGeneration.model |
1 | 0 | 0 |
attr |
Qwen3VLForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
Qwen3VLVisionModel.__init__ |
4 | 1 | 0 |
meth |
Qwen3VLVisionModel.fast_pos_embed_interpolate |
2 | 0 | 0 |
attr |
Qwen3VLVisionModel.spatial_merge_size |
1 | 0 | 0 |
attr |
Qwen3VLVisionModel.patch_size |
1 | 0 | 0 |
attr |
Qwen3VLVisionModel.spatial_merge_unit |
1 | 0 | 0 |
attr |
Qwen3VLVisionModel.patch_embed |
1 | 0 | 0 |
attr |
Qwen3VLVisionModel.pos_embed |
1 | 0 | 0 |
attr |
Qwen3VLVisionModel.num_grid_per_side |
1 | 0 | 0 |
attr |
Qwen3VLVisionModel.rotary_pos_emb |
1 | 0 | 0 |
attr |
Qwen3VLVisionModel.blocks |
1 | 0 | 0 |
attr |
Qwen3VLVisionModel.merger |
1 | 0 | 0 |
attr |
Qwen3VLVisionModel.deepstack_visual_indexes |
1 | 0 | 0 |
attr |
Qwen3VLVisionModel.deepstack_merger_list |
1 | 0 | 0 |
attr |
Qwen3VLVisionModel.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.qwen3_vl.modular_qwen3_vl (87 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3VLPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Qwen3VLForConditionalGeneration.get_image_features |
2 | 1 | 0 |
meth |
Qwen3VLForConditionalGeneration.get_video_features |
2 | 1 | 0 |
meth |
Qwen3VLForConditionalGeneration.prepare_inputs_for_generation |
14 | 0 | 0 |
meth |
Qwen3VLForConditionalGeneration._expand_inputs_for_generation |
5 | 4 | 0 |
meth |
Qwen3VLConfig.__init__ |
9 | 0 | 0 |
attr |
Qwen3VLConfig.image_token_id |
1 | 0 | 0 |
attr |
Qwen3VLConfig.video_token_id |
1 | 0 | 0 |
attr |
Qwen3VLConfig.vision_start_token_id |
1 | 0 | 0 |
attr |
Qwen3VLConfig.vision_end_token_id |
1 | 0 | 0 |
attr |
Qwen3VLConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Qwen3VLConfig.vision_config |
1 | 0 | 0 |
attr |
Qwen3VLConfig.text_config |
1 | 0 | 0 |
meth |
Qwen3VLProcessor.__init__ |
6 | 0 | 0 |
meth |
Qwen3VLProcessor._calculate_timestamps |
4 | 3 | 0 |
attr |
Qwen3VLProcessor.vision_start_token |
1 | 0 | 0 |
attr |
Qwen3VLProcessor.vision_end_token |
1 | 0 | 0 |
attr |
Qwen3VLProcessor.vision_start_token_id |
1 | 0 | 0 |
attr |
Qwen3VLProcessor.vision_end_token_id |
1 | 0 | 0 |
meth |
Qwen3VLTextModel.__init__ |
2 | 1 | 0 |
meth |
Qwen3VLTextModel._deepstack_process |
4 | 3 | 0 |
meth |
Qwen3VLModel.__init__ |
2 | 0 | 0 |
attr |
Qwen3VLModel.visual |
1 | 0 | 0 |
attr |
Qwen3VLModel.language_model |
1 | 0 | 0 |
meth |
Qwen3VLTextConfig.__init__ |
18 | 16 | 0 |
attr |
Qwen3VLTextConfig.vocab_size |
1 | 0 | 0 |
attr |
Qwen3VLTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Qwen3VLTextConfig.hidden_size |
1 | 0 | 0 |
attr |
Qwen3VLTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
Qwen3VLTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Qwen3VLTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Qwen3VLTextConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Qwen3VLTextConfig.head_dim |
1 | 0 | 0 |
attr |
Qwen3VLTextConfig.hidden_act |
1 | 0 | 0 |
attr |
Qwen3VLTextConfig.initializer_range |
1 | 0 | 0 |
attr |
Qwen3VLTextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Qwen3VLTextConfig.use_cache |
1 | 0 | 0 |
attr |
Qwen3VLTextConfig.attention_bias |
1 | 0 | 0 |
attr |
Qwen3VLTextConfig.attention_dropout |
1 | 0 | 0 |
attr |
Qwen3VLTextConfig.rope_parameters |
1 | 0 | 0 |
attr |
Qwen3VLTextConfig.pad_token_id |
1 | 0 | 0 |
meth |
Qwen3VLVisionModel.__init__ |
4 | 1 | 0 |
meth |
Qwen3VLVisionModel.fast_pos_embed_interpolate |
2 | 0 | 0 |
attr |
Qwen3VLVisionModel.spatial_merge_size |
1 | 0 | 0 |
attr |
Qwen3VLVisionModel.patch_size |
1 | 0 | 0 |
attr |
Qwen3VLVisionModel.spatial_merge_unit |
1 | 0 | 0 |
attr |
Qwen3VLVisionModel.patch_embed |
1 | 0 | 0 |
attr |
Qwen3VLVisionModel.pos_embed |
1 | 0 | 0 |
attr |
Qwen3VLVisionModel.num_grid_per_side |
1 | 0 | 0 |
attr |
Qwen3VLVisionModel.rotary_pos_emb |
1 | 0 | 0 |
attr |
Qwen3VLVisionModel.blocks |
1 | 0 | 0 |
attr |
Qwen3VLVisionModel.merger |
1 | 0 | 0 |
attr |
Qwen3VLVisionModel.deepstack_visual_indexes |
1 | 0 | 0 |
attr |
Qwen3VLVisionModel.deepstack_merger_list |
1 | 0 | 0 |
attr |
Qwen3VLVisionModel.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.qwen3_vl.processing_qwen3_vl (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3VLProcessor.__init__ |
6 | 0 | 0 |
meth |
Qwen3VLProcessor._get_num_multimodal_tokens |
4 | 0 | 0 |
meth |
Qwen3VLProcessor.post_process_image_text_to_text |
5 | 0 | 0 |
meth |
Qwen3VLProcessor._calculate_timestamps |
4 | 3 | 0 |
prop |
Qwen3VLProcessor.model_input_names |
1 | 0 | 0 |
attr |
Qwen3VLProcessor.image_token |
1 | 0 | 0 |
attr |
Qwen3VLProcessor.video_token |
1 | 0 | 0 |
attr |
Qwen3VLProcessor.image_token_id |
1 | 0 | 0 |
attr |
Qwen3VLProcessor.video_token_id |
1 | 0 | 0 |
attr |
Qwen3VLProcessor.vision_start_token |
1 | 0 | 0 |
attr |
Qwen3VLProcessor.vision_end_token |
1 | 0 | 0 |
attr |
Qwen3VLProcessor.vision_start_token_id |
1 | 0 | 0 |
attr |
Qwen3VLProcessor.vision_end_token_id |
1 | 0 | 0 |
transformers.models.qwen3_vl.video_processing_qwen3_vl (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3VLVideoProcessor.__init__ |
2 | 1 | 0 |
meth |
Qwen3VLVideoProcessor._further_process_kwargs |
3 | 2 | 0 |
meth |
Qwen3VLVideoProcessor.sample_frames |
5 | 3 | 0 |
meth |
Qwen3VLVideoProcessor._preprocess |
16 | 14 | 0 |
transformers.models.qwen3_vl_moe.configuration_qwen3_vl_moe (39 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3VLMoeTextConfig.__init__ |
23 | 21 | 0 |
attr |
Qwen3VLMoeTextConfig.vocab_size |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.hidden_size |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.hidden_act |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.initializer_range |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.use_cache |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.attention_bias |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.attention_dropout |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.head_dim |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.rope_parameters |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.decoder_sparse_step |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.moe_intermediate_size |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.num_experts |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.mlp_only_layers |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.pad_token_id |
1 | 0 | 0 |
meth |
Qwen3VLMoeConfig.__init__ |
9 | 0 | 0 |
attr |
Qwen3VLMoeConfig.image_token_id |
1 | 0 | 0 |
attr |
Qwen3VLMoeConfig.video_token_id |
1 | 0 | 0 |
attr |
Qwen3VLMoeConfig.vision_start_token_id |
1 | 0 | 0 |
attr |
Qwen3VLMoeConfig.vision_end_token_id |
1 | 0 | 0 |
attr |
Qwen3VLMoeConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
Qwen3VLMoeConfig.vision_config |
1 | 0 | 0 |
attr |
Qwen3VLMoeConfig.text_config |
1 | 0 | 0 |
transformers.models.qwen3_vl_moe.modeling_qwen3_vl_moe (66 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3VLMoeForConditionalGeneration.__init__ |
2 | 0 | 0 |
meth |
Qwen3VLMoeForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
Qwen3VLMoeForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
Qwen3VLMoeForConditionalGeneration.prepare_inputs_for_generation |
14 | 0 | 0 |
meth |
Qwen3VLMoeForConditionalGeneration._prepare_position_ids_for_generation |
3 | 0 | 0 |
meth |
Qwen3VLMoeForConditionalGeneration._expand_inputs_for_generation |
5 | 4 | 0 |
attr |
Qwen3VLMoeForConditionalGeneration.model |
1 | 0 | 0 |
attr |
Qwen3VLMoeForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
Qwen3VLMoeModel.__init__ |
2 | 0 | 0 |
meth |
Qwen3VLMoeModel.get_input_embeddings |
1 | 0 | 0 |
meth |
Qwen3VLMoeModel.set_input_embeddings |
2 | 0 | 0 |
meth |
Qwen3VLMoeModel.get_vision_position_ids |
7 | 6 | 0 |
meth |
Qwen3VLMoeModel.get_rope_index |
7 | 6 | 0 |
meth |
Qwen3VLMoeModel.get_placeholder_mask |
5 | 4 | 0 |
attr |
Qwen3VLMoeModel.visual |
1 | 0 | 0 |
attr |
Qwen3VLMoeModel.language_model |
1 | 0 | 0 |
attr |
Qwen3VLMoeModel.rope_deltas |
1 | 0 | 0 |
meth |
Qwen3VLMoeVisionModel.__init__ |
4 | 1 | 0 |
meth |
Qwen3VLMoeVisionModel.fast_pos_embed_interpolate |
2 | 0 | 0 |
attr |
Qwen3VLMoeVisionModel._can_record_outputs |
1 | 0 | 0 |
attr |
Qwen3VLMoeVisionModel.spatial_merge_size |
1 | 0 | 0 |
attr |
Qwen3VLMoeVisionModel.patch_size |
1 | 0 | 0 |
attr |
Qwen3VLMoeVisionModel.spatial_merge_unit |
1 | 0 | 0 |
attr |
Qwen3VLMoeVisionModel.patch_embed |
1 | 0 | 0 |
attr |
Qwen3VLMoeVisionModel.pos_embed |
1 | 0 | 0 |
attr |
Qwen3VLMoeVisionModel.num_grid_per_side |
1 | 0 | 0 |
attr |
Qwen3VLMoeVisionModel.rotary_pos_emb |
1 | 0 | 0 |
attr |
Qwen3VLMoeVisionModel.blocks |
1 | 0 | 0 |
attr |
Qwen3VLMoeVisionModel.merger |
1 | 0 | 0 |
attr |
Qwen3VLMoeVisionModel.deepstack_visual_indexes |
1 | 0 | 0 |
attr |
Qwen3VLMoeVisionModel.deepstack_merger_list |
1 | 0 | 0 |
attr |
Qwen3VLMoeVisionModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
Qwen3VLMoeTextModel.__init__ |
2 | 1 | 0 |
meth |
Qwen3VLMoeTextModel._deepstack_process |
4 | 3 | 0 |
attr |
Qwen3VLMoeTextModel.padding_idx |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextModel.vocab_size |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextModel.embed_tokens |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextModel.layers |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextModel.norm |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextModel.rotary_emb |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
Qwen3VLMoePreTrainedModel._init_weights |
2 | 0 | 0 |
attr |
Qwen3VLMoePreTrainedModel._can_record_outputs |
1 | 0 | 0 |
transformers.models.qwen3_vl_moe.modular_qwen3_vl_moe (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Qwen3VLMoeTextConfig.__init__ |
23 | 21 | 0 |
attr |
Qwen3VLMoeTextConfig.vocab_size |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.hidden_size |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.hidden_act |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.initializer_range |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.use_cache |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.attention_bias |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.attention_dropout |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.head_dim |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.rope_parameters |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.decoder_sparse_step |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.moe_intermediate_size |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.num_experts |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.mlp_only_layers |
1 | 0 | 0 |
attr |
Qwen3VLMoeTextConfig.pad_token_id |
1 | 0 | 0 |
attr |
Qwen3VLMoeModel |
1 | 0 | 0 |
attr |
Qwen3VLMoeVisionModel._can_record_outputs |
1 | 0 | 0 |
meth |
Qwen3VLMoePreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.rag.configuration_rag (58 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RagConfig.__init__ |
29 | 0 | 0 |
meth |
RagConfig.from_question_encoder_generator_configs |
4 | 3 | 0 |
attr |
RagConfig.bos_token_id |
1 | 0 | 0 |
attr |
RagConfig.pad_token_id |
1 | 0 | 0 |
attr |
RagConfig.eos_token_id |
1 | 0 | 0 |
attr |
RagConfig.decoder_start_token_id |
1 | 0 | 0 |
attr |
RagConfig.prefix |
1 | 0 | 0 |
attr |
RagConfig.vocab_size |
1 | 0 | 0 |
attr |
RagConfig.question_encoder |
1 | 0 | 0 |
attr |
RagConfig.generator |
1 | 0 | 0 |
attr |
RagConfig.reduce_loss |
1 | 0 | 0 |
attr |
RagConfig.label_smoothing |
1 | 0 | 0 |
attr |
RagConfig.exclude_bos_score |
1 | 0 | 0 |
attr |
RagConfig.do_marginalize |
1 | 0 | 0 |
attr |
RagConfig.title_sep |
1 | 0 | 0 |
attr |
RagConfig.doc_sep |
1 | 0 | 0 |
attr |
RagConfig.n_docs |
1 | 0 | 0 |
attr |
RagConfig.max_combined_length |
1 | 0 | 0 |
attr |
RagConfig.dataset |
1 | 0 | 0 |
attr |
RagConfig.dataset_split |
1 | 0 | 0 |
attr |
RagConfig.index_name |
1 | 0 | 0 |
attr |
RagConfig.retrieval_vector_size |
1 | 0 | 0 |
attr |
RagConfig.retrieval_batch_size |
1 | 0 | 0 |
attr |
RagConfig.passages_path |
1 | 0 | 0 |
attr |
RagConfig.index_path |
1 | 0 | 0 |
attr |
RagConfig.use_dummy_dataset |
1 | 0 | 0 |
attr |
RagConfig.dataset_revision |
1 | 0 | 0 |
attr |
RagConfig.output_retrieved |
1 | 0 | 0 |
attr |
RagConfig.do_deduplication |
1 | 0 | 0 |
attr |
RagConfig.use_cache |
1 | 0 | 0 |
transformers.models.rag.modeling_rag (73 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RagModel.__init__ |
6 | 4 | 0 |
meth |
RagModel.forward |
16 | 15 | 0 |
attr |
RagModel.retriever |
1 | 0 | 0 |
attr |
RagModel.question_encoder |
1 | 0 | 0 |
attr |
RagModel.generator |
1 | 0 | 0 |
attr |
RagModel.ctx_encoder |
1 | 0 | 0 |
attr |
RagModel.context_encoder_training |
1 | 0 | 0 |
meth |
RagPreTrainedModel.from_pretrained_question_encoder_generator |
5 | 4 | 0 |
meth |
RagSequenceForGeneration.__init__ |
6 | 4 | 0 |
meth |
RagSequenceForGeneration.set_retriever |
2 | 1 | 0 |
meth |
RagSequenceForGeneration.set_context_encoder_for_training |
2 | 1 | 0 |
meth |
RagSequenceForGeneration.forward |
19 | 18 | 0 |
meth |
RagSequenceForGeneration.generate |
11 | 10 | 0 |
meth |
RagSequenceForGeneration.get_nll |
8 | 0 | 0 |
meth |
RagSequenceForGeneration._cat_and_pad |
3 | 0 | 0 |
prop |
RagSequenceForGeneration.retriever |
1 | 0 | 0 |
prop |
RagSequenceForGeneration.generator |
1 | 0 | 0 |
prop |
RagSequenceForGeneration.question_encoder |
1 | 0 | 0 |
attr |
RagSequenceForGeneration.rag |
1 | 0 | 0 |
meth |
RagTokenForGeneration.__init__ |
6 | 4 | 0 |
meth |
RagTokenForGeneration.set_retriever |
2 | 1 | 0 |
meth |
RagTokenForGeneration.set_context_encoder_for_training |
2 | 1 | 0 |
meth |
RagTokenForGeneration.prepare_inputs_for_generation |
9 | 0 | 0 |
meth |
RagTokenForGeneration._reorder_cache |
3 | 0 | 0 |
meth |
RagTokenForGeneration.marginalize |
4 | 0 | 0 |
meth |
RagTokenForGeneration.forward |
19 | 18 | 0 |
meth |
RagTokenForGeneration.generate |
12 | 11 | 0 |
meth |
RagTokenForGeneration._temporary_reorder_cache |
3 | 0 | 0 |
meth |
RagTokenForGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
RagTokenForGeneration.get_output_embeddings |
1 | 0 | 0 |
meth |
RagTokenForGeneration.set_output_embeddings |
2 | 0 | 0 |
meth |
RagTokenForGeneration.shift_tokens_right |
3 | 0 | 0 |
meth |
RagTokenForGeneration.get_nll |
7 | 0 | 0 |
prop |
RagTokenForGeneration.retriever |
1 | 0 | 0 |
prop |
RagTokenForGeneration.generator |
1 | 0 | 0 |
prop |
RagTokenForGeneration.question_encoder |
1 | 0 | 0 |
attr |
RagTokenForGeneration.rag |
1 | 0 | 0 |
transformers.models.rag.retrieval_rag (33 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RagRetriever.__init__ |
6 | 0 | 0 |
meth |
RagRetriever._build_index |
2 | 0 | 0 |
meth |
RagRetriever.from_pretrained |
4 | 0 | 0 |
meth |
RagRetriever.save_pretrained |
2 | 0 | 0 |
meth |
RagRetriever.init_retrieval |
1 | 0 | 0 |
meth |
RagRetriever.postprocess_docs |
6 | 0 | 0 |
meth |
RagRetriever.set_ctx_encoder_tokenizer |
2 | 1 | 0 |
meth |
RagRetriever.__call__ |
6 | 3 | 0 |
attr |
RagRetriever.index |
1 | 0 | 0 |
attr |
RagRetriever.generator_tokenizer |
1 | 0 | 0 |
attr |
RagRetriever.question_encoder_tokenizer |
1 | 0 | 0 |
attr |
RagRetriever.n_docs |
1 | 0 | 0 |
attr |
RagRetriever.batch_size |
1 | 0 | 0 |
attr |
RagRetriever.config |
1 | 0 | 0 |
attr |
RagRetriever.ctx_encoder_tokenizer |
1 | 0 | 0 |
attr |
RagRetriever.return_tokenized_docs |
1 | 0 | 0 |
transformers.models.rag.tokenization_rag (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RagTokenizer.__init__ |
3 | 0 | 0 |
meth |
RagTokenizer.save_pretrained |
2 | 0 | 0 |
meth |
RagTokenizer.from_pretrained |
3 | 0 | 0 |
meth |
RagTokenizer.__call__ |
3 | 0 | 0 |
meth |
RagTokenizer.batch_decode |
3 | 0 | 0 |
meth |
RagTokenizer.decode |
3 | 0 | 0 |
meth |
RagTokenizer._switch_to_input_mode |
1 | 0 | 0 |
meth |
RagTokenizer._switch_to_target_mode |
1 | 0 | 0 |
attr |
RagTokenizer.question_encoder |
1 | 0 | 0 |
attr |
RagTokenizer.generator |
1 | 0 | 0 |
attr |
RagTokenizer.current_tokenizer |
1 | 0 | 0 |
transformers.models.recurrent_gemma.configuration_recurrent_gemma (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RecurrentGemmaConfig.__init__ |
24 | 22 | 0 |
prop |
RecurrentGemmaConfig.layers_block_type |
1 | 0 | 0 |
attr |
RecurrentGemmaConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
RecurrentGemmaConfig.vocab_size |
1 | 0 | 0 |
attr |
RecurrentGemmaConfig.hidden_size |
1 | 0 | 0 |
attr |
RecurrentGemmaConfig.intermediate_size |
1 | 0 | 0 |
attr |
RecurrentGemmaConfig.num_attention_heads |
1 | 0 | 0 |
attr |
RecurrentGemmaConfig.lru_width |
1 | 0 | 0 |
attr |
RecurrentGemmaConfig.attention_window_size |
1 | 0 | 0 |
attr |
RecurrentGemmaConfig.conv1d_width |
1 | 0 | 0 |
attr |
RecurrentGemmaConfig.logits_soft_cap |
1 | 0 | 0 |
attr |
RecurrentGemmaConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
RecurrentGemmaConfig.use_cache |
1 | 0 | 0 |
attr |
RecurrentGemmaConfig.block_types |
1 | 0 | 0 |
attr |
RecurrentGemmaConfig.hidden_activation |
1 | 0 | 0 |
attr |
RecurrentGemmaConfig.head_dim |
1 | 0 | 0 |
attr |
RecurrentGemmaConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
RecurrentGemmaConfig.attention_dropout |
1 | 0 | 0 |
attr |
RecurrentGemmaConfig.attention_bias |
1 | 0 | 0 |
attr |
RecurrentGemmaConfig.w_init_variance_scale |
1 | 0 | 0 |
attr |
RecurrentGemmaConfig.final_w_init_variance_scale |
1 | 0 | 0 |
attr |
RecurrentGemmaConfig.pad_token_id |
1 | 0 | 0 |
attr |
RecurrentGemmaConfig.bos_token_id |
1 | 0 | 0 |
attr |
RecurrentGemmaConfig.eos_token_id |
1 | 0 | 0 |
attr |
RecurrentGemmaConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
RecurrentGemmaConfig.rope_parameters |
1 | 0 | 0 |
transformers.models.recurrent_gemma.modeling_recurrent_gemma (29 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RecurrentGemmaPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
RecurrentGemmaPreTrainedModel._setup_cache |
5 | 0 | 0 |
meth |
RecurrentGemmaPreTrainedModel.reset_cache |
4 | 0 | 0 |
meth |
RecurrentGemmaForCausalLM.__init__ |
2 | 0 | 0 |
meth |
RecurrentGemmaForCausalLM.forward |
12 | 11 | 0 |
attr |
RecurrentGemmaForCausalLM.model |
1 | 0 | 0 |
attr |
RecurrentGemmaForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
RecurrentGemmaForCausalLM.lm_head |
1 | 0 | 0 |
meth |
RecurrentGemmaModel.__init__ |
2 | 1 | 0 |
meth |
RecurrentGemmaModel.forward |
10 | 9 | 0 |
meth |
RecurrentGemmaModel._update_causal_mask |
4 | 0 | 0 |
attr |
RecurrentGemmaModel.padding_idx |
1 | 0 | 0 |
attr |
RecurrentGemmaModel.vocab_size |
1 | 0 | 0 |
attr |
RecurrentGemmaModel.embed_tokens |
1 | 0 | 0 |
attr |
RecurrentGemmaModel.layers |
1 | 0 | 0 |
attr |
RecurrentGemmaModel.final_norm |
1 | 0 | 0 |
attr |
RecurrentGemmaModel.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.reformer.configuration_reformer (71 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ReformerConfig.__init__ |
36 | 0 | 0 |
attr |
ReformerConfig.hash_seed |
1 | 0 | 0 |
attr |
ReformerConfig.vocab_size |
1 | 0 | 0 |
attr |
ReformerConfig.attention_head_size |
1 | 0 | 0 |
attr |
ReformerConfig.hidden_size |
1 | 0 | 0 |
attr |
ReformerConfig.num_attention_heads |
1 | 0 | 0 |
attr |
ReformerConfig.num_hashes |
1 | 0 | 0 |
attr |
ReformerConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
ReformerConfig.num_buckets |
1 | 0 | 0 |
attr |
ReformerConfig.lsh_attn_chunk_length |
1 | 0 | 0 |
attr |
ReformerConfig.local_attn_chunk_length |
1 | 0 | 0 |
attr |
ReformerConfig.lsh_num_chunks_after |
1 | 0 | 0 |
attr |
ReformerConfig.lsh_num_chunks_before |
1 | 0 | 0 |
attr |
ReformerConfig.local_num_chunks_after |
1 | 0 | 0 |
attr |
ReformerConfig.local_num_chunks_before |
1 | 0 | 0 |
attr |
ReformerConfig.hidden_act |
1 | 0 | 0 |
attr |
ReformerConfig.feed_forward_size |
1 | 0 | 0 |
attr |
ReformerConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
ReformerConfig.lsh_attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
ReformerConfig.local_attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
ReformerConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
ReformerConfig.initializer_range |
1 | 0 | 0 |
attr |
ReformerConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
ReformerConfig.axial_pos_embds |
1 | 0 | 0 |
attr |
ReformerConfig.axial_pos_shape |
1 | 0 | 0 |
attr |
ReformerConfig.axial_pos_embds_dim |
1 | 0 | 0 |
attr |
ReformerConfig.axial_norm_std |
1 | 0 | 0 |
attr |
ReformerConfig.chunk_size_lm_head |
1 | 0 | 0 |
attr |
ReformerConfig.attn_layers |
1 | 0 | 0 |
attr |
ReformerConfig.use_cache |
1 | 0 | 0 |
attr |
ReformerConfig.classifier_dropout |
1 | 0 | 0 |
attr |
ReformerConfig.pad_token_id |
1 | 0 | 0 |
attr |
ReformerConfig.eos_token_id |
1 | 0 | 0 |
attr |
ReformerConfig.bos_token_id |
1 | 0 | 0 |
attr |
ReformerConfig.is_decoder |
1 | 0 | 0 |
attr |
ReformerConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.reformer.modeling_reformer (101 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ReformerLayer.__init__ |
3 | 0 | 0 |
meth |
ReformerLayer._init_attention_seed |
1 | 0 | 0 |
meth |
ReformerLayer._init_feed_forward_seed |
1 | 0 | 0 |
meth |
ReformerLayer.forward |
9 | 0 | 0 |
meth |
ReformerLayer.backward_pass |
7 | 0 | 0 |
attr |
ReformerLayer.attention |
1 | 0 | 0 |
attr |
ReformerLayer.attention_seed |
1 | 0 | 0 |
attr |
ReformerLayer.feed_forward_seed |
1 | 0 | 0 |
attr |
ReformerLayer.feed_forward |
1 | 0 | 0 |
meth |
ReformerForQuestionAnswering.__init__ |
2 | 0 | 0 |
meth |
ReformerForQuestionAnswering.forward |
12 | 11 | 0 |
attr |
ReformerForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
ReformerForQuestionAnswering.reformer |
1 | 0 | 0 |
attr |
ReformerForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
ReformerForSequenceClassification.__init__ |
2 | 0 | 0 |
meth |
ReformerForSequenceClassification.forward |
11 | 10 | 0 |
attr |
ReformerForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
ReformerForSequenceClassification.reformer |
1 | 0 | 0 |
attr |
ReformerForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
ReformerAttention.__init__ |
3 | 0 | 0 |
meth |
ReformerAttention.forward |
10 | 0 | 0 |
attr |
ReformerAttention.layer_id |
1 | 0 | 0 |
attr |
ReformerAttention.attn_layers |
1 | 0 | 0 |
attr |
ReformerAttention.layer_norm |
1 | 0 | 0 |
attr |
ReformerAttention.output |
1 | 0 | 0 |
attr |
ReformerAttention.self_attention |
1 | 0 | 0 |
meth |
ReformerForMaskedLM.__init__ |
2 | 0 | 0 |
meth |
ReformerForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
ReformerForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
meth |
ReformerForMaskedLM.forward |
11 | 10 | 0 |
attr |
ReformerForMaskedLM.reformer |
1 | 0 | 0 |
attr |
ReformerForMaskedLM.lm_head |
1 | 0 | 0 |
meth |
ReformerPreTrainedModel._init_weights |
2 | 0 | 0 |
prop |
ReformerPreTrainedModel.dummy_inputs |
1 | 0 | 0 |
meth |
ReformerModel.__init__ |
2 | 0 | 0 |
meth |
ReformerModel.get_input_embeddings |
1 | 0 | 0 |
meth |
ReformerModel.set_input_embeddings |
2 | 0 | 0 |
meth |
ReformerModel.forward |
12 | 11 | 0 |
meth |
ReformerModel._pad_to_mult_of_chunk_length |
9 | 0 | 0 |
attr |
ReformerModel.embeddings |
1 | 0 | 0 |
attr |
ReformerModel.encoder |
1 | 0 | 0 |
meth |
ReformerModelWithLMHead.__init__ |
2 | 0 | 0 |
meth |
ReformerModelWithLMHead.get_output_embeddings |
1 | 0 | 0 |
meth |
ReformerModelWithLMHead.set_output_embeddings |
2 | 0 | 0 |
meth |
ReformerModelWithLMHead.forward |
14 | 13 | 0 |
meth |
ReformerModelWithLMHead._prepare_position_ids_for_generation |
3 | 0 | 0 |
meth |
ReformerModelWithLMHead.prepare_inputs_for_generation |
7 | 0 | 0 |
attr |
ReformerModelWithLMHead.reformer |
1 | 0 | 0 |
attr |
ReformerModelWithLMHead.lm_head |
1 | 0 | 0 |
transformers.models.reformer.tokenization_reformer (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ReformerTokenizer.__init__ |
8 | 6 | 0 |
transformers.models.regnet.configuration_regnet (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RegNetConfig.__init__ |
9 | 0 | 0 |
attr |
RegNetConfig.num_channels |
1 | 0 | 0 |
attr |
RegNetConfig.embedding_size |
1 | 0 | 0 |
attr |
RegNetConfig.hidden_sizes |
1 | 0 | 0 |
attr |
RegNetConfig.depths |
1 | 0 | 0 |
attr |
RegNetConfig.groups_width |
1 | 0 | 0 |
attr |
RegNetConfig.layer_type |
1 | 0 | 0 |
attr |
RegNetConfig.hidden_act |
1 | 0 | 0 |
attr |
RegNetConfig.downsample_in_first_stage |
1 | 0 | 0 |
transformers.models.regnet.modeling_regnet (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RegNetForImageClassification.__init__ |
2 | 0 | 0 |
meth |
RegNetForImageClassification.forward |
6 | 5 | 0 |
attr |
RegNetForImageClassification.num_labels |
1 | 0 | 0 |
attr |
RegNetForImageClassification.regnet |
1 | 0 | 0 |
attr |
RegNetForImageClassification.classifier |
1 | 0 | 0 |
meth |
RegNetPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
RegNetModel.__init__ |
2 | 0 | 0 |
meth |
RegNetModel.forward |
5 | 4 | 0 |
attr |
RegNetModel.embedder |
1 | 0 | 0 |
attr |
RegNetModel.encoder |
1 | 0 | 0 |
attr |
RegNetModel.pooler |
1 | 0 | 0 |
transformers.models.rembert.configuration_rembert (45 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RemBertConfig.__init__ |
23 | 0 | 0 |
attr |
RemBertConfig.pad_token_id |
1 | 0 | 0 |
attr |
RemBertConfig.bos_token_id |
1 | 0 | 0 |
attr |
RemBertConfig.eos_token_id |
1 | 0 | 0 |
attr |
RemBertConfig.is_decoder |
1 | 0 | 0 |
attr |
RemBertConfig.add_cross_attention |
1 | 0 | 0 |
attr |
RemBertConfig.vocab_size |
1 | 0 | 0 |
attr |
RemBertConfig.input_embedding_size |
1 | 0 | 0 |
attr |
RemBertConfig.output_embedding_size |
1 | 0 | 0 |
attr |
RemBertConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
RemBertConfig.hidden_size |
1 | 0 | 0 |
attr |
RemBertConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
RemBertConfig.num_attention_heads |
1 | 0 | 0 |
attr |
RemBertConfig.intermediate_size |
1 | 0 | 0 |
attr |
RemBertConfig.hidden_act |
1 | 0 | 0 |
attr |
RemBertConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
RemBertConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
RemBertConfig.classifier_dropout_prob |
1 | 0 | 0 |
attr |
RemBertConfig.initializer_range |
1 | 0 | 0 |
attr |
RemBertConfig.type_vocab_size |
1 | 0 | 0 |
attr |
RemBertConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
RemBertConfig.use_cache |
1 | 0 | 0 |
attr |
RemBertConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.rembert.modeling_rembert (67 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RemBertModel.__init__ |
3 | 0 | 0 |
meth |
RemBertModel.get_input_embeddings |
1 | 0 | 0 |
meth |
RemBertModel.set_input_embeddings |
2 | 0 | 0 |
meth |
RemBertModel.forward |
15 | 14 | 0 |
attr |
RemBertModel.embeddings |
1 | 0 | 0 |
attr |
RemBertModel.encoder |
1 | 0 | 0 |
attr |
RemBertModel.pooler |
1 | 0 | 0 |
meth |
RemBertForSequenceClassification.__init__ |
2 | 0 | 0 |
meth |
RemBertForSequenceClassification.forward |
11 | 10 | 0 |
attr |
RemBertForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
RemBertForSequenceClassification.rembert |
1 | 0 | 0 |
attr |
RemBertForSequenceClassification.dropout |
1 | 0 | 0 |
attr |
RemBertForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
RemBertForTokenClassification.__init__ |
2 | 0 | 0 |
meth |
RemBertForTokenClassification.forward |
11 | 10 | 0 |
attr |
RemBertForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
RemBertForTokenClassification.rembert |
1 | 0 | 0 |
attr |
RemBertForTokenClassification.dropout |
1 | 0 | 0 |
attr |
RemBertForTokenClassification.classifier |
1 | 0 | 0 |
meth |
RemBertForQuestionAnswering.__init__ |
2 | 0 | 0 |
meth |
RemBertForQuestionAnswering.forward |
12 | 11 | 0 |
attr |
RemBertForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
RemBertForQuestionAnswering.rembert |
1 | 0 | 0 |
attr |
RemBertForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
RemBertForCausalLM.__init__ |
2 | 0 | 0 |
meth |
RemBertForCausalLM.get_output_embeddings |
1 | 0 | 0 |
meth |
RemBertForCausalLM.set_output_embeddings |
2 | 0 | 0 |
meth |
RemBertForCausalLM.forward |
16 | 15 | 0 |
attr |
RemBertForCausalLM.rembert |
1 | 0 | 0 |
attr |
RemBertForCausalLM.cls |
1 | 0 | 0 |
meth |
RemBertPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
RemBertForMultipleChoice.__init__ |
2 | 0 | 0 |
meth |
RemBertForMultipleChoice.forward |
11 | 10 | 0 |
attr |
RemBertForMultipleChoice.rembert |
1 | 0 | 0 |
attr |
RemBertForMultipleChoice.dropout |
1 | 0 | 0 |
attr |
RemBertForMultipleChoice.classifier |
1 | 0 | 0 |
meth |
RemBertForMaskedLM.__init__ |
2 | 0 | 0 |
meth |
RemBertForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
RemBertForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
meth |
RemBertForMaskedLM.forward |
13 | 12 | 0 |
attr |
RemBertForMaskedLM.rembert |
1 | 0 | 0 |
attr |
RemBertForMaskedLM.cls |
1 | 0 | 0 |
meth |
RemBertLayer.__init__ |
3 | 0 | 0 |
meth |
RemBertLayer.feed_forward_chunk |
2 | 0 | 0 |
attr |
RemBertLayer.chunk_size_feed_forward |
1 | 0 | 0 |
attr |
RemBertLayer.seq_len_dim |
1 | 0 | 0 |
attr |
RemBertLayer.attention |
1 | 0 | 0 |
attr |
RemBertLayer.is_decoder |
1 | 0 | 0 |
attr |
RemBertLayer.add_cross_attention |
1 | 0 | 0 |
attr |
RemBertLayer.intermediate |
1 | 0 | 0 |
attr |
RemBertLayer.output |
1 | 0 | 0 |
attr |
RemBertLayer.crossattention |
1 | 0 | 0 |
transformers.models.rembert.tokenization_rembert (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RemBertTokenizer.__init__ |
14 | 12 | 0 |
attr |
RemBertTokenizer.remove_space |
1 | 0 | 0 |
attr |
RemBertTokenizer.do_lower_case |
1 | 0 | 0 |
attr |
RemBertTokenizer.keep_accents |
1 | 0 | 0 |
transformers.models.resnet.configuration_resnet (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ResNetConfig.__init__ |
12 | 0 | 0 |
attr |
ResNetConfig.num_channels |
1 | 0 | 0 |
attr |
ResNetConfig.embedding_size |
1 | 0 | 0 |
attr |
ResNetConfig.hidden_sizes |
1 | 0 | 0 |
attr |
ResNetConfig.depths |
1 | 0 | 0 |
attr |
ResNetConfig.layer_type |
1 | 0 | 0 |
attr |
ResNetConfig.hidden_act |
1 | 0 | 0 |
attr |
ResNetConfig.downsample_in_first_stage |
1 | 0 | 0 |
attr |
ResNetConfig.downsample_in_bottleneck |
1 | 0 | 0 |
attr |
ResNetConfig.stage_names |
1 | 0 | 0 |
transformers.models.resnet.modeling_resnet (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ResNetModel.__init__ |
2 | 0 | 0 |
meth |
ResNetModel.forward |
5 | 4 | 0 |
attr |
ResNetModel.embedder |
1 | 0 | 0 |
attr |
ResNetModel.encoder |
1 | 0 | 0 |
attr |
ResNetModel.pooler |
1 | 0 | 0 |
meth |
ResNetBackbone.__init__ |
2 | 0 | 0 |
meth |
ResNetBackbone.forward |
5 | 4 | 0 |
attr |
ResNetBackbone.num_features |
1 | 0 | 0 |
attr |
ResNetBackbone.embedder |
1 | 0 | 0 |
attr |
ResNetBackbone.encoder |
1 | 0 | 0 |
meth |
ResNetForImageClassification.__init__ |
2 | 0 | 0 |
meth |
ResNetForImageClassification.forward |
6 | 5 | 0 |
attr |
ResNetForImageClassification.num_labels |
1 | 0 | 0 |
attr |
ResNetForImageClassification.resnet |
1 | 0 | 0 |
attr |
ResNetForImageClassification.classifier |
1 | 0 | 0 |
meth |
ResNetPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.roberta.configuration_roberta (42 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RobertaConfig.__init__ |
22 | 0 | 0 |
attr |
RobertaConfig.pad_token_id |
1 | 0 | 0 |
attr |
RobertaConfig.bos_token_id |
1 | 0 | 0 |
attr |
RobertaConfig.eos_token_id |
1 | 0 | 0 |
attr |
RobertaConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
RobertaConfig.is_decoder |
1 | 0 | 0 |
attr |
RobertaConfig.add_cross_attention |
1 | 0 | 0 |
attr |
RobertaConfig.vocab_size |
1 | 0 | 0 |
attr |
RobertaConfig.hidden_size |
1 | 0 | 0 |
attr |
RobertaConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
RobertaConfig.num_attention_heads |
1 | 0 | 0 |
attr |
RobertaConfig.hidden_act |
1 | 0 | 0 |
attr |
RobertaConfig.intermediate_size |
1 | 0 | 0 |
attr |
RobertaConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
RobertaConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
RobertaConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
RobertaConfig.type_vocab_size |
1 | 0 | 0 |
attr |
RobertaConfig.initializer_range |
1 | 0 | 0 |
attr |
RobertaConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
RobertaConfig.use_cache |
1 | 0 | 0 |
attr |
RobertaConfig.classifier_dropout |
1 | 0 | 0 |
transformers.models.roberta.modeling_roberta (56 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RobertaForTokenClassification.__init__ |
2 | 0 | 0 |
attr |
RobertaForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
RobertaForTokenClassification.roberta |
1 | 0 | 0 |
attr |
RobertaForTokenClassification.dropout |
1 | 0 | 0 |
attr |
RobertaForTokenClassification.classifier |
1 | 0 | 0 |
meth |
RobertaForSequenceClassification.__init__ |
2 | 0 | 0 |
attr |
RobertaForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
RobertaForSequenceClassification.config |
1 | 0 | 0 |
attr |
RobertaForSequenceClassification.roberta |
1 | 0 | 0 |
attr |
RobertaForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
RobertaForCausalLM.__init__ |
2 | 0 | 0 |
meth |
RobertaForCausalLM.get_output_embeddings |
1 | 0 | 0 |
meth |
RobertaForCausalLM.set_output_embeddings |
2 | 0 | 0 |
attr |
RobertaForCausalLM.roberta |
1 | 0 | 0 |
attr |
RobertaForCausalLM.lm_head |
1 | 0 | 0 |
meth |
RobertaForQuestionAnswering.__init__ |
2 | 0 | 0 |
attr |
RobertaForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
RobertaForQuestionAnswering.roberta |
1 | 0 | 0 |
attr |
RobertaForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
RobertaModel.__init__ |
3 | 0 | 0 |
meth |
RobertaModel.get_input_embeddings |
1 | 0 | 0 |
meth |
RobertaModel.set_input_embeddings |
2 | 0 | 0 |
meth |
RobertaModel._create_attention_masks |
7 | 0 | 0 |
attr |
RobertaModel.config |
1 | 0 | 0 |
attr |
RobertaModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
RobertaModel.embeddings |
1 | 0 | 0 |
attr |
RobertaModel.encoder |
1 | 0 | 0 |
attr |
RobertaModel.pooler |
1 | 0 | 0 |
meth |
RobertaForMultipleChoice.__init__ |
2 | 0 | 0 |
attr |
RobertaForMultipleChoice.roberta |
1 | 0 | 0 |
attr |
RobertaForMultipleChoice.dropout |
1 | 0 | 0 |
attr |
RobertaForMultipleChoice.classifier |
1 | 0 | 0 |
meth |
RobertaForMaskedLM.__init__ |
2 | 0 | 0 |
meth |
RobertaForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
RobertaForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
attr |
RobertaForMaskedLM.roberta |
1 | 0 | 0 |
attr |
RobertaForMaskedLM.lm_head |
1 | 0 | 0 |
meth |
RobertaPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.roberta.modular_roberta (41 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RobertaForTokenClassification.__init__ |
2 | 0 | 0 |
attr |
RobertaForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
RobertaForTokenClassification.roberta |
1 | 0 | 0 |
attr |
RobertaForTokenClassification.dropout |
1 | 0 | 0 |
attr |
RobertaForTokenClassification.classifier |
1 | 0 | 0 |
meth |
RobertaForSequenceClassification.__init__ |
2 | 0 | 0 |
attr |
RobertaForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
RobertaForSequenceClassification.config |
1 | 0 | 0 |
attr |
RobertaForSequenceClassification.roberta |
1 | 0 | 0 |
attr |
RobertaForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
RobertaForCausalLM.__init__ |
2 | 0 | 0 |
meth |
RobertaForCausalLM.get_output_embeddings |
1 | 0 | 0 |
meth |
RobertaForCausalLM.set_output_embeddings |
2 | 0 | 0 |
attr |
RobertaForCausalLM.roberta |
1 | 0 | 0 |
attr |
RobertaForCausalLM.lm_head |
1 | 0 | 0 |
meth |
RobertaForQuestionAnswering.__init__ |
2 | 0 | 0 |
attr |
RobertaForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
RobertaForQuestionAnswering.roberta |
1 | 0 | 0 |
attr |
RobertaForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
RobertaModel.__init__ |
3 | 0 | 0 |
meth |
RobertaForMultipleChoice.__init__ |
2 | 0 | 0 |
attr |
RobertaForMultipleChoice.roberta |
1 | 0 | 0 |
attr |
RobertaForMultipleChoice.dropout |
1 | 0 | 0 |
attr |
RobertaForMultipleChoice.classifier |
1 | 0 | 0 |
meth |
RobertaForMaskedLM.__init__ |
2 | 0 | 0 |
meth |
RobertaForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
RobertaForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
attr |
RobertaForMaskedLM.roberta |
1 | 0 | 0 |
attr |
RobertaForMaskedLM.lm_head |
1 | 0 | 0 |
meth |
RobertaPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.roberta.tokenization_roberta (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RobertaTokenizer.__init__ |
14 | 12 | 0 |
attr |
RobertaTokenizer.add_prefix_space |
1 | 0 | 0 |
attr |
RobertaTokenizer.trim_offsets |
1 | 0 | 0 |
transformers.models.roberta.tokenization_roberta_old (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RobertaTokenizerFast.__init__ |
15 | 0 | 0 |
meth |
RobertaTokenizerFast._batch_encode_plus |
3 | 1 | 0 |
meth |
RobertaTokenizerFast._encode_plus |
3 | 1 | 0 |
meth |
RobertaTokenizerFast.build_inputs_with_special_tokens |
3 | 0 | 0 |
prop |
RobertaTokenizerFast.mask_token |
2 | 1 | 0 |
transformers.models.roberta_prelayernorm.configuration_roberta_prelayernorm (42 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RobertaPreLayerNormConfig.__init__ |
22 | 0 | 0 |
attr |
RobertaPreLayerNormConfig.pad_token_id |
1 | 0 | 0 |
attr |
RobertaPreLayerNormConfig.bos_token_id |
1 | 0 | 0 |
attr |
RobertaPreLayerNormConfig.eos_token_id |
1 | 0 | 0 |
attr |
RobertaPreLayerNormConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
RobertaPreLayerNormConfig.is_decoder |
1 | 0 | 0 |
attr |
RobertaPreLayerNormConfig.add_cross_attention |
1 | 0 | 0 |
attr |
RobertaPreLayerNormConfig.vocab_size |
1 | 0 | 0 |
attr |
RobertaPreLayerNormConfig.hidden_size |
1 | 0 | 0 |
attr |
RobertaPreLayerNormConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
RobertaPreLayerNormConfig.num_attention_heads |
1 | 0 | 0 |
attr |
RobertaPreLayerNormConfig.hidden_act |
1 | 0 | 0 |
attr |
RobertaPreLayerNormConfig.intermediate_size |
1 | 0 | 0 |
attr |
RobertaPreLayerNormConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
RobertaPreLayerNormConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
RobertaPreLayerNormConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
RobertaPreLayerNormConfig.type_vocab_size |
1 | 0 | 0 |
attr |
RobertaPreLayerNormConfig.initializer_range |
1 | 0 | 0 |
attr |
RobertaPreLayerNormConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
RobertaPreLayerNormConfig.use_cache |
1 | 0 | 0 |
attr |
RobertaPreLayerNormConfig.classifier_dropout |
1 | 0 | 0 |
transformers.models.roberta_prelayernorm.modeling_roberta_prelayernorm (57 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RobertaPreLayerNormForMaskedLM.__init__ |
2 | 0 | 0 |
meth |
RobertaPreLayerNormForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
RobertaPreLayerNormForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
attr |
RobertaPreLayerNormForMaskedLM.roberta_prelayernorm |
1 | 0 | 0 |
attr |
RobertaPreLayerNormForMaskedLM.lm_head |
1 | 0 | 0 |
meth |
RobertaPreLayerNormForCausalLM.__init__ |
2 | 0 | 0 |
meth |
RobertaPreLayerNormForCausalLM.get_output_embeddings |
1 | 0 | 0 |
meth |
RobertaPreLayerNormForCausalLM.set_output_embeddings |
2 | 0 | 0 |
attr |
RobertaPreLayerNormForCausalLM.roberta_prelayernorm |
1 | 0 | 0 |
attr |
RobertaPreLayerNormForCausalLM.lm_head |
1 | 0 | 0 |
meth |
RobertaPreLayerNormForQuestionAnswering.__init__ |
2 | 0 | 0 |
attr |
RobertaPreLayerNormForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
RobertaPreLayerNormForQuestionAnswering.roberta_prelayernorm |
1 | 0 | 0 |
attr |
RobertaPreLayerNormForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
RobertaPreLayerNormForTokenClassification.__init__ |
2 | 0 | 0 |
attr |
RobertaPreLayerNormForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
RobertaPreLayerNormForTokenClassification.roberta_prelayernorm |
1 | 0 | 0 |
attr |
RobertaPreLayerNormForTokenClassification.dropout |
1 | 0 | 0 |
attr |
RobertaPreLayerNormForTokenClassification.classifier |
1 | 0 | 0 |
meth |
RobertaPreLayerNormModel.__init__ |
3 | 0 | 0 |
meth |
RobertaPreLayerNormModel.get_input_embeddings |
1 | 0 | 0 |
meth |
RobertaPreLayerNormModel.set_input_embeddings |
2 | 0 | 0 |
meth |
RobertaPreLayerNormModel._create_attention_masks |
7 | 0 | 0 |
attr |
RobertaPreLayerNormModel.config |
1 | 0 | 0 |
attr |
RobertaPreLayerNormModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
RobertaPreLayerNormModel.embeddings |
1 | 0 | 0 |
attr |
RobertaPreLayerNormModel.encoder |
1 | 0 | 0 |
attr |
RobertaPreLayerNormModel.LayerNorm |
1 | 0 | 0 |
attr |
RobertaPreLayerNormModel.pooler |
1 | 0 | 0 |
meth |
RobertaPreLayerNormForMultipleChoice.__init__ |
2 | 0 | 0 |
attr |
RobertaPreLayerNormForMultipleChoice.roberta_prelayernorm |
1 | 0 | 0 |
attr |
RobertaPreLayerNormForMultipleChoice.dropout |
1 | 0 | 0 |
attr |
RobertaPreLayerNormForMultipleChoice.classifier |
1 | 0 | 0 |
meth |
RobertaPreLayerNormPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
RobertaPreLayerNormForSequenceClassification.__init__ |
2 | 0 | 0 |
attr |
RobertaPreLayerNormForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
RobertaPreLayerNormForSequenceClassification.config |
1 | 0 | 0 |
attr |
RobertaPreLayerNormForSequenceClassification.roberta_prelayernorm |
1 | 0 | 0 |
attr |
RobertaPreLayerNormForSequenceClassification.classifier |
1 | 0 | 0 |
transformers.models.roc_bert.configuration_roc_bert (52 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RoCBertConfig.__init__ |
27 | 0 | 0 |
attr |
RoCBertConfig.is_decoder |
1 | 0 | 0 |
attr |
RoCBertConfig.add_cross_attention |
1 | 0 | 0 |
attr |
RoCBertConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
RoCBertConfig.vocab_size |
1 | 0 | 0 |
attr |
RoCBertConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
RoCBertConfig.hidden_size |
1 | 0 | 0 |
attr |
RoCBertConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
RoCBertConfig.num_attention_heads |
1 | 0 | 0 |
attr |
RoCBertConfig.intermediate_size |
1 | 0 | 0 |
attr |
RoCBertConfig.hidden_act |
1 | 0 | 0 |
attr |
RoCBertConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
RoCBertConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
RoCBertConfig.initializer_range |
1 | 0 | 0 |
attr |
RoCBertConfig.type_vocab_size |
1 | 0 | 0 |
attr |
RoCBertConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
RoCBertConfig.use_cache |
1 | 0 | 0 |
attr |
RoCBertConfig.enable_pronunciation |
1 | 0 | 0 |
attr |
RoCBertConfig.enable_shape |
1 | 0 | 0 |
attr |
RoCBertConfig.pronunciation_embed_dim |
1 | 0 | 0 |
attr |
RoCBertConfig.pronunciation_vocab_size |
1 | 0 | 0 |
attr |
RoCBertConfig.shape_embed_dim |
1 | 0 | 0 |
attr |
RoCBertConfig.shape_vocab_size |
1 | 0 | 0 |
attr |
RoCBertConfig.concat_input |
1 | 0 | 0 |
attr |
RoCBertConfig.classifier_dropout |
1 | 0 | 0 |
attr |
RoCBertConfig.pad_token_id |
1 | 0 | 0 |
transformers.models.roc_bert.modeling_roc_bert (90 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RoCBertForSequenceClassification.__init__ |
2 | 0 | 0 |
attr |
RoCBertForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
RoCBertForSequenceClassification.config |
1 | 0 | 0 |
attr |
RoCBertForSequenceClassification.roc_bert |
1 | 0 | 0 |
attr |
RoCBertForSequenceClassification.dropout |
1 | 0 | 0 |
attr |
RoCBertForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
RoCBertPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
RoCBertForPreTraining.__init__ |
2 | 0 | 0 |
meth |
RoCBertForPreTraining.get_output_embeddings |
1 | 0 | 0 |
meth |
RoCBertForPreTraining.set_output_embeddings |
2 | 0 | 0 |
attr |
RoCBertForPreTraining.roc_bert |
1 | 0 | 0 |
attr |
RoCBertForPreTraining.cls |
1 | 0 | 0 |
meth |
RoCBertForMaskedLM.__init__ |
2 | 0 | 0 |
meth |
RoCBertForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
RoCBertForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
attr |
RoCBertForMaskedLM.roc_bert |
1 | 0 | 0 |
attr |
RoCBertForMaskedLM.cls |
1 | 0 | 0 |
meth |
RoCBertForMultipleChoice.__init__ |
2 | 0 | 0 |
attr |
RoCBertForMultipleChoice.roc_bert |
1 | 0 | 0 |
attr |
RoCBertForMultipleChoice.dropout |
1 | 0 | 0 |
attr |
RoCBertForMultipleChoice.classifier |
1 | 0 | 0 |
meth |
RoCBertForCausalLM.__init__ |
2 | 0 | 0 |
meth |
RoCBertForCausalLM.get_output_embeddings |
1 | 0 | 0 |
meth |
RoCBertForCausalLM.set_output_embeddings |
2 | 0 | 0 |
meth |
RoCBertForCausalLM.prepare_inputs_for_generation |
7 | 0 | 0 |
attr |
RoCBertForCausalLM.roc_bert |
1 | 0 | 0 |
attr |
RoCBertForCausalLM.cls |
1 | 0 | 0 |
meth |
RoCBertLayer.__init__ |
3 | 0 | 0 |
meth |
RoCBertLayer.feed_forward_chunk |
2 | 0 | 0 |
attr |
RoCBertLayer.chunk_size_feed_forward |
1 | 0 | 0 |
attr |
RoCBertLayer.seq_len_dim |
1 | 0 | 0 |
attr |
RoCBertLayer.attention |
1 | 0 | 0 |
attr |
RoCBertLayer.is_decoder |
1 | 0 | 0 |
attr |
RoCBertLayer.add_cross_attention |
1 | 0 | 0 |
attr |
RoCBertLayer.intermediate |
1 | 0 | 0 |
attr |
RoCBertLayer.output |
1 | 0 | 0 |
attr |
RoCBertLayer.crossattention |
1 | 0 | 0 |
meth |
RoCBertModel.__init__ |
3 | 0 | 0 |
meth |
RoCBertModel.get_input_embeddings |
1 | 0 | 0 |
meth |
RoCBertModel.set_input_embeddings |
2 | 0 | 0 |
meth |
RoCBertModel.get_pronunciation_embeddings |
1 | 0 | 0 |
meth |
RoCBertModel.set_pronunciation_embeddings |
2 | 0 | 0 |
meth |
RoCBertModel.get_shape_embeddings |
1 | 0 | 0 |
meth |
RoCBertModel.set_shape_embeddings |
2 | 0 | 0 |
meth |
RoCBertModel._create_attention_masks |
7 | 0 | 0 |
attr |
RoCBertModel.config |
1 | 0 | 0 |
attr |
RoCBertModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
RoCBertModel.embeddings |
1 | 0 | 0 |
attr |
RoCBertModel.encoder |
1 | 0 | 0 |
attr |
RoCBertModel.pooler |
1 | 0 | 0 |
meth |
RoCBertForQuestionAnswering.init |
2 | 0 | 0 |
attr |
RoCBertForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
RoCBertForQuestionAnswering.roc_bert |
1 | 0 | 0 |
attr |
RoCBertForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
RoCBertForTokenClassification.init |
2 | 0 | 0 |
attr |
RoCBertForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
RoCBertForTokenClassification.roc_bert |
1 | 0 | 0 |
attr |
RoCBertForTokenClassification.dropout |
1 | 0 | 0 |
attr |
RoCBertForTokenClassification.classifier |
1 | 0 | 0 |
transformers.models.roc_bert.tokenization_roc_bert (44 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RoCBertTokenizer.init |
15 | 0 | 0 |
meth |
RoCBertTokenizer.call |
22 | 21 | 0 |
meth |
RoCBertTokenizer.encode_plus |
20 | 19 | 0 |
meth |
RoCBertTokenizer.batch_encode_plus |
19 | 18 | 0 |
meth |
RoCBertTokenizer.get_vocab |
1 | 0 | 0 |
meth |
RoCBertTokenizer._tokenize |
3 | 0 | 0 |
meth |
RoCBertTokenizer._encode_plus |
20 | 19 | 0 |
meth |
RoCBertTokenizer.prepare_for_model |
24 | 23 | 0 |
meth |
RoCBertTokenizer._batch_encode_plus |
19 | 18 | 0 |
meth |
RoCBertTokenizer._convert_token_to_id |
2 | 0 | 0 |
meth |
RoCBertTokenizer._convert_token_to_shape_id |
2 | 0 | 0 |
meth |
RoCBertTokenizer._convert_token_to_pronunciation_id |
2 | 0 | 0 |
meth |
RoCBertTokenizer._convert_id_to_token |
2 | 0 | 0 |
meth |
RoCBertTokenizer.convert_tokens_to_string |
2 | 0 | 0 |
prop |
RoCBertTokenizer.do_lower_case |
1 | 0 | 0 |
prop |
RoCBertTokenizer.vocab_size |
1 | 0 | 0 |
attr |
RoCBertTokenizer.vocab |
1 | 0 | 0 |
attr |
RoCBertTokenizer.ids_to_tokens |
1 | 0 | 0 |
attr |
RoCBertTokenizer.do_basic_tokenize |
1 | 0 | 0 |
attr |
RoCBertTokenizer.wordpiece_tokenizer |
1 | 0 | 0 |
attr |
RoCBertTokenizer.word_shape |
1 | 0 | 0 |
attr |
RoCBertTokenizer.word_pronunciation |
1 | 0 | 0 |
attr |
RoCBertTokenizer.basic_tokenizer |
1 | 0 | 0 |
transformers.models.roformer.configuration_roformer (44 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RoFormerConfig.init |
23 | 0 | 0 |
attr |
RoFormerConfig.pad_token_id |
1 | 0 | 0 |
attr |
RoFormerConfig.bos_token_id |
1 | 0 | 0 |
attr |
RoFormerConfig.eos_token_id |
1 | 0 | 0 |
attr |
RoFormerConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
RoFormerConfig.is_decoder |
1 | 0 | 0 |
attr |
RoFormerConfig.add_cross_attention |
1 | 0 | 0 |
attr |
RoFormerConfig.vocab_size |
1 | 0 | 0 |
attr |
RoFormerConfig.embedding_size |
1 | 0 | 0 |
attr |
RoFormerConfig.hidden_size |
1 | 0 | 0 |
attr |
RoFormerConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
RoFormerConfig.num_attention_heads |
1 | 0 | 0 |
attr |
RoFormerConfig.hidden_act |
1 | 0 | 0 |
attr |
RoFormerConfig.intermediate_size |
1 | 0 | 0 |
attr |
RoFormerConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
RoFormerConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
RoFormerConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
RoFormerConfig.type_vocab_size |
1 | 0 | 0 |
attr |
RoFormerConfig.initializer_range |
1 | 0 | 0 |
attr |
RoFormerConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
RoFormerConfig.rotary_value |
1 | 0 | 0 |
attr |
RoFormerConfig.use_cache |
1 | 0 | 0 |
transformers.models.roformer.modeling_roformer (74 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RoFormerForMaskedLM.init |
2 | 0 | 0 |
meth |
RoFormerForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
RoFormerForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
meth |
RoFormerForMaskedLM.forward |
12 | 11 | 0 |
attr |
RoFormerForMaskedLM.roformer |
1 | 0 | 0 |
attr |
RoFormerForMaskedLM.cls |
1 | 0 | 0 |
meth |
RoFormerForMultipleChoice.init |
2 | 0 | 0 |
meth |
RoFormerForMultipleChoice.forward |
10 | 9 | 0 |
attr |
RoFormerForMultipleChoice.roformer |
1 | 0 | 0 |
attr |
RoFormerForMultipleChoice.sequence_summary |
1 | 0 | 0 |
attr |
RoFormerForMultipleChoice.classifier |
1 | 0 | 0 |
meth |
RoFormerForQuestionAnswering.init |
2 | 0 | 0 |
meth |
RoFormerForQuestionAnswering.forward |
11 | 10 | 0 |
attr |
RoFormerForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
RoFormerForQuestionAnswering.roformer |
1 | 0 | 0 |
attr |
RoFormerForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
RoFormerForSequenceClassification.init |
2 | 0 | 0 |
meth |
RoFormerForSequenceClassification.forward |
10 | 9 | 0 |
attr |
RoFormerForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
RoFormerForSequenceClassification.roformer |
1 | 0 | 0 |
attr |
RoFormerForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
RoFormerForTokenClassification.init |
2 | 0 | 0 |
meth |
RoFormerForTokenClassification.forward |
10 | 9 | 0 |
attr |
RoFormerForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
RoFormerForTokenClassification.roformer |
1 | 0 | 0 |
attr |
RoFormerForTokenClassification.dropout |
1 | 0 | 0 |
attr |
RoFormerForTokenClassification.classifier |
1 | 0 | 0 |
meth |
RoFormerLayer.init |
3 | 0 | 0 |
meth |
RoFormerLayer.forward |
9 | 0 | 0 |
meth |
RoFormerLayer.feed_forward_chunk |
2 | 0 | 0 |
attr |
RoFormerLayer.chunk_size_feed_forward |
1 | 0 | 0 |
attr |
RoFormerLayer.seq_len_dim |
1 | 0 | 0 |
attr |
RoFormerLayer.attention |
1 | 0 | 0 |
attr |
RoFormerLayer.is_decoder |
1 | 0 | 0 |
attr |
RoFormerLayer.add_cross_attention |
1 | 0 | 0 |
attr |
RoFormerLayer.intermediate |
1 | 0 | 0 |
attr |
RoFormerLayer.output |
1 | 0 | 0 |
attr |
RoFormerLayer.crossattention |
1 | 0 | 0 |
meth |
RoFormerForCausalLM.init |
2 | 0 | 0 |
meth |
RoFormerForCausalLM.get_output_embeddings |
1 | 0 | 0 |
meth |
RoFormerForCausalLM.set_output_embeddings |
2 | 0 | 0 |
meth |
RoFormerForCausalLM.forward |
16 | 15 | 0 |
attr |
RoFormerForCausalLM.roformer |
1 | 0 | 0 |
attr |
RoFormerForCausalLM.cls |
1 | 0 | 0 |
meth |
RoFormerModel.init |
2 | 0 | 0 |
meth |
RoFormerModel.get_input_embeddings |
1 | 0 | 0 |
meth |
RoFormerModel.set_input_embeddings |
2 | 0 | 0 |
meth |
RoFormerModel.forward |
14 | 13 | 0 |
attr |
RoFormerModel.embeddings |
1 | 0 | 0 |
attr |
RoFormerModel.encoder |
1 | 0 | 0 |
attr |
RoFormerModel.embeddings_project |
1 | 0 | 0 |
meth |
RoFormerPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.roformer.tokenization_roformer (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RoFormerTokenizer.init |
11 | 1 | 0 |
meth |
RoFormerTokenizer.getstate |
1 | 0 | 0 |
meth |
RoFormerTokenizer.setstate |
2 | 0 | 0 |
meth |
RoFormerTokenizer.build_inputs_with_special_tokens |
3 | 0 | 0 |
meth |
RoFormerTokenizer.save_pretrained |
6 | 0 | 0 |
transformers.models.roformer.tokenization_utils (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
JiebaPreTokenizer.init |
2 | 1 | 0 |
meth |
JiebaPreTokenizer.pre_tokenize |
2 | 1 | 0 |
attr |
JiebaPreTokenizer.vocab |
1 | 0 | 0 |
attr |
JiebaPreTokenizer.normalizers |
1 | 0 | 0 |
attr |
JiebaPreTokenizer.jieba |
1 | 0 | 0 |
transformers.models.rt_detr.configuration_rt_detr (105 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RTDetrConfig.init |
54 | 0 | 0 |
attr |
RTDetrConfig.initializer_range |
1 | 0 | 0 |
attr |
RTDetrConfig.initializer_bias_prior_prob |
1 | 0 | 0 |
attr |
RTDetrConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
RTDetrConfig.batch_norm_eps |
1 | 0 | 0 |
attr |
RTDetrConfig.backbone_config |
1 | 0 | 0 |
attr |
RTDetrConfig.freeze_backbone_batch_norms |
1 | 0 | 0 |
attr |
RTDetrConfig.encoder_hidden_dim |
1 | 0 | 0 |
attr |
RTDetrConfig.encoder_in_channels |
1 | 0 | 0 |
attr |
RTDetrConfig.feat_strides |
1 | 0 | 0 |
attr |
RTDetrConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
RTDetrConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
RTDetrConfig.dropout |
1 | 0 | 0 |
attr |
RTDetrConfig.activation_dropout |
1 | 0 | 0 |
attr |
RTDetrConfig.encode_proj_layers |
1 | 0 | 0 |
attr |
RTDetrConfig.encoder_layers |
1 | 0 | 0 |
attr |
RTDetrConfig.positional_encoding_temperature |
1 | 0 | 0 |
attr |
RTDetrConfig.eval_size |
1 | 0 | 0 |
attr |
RTDetrConfig.normalize_before |
1 | 0 | 0 |
attr |
RTDetrConfig.encoder_activation_function |
1 | 0 | 0 |
attr |
RTDetrConfig.activation_function |
1 | 0 | 0 |
attr |
RTDetrConfig.hidden_expansion |
1 | 0 | 0 |
attr |
RTDetrConfig.d_model |
1 | 0 | 0 |
attr |
RTDetrConfig.num_queries |
1 | 0 | 0 |
attr |
RTDetrConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
RTDetrConfig.decoder_in_channels |
1 | 0 | 0 |
attr |
RTDetrConfig.num_feature_levels |
1 | 0 | 0 |
attr |
RTDetrConfig.decoder_n_points |
1 | 0 | 0 |
attr |
RTDetrConfig.decoder_layers |
1 | 0 | 0 |
attr |
RTDetrConfig.decoder_attention_heads |
1 | 0 | 0 |
attr |
RTDetrConfig.decoder_activation_function |
1 | 0 | 0 |
attr |
RTDetrConfig.attention_dropout |
1 | 0 | 0 |
attr |
RTDetrConfig.num_denoising |
1 | 0 | 0 |
attr |
RTDetrConfig.label_noise_ratio |
1 | 0 | 0 |
attr |
RTDetrConfig.box_noise_scale |
1 | 0 | 0 |
attr |
RTDetrConfig.learn_initial_query |
1 | 0 | 0 |
attr |
RTDetrConfig.anchor_image_size |
1 | 0 | 0 |
attr |
RTDetrConfig.auxiliary_loss |
1 | 0 | 0 |
attr |
RTDetrConfig.disable_custom_kernels |
1 | 0 | 0 |
attr |
RTDetrConfig.with_box_refine |
1 | 0 | 0 |
attr |
RTDetrConfig.matcher_alpha |
1 | 0 | 0 |
attr |
RTDetrConfig.matcher_gamma |
1 | 0 | 0 |
attr |
RTDetrConfig.matcher_class_cost |
1 | 0 | 0 |
attr |
RTDetrConfig.matcher_bbox_cost |
1 | 0 | 0 |
attr |
RTDetrConfig.matcher_giou_cost |
1 | 0 | 0 |
attr |
RTDetrConfig.use_focal_loss |
1 | 0 | 0 |
attr |
RTDetrConfig.focal_loss_alpha |
1 | 0 | 0 |
attr |
RTDetrConfig.focal_loss_gamma |
1 | 0 | 0 |
attr |
RTDetrConfig.weight_loss_vfl |
1 | 0 | 0 |
attr |
RTDetrConfig.weight_loss_bbox |
1 | 0 | 0 |
attr |
RTDetrConfig.weight_loss_giou |
1 | 0 | 0 |
attr |
RTDetrConfig.eos_coefficient |
1 | 0 | 0 |
transformers.models.rt_detr.configuration_rt_detr_resnet (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RTDetrResNetConfig.init |
12 | 0 | 0 |
attr |
RTDetrResNetConfig.num_channels |
1 | 0 | 0 |
attr |
RTDetrResNetConfig.embedding_size |
1 | 0 | 0 |
attr |
RTDetrResNetConfig.hidden_sizes |
1 | 0 | 0 |
attr |
RTDetrResNetConfig.depths |
1 | 0 | 0 |
attr |
RTDetrResNetConfig.layer_type |
1 | 0 | 0 |
attr |
RTDetrResNetConfig.hidden_act |
1 | 0 | 0 |
attr |
RTDetrResNetConfig.downsample_in_first_stage |
1 | 0 | 0 |
attr |
RTDetrResNetConfig.downsample_in_bottleneck |
1 | 0 | 0 |
attr |
RTDetrResNetConfig.stage_names |
1 | 0 | 0 |
transformers.models.rt_detr.image_processing_rt_detr (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RTDetrImageProcessor.init |
14 | 13 | 0 |
meth |
RTDetrImageProcessor.resize |
7 | 6 | 0 |
meth |
RTDetrImageProcessor.resize_annotation |
5 | 2 | 0 |
meth |
RTDetrImageProcessor._update_annotation_for_padded_image |
6 | 4 | 0 |
meth |
RTDetrImageProcessor.preprocess |
20 | 19 | 0 |
meth |
RTDetrImageProcessor.post_process_object_detection |
5 | 3 | 0 |
attr |
RTDetrImageProcessor.format |
1 | 0 | 0 |
attr |
RTDetrImageProcessor.do_resize |
1 | 0 | 0 |
attr |
RTDetrImageProcessor.size |
1 | 0 | 0 |
attr |
RTDetrImageProcessor.resample |
1 | 0 | 0 |
attr |
RTDetrImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
RTDetrImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
RTDetrImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
RTDetrImageProcessor.do_convert_annotations |
1 | 0 | 0 |
attr |
RTDetrImageProcessor.image_mean |
1 | 0 | 0 |
attr |
RTDetrImageProcessor.image_std |
1 | 0 | 0 |
attr |
RTDetrImageProcessor.do_pad |
1 | 0 | 0 |
attr |
RTDetrImageProcessor.pad_size |
1 | 0 | 0 |
transformers.models.rt_detr.image_processing_rt_detr_fast (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RTDetrImageProcessorFast.resize |
5 | 4 | 0 |
meth |
RTDetrImageProcessorFast.resize_annotation |
6 | 5 | 0 |
meth |
RTDetrImageProcessorFast._update_annotation_for_padded_image |
6 | 4 | 0 |
meth |
RTDetrImageProcessorFast.pad |
6 | 5 | 0 |
meth |
RTDetrImageProcessorFast._preprocess |
19 | 18 | 0 |
meth |
RTDetrImageProcessorFast.post_process_object_detection |
5 | 3 | 0 |
attr |
RTDetrImageProcessorFast.do_convert_annotations |
1 | 0 | 0 |
transformers.models.rt_detr.modeling_rt_detr (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RTDetrModel.init |
2 | 1 | 0 |
meth |
RTDetrModel.freeze_backbone |
1 | 0 | 0 |
meth |
RTDetrModel.unfreeze_backbone |
1 | 0 | 0 |
meth |
RTDetrModel.generate_anchors |
5 | 0 | 0 |
attr |
RTDetrModel.backbone |
1 | 0 | 0 |
attr |
RTDetrModel.encoder_input_proj |
1 | 0 | 0 |
attr |
RTDetrModel.encoder |
1 | 0 | 0 |
attr |
RTDetrModel.enc_output |
1 | 0 | 0 |
attr |
RTDetrModel.enc_score_head |
1 | 0 | 0 |
attr |
RTDetrModel.enc_bbox_head |
1 | 0 | 0 |
attr |
RTDetrModel.decoder_input_proj |
1 | 0 | 0 |
attr |
RTDetrModel.decoder |
1 | 0 | 0 |
attr |
RTDetrModel.denoising_class_embed |
1 | 0 | 0 |
attr |
RTDetrModel.weight_embedding |
1 | 0 | 0 |
meth |
RTDetrPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
RTDetrForObjectDetection.init |
2 | 1 | 0 |
meth |
RTDetrForObjectDetection._set_aux_loss |
3 | 0 | 0 |
attr |
RTDetrForObjectDetection.model |
1 | 0 | 0 |
transformers.models.rt_detr.modeling_rt_detr_resnet (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RTDetrResNetPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
RTDetrResNetBackbone.init |
2 | 0 | 0 |
meth |
RTDetrResNetBackbone.forward |
5 | 4 | 0 |
attr |
RTDetrResNetBackbone.num_features |
1 | 0 | 0 |
attr |
RTDetrResNetBackbone.embedder |
1 | 0 | 0 |
attr |
RTDetrResNetBackbone.encoder |
1 | 0 | 0 |
transformers.models.rt_detr.modular_rt_detr (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RTDetrModel.init |
2 | 1 | 0 |
meth |
RTDetrModel.freeze_backbone |
1 | 0 | 0 |
meth |
RTDetrModel.unfreeze_backbone |
1 | 0 | 0 |
meth |
RTDetrModel.generate_anchors |
5 | 0 | 0 |
attr |
RTDetrModel.backbone |
1 | 0 | 0 |
attr |
RTDetrModel.encoder_input_proj |
1 | 0 | 0 |
attr |
RTDetrModel.encoder |
1 | 0 | 0 |
attr |
RTDetrModel.enc_output |
1 | 0 | 0 |
attr |
RTDetrModel.enc_score_head |
1 | 0 | 0 |
attr |
RTDetrModel.enc_bbox_head |
1 | 0 | 0 |
attr |
RTDetrModel.decoder_input_proj |
1 | 0 | 0 |
attr |
RTDetrModel.decoder |
1 | 0 | 0 |
attr |
RTDetrModel.denoising_class_embed |
1 | 0 | 0 |
attr |
RTDetrModel.weight_embedding |
1 | 0 | 0 |
meth |
RTDetrPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
RTDetrImageProcessorFast._preprocess |
19 | 18 | 0 |
meth |
RTDetrImageProcessorFast.post_process_object_detection |
5 | 3 | 0 |
meth |
RTDetrImageProcessorFast.post_process_instance_segmentation |
1 | 0 | 0 |
meth |
RTDetrImageProcessorFast.post_process_semantic_segmentation |
1 | 0 | 0 |
meth |
RTDetrImageProcessorFast.post_process_panoptic_segmentation |
1 | 0 | 0 |
attr |
RTDetrImageProcessorFast.do_convert_annotations |
1 | 0 | 0 |
meth |
RTDetrForObjectDetection.init |
2 | 1 | 0 |
meth |
RTDetrForObjectDetection._set_aux_loss |
3 | 0 | 0 |
attr |
RTDetrForObjectDetection.model |
1 | 0 | 0 |
transformers.models.rt_detr_v2.configuration_rt_detr_v2 (111 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RTDetrV2Config.init |
57 | 0 | 0 |
attr |
RTDetrV2Config.initializer_range |
1 | 0 | 0 |
attr |
RTDetrV2Config.initializer_bias_prior_prob |
1 | 0 | 0 |
attr |
RTDetrV2Config.layer_norm_eps |
1 | 0 | 0 |
attr |
RTDetrV2Config.batch_norm_eps |
1 | 0 | 0 |
attr |
RTDetrV2Config.backbone_config |
1 | 0 | 0 |
attr |
RTDetrV2Config.freeze_backbone_batch_norms |
1 | 0 | 0 |
attr |
RTDetrV2Config.encoder_hidden_dim |
1 | 0 | 0 |
attr |
RTDetrV2Config.encoder_in_channels |
1 | 0 | 0 |
attr |
RTDetrV2Config.feat_strides |
1 | 0 | 0 |
attr |
RTDetrV2Config.encoder_ffn_dim |
1 | 0 | 0 |
attr |
RTDetrV2Config.dropout |
1 | 0 | 0 |
attr |
RTDetrV2Config.activation_dropout |
1 | 0 | 0 |
attr |
RTDetrV2Config.encode_proj_layers |
1 | 0 | 0 |
attr |
RTDetrV2Config.encoder_layers |
1 | 0 | 0 |
attr |
RTDetrV2Config.positional_encoding_temperature |
1 | 0 | 0 |
attr |
RTDetrV2Config.eval_size |
1 | 0 | 0 |
attr |
RTDetrV2Config.normalize_before |
1 | 0 | 0 |
attr |
RTDetrV2Config.encoder_activation_function |
1 | 0 | 0 |
attr |
RTDetrV2Config.activation_function |
1 | 0 | 0 |
attr |
RTDetrV2Config.hidden_expansion |
1 | 0 | 0 |
attr |
RTDetrV2Config.num_queries |
1 | 0 | 0 |
attr |
RTDetrV2Config.decoder_ffn_dim |
1 | 0 | 0 |
attr |
RTDetrV2Config.decoder_in_channels |
1 | 0 | 0 |
attr |
RTDetrV2Config.num_feature_levels |
1 | 0 | 0 |
attr |
RTDetrV2Config.decoder_n_points |
1 | 0 | 0 |
attr |
RTDetrV2Config.decoder_layers |
1 | 0 | 0 |
attr |
RTDetrV2Config.decoder_attention_heads |
1 | 0 | 0 |
attr |
RTDetrV2Config.decoder_activation_function |
1 | 0 | 0 |
attr |
RTDetrV2Config.attention_dropout |
1 | 0 | 0 |
attr |
RTDetrV2Config.num_denoising |
1 | 0 | 0 |
attr |
RTDetrV2Config.label_noise_ratio |
1 | 0 | 0 |
attr |
RTDetrV2Config.box_noise_scale |
1 | 0 | 0 |
attr |
RTDetrV2Config.learn_initial_query |
1 | 0 | 0 |
attr |
RTDetrV2Config.anchor_image_size |
1 | 0 | 0 |
attr |
RTDetrV2Config.auxiliary_loss |
1 | 0 | 0 |
attr |
RTDetrV2Config.with_box_refine |
1 | 0 | 0 |
attr |
RTDetrV2Config.matcher_alpha |
1 | 0 | 0 |
attr |
RTDetrV2Config.matcher_gamma |
1 | 0 | 0 |
attr |
RTDetrV2Config.matcher_class_cost |
1 | 0 | 0 |
attr |
RTDetrV2Config.matcher_bbox_cost |
1 | 0 | 0 |
attr |
RTDetrV2Config.matcher_giou_cost |
1 | 0 | 0 |
attr |
RTDetrV2Config.use_focal_loss |
1 | 0 | 0 |
attr |
RTDetrV2Config.focal_loss_alpha |
1 | 0 | 0 |
attr |
RTDetrV2Config.focal_loss_gamma |
1 | 0 | 0 |
attr |
RTDetrV2Config.weight_loss_vfl |
1 | 0 | 0 |
attr |
RTDetrV2Config.weight_loss_bbox |
1 | 0 | 0 |
attr |
RTDetrV2Config.weight_loss_giou |
1 | 0 | 0 |
attr |
RTDetrV2Config.eos_coefficient |
1 | 0 | 0 |
attr |
RTDetrV2Config.decoder_n_levels |
1 | 0 | 0 |
attr |
RTDetrV2Config.decoder_offset_scale |
1 | 0 | 0 |
attr |
RTDetrV2Config.decoder_method |
1 | 0 | 0 |
attr |
RTDetrV2Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
RTDetrV2Config.d_model |
1 | 0 | 0 |
attr |
RTDetrV2Config.encoder_attention_heads |
1 | 0 | 0 |
transformers.models.rt_detr_v2.modeling_rt_detr_v2 (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RTDetrV2Model.init |
2 | 1 | 0 |
meth |
RTDetrV2Model.freeze_backbone |
1 | 0 | 0 |
meth |
RTDetrV2Model.unfreeze_backbone |
1 | 0 | 0 |
meth |
RTDetrV2Model.generate_anchors |
5 | 0 | 0 |
attr |
RTDetrV2Model.backbone |
1 | 0 | 0 |
attr |
RTDetrV2Model.encoder_input_proj |
1 | 0 | 0 |
attr |
RTDetrV2Model.encoder |
1 | 0 | 0 |
attr |
RTDetrV2Model.enc_output |
1 | 0 | 0 |
attr |
RTDetrV2Model.enc_score_head |
1 | 0 | 0 |
attr |
RTDetrV2Model.enc_bbox_head |
1 | 0 | 0 |
attr |
RTDetrV2Model.decoder_input_proj |
1 | 0 | 0 |
attr |
RTDetrV2Model.decoder |
1 | 0 | 0 |
attr |
RTDetrV2Model.denoising_class_embed |
1 | 0 | 0 |
attr |
RTDetrV2Model.weight_embedding |
1 | 0 | 0 |
meth |
RTDetrV2PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
RTDetrV2ForObjectDetection.init |
2 | 1 | 0 |
meth |
RTDetrV2ForObjectDetection._set_aux_loss |
3 | 0 | 0 |
attr |
RTDetrV2ForObjectDetection.model |
1 | 0 | 0 |
attr |
RTDetrV2ForObjectDetection.class_embed |
1 | 0 | 0 |
attr |
RTDetrV2ForObjectDetection.bbox_embed |
1 | 0 | 0 |
transformers.models.rt_detr_v2.modular_rt_detr_v2 (119 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RTDetrV2Model.init |
2 | 1 | 0 |
attr |
RTDetrV2Model.decoder |
1 | 0 | 0 |
meth |
RTDetrV2PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
RTDetrV2ForObjectDetection.init |
2 | 1 | 0 |
attr |
RTDetrV2ForObjectDetection.model |
1 | 0 | 0 |
attr |
RTDetrV2ForObjectDetection.class_embed |
1 | 0 | 0 |
attr |
RTDetrV2ForObjectDetection.bbox_embed |
1 | 0 | 0 |
meth |
RTDetrV2Config.init |
57 | 0 | 0 |
attr |
RTDetrV2Config.initializer_range |
1 | 0 | 0 |
attr |
RTDetrV2Config.initializer_bias_prior_prob |
1 | 0 | 0 |
attr |
RTDetrV2Config.layer_norm_eps |
1 | 0 | 0 |
attr |
RTDetrV2Config.batch_norm_eps |
1 | 0 | 0 |
attr |
RTDetrV2Config.backbone_config |
1 | 0 | 0 |
attr |
RTDetrV2Config.freeze_backbone_batch_norms |
1 | 0 | 0 |
attr |
RTDetrV2Config.encoder_hidden_dim |
1 | 0 | 0 |
attr |
RTDetrV2Config.encoder_in_channels |
1 | 0 | 0 |
attr |
RTDetrV2Config.feat_strides |
1 | 0 | 0 |
attr |
RTDetrV2Config.encoder_ffn_dim |
1 | 0 | 0 |
attr |
RTDetrV2Config.dropout |
1 | 0 | 0 |
attr |
RTDetrV2Config.activation_dropout |
1 | 0 | 0 |
attr |
RTDetrV2Config.encode_proj_layers |
1 | 0 | 0 |
attr |
RTDetrV2Config.encoder_layers |
1 | 0 | 0 |
attr |
RTDetrV2Config.positional_encoding_temperature |
1 | 0 | 0 |
attr |
RTDetrV2Config.eval_size |
1 | 0 | 0 |
attr |
RTDetrV2Config.normalize_before |
1 | 0 | 0 |
attr |
RTDetrV2Config.encoder_activation_function |
1 | 0 | 0 |
attr |
RTDetrV2Config.activation_function |
1 | 0 | 0 |
attr |
RTDetrV2Config.hidden_expansion |
1 | 0 | 0 |
attr |
RTDetrV2Config.num_queries |
1 | 0 | 0 |
attr |
RTDetrV2Config.decoder_ffn_dim |
1 | 0 | 0 |
attr |
RTDetrV2Config.decoder_in_channels |
1 | 0 | 0 |
attr |
RTDetrV2Config.num_feature_levels |
1 | 0 | 0 |
attr |
RTDetrV2Config.decoder_n_points |
1 | 0 | 0 |
attr |
RTDetrV2Config.decoder_layers |
1 | 0 | 0 |
attr |
RTDetrV2Config.decoder_attention_heads |
1 | 0 | 0 |
attr |
RTDetrV2Config.decoder_activation_function |
1 | 0 | 0 |
attr |
RTDetrV2Config.attention_dropout |
1 | 0 | 0 |
attr |
RTDetrV2Config.num_denoising |
1 | 0 | 0 |
attr |
RTDetrV2Config.label_noise_ratio |
1 | 0 | 0 |
attr |
RTDetrV2Config.box_noise_scale |
1 | 0 | 0 |
attr |
RTDetrV2Config.learn_initial_query |
1 | 0 | 0 |
attr |
RTDetrV2Config.anchor_image_size |
1 | 0 | 0 |
attr |
RTDetrV2Config.auxiliary_loss |
1 | 0 | 0 |
attr |
RTDetrV2Config.with_box_refine |
1 | 0 | 0 |
attr |
RTDetrV2Config.matcher_alpha |
1 | 0 | 0 |
attr |
RTDetrV2Config.matcher_gamma |
1 | 0 | 0 |
attr |
RTDetrV2Config.matcher_class_cost |
1 | 0 | 0 |
attr |
RTDetrV2Config.matcher_bbox_cost |
1 | 0 | 0 |
attr |
RTDetrV2Config.matcher_giou_cost |
1 | 0 | 0 |
attr |
RTDetrV2Config.use_focal_loss |
1 | 0 | 0 |
attr |
RTDetrV2Config.focal_loss_alpha |
1 | 0 | 0 |
attr |
RTDetrV2Config.focal_loss_gamma |
1 | 0 | 0 |
attr |
RTDetrV2Config.weight_loss_vfl |
1 | 0 | 0 |
attr |
RTDetrV2Config.weight_loss_bbox |
1 | 0 | 0 |
attr |
RTDetrV2Config.weight_loss_giou |
1 | 0 | 0 |
attr |
RTDetrV2Config.eos_coefficient |
1 | 0 | 0 |
attr |
RTDetrV2Config.decoder_n_levels |
1 | 0 | 0 |
attr |
RTDetrV2Config.decoder_offset_scale |
1 | 0 | 0 |
attr |
RTDetrV2Config.decoder_method |
1 | 0 | 0 |
attr |
RTDetrV2Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
RTDetrV2Config.d_model |
1 | 0 | 0 |
attr |
RTDetrV2Config.encoder_attention_heads |
1 | 0 | 0 |
transformers.models.rwkv.configuration_rwkv (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RwkvConfig.init |
14 | 0 | 0 |
attr |
RwkvConfig.vocab_size |
1 | 0 | 0 |
attr |
RwkvConfig.context_length |
1 | 0 | 0 |
attr |
RwkvConfig.hidden_size |
1 | 0 | 0 |
attr |
RwkvConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
RwkvConfig.attention_hidden_size |
1 | 0 | 0 |
attr |
RwkvConfig.intermediate_size |
1 | 0 | 0 |
attr |
RwkvConfig.layer_norm_epsilon |
1 | 0 | 0 |
attr |
RwkvConfig.rescale_every |
1 | 0 | 0 |
attr |
RwkvConfig.use_cache |
1 | 0 | 0 |
attr |
RwkvConfig.bos_token_id |
1 | 0 | 0 |
attr |
RwkvConfig.eos_token_id |
1 | 0 | 0 |
attr |
RwkvConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.rwkv.modeling_rwkv (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
RwkvForCausalLM.init |
2 | 0 | 0 |
meth |
RwkvForCausalLM.get_output_embeddings |
1 | 0 | 0 |
meth |
RwkvForCausalLM.set_output_embeddings |
2 | 0 | 0 |
meth |
RwkvForCausalLM.forward |
12 | 11 | 0 |
attr |
RwkvForCausalLM.rwkv |
1 | 0 | 0 |
attr |
RwkvForCausalLM.head |
1 | 0 | 0 |
meth |
RwkvPreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
RwkvModel.init |
2 | 0 | 0 |
meth |
RwkvModel.get_input_embeddings |
1 | 0 | 0 |
meth |
RwkvModel.set_input_embeddings |
2 | 0 | 0 |
meth |
RwkvModel.forward |
10 | 9 | 0 |
meth |
RwkvModel._rescale_layers |
1 | 0 | 0 |
meth |
RwkvModel._bnb_4bit_dequantize_and_rescale |
3 | 0 | 0 |
attr |
RwkvModel.embeddings |
1 | 0 | 0 |
attr |
RwkvModel.blocks |
1 | 0 | 0 |
attr |
RwkvModel.ln_out |
1 | 0 | 0 |
attr |
RwkvModel.layers_are_rescaled |
1 | 0 | 0 |
attr |
RwkvModel.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.sam.configuration_sam (92 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SamPromptEncoderConfig.init |
9 | 0 | 0 |
attr |
SamPromptEncoderConfig.hidden_size |
1 | 0 | 0 |
attr |
SamPromptEncoderConfig.image_size |
1 | 0 | 0 |
attr |
SamPromptEncoderConfig.patch_size |
1 | 0 | 0 |
attr |
SamPromptEncoderConfig.image_embedding_size |
1 | 0 | 0 |
attr |
SamPromptEncoderConfig.mask_input_channels |
1 | 0 | 0 |
attr |
SamPromptEncoderConfig.num_point_embeddings |
1 | 0 | 0 |
attr |
SamPromptEncoderConfig.hidden_act |
1 | 0 | 0 |
attr |
SamPromptEncoderConfig.layer_norm_eps |
1 | 0 | 0 |
meth |
SamMaskDecoderConfig.init |
12 | 0 | 0 |
attr |
SamMaskDecoderConfig.hidden_size |
1 | 0 | 0 |
attr |
SamMaskDecoderConfig.hidden_act |
1 | 0 | 0 |
attr |
SamMaskDecoderConfig.mlp_dim |
1 | 0 | 0 |
attr |
SamMaskDecoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
SamMaskDecoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
SamMaskDecoderConfig.attention_downsample_rate |
1 | 0 | 0 |
attr |
SamMaskDecoderConfig.num_multimask_outputs |
1 | 0 | 0 |
attr |
SamMaskDecoderConfig.iou_head_depth |
1 | 0 | 0 |
attr |
SamMaskDecoderConfig.iou_head_hidden_dim |
1 | 0 | 0 |
attr |
SamMaskDecoderConfig.layer_norm_eps |
1 | 0 | 0 |
meth |
SamVisionConfig.init |
21 | 0 | 0 |
attr |
SamVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
SamVisionConfig.output_channels |
1 | 0 | 0 |
attr |
SamVisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
SamVisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
SamVisionConfig.num_channels |
1 | 0 | 0 |
attr |
SamVisionConfig.image_size |
1 | 0 | 0 |
attr |
SamVisionConfig.patch_size |
1 | 0 | 0 |
attr |
SamVisionConfig.hidden_act |
1 | 0 | 0 |
attr |
SamVisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
SamVisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
SamVisionConfig.initializer_range |
1 | 0 | 0 |
attr |
SamVisionConfig.qkv_bias |
1 | 0 | 0 |
attr |
SamVisionConfig.mlp_ratio |
1 | 0 | 0 |
attr |
SamVisionConfig.use_abs_pos |
1 | 0 | 0 |
attr |
SamVisionConfig.use_rel_pos |
1 | 0 | 0 |
attr |
SamVisionConfig.window_size |
1 | 0 | 0 |
attr |
SamVisionConfig.global_attn_indexes |
1 | 0 | 0 |
attr |
SamVisionConfig.num_pos_feats |
1 | 0 | 0 |
attr |
SamVisionConfig.mlp_dim |
1 | 0 | 0 |
attr |
SamVisionConfig.scale |
1 | 0 | 0 |
meth |
SamConfig.init |
7 | 0 | 0 |
attr |
SamConfig.vision_config |
1 | 0 | 0 |
attr |
SamConfig.prompt_encoder_config |
1 | 0 | 0 |
attr |
SamConfig.mask_decoder_config |
1 | 0 | 0 |
attr |
SamConfig.initializer_range |
1 | 0 | 0 |
attr |
SamConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.sam.image_processing_sam (66 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SamImageProcessor.init |
15 | 14 | 0 |
meth |
SamImageProcessor.pad_image |
6 | 5 | 0 |
meth |
SamImageProcessor._get_preprocess_shape |
3 | 2 | 0 |
meth |
SamImageProcessor.resize |
7 | 6 | 0 |
meth |
SamImageProcessor._preprocess |
13 | 12 | 0 |
meth |
SamImageProcessor.call |
4 | 0 | 0 |
meth |
SamImageProcessor.preprocess |
19 | 18 | 0 |
meth |
SamImageProcessor.post_process_masks |
8 | 0 | 0 |
meth |
SamImageProcessor._post_process_masks_pt |
7 | 0 | 0 |
meth |
SamImageProcessor.post_process_for_mask_generation |
6 | 0 | 0 |
meth |
SamImageProcessor.generate_crop_boxes |
10 | 7 | 0 |
meth |
SamImageProcessor.filter_masks |
10 | 0 | 0 |
meth |
SamImageProcessor._filter_masks_pt |
9 | 0 | 0 |
attr |
SamImageProcessor.do_resize |
1 | 0 | 0 |
attr |
SamImageProcessor.size |
1 | 0 | 0 |
attr |
SamImageProcessor.mask_size |
1 | 0 | 0 |
attr |
SamImageProcessor.resample |
1 | 0 | 0 |
attr |
SamImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
SamImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
SamImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
SamImageProcessor.image_mean |
1 | 0 | 0 |
attr |
SamImageProcessor.image_std |
1 | 0 | 0 |
attr |
SamImageProcessor.do_pad |
1 | 0 | 0 |
attr |
SamImageProcessor.pad_size |
1 | 0 | 0 |
attr |
SamImageProcessor.mask_pad_size |
1 | 0 | 0 |
attr |
SamImageProcessor.do_convert_rgb |
1 | 0 | 0 |
transformers.models.sam.image_processing_sam_fast (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SamImageProcessorFast.init |
2 | 1 | 0 |
meth |
SamImageProcessorFast._get_preprocess_shape |
3 | 2 | 0 |
meth |
SamImageProcessorFast.resize |
5 | 4 | 0 |
meth |
SamImageProcessorFast._further_process_kwargs |
10 | 9 | 0 |
meth |
SamImageProcessorFast._preprocess |
17 | 16 | 0 |
meth |
SamImageProcessorFast.generate_crop_boxes |
8 | 6 | 0 |
meth |
SamImageProcessorFast.filter_masks |
9 | 0 | 0 |
meth |
SamImageProcessorFast.post_process_masks |
7 | 0 | 0 |
meth |
SamImageProcessorFast.post_process_for_mask_generation |
5 | 0 | 0 |
transformers.models.sam.modeling_sam (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SamVisionModel.init |
2 | 1 | 0 |
attr |
SamVisionModel.vision_encoder |
1 | 0 | 0 |
meth |
SamPreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
SamModel.init |
2 | 1 | 0 |
meth |
SamModel.get_input_embeddings |
1 | 0 | 0 |
meth |
SamModel.get_image_wide_positional_embeddings |
1 | 0 | 0 |
meth |
SamModel.get_image_embeddings |
3 | 1 | 0 |
meth |
SamModel.get_prompt_embeddings |
5 | 4 | 0 |
attr |
SamModel._can_record_outputs |
1 | 0 | 0 |
attr |
SamModel.shared_image_embedding |
1 | 0 | 0 |
attr |
SamModel.vision_encoder |
1 | 0 | 0 |
attr |
SamModel.prompt_encoder |
1 | 0 | 0 |
attr |
SamModel.mask_decoder |
1 | 0 | 0 |
transformers.models.sam.processing_sam (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SamProcessor.init |
2 | 0 | 0 |
meth |
SamProcessor.call |
4 | 3 | 0 |
meth |
SamProcessor._normalize_and_convert |
8 | 0 | 0 |
meth |
SamProcessor._pad_points_and_labels |
4 | 0 | 0 |
meth |
SamProcessor._normalize_coordinates |
5 | 3 | 0 |
meth |
SamProcessor._check_and_preprocess_points |
4 | 0 | 0 |
meth |
SamProcessor.post_process_masks |
3 | 0 | 0 |
prop |
SamProcessor.model_input_names |
1 | 0 | 0 |
attr |
SamProcessor.target_size |
1 | 0 | 0 |
transformers.models.sam2.configuration_sam2 (120 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Sam2MaskDecoderConfig.init |
14 | 0 | 0 |
attr |
Sam2MaskDecoderConfig.hidden_size |
1 | 0 | 0 |
attr |
Sam2MaskDecoderConfig.num_multimask_outputs |
1 | 0 | 0 |
attr |
Sam2MaskDecoderConfig.hidden_act |
1 | 0 | 0 |
attr |
Sam2MaskDecoderConfig.iou_head_depth |
1 | 0 | 0 |
attr |
Sam2MaskDecoderConfig.iou_head_hidden_dim |
1 | 0 | 0 |
attr |
Sam2MaskDecoderConfig.dynamic_multimask_via_stability |
1 | 0 | 0 |
attr |
Sam2MaskDecoderConfig.dynamic_multimask_stability_delta |
1 | 0 | 0 |
attr |
Sam2MaskDecoderConfig.dynamic_multimask_stability_thresh |
1 | 0 | 0 |
attr |
Sam2MaskDecoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Sam2MaskDecoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Sam2MaskDecoderConfig.mlp_dim |
1 | 0 | 0 |
attr |
Sam2MaskDecoderConfig.attention_downsample_rate |
1 | 0 | 0 |
meth |
Sam2HieraDetConfig.init |
21 | 0 | 0 |
attr |
Sam2HieraDetConfig.hidden_size |
1 | 0 | 0 |
attr |
Sam2HieraDetConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Sam2HieraDetConfig.num_channels |
1 | 0 | 0 |
attr |
Sam2HieraDetConfig.image_size |
1 | 0 | 0 |
attr |
Sam2HieraDetConfig.patch_kernel_size |
1 | 0 | 0 |
attr |
Sam2HieraDetConfig.patch_stride |
1 | 0 | 0 |
attr |
Sam2HieraDetConfig.patch_padding |
1 | 0 | 0 |
attr |
Sam2HieraDetConfig.query_stride |
1 | 0 | 0 |
attr |
Sam2HieraDetConfig.window_positional_embedding_background_size |
1 | 0 | 0 |
attr |
Sam2HieraDetConfig.num_query_pool_stages |
1 | 0 | 0 |
attr |
Sam2HieraDetConfig.blocks_per_stage |
1 | 0 | 0 |
attr |
Sam2HieraDetConfig.embed_dim_per_stage |
1 | 0 | 0 |
attr |
Sam2HieraDetConfig.num_attention_heads_per_stage |
1 | 0 | 0 |
attr |
Sam2HieraDetConfig.window_size_per_stage |
1 | 0 | 0 |
attr |
Sam2HieraDetConfig.global_attention_blocks |
1 | 0 | 0 |
attr |
Sam2HieraDetConfig.mlp_ratio |
1 | 0 | 0 |
attr |
Sam2HieraDetConfig.hidden_act |
1 | 0 | 0 |
attr |
Sam2HieraDetConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
Sam2HieraDetConfig.initializer_range |
1 | 0 | 0 |
meth |
Sam2Config.__init__ |
6 | 0 | 0 |
attr |
Sam2Config.vision_config |
1 | 0 | 0 |
attr |
Sam2Config.prompt_encoder_config |
1 | 0 | 0 |
attr |
Sam2Config.mask_decoder_config |
1 | 0 | 0 |
attr |
Sam2Config.initializer_range |
1 | 0 | 0 |
meth |
Sam2PromptEncoderConfig.__init__ |
10 | 0 | 0 |
attr |
Sam2PromptEncoderConfig.hidden_size |
1 | 0 | 0 |
attr |
Sam2PromptEncoderConfig.image_size |
1 | 0 | 0 |
attr |
Sam2PromptEncoderConfig.patch_size |
1 | 0 | 0 |
attr |
Sam2PromptEncoderConfig.mask_input_channels |
1 | 0 | 0 |
attr |
Sam2PromptEncoderConfig.num_point_embeddings |
1 | 0 | 0 |
attr |
Sam2PromptEncoderConfig.hidden_act |
1 | 0 | 0 |
attr |
Sam2PromptEncoderConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
Sam2PromptEncoderConfig.scale |
1 | 0 | 0 |
meth |
Sam2VisionConfig.__init__ |
14 | 0 | 0 |
attr |
Sam2VisionConfig.backbone_config |
1 | 0 | 0 |
attr |
Sam2VisionConfig.backbone_channel_list |
1 | 0 | 0 |
attr |
Sam2VisionConfig.backbone_feature_sizes |
1 | 0 | 0 |
attr |
Sam2VisionConfig.fpn_hidden_size |
1 | 0 | 0 |
attr |
Sam2VisionConfig.fpn_kernel_size |
1 | 0 | 0 |
attr |
Sam2VisionConfig.fpn_stride |
1 | 0 | 0 |
attr |
Sam2VisionConfig.fpn_padding |
1 | 0 | 0 |
attr |
Sam2VisionConfig.fpn_top_down_levels |
1 | 0 | 0 |
attr |
Sam2VisionConfig.num_feature_levels |
1 | 0 | 0 |
attr |
Sam2VisionConfig.hidden_act |
1 | 0 | 0 |
attr |
Sam2VisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
Sam2VisionConfig.initializer_range |
1 | 0 | 0 |
transformers.models.sam2.image_processing_sam2_fast (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Sam2ImageProcessorFast.__init__ |
2 | 1 | 0 |
meth |
Sam2ImageProcessorFast._further_process_kwargs |
8 | 7 | 0 |
meth |
Sam2ImageProcessorFast._preprocess |
4 | 3 | 0 |
meth |
Sam2ImageProcessorFast.generate_crop_boxes |
8 | 6 | 0 |
meth |
Sam2ImageProcessorFast.filter_masks |
9 | 0 | 0 |
meth |
Sam2ImageProcessorFast.post_process_masks |
9 | 0 | 0 |
meth |
Sam2ImageProcessorFast.post_process_for_mask_generation |
5 | 0 | 0 |
transformers.models.sam2.modeling_sam2 (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Sam2HieraDetModel.__init__ |
2 | 1 | 0 |
meth |
Sam2HieraDetModel.get_input_embeddings |
1 | 0 | 0 |
attr |
Sam2HieraDetModel.patch_embed |
1 | 0 | 0 |
attr |
Sam2HieraDetModel.pos_embed |
1 | 0 | 0 |
attr |
Sam2HieraDetModel.pos_embed_window |
1 | 0 | 0 |
attr |
Sam2HieraDetModel.stage_ends |
1 | 0 | 0 |
attr |
Sam2HieraDetModel.blocks |
1 | 0 | 0 |
meth |
Sam2PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Sam2VisionModel.__init__ |
2 | 1 | 0 |
meth |
Sam2VisionModel.get_input_embeddings |
1 | 0 | 0 |
attr |
Sam2VisionModel.config |
1 | 0 | 0 |
attr |
Sam2VisionModel.backbone |
1 | 0 | 0 |
attr |
Sam2VisionModel.neck |
1 | 0 | 0 |
attr |
Sam2VisionModel.num_feature_levels |
1 | 0 | 0 |
meth |
Sam2Model.__init__ |
2 | 1 | 0 |
meth |
Sam2Model.get_input_embeddings |
1 | 0 | 0 |
meth |
Sam2Model.get_prompt_embeddings |
5 | 4 | 0 |
attr |
Sam2Model._can_record_outputs |
1 | 0 | 0 |
attr |
Sam2Model.shared_image_embedding |
1 | 0 | 0 |
attr |
Sam2Model.vision_encoder |
1 | 0 | 0 |
attr |
Sam2Model.prompt_encoder |
1 | 0 | 0 |
attr |
Sam2Model.mask_decoder |
1 | 0 | 0 |
attr |
Sam2Model.num_feature_levels |
1 | 0 | 0 |
attr |
Sam2Model.backbone_feature_sizes |
1 | 0 | 0 |
attr |
Sam2Model.hidden_dim |
1 | 0 | 0 |
attr |
Sam2Model.no_memory_embedding |
1 | 0 | 0 |
transformers.models.sam2.modular_sam2 (38 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Sam2ImageProcessorFast.__init__ |
2 | 1 | 0 |
meth |
Sam2ImageProcessorFast._preprocess |
4 | 3 | 0 |
meth |
Sam2ImageProcessorFast._further_process_kwargs |
8 | 7 | 0 |
meth |
Sam2ImageProcessorFast.post_process_masks |
9 | 0 | 0 |
meth |
Sam2ImageProcessorFast._get_preprocess_shape |
1 | 0 | 0 |
meth |
Sam2ImageProcessorFast.resize |
1 | 0 | 0 |
meth |
Sam2VisionModel.__init__ |
2 | 1 | 0 |
meth |
Sam2VisionModel.get_input_embeddings |
1 | 0 | 0 |
attr |
Sam2VisionModel.config |
1 | 0 | 0 |
attr |
Sam2VisionModel.backbone |
1 | 0 | 0 |
attr |
Sam2VisionModel.neck |
1 | 0 | 0 |
attr |
Sam2VisionModel.num_feature_levels |
1 | 0 | 0 |
meth |
Sam2Model.__init__ |
2 | 1 | 0 |
attr |
Sam2Model.shared_image_embedding |
1 | 0 | 0 |
attr |
Sam2Model.vision_encoder |
1 | 0 | 0 |
attr |
Sam2Model.prompt_encoder |
1 | 0 | 0 |
attr |
Sam2Model.mask_decoder |
1 | 0 | 0 |
attr |
Sam2Model.num_feature_levels |
1 | 0 | 0 |
attr |
Sam2Model.backbone_feature_sizes |
1 | 0 | 0 |
attr |
Sam2Model.hidden_dim |
1 | 0 | 0 |
attr |
Sam2Model.no_memory_embedding |
1 | 0 | 0 |
meth |
Sam2PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Sam2HieraDetModel.__init__ |
2 | 1 | 0 |
meth |
Sam2HieraDetModel.get_input_embeddings |
1 | 0 | 0 |
attr |
Sam2HieraDetModel.patch_embed |
1 | 0 | 0 |
attr |
Sam2HieraDetModel.pos_embed |
1 | 0 | 0 |
attr |
Sam2HieraDetModel.pos_embed_window |
1 | 0 | 0 |
attr |
Sam2HieraDetModel.stage_ends |
1 | 0 | 0 |
attr |
Sam2HieraDetModel.blocks |
1 | 0 | 0 |
transformers.models.sam2.processing_sam2 (40 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Sam2Processor.__init__ |
5 | 2 | 0 |
meth |
Sam2Processor.__call__ |
9 | 8 | 0 |
meth |
Sam2Processor._normalize_coordinates |
5 | 3 | 0 |
meth |
Sam2Processor._convert_to_nested_list |
4 | 0 | 0 |
meth |
Sam2Processor._get_nested_dimensions |
3 | 0 | 0 |
meth |
Sam2Processor._pad_nested_list |
5 | 0 | 0 |
meth |
Sam2Processor._create_empty_nested_structure |
3 | 0 | 0 |
meth |
Sam2Processor._get_nesting_level |
2 | 0 | 0 |
meth |
Sam2Processor._normalize_tensor_coordinates |
5 | 0 | 0 |
meth |
Sam2Processor.post_process_masks |
9 | 0 | 0 |
prop |
Sam2Processor.model_input_names |
1 | 0 | 0 |
attr |
Sam2Processor.point_pad_value |
1 | 0 | 0 |
attr |
Sam2Processor.target_size |
1 | 0 | 0 |
transformers.models.sam2_video.configuration_sam2_video (128 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Sam2VideoPromptEncoderConfig.__init__ |
10 | 0 | 0 |
attr |
Sam2VideoPromptEncoderConfig.hidden_size |
1 | 0 | 0 |
attr |
Sam2VideoPromptEncoderConfig.image_size |
1 | 0 | 0 |
attr |
Sam2VideoPromptEncoderConfig.patch_size |
1 | 0 | 0 |
attr |
Sam2VideoPromptEncoderConfig.mask_input_channels |
1 | 0 | 0 |
attr |
Sam2VideoPromptEncoderConfig.num_point_embeddings |
1 | 0 | 0 |
attr |
Sam2VideoPromptEncoderConfig.hidden_act |
1 | 0 | 0 |
attr |
Sam2VideoPromptEncoderConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
Sam2VideoPromptEncoderConfig.scale |
1 | 0 | 0 |
meth |
Sam2VideoMaskDecoderConfig.__init__ |
14 | 0 | 0 |
attr |
Sam2VideoMaskDecoderConfig.hidden_size |
1 | 0 | 0 |
attr |
Sam2VideoMaskDecoderConfig.num_multimask_outputs |
1 | 0 | 0 |
attr |
Sam2VideoMaskDecoderConfig.hidden_act |
1 | 0 | 0 |
attr |
Sam2VideoMaskDecoderConfig.iou_head_depth |
1 | 0 | 0 |
attr |
Sam2VideoMaskDecoderConfig.iou_head_hidden_dim |
1 | 0 | 0 |
attr |
Sam2VideoMaskDecoderConfig.dynamic_multimask_via_stability |
1 | 0 | 0 |
attr |
Sam2VideoMaskDecoderConfig.dynamic_multimask_stability_delta |
1 | 0 | 0 |
attr |
Sam2VideoMaskDecoderConfig.dynamic_multimask_stability_thresh |
1 | 0 | 0 |
attr |
Sam2VideoMaskDecoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Sam2VideoMaskDecoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Sam2VideoMaskDecoderConfig.mlp_dim |
1 | 0 | 0 |
attr |
Sam2VideoMaskDecoderConfig.attention_downsample_rate |
1 | 0 | 0 |
meth |
Sam2VideoConfig.__init__ |
43 | 0 | 0 |
attr |
Sam2VideoConfig.vision_config |
1 | 0 | 0 |
attr |
Sam2VideoConfig.prompt_encoder_config |
1 | 0 | 0 |
attr |
Sam2VideoConfig.mask_decoder_config |
1 | 0 | 0 |
attr |
Sam2VideoConfig.initializer_range |
1 | 0 | 0 |
attr |
Sam2VideoConfig.num_maskmem |
1 | 0 | 0 |
attr |
Sam2VideoConfig.image_size |
1 | 0 | 0 |
attr |
Sam2VideoConfig.sigmoid_scale_for_mem_enc |
1 | 0 | 0 |
attr |
Sam2VideoConfig.sigmoid_bias_for_mem_enc |
1 | 0 | 0 |
attr |
Sam2VideoConfig.multimask_output_in_sam |
1 | 0 | 0 |
attr |
Sam2VideoConfig.multimask_min_pt_num |
1 | 0 | 0 |
attr |
Sam2VideoConfig.multimask_max_pt_num |
1 | 0 | 0 |
attr |
Sam2VideoConfig.multimask_output_for_tracking |
1 | 0 | 0 |
attr |
Sam2VideoConfig.max_object_pointers_in_encoder |
1 | 0 | 0 |
attr |
Sam2VideoConfig.max_cond_frame_num |
1 | 0 | 0 |
attr |
Sam2VideoConfig.enable_occlusion_spatial_embedding |
1 | 0 | 0 |
attr |
Sam2VideoConfig.enable_temporal_pos_encoding_for_object_pointers |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_attention_hidden_size |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_attention_num_layers |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_attention_num_attention_heads |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_attention_downsample_rate |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_attention_feed_forward_hidden_size |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_attention_feed_forward_hidden_act |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_attention_dropout |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_attention_rope_theta |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_attention_rope_feat_sizes |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_attention_rope_dropout |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_encoder_hidden_size |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_encoder_output_channels |
1 | 0 | 0 |
attr |
Sam2VideoConfig.mask_downsampler_embed_dim |
1 | 0 | 0 |
attr |
Sam2VideoConfig.mask_downsampler_kernel_size |
1 | 0 | 0 |
attr |
Sam2VideoConfig.mask_downsampler_stride |
1 | 0 | 0 |
attr |
Sam2VideoConfig.mask_downsampler_padding |
1 | 0 | 0 |
attr |
Sam2VideoConfig.mask_downsampler_total_stride |
1 | 0 | 0 |
attr |
Sam2VideoConfig.mask_downsampler_hidden_act |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_fuser_num_layers |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_fuser_embed_dim |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_fuser_intermediate_dim |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_fuser_kernel_size |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_fuser_padding |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_fuser_layer_scale_init_value |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_fuser_hidden_act |
1 | 0 | 0 |
transformers.models.sam2_video.modeling_sam2_video (56 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Sam2VideoPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Sam2VideoModel.__init__ |
2 | 1 | 0 |
meth |
Sam2VideoModel.get_input_embeddings |
1 | 0 | 0 |
meth |
Sam2VideoModel.forward |
7 | 6 | 0 |
meth |
Sam2VideoModel._select_closest_cond_frames |
4 | 0 | 0 |
meth |
Sam2VideoModel._batch_encode_memories |
7 | 6 | 0 |
attr |
Sam2VideoModel._can_record_outputs |
1 | 0 | 0 |
attr |
Sam2VideoModel.shared_image_embedding |
1 | 0 | 0 |
attr |
Sam2VideoModel.vision_encoder |
1 | 0 | 0 |
attr |
Sam2VideoModel.prompt_encoder |
1 | 0 | 0 |
attr |
Sam2VideoModel.mask_decoder |
1 | 0 | 0 |
attr |
Sam2VideoModel.num_feature_levels |
1 | 0 | 0 |
attr |
Sam2VideoModel.backbone_feature_sizes |
1 | 0 | 0 |
attr |
Sam2VideoModel.hidden_dim |
1 | 0 | 0 |
attr |
Sam2VideoModel.no_memory_embedding |
1 | 0 | 0 |
attr |
Sam2VideoModel.config |
1 | 0 | 0 |
attr |
Sam2VideoModel.image_size |
1 | 0 | 0 |
attr |
Sam2VideoModel.memory_attention |
1 | 0 | 0 |
attr |
Sam2VideoModel.memory_encoder |
1 | 0 | 0 |
attr |
Sam2VideoModel.no_memory_positional_encoding |
1 | 0 | 0 |
attr |
Sam2VideoModel.mem_dim |
1 | 0 | 0 |
attr |
Sam2VideoModel.num_maskmem |
1 | 0 | 0 |
attr |
Sam2VideoModel.memory_temporal_positional_encoding |
1 | 0 | 0 |
attr |
Sam2VideoModel.no_object_pointer |
1 | 0 | 0 |
attr |
Sam2VideoModel.mask_downsample |
1 | 0 | 0 |
attr |
Sam2VideoModel.object_pointer_proj |
1 | 0 | 0 |
attr |
Sam2VideoModel.occlusion_spatial_embedding_parameter |
1 | 0 | 0 |
attr |
Sam2VideoModel.temporal_positional_encoding_projection_layer |
1 | 0 | 0 |
meth |
Sam2VideoInferenceSession.__init__ |
9 | 8 | 0 |
meth |
Sam2VideoInferenceSession.add_point_inputs |
4 | 3 | 0 |
meth |
Sam2VideoInferenceSession.remove_point_inputs |
3 | 2 | 0 |
meth |
Sam2VideoInferenceSession.add_mask_inputs |
4 | 3 | 0 |
meth |
Sam2VideoInferenceSession.remove_mask_inputs |
3 | 2 | 0 |
meth |
Sam2VideoInferenceSession.store_output |
6 | 5 | 0 |
meth |
Sam2VideoInferenceSession.get_output |
5 | 4 | 0 |
meth |
Sam2VideoInferenceSession.reset_tracking_data |
1 | 0 | 0 |
meth |
Sam2VideoInferenceSession.reset_inference_session |
1 | 0 | 0 |
attr |
Sam2VideoInferenceSession.processed_frames |
1 | 0 | 0 |
attr |
Sam2VideoInferenceSession.video_height |
1 | 0 | 0 |
attr |
Sam2VideoInferenceSession.video_width |
1 | 0 | 0 |
attr |
Sam2VideoInferenceSession.inference_device |
1 | 0 | 0 |
attr |
Sam2VideoInferenceSession.inference_state_device |
1 | 0 | 0 |
attr |
Sam2VideoInferenceSession.video_storage_device |
1 | 0 | 0 |
attr |
Sam2VideoInferenceSession.dtype |
1 | 0 | 0 |
attr |
Sam2VideoInferenceSession.max_vision_features_cache_size |
1 | 0 | 0 |
attr |
Sam2VideoInferenceSession.cache |
1 | 0 | 0 |
attr |
Sam2VideoInferenceSession.obj_ids |
1 | 0 | 0 |
attr |
Sam2VideoInferenceSession.point_inputs_per_obj |
1 | 0 | 0 |
attr |
Sam2VideoInferenceSession.mask_inputs_per_obj |
1 | 0 | 0 |
attr |
Sam2VideoInferenceSession.output_dict_per_obj |
1 | 0 | 0 |
attr |
Sam2VideoInferenceSession.frames_tracked_per_obj |
1 | 0 | 0 |
attr |
Sam2VideoInferenceSession.obj_with_new_inputs |
1 | 0 | 0 |
transformers.models.sam2_video.modular_sam2_video (139 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Sam2VideoPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Sam2VideoProcessor.__init__ |
6 | 2 | 0 |
meth |
Sam2VideoProcessor.init_video_session |
8 | 7 | 0 |
meth |
Sam2VideoProcessor.process_new_mask_for_video_frame |
5 | 4 | 0 |
attr |
Sam2VideoProcessor.point_pad_value |
1 | 0 | 0 |
attr |
Sam2VideoProcessor.target_size |
1 | 0 | 0 |
meth |
Sam2VideoInferenceSession.__init__ |
9 | 8 | 0 |
meth |
Sam2VideoInferenceSession.add_point_inputs |
4 | 3 | 0 |
meth |
Sam2VideoInferenceSession.remove_point_inputs |
3 | 2 | 0 |
meth |
Sam2VideoInferenceSession.add_mask_inputs |
4 | 3 | 0 |
meth |
Sam2VideoInferenceSession.remove_mask_inputs |
3 | 2 | 0 |
meth |
Sam2VideoInferenceSession.store_output |
6 | 5 | 0 |
meth |
Sam2VideoInferenceSession.get_output |
5 | 4 | 0 |
meth |
Sam2VideoInferenceSession.reset_tracking_data |
1 | 0 | 0 |
meth |
Sam2VideoInferenceSession.reset_inference_session |
1 | 0 | 0 |
attr |
Sam2VideoInferenceSession.processed_frames |
1 | 0 | 0 |
attr |
Sam2VideoInferenceSession.video_height |
1 | 0 | 0 |
attr |
Sam2VideoInferenceSession.video_width |
1 | 0 | 0 |
attr |
Sam2VideoInferenceSession.inference_device |
1 | 0 | 0 |
attr |
Sam2VideoInferenceSession.inference_state_device |
1 | 0 | 0 |
attr |
Sam2VideoInferenceSession.video_storage_device |
1 | 0 | 0 |
attr |
Sam2VideoInferenceSession.dtype |
1 | 0 | 0 |
attr |
Sam2VideoInferenceSession.max_vision_features_cache_size |
1 | 0 | 0 |
attr |
Sam2VideoInferenceSession.cache |
1 | 0 | 0 |
attr |
Sam2VideoInferenceSession.obj_ids |
1 | 0 | 0 |
attr |
Sam2VideoInferenceSession.point_inputs_per_obj |
1 | 0 | 0 |
attr |
Sam2VideoInferenceSession.mask_inputs_per_obj |
1 | 0 | 0 |
attr |
Sam2VideoInferenceSession.output_dict_per_obj |
1 | 0 | 0 |
attr |
Sam2VideoInferenceSession.frames_tracked_per_obj |
1 | 0 | 0 |
attr |
Sam2VideoInferenceSession.obj_with_new_inputs |
1 | 0 | 0 |
meth |
Sam2VideoModel.__init__ |
2 | 1 | 0 |
meth |
Sam2VideoModel._select_closest_cond_frames |
4 | 0 | 0 |
meth |
Sam2VideoModel.forward |
7 | 6 | 0 |
meth |
Sam2VideoModel._batch_encode_memories |
7 | 6 | 0 |
attr |
Sam2VideoModel._can_record_outputs |
1 | 0 | 0 |
attr |
Sam2VideoModel.config |
1 | 0 | 0 |
attr |
Sam2VideoModel.image_size |
1 | 0 | 0 |
attr |
Sam2VideoModel.memory_attention |
1 | 0 | 0 |
attr |
Sam2VideoModel.memory_encoder |
1 | 0 | 0 |
attr |
Sam2VideoModel.no_memory_positional_encoding |
1 | 0 | 0 |
attr |
Sam2VideoModel.mem_dim |
1 | 0 | 0 |
attr |
Sam2VideoModel.num_maskmem |
1 | 0 | 0 |
attr |
Sam2VideoModel.memory_temporal_positional_encoding |
1 | 0 | 0 |
attr |
Sam2VideoModel.no_object_pointer |
1 | 0 | 0 |
attr |
Sam2VideoModel.mask_downsample |
1 | 0 | 0 |
attr |
Sam2VideoModel.object_pointer_proj |
1 | 0 | 0 |
attr |
Sam2VideoModel.occlusion_spatial_embedding_parameter |
1 | 0 | 0 |
attr |
Sam2VideoModel.temporal_positional_encoding_projection_layer |
1 | 0 | 0 |
meth |
Sam2VideoConfig.__init__ |
43 | 0 | 0 |
attr |
Sam2VideoConfig.vision_config |
1 | 0 | 0 |
attr |
Sam2VideoConfig.prompt_encoder_config |
1 | 0 | 0 |
attr |
Sam2VideoConfig.mask_decoder_config |
1 | 0 | 0 |
attr |
Sam2VideoConfig.initializer_range |
1 | 0 | 0 |
attr |
Sam2VideoConfig.num_maskmem |
1 | 0 | 0 |
attr |
Sam2VideoConfig.image_size |
1 | 0 | 0 |
attr |
Sam2VideoConfig.sigmoid_scale_for_mem_enc |
1 | 0 | 0 |
attr |
Sam2VideoConfig.sigmoid_bias_for_mem_enc |
1 | 0 | 0 |
attr |
Sam2VideoConfig.multimask_output_in_sam |
1 | 0 | 0 |
attr |
Sam2VideoConfig.multimask_min_pt_num |
1 | 0 | 0 |
attr |
Sam2VideoConfig.multimask_max_pt_num |
1 | 0 | 0 |
attr |
Sam2VideoConfig.multimask_output_for_tracking |
1 | 0 | 0 |
attr |
Sam2VideoConfig.max_object_pointers_in_encoder |
1 | 0 | 0 |
attr |
Sam2VideoConfig.max_cond_frame_num |
1 | 0 | 0 |
attr |
Sam2VideoConfig.enable_occlusion_spatial_embedding |
1 | 0 | 0 |
attr |
Sam2VideoConfig.enable_temporal_pos_encoding_for_object_pointers |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_attention_hidden_size |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_attention_num_layers |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_attention_num_attention_heads |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_attention_downsample_rate |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_attention_feed_forward_hidden_size |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_attention_feed_forward_hidden_act |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_attention_dropout |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_attention_rope_theta |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_attention_rope_feat_sizes |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_attention_rope_dropout |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_encoder_hidden_size |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_encoder_output_channels |
1 | 0 | 0 |
attr |
Sam2VideoConfig.mask_downsampler_embed_dim |
1 | 0 | 0 |
attr |
Sam2VideoConfig.mask_downsampler_kernel_size |
1 | 0 | 0 |
attr |
Sam2VideoConfig.mask_downsampler_stride |
1 | 0 | 0 |
attr |
Sam2VideoConfig.mask_downsampler_padding |
1 | 0 | 0 |
attr |
Sam2VideoConfig.mask_downsampler_total_stride |
1 | 0 | 0 |
attr |
Sam2VideoConfig.mask_downsampler_hidden_act |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_fuser_num_layers |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_fuser_embed_dim |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_fuser_intermediate_dim |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_fuser_kernel_size |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_fuser_padding |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_fuser_layer_scale_init_value |
1 | 0 | 0 |
attr |
Sam2VideoConfig.memory_fuser_hidden_act |
1 | 0 | 0 |
transformers.models.sam2_video.processing_sam2_video (43 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Sam2VideoProcessor.__init__ |
6 | 2 | 0 |
meth |
Sam2VideoProcessor.__call__ |
9 | 8 | 0 |
meth |
Sam2VideoProcessor._normalize_coordinates |
5 | 3 | 0 |
meth |
Sam2VideoProcessor._convert_to_nested_list |
4 | 0 | 0 |
meth |
Sam2VideoProcessor._get_nested_dimensions |
3 | 0 | 0 |
meth |
Sam2VideoProcessor._pad_nested_list |
5 | 0 | 0 |
meth |
Sam2VideoProcessor._create_empty_nested_structure |
3 | 0 | 0 |
meth |
Sam2VideoProcessor._get_nesting_level |
2 | 0 | 0 |
meth |
Sam2VideoProcessor._normalize_tensor_coordinates |
5 | 0 | 0 |
meth |
Sam2VideoProcessor.post_process_masks |
9 | 0 | 0 |
meth |
Sam2VideoProcessor.init_video_session |
8 | 7 | 0 |
meth |
Sam2VideoProcessor.process_new_mask_for_video_frame |
5 | 4 | 0 |
prop |
Sam2VideoProcessor.model_input_names |
1 | 0 | 0 |
attr |
Sam2VideoProcessor.point_pad_value |
1 | 0 | 0 |
attr |
Sam2VideoProcessor.target_size |
1 | 0 | 0 |
transformers.models.sam2_video.video_processing_sam2_video (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Sam2VideoVideoProcessor._preprocess |
5 | 4 | 0 |
meth |
Sam2VideoVideoProcessor.post_process_masks |
7 | 0 | 0 |
transformers.models.sam3.configuration_sam3 (150 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Sam3DETREncoderConfig.__init__ |
11 | 0 | 0 |
attr |
Sam3DETREncoderConfig.hidden_size |
1 | 0 | 0 |
attr |
Sam3DETREncoderConfig.num_layers |
1 | 0 | 0 |
attr |
Sam3DETREncoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Sam3DETREncoderConfig.intermediate_size |
1 | 0 | 0 |
attr |
Sam3DETREncoderConfig.dropout |
1 | 0 | 0 |
attr |
Sam3DETREncoderConfig.hidden_act |
1 | 0 | 0 |
attr |
Sam3DETREncoderConfig.hidden_dropout |
1 | 0 | 0 |
attr |
Sam3DETREncoderConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
Sam3DETREncoderConfig.initializer_range |
1 | 0 | 0 |
meth |
Sam3ViTConfig.__init__ |
19 | 0 | 0 |
attr |
Sam3ViTConfig.hidden_size |
1 | 0 | 0 |
attr |
Sam3ViTConfig.intermediate_size |
1 | 0 | 0 |
attr |
Sam3ViTConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Sam3ViTConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Sam3ViTConfig.num_channels |
1 | 0 | 0 |
attr |
Sam3ViTConfig.image_size |
1 | 0 | 0 |
attr |
Sam3ViTConfig.patch_size |
1 | 0 | 0 |
attr |
Sam3ViTConfig.hidden_act |
1 | 0 | 0 |
attr |
Sam3ViTConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
Sam3ViTConfig.attention_dropout |
1 | 0 | 0 |
attr |
Sam3ViTConfig.rope_theta |
1 | 0 | 0 |
attr |
Sam3ViTConfig.window_size |
1 | 0 | 0 |
attr |
Sam3ViTConfig.global_attn_indexes |
1 | 0 | 0 |
attr |
Sam3ViTConfig.layer_scale_init_value |
1 | 0 | 0 |
attr |
Sam3ViTConfig.pretrain_image_size |
1 | 0 | 0 |
attr |
Sam3ViTConfig.hidden_dropout |
1 | 0 | 0 |
attr |
Sam3ViTConfig.initializer_range |
1 | 0 | 0 |
meth |
Sam3MaskDecoderConfig.__init__ |
8 | 0 | 0 |
attr |
Sam3MaskDecoderConfig.hidden_size |
1 | 0 | 0 |
attr |
Sam3MaskDecoderConfig.num_upsampling_stages |
1 | 0 | 0 |
attr |
Sam3MaskDecoderConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
Sam3MaskDecoderConfig.dropout |
1 | 0 | 0 |
attr |
Sam3MaskDecoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Sam3MaskDecoderConfig.initializer_range |
1 | 0 | 0 |
meth |
Sam3Config.__init__ |
9 | 0 | 0 |
prop |
Sam3Config.image_size |
2 | 0 | 0 |
attr |
Sam3Config.initializer_range |
1 | 0 | 0 |
attr |
Sam3Config.vision_config |
1 | 0 | 0 |
attr |
Sam3Config.text_config |
1 | 0 | 0 |
attr |
Sam3Config.geometry_encoder_config |
1 | 0 | 0 |
attr |
Sam3Config.detr_encoder_config |
1 | 0 | 0 |
attr |
Sam3Config.detr_decoder_config |
1 | 0 | 0 |
attr |
Sam3Config.mask_decoder_config |
1 | 0 | 0 |
meth |
Sam3DETRDecoderConfig.__init__ |
12 | 0 | 0 |
attr |
Sam3DETRDecoderConfig.hidden_size |
1 | 0 | 0 |
attr |
Sam3DETRDecoderConfig.num_layers |
1 | 0 | 0 |
attr |
Sam3DETRDecoderConfig.num_queries |
1 | 0 | 0 |
attr |
Sam3DETRDecoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Sam3DETRDecoderConfig.intermediate_size |
1 | 0 | 0 |
attr |
Sam3DETRDecoderConfig.dropout |
1 | 0 | 0 |
attr |
Sam3DETRDecoderConfig.hidden_act |
1 | 0 | 0 |
attr |
Sam3DETRDecoderConfig.hidden_dropout |
1 | 0 | 0 |
attr |
Sam3DETRDecoderConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
Sam3DETRDecoderConfig.initializer_range |
1 | 0 | 0 |
meth |
Sam3VisionConfig.__init__ |
9 | 0 | 0 |
prop |
Sam3VisionConfig.image_size |
2 | 0 | 0 |
attr |
Sam3VisionConfig.backbone_config |
1 | 0 | 0 |
attr |
Sam3VisionConfig.fpn_hidden_size |
1 | 0 | 0 |
attr |
Sam3VisionConfig.scale_factors |
1 | 0 | 0 |
attr |
Sam3VisionConfig.backbone_feature_sizes |
1 | 0 | 0 |
attr |
Sam3VisionConfig.hidden_act |
1 | 0 | 0 |
attr |
Sam3VisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
Sam3VisionConfig.initializer_range |
1 | 0 | 0 |
meth |
Sam3GeometryEncoderConfig.__init__ |
12 | 0 | 0 |
attr |
Sam3GeometryEncoderConfig.hidden_size |
1 | 0 | 0 |
attr |
Sam3GeometryEncoderConfig.num_layers |
1 | 0 | 0 |
attr |
Sam3GeometryEncoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Sam3GeometryEncoderConfig.intermediate_size |
1 | 0 | 0 |
attr |
Sam3GeometryEncoderConfig.dropout |
1 | 0 | 0 |
attr |
Sam3GeometryEncoderConfig.hidden_act |
1 | 0 | 0 |
attr |
Sam3GeometryEncoderConfig.hidden_dropout |
1 | 0 | 0 |
attr |
Sam3GeometryEncoderConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
Sam3GeometryEncoderConfig.roi_size |
1 | 0 | 0 |
attr |
Sam3GeometryEncoderConfig.initializer_range |
1 | 0 | 0 |
transformers.models.sam3.image_processing_sam3_fast (34 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Sam3ImageProcessorFast.__init__ |
2 | 1 | 0 |
meth |
Sam3ImageProcessorFast._further_process_kwargs |
8 | 7 | 0 |
meth |
Sam3ImageProcessorFast._preprocess |
4 | 3 | 0 |
meth |
Sam3ImageProcessorFast.generate_crop_boxes |
8 | 6 | 0 |
meth |
Sam3ImageProcessorFast.filter_masks |
9 | 0 | 0 |
meth |
Sam3ImageProcessorFast.post_process_masks |
9 | 0 | 0 |
meth |
Sam3ImageProcessorFast.post_process_for_mask_generation |
5 | 0 | 0 |
meth |
Sam3ImageProcessorFast.post_process_semantic_segmentation |
4 | 2 | 0 |
meth |
Sam3ImageProcessorFast.post_process_object_detection |
4 | 2 | 0 |
meth |
Sam3ImageProcessorFast.post_process_instance_segmentation |
5 | 3 | 0 |
transformers.models.sam3.modeling_sam3 (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Sam3Model.__init__ |
2 | 1 | 0 |
attr |
Sam3Model.vision_encoder |
1 | 0 | 0 |
attr |
Sam3Model.text_encoder |
1 | 0 | 0 |
attr |
Sam3Model.vocab_size |
1 | 0 | 0 |
attr |
Sam3Model.text_projection |
1 | 0 | 0 |
attr |
Sam3Model.geometry_encoder |
1 | 0 | 0 |
attr |
Sam3Model.detr_encoder |
1 | 0 | 0 |
attr |
Sam3Model.detr_decoder |
1 | 0 | 0 |
attr |
Sam3Model.mask_decoder |
1 | 0 | 0 |
attr |
Sam3Model.dot_product_scoring |
1 | 0 | 0 |
meth |
Sam3PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Sam3VisionModel.__init__ |
2 | 1 | 0 |
meth |
Sam3VisionModel.get_input_embeddings |
1 | 0 | 0 |
attr |
Sam3VisionModel.config |
1 | 0 | 0 |
attr |
Sam3VisionModel.backbone |
1 | 0 | 0 |
attr |
Sam3VisionModel.neck |
1 | 0 | 0 |
meth |
Sam3ViTModel.__init__ |
2 | 1 | 0 |
attr |
Sam3ViTModel.config |
1 | 0 | 0 |
attr |
Sam3ViTModel.embeddings |
1 | 0 | 0 |
attr |
Sam3ViTModel.layer_norm |
1 | 0 | 0 |
attr |
Sam3ViTModel.layers |
1 | 0 | 0 |
transformers.models.sam3.modular_sam3 (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Sam3ImageProcessorFast.post_process_semantic_segmentation |
4 | 2 | 0 |
meth |
Sam3ImageProcessorFast.post_process_object_detection |
4 | 2 | 0 |
meth |
Sam3ImageProcessorFast.post_process_instance_segmentation |
5 | 3 | 0 |
transformers.models.sam3.processing_sam3 (47 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Sam3Processor.__init__ |
6 | 2 | 0 |
meth |
Sam3Processor.__call__ |
9 | 8 | 0 |
meth |
Sam3Processor._normalize_coordinates |
4 | 2 | 0 |
meth |
Sam3Processor._convert_to_nested_list |
4 | 0 | 0 |
meth |
Sam3Processor._resolve_text_prompts |
3 | 0 | 0 |
meth |
Sam3Processor._get_nested_dimensions |
3 | 0 | 0 |
meth |
Sam3Processor._pad_nested_list |
5 | 0 | 0 |
meth |
Sam3Processor._create_empty_nested_structure |
3 | 0 | 0 |
meth |
Sam3Processor._get_nesting_level |
2 | 0 | 0 |
meth |
Sam3Processor._normalize_tensor_coordinates |
5 | 0 | 0 |
meth |
Sam3Processor.post_process_semantic_segmentation |
4 | 0 | 0 |
meth |
Sam3Processor.post_process_object_detection |
4 | 0 | 0 |
meth |
Sam3Processor.post_process_instance_segmentation |
5 | 0 | 0 |
attr |
Sam3Processor.point_pad_value |
1 | 0 | 0 |
attr |
Sam3Processor.target_size |
1 | 0 | 0 |
transformers.models.sam3_tracker.configuration_sam3_tracker (54 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Sam3TrackerConfig.__init__ |
6 | 0 | 0 |
attr |
Sam3TrackerConfig.vision_config |
1 | 0 | 0 |
attr |
Sam3TrackerConfig.prompt_encoder_config |
1 | 0 | 0 |
attr |
Sam3TrackerConfig.mask_decoder_config |
1 | 0 | 0 |
attr |
Sam3TrackerConfig.initializer_range |
1 | 0 | 0 |
meth |
Sam3TrackerPromptEncoderConfig.__init__ |
10 | 0 | 0 |
attr |
Sam3TrackerPromptEncoderConfig.hidden_size |
1 | 0 | 0 |
attr |
Sam3TrackerPromptEncoderConfig.image_size |
1 | 0 | 0 |
attr |
Sam3TrackerPromptEncoderConfig.patch_size |
1 | 0 | 0 |
attr |
Sam3TrackerPromptEncoderConfig.mask_input_channels |
1 | 0 | 0 |
attr |
Sam3TrackerPromptEncoderConfig.num_point_embeddings |
1 | 0 | 0 |
attr |
Sam3TrackerPromptEncoderConfig.hidden_act |
1 | 0 | 0 |
attr |
Sam3TrackerPromptEncoderConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
Sam3TrackerPromptEncoderConfig.scale |
1 | 0 | 0 |
meth |
Sam3TrackerMaskDecoderConfig.__init__ |
14 | 0 | 0 |
attr |
Sam3TrackerMaskDecoderConfig.hidden_size |
1 | 0 | 0 |
attr |
Sam3TrackerMaskDecoderConfig.num_multimask_outputs |
1 | 0 | 0 |
attr |
Sam3TrackerMaskDecoderConfig.hidden_act |
1 | 0 | 0 |
attr |
Sam3TrackerMaskDecoderConfig.iou_head_depth |
1 | 0 | 0 |
attr |
Sam3TrackerMaskDecoderConfig.iou_head_hidden_dim |
1 | 0 | 0 |
attr |
Sam3TrackerMaskDecoderConfig.dynamic_multimask_via_stability |
1 | 0 | 0 |
attr |
Sam3TrackerMaskDecoderConfig.dynamic_multimask_stability_delta |
1 | 0 | 0 |
attr |
Sam3TrackerMaskDecoderConfig.dynamic_multimask_stability_thresh |
1 | 0 | 0 |
attr |
Sam3TrackerMaskDecoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Sam3TrackerMaskDecoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Sam3TrackerMaskDecoderConfig.mlp_dim |
1 | 0 | 0 |
attr |
Sam3TrackerMaskDecoderConfig.attention_downsample_rate |
1 | 0 | 0 |
transformers.models.sam3_tracker.modeling_sam3_tracker (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Sam3TrackerModel.__init__ |
2 | 1 | 0 |
meth |
Sam3TrackerModel.get_input_embeddings |
1 | 0 | 0 |
meth |
Sam3TrackerModel.get_prompt_embeddings |
5 | 4 | 0 |
attr |
Sam3TrackerModel._can_record_outputs |
1 | 0 | 0 |
attr |
Sam3TrackerModel.shared_image_embedding |
1 | 0 | 0 |
attr |
Sam3TrackerModel.vision_encoder |
1 | 0 | 0 |
attr |
Sam3TrackerModel.prompt_encoder |
1 | 0 | 0 |
attr |
Sam3TrackerModel.mask_decoder |
1 | 0 | 0 |
attr |
Sam3TrackerModel.backbone_feature_sizes |
1 | 0 | 0 |
attr |
Sam3TrackerModel.hidden_dim |
1 | 0 | 0 |
attr |
Sam3TrackerModel.no_memory_embedding |
1 | 0 | 0 |
meth |
Sam3TrackerPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.sam3_tracker.modular_sam3_tracker (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Sam3TrackerPromptEncoderConfig.__init__ |
10 | 0 | 0 |
meth |
Sam3TrackerModel.__init__ |
2 | 1 | 0 |
attr |
Sam3TrackerModel.shared_image_embedding |
1 | 0 | 0 |
attr |
Sam3TrackerModel.vision_encoder |
1 | 0 | 0 |
attr |
Sam3TrackerModel.prompt_encoder |
1 | 0 | 0 |
attr |
Sam3TrackerModel.mask_decoder |
1 | 0 | 0 |
attr |
Sam3TrackerModel.backbone_feature_sizes |
1 | 0 | 0 |
attr |
Sam3TrackerModel.hidden_dim |
1 | 0 | 0 |
attr |
Sam3TrackerModel.no_memory_embedding |
1 | 0 | 0 |
meth |
Sam3TrackerConfig.__init__ |
6 | 0 | 0 |
attr |
Sam3TrackerConfig.vision_config |
1 | 0 | 0 |
attr |
Sam3TrackerConfig.prompt_encoder_config |
1 | 0 | 0 |
attr |
Sam3TrackerConfig.mask_decoder_config |
1 | 0 | 0 |
attr |
Sam3TrackerConfig.initializer_range |
1 | 0 | 0 |
meth |
Sam3TrackerPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.sam3_tracker.processing_sam3_tracker (40 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Sam3TrackerProcessor.init |
5 | 2 | 0 |
meth |
Sam3TrackerProcessor.call |
9 | 8 | 0 |
meth |
Sam3TrackerProcessor._normalize_coordinates |
5 | 3 | 0 |
meth |
Sam3TrackerProcessor._convert_to_nested_list |
4 | 0 | 0 |
meth |
Sam3TrackerProcessor._get_nested_dimensions |
3 | 0 | 0 |
meth |
Sam3TrackerProcessor._pad_nested_list |
5 | 0 | 0 |
meth |
Sam3TrackerProcessor._create_empty_nested_structure |
3 | 0 | 0 |
meth |
Sam3TrackerProcessor._get_nesting_level |
2 | 0 | 0 |
meth |
Sam3TrackerProcessor._normalize_tensor_coordinates |
5 | 0 | 0 |
meth |
Sam3TrackerProcessor.post_process_masks |
9 | 0 | 0 |
prop |
Sam3TrackerProcessor.model_input_names |
1 | 0 | 0 |
attr |
Sam3TrackerProcessor.point_pad_value |
1 | 0 | 0 |
attr |
Sam3TrackerProcessor.target_size |
1 | 0 | 0 |
transformers.models.sam3_tracker_video.configuration_sam3_tracker_video (130 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Sam3TrackerVideoConfig.init |
43 | 0 | 0 |
prop |
Sam3TrackerVideoConfig.image_size |
2 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.vision_config |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.prompt_encoder_config |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.mask_decoder_config |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.initializer_range |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.num_maskmem |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.image_size |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.sigmoid_scale_for_mem_enc |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.sigmoid_bias_for_mem_enc |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.multimask_output_in_sam |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.multimask_min_pt_num |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.multimask_max_pt_num |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.multimask_output_for_tracking |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.max_object_pointers_in_encoder |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.max_cond_frame_num |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.enable_occlusion_spatial_embedding |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.enable_temporal_pos_encoding_for_object_pointers |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_attention_hidden_size |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_attention_num_layers |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_attention_num_attention_heads |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_attention_downsample_rate |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_attention_feed_forward_hidden_size |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_attention_feed_forward_hidden_act |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_attention_dropout |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_attention_rope_theta |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_attention_rope_feat_sizes |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_attention_rope_dropout |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_encoder_hidden_size |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_encoder_output_channels |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.mask_downsampler_embed_dim |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.mask_downsampler_kernel_size |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.mask_downsampler_stride |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.mask_downsampler_padding |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.mask_downsampler_total_stride |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.mask_downsampler_hidden_act |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_fuser_num_layers |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_fuser_embed_dim |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_fuser_intermediate_dim |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_fuser_kernel_size |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_fuser_padding |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_fuser_layer_scale_init_value |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_fuser_hidden_act |
1 | 0 | 0 |
meth |
Sam3TrackerVideoPromptEncoderConfig.init |
10 | 0 | 0 |
attr |
Sam3TrackerVideoPromptEncoderConfig.hidden_size |
1 | 0 | 0 |
attr |
Sam3TrackerVideoPromptEncoderConfig.image_size |
1 | 0 | 0 |
attr |
Sam3TrackerVideoPromptEncoderConfig.patch_size |
1 | 0 | 0 |
attr |
Sam3TrackerVideoPromptEncoderConfig.mask_input_channels |
1 | 0 | 0 |
attr |
Sam3TrackerVideoPromptEncoderConfig.num_point_embeddings |
1 | 0 | 0 |
attr |
Sam3TrackerVideoPromptEncoderConfig.hidden_act |
1 | 0 | 0 |
attr |
Sam3TrackerVideoPromptEncoderConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
Sam3TrackerVideoPromptEncoderConfig.scale |
1 | 0 | 0 |
meth |
Sam3TrackerVideoMaskDecoderConfig.init |
14 | 0 | 0 |
attr |
Sam3TrackerVideoMaskDecoderConfig.hidden_size |
1 | 0 | 0 |
attr |
Sam3TrackerVideoMaskDecoderConfig.num_multimask_outputs |
1 | 0 | 0 |
attr |
Sam3TrackerVideoMaskDecoderConfig.hidden_act |
1 | 0 | 0 |
attr |
Sam3TrackerVideoMaskDecoderConfig.iou_head_depth |
1 | 0 | 0 |
attr |
Sam3TrackerVideoMaskDecoderConfig.iou_head_hidden_dim |
1 | 0 | 0 |
attr |
Sam3TrackerVideoMaskDecoderConfig.dynamic_multimask_via_stability |
1 | 0 | 0 |
attr |
Sam3TrackerVideoMaskDecoderConfig.dynamic_multimask_stability_delta |
1 | 0 | 0 |
attr |
Sam3TrackerVideoMaskDecoderConfig.dynamic_multimask_stability_thresh |
1 | 0 | 0 |
attr |
Sam3TrackerVideoMaskDecoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Sam3TrackerVideoMaskDecoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Sam3TrackerVideoMaskDecoderConfig.mlp_dim |
1 | 0 | 0 |
attr |
Sam3TrackerVideoMaskDecoderConfig.attention_downsample_rate |
1 | 0 | 0 |
transformers.models.sam3_tracker_video.modeling_sam3_tracker_video (55 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Sam3TrackerVideoPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Sam3TrackerVideoModel.init |
3 | 2 | 0 |
meth |
Sam3TrackerVideoModel.get_input_embeddings |
1 | 0 | 0 |
meth |
Sam3TrackerVideoModel.forward |
7 | 6 | 0 |
meth |
Sam3TrackerVideoModel._select_closest_cond_frames |
4 | 0 | 0 |
meth |
Sam3TrackerVideoModel._batch_encode_memories |
7 | 6 | 0 |
attr |
Sam3TrackerVideoModel._can_record_outputs |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.shared_image_embedding |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.vision_encoder |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.prompt_encoder |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.mask_decoder |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.backbone_feature_sizes |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.hidden_dim |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.no_memory_embedding |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.config |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.image_size |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.memory_attention |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.memory_encoder |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.no_memory_positional_encoding |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.mem_dim |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.num_maskmem |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.memory_temporal_positional_encoding |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.no_object_pointer |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.mask_downsample |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.object_pointer_proj |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.occlusion_spatial_embedding_parameter |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.temporal_positional_encoding_projection_layer |
1 | 0 | 0 |
meth |
Sam3TrackerVideoInferenceSession.init |
9 | 8 | 0 |
meth |
Sam3TrackerVideoInferenceSession.add_point_inputs |
4 | 3 | 0 |
meth |
Sam3TrackerVideoInferenceSession.remove_point_inputs |
3 | 2 | 0 |
meth |
Sam3TrackerVideoInferenceSession.add_mask_inputs |
4 | 3 | 0 |
meth |
Sam3TrackerVideoInferenceSession.remove_mask_inputs |
3 | 2 | 0 |
meth |
Sam3TrackerVideoInferenceSession.store_output |
6 | 5 | 0 |
meth |
Sam3TrackerVideoInferenceSession.get_output |
5 | 4 | 0 |
meth |
Sam3TrackerVideoInferenceSession.reset_tracking_data |
1 | 0 | 0 |
meth |
Sam3TrackerVideoInferenceSession.reset_inference_session |
1 | 0 | 0 |
attr |
Sam3TrackerVideoInferenceSession.processed_frames |
1 | 0 | 0 |
attr |
Sam3TrackerVideoInferenceSession.video_height |
1 | 0 | 0 |
attr |
Sam3TrackerVideoInferenceSession.video_width |
1 | 0 | 0 |
attr |
Sam3TrackerVideoInferenceSession.inference_device |
1 | 0 | 0 |
attr |
Sam3TrackerVideoInferenceSession.inference_state_device |
1 | 0 | 0 |
attr |
Sam3TrackerVideoInferenceSession.video_storage_device |
1 | 0 | 0 |
attr |
Sam3TrackerVideoInferenceSession.dtype |
1 | 0 | 0 |
attr |
Sam3TrackerVideoInferenceSession.max_vision_features_cache_size |
1 | 0 | 0 |
attr |
Sam3TrackerVideoInferenceSession.cache |
1 | 0 | 0 |
attr |
Sam3TrackerVideoInferenceSession.obj_ids |
1 | 0 | 0 |
attr |
Sam3TrackerVideoInferenceSession.point_inputs_per_obj |
1 | 0 | 0 |
attr |
Sam3TrackerVideoInferenceSession.mask_inputs_per_obj |
1 | 0 | 0 |
attr |
Sam3TrackerVideoInferenceSession.output_dict_per_obj |
1 | 0 | 0 |
attr |
Sam3TrackerVideoInferenceSession.frames_tracked_per_obj |
1 | 0 | 0 |
attr |
Sam3TrackerVideoInferenceSession.obj_with_new_inputs |
1 | 0 | 0 |
transformers.models.sam3_tracker_video.modular_sam3_tracker_video (117 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Sam3TrackerVideoPromptEncoderConfig.init |
10 | 0 | 0 |
meth |
Sam3TrackerVideoModel.init |
3 | 2 | 0 |
attr |
Sam3TrackerVideoModel.shared_image_embedding |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.vision_encoder |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.prompt_encoder |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.mask_decoder |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.backbone_feature_sizes |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.hidden_dim |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.no_memory_embedding |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.config |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.image_size |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.memory_attention |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.memory_encoder |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.no_memory_positional_encoding |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.mem_dim |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.num_maskmem |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.memory_temporal_positional_encoding |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.no_object_pointer |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.mask_downsample |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.object_pointer_proj |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.occlusion_spatial_embedding_parameter |
1 | 0 | 0 |
attr |
Sam3TrackerVideoModel.temporal_positional_encoding_projection_layer |
1 | 0 | 0 |
meth |
Sam3TrackerVideoConfig.init |
43 | 0 | 0 |
prop |
Sam3TrackerVideoConfig.image_size |
2 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.vision_config |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.prompt_encoder_config |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.mask_decoder_config |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.initializer_range |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.num_maskmem |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.image_size |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.sigmoid_scale_for_mem_enc |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.sigmoid_bias_for_mem_enc |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.multimask_output_in_sam |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.multimask_min_pt_num |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.multimask_max_pt_num |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.multimask_output_for_tracking |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.max_object_pointers_in_encoder |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.max_cond_frame_num |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.enable_occlusion_spatial_embedding |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.enable_temporal_pos_encoding_for_object_pointers |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_attention_hidden_size |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_attention_num_layers |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_attention_num_attention_heads |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_attention_downsample_rate |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_attention_feed_forward_hidden_size |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_attention_feed_forward_hidden_act |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_attention_dropout |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_attention_rope_theta |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_attention_rope_feat_sizes |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_attention_rope_dropout |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_encoder_hidden_size |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_encoder_output_channels |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.mask_downsampler_embed_dim |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.mask_downsampler_kernel_size |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.mask_downsampler_stride |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.mask_downsampler_padding |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.mask_downsampler_total_stride |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.mask_downsampler_hidden_act |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_fuser_num_layers |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_fuser_embed_dim |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_fuser_intermediate_dim |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_fuser_kernel_size |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_fuser_padding |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_fuser_layer_scale_init_value |
1 | 0 | 0 |
attr |
Sam3TrackerVideoConfig.memory_fuser_hidden_act |
1 | 0 | 0 |
transformers.models.sam3_tracker_video.processing_sam3_tracker_video (43 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Sam3TrackerVideoProcessor.init |
6 | 2 | 0 |
meth |
Sam3TrackerVideoProcessor.call |
9 | 8 | 0 |
meth |
Sam3TrackerVideoProcessor._normalize_coordinates |
5 | 3 | 0 |
meth |
Sam3TrackerVideoProcessor._convert_to_nested_list |
4 | 0 | 0 |
meth |
Sam3TrackerVideoProcessor._get_nested_dimensions |
3 | 0 | 0 |
meth |
Sam3TrackerVideoProcessor._pad_nested_list |
5 | 0 | 0 |
meth |
Sam3TrackerVideoProcessor._create_empty_nested_structure |
3 | 0 | 0 |
meth |
Sam3TrackerVideoProcessor._get_nesting_level |
2 | 0 | 0 |
meth |
Sam3TrackerVideoProcessor._normalize_tensor_coordinates |
5 | 0 | 0 |
meth |
Sam3TrackerVideoProcessor.post_process_masks |
9 | 0 | 0 |
meth |
Sam3TrackerVideoProcessor.init_video_session |
8 | 7 | 0 |
meth |
Sam3TrackerVideoProcessor.process_new_mask_for_video_frame |
5 | 4 | 0 |
prop |
Sam3TrackerVideoProcessor.model_input_names |
1 | 0 | 0 |
attr |
Sam3TrackerVideoProcessor.point_pad_value |
1 | 0 | 0 |
attr |
Sam3TrackerVideoProcessor.target_size |
1 | 0 | 0 |
transformers.models.sam3_video.configuration_sam3_video (52 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Sam3VideoConfig.init |
26 | 0 | 0 |
prop |
Sam3VideoConfig.image_size |
2 | 0 | 0 |
attr |
Sam3VideoConfig.initializer_range |
1 | 0 | 0 |
attr |
Sam3VideoConfig.low_res_mask_size |
1 | 0 | 0 |
attr |
Sam3VideoConfig.score_threshold_detection |
1 | 0 | 0 |
attr |
Sam3VideoConfig.det_nms_thresh |
1 | 0 | 0 |
attr |
Sam3VideoConfig.assoc_iou_thresh |
1 | 0 | 0 |
attr |
Sam3VideoConfig.trk_assoc_iou_thresh |
1 | 0 | 0 |
attr |
Sam3VideoConfig.new_det_thresh |
1 | 0 | 0 |
attr |
Sam3VideoConfig.recondition_on_trk_masks |
1 | 0 | 0 |
attr |
Sam3VideoConfig.hotstart_delay |
1 | 0 | 0 |
attr |
Sam3VideoConfig.hotstart_unmatch_thresh |
1 | 0 | 0 |
attr |
Sam3VideoConfig.hotstart_dup_thresh |
1 | 0 | 0 |
attr |
Sam3VideoConfig.suppress_unmatched_only_within_hotstart |
1 | 0 | 0 |
attr |
Sam3VideoConfig.init_trk_keep_alive |
1 | 0 | 0 |
attr |
Sam3VideoConfig.max_trk_keep_alive |
1 | 0 | 0 |
attr |
Sam3VideoConfig.min_trk_keep_alive |
1 | 0 | 0 |
attr |
Sam3VideoConfig.suppress_overlapping_based_on_recent_occlusion_threshold |
1 | 0 | 0 |
attr |
Sam3VideoConfig.decrease_trk_keep_alive_for_empty_masklets |
1 | 0 | 0 |
attr |
Sam3VideoConfig.fill_hole_area |
1 | 0 | 0 |
attr |
Sam3VideoConfig.max_num_objects |
1 | 0 | 0 |
attr |
Sam3VideoConfig.recondition_every_nth_frame |
1 | 0 | 0 |
attr |
Sam3VideoConfig.high_conf_thresh |
1 | 0 | 0 |
attr |
Sam3VideoConfig.high_iou_thresh |
1 | 0 | 0 |
attr |
Sam3VideoConfig.detector_config |
1 | 0 | 0 |
attr |
Sam3VideoConfig.tracker_config |
1 | 0 | 0 |
transformers.models.sam3_video.modeling_sam3_video (93 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Sam3VideoModel.init |
2 | 1 | 0 |
meth |
Sam3VideoModel.get_vision_features_for_tracker |
2 | 1 | 0 |
meth |
Sam3VideoModel.run_detection |
3 | 2 | 0 |
meth |
Sam3VideoModel.run_tracker_propagation |
4 | 3 | 0 |
meth |
Sam3VideoModel._associate_det_trk |
7 | 6 | 0 |
meth |
Sam3VideoModel._process_hotstart |
10 | 9 | 0 |
meth |
Sam3VideoModel.run_memory_encoder |
5 | 4 | 0 |
meth |
Sam3VideoModel._get_objects_to_suppress_based_on_most_recently_occluded |
5 | 4 | 0 |
meth |
Sam3VideoModel._suppress_overlapping_based_on_recent_occlusion |
7 | 6 | 0 |
meth |
Sam3VideoModel._apply_non_overlapping_constraints |
2 | 0 | 0 |
meth |
Sam3VideoModel._suppress_shrinked_masks |
4 | 0 | 0 |
meth |
Sam3VideoModel._suppress_object_pw_area_shrinkage |
3 | 1 | 0 |
meth |
Sam3VideoModel._suppress_object_pw_area_shrinkage_impl |
2 | 0 | 0 |
meth |
Sam3VideoModel._tracker_update_memories |
5 | 4 | 0 |
meth |
Sam3VideoModel.run_tracker_update_planning_phase |
9 | 8 | 0 |
meth |
Sam3VideoModel._tracker_add_new_objects |
6 | 5 | 0 |
meth |
Sam3VideoModel.run_tracker_update_execution_phase |
6 | 5 | 0 |
meth |
Sam3VideoModel.build_outputs |
6 | 5 | 0 |
meth |
Sam3VideoModel._det_track_one_frame |
5 | 4 | 0 |
meth |
Sam3VideoModel.forward |
6 | 4 | 0 |
meth |
Sam3VideoModel._get_processing_order |
5 | 4 | 0 |
attr |
Sam3VideoModel.config |
1 | 0 | 0 |
attr |
Sam3VideoModel.detector_model |
1 | 0 | 0 |
attr |
Sam3VideoModel.tracker_model |
1 | 0 | 0 |
attr |
Sam3VideoModel.low_res_mask_size |
1 | 0 | 0 |
attr |
Sam3VideoModel.score_threshold_detection |
1 | 0 | 0 |
attr |
Sam3VideoModel.det_nms_thresh |
1 | 0 | 0 |
attr |
Sam3VideoModel.assoc_iou_thresh |
1 | 0 | 0 |
attr |
Sam3VideoModel.trk_assoc_iou_thresh |
1 | 0 | 0 |
attr |
Sam3VideoModel.new_det_thresh |
1 | 0 | 0 |
attr |
Sam3VideoModel.recondition_on_trk_masks |
1 | 0 | 0 |
attr |
Sam3VideoModel.hotstart_delay |
1 | 0 | 0 |
attr |
Sam3VideoModel.hotstart_unmatch_thresh |
1 | 0 | 0 |
attr |
Sam3VideoModel.hotstart_dup_thresh |
1 | 0 | 0 |
attr |
Sam3VideoModel.suppress_unmatched_only_within_hotstart |
1 | 0 | 0 |
attr |
Sam3VideoModel.init_trk_keep_alive |
1 | 0 | 0 |
attr |
Sam3VideoModel.max_trk_keep_alive |
1 | 0 | 0 |
attr |
Sam3VideoModel.min_trk_keep_alive |
1 | 0 | 0 |
attr |
Sam3VideoModel.suppress_overlapping_based_on_recent_occlusion_threshold |
1 | 0 | 0 |
attr |
Sam3VideoModel.decrease_trk_keep_alive_for_empty_masklets |
1 | 0 | 0 |
attr |
Sam3VideoModel.fill_hole_area |
1 | 0 | 0 |
attr |
Sam3VideoModel.max_num_objects |
1 | 0 | 0 |
attr |
Sam3VideoModel.recondition_every_nth_frame |
1 | 0 | 0 |
attr |
Sam3VideoModel.high_conf_thresh |
1 | 0 | 0 |
attr |
Sam3VideoModel.high_iou_thresh |
1 | 0 | 0 |
attr |
Sam3VideoModel.tracker_neck |
1 | 0 | 0 |
meth |
Sam3VideoInferenceSession.init |
9 | 8 | 0 |
meth |
Sam3VideoInferenceSession.add_mask_inputs |
4 | 3 | 0 |
meth |
Sam3VideoInferenceSession.remove_mask_inputs |
3 | 2 | 0 |
meth |
Sam3VideoInferenceSession.remove_object |
3 | 2 | 0 |
meth |
Sam3VideoInferenceSession.store_output |
6 | 5 | 0 |
meth |
Sam3VideoInferenceSession.get_output |
5 | 4 | 0 |
meth |
Sam3VideoInferenceSession.reset_tracking_data |
1 | 0 | 0 |
meth |
Sam3VideoInferenceSession.reset_inference_session |
1 | 0 | 0 |
meth |
Sam3VideoInferenceSession.reset_state |
1 | 0 | 0 |
attr |
Sam3VideoInferenceSession.processed_frames |
1 | 0 | 0 |
attr |
Sam3VideoInferenceSession.video_height |
1 | 0 | 0 |
attr |
Sam3VideoInferenceSession.video_width |
1 | 0 | 0 |
attr |
Sam3VideoInferenceSession.inference_device |
1 | 0 | 0 |
attr |
Sam3VideoInferenceSession.inference_state_device |
1 | 0 | 0 |
attr |
Sam3VideoInferenceSession.video_storage_device |
1 | 0 | 0 |
attr |
Sam3VideoInferenceSession.dtype |
1 | 0 | 0 |
attr |
Sam3VideoInferenceSession.max_vision_features_cache_size |
1 | 0 | 0 |
attr |
Sam3VideoInferenceSession.cache |
1 | 0 | 0 |
attr |
Sam3VideoInferenceSession.obj_ids |
1 | 0 | 0 |
attr |
Sam3VideoInferenceSession.mask_inputs_per_obj |
1 | 0 | 0 |
attr |
Sam3VideoInferenceSession.point_inputs_per_obj |
1 | 0 | 0 |
attr |
Sam3VideoInferenceSession.output_dict_per_obj |
1 | 0 | 0 |
attr |
Sam3VideoInferenceSession.frames_tracked_per_obj |
1 | 0 | 0 |
attr |
Sam3VideoInferenceSession.prompts |
1 | 0 | 0 |
attr |
Sam3VideoInferenceSession.prompt_input_ids |
1 | 0 | 0 |
attr |
Sam3VideoInferenceSession.prompt_embeddings |
1 | 0 | 0 |
attr |
Sam3VideoInferenceSession.prompt_attention_masks |
1 | 0 | 0 |
attr |
Sam3VideoInferenceSession.obj_id_to_prompt_id |
1 | 0 | 0 |
attr |
Sam3VideoInferenceSession.obj_id_to_score |
1 | 0 | 0 |
attr |
Sam3VideoInferenceSession.obj_id_to_tracker_score_frame_wise |
1 | 0 | 0 |
attr |
Sam3VideoInferenceSession.obj_id_to_last_occluded |
1 | 0 | 0 |
attr |
Sam3VideoInferenceSession.max_obj_id |
1 | 0 | 0 |
attr |
Sam3VideoInferenceSession.obj_first_frame_idx |
1 | 0 | 0 |
attr |
Sam3VideoInferenceSession.unmatched_frame_inds |
1 | 0 | 0 |
attr |
Sam3VideoInferenceSession.overlap_pair_to_frame_inds |
1 | 0 | 0 |
attr |
Sam3VideoInferenceSession.trk_keep_alive |
1 | 0 | 0 |
attr |
Sam3VideoInferenceSession.removed_obj_ids |
1 | 0 | 0 |
attr |
Sam3VideoInferenceSession.suppressed_obj_ids |
1 | 0 | 0 |
attr |
Sam3VideoInferenceSession.hotstart_removed_obj_ids |
1 | 0 | 0 |
attr |
Sam3VideoInferenceSession.output_buffer |
1 | 0 | 0 |
transformers.models.sam3_video.processing_sam3_video (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Sam3VideoProcessor.init |
6 | 1 | 0 |
meth |
Sam3VideoProcessor.call |
6 | 5 | 0 |
meth |
Sam3VideoProcessor.add_text_prompt |
3 | 2 | 0 |
meth |
Sam3VideoProcessor.init_video_session |
8 | 7 | 0 |
meth |
Sam3VideoProcessor._apply_non_overlapping_constraints |
2 | 0 | 0 |
meth |
Sam3VideoProcessor._apply_object_wise_non_overlapping_constraints |
5 | 0 | 0 |
meth |
Sam3VideoProcessor._apply_object_wise_non_overlapping_constraints_impl |
4 | 0 | 0 |
meth |
Sam3VideoProcessor.postprocess_outputs |
4 | 1 | 0 |
attr |
Sam3VideoProcessor.target_size |
1 | 0 | 0 |
transformers.models.sam_hq.configuration_sam_hq (94 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SamHQPromptEncoderConfig.init |
9 | 0 | 0 |
attr |
SamHQPromptEncoderConfig.hidden_size |
1 | 0 | 0 |
attr |
SamHQPromptEncoderConfig.image_size |
1 | 0 | 0 |
attr |
SamHQPromptEncoderConfig.patch_size |
1 | 0 | 0 |
attr |
SamHQPromptEncoderConfig.image_embedding_size |
1 | 0 | 0 |
attr |
SamHQPromptEncoderConfig.mask_input_channels |
1 | 0 | 0 |
attr |
SamHQPromptEncoderConfig.num_point_embeddings |
1 | 0 | 0 |
attr |
SamHQPromptEncoderConfig.hidden_act |
1 | 0 | 0 |
attr |
SamHQPromptEncoderConfig.layer_norm_eps |
1 | 0 | 0 |
meth |
SamHQVisionConfig.init |
21 | 0 | 0 |
attr |
SamHQVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
SamHQVisionConfig.output_channels |
1 | 0 | 0 |
attr |
SamHQVisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
SamHQVisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
SamHQVisionConfig.num_channels |
1 | 0 | 0 |
attr |
SamHQVisionConfig.image_size |
1 | 0 | 0 |
attr |
SamHQVisionConfig.patch_size |
1 | 0 | 0 |
attr |
SamHQVisionConfig.hidden_act |
1 | 0 | 0 |
attr |
SamHQVisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
SamHQVisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
SamHQVisionConfig.initializer_range |
1 | 0 | 0 |
attr |
SamHQVisionConfig.qkv_bias |
1 | 0 | 0 |
attr |
SamHQVisionConfig.mlp_ratio |
1 | 0 | 0 |
attr |
SamHQVisionConfig.use_abs_pos |
1 | 0 | 0 |
attr |
SamHQVisionConfig.use_rel_pos |
1 | 0 | 0 |
attr |
SamHQVisionConfig.window_size |
1 | 0 | 0 |
attr |
SamHQVisionConfig.global_attn_indexes |
1 | 0 | 0 |
attr |
SamHQVisionConfig.num_pos_feats |
1 | 0 | 0 |
attr |
SamHQVisionConfig.mlp_dim |
1 | 0 | 0 |
attr |
SamHQVisionConfig.scale |
1 | 0 | 0 |
meth |
SamHQMaskDecoderConfig.init |
13 | 0 | 0 |
attr |
SamHQMaskDecoderConfig.hidden_size |
1 | 0 | 0 |
attr |
SamHQMaskDecoderConfig.hidden_act |
1 | 0 | 0 |
attr |
SamHQMaskDecoderConfig.mlp_dim |
1 | 0 | 0 |
attr |
SamHQMaskDecoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
SamHQMaskDecoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
SamHQMaskDecoderConfig.attention_downsample_rate |
1 | 0 | 0 |
attr |
SamHQMaskDecoderConfig.num_multimask_outputs |
1 | 0 | 0 |
attr |
SamHQMaskDecoderConfig.iou_head_depth |
1 | 0 | 0 |
attr |
SamHQMaskDecoderConfig.iou_head_hidden_dim |
1 | 0 | 0 |
attr |
SamHQMaskDecoderConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
SamHQMaskDecoderConfig.vit_dim |
1 | 0 | 0 |
meth |
SamHQConfig.init |
7 | 0 | 0 |
attr |
SamHQConfig.vision_config |
1 | 0 | 0 |
attr |
SamHQConfig.prompt_encoder_config |
1 | 0 | 0 |
attr |
SamHQConfig.mask_decoder_config |
1 | 0 | 0 |
attr |
SamHQConfig.initializer_range |
1 | 0 | 0 |
attr |
SamHQConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.sam_hq.modeling_sam_hq (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SamHQModel.init |
2 | 0 | 0 |
meth |
SamHQModel.get_input_embeddings |
1 | 0 | 0 |
meth |
SamHQModel.get_image_wide_positional_embeddings |
1 | 0 | 0 |
meth |
SamHQModel.get_image_embeddings |
2 | 0 | 0 |
meth |
SamHQModel.get_prompt_embeddings |
5 | 4 | 0 |
attr |
SamHQModel._can_record_outputs |
1 | 0 | 0 |
attr |
SamHQModel.shared_image_embedding |
1 | 0 | 0 |
attr |
SamHQModel.vision_encoder |
1 | 0 | 0 |
attr |
SamHQModel.prompt_encoder |
1 | 0 | 0 |
attr |
SamHQModel.mask_decoder |
1 | 0 | 0 |
meth |
SamHQPreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
SamHQVisionModel.init |
2 | 1 | 0 |
attr |
SamHQVisionModel.vision_encoder |
1 | 0 | 0 |
transformers.models.sam_hq.modular_sam_hq (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SamHQMaskDecoderConfig.init |
3 | 0 | 0 |
attr |
SamHQMaskDecoderConfig.vit_dim |
1 | 0 | 0 |
meth |
SamHQModel.init |
2 | 0 | 0 |
meth |
SamHQModel.get_image_embeddings |
2 | 0 | 0 |
attr |
SamHQModel.vision_encoder |
1 | 0 | 0 |
attr |
SamHQModel.mask_decoder |
1 | 0 | 0 |
transformers.models.sam_hq.processing_sam_hq (38 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SamHQProcessor.init |
2 | 0 | 0 |
meth |
SamHQProcessor._normalize_and_convert |
8 | 0 | 0 |
meth |
SamHQProcessor._pad_points_and_labels |
4 | 0 | 0 |
meth |
SamHQProcessor._normalize_coordinates |
5 | 3 | 0 |
meth |
SamHQProcessor._preprocess_input |
5 | 0 | 0 |
meth |
SamHQProcessor._check_and_preprocess_points |
4 | 0 | 0 |
meth |
SamHQProcessor.post_process_masks |
3 | 0 | 0 |
meth |
SamHQProcessor._to_tensor |
4 | 0 | 0 |
meth |
SamHQProcessor._normalize_batch_coordinates |
4 | 0 | 0 |
prop |
SamHQProcessor.model_input_names |
1 | 0 | 0 |
attr |
SamHQProcessor.target_size |
1 | 0 | 0 |
transformers.models.seamless_m4t.configuration_seamless_m4t (149 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SeamlessM4TConfig.init |
75 | 0 | 0 |
attr |
SeamlessM4TConfig.vocab_size |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.t2u_vocab_size |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.hidden_size |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.initializer_range |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.use_cache |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.max_new_tokens |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.encoder_layerdrop |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.decoder_layerdrop |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.activation_function |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.dropout |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.attention_dropout |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.activation_dropout |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.scale_embedding |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.num_attention_heads |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.encoder_layers |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.decoder_layers |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.decoder_attention_heads |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.speech_encoder_layers |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.speech_encoder_hidden_act |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.speech_encoder_dropout |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.speech_encoder_attention_heads |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.speech_encoder_layerdrop |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.speech_encoder_intermediate_size |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.feature_projection_input_dim |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.num_conv_pos_embeddings |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.num_conv_pos_embedding_groups |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.adaptor_kernel_size |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.adaptor_stride |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.adaptor_dropout |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.num_adapter_layers |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.position_embeddings_type |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.rotary_embedding_base |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.max_source_positions |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.conv_depthwise_kernel_size |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.add_adapter |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.t2u_bos_token_id |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.t2u_pad_token_id |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.t2u_eos_token_id |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.t2u_decoder_start_token_id |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.t2u_max_new_tokens |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.t2u_encoder_layers |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.t2u_encoder_ffn_dim |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.t2u_encoder_attention_heads |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.t2u_decoder_layers |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.t2u_decoder_ffn_dim |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.t2u_decoder_attention_heads |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.t2u_max_position_embeddings |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.sampling_rate |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.upsample_initial_channel |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.upsample_rates |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.upsample_kernel_sizes |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.resblock_kernel_sizes |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.resblock_dilation_sizes |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.leaky_relu_slope |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.unit_hifi_gan_vocab_size |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.unit_embed_dim |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.lang_embed_dim |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.spkr_embed_dim |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.vocoder_num_langs |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.vocoder_num_spkrs |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.variance_predictor_kernel_size |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.var_pred_dropout |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.vocoder_offset |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.pad_token_id |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.bos_token_id |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.eos_token_id |
1 | 0 | 0 |
attr |
SeamlessM4TConfig.decoder_start_token_id |
1 | 0 | 0 |
transformers.models.seamless_m4t.feature_extraction_seamless_m4t (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SeamlessM4TFeatureExtractor.__init__ |
7 | 0 | 0 |
meth |
SeamlessM4TFeatureExtractor.__call__ |
11 | 10 | 0 |
attr |
SeamlessM4TFeatureExtractor.num_mel_bins |
1 | 0 | 0 |
attr |
SeamlessM4TFeatureExtractor.return_attention_mask |
1 | 0 | 0 |
attr |
SeamlessM4TFeatureExtractor.stride |
1 | 0 | 0 |
attr |
SeamlessM4TFeatureExtractor.mel_filters |
1 | 0 | 0 |
attr |
SeamlessM4TFeatureExtractor.window |
1 | 0 | 0 |
transformers.models.seamless_m4t.modeling_seamless_m4t (129 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SeamlessM4TForSpeechToSpeech.__init__ |
2 | 0 | 0 |
meth |
SeamlessM4TForSpeechToSpeech.get_encoder |
1 | 0 | 0 |
meth |
SeamlessM4TForSpeechToSpeech.get_decoder |
1 | 0 | 0 |
meth |
SeamlessM4TForSpeechToSpeech.get_input_embeddings |
1 | 0 | 0 |
meth |
SeamlessM4TForSpeechToSpeech.set_input_embeddings |
2 | 0 | 0 |
meth |
SeamlessM4TForSpeechToSpeech.forward |
15 | 14 | 0 |
meth |
SeamlessM4TForSpeechToSpeech.generate |
6 | 5 | 0 |
attr |
SeamlessM4TForSpeechToSpeech.shared |
1 | 0 | 0 |
attr |
SeamlessM4TForSpeechToSpeech.speech_encoder |
1 | 0 | 0 |
attr |
SeamlessM4TForSpeechToSpeech.text_decoder |
1 | 0 | 0 |
attr |
SeamlessM4TForSpeechToSpeech.lm_head |
1 | 0 | 0 |
attr |
SeamlessM4TForSpeechToSpeech.t2u_model |
1 | 0 | 0 |
attr |
SeamlessM4TForSpeechToSpeech.vocoder |
1 | 0 | 0 |
meth |
SeamlessM4TTextToUnitForConditionalGeneration.__init__ |
3 | 2 | 0 |
meth |
SeamlessM4TTextToUnitForConditionalGeneration.get_encoder |
1 | 0 | 0 |
meth |
SeamlessM4TTextToUnitForConditionalGeneration.get_decoder |
1 | 0 | 0 |
meth |
SeamlessM4TTextToUnitForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
SeamlessM4TTextToUnitForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
SeamlessM4TTextToUnitForConditionalGeneration.forward |
16 | 15 | 0 |
meth |
SeamlessM4TTextToUnitForConditionalGeneration.prepare_decoder_input_ids_from_labels |
2 | 1 | 0 |
attr |
SeamlessM4TTextToUnitForConditionalGeneration.model |
1 | 0 | 0 |
attr |
SeamlessM4TTextToUnitForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
SeamlessM4TForTextToText.__init__ |
2 | 1 | 0 |
meth |
SeamlessM4TForTextToText.get_encoder |
1 | 0 | 0 |
meth |
SeamlessM4TForTextToText.get_decoder |
1 | 0 | 0 |
meth |
SeamlessM4TForTextToText.get_input_embeddings |
1 | 0 | 0 |
meth |
SeamlessM4TForTextToText.set_input_embeddings |
2 | 0 | 0 |
meth |
SeamlessM4TForTextToText.forward |
15 | 14 | 0 |
meth |
SeamlessM4TForTextToText.generate |
9 | 0 | 0 |
attr |
SeamlessM4TForTextToText.shared |
1 | 0 | 0 |
attr |
SeamlessM4TForTextToText.text_encoder |
1 | 0 | 0 |
attr |
SeamlessM4TForTextToText.text_decoder |
1 | 0 | 0 |
attr |
SeamlessM4TForTextToText.lm_head |
1 | 0 | 0 |
meth |
SeamlessM4TCodeHifiGan.__init__ |
2 | 0 | 0 |
meth |
SeamlessM4TCodeHifiGan._get_dur_output_lengths |
3 | 0 | 0 |
meth |
SeamlessM4TCodeHifiGan._get_output_hifigan_lengths |
2 | 1 | 0 |
meth |
SeamlessM4TCodeHifiGan.forward |
5 | 4 | 0 |
meth |
SeamlessM4TCodeHifiGan.apply_weight_norm |
1 | 0 | 0 |
meth |
SeamlessM4TCodeHifiGan.remove_weight_norm |
1 | 0 | 0 |
attr |
SeamlessM4TCodeHifiGan.pad_token_id |
1 | 0 | 0 |
attr |
SeamlessM4TCodeHifiGan.dur_predictor |
1 | 0 | 0 |
attr |
SeamlessM4TCodeHifiGan.unit_embedding |
1 | 0 | 0 |
attr |
SeamlessM4TCodeHifiGan.speaker_embedding |
1 | 0 | 0 |
attr |
SeamlessM4TCodeHifiGan.language_embedding |
1 | 0 | 0 |
attr |
SeamlessM4TCodeHifiGan.hifi_gan |
1 | 0 | 0 |
meth |
SeamlessM4TPreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
SeamlessM4TPreTrainedModel._compute_sub_sample_lengths_from_attention_mask |
2 | 0 | 0 |
meth |
SeamlessM4TTextToUnitModel.__init__ |
3 | 2 | 0 |
meth |
SeamlessM4TTextToUnitModel.forward |
15 | 14 | 0 |
attr |
SeamlessM4TTextToUnitModel.encoder |
1 | 0 | 0 |
attr |
SeamlessM4TTextToUnitModel.decoder |
1 | 0 | 0 |
meth |
SeamlessM4TModel.__init__ |
3 | 0 | 0 |
meth |
SeamlessM4TModel.set_modality |
2 | 0 | 0 |
meth |
SeamlessM4TModel.get_encoder |
1 | 0 | 0 |
meth |
SeamlessM4TModel.get_input_embeddings |
1 | 0 | 0 |
meth |
SeamlessM4TModel.set_input_embeddings |
2 | 0 | 0 |
meth |
SeamlessM4TModel.forward |
16 | 15 | 0 |
meth |
SeamlessM4TModel.generate |
8 | 7 | 0 |
attr |
SeamlessM4TModel.shared |
1 | 0 | 0 |
attr |
SeamlessM4TModel.text_encoder |
1 | 0 | 0 |
attr |
SeamlessM4TModel.speech_encoder |
1 | 0 | 0 |
attr |
SeamlessM4TModel.text_decoder |
1 | 0 | 0 |
attr |
SeamlessM4TModel.lm_head |
1 | 0 | 0 |
attr |
SeamlessM4TModel.current_modality |
1 | 0 | 0 |
attr |
SeamlessM4TModel.t2u_model |
1 | 0 | 0 |
attr |
SeamlessM4TModel.vocoder |
1 | 0 | 0 |
attr |
SeamlessM4TModel.main_input_name |
1 | 0 | 0 |
meth |
SeamlessM4TForSpeechToText.__init__ |
2 | 1 | 0 |
meth |
SeamlessM4TForSpeechToText.get_encoder |
1 | 0 | 0 |
meth |
SeamlessM4TForSpeechToText.get_decoder |
1 | 0 | 0 |
meth |
SeamlessM4TForSpeechToText.get_input_embeddings |
1 | 0 | 0 |
meth |
SeamlessM4TForSpeechToText.set_input_embeddings |
2 | 0 | 0 |
meth |
SeamlessM4TForSpeechToText.forward |
15 | 14 | 0 |
meth |
SeamlessM4TForSpeechToText.generate |
9 | 0 | 0 |
attr |
SeamlessM4TForSpeechToText.shared |
1 | 0 | 0 |
attr |
SeamlessM4TForSpeechToText.speech_encoder |
1 | 0 | 0 |
attr |
SeamlessM4TForSpeechToText.text_decoder |
1 | 0 | 0 |
attr |
SeamlessM4TForSpeechToText.lm_head |
1 | 0 | 0 |
meth |
SeamlessM4TForTextToSpeech.__init__ |
2 | 1 | 0 |
meth |
SeamlessM4TForTextToSpeech.get_encoder |
1 | 0 | 0 |
meth |
SeamlessM4TForTextToSpeech.get_decoder |
1 | 0 | 0 |
meth |
SeamlessM4TForTextToSpeech.get_input_embeddings |
1 | 0 | 0 |
meth |
SeamlessM4TForTextToSpeech.set_input_embeddings |
2 | 0 | 0 |
meth |
SeamlessM4TForTextToSpeech.forward |
16 | 15 | 0 |
meth |
SeamlessM4TForTextToSpeech.generate |
6 | 5 | 0 |
attr |
SeamlessM4TForTextToSpeech.shared |
1 | 0 | 0 |
attr |
SeamlessM4TForTextToSpeech.text_encoder |
1 | 0 | 0 |
attr |
SeamlessM4TForTextToSpeech.text_decoder |
1 | 0 | 0 |
attr |
SeamlessM4TForTextToSpeech.lm_head |
1 | 0 | 0 |
attr |
SeamlessM4TForTextToSpeech.t2u_model |
1 | 0 | 0 |
attr |
SeamlessM4TForTextToSpeech.vocoder |
1 | 0 | 0 |
meth |
SeamlessM4THifiGan.__init__ |
2 | 1 | 0 |
attr |
SeamlessM4THifiGan.leaky_relu_slope |
1 | 0 | 0 |
attr |
SeamlessM4THifiGan.num_kernels |
1 | 0 | 0 |
attr |
SeamlessM4THifiGan.num_upsamples |
1 | 0 | 0 |
attr |
SeamlessM4THifiGan.conv_pre |
1 | 0 | 0 |
attr |
SeamlessM4THifiGan.upsampler |
1 | 0 | 0 |
attr |
SeamlessM4THifiGan.resblocks |
1 | 0 | 0 |
attr |
SeamlessM4THifiGan.conv_post |
1 | 0 | 0 |
transformers.models.seamless_m4t.processing_seamless_m4t (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SeamlessM4TProcessor.__init__ |
3 | 0 | 0 |
meth |
SeamlessM4TProcessor.__call__ |
4 | 3 | 0 |
transformers.models.seamless_m4t.tokenization_seamless_m4t (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SeamlessM4TTokenizer.__init__ |
15 | 2 | 0 |
meth |
SeamlessM4TTokenizer.convert_from_spm_model |
3 | 0 | 0 |
meth |
SeamlessM4TTokenizer._build_translation_inputs |
6 | 3 | 0 |
meth |
SeamlessM4TTokenizer.prepare_seq2seq_batch |
11 | 10 | 0 |
meth |
SeamlessM4TTokenizer._switch_to_input_mode |
1 | 0 | 0 |
meth |
SeamlessM4TTokenizer._switch_to_target_mode |
1 | 0 | 0 |
meth |
SeamlessM4TTokenizer.set_src_lang_special_tokens |
2 | 1 | 0 |
meth |
SeamlessM4TTokenizer.__call__ |
10 | 8 | 0 |
attr |
SeamlessM4TTokenizer.fairseq_offset |
1 | 0 | 0 |
attr |
SeamlessM4TTokenizer.fairseq_tokens_to_ids |
1 | 0 | 0 |
attr |
SeamlessM4TTokenizer.fairseq_ids_to_tokens |
1 | 0 | 0 |
transformers.models.seamless_m4t_v2.configuration_seamless_m4t_v2 (155 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SeamlessM4Tv2Config.__init__ |
78 | 0 | 0 |
attr |
SeamlessM4Tv2Config.vocab_size |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.t2u_vocab_size |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.char_vocab_size |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.hidden_size |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.initializer_range |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.layer_norm_eps |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.max_position_embeddings |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.use_cache |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.max_new_tokens |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.encoder_layerdrop |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.decoder_layerdrop |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.activation_function |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.dropout |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.attention_dropout |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.activation_dropout |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.scale_embedding |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.num_attention_heads |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.num_hidden_layers |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.encoder_layers |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.encoder_ffn_dim |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.encoder_attention_heads |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.decoder_layers |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.decoder_ffn_dim |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.decoder_attention_heads |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.speech_encoder_layers |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.speech_encoder_hidden_act |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.speech_encoder_dropout |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.speech_encoder_attention_heads |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.speech_encoder_layerdrop |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.speech_encoder_intermediate_size |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.feature_projection_input_dim |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.adaptor_kernel_size |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.adaptor_stride |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.adaptor_dropout |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.num_adapter_layers |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.position_embeddings_type |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.conv_depthwise_kernel_size |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.add_adapter |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.left_max_position_embeddings |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.right_max_position_embeddings |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.speech_encoder_chunk_size |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.speech_encoder_left_chunk_num |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.t2u_bos_token_id |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.t2u_pad_token_id |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.t2u_eos_token_id |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.t2u_encoder_layers |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.t2u_encoder_ffn_dim |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.t2u_encoder_attention_heads |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.t2u_decoder_layers |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.t2u_decoder_ffn_dim |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.t2u_decoder_attention_heads |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.t2u_max_position_embeddings |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.t2u_variance_predictor_embed_dim |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.t2u_variance_predictor_hidden_dim |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.t2u_variance_predictor_kernel_size |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.t2u_variance_pred_dropout |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.sampling_rate |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.upsample_initial_channel |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.upsample_rates |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.upsample_kernel_sizes |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.resblock_kernel_sizes |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.resblock_dilation_sizes |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.leaky_relu_slope |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.unit_hifi_gan_vocab_size |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.unit_embed_dim |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.lang_embed_dim |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.spkr_embed_dim |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.vocoder_num_langs |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.vocoder_num_spkrs |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.variance_predictor_kernel_size |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.var_pred_dropout |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.vocoder_offset |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.pad_token_id |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.bos_token_id |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.eos_token_id |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Config.decoder_start_token_id |
1 | 0 | 0 |
transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2 (110 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SeamlessM4Tv2ForSpeechToSpeech.__init__ |
2 | 0 | 0 |
meth |
SeamlessM4Tv2ForSpeechToSpeech.get_encoder |
1 | 0 | 0 |
meth |
SeamlessM4Tv2ForSpeechToSpeech.get_decoder |
1 | 0 | 0 |
meth |
SeamlessM4Tv2ForSpeechToSpeech.get_input_embeddings |
1 | 0 | 0 |
meth |
SeamlessM4Tv2ForSpeechToSpeech.set_input_embeddings |
2 | 0 | 0 |
meth |
SeamlessM4Tv2ForSpeechToSpeech.forward |
15 | 14 | 0 |
meth |
SeamlessM4Tv2ForSpeechToSpeech.generate |
6 | 5 | 0 |
attr |
SeamlessM4Tv2ForSpeechToSpeech.shared |
1 | 0 | 0 |
attr |
SeamlessM4Tv2ForSpeechToSpeech.speech_encoder |
1 | 0 | 0 |
attr |
SeamlessM4Tv2ForSpeechToSpeech.text_decoder |
1 | 0 | 0 |
attr |
SeamlessM4Tv2ForSpeechToSpeech.lm_head |
1 | 0 | 0 |
attr |
SeamlessM4Tv2ForSpeechToSpeech.t2u_model |
1 | 0 | 0 |
attr |
SeamlessM4Tv2ForSpeechToSpeech.vocoder |
1 | 0 | 0 |
meth |
SeamlessM4Tv2PreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
SeamlessM4Tv2PreTrainedModel._compute_sub_sample_lengths_from_attention_mask |
2 | 0 | 0 |
meth |
SeamlessM4Tv2PreTrainedModel._indices_to_subwords |
2 | 0 | 0 |
meth |
SeamlessM4Tv2PreTrainedModel._count_character_length_in_subword |
7 | 0 | 0 |
meth |
SeamlessM4Tv2PreTrainedModel._get_char_input_ids |
6 | 0 | 0 |
meth |
SeamlessM4Tv2PreTrainedModel._hard_upsample |
3 | 0 | 0 |
meth |
SeamlessM4Tv2ForTextToSpeech.__init__ |
2 | 1 | 0 |
meth |
SeamlessM4Tv2ForTextToSpeech.get_encoder |
1 | 0 | 0 |
meth |
SeamlessM4Tv2ForTextToSpeech.get_decoder |
1 | 0 | 0 |
meth |
SeamlessM4Tv2ForTextToSpeech.get_input_embeddings |
1 | 0 | 0 |
meth |
SeamlessM4Tv2ForTextToSpeech.set_input_embeddings |
2 | 0 | 0 |
meth |
SeamlessM4Tv2ForTextToSpeech.forward |
16 | 15 | 0 |
meth |
SeamlessM4Tv2ForTextToSpeech.generate |
6 | 5 | 0 |
attr |
SeamlessM4Tv2ForTextToSpeech.shared |
1 | 0 | 0 |
attr |
SeamlessM4Tv2ForTextToSpeech.text_encoder |
1 | 0 | 0 |
attr |
SeamlessM4Tv2ForTextToSpeech.text_decoder |
1 | 0 | 0 |
attr |
SeamlessM4Tv2ForTextToSpeech.lm_head |
1 | 0 | 0 |
attr |
SeamlessM4Tv2ForTextToSpeech.t2u_model |
1 | 0 | 0 |
attr |
SeamlessM4Tv2ForTextToSpeech.vocoder |
1 | 0 | 0 |
meth |
SeamlessM4Tv2Model.__init__ |
3 | 0 | 0 |
meth |
SeamlessM4Tv2Model.set_modality |
2 | 0 | 0 |
meth |
SeamlessM4Tv2Model.get_encoder |
1 | 0 | 0 |
meth |
SeamlessM4Tv2Model.get_input_embeddings |
1 | 0 | 0 |
meth |
SeamlessM4Tv2Model.set_input_embeddings |
2 | 0 | 0 |
meth |
SeamlessM4Tv2Model.forward |
16 | 15 | 0 |
meth |
SeamlessM4Tv2Model.generate |
8 | 7 | 0 |
attr |
SeamlessM4Tv2Model.shared |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Model.text_encoder |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Model.speech_encoder |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Model.text_decoder |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Model.lm_head |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Model.current_modality |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Model.t2u_model |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Model.vocoder |
1 | 0 | 0 |
attr |
SeamlessM4Tv2Model.main_input_name |
1 | 0 | 0 |
meth |
SeamlessM4Tv2ForSpeechToText.__init__ |
2 | 1 | 0 |
meth |
SeamlessM4Tv2ForSpeechToText.get_encoder |
1 | 0 | 0 |
meth |
SeamlessM4Tv2ForSpeechToText.get_decoder |
1 | 0 | 0 |
meth |
SeamlessM4Tv2ForSpeechToText.get_input_embeddings |
1 | 0 | 0 |
meth |
SeamlessM4Tv2ForSpeechToText.set_input_embeddings |
2 | 0 | 0 |
meth |
SeamlessM4Tv2ForSpeechToText.forward |
15 | 14 | 0 |
meth |
SeamlessM4Tv2ForSpeechToText.generate |
9 | 0 | 0 |
attr |
SeamlessM4Tv2ForSpeechToText.shared |
1 | 0 | 0 |
attr |
SeamlessM4Tv2ForSpeechToText.speech_encoder |
1 | 0 | 0 |
attr |
SeamlessM4Tv2ForSpeechToText.text_decoder |
1 | 0 | 0 |
attr |
SeamlessM4Tv2ForSpeechToText.lm_head |
1 | 0 | 0 |
meth |
SeamlessM4Tv2ForTextToText.__init__ |
2 | 1 | 0 |
meth |
SeamlessM4Tv2ForTextToText.get_encoder |
1 | 0 | 0 |
meth |
SeamlessM4Tv2ForTextToText.get_decoder |
1 | 0 | 0 |
meth |
SeamlessM4Tv2ForTextToText.get_input_embeddings |
1 | 0 | 0 |
meth |
SeamlessM4Tv2ForTextToText.set_input_embeddings |
2 | 0 | 0 |
meth |
SeamlessM4Tv2ForTextToText.forward |
15 | 14 | 0 |
meth |
SeamlessM4Tv2ForTextToText.generate |
9 | 0 | 0 |
attr |
SeamlessM4Tv2ForTextToText.shared |
1 | 0 | 0 |
attr |
SeamlessM4Tv2ForTextToText.text_encoder |
1 | 0 | 0 |
attr |
SeamlessM4Tv2ForTextToText.text_decoder |
1 | 0 | 0 |
attr |
SeamlessM4Tv2ForTextToText.lm_head |
1 | 0 | 0 |
transformers.models.seed_oss.configuration_seed_oss (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SeedOssConfig.__init__ |
25 | 23 | 0 |
attr |
SeedOssConfig.vocab_size |
1 | 0 | 0 |
attr |
SeedOssConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
SeedOssConfig.hidden_size |
1 | 0 | 0 |
attr |
SeedOssConfig.intermediate_size |
1 | 0 | 0 |
attr |
SeedOssConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
SeedOssConfig.num_attention_heads |
1 | 0 | 0 |
attr |
SeedOssConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
SeedOssConfig.hidden_act |
1 | 0 | 0 |
attr |
SeedOssConfig.initializer_range |
1 | 0 | 0 |
attr |
SeedOssConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
SeedOssConfig.pretraining_tp |
1 | 0 | 0 |
attr |
SeedOssConfig.use_cache |
1 | 0 | 0 |
attr |
SeedOssConfig.attention_bias |
1 | 0 | 0 |
attr |
SeedOssConfig.attention_out_bias |
1 | 0 | 0 |
attr |
SeedOssConfig.attention_dropout |
1 | 0 | 0 |
attr |
SeedOssConfig.residual_dropout |
1 | 0 | 0 |
attr |
SeedOssConfig.mlp_bias |
1 | 0 | 0 |
attr |
SeedOssConfig.head_dim |
1 | 0 | 0 |
attr |
SeedOssConfig.rope_parameters |
1 | 0 | 0 |
attr |
SeedOssConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
SeedOssConfig.pad_token_id |
1 | 0 | 0 |
attr |
SeedOssConfig.bos_token_id |
1 | 0 | 0 |
attr |
SeedOssConfig.eos_token_id |
1 | 0 | 0 |
transformers.models.seed_oss.modeling_seed_oss (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SeedOssForCausalLM.__init__ |
2 | 0 | 0 |
attr |
SeedOssForCausalLM.model |
1 | 0 | 0 |
attr |
SeedOssForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
SeedOssForCausalLM.lm_head |
1 | 0 | 0 |
meth |
SeedOssModel.__init__ |
2 | 1 | 0 |
attr |
SeedOssModel.padding_idx |
1 | 0 | 0 |
attr |
SeedOssModel.vocab_size |
1 | 0 | 0 |
attr |
SeedOssModel.embed_tokens |
1 | 0 | 0 |
attr |
SeedOssModel.layers |
1 | 0 | 0 |
attr |
SeedOssModel.norm |
1 | 0 | 0 |
attr |
SeedOssModel.rotary_emb |
1 | 0 | 0 |
attr |
SeedOssModel.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.segformer.configuration_segformer (39 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SegformerConfig.__init__ |
20 | 0 | 0 |
attr |
SegformerConfig.num_channels |
1 | 0 | 0 |
attr |
SegformerConfig.num_encoder_blocks |
1 | 0 | 0 |
attr |
SegformerConfig.depths |
1 | 0 | 0 |
attr |
SegformerConfig.sr_ratios |
1 | 0 | 0 |
attr |
SegformerConfig.hidden_sizes |
1 | 0 | 0 |
attr |
SegformerConfig.patch_sizes |
1 | 0 | 0 |
attr |
SegformerConfig.strides |
1 | 0 | 0 |
attr |
SegformerConfig.mlp_ratios |
1 | 0 | 0 |
attr |
SegformerConfig.num_attention_heads |
1 | 0 | 0 |
attr |
SegformerConfig.hidden_act |
1 | 0 | 0 |
attr |
SegformerConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
SegformerConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
SegformerConfig.classifier_dropout_prob |
1 | 0 | 0 |
attr |
SegformerConfig.initializer_range |
1 | 0 | 0 |
attr |
SegformerConfig.drop_path_rate |
1 | 0 | 0 |
attr |
SegformerConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
SegformerConfig.decoder_hidden_size |
1 | 0 | 0 |
attr |
SegformerConfig.reshape_last_stage |
1 | 0 | 0 |
attr |
SegformerConfig.semantic_loss_ignore_index |
1 | 0 | 0 |
transformers.models.segformer.image_processing_segformer (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SegformerImageProcessor.__init__ |
11 | 10 | 0 |
meth |
SegformerImageProcessor.resize |
7 | 6 | 0 |
meth |
SegformerImageProcessor._preprocess |
12 | 11 | 0 |
meth |
SegformerImageProcessor.__call__ |
4 | 0 | 0 |
meth |
SegformerImageProcessor.post_process_semantic_segmentation |
3 | 1 | 0 |
attr |
SegformerImageProcessor.do_resize |
1 | 0 | 0 |
attr |
SegformerImageProcessor.size |
1 | 0 | 0 |
attr |
SegformerImageProcessor.resample |
1 | 0 | 0 |
attr |
SegformerImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
SegformerImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
SegformerImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
SegformerImageProcessor.image_mean |
1 | 0 | 0 |
attr |
SegformerImageProcessor.image_std |
1 | 0 | 0 |
attr |
SegformerImageProcessor.do_reduce_labels |
1 | 0 | 0 |
transformers.models.segformer.image_processing_segformer_fast (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SegformerImageProcessorFast.__init__ |
2 | 1 | 0 |
meth |
SegformerImageProcessorFast.reduce_label |
2 | 1 | 0 |
meth |
SegformerImageProcessorFast._preprocess |
14 | 13 | 0 |
meth |
SegformerImageProcessorFast.post_process_semantic_segmentation |
3 | 1 | 0 |
transformers.models.segformer.modeling_segformer (42 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SegformerDecodeHead.__init__ |
2 | 0 | 0 |
meth |
SegformerDecodeHead.forward |
3 | 2 | 0 |
attr |
SegformerDecodeHead.linear_c |
1 | 0 | 0 |
attr |
SegformerDecodeHead.linear_fuse |
1 | 0 | 0 |
attr |
SegformerDecodeHead.batch_norm |
1 | 0 | 0 |
attr |
SegformerDecodeHead.activation |
1 | 0 | 0 |
attr |
SegformerDecodeHead.dropout |
1 | 0 | 0 |
attr |
SegformerDecodeHead.classifier |
1 | 0 | 0 |
attr |
SegformerDecodeHead.config |
1 | 0 | 0 |
meth |
SegformerModel.__init__ |
2 | 0 | 0 |
meth |
SegformerModel.forward |
6 | 5 | 0 |
attr |
SegformerModel.encoder |
1 | 0 | 0 |
meth |
SegformerLayer.__init__ |
7 | 0 | 0 |
meth |
SegformerLayer.forward |
5 | 0 | 0 |
attr |
SegformerLayer.layer_norm_1 |
1 | 0 | 0 |
attr |
SegformerLayer.attention |
1 | 0 | 0 |
attr |
SegformerLayer.drop_path |
1 | 0 | 0 |
attr |
SegformerLayer.layer_norm_2 |
1 | 0 | 0 |
attr |
SegformerLayer.mlp |
1 | 0 | 0 |
meth |
SegformerForImageClassification.__init__ |
2 | 0 | 0 |
meth |
SegformerForImageClassification.forward |
7 | 6 | 0 |
attr |
SegformerForImageClassification.num_labels |
1 | 0 | 0 |
attr |
SegformerForImageClassification.segformer |
1 | 0 | 0 |
attr |
SegformerForImageClassification.classifier |
1 | 0 | 0 |
meth |
SegformerForSemanticSegmentation.__init__ |
2 | 0 | 0 |
meth |
SegformerForSemanticSegmentation.forward |
7 | 6 | 0 |
attr |
SegformerForSemanticSegmentation.segformer |
1 | 0 | 0 |
attr |
SegformerForSemanticSegmentation.decode_head |
1 | 0 | 0 |
transformers.models.segformer.modular_segformer (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SegformerImageProcessorFast._preprocess |
14 | 13 | 0 |
transformers.models.seggpt.configuration_seggpt (40 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SegGptConfig.__init__ |
21 | 0 | 0 |
attr |
SegGptConfig.hidden_size |
1 | 0 | 0 |
attr |
SegGptConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
SegGptConfig.num_attention_heads |
1 | 0 | 0 |
attr |
SegGptConfig.hidden_act |
1 | 0 | 0 |
attr |
SegGptConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
SegGptConfig.initializer_range |
1 | 0 | 0 |
attr |
SegGptConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
SegGptConfig.image_size |
1 | 0 | 0 |
attr |
SegGptConfig.patch_size |
1 | 0 | 0 |
attr |
SegGptConfig.num_channels |
1 | 0 | 0 |
attr |
SegGptConfig.qkv_bias |
1 | 0 | 0 |
attr |
SegGptConfig.drop_path_rate |
1 | 0 | 0 |
attr |
SegGptConfig.pretrain_image_size |
1 | 0 | 0 |
attr |
SegGptConfig.decoder_hidden_size |
1 | 0 | 0 |
attr |
SegGptConfig.use_relative_position_embeddings |
1 | 0 | 0 |
attr |
SegGptConfig.merge_index |
1 | 0 | 0 |
attr |
SegGptConfig.intermediate_hidden_state_indices |
1 | 0 | 0 |
attr |
SegGptConfig.beta |
1 | 0 | 0 |
attr |
SegGptConfig.mlp_dim |
1 | 0 | 0 |
transformers.models.seggpt.image_processing_seggpt (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SegGptImageProcessor.__init__ |
11 | 10 | 0 |
meth |
SegGptImageProcessor.resize |
7 | 6 | 0 |
meth |
SegGptImageProcessor._preprocess_step |
15 | 13 | 0 |
meth |
SegGptImageProcessor.preprocess |
18 | 16 | 0 |
meth |
SegGptImageProcessor.post_process_semantic_segmentation |
4 | 2 | 0 |
attr |
SegGptImageProcessor.do_resize |
1 | 0 | 0 |
attr |
SegGptImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
SegGptImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
SegGptImageProcessor.size |
1 | 0 | 0 |
attr |
SegGptImageProcessor.resample |
1 | 0 | 0 |
attr |
SegGptImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
SegGptImageProcessor.image_mean |
1 | 0 | 0 |
attr |
SegGptImageProcessor.image_std |
1 | 0 | 0 |
attr |
SegGptImageProcessor.do_convert_rgb |
1 | 0 | 0 |
transformers.models.seggpt.modeling_seggpt (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SegGptForImageSegmentation.__init__ |
2 | 1 | 0 |
meth |
SegGptForImageSegmentation.forward |
12 | 11 | 0 |
attr |
SegGptForImageSegmentation.model |
1 | 0 | 0 |
attr |
SegGptForImageSegmentation.decoder |
1 | 0 | 0 |
meth |
SegGptModel.__init__ |
2 | 1 | 0 |
meth |
SegGptModel.forward |
12 | 11 | 0 |
attr |
SegGptModel.embeddings |
1 | 0 | 0 |
attr |
SegGptModel.encoder |
1 | 0 | 0 |
transformers.models.sew.configuration_sew (78 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SEWConfig.__init__ |
39 | 0 | 0 |
prop |
SEWConfig.inputs_to_logits_ratio |
1 | 0 | 0 |
attr |
SEWConfig.pad_token_id |
1 | 0 | 0 |
attr |
SEWConfig.bos_token_id |
1 | 0 | 0 |
attr |
SEWConfig.eos_token_id |
1 | 0 | 0 |
attr |
SEWConfig.hidden_size |
1 | 0 | 0 |
attr |
SEWConfig.feat_extract_norm |
1 | 0 | 0 |
attr |
SEWConfig.feat_extract_activation |
1 | 0 | 0 |
attr |
SEWConfig.conv_dim |
1 | 0 | 0 |
attr |
SEWConfig.conv_stride |
1 | 0 | 0 |
attr |
SEWConfig.conv_kernel |
1 | 0 | 0 |
attr |
SEWConfig.conv_bias |
1 | 0 | 0 |
attr |
SEWConfig.num_conv_pos_embeddings |
1 | 0 | 0 |
attr |
SEWConfig.num_conv_pos_embedding_groups |
1 | 0 | 0 |
attr |
SEWConfig.num_feat_extract_layers |
1 | 0 | 0 |
attr |
SEWConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
SEWConfig.intermediate_size |
1 | 0 | 0 |
attr |
SEWConfig.squeeze_factor |
1 | 0 | 0 |
attr |
SEWConfig.hidden_act |
1 | 0 | 0 |
attr |
SEWConfig.num_attention_heads |
1 | 0 | 0 |
attr |
SEWConfig.hidden_dropout |
1 | 0 | 0 |
attr |
SEWConfig.attention_dropout |
1 | 0 | 0 |
attr |
SEWConfig.activation_dropout |
1 | 0 | 0 |
attr |
SEWConfig.feat_proj_dropout |
1 | 0 | 0 |
attr |
SEWConfig.final_dropout |
1 | 0 | 0 |
attr |
SEWConfig.layerdrop |
1 | 0 | 0 |
attr |
SEWConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
SEWConfig.initializer_range |
1 | 0 | 0 |
attr |
SEWConfig.vocab_size |
1 | 0 | 0 |
attr |
SEWConfig.apply_spec_augment |
1 | 0 | 0 |
attr |
SEWConfig.mask_time_prob |
1 | 0 | 0 |
attr |
SEWConfig.mask_time_length |
1 | 0 | 0 |
attr |
SEWConfig.mask_time_min_masks |
1 | 0 | 0 |
attr |
SEWConfig.mask_feature_prob |
1 | 0 | 0 |
attr |
SEWConfig.mask_feature_length |
1 | 0 | 0 |
attr |
SEWConfig.mask_feature_min_masks |
1 | 0 | 0 |
attr |
SEWConfig.ctc_loss_reduction |
1 | 0 | 0 |
attr |
SEWConfig.ctc_zero_infinity |
1 | 0 | 0 |
attr |
SEWConfig.use_weighted_layer_sum |
1 | 0 | 0 |
attr |
SEWConfig.classifier_proj_size |
1 | 0 | 0 |
transformers.models.sew.modeling_sew (34 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SEWForCTC.__init__ |
3 | 1 | 0 |
meth |
SEWForCTC.tie_weights |
2 | 0 | 0 |
meth |
SEWForCTC.freeze_feature_encoder |
1 | 0 | 0 |
meth |
SEWForCTC.freeze_base_model |
1 | 0 | 0 |
meth |
SEWForCTC.forward |
8 | 7 | 0 |
attr |
SEWForCTC.sew |
1 | 0 | 0 |
attr |
SEWForCTC.dropout |
1 | 0 | 0 |
attr |
SEWForCTC.target_lang |
1 | 0 | 0 |
attr |
SEWForCTC.lm_head |
1 | 0 | 0 |
meth |
SEWModel.__init__ |
2 | 1 | 0 |
meth |
SEWModel._mask_hidden_states |
4 | 3 | 0 |
meth |
SEWModel.forward |
8 | 7 | 0 |
attr |
SEWModel.feature_extractor |
1 | 0 | 0 |
attr |
SEWModel.layer_norm |
1 | 0 | 0 |
attr |
SEWModel.project_features |
1 | 0 | 0 |
attr |
SEWModel.feature_dropout |
1 | 0 | 0 |
attr |
SEWModel.encoder |
1 | 0 | 0 |
attr |
SEWModel.feature_projection |
1 | 0 | 0 |
attr |
SEWModel.masked_spec_embed |
1 | 0 | 0 |
meth |
SEWForSequenceClassification.__init__ |
2 | 0 | 0 |
meth |
SEWForSequenceClassification.freeze_feature_encoder |
1 | 0 | 0 |
meth |
SEWForSequenceClassification.freeze_base_model |
1 | 0 | 0 |
meth |
SEWForSequenceClassification.forward |
8 | 7 | 0 |
attr |
SEWForSequenceClassification.sew |
1 | 0 | 0 |
attr |
SEWForSequenceClassification.projector |
1 | 0 | 0 |
attr |
SEWForSequenceClassification.classifier |
1 | 0 | 0 |
attr |
SEWForSequenceClassification.layer_weights |
1 | 0 | 0 |
meth |
SEWPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
SEWPreTrainedModel._get_feat_extract_output_lengths |
2 | 1 | 0 |
meth |
SEWPreTrainedModel._get_feature_vector_attention_mask |
3 | 2 | 0 |
transformers.models.sew.modular_sew (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SEWModel.__init__ |
2 | 1 | 0 |
meth |
SEWModel._mask_hidden_states |
4 | 3 | 0 |
meth |
SEWModel.forward |
8 | 7 | 0 |
attr |
SEWModel.feature_extractor |
1 | 0 | 0 |
attr |
SEWModel.layer_norm |
1 | 0 | 0 |
attr |
SEWModel.project_features |
1 | 0 | 0 |
attr |
SEWModel.feature_dropout |
1 | 0 | 0 |
attr |
SEWModel.encoder |
1 | 0 | 0 |
attr |
SEWModel.feature_projection |
1 | 0 | 0 |
attr |
SEWModel.masked_spec_embed |
1 | 0 | 0 |
meth |
SEWPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
SEWPreTrainedModel._get_feat_extract_output_lengths |
2 | 1 | 0 |
meth |
SEWPreTrainedModel._get_feature_vector_attention_mask |
3 | 2 | 0 |
transformers.models.sew_d.configuration_sew_d (90 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SEWDConfig.__init__ |
45 | 0 | 0 |
meth |
SEWDConfig.to_dict |
1 | 0 | 0 |
prop |
SEWDConfig.inputs_to_logits_ratio |
1 | 0 | 0 |
attr |
SEWDConfig.pad_token_id |
1 | 0 | 0 |
attr |
SEWDConfig.bos_token_id |
1 | 0 | 0 |
attr |
SEWDConfig.eos_token_id |
1 | 0 | 0 |
attr |
SEWDConfig.hidden_size |
1 | 0 | 0 |
attr |
SEWDConfig.feat_extract_norm |
1 | 0 | 0 |
attr |
SEWDConfig.feat_extract_activation |
1 | 0 | 0 |
attr |
SEWDConfig.conv_dim |
1 | 0 | 0 |
attr |
SEWDConfig.conv_stride |
1 | 0 | 0 |
attr |
SEWDConfig.conv_kernel |
1 | 0 | 0 |
attr |
SEWDConfig.conv_bias |
1 | 0 | 0 |
attr |
SEWDConfig.num_conv_pos_embeddings |
1 | 0 | 0 |
attr |
SEWDConfig.num_conv_pos_embedding_groups |
1 | 0 | 0 |
attr |
SEWDConfig.num_feat_extract_layers |
1 | 0 | 0 |
attr |
SEWDConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
SEWDConfig.intermediate_size |
1 | 0 | 0 |
attr |
SEWDConfig.squeeze_factor |
1 | 0 | 0 |
attr |
SEWDConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
SEWDConfig.position_buckets |
1 | 0 | 0 |
attr |
SEWDConfig.share_att_key |
1 | 0 | 0 |
attr |
SEWDConfig.relative_attention |
1 | 0 | 0 |
attr |
SEWDConfig.norm_rel_ebd |
1 | 0 | 0 |
attr |
SEWDConfig.pos_att_type |
1 | 0 | 0 |
attr |
SEWDConfig.hidden_act |
1 | 0 | 0 |
attr |
SEWDConfig.num_attention_heads |
1 | 0 | 0 |
attr |
SEWDConfig.attention_dropout |
1 | 0 | 0 |
attr |
SEWDConfig.activation_dropout |
1 | 0 | 0 |
attr |
SEWDConfig.feat_proj_dropout |
1 | 0 | 0 |
attr |
SEWDConfig.final_dropout |
1 | 0 | 0 |
attr |
SEWDConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
SEWDConfig.feature_layer_norm_eps |
1 | 0 | 0 |
attr |
SEWDConfig.initializer_range |
1 | 0 | 0 |
attr |
SEWDConfig.vocab_size |
1 | 0 | 0 |
attr |
SEWDConfig.apply_spec_augment |
1 | 0 | 0 |
attr |
SEWDConfig.mask_time_prob |
1 | 0 | 0 |
attr |
SEWDConfig.mask_time_length |
1 | 0 | 0 |
attr |
SEWDConfig.mask_time_min_masks |
1 | 0 | 0 |
attr |
SEWDConfig.mask_feature_prob |
1 | 0 | 0 |
attr |
SEWDConfig.mask_feature_length |
1 | 0 | 0 |
attr |
SEWDConfig.mask_feature_min_masks |
1 | 0 | 0 |
attr |
SEWDConfig.ctc_loss_reduction |
1 | 0 | 0 |
attr |
SEWDConfig.ctc_zero_infinity |
1 | 0 | 0 |
attr |
SEWDConfig.use_weighted_layer_sum |
1 | 0 | 0 |
attr |
SEWDConfig.classifier_proj_size |
1 | 0 | 0 |
transformers.models.sew_d.modeling_sew_d (34 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SEWDPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
SEWDPreTrainedModel._get_feat_extract_output_lengths |
2 | 1 | 0 |
meth |
SEWDPreTrainedModel._get_feature_vector_attention_mask |
3 | 2 | 0 |
meth |
SEWDForSequenceClassification.init |
2 | 0 | 0 |
meth |
SEWDForSequenceClassification.freeze_feature_encoder |
1 | 0 | 0 |
meth |
SEWDForSequenceClassification.freeze_base_model |
1 | 0 | 0 |
meth |
SEWDForSequenceClassification.forward |
8 | 7 | 0 |
attr |
SEWDForSequenceClassification.sew_d |
1 | 0 | 0 |
attr |
SEWDForSequenceClassification.projector |
1 | 0 | 0 |
attr |
SEWDForSequenceClassification.classifier |
1 | 0 | 0 |
attr |
SEWDForSequenceClassification.layer_weights |
1 | 0 | 0 |
meth |
SEWDModel.init |
2 | 1 | 0 |
meth |
SEWDModel._mask_hidden_states |
4 | 3 | 0 |
meth |
SEWDModel.forward |
8 | 7 | 0 |
attr |
SEWDModel.feature_extractor |
1 | 0 | 0 |
attr |
SEWDModel.layer_norm |
1 | 0 | 0 |
attr |
SEWDModel.project_features |
1 | 0 | 0 |
attr |
SEWDModel.feature_dropout |
1 | 0 | 0 |
attr |
SEWDModel.encoder |
1 | 0 | 0 |
attr |
SEWDModel.feature_projection |
1 | 0 | 0 |
attr |
SEWDModel.masked_spec_embed |
1 | 0 | 0 |
meth |
SEWDForCTC.init |
3 | 1 | 0 |
meth |
SEWDForCTC.tie_weights |
2 | 0 | 0 |
meth |
SEWDForCTC.freeze_feature_encoder |
1 | 0 | 0 |
meth |
SEWDForCTC.freeze_base_model |
1 | 0 | 0 |
meth |
SEWDForCTC.forward |
8 | 7 | 0 |
attr |
SEWDForCTC.sew_d |
1 | 0 | 0 |
attr |
SEWDForCTC.dropout |
1 | 0 | 0 |
attr |
SEWDForCTC.target_lang |
1 | 0 | 0 |
attr |
SEWDForCTC.lm_head |
1 | 0 | 0 |
transformers.models.shieldgemma2.configuration_shieldgemma2 (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ShieldGemma2Config.init |
9 | 5 | 0 |
attr |
ShieldGemma2Config.vision_config |
1 | 0 | 0 |
attr |
ShieldGemma2Config.text_config |
1 | 0 | 0 |
attr |
ShieldGemma2Config.mm_tokens_per_image |
1 | 0 | 0 |
attr |
ShieldGemma2Config.boi_token_index |
1 | 0 | 0 |
attr |
ShieldGemma2Config.eoi_token_index |
1 | 0 | 0 |
attr |
ShieldGemma2Config.image_token_index |
1 | 0 | 0 |
attr |
ShieldGemma2Config.initializer_range |
1 | 0 | 0 |
transformers.models.shieldgemma2.modeling_shieldgemma2 (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ShieldGemma2ForImageClassification.init |
2 | 1 | 0 |
meth |
ShieldGemma2ForImageClassification.get_input_embeddings |
1 | 0 | 0 |
meth |
ShieldGemma2ForImageClassification.set_input_embeddings |
2 | 0 | 0 |
meth |
ShieldGemma2ForImageClassification.get_output_embeddings |
1 | 0 | 0 |
meth |
ShieldGemma2ForImageClassification.set_output_embeddings |
2 | 0 | 0 |
meth |
ShieldGemma2ForImageClassification.forward |
16 | 15 | 0 |
attr |
ShieldGemma2ForImageClassification.yes_token_index |
1 | 0 | 0 |
attr |
ShieldGemma2ForImageClassification.no_token_index |
1 | 0 | 0 |
attr |
ShieldGemma2ForImageClassification.model |
1 | 0 | 0 |
transformers.models.shieldgemma2.processing_shieldgemma2 (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ShieldGemma2Processor.init |
7 | 0 | 0 |
meth |
ShieldGemma2Processor.call |
4 | 3 | 0 |
attr |
ShieldGemma2Processor.policy_definitions |
1 | 0 | 0 |
transformers.models.siglip.configuration_siglip (57 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SiglipTextConfig.init |
15 | 0 | 0 |
attr |
SiglipTextConfig.pad_token_id |
1 | 0 | 0 |
attr |
SiglipTextConfig.bos_token_id |
1 | 0 | 0 |
attr |
SiglipTextConfig.eos_token_id |
1 | 0 | 0 |
attr |
SiglipTextConfig.vocab_size |
1 | 0 | 0 |
attr |
SiglipTextConfig.hidden_size |
1 | 0 | 0 |
attr |
SiglipTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
SiglipTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
SiglipTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
SiglipTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
SiglipTextConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
SiglipTextConfig.hidden_act |
1 | 0 | 0 |
attr |
SiglipTextConfig.attention_dropout |
1 | 0 | 0 |
attr |
SiglipTextConfig.projection_size |
1 | 0 | 0 |
meth |
SiglipVisionConfig.init |
12 | 0 | 0 |
attr |
SiglipVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
SiglipVisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
SiglipVisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
SiglipVisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
SiglipVisionConfig.num_channels |
1 | 0 | 0 |
attr |
SiglipVisionConfig.patch_size |
1 | 0 | 0 |
attr |
SiglipVisionConfig.image_size |
1 | 0 | 0 |
attr |
SiglipVisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
SiglipVisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
SiglipVisionConfig.hidden_act |
1 | 0 | 0 |
meth |
SiglipConfig.init |
4 | 0 | 0 |
attr |
SiglipConfig.text_config |
1 | 0 | 0 |
attr |
SiglipConfig.vision_config |
1 | 0 | 0 |
attr |
SiglipConfig.initializer_factor |
1 | 0 | 0 |
transformers.models.siglip.image_processing_siglip (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SiglipImageProcessor.init |
11 | 10 | 0 |
attr |
SiglipImageProcessor.do_resize |
1 | 0 | 0 |
attr |
SiglipImageProcessor.size |
1 | 0 | 0 |
attr |
SiglipImageProcessor.resample |
1 | 0 | 0 |
attr |
SiglipImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
SiglipImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
SiglipImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
SiglipImageProcessor.image_mean |
1 | 0 | 0 |
attr |
SiglipImageProcessor.image_std |
1 | 0 | 0 |
attr |
SiglipImageProcessor.do_convert_rgb |
1 | 0 | 0 |
transformers.models.siglip.modeling_siglip (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SiglipPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
SiglipTextModel.init |
2 | 1 | 0 |
meth |
SiglipTextModel.set_input_embeddings |
2 | 0 | 0 |
attr |
SiglipTextModel.text_model |
1 | 0 | 0 |
meth |
SiglipForImageClassification.set_input_embeddings |
2 | 1 | 0 |
attr |
SiglipForImageClassification.num_labels |
1 | 0 | 0 |
attr |
SiglipForImageClassification.vision_model |
1 | 0 | 0 |
attr |
SiglipForImageClassification.classifier |
1 | 0 | 0 |
meth |
SiglipModel.init |
2 | 1 | 0 |
meth |
SiglipModel.set_input_embeddings |
2 | 1 | 0 |
attr |
SiglipModel.text_model |
1 | 0 | 0 |
attr |
SiglipModel.vision_model |
1 | 0 | 0 |
attr |
SiglipModel.logit_scale |
1 | 0 | 0 |
attr |
SiglipModel.logit_bias |
1 | 0 | 0 |
meth |
SiglipVisionModel.init |
2 | 1 | 0 |
meth |
SiglipVisionModel.forward |
4 | 3 | 0 |
attr |
SiglipVisionModel.vision_model |
1 | 0 | 0 |
transformers.models.siglip.processing_siglip (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SiglipProcessor.init |
3 | 0 | 0 |
transformers.models.siglip.tokenization_siglip (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SiglipTokenizer.init |
10 | 2 | 0 |
meth |
SiglipTokenizer.get_vocab |
1 | 0 | 0 |
meth |
SiglipTokenizer.getstate |
1 | 0 | 0 |
meth |
SiglipTokenizer.setstate |
2 | 0 | 0 |
meth |
SiglipTokenizer.canonicalize_text |
3 | 0 | 0 |
meth |
SiglipTokenizer.tokenize |
4 | 2 | 0 |
meth |
SiglipTokenizer._tokenize |
3 | 0 | 0 |
meth |
SiglipTokenizer._convert_token_to_id |
2 | 0 | 0 |
meth |
SiglipTokenizer._convert_id_to_token |
2 | 0 | 0 |
meth |
SiglipTokenizer.convert_tokens_to_string |
2 | 0 | 0 |
prop |
SiglipTokenizer.vocab_size |
1 | 0 | 0 |
prop |
SiglipTokenizer.unk_token_length |
1 | 0 | 0 |
attr |
SiglipTokenizer.sp_model_kwargs |
1 | 0 | 0 |
attr |
SiglipTokenizer.do_lower_case |
1 | 0 | 0 |
transformers.models.siglip2.configuration_siglip2 (57 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Siglip2TextConfig.init |
15 | 0 | 0 |
attr |
Siglip2TextConfig.pad_token_id |
1 | 0 | 0 |
attr |
Siglip2TextConfig.bos_token_id |
1 | 0 | 0 |
attr |
Siglip2TextConfig.eos_token_id |
1 | 0 | 0 |
attr |
Siglip2TextConfig.vocab_size |
1 | 0 | 0 |
attr |
Siglip2TextConfig.hidden_size |
1 | 0 | 0 |
attr |
Siglip2TextConfig.intermediate_size |
1 | 0 | 0 |
attr |
Siglip2TextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Siglip2TextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Siglip2TextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
Siglip2TextConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
Siglip2TextConfig.hidden_act |
1 | 0 | 0 |
attr |
Siglip2TextConfig.attention_dropout |
1 | 0 | 0 |
attr |
Siglip2TextConfig.projection_size |
1 | 0 | 0 |
meth |
Siglip2Config.init |
4 | 0 | 0 |
attr |
Siglip2Config.text_config |
1 | 0 | 0 |
attr |
Siglip2Config.vision_config |
1 | 0 | 0 |
attr |
Siglip2Config.initializer_factor |
1 | 0 | 0 |
meth |
Siglip2VisionConfig.init |
12 | 0 | 0 |
attr |
Siglip2VisionConfig.hidden_size |
1 | 0 | 0 |
attr |
Siglip2VisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
Siglip2VisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Siglip2VisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Siglip2VisionConfig.num_channels |
1 | 0 | 0 |
attr |
Siglip2VisionConfig.patch_size |
1 | 0 | 0 |
attr |
Siglip2VisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
Siglip2VisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
Siglip2VisionConfig.hidden_act |
1 | 0 | 0 |
attr |
Siglip2VisionConfig.num_patches |
1 | 0 | 0 |
transformers.models.siglip2.image_processing_siglip2 (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Siglip2ImageProcessor.init |
12 | 10 | 0 |
attr |
Siglip2ImageProcessor.do_resize |
1 | 0 | 0 |
attr |
Siglip2ImageProcessor.resample |
1 | 0 | 0 |
attr |
Siglip2ImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
Siglip2ImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
Siglip2ImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
Siglip2ImageProcessor.image_mean |
1 | 0 | 0 |
attr |
Siglip2ImageProcessor.image_std |
1 | 0 | 0 |
attr |
Siglip2ImageProcessor.do_convert_rgb |
1 | 0 | 0 |
attr |
Siglip2ImageProcessor.patch_size |
1 | 0 | 0 |
attr |
Siglip2ImageProcessor.max_num_patches |
1 | 0 | 0 |
transformers.models.siglip2.image_processing_siglip2_fast (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Siglip2ImageProcessorFast.init |
2 | 1 | 0 |
meth |
Siglip2ImageProcessorFast._validate_preprocess_kwargs |
2 | 1 | 0 |
meth |
Siglip2ImageProcessorFast._preprocess |
13 | 12 | 0 |
transformers.models.siglip2.modeling_siglip2 (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Siglip2Model.init |
2 | 1 | 0 |
meth |
Siglip2Model.set_input_embeddings |
2 | 1 | 0 |
meth |
Siglip2Model.forward |
11 | 10 | 0 |
attr |
Siglip2Model.text_model |
1 | 0 | 0 |
attr |
Siglip2Model.vision_model |
1 | 0 | 0 |
attr |
Siglip2Model.logit_scale |
1 | 0 | 0 |
attr |
Siglip2Model.logit_bias |
1 | 0 | 0 |
meth |
Siglip2VisionModel.init |
2 | 1 | 0 |
attr |
Siglip2VisionModel.vision_model |
1 | 0 | 0 |
meth |
Siglip2PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Siglip2TextModel.init |
2 | 1 | 0 |
meth |
Siglip2TextModel.set_input_embeddings |
2 | 0 | 0 |
attr |
Siglip2TextModel.text_model |
1 | 0 | 0 |
meth |
Siglip2ForImageClassification.set_input_embeddings |
2 | 1 | 0 |
meth |
Siglip2ForImageClassification.forward |
8 | 7 | 0 |
attr |
Siglip2ForImageClassification.num_labels |
1 | 0 | 0 |
attr |
Siglip2ForImageClassification.vision_model |
1 | 0 | 0 |
attr |
Siglip2ForImageClassification.classifier |
1 | 0 | 0 |
transformers.models.siglip2.modular_siglip2 (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Siglip2Model.forward |
11 | 10 | 0 |
meth |
Siglip2VisionConfig.init |
12 | 0 | 0 |
attr |
Siglip2VisionConfig.num_patches |
1 | 0 | 0 |
meth |
Siglip2Tokenizer.init |
9 | 7 | 0 |
meth |
Siglip2ForImageClassification.forward |
8 | 7 | 0 |
transformers.models.siglip2.processing_siglip2 (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Siglip2Processor.init |
3 | 0 | 0 |
transformers.models.siglip2.tokenization_siglip2 (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Siglip2Tokenizer.init |
9 | 7 | 0 |
transformers.models.smollm3.configuration_smollm3 (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SmolLM3Config.init |
26 | 24 | 0 |
attr |
SmolLM3Config.pad_token_id |
1 | 0 | 0 |
attr |
SmolLM3Config.bos_token_id |
1 | 0 | 0 |
attr |
SmolLM3Config.eos_token_id |
1 | 0 | 0 |
attr |
SmolLM3Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
SmolLM3Config.vocab_size |
1 | 0 | 0 |
attr |
SmolLM3Config.max_position_embeddings |
1 | 0 | 0 |
attr |
SmolLM3Config.mlp_bias |
1 | 0 | 0 |
attr |
SmolLM3Config.hidden_size |
1 | 0 | 0 |
attr |
SmolLM3Config.intermediate_size |
1 | 0 | 0 |
attr |
SmolLM3Config.num_hidden_layers |
1 | 0 | 0 |
attr |
SmolLM3Config.num_attention_heads |
1 | 0 | 0 |
attr |
SmolLM3Config.use_sliding_window |
1 | 0 | 0 |
attr |
SmolLM3Config.sliding_window |
1 | 0 | 0 |
attr |
SmolLM3Config.num_key_value_heads |
1 | 0 | 0 |
attr |
SmolLM3Config.hidden_act |
1 | 0 | 0 |
attr |
SmolLM3Config.initializer_range |
1 | 0 | 0 |
attr |
SmolLM3Config.rms_norm_eps |
1 | 0 | 0 |
attr |
SmolLM3Config.use_cache |
1 | 0 | 0 |
attr |
SmolLM3Config.attention_bias |
1 | 0 | 0 |
attr |
SmolLM3Config.attention_dropout |
1 | 0 | 0 |
attr |
SmolLM3Config.no_rope_layer_interval |
1 | 0 | 0 |
attr |
SmolLM3Config.layer_types |
1 | 0 | 0 |
attr |
SmolLM3Config.rope_parameters |
1 | 0 | 0 |
attr |
SmolLM3Config.no_rope_layers |
1 | 0 | 0 |
transformers.models.smollm3.modeling_smollm3 (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SmolLM3ForCausalLM.init |
2 | 0 | 0 |
attr |
SmolLM3ForCausalLM.model |
1 | 0 | 0 |
attr |
SmolLM3ForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
SmolLM3ForCausalLM.lm_head |
1 | 0 | 0 |
meth |
SmolLM3Model.init |
2 | 1 | 0 |
attr |
SmolLM3Model.padding_idx |
1 | 0 | 0 |
attr |
SmolLM3Model.vocab_size |
1 | 0 | 0 |
attr |
SmolLM3Model.embed_tokens |
1 | 0 | 0 |
attr |
SmolLM3Model.layers |
1 | 0 | 0 |
attr |
SmolLM3Model.norm |
1 | 0 | 0 |
attr |
SmolLM3Model.rotary_emb |
1 | 0 | 0 |
attr |
SmolLM3Model.gradient_checkpointing |
1 | 0 | 0 |
attr |
SmolLM3Model.has_sliding_layers |
1 | 0 | 0 |
transformers.models.smollm3.modular_smollm3 (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SmolLM3Config.init |
26 | 24 | 0 |
attr |
SmolLM3Config.pad_token_id |
1 | 0 | 0 |
attr |
SmolLM3Config.bos_token_id |
1 | 0 | 0 |
attr |
SmolLM3Config.eos_token_id |
1 | 0 | 0 |
attr |
SmolLM3Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
SmolLM3Config.vocab_size |
1 | 0 | 0 |
attr |
SmolLM3Config.max_position_embeddings |
1 | 0 | 0 |
attr |
SmolLM3Config.mlp_bias |
1 | 0 | 0 |
attr |
SmolLM3Config.hidden_size |
1 | 0 | 0 |
attr |
SmolLM3Config.intermediate_size |
1 | 0 | 0 |
attr |
SmolLM3Config.num_hidden_layers |
1 | 0 | 0 |
attr |
SmolLM3Config.num_attention_heads |
1 | 0 | 0 |
attr |
SmolLM3Config.use_sliding_window |
1 | 0 | 0 |
attr |
SmolLM3Config.sliding_window |
1 | 0 | 0 |
attr |
SmolLM3Config.num_key_value_heads |
1 | 0 | 0 |
attr |
SmolLM3Config.hidden_act |
1 | 0 | 0 |
attr |
SmolLM3Config.initializer_range |
1 | 0 | 0 |
attr |
SmolLM3Config.rms_norm_eps |
1 | 0 | 0 |
attr |
SmolLM3Config.use_cache |
1 | 0 | 0 |
attr |
SmolLM3Config.attention_bias |
1 | 0 | 0 |
attr |
SmolLM3Config.attention_dropout |
1 | 0 | 0 |
attr |
SmolLM3Config.no_rope_layer_interval |
1 | 0 | 0 |
attr |
SmolLM3Config.layer_types |
1 | 0 | 0 |
attr |
SmolLM3Config.rope_parameters |
1 | 0 | 0 |
attr |
SmolLM3Config.no_rope_layers |
1 | 0 | 0 |
transformers.models.smolvlm.configuration_smolvlm (39 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SmolVLMVisionConfig.init |
13 | 0 | 0 |
attr |
SmolVLMVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
SmolVLMVisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
SmolVLMVisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
SmolVLMVisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
SmolVLMVisionConfig.num_channels |
1 | 0 | 0 |
attr |
SmolVLMVisionConfig.patch_size |
1 | 0 | 0 |
attr |
SmolVLMVisionConfig.image_size |
1 | 0 | 0 |
attr |
SmolVLMVisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
SmolVLMVisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
SmolVLMVisionConfig.hidden_act |
1 | 0 | 0 |
attr |
SmolVLMVisionConfig.initializer_range |
1 | 0 | 0 |
meth |
SmolVLMConfig.init |
9 | 0 | 0 |
attr |
SmolVLMConfig.image_token_id |
1 | 0 | 0 |
attr |
SmolVLMConfig.use_cache |
1 | 0 | 0 |
attr |
SmolVLMConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
SmolVLMConfig.text_config |
1 | 0 | 0 |
attr |
SmolVLMConfig.scale_factor |
1 | 0 | 0 |
attr |
SmolVLMConfig.vision_config |
1 | 0 | 0 |
transformers.models.smolvlm.image_processing_smolvlm (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SmolVLMImageProcessor.init |
14 | 13 | 0 |
meth |
SmolVLMImageProcessor.resize |
7 | 6 | 0 |
meth |
SmolVLMImageProcessor.split_image |
6 | 4 | 0 |
meth |
SmolVLMImageProcessor.resize_for_vision_encoder |
6 | 5 | 0 |
meth |
SmolVLMImageProcessor.preprocess |
18 | 17 | 0 |
meth |
SmolVLMImageProcessor.get_number_of_image_patches |
4 | 3 | 0 |
attr |
SmolVLMImageProcessor.do_convert_rgb |
1 | 0 | 0 |
attr |
SmolVLMImageProcessor.do_resize |
1 | 0 | 0 |
attr |
SmolVLMImageProcessor.size |
1 | 0 | 0 |
attr |
SmolVLMImageProcessor.resample |
1 | 0 | 0 |
attr |
SmolVLMImageProcessor.do_image_splitting |
1 | 0 | 0 |
attr |
SmolVLMImageProcessor.max_image_size |
1 | 0 | 0 |
attr |
SmolVLMImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
SmolVLMImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
SmolVLMImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
SmolVLMImageProcessor.image_mean |
1 | 0 | 0 |
attr |
SmolVLMImageProcessor.image_std |
1 | 0 | 0 |
attr |
SmolVLMImageProcessor.do_pad |
1 | 0 | 0 |
transformers.models.smolvlm.image_processing_smolvlm_fast (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SmolVLMImageProcessorFast.resize |
6 | 5 | 0 |
meth |
SmolVLMImageProcessorFast.split_images |
4 | 3 | 0 |
meth |
SmolVLMImageProcessorFast.resize_for_vision_encoder |
4 | 3 | 0 |
meth |
SmolVLMImageProcessorFast.pad |
5 | 4 | 0 |
meth |
SmolVLMImageProcessorFast._preprocess |
17 | 16 | 0 |
meth |
SmolVLMImageProcessorFast.to_dict |
1 | 0 | 0 |
meth |
SmolVLMImageProcessorFast.get_number_of_image_patches |
4 | 3 | 0 |
transformers.models.smolvlm.modeling_smolvlm (42 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SmolVLMModel.init |
2 | 1 | 0 |
meth |
SmolVLMModel.get_input_embeddings |
1 | 0 | 0 |
meth |
SmolVLMModel.set_input_embeddings |
2 | 0 | 0 |
meth |
SmolVLMModel.inputs_merger |
4 | 3 | 0 |
attr |
SmolVLMModel.padding_idx |
1 | 0 | 0 |
attr |
SmolVLMModel.vocab_size |
1 | 0 | 0 |
attr |
SmolVLMModel.vision_model |
1 | 0 | 0 |
attr |
SmolVLMModel.connector |
1 | 0 | 0 |
attr |
SmolVLMModel.text_model |
1 | 0 | 0 |
attr |
SmolVLMModel.image_seq_len |
1 | 0 | 0 |
attr |
SmolVLMModel.image_token_id |
1 | 0 | 0 |
meth |
SmolVLMVisionTransformer.init |
2 | 1 | 0 |
meth |
SmolVLMVisionTransformer.get_input_embeddings |
1 | 0 | 0 |
meth |
SmolVLMVisionTransformer.set_input_embeddings |
2 | 0 | 0 |
meth |
SmolVLMVisionTransformer.forward |
4 | 3 | 0 |
attr |
SmolVLMVisionTransformer.embeddings |
1 | 0 | 0 |
attr |
SmolVLMVisionTransformer.encoder |
1 | 0 | 0 |
attr |
SmolVLMVisionTransformer.patch_size |
1 | 0 | 0 |
attr |
SmolVLMVisionTransformer.post_layernorm |
1 | 0 | 0 |
meth |
SmolVLMForConditionalGeneration.init |
2 | 0 | 0 |
meth |
SmolVLMForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
SmolVLMForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
SmolVLMForConditionalGeneration.prepare_inputs_for_generation |
12 | 0 | 0 |
attr |
SmolVLMForConditionalGeneration.model |
1 | 0 | 0 |
attr |
SmolVLMForConditionalGeneration.image_token_id |
1 | 0 | 0 |
attr |
SmolVLMForConditionalGeneration.lm_head |
1 | 0 | 0 |
attr |
SmolVLMForConditionalGeneration.vocab_size |
1 | 0 | 0 |
transformers.models.smolvlm.modular_smolvlm (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SmolVLMModel.inputs_merger |
4 | 3 | 0 |
meth |
SmolVLMForConditionalGeneration.init |
2 | 0 | 0 |
meth |
SmolVLMForConditionalGeneration.forward |
2 | 0 | 0 |
attr |
SmolVLMForConditionalGeneration.model |
1 | 0 | 0 |
attr |
SmolVLMForConditionalGeneration.lm_head |
1 | 0 | 0 |
transformers.models.smolvlm.processing_smolvlm (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SmolVLMProcessor.init |
7 | 2 | 0 |
meth |
SmolVLMProcessor.expand_text_with_image_tokens |
4 | 0 | 0 |
meth |
SmolVLMProcessor.expand_text_with_video_tokens |
3 | 0 | 0 |
attr |
SmolVLMProcessor.fake_image_token |
1 | 0 | 0 |
attr |
SmolVLMProcessor.image_token |
1 | 0 | 0 |
attr |
SmolVLMProcessor.image_token_id |
1 | 0 | 0 |
attr |
SmolVLMProcessor.end_of_utterance_token |
1 | 0 | 0 |
attr |
SmolVLMProcessor.global_image_token |
1 | 0 | 0 |
attr |
SmolVLMProcessor.image_seq_len |
1 | 0 | 0 |
attr |
SmolVLMProcessor.video_token |
1 | 0 | 0 |
transformers.models.smolvlm.video_processing_smolvlm (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SmolVLMVideoProcessor.init |
2 | 1 | 0 |
meth |
SmolVLMVideoProcessor.resize |
6 | 5 | 0 |
meth |
SmolVLMVideoProcessor.pad |
6 | 5 | 0 |
meth |
SmolVLMVideoProcessor.sample_frames |
6 | 4 | 0 |
meth |
SmolVLMVideoProcessor._preprocess |
14 | 12 | 0 |
attr |
SmolVLMVideoProcessor.size |
1 | 0 | 0 |
attr |
SmolVLMVideoProcessor.num_frames |
1 | 0 | 0 |
attr |
SmolVLMVideoProcessor.fps |
1 | 0 | 0 |
transformers.models.solar_open.configuration_solar_open (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SolarOpenConfig.init |
28 | 26 | 0 |
attr |
SolarOpenConfig.head_dim |
1 | 0 | 0 |
attr |
SolarOpenConfig.vocab_size |
1 | 0 | 0 |
attr |
SolarOpenConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
SolarOpenConfig.hidden_size |
1 | 0 | 0 |
attr |
SolarOpenConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
SolarOpenConfig.num_attention_heads |
1 | 0 | 0 |
attr |
SolarOpenConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
SolarOpenConfig.hidden_act |
1 | 0 | 0 |
attr |
SolarOpenConfig.initializer_range |
1 | 0 | 0 |
attr |
SolarOpenConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
SolarOpenConfig.use_cache |
1 | 0 | 0 |
attr |
SolarOpenConfig.attention_bias |
1 | 0 | 0 |
attr |
SolarOpenConfig.attention_dropout |
1 | 0 | 0 |
attr |
SolarOpenConfig.rope_parameters |
1 | 0 | 0 |
attr |
SolarOpenConfig.moe_intermediate_size |
1 | 0 | 0 |
attr |
SolarOpenConfig.num_experts_per_tok |
1 | 0 | 0 |
attr |
SolarOpenConfig.n_group |
1 | 0 | 0 |
attr |
SolarOpenConfig.topk_group |
1 | 0 | 0 |
attr |
SolarOpenConfig.n_shared_experts |
1 | 0 | 0 |
attr |
SolarOpenConfig.n_routed_experts |
1 | 0 | 0 |
attr |
SolarOpenConfig.routed_scaling_factor |
1 | 0 | 0 |
attr |
SolarOpenConfig.norm_topk_prob |
1 | 0 | 0 |
attr |
SolarOpenConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
SolarOpenConfig.bos_token_id |
1 | 0 | 0 |
attr |
SolarOpenConfig.eos_token_id |
1 | 0 | 0 |
attr |
SolarOpenConfig.pad_token_id |
1 | 0 | 0 |
transformers.models.solar_open.modeling_solar_open (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SolarOpenForCausalLM.init |
2 | 0 | 0 |
attr |
SolarOpenForCausalLM.model |
1 | 0 | 0 |
attr |
SolarOpenForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
SolarOpenForCausalLM.lm_head |
1 | 0 | 0 |
meth |
SolarOpenModel.init |
2 | 1 | 0 |
attr |
SolarOpenModel.padding_idx |
1 | 0 | 0 |
attr |
SolarOpenModel.vocab_size |
1 | 0 | 0 |
attr |
SolarOpenModel.embed_tokens |
1 | 0 | 0 |
attr |
SolarOpenModel.layers |
1 | 0 | 0 |
attr |
SolarOpenModel.norm |
1 | 0 | 0 |
attr |
SolarOpenModel.rotary_emb |
1 | 0 | 0 |
attr |
SolarOpenModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
SolarOpenPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.solar_open.modular_solar_open (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SolarOpenConfig.init |
28 | 26 | 0 |
attr |
SolarOpenConfig.head_dim |
1 | 0 | 0 |
transformers.models.speech_encoder_decoder.configuration_speech_encoder_decoder (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SpeechEncoderDecoderConfig.init |
2 | 0 | 0 |
meth |
SpeechEncoderDecoderConfig.from_encoder_decoder_configs |
4 | 3 | 0 |
attr |
SpeechEncoderDecoderConfig.encoder |
1 | 0 | 0 |
attr |
SpeechEncoderDecoderConfig.decoder |
1 | 0 | 0 |
attr |
SpeechEncoderDecoderConfig.is_encoder_decoder |
1 | 0 | 0 |
transformers.models.speech_encoder_decoder.modeling_speech_encoder_decoder (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SpeechEncoderDecoderModel.init |
4 | 3 | 0 |
meth |
SpeechEncoderDecoderModel.get_input_embeddings |
1 | 0 | 0 |
meth |
SpeechEncoderDecoderModel.get_output_embeddings |
1 | 0 | 0 |
meth |
SpeechEncoderDecoderModel.set_output_embeddings |
2 | 0 | 0 |
meth |
SpeechEncoderDecoderModel.freeze_feature_encoder |
1 | 0 | 0 |
meth |
SpeechEncoderDecoderModel.from_encoder_decoder_pretrained |
5 | 3 | 0 |
meth |
SpeechEncoderDecoderModel.forward |
16 | 15 | 0 |
meth |
SpeechEncoderDecoderModel.prepare_decoder_input_ids_from_labels |
2 | 1 | 0 |
meth |
SpeechEncoderDecoderModel.resize_token_embeddings |
3 | 0 | 0 |
attr |
SpeechEncoderDecoderModel.encoder |
1 | 0 | 0 |
attr |
SpeechEncoderDecoderModel.decoder |
1 | 0 | 0 |
attr |
SpeechEncoderDecoderModel.encoder_output_dim |
1 | 0 | 0 |
attr |
SpeechEncoderDecoderModel.enc_to_dec_proj |
1 | 0 | 0 |
transformers.models.speech_to_text.configuration_speech_to_text (62 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Speech2TextConfig.init |
32 | 0 | 0 |
attr |
Speech2TextConfig.vocab_size |
1 | 0 | 0 |
attr |
Speech2TextConfig.d_model |
1 | 0 | 0 |
attr |
Speech2TextConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
Speech2TextConfig.encoder_layers |
1 | 0 | 0 |
attr |
Speech2TextConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
Speech2TextConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
Speech2TextConfig.decoder_layers |
1 | 0 | 0 |
attr |
Speech2TextConfig.decoder_attention_heads |
1 | 0 | 0 |
attr |
Speech2TextConfig.dropout |
1 | 0 | 0 |
attr |
Speech2TextConfig.attention_dropout |
1 | 0 | 0 |
attr |
Speech2TextConfig.activation_dropout |
1 | 0 | 0 |
attr |
Speech2TextConfig.activation_function |
1 | 0 | 0 |
attr |
Speech2TextConfig.init_std |
1 | 0 | 0 |
attr |
Speech2TextConfig.encoder_layerdrop |
1 | 0 | 0 |
attr |
Speech2TextConfig.decoder_layerdrop |
1 | 0 | 0 |
attr |
Speech2TextConfig.use_cache |
1 | 0 | 0 |
attr |
Speech2TextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Speech2TextConfig.scale_embedding |
1 | 0 | 0 |
attr |
Speech2TextConfig.max_source_positions |
1 | 0 | 0 |
attr |
Speech2TextConfig.max_target_positions |
1 | 0 | 0 |
attr |
Speech2TextConfig.num_conv_layers |
1 | 0 | 0 |
attr |
Speech2TextConfig.conv_kernel_sizes |
1 | 0 | 0 |
attr |
Speech2TextConfig.conv_channels |
1 | 0 | 0 |
attr |
Speech2TextConfig.input_feat_per_channel |
1 | 0 | 0 |
attr |
Speech2TextConfig.input_channels |
1 | 0 | 0 |
attr |
Speech2TextConfig.pad_token_id |
1 | 0 | 0 |
attr |
Speech2TextConfig.bos_token_id |
1 | 0 | 0 |
attr |
Speech2TextConfig.eos_token_id |
1 | 0 | 0 |
attr |
Speech2TextConfig.decoder_start_token_id |
1 | 0 | 0 |
attr |
Speech2TextConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.speech_to_text.feature_extraction_speech_to_text (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Speech2TextFeatureExtractor.init |
10 | 0 | 0 |
meth |
Speech2TextFeatureExtractor.call |
10 | 9 | 0 |
attr |
Speech2TextFeatureExtractor.num_mel_bins |
1 | 0 | 0 |
attr |
Speech2TextFeatureExtractor.dither |
1 | 0 | 0 |
attr |
Speech2TextFeatureExtractor.do_ceptral_normalize |
1 | 0 | 0 |
attr |
Speech2TextFeatureExtractor.normalize_means |
1 | 0 | 0 |
attr |
Speech2TextFeatureExtractor.normalize_vars |
1 | 0 | 0 |
attr |
Speech2TextFeatureExtractor.return_attention_mask |
1 | 0 | 0 |
attr |
Speech2TextFeatureExtractor.mel_filters |
1 | 0 | 0 |
attr |
Speech2TextFeatureExtractor.window |
1 | 0 | 0 |
transformers.models.speech_to_text.modeling_speech_to_text (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Speech2TextForConditionalGeneration.init |
2 | 1 | 0 |
meth |
Speech2TextForConditionalGeneration.forward |
15 | 14 | 0 |
attr |
Speech2TextForConditionalGeneration.model |
1 | 0 | 0 |
attr |
Speech2TextForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
Speech2TextModel.init |
2 | 1 | 0 |
meth |
Speech2TextModel.get_input_embeddings |
1 | 0 | 0 |
meth |
Speech2TextModel.set_input_embeddings |
2 | 0 | 0 |
meth |
Speech2TextModel.forward |
14 | 13 | 0 |
attr |
Speech2TextModel.encoder |
1 | 0 | 0 |
attr |
Speech2TextModel.decoder |
1 | 0 | 0 |
meth |
Speech2TextPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Speech2TextPreTrainedModel._get_feat_extract_output_lengths |
2 | 1 | 0 |
meth |
Speech2TextPreTrainedModel._get_feature_vector_attention_mask |
3 | 0 | 0 |
transformers.models.speech_to_text.processing_speech_to_text (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Speech2TextProcessor.init |
3 | 0 | 0 |
meth |
Speech2TextProcessor.call |
3 | 0 | 0 |
transformers.models.speech_to_text.tokenization_speech_to_text (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Speech2TextTokenizer.init |
14 | 2 | 0 |
meth |
Speech2TextTokenizer._convert_token_to_id |
2 | 0 | 0 |
meth |
Speech2TextTokenizer.build_inputs_with_special_tokens |
3 | 1 | 0 |
prop |
Speech2TextTokenizer.tgt_lang |
2 | 1 | 0 |
attr |
Speech2TextTokenizer.sp_model_kwargs |
1 | 0 | 0 |
attr |
Speech2TextTokenizer.do_upper_case |
1 | 0 | 0 |
attr |
Speech2TextTokenizer.do_lower_case |
1 | 0 | 0 |
attr |
Speech2TextTokenizer.encoder |
1 | 0 | 0 |
attr |
Speech2TextTokenizer.decoder |
1 | 0 | 0 |
attr |
Speech2TextTokenizer.spm_file |
1 | 0 | 0 |
attr |
Speech2TextTokenizer.sp_model |
1 | 0 | 0 |
attr |
Speech2TextTokenizer.lang_codes |
1 | 0 | 0 |
attr |
Speech2TextTokenizer.langs |
1 | 0 | 0 |
attr |
Speech2TextTokenizer.lang_tokens |
1 | 0 | 0 |
attr |
Speech2TextTokenizer.lang_code_to_id |
1 | 0 | 0 |
transformers.models.speecht5.configuration_speecht5 (142 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SpeechT5HifiGanConfig.init |
12 | 0 | 0 |
attr |
SpeechT5HifiGanConfig.model_in_dim |
1 | 0 | 0 |
attr |
SpeechT5HifiGanConfig.sampling_rate |
1 | 0 | 0 |
attr |
SpeechT5HifiGanConfig.upsample_initial_channel |
1 | 0 | 0 |
attr |
SpeechT5HifiGanConfig.upsample_rates |
1 | 0 | 0 |
attr |
SpeechT5HifiGanConfig.upsample_kernel_sizes |
1 | 0 | 0 |
attr |
SpeechT5HifiGanConfig.resblock_kernel_sizes |
1 | 0 | 0 |
attr |
SpeechT5HifiGanConfig.resblock_dilation_sizes |
1 | 0 | 0 |
attr |
SpeechT5HifiGanConfig.initializer_range |
1 | 0 | 0 |
attr |
SpeechT5HifiGanConfig.leaky_relu_slope |
1 | 0 | 0 |
attr |
SpeechT5HifiGanConfig.normalize_before |
1 | 0 | 0 |
meth |
SpeechT5Config.init |
60 | 0 | 0 |
meth |
SpeechT5Config.inputs_to_logits_ratio |
1 | 0 | 0 |
attr |
SpeechT5Config.vocab_size |
1 | 0 | 0 |
attr |
SpeechT5Config.hidden_size |
1 | 0 | 0 |
attr |
SpeechT5Config.encoder_layers |
1 | 0 | 0 |
attr |
SpeechT5Config.encoder_ffn_dim |
1 | 0 | 0 |
attr |
SpeechT5Config.encoder_attention_heads |
1 | 0 | 0 |
attr |
SpeechT5Config.encoder_layerdrop |
1 | 0 | 0 |
attr |
SpeechT5Config.decoder_layers |
1 | 0 | 0 |
attr |
SpeechT5Config.decoder_ffn_dim |
1 | 0 | 0 |
attr |
SpeechT5Config.decoder_attention_heads |
1 | 0 | 0 |
attr |
SpeechT5Config.decoder_layerdrop |
1 | 0 | 0 |
attr |
SpeechT5Config.hidden_act |
1 | 0 | 0 |
attr |
SpeechT5Config.positional_dropout |
1 | 0 | 0 |
attr |
SpeechT5Config.hidden_dropout |
1 | 0 | 0 |
attr |
SpeechT5Config.attention_dropout |
1 | 0 | 0 |
attr |
SpeechT5Config.activation_dropout |
1 | 0 | 0 |
attr |
SpeechT5Config.initializer_range |
1 | 0 | 0 |
attr |
SpeechT5Config.layer_norm_eps |
1 | 0 | 0 |
attr |
SpeechT5Config.scale_embedding |
1 | 0 | 0 |
attr |
SpeechT5Config.feat_extract_norm |
1 | 0 | 0 |
attr |
SpeechT5Config.feat_proj_dropout |
1 | 0 | 0 |
attr |
SpeechT5Config.feat_extract_activation |
1 | 0 | 0 |
attr |
SpeechT5Config.conv_dim |
1 | 0 | 0 |
attr |
SpeechT5Config.conv_stride |
1 | 0 | 0 |
attr |
SpeechT5Config.conv_kernel |
1 | 0 | 0 |
attr |
SpeechT5Config.conv_bias |
1 | 0 | 0 |
attr |
SpeechT5Config.num_conv_pos_embeddings |
1 | 0 | 0 |
attr |
SpeechT5Config.num_conv_pos_embedding_groups |
1 | 0 | 0 |
attr |
SpeechT5Config.num_feat_extract_layers |
1 | 0 | 0 |
attr |
SpeechT5Config.apply_spec_augment |
1 | 0 | 0 |
attr |
SpeechT5Config.mask_time_prob |
1 | 0 | 0 |
attr |
SpeechT5Config.mask_time_length |
1 | 0 | 0 |
attr |
SpeechT5Config.mask_time_min_masks |
1 | 0 | 0 |
attr |
SpeechT5Config.mask_feature_prob |
1 | 0 | 0 |
attr |
SpeechT5Config.mask_feature_length |
1 | 0 | 0 |
attr |
SpeechT5Config.mask_feature_min_masks |
1 | 0 | 0 |
attr |
SpeechT5Config.num_mel_bins |
1 | 0 | 0 |
attr |
SpeechT5Config.speech_decoder_prenet_layers |
1 | 0 | 0 |
attr |
SpeechT5Config.speech_decoder_prenet_units |
1 | 0 | 0 |
attr |
SpeechT5Config.speech_decoder_prenet_dropout |
1 | 0 | 0 |
attr |
SpeechT5Config.speaker_embedding_dim |
1 | 0 | 0 |
attr |
SpeechT5Config.speech_decoder_postnet_layers |
1 | 0 | 0 |
attr |
SpeechT5Config.speech_decoder_postnet_units |
1 | 0 | 0 |
attr |
SpeechT5Config.speech_decoder_postnet_kernel |
1 | 0 | 0 |
attr |
SpeechT5Config.speech_decoder_postnet_dropout |
1 | 0 | 0 |
attr |
SpeechT5Config.reduction_factor |
1 | 0 | 0 |
attr |
SpeechT5Config.max_speech_positions |
1 | 0 | 0 |
attr |
SpeechT5Config.max_text_positions |
1 | 0 | 0 |
attr |
SpeechT5Config.encoder_max_relative_position |
1 | 0 | 0 |
attr |
SpeechT5Config.use_guided_attention_loss |
1 | 0 | 0 |
attr |
SpeechT5Config.guided_attention_loss_num_heads |
1 | 0 | 0 |
attr |
SpeechT5Config.guided_attention_loss_sigma |
1 | 0 | 0 |
attr |
SpeechT5Config.guided_attention_loss_scale |
1 | 0 | 0 |
attr |
SpeechT5Config.use_cache |
1 | 0 | 0 |
attr |
SpeechT5Config.is_encoder_decoder |
1 | 0 | 0 |
attr |
SpeechT5Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
SpeechT5Config.pad_token_id |
1 | 0 | 0 |
attr |
SpeechT5Config.bos_token_id |
1 | 0 | 0 |
attr |
SpeechT5Config.eos_token_id |
1 | 0 | 0 |
attr |
SpeechT5Config.decoder_start_token_id |
1 | 0 | 0 |
transformers.models.speecht5.feature_extraction_speecht5 (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SpeechT5FeatureExtractor.init |
14 | 12 | 0 |
meth |
SpeechT5FeatureExtractor.call |
11 | 10 | 0 |
meth |
SpeechT5FeatureExtractor._process_audio |
10 | 9 | 0 |
attr |
SpeechT5FeatureExtractor.do_normalize |
1 | 0 | 0 |
attr |
SpeechT5FeatureExtractor.return_attention_mask |
1 | 0 | 0 |
attr |
SpeechT5FeatureExtractor.num_mel_bins |
1 | 0 | 0 |
attr |
SpeechT5FeatureExtractor.hop_length |
1 | 0 | 0 |
attr |
SpeechT5FeatureExtractor.win_length |
1 | 0 | 0 |
attr |
SpeechT5FeatureExtractor.win_function |
1 | 0 | 0 |
attr |
SpeechT5FeatureExtractor.fmin |
1 | 0 | 0 |
attr |
SpeechT5FeatureExtractor.fmax |
1 | 0 | 0 |
attr |
SpeechT5FeatureExtractor.mel_floor |
1 | 0 | 0 |
attr |
SpeechT5FeatureExtractor.sample_size |
1 | 0 | 0 |
attr |
SpeechT5FeatureExtractor.sample_stride |
1 | 0 | 0 |
attr |
SpeechT5FeatureExtractor.n_fft |
1 | 0 | 0 |
attr |
SpeechT5FeatureExtractor.n_freqs |
1 | 0 | 0 |
attr |
SpeechT5FeatureExtractor.window |
1 | 0 | 0 |
attr |
SpeechT5FeatureExtractor.mel_filters |
1 | 0 | 0 |
transformers.models.speecht5.modeling_speecht5 (39 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SpeechT5PreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
SpeechT5Model.init |
4 | 3 | 0 |
meth |
SpeechT5Model.get_input_embeddings |
1 | 0 | 0 |
meth |
SpeechT5Model.set_input_embeddings |
2 | 0 | 0 |
meth |
SpeechT5Model.freeze_feature_encoder |
1 | 0 | 0 |
meth |
SpeechT5Model.forward |
14 | 13 | 0 |
attr |
SpeechT5Model.encoder |
1 | 0 | 0 |
attr |
SpeechT5Model.decoder |
1 | 0 | 0 |
meth |
SpeechT5ForSpeechToText.init |
2 | 1 | 0 |
meth |
SpeechT5ForSpeechToText.freeze_feature_encoder |
1 | 0 | 0 |
meth |
SpeechT5ForSpeechToText.get_output_embeddings |
1 | 0 | 0 |
meth |
SpeechT5ForSpeechToText.set_output_embeddings |
2 | 0 | 0 |
meth |
SpeechT5ForSpeechToText.forward |
14 | 13 | 0 |
attr |
SpeechT5ForSpeechToText.speecht5 |
1 | 0 | 0 |
attr |
SpeechT5ForSpeechToText.text_decoder_postnet |
1 | 0 | 0 |
meth |
SpeechT5HifiGan.init |
2 | 1 | 0 |
meth |
SpeechT5HifiGan._init_weights |
2 | 0 | 0 |
meth |
SpeechT5HifiGan.apply_weight_norm |
1 | 0 | 0 |
meth |
SpeechT5HifiGan.remove_weight_norm |
1 | 0 | 0 |
meth |
SpeechT5HifiGan.forward |
3 | 2 | 0 |
attr |
SpeechT5HifiGan.num_kernels |
1 | 0 | 0 |
attr |
SpeechT5HifiGan.num_upsamples |
1 | 0 | 0 |
attr |
SpeechT5HifiGan.conv_pre |
1 | 0 | 0 |
attr |
SpeechT5HifiGan.upsampler |
1 | 0 | 0 |
attr |
SpeechT5HifiGan.resblocks |
1 | 0 | 0 |
attr |
SpeechT5HifiGan.conv_post |
1 | 0 | 0 |
meth |
SpeechT5ForSpeechToSpeech.init |
2 | 1 | 0 |
meth |
SpeechT5ForSpeechToSpeech.freeze_feature_encoder |
1 | 0 | 0 |
meth |
SpeechT5ForSpeechToSpeech.forward |
16 | 15 | 0 |
attr |
SpeechT5ForSpeechToSpeech.speecht5 |
1 | 0 | 0 |
attr |
SpeechT5ForSpeechToSpeech.speech_decoder_postnet |
1 | 0 | 0 |
meth |
SpeechT5ForTextToSpeech.init |
2 | 1 | 0 |
meth |
SpeechT5ForTextToSpeech.forward |
16 | 15 | 0 |
meth |
SpeechT5ForTextToSpeech.generate |
11 | 10 | 0 |
attr |
SpeechT5ForTextToSpeech.speecht5 |
1 | 0 | 0 |
attr |
SpeechT5ForTextToSpeech.speech_decoder_postnet |
1 | 0 | 0 |
transformers.models.speecht5.number_normalizer (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EnglishNumberNormalizer.init |
1 | 0 | 0 |
meth |
EnglishNumberNormalizer.spell_number |
2 | 0 | 0 |
meth |
EnglishNumberNormalizer.convert |
2 | 0 | 0 |
meth |
EnglishNumberNormalizer.call |
2 | 0 | 0 |
attr |
EnglishNumberNormalizer.ones |
1 | 0 | 0 |
attr |
EnglishNumberNormalizer.teens |
1 | 0 | 0 |
attr |
EnglishNumberNormalizer.tens |
1 | 0 | 0 |
attr |
EnglishNumberNormalizer.thousands |
1 | 0 | 0 |
attr |
EnglishNumberNormalizer.currency_symbols |
1 | 0 | 0 |
transformers.models.speecht5.processing_speecht5 (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SpeechT5Processor.init |
3 | 0 | 0 |
meth |
SpeechT5Processor.call |
3 | 0 | 0 |
meth |
SpeechT5Processor.pad |
3 | 0 | 0 |
transformers.models.speecht5.tokenization_speecht5 (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SpeechT5Tokenizer.init |
9 | 2 | 0 |
meth |
SpeechT5Tokenizer.prepare_for_tokenization |
4 | 0 | 0 |
meth |
SpeechT5Tokenizer.build_inputs_with_special_tokens |
3 | 1 | 0 |
prop |
SpeechT5Tokenizer.normalizer |
2 | 0 | 0 |
attr |
SpeechT5Tokenizer.normalize |
1 | 0 | 0 |
transformers.models.splinter.configuration_splinter (34 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SplinterConfig.init |
18 | 0 | 0 |
attr |
SplinterConfig.pad_token_id |
1 | 0 | 0 |
attr |
SplinterConfig.bos_token_id |
1 | 0 | 0 |
attr |
SplinterConfig.eos_token_id |
1 | 0 | 0 |
attr |
SplinterConfig.vocab_size |
1 | 0 | 0 |
attr |
SplinterConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
SplinterConfig.hidden_size |
1 | 0 | 0 |
attr |
SplinterConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
SplinterConfig.num_attention_heads |
1 | 0 | 0 |
attr |
SplinterConfig.intermediate_size |
1 | 0 | 0 |
attr |
SplinterConfig.hidden_act |
1 | 0 | 0 |
attr |
SplinterConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
SplinterConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
SplinterConfig.initializer_range |
1 | 0 | 0 |
attr |
SplinterConfig.type_vocab_size |
1 | 0 | 0 |
attr |
SplinterConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
SplinterConfig.question_token_id |
1 | 0 | 0 |
transformers.models.splinter.modeling_splinter (31 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SplinterModel.init |
2 | 0 | 0 |
meth |
SplinterModel.get_input_embeddings |
1 | 0 | 0 |
meth |
SplinterModel.set_input_embeddings |
2 | 0 | 0 |
meth |
SplinterModel.forward |
10 | 9 | 0 |
attr |
SplinterModel.embeddings |
1 | 0 | 0 |
attr |
SplinterModel.encoder |
1 | 0 | 0 |
meth |
SplinterPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
SplinterLayer.init |
2 | 0 | 0 |
meth |
SplinterLayer.feed_forward_chunk |
2 | 0 | 0 |
attr |
SplinterLayer.chunk_size_feed_forward |
1 | 0 | 0 |
attr |
SplinterLayer.seq_len_dim |
1 | 0 | 0 |
attr |
SplinterLayer.attention |
1 | 0 | 0 |
attr |
SplinterLayer.intermediate |
1 | 0 | 0 |
attr |
SplinterLayer.output |
1 | 0 | 0 |
meth |
SplinterForQuestionAnswering.init |
2 | 0 | 0 |
meth |
SplinterForQuestionAnswering.forward |
13 | 12 | 0 |
attr |
SplinterForQuestionAnswering.splinter |
1 | 0 | 0 |
attr |
SplinterForQuestionAnswering.splinter_qass |
1 | 0 | 0 |
attr |
SplinterForQuestionAnswering.question_token_id |
1 | 0 | 0 |
meth |
SplinterForPreTraining.init |
2 | 0 | 0 |
meth |
SplinterForPreTraining.forward |
13 | 12 | 0 |
attr |
SplinterForPreTraining.splinter |
1 | 0 | 0 |
attr |
SplinterForPreTraining.splinter_qass |
1 | 0 | 0 |
attr |
SplinterForPreTraining.question_token_id |
1 | 0 | 0 |
transformers.models.splinter.tokenization_splinter (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SplinterTokenizer.init |
12 | 10 | 0 |
meth |
SplinterTokenizer.update_post_processor |
1 | 0 | 0 |
prop |
SplinterTokenizer.question_token_id |
1 | 0 | 0 |
attr |
SplinterTokenizer.do_lower_case |
1 | 0 | 0 |
attr |
SplinterTokenizer.tokenize_chinese_chars |
1 | 0 | 0 |
attr |
SplinterTokenizer.strip_accents |
1 | 0 | 0 |
attr |
SplinterTokenizer.question_token |
1 | 0 | 0 |
transformers.models.squeezebert.configuration_squeezebert (48 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SqueezeBertConfig.init |
25 | 0 | 0 |
attr |
SqueezeBertConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
SqueezeBertConfig.pad_token_id |
1 | 0 | 0 |
attr |
SqueezeBertConfig.bos_token_id |
1 | 0 | 0 |
attr |
SqueezeBertConfig.eos_token_id |
1 | 0 | 0 |
attr |
SqueezeBertConfig.vocab_size |
1 | 0 | 0 |
attr |
SqueezeBertConfig.hidden_size |
1 | 0 | 0 |
attr |
SqueezeBertConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
SqueezeBertConfig.num_attention_heads |
1 | 0 | 0 |
attr |
SqueezeBertConfig.hidden_act |
1 | 0 | 0 |
attr |
SqueezeBertConfig.intermediate_size |
1 | 0 | 0 |
attr |
SqueezeBertConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
SqueezeBertConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
SqueezeBertConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
SqueezeBertConfig.type_vocab_size |
1 | 0 | 0 |
attr |
SqueezeBertConfig.initializer_range |
1 | 0 | 0 |
attr |
SqueezeBertConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
SqueezeBertConfig.embedding_size |
1 | 0 | 0 |
attr |
SqueezeBertConfig.q_groups |
1 | 0 | 0 |
attr |
SqueezeBertConfig.k_groups |
1 | 0 | 0 |
attr |
SqueezeBertConfig.v_groups |
1 | 0 | 0 |
attr |
SqueezeBertConfig.post_attention_groups |
1 | 0 | 0 |
attr |
SqueezeBertConfig.intermediate_groups |
1 | 0 | 0 |
attr |
SqueezeBertConfig.output_groups |
1 | 0 | 0 |
transformers.models.squeezebert.modeling_squeezebert (55 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SqueezeBertForQuestionAnswering.init |
2 | 0 | 0 |
meth |
SqueezeBertForQuestionAnswering.forward |
12 | 11 | 0 |
attr |
SqueezeBertForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
SqueezeBertForQuestionAnswering.transformer |
1 | 0 | 0 |
attr |
SqueezeBertForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
SqueezeBertForMultipleChoice.init |
2 | 0 | 0 |
meth |
SqueezeBertForMultipleChoice.forward |
11 | 10 | 0 |
attr |
SqueezeBertForMultipleChoice.transformer |
1 | 0 | 0 |
attr |
SqueezeBertForMultipleChoice.dropout |
1 | 0 | 0 |
attr |
SqueezeBertForMultipleChoice.classifier |
1 | 0 | 0 |
meth |
SqueezeBertForMaskedLM.init |
2 | 0 | 0 |
meth |
SqueezeBertForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
SqueezeBertForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
meth |
SqueezeBertForMaskedLM.forward |
11 | 10 | 0 |
attr |
SqueezeBertForMaskedLM.transformer |
1 | 0 | 0 |
attr |
SqueezeBertForMaskedLM.cls |
1 | 0 | 0 |
meth |
SqueezeBertPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
SqueezeBertModule.init |
2 | 0 | 0 |
meth |
SqueezeBertModule.forward |
4 | 0 | 0 |
attr |
SqueezeBertModule.attention |
1 | 0 | 0 |
attr |
SqueezeBertModule.post_attention |
1 | 0 | 0 |
attr |
SqueezeBertModule.intermediate |
1 | 0 | 0 |
attr |
SqueezeBertModule.output |
1 | 0 | 0 |
meth |
SqueezeBertModel.init |
2 | 0 | 0 |
meth |
SqueezeBertModel.get_input_embeddings |
1 | 0 | 0 |
meth |
SqueezeBertModel.set_input_embeddings |
2 | 0 | 0 |
meth |
SqueezeBertModel.forward |
10 | 9 | 0 |
attr |
SqueezeBertModel.embeddings |
1 | 0 | 0 |
attr |
SqueezeBertModel.encoder |
1 | 0 | 0 |
attr |
SqueezeBertModel.pooler |
1 | 0 | 0 |
meth |
SqueezeBertForTokenClassification.init |
2 | 0 | 0 |
meth |
SqueezeBertForTokenClassification.forward |
11 | 10 | 0 |
attr |
SqueezeBertForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
SqueezeBertForTokenClassification.transformer |
1 | 0 | 0 |
attr |
SqueezeBertForTokenClassification.dropout |
1 | 0 | 0 |
attr |
SqueezeBertForTokenClassification.classifier |
1 | 0 | 0 |
meth |
SqueezeBertForSequenceClassification.init |
2 | 0 | 0 |
meth |
SqueezeBertForSequenceClassification.forward |
11 | 10 | 0 |
attr |
SqueezeBertForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
SqueezeBertForSequenceClassification.transformer |
1 | 0 | 0 |
attr |
SqueezeBertForSequenceClassification.dropout |
1 | 0 | 0 |
attr |
SqueezeBertForSequenceClassification.classifier |
1 | 0 | 0 |
transformers.models.stablelm.configuration_stablelm (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
StableLmConfig.init |
23 | 21 | 0 |
attr |
StableLmConfig.vocab_size |
1 | 0 | 0 |
attr |
StableLmConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
StableLmConfig.hidden_size |
1 | 0 | 0 |
attr |
StableLmConfig.intermediate_size |
1 | 0 | 0 |
attr |
StableLmConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
StableLmConfig.num_attention_heads |
1 | 0 | 0 |
attr |
StableLmConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
StableLmConfig.hidden_act |
1 | 0 | 0 |
attr |
StableLmConfig.initializer_range |
1 | 0 | 0 |
attr |
StableLmConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
StableLmConfig.use_cache |
1 | 0 | 0 |
attr |
StableLmConfig.use_qkv_bias |
1 | 0 | 0 |
attr |
StableLmConfig.qk_layernorm |
1 | 0 | 0 |
attr |
StableLmConfig.use_parallel_residual |
1 | 0 | 0 |
attr |
StableLmConfig.hidden_dropout |
1 | 0 | 0 |
attr |
StableLmConfig.attention_dropout |
1 | 0 | 0 |
attr |
StableLmConfig.rope_parameters |
1 | 0 | 0 |
attr |
StableLmConfig.bos_token_id |
1 | 0 | 0 |
attr |
StableLmConfig.eos_token_id |
1 | 0 | 0 |
attr |
StableLmConfig.pad_token_id |
1 | 0 | 0 |
attr |
StableLmConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.stablelm.modeling_stablelm (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
StableLmForCausalLM.init |
2 | 0 | 0 |
meth |
StableLmForCausalLM.forward |
13 | 12 | 0 |
attr |
StableLmForCausalLM.model |
1 | 0 | 0 |
attr |
StableLmForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
StableLmForCausalLM.lm_head |
1 | 0 | 0 |
meth |
StableLmModel.init |
2 | 1 | 0 |
meth |
StableLmModel.forward |
11 | 10 | 0 |
attr |
StableLmModel.padding_idx |
1 | 0 | 0 |
attr |
StableLmModel.vocab_size |
1 | 0 | 0 |
attr |
StableLmModel.embed_tokens |
1 | 0 | 0 |
attr |
StableLmModel.layers |
1 | 0 | 0 |
attr |
StableLmModel.norm |
1 | 0 | 0 |
attr |
StableLmModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
StableLmModel.rotary_emb |
1 | 0 | 0 |
transformers.models.starcoder2.configuration_starcoder2 (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Starcoder2Config.init |
23 | 21 | 0 |
attr |
Starcoder2Config.vocab_size |
1 | 0 | 0 |
attr |
Starcoder2Config.max_position_embeddings |
1 | 0 | 0 |
attr |
Starcoder2Config.hidden_size |
1 | 0 | 0 |
attr |
Starcoder2Config.intermediate_size |
1 | 0 | 0 |
attr |
Starcoder2Config.num_hidden_layers |
1 | 0 | 0 |
attr |
Starcoder2Config.num_attention_heads |
1 | 0 | 0 |
attr |
Starcoder2Config.sliding_window |
1 | 0 | 0 |
attr |
Starcoder2Config.use_bias |
1 | 0 | 0 |
attr |
Starcoder2Config.num_key_value_heads |
1 | 0 | 0 |
attr |
Starcoder2Config.hidden_act |
1 | 0 | 0 |
attr |
Starcoder2Config.initializer_range |
1 | 0 | 0 |
attr |
Starcoder2Config.norm_epsilon |
1 | 0 | 0 |
attr |
Starcoder2Config.use_cache |
1 | 0 | 0 |
attr |
Starcoder2Config.attention_dropout |
1 | 0 | 0 |
attr |
Starcoder2Config.residual_dropout |
1 | 0 | 0 |
attr |
Starcoder2Config.embedding_dropout |
1 | 0 | 0 |
attr |
Starcoder2Config.rope_parameters |
1 | 0 | 0 |
attr |
Starcoder2Config.bos_token_id |
1 | 0 | 0 |
attr |
Starcoder2Config.eos_token_id |
1 | 0 | 0 |
attr |
Starcoder2Config.pad_token_id |
1 | 0 | 0 |
attr |
Starcoder2Config.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.starcoder2.modeling_starcoder2 (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Starcoder2ForCausalLM.init |
2 | 0 | 0 |
attr |
Starcoder2ForCausalLM.model |
1 | 0 | 0 |
attr |
Starcoder2ForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Starcoder2ForCausalLM.lm_head |
1 | 0 | 0 |
meth |
Starcoder2Model.init |
2 | 1 | 0 |
attr |
Starcoder2Model.padding_idx |
1 | 0 | 0 |
attr |
Starcoder2Model.vocab_size |
1 | 0 | 0 |
attr |
Starcoder2Model.embed_tokens |
1 | 0 | 0 |
attr |
Starcoder2Model.layers |
1 | 0 | 0 |
attr |
Starcoder2Model.norm |
1 | 0 | 0 |
attr |
Starcoder2Model.rotary_emb |
1 | 0 | 0 |
attr |
Starcoder2Model.gradient_checkpointing |
1 | 0 | 0 |
attr |
Starcoder2Model.embedding_dropout |
1 | 0 | 0 |
transformers.models.starcoder2.modular_starcoder2 (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
Starcoder2PreTrainedModel |
1 | 0 | 0 |
meth |
Starcoder2Model.init |
2 | 1 | 0 |
attr |
Starcoder2Model.layers |
1 | 0 | 0 |
attr |
Starcoder2Model.norm |
1 | 0 | 0 |
attr |
Starcoder2Model.embedding_dropout |
1 | 0 | 0 |
transformers.models.superglue.configuration_superglue (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SuperGlueConfig.init |
11 | 8 | 0 |
attr |
SuperGlueConfig.gnn_layers_types |
1 | 0 | 0 |
attr |
SuperGlueConfig.keypoint_encoder_sizes |
1 | 0 | 0 |
attr |
SuperGlueConfig.hidden_size |
1 | 0 | 0 |
attr |
SuperGlueConfig.num_attention_heads |
1 | 0 | 0 |
attr |
SuperGlueConfig.sinkhorn_iterations |
1 | 0 | 0 |
attr |
SuperGlueConfig.matching_threshold |
1 | 0 | 0 |
attr |
SuperGlueConfig.keypoint_detector_config |
1 | 0 | 0 |
attr |
SuperGlueConfig.initializer_range |
1 | 0 | 0 |
attr |
SuperGlueConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
SuperGlueConfig.is_decoder |
1 | 0 | 0 |
transformers.models.superglue.image_processing_superglue (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SuperGlueImageProcessor.init |
8 | 7 | 0 |
meth |
SuperGlueImageProcessor.resize |
6 | 4 | 0 |
meth |
SuperGlueImageProcessor.preprocess |
12 | 10 | 0 |
meth |
SuperGlueImageProcessor._get_color |
2 | 0 | 0 |
attr |
SuperGlueImageProcessor.do_resize |
1 | 0 | 0 |
attr |
SuperGlueImageProcessor.size |
1 | 0 | 0 |
attr |
SuperGlueImageProcessor.resample |
1 | 0 | 0 |
attr |
SuperGlueImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
SuperGlueImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
SuperGlueImageProcessor.do_grayscale |
1 | 0 | 0 |
transformers.models.superglue.image_processing_superglue_fast (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SuperGlueImageProcessorFast.init |
2 | 1 | 0 |
meth |
SuperGlueImageProcessorFast._prepare_images_structure |
3 | 2 | 0 |
meth |
SuperGlueImageProcessorFast._preprocess |
11 | 10 | 0 |
meth |
SuperGlueImageProcessorFast.visualize_keypoint_matching |
3 | 2 | 0 |
meth |
SuperGlueImageProcessorFast._get_color |
2 | 0 | 0 |
transformers.models.superglue.modeling_superglue (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SuperGlueForKeypointMatching.forward |
7 | 6 | 0 |
attr |
SuperGlueForKeypointMatching.keypoint_detector |
1 | 0 | 0 |
attr |
SuperGlueForKeypointMatching.keypoint_encoder |
1 | 0 | 0 |
attr |
SuperGlueForKeypointMatching.gnn |
1 | 0 | 0 |
attr |
SuperGlueForKeypointMatching.final_projection |
1 | 0 | 0 |
transformers.models.superpoint.configuration_superpoint (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SuperPointConfig.init |
11 | 8 | 0 |
attr |
SuperPointConfig.encoder_hidden_sizes |
1 | 0 | 0 |
attr |
SuperPointConfig.decoder_hidden_size |
1 | 0 | 0 |
attr |
SuperPointConfig.keypoint_decoder_dim |
1 | 0 | 0 |
attr |
SuperPointConfig.descriptor_decoder_dim |
1 | 0 | 0 |
attr |
SuperPointConfig.keypoint_threshold |
1 | 0 | 0 |
attr |
SuperPointConfig.max_keypoints |
1 | 0 | 0 |
attr |
SuperPointConfig.nms_radius |
1 | 0 | 0 |
attr |
SuperPointConfig.border_removal_distance |
1 | 0 | 0 |
attr |
SuperPointConfig.initializer_range |
1 | 0 | 0 |
transformers.models.superpoint.image_processing_superpoint (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SuperPointImageProcessor.init |
8 | 7 | 0 |
meth |
SuperPointImageProcessor.resize |
6 | 4 | 0 |
meth |
SuperPointImageProcessor.preprocess |
12 | 10 | 0 |
attr |
SuperPointImageProcessor.do_resize |
1 | 0 | 0 |
attr |
SuperPointImageProcessor.size |
1 | 0 | 0 |
attr |
SuperPointImageProcessor.resample |
1 | 0 | 0 |
attr |
SuperPointImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
SuperPointImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
SuperPointImageProcessor.do_grayscale |
1 | 0 | 0 |
transformers.models.superpoint.image_processing_superpoint_fast (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SuperPointImageProcessorFast.init |
2 | 1 | 0 |
meth |
SuperPointImageProcessorFast._preprocess |
11 | 10 | 0 |
transformers.models.superpoint.modeling_superpoint (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SuperPointForKeypointDetection.forward |
6 | 5 | 0 |
attr |
SuperPointForKeypointDetection.encoder |
1 | 0 | 0 |
attr |
SuperPointForKeypointDetection.keypoint_decoder |
1 | 0 | 0 |
attr |
SuperPointForKeypointDetection.descriptor_decoder |
1 | 0 | 0 |
transformers.models.swiftformer.configuration_swiftformer (34 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SwiftFormerConfig.init |
18 | 0 | 0 |
attr |
SwiftFormerConfig.image_size |
1 | 0 | 0 |
attr |
SwiftFormerConfig.num_channels |
1 | 0 | 0 |
attr |
SwiftFormerConfig.depths |
1 | 0 | 0 |
attr |
SwiftFormerConfig.embed_dims |
1 | 0 | 0 |
attr |
SwiftFormerConfig.mlp_ratio |
1 | 0 | 0 |
attr |
SwiftFormerConfig.downsamples |
1 | 0 | 0 |
attr |
SwiftFormerConfig.hidden_act |
1 | 0 | 0 |
attr |
SwiftFormerConfig.down_patch_size |
1 | 0 | 0 |
attr |
SwiftFormerConfig.down_stride |
1 | 0 | 0 |
attr |
SwiftFormerConfig.down_pad |
1 | 0 | 0 |
attr |
SwiftFormerConfig.drop_path_rate |
1 | 0 | 0 |
attr |
SwiftFormerConfig.drop_mlp_rate |
1 | 0 | 0 |
attr |
SwiftFormerConfig.drop_conv_encoder_rate |
1 | 0 | 0 |
attr |
SwiftFormerConfig.use_layer_scale |
1 | 0 | 0 |
attr |
SwiftFormerConfig.layer_scale_init_value |
1 | 0 | 0 |
attr |
SwiftFormerConfig.batch_norm_eps |
1 | 0 | 0 |
transformers.models.swiftformer.modeling_swiftformer (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SwiftFormerForImageClassification.forward |
6 | 5 | 0 |
attr |
SwiftFormerForImageClassification.num_labels |
1 | 0 | 0 |
attr |
SwiftFormerForImageClassification.swiftformer |
1 | 0 | 0 |
attr |
SwiftFormerForImageClassification.norm |
1 | 0 | 0 |
attr |
SwiftFormerForImageClassification.head |
1 | 0 | 0 |
attr |
SwiftFormerForImageClassification.dist_head |
1 | 0 | 0 |
meth |
SwiftFormerModel.init |
2 | 1 | 0 |
meth |
SwiftFormerModel.forward |
5 | 4 | 0 |
attr |
SwiftFormerModel.patch_embed |
1 | 0 | 0 |
attr |
SwiftFormerModel.encoder |
1 | 0 | 0 |
transformers.models.swin.configuration_swin (41 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SwinConfig.init |
21 | 0 | 0 |
attr |
SwinConfig.image_size |
1 | 0 | 0 |
attr |
SwinConfig.patch_size |
1 | 0 | 0 |
attr |
SwinConfig.num_channels |
1 | 0 | 0 |
attr |
SwinConfig.embed_dim |
1 | 0 | 0 |
attr |
SwinConfig.depths |
1 | 0 | 0 |
attr |
SwinConfig.num_layers |
1 | 0 | 0 |
attr |
SwinConfig.num_heads |
1 | 0 | 0 |
attr |
SwinConfig.window_size |
1 | 0 | 0 |
attr |
SwinConfig.mlp_ratio |
1 | 0 | 0 |
attr |
SwinConfig.qkv_bias |
1 | 0 | 0 |
attr |
SwinConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
SwinConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
SwinConfig.drop_path_rate |
1 | 0 | 0 |
attr |
SwinConfig.hidden_act |
1 | 0 | 0 |
attr |
SwinConfig.use_absolute_embeddings |
1 | 0 | 0 |
attr |
SwinConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
SwinConfig.initializer_range |
1 | 0 | 0 |
attr |
SwinConfig.encoder_stride |
1 | 0 | 0 |
attr |
SwinConfig.hidden_size |
1 | 0 | 0 |
attr |
SwinConfig.stage_names |
1 | 0 | 0 |
transformers.models.swin.modeling_swin (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SwinForMaskedImageModeling.init |
2 | 0 | 0 |
meth |
SwinForMaskedImageModeling.forward |
8 | 7 | 0 |
attr |
SwinForMaskedImageModeling.swin |
1 | 0 | 0 |
attr |
SwinForMaskedImageModeling.decoder |
1 | 0 | 0 |
meth |
SwinModel.init |
4 | 0 | 0 |
meth |
SwinModel.get_input_embeddings |
1 | 0 | 0 |
meth |
SwinModel.forward |
8 | 7 | 0 |
attr |
SwinModel.num_layers |
1 | 0 | 0 |
attr |
SwinModel.num_features |
1 | 0 | 0 |
attr |
SwinModel.embeddings |
1 | 0 | 0 |
attr |
SwinModel.encoder |
1 | 0 | 0 |
attr |
SwinModel.layernorm |
1 | 0 | 0 |
attr |
SwinModel.pooler |
1 | 0 | 0 |
meth |
SwinPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
SwinBackbone.init |
2 | 1 | 0 |
meth |
SwinBackbone.get_input_embeddings |
1 | 0 | 0 |
meth |
SwinBackbone.forward |
6 | 5 | 0 |
attr |
SwinBackbone.num_features |
1 | 0 | 0 |
attr |
SwinBackbone.embeddings |
1 | 0 | 0 |
attr |
SwinBackbone.encoder |
1 | 0 | 0 |
attr |
SwinBackbone.hidden_states_norms |
1 | 0 | 0 |
meth |
SwinForImageClassification.init |
2 | 0 | 0 |
meth |
SwinForImageClassification.forward |
8 | 7 | 0 |
attr |
SwinForImageClassification.num_labels |
1 | 0 | 0 |
attr |
SwinForImageClassification.swin |
1 | 0 | 0 |
attr |
SwinForImageClassification.classifier |
1 | 0 | 0 |
transformers.models.swin2sr.configuration_swin2sr (45 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Swin2SRConfig.init |
23 | 0 | 0 |
attr |
Swin2SRConfig.image_size |
1 | 0 | 0 |
attr |
Swin2SRConfig.patch_size |
1 | 0 | 0 |
attr |
Swin2SRConfig.num_channels |
1 | 0 | 0 |
attr |
Swin2SRConfig.num_channels_out |
1 | 0 | 0 |
attr |
Swin2SRConfig.embed_dim |
1 | 0 | 0 |
attr |
Swin2SRConfig.depths |
1 | 0 | 0 |
attr |
Swin2SRConfig.num_layers |
1 | 0 | 0 |
attr |
Swin2SRConfig.num_heads |
1 | 0 | 0 |
attr |
Swin2SRConfig.window_size |
1 | 0 | 0 |
attr |
Swin2SRConfig.mlp_ratio |
1 | 0 | 0 |
attr |
Swin2SRConfig.qkv_bias |
1 | 0 | 0 |
attr |
Swin2SRConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
Swin2SRConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
Swin2SRConfig.drop_path_rate |
1 | 0 | 0 |
attr |
Swin2SRConfig.hidden_act |
1 | 0 | 0 |
attr |
Swin2SRConfig.use_absolute_embeddings |
1 | 0 | 0 |
attr |
Swin2SRConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
Swin2SRConfig.initializer_range |
1 | 0 | 0 |
attr |
Swin2SRConfig.upscale |
1 | 0 | 0 |
attr |
Swin2SRConfig.img_range |
1 | 0 | 0 |
attr |
Swin2SRConfig.resi_connection |
1 | 0 | 0 |
attr |
Swin2SRConfig.upsampler |
1 | 0 | 0 |
transformers.models.swin2sr.image_processing_swin2sr (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Swin2SRImageProcessor.init |
6 | 5 | 0 |
meth |
Swin2SRImageProcessor.pad |
5 | 4 | 0 |
meth |
Swin2SRImageProcessor.preprocess |
9 | 8 | 0 |
attr |
Swin2SRImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
Swin2SRImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
Swin2SRImageProcessor.do_pad |
1 | 0 | 0 |
attr |
Swin2SRImageProcessor.size_divisor |
1 | 0 | 0 |
transformers.models.swin2sr.image_processing_swin2sr_fast (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Swin2SRImageProcessorFast.init |
2 | 1 | 0 |
meth |
Swin2SRImageProcessorFast._preprocess |
9 | 8 | 0 |
transformers.models.swin2sr.modeling_swin2sr (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Swin2SRModel.init |
2 | 0 | 0 |
meth |
Swin2SRModel.get_input_embeddings |
1 | 0 | 0 |
meth |
Swin2SRModel.pad_and_normalize |
2 | 0 | 0 |
meth |
Swin2SRModel.forward |
6 | 5 | 0 |
attr |
Swin2SRModel.img_range |
1 | 0 | 0 |
attr |
Swin2SRModel.first_convolution |
1 | 0 | 0 |
attr |
Swin2SRModel.embeddings |
1 | 0 | 0 |
attr |
Swin2SRModel.encoder |
1 | 0 | 0 |
attr |
Swin2SRModel.layernorm |
1 | 0 | 0 |
attr |
Swin2SRModel.patch_unembed |
1 | 0 | 0 |
attr |
Swin2SRModel.conv_after_body |
1 | 0 | 0 |
meth |
Swin2SRPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Swin2SRForImageSuperResolution.init |
2 | 0 | 0 |
meth |
Swin2SRForImageSuperResolution.forward |
7 | 6 | 0 |
attr |
Swin2SRForImageSuperResolution.swin2sr |
1 | 0 | 0 |
attr |
Swin2SRForImageSuperResolution.upsampler |
1 | 0 | 0 |
attr |
Swin2SRForImageSuperResolution.upscale |
1 | 0 | 0 |
attr |
Swin2SRForImageSuperResolution.upsample |
1 | 0 | 0 |
attr |
Swin2SRForImageSuperResolution.final_convolution |
1 | 0 | 0 |
transformers.models.swinv2.configuration_swinv2 (43 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Swinv2Config.init |
22 | 0 | 0 |
attr |
Swinv2Config.image_size |
1 | 0 | 0 |
attr |
Swinv2Config.patch_size |
1 | 0 | 0 |
attr |
Swinv2Config.num_channels |
1 | 0 | 0 |
attr |
Swinv2Config.embed_dim |
1 | 0 | 0 |
attr |
Swinv2Config.depths |
1 | 0 | 0 |
attr |
Swinv2Config.num_layers |
1 | 0 | 0 |
attr |
Swinv2Config.num_heads |
1 | 0 | 0 |
attr |
Swinv2Config.window_size |
1 | 0 | 0 |
attr |
Swinv2Config.pretrained_window_sizes |
1 | 0 | 0 |
attr |
Swinv2Config.mlp_ratio |
1 | 0 | 0 |
attr |
Swinv2Config.qkv_bias |
1 | 0 | 0 |
attr |
Swinv2Config.hidden_dropout_prob |
1 | 0 | 0 |
attr |
Swinv2Config.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
Swinv2Config.drop_path_rate |
1 | 0 | 0 |
attr |
Swinv2Config.hidden_act |
1 | 0 | 0 |
attr |
Swinv2Config.use_absolute_embeddings |
1 | 0 | 0 |
attr |
Swinv2Config.layer_norm_eps |
1 | 0 | 0 |
attr |
Swinv2Config.initializer_range |
1 | 0 | 0 |
attr |
Swinv2Config.encoder_stride |
1 | 0 | 0 |
attr |
Swinv2Config.stage_names |
1 | 0 | 0 |
attr |
Swinv2Config.hidden_size |
1 | 0 | 0 |
transformers.models.swinv2.modeling_swinv2 (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Swinv2Backbone.init |
2 | 0 | 0 |
meth |
Swinv2Backbone.get_input_embeddings |
1 | 0 | 0 |
meth |
Swinv2Backbone.forward |
6 | 5 | 0 |
attr |
Swinv2Backbone.num_features |
1 | 0 | 0 |
attr |
Swinv2Backbone.embeddings |
1 | 0 | 0 |
attr |
Swinv2Backbone.encoder |
1 | 0 | 0 |
meth |
Swinv2ForMaskedImageModeling.init |
2 | 0 | 0 |
meth |
Swinv2ForMaskedImageModeling.forward |
8 | 7 | 0 |
attr |
Swinv2ForMaskedImageModeling.swinv2 |
1 | 0 | 0 |
attr |
Swinv2ForMaskedImageModeling.decoder |
1 | 0 | 0 |
meth |
Swinv2Model.init |
4 | 0 | 0 |
meth |
Swinv2Model.get_input_embeddings |
1 | 0 | 0 |
meth |
Swinv2Model.forward |
8 | 7 | 0 |
attr |
Swinv2Model.num_layers |
1 | 0 | 0 |
attr |
Swinv2Model.num_features |
1 | 0 | 0 |
attr |
Swinv2Model.embeddings |
1 | 0 | 0 |
attr |
Swinv2Model.encoder |
1 | 0 | 0 |
attr |
Swinv2Model.layernorm |
1 | 0 | 0 |
attr |
Swinv2Model.pooler |
1 | 0 | 0 |
meth |
Swinv2PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Swinv2ForImageClassification.init |
2 | 0 | 0 |
meth |
Swinv2ForImageClassification.forward |
8 | 7 | 0 |
attr |
Swinv2ForImageClassification.num_labels |
1 | 0 | 0 |
attr |
Swinv2ForImageClassification.swinv2 |
1 | 0 | 0 |
attr |
Swinv2ForImageClassification.classifier |
1 | 0 | 0 |
transformers.models.switch_transformers.configuration_switch_transformers (67 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SwitchTransformersConfig.init |
34 | 0 | 0 |
attr |
SwitchTransformersConfig.is_decoder |
1 | 0 | 0 |
attr |
SwitchTransformersConfig.add_cross_attention |
1 | 0 | 0 |
attr |
SwitchTransformersConfig.vocab_size |
1 | 0 | 0 |
attr |
SwitchTransformersConfig.d_model |
1 | 0 | 0 |
attr |
SwitchTransformersConfig.d_kv |
1 | 0 | 0 |
attr |
SwitchTransformersConfig.d_ff |
1 | 0 | 0 |
attr |
SwitchTransformersConfig.num_sparse_encoder_layers |
1 | 0 | 0 |
attr |
SwitchTransformersConfig.num_layers |
1 | 0 | 0 |
attr |
SwitchTransformersConfig.num_decoder_layers |
1 | 0 | 0 |
attr |
SwitchTransformersConfig.num_sparse_decoder_layers |
1 | 0 | 0 |
attr |
SwitchTransformersConfig.num_heads |
1 | 0 | 0 |
attr |
SwitchTransformersConfig.num_experts |
1 | 0 | 0 |
attr |
SwitchTransformersConfig.expert_capacity |
1 | 0 | 0 |
attr |
SwitchTransformersConfig.router_bias |
1 | 0 | 0 |
attr |
SwitchTransformersConfig.router_jitter_noise |
1 | 0 | 0 |
attr |
SwitchTransformersConfig.router_dtype |
1 | 0 | 0 |
attr |
SwitchTransformersConfig.router_ignore_padding_tokens |
1 | 0 | 0 |
attr |
SwitchTransformersConfig.relative_attention_num_buckets |
1 | 0 | 0 |
attr |
SwitchTransformersConfig.relative_attention_max_distance |
1 | 0 | 0 |
attr |
SwitchTransformersConfig.dropout_rate |
1 | 0 | 0 |
attr |
SwitchTransformersConfig.layer_norm_epsilon |
1 | 0 | 0 |
attr |
SwitchTransformersConfig.initializer_factor |
1 | 0 | 0 |
attr |
SwitchTransformersConfig.use_cache |
1 | 0 | 0 |
attr |
SwitchTransformersConfig.add_router_probs |
1 | 0 | 0 |
attr |
SwitchTransformersConfig.router_z_loss_coef |
1 | 0 | 0 |
attr |
SwitchTransformersConfig.router_aux_loss_coef |
1 | 0 | 0 |
attr |
SwitchTransformersConfig.dense_act_fn |
1 | 0 | 0 |
attr |
SwitchTransformersConfig.pad_token_id |
1 | 0 | 0 |
attr |
SwitchTransformersConfig.bos_token_id |
1 | 0 | 0 |
attr |
SwitchTransformersConfig.eos_token_id |
1 | 0 | 0 |
attr |
SwitchTransformersConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
SwitchTransformersConfig.encoder_sparse_step |
1 | 0 | 0 |
attr |
SwitchTransformersConfig.decoder_sparse_step |
1 | 0 | 0 |
transformers.models.switch_transformers.modeling_switch_transformers (40 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SwitchTransformersEncoderModel.init |
2 | 1 | 0 |
meth |
SwitchTransformersEncoderModel.get_input_embeddings |
1 | 0 | 0 |
meth |
SwitchTransformersEncoderModel.set_input_embeddings |
2 | 0 | 0 |
attr |
SwitchTransformersEncoderModel.shared |
1 | 0 | 0 |
attr |
SwitchTransformersEncoderModel.encoder |
1 | 0 | 0 |
meth |
SwitchTransformersPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
SwitchTransformersPreTrainedModel._shift_right |
2 | 0 | 0 |
meth |
SwitchTransformersTop1Router.init |
2 | 1 | 0 |
attr |
SwitchTransformersTop1Router.num_experts |
1 | 0 | 0 |
attr |
SwitchTransformersTop1Router.expert_capacity |
1 | 0 | 0 |
attr |
SwitchTransformersTop1Router.classifier |
1 | 0 | 0 |
attr |
SwitchTransformersTop1Router.jitter_noise |
1 | 0 | 0 |
attr |
SwitchTransformersTop1Router.ignore_padding_tokens |
1 | 0 | 0 |
attr |
SwitchTransformersTop1Router.dtype |
1 | 0 | 0 |
meth |
SwitchTransformersForConditionalGeneration.init |
2 | 1 | 0 |
meth |
SwitchTransformersForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
SwitchTransformersForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
SwitchTransformersForConditionalGeneration._unpack_router_logits |
2 | 0 | 0 |
meth |
SwitchTransformersForConditionalGeneration.prepare_decoder_input_ids_from_labels |
2 | 1 | 0 |
attr |
SwitchTransformersForConditionalGeneration.model_dim |
1 | 0 | 0 |
attr |
SwitchTransformersForConditionalGeneration.shared |
1 | 0 | 0 |
attr |
SwitchTransformersForConditionalGeneration.encoder |
1 | 0 | 0 |
attr |
SwitchTransformersForConditionalGeneration.decoder |
1 | 0 | 0 |
attr |
SwitchTransformersForConditionalGeneration.lm_head |
1 | 0 | 0 |
attr |
SwitchTransformersForConditionalGeneration.router_z_loss_coef |
1 | 0 | 0 |
attr |
SwitchTransformersForConditionalGeneration.router_aux_loss_coef |
1 | 0 | 0 |
meth |
SwitchTransformersModel.init |
2 | 1 | 0 |
meth |
SwitchTransformersModel.set_input_embeddings |
2 | 0 | 0 |
attr |
SwitchTransformersModel.shared |
1 | 0 | 0 |
attr |
SwitchTransformersModel.encoder |
1 | 0 | 0 |
attr |
SwitchTransformersModel.decoder |
1 | 0 | 0 |
meth |
SwitchTransformersSparseMLP.init |
2 | 1 | 0 |
attr |
SwitchTransformersSparseMLP.router |
1 | 0 | 0 |
attr |
SwitchTransformersSparseMLP.experts |
1 | 0 | 0 |
transformers.models.switch_transformers.modular_switch_transformers (40 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SwitchTransformersEncoderModel.init |
2 | 1 | 0 |
meth |
SwitchTransformersEncoderModel.get_input_embeddings |
1 | 0 | 0 |
meth |
SwitchTransformersEncoderModel.set_input_embeddings |
2 | 0 | 0 |
attr |
SwitchTransformersEncoderModel.shared |
1 | 0 | 0 |
attr |
SwitchTransformersEncoderModel.encoder |
1 | 0 | 0 |
meth |
SwitchTransformersPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
SwitchTransformersPreTrainedModel._shift_right |
2 | 0 | 0 |
meth |
SwitchTransformersTop1Router.init |
2 | 1 | 0 |
attr |
SwitchTransformersTop1Router.num_experts |
1 | 0 | 0 |
attr |
SwitchTransformersTop1Router.expert_capacity |
1 | 0 | 0 |
attr |
SwitchTransformersTop1Router.classifier |
1 | 0 | 0 |
attr |
SwitchTransformersTop1Router.jitter_noise |
1 | 0 | 0 |
attr |
SwitchTransformersTop1Router.ignore_padding_tokens |
1 | 0 | 0 |
attr |
SwitchTransformersTop1Router.dtype |
1 | 0 | 0 |
meth |
SwitchTransformersForConditionalGeneration.init |
2 | 1 | 0 |
meth |
SwitchTransformersForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
SwitchTransformersForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
SwitchTransformersForConditionalGeneration._unpack_router_logits |
2 | 0 | 0 |
meth |
SwitchTransformersForConditionalGeneration.prepare_decoder_input_ids_from_labels |
2 | 1 | 0 |
attr |
SwitchTransformersForConditionalGeneration.model_dim |
1 | 0 | 0 |
attr |
SwitchTransformersForConditionalGeneration.shared |
1 | 0 | 0 |
attr |
SwitchTransformersForConditionalGeneration.encoder |
1 | 0 | 0 |
attr |
SwitchTransformersForConditionalGeneration.decoder |
1 | 0 | 0 |
attr |
SwitchTransformersForConditionalGeneration.lm_head |
1 | 0 | 0 |
attr |
SwitchTransformersForConditionalGeneration.router_z_loss_coef |
1 | 0 | 0 |
attr |
SwitchTransformersForConditionalGeneration.router_aux_loss_coef |
1 | 0 | 0 |
meth |
SwitchTransformersModel.init |
2 | 1 | 0 |
meth |
SwitchTransformersModel.set_input_embeddings |
2 | 0 | 0 |
attr |
SwitchTransformersModel.shared |
1 | 0 | 0 |
attr |
SwitchTransformersModel.encoder |
1 | 0 | 0 |
attr |
SwitchTransformersModel.decoder |
1 | 0 | 0 |
meth |
SwitchTransformersSparseMLP.init |
2 | 1 | 0 |
attr |
SwitchTransformersSparseMLP.router |
1 | 0 | 0 |
attr |
SwitchTransformersSparseMLP.experts |
1 | 0 | 0 |
transformers.models.t5.configuration_t5 (44 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
T5Config.init |
22 | 0 | 0 |
attr |
T5Config.is_decoder |
1 | 0 | 0 |
attr |
T5Config.vocab_size |
1 | 0 | 0 |
attr |
T5Config.d_model |
1 | 0 | 0 |
attr |
T5Config.d_kv |
1 | 0 | 0 |
attr |
T5Config.d_ff |
1 | 0 | 0 |
attr |
T5Config.num_layers |
1 | 0 | 0 |
attr |
T5Config.num_decoder_layers |
1 | 0 | 0 |
attr |
T5Config.num_heads |
1 | 0 | 0 |
attr |
T5Config.relative_attention_num_buckets |
1 | 0 | 0 |
attr |
T5Config.relative_attention_max_distance |
1 | 0 | 0 |
attr |
T5Config.dropout_rate |
1 | 0 | 0 |
attr |
T5Config.classifier_dropout |
1 | 0 | 0 |
attr |
T5Config.layer_norm_epsilon |
1 | 0 | 0 |
attr |
T5Config.initializer_factor |
1 | 0 | 0 |
attr |
T5Config.feed_forward_proj |
1 | 0 | 0 |
attr |
T5Config.use_cache |
1 | 0 | 0 |
attr |
T5Config.pad_token_id |
1 | 0 | 0 |
attr |
T5Config.eos_token_id |
1 | 0 | 0 |
attr |
T5Config.dense_act_fn |
1 | 0 | 0 |
attr |
T5Config.is_gated_act |
1 | 0 | 0 |
attr |
T5Config.scale_decoder_outputs |
1 | 0 | 0 |
attr |
T5Config.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.t5.modeling_t5 (52 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
T5ForSequenceClassification.init |
2 | 1 | 0 |
meth |
T5ForSequenceClassification.forward |
14 | 13 | 0 |
attr |
T5ForSequenceClassification.transformer |
1 | 0 | 0 |
attr |
T5ForSequenceClassification.classification_head |
1 | 0 | 0 |
meth |
T5ForTokenClassification.init |
2 | 1 | 0 |
meth |
T5ForTokenClassification.forward |
9 | 8 | 0 |
attr |
T5ForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
T5ForTokenClassification.transformer |
1 | 0 | 0 |
attr |
T5ForTokenClassification.dropout |
1 | 0 | 0 |
attr |
T5ForTokenClassification.classifier |
1 | 0 | 0 |
meth |
T5ForConditionalGeneration.init |
2 | 1 | 0 |
meth |
T5ForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
T5ForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
T5ForConditionalGeneration.forward |
16 | 15 | 0 |
meth |
T5ForConditionalGeneration.prepare_decoder_input_ids_from_labels |
2 | 1 | 0 |
attr |
T5ForConditionalGeneration.model_dim |
1 | 0 | 0 |
attr |
T5ForConditionalGeneration.shared |
1 | 0 | 0 |
attr |
T5ForConditionalGeneration.encoder |
1 | 0 | 0 |
attr |
T5ForConditionalGeneration.decoder |
1 | 0 | 0 |
attr |
T5ForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
T5ForQuestionAnswering.init |
2 | 1 | 0 |
meth |
T5ForQuestionAnswering.get_input_embeddings |
1 | 0 | 0 |
meth |
T5ForQuestionAnswering.set_input_embeddings |
2 | 0 | 0 |
meth |
T5ForQuestionAnswering.forward |
15 | 14 | 0 |
attr |
T5ForQuestionAnswering.model_dim |
1 | 0 | 0 |
attr |
T5ForQuestionAnswering.shared |
1 | 0 | 0 |
attr |
T5ForQuestionAnswering.encoder |
1 | 0 | 0 |
attr |
T5ForQuestionAnswering.decoder |
1 | 0 | 0 |
attr |
T5ForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
T5ForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
T5PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
T5PreTrainedModel._shift_right |
2 | 0 | 0 |
prop |
T5PreTrainedModel.dummy_inputs |
1 | 0 | 0 |
meth |
T5Model.init |
2 | 1 | 0 |
meth |
T5Model.get_input_embeddings |
1 | 0 | 0 |
meth |
T5Model.set_input_embeddings |
2 | 0 | 0 |
meth |
T5Model.forward |
15 | 14 | 0 |
attr |
T5Model.shared |
1 | 0 | 0 |
attr |
T5Model.encoder |
1 | 0 | 0 |
attr |
T5Model.decoder |
1 | 0 | 0 |
meth |
T5EncoderModel.init |
2 | 1 | 0 |
meth |
T5EncoderModel.get_input_embeddings |
1 | 0 | 0 |
meth |
T5EncoderModel.set_input_embeddings |
2 | 0 | 0 |
meth |
T5EncoderModel.forward |
8 | 7 | 0 |
attr |
T5EncoderModel.shared |
1 | 0 | 0 |
attr |
T5EncoderModel.encoder |
1 | 0 | 0 |
transformers.models.t5.tokenization_t5 (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
T5Tokenizer.init |
9 | 1 | 0 |
meth |
T5Tokenizer.get_sentinel_tokens |
1 | 0 | 0 |
meth |
T5Tokenizer.get_sentinel_token_ids |
1 | 0 | 0 |
transformers.models.t5gemma.configuration_t5gemma (36 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
T5GemmaModuleConfig.init |
27 | 25 | 0 |
attr |
T5GemmaModuleConfig.is_decoder |
1 | 0 | 0 |
attr |
T5GemmaModuleConfig.pad_token_id |
1 | 0 | 0 |
attr |
T5GemmaModuleConfig.bos_token_id |
1 | 0 | 0 |
attr |
T5GemmaModuleConfig.eos_token_id |
1 | 0 | 0 |
attr |
T5GemmaModuleConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
T5GemmaModuleConfig.vocab_size |
1 | 0 | 0 |
attr |
T5GemmaModuleConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
T5GemmaModuleConfig.hidden_size |
1 | 0 | 0 |
attr |
T5GemmaModuleConfig.intermediate_size |
1 | 0 | 0 |
attr |
T5GemmaModuleConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
T5GemmaModuleConfig.num_attention_heads |
1 | 0 | 0 |
attr |
T5GemmaModuleConfig.head_dim |
1 | 0 | 0 |
attr |
T5GemmaModuleConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
T5GemmaModuleConfig.initializer_range |
1 | 0 | 0 |
attr |
T5GemmaModuleConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
T5GemmaModuleConfig.use_cache |
1 | 0 | 0 |
attr |
T5GemmaModuleConfig.attention_bias |
1 | 0 | 0 |
attr |
T5GemmaModuleConfig.attention_dropout |
1 | 0 | 0 |
attr |
T5GemmaModuleConfig.hidden_activation |
1 | 0 | 0 |
attr |
T5GemmaModuleConfig.query_pre_attn_scalar |
1 | 0 | 0 |
attr |
T5GemmaModuleConfig.sliding_window |
1 | 0 | 0 |
attr |
T5GemmaModuleConfig.final_logit_softcapping |
1 | 0 | 0 |
attr |
T5GemmaModuleConfig.attn_logit_softcapping |
1 | 0 | 0 |
attr |
T5GemmaModuleConfig.layer_types |
1 | 0 | 0 |
attr |
T5GemmaModuleConfig.rope_parameters |
1 | 0 | 0 |
meth |
T5GemmaConfig.init |
10 | 8 | 0 |
attr |
T5GemmaConfig.encoder |
1 | 0 | 0 |
attr |
T5GemmaConfig.decoder |
1 | 0 | 0 |
attr |
T5GemmaConfig.is_encoder_decoder |
1 | 0 | 0 |
attr |
T5GemmaConfig.initializer_range |
1 | 0 | 0 |
attr |
T5GemmaConfig.classifier_dropout_rate |
1 | 0 | 0 |
attr |
T5GemmaConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
T5GemmaConfig.vocab_size |
1 | 0 | 0 |
transformers.models.t5gemma.modeling_t5gemma (39 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
T5GemmaForSequenceClassification.init |
3 | 2 | 0 |
meth |
T5GemmaForSequenceClassification.get_input_embeddings |
1 | 0 | 0 |
meth |
T5GemmaForSequenceClassification.set_input_embeddings |
2 | 0 | 0 |
attr |
T5GemmaForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
T5GemmaForSequenceClassification.score |
1 | 0 | 0 |
attr |
T5GemmaForSequenceClassification.model |
1 | 0 | 0 |
meth |
T5GemmaModel.init |
2 | 1 | 0 |
meth |
T5GemmaModel.get_input_embeddings |
1 | 0 | 0 |
meth |
T5GemmaModel.set_input_embeddings |
2 | 0 | 0 |
attr |
T5GemmaModel.encoder |
1 | 0 | 0 |
attr |
T5GemmaModel.decoder |
1 | 0 | 0 |
meth |
T5GemmaForConditionalGeneration.init |
2 | 1 | 0 |
meth |
T5GemmaForConditionalGeneration.set_output_embeddings |
2 | 0 | 0 |
meth |
T5GemmaForConditionalGeneration.get_output_embeddings |
1 | 0 | 0 |
meth |
T5GemmaForConditionalGeneration.prepare_decoder_input_ids_from_labels |
2 | 1 | 0 |
attr |
T5GemmaForConditionalGeneration.model |
1 | 0 | 0 |
attr |
T5GemmaForConditionalGeneration.vocab_size |
1 | 0 | 0 |
attr |
T5GemmaForConditionalGeneration.lm_head |
1 | 0 | 0 |
attr |
T5GemmaForConditionalGeneration.loss_type |
1 | 0 | 0 |
meth |
T5GemmaEncoderModel.init |
2 | 1 | 0 |
meth |
T5GemmaEncoderModel.get_input_embeddings |
1 | 0 | 0 |
meth |
T5GemmaEncoderModel.set_input_embeddings |
2 | 0 | 0 |
attr |
T5GemmaEncoderModel.encoder |
1 | 0 | 0 |
meth |
T5GemmaForTokenClassification.init |
3 | 2 | 0 |
meth |
T5GemmaForTokenClassification.get_input_embeddings |
1 | 0 | 0 |
meth |
T5GemmaForTokenClassification.set_input_embeddings |
2 | 0 | 0 |
attr |
T5GemmaForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
T5GemmaForTokenClassification.score |
1 | 0 | 0 |
attr |
T5GemmaForTokenClassification.model |
1 | 0 | 0 |
meth |
T5GemmaPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
T5GemmaPreTrainedModel._shift_right |
2 | 0 | 0 |
attr |
T5GemmaPreTrainedModel._can_record_outputs |
1 | 0 | 0 |
transformers.models.t5gemma.modular_t5gemma (51 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
T5GemmaModuleConfig.init |
27 | 25 | 0 |
attr |
T5GemmaModuleConfig.is_decoder |
1 | 0 | 0 |
meth |
T5GemmaForSequenceClassification.init |
3 | 2 | 0 |
meth |
T5GemmaForSequenceClassification.get_input_embeddings |
1 | 0 | 0 |
meth |
T5GemmaForSequenceClassification.set_input_embeddings |
2 | 0 | 0 |
attr |
T5GemmaForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
T5GemmaForSequenceClassification.score |
1 | 0 | 0 |
attr |
T5GemmaForSequenceClassification.model |
1 | 0 | 0 |
meth |
T5GemmaEncoderModel.init |
2 | 1 | 0 |
meth |
T5GemmaEncoderModel.get_input_embeddings |
1 | 0 | 0 |
meth |
T5GemmaEncoderModel.set_input_embeddings |
2 | 0 | 0 |
attr |
T5GemmaEncoderModel.encoder |
1 | 0 | 0 |
meth |
T5GemmaConfig.init |
10 | 8 | 0 |
attr |
T5GemmaConfig.encoder |
1 | 0 | 0 |
attr |
T5GemmaConfig.decoder |
1 | 0 | 0 |
attr |
T5GemmaConfig.is_encoder_decoder |
1 | 0 | 0 |
attr |
T5GemmaConfig.initializer_range |
1 | 0 | 0 |
attr |
T5GemmaConfig.classifier_dropout_rate |
1 | 0 | 0 |
attr |
T5GemmaConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
T5GemmaConfig.vocab_size |
1 | 0 | 0 |
meth |
T5GemmaModel.init |
2 | 1 | 0 |
meth |
T5GemmaModel.get_input_embeddings |
1 | 0 | 0 |
meth |
T5GemmaModel.set_input_embeddings |
2 | 0 | 0 |
attr |
T5GemmaModel.encoder |
1 | 0 | 0 |
attr |
T5GemmaModel.decoder |
1 | 0 | 0 |
meth |
T5GemmaForConditionalGeneration.init |
2 | 1 | 0 |
meth |
T5GemmaForConditionalGeneration.set_output_embeddings |
2 | 0 | 0 |
meth |
T5GemmaForConditionalGeneration.get_output_embeddings |
1 | 0 | 0 |
meth |
T5GemmaForConditionalGeneration.prepare_decoder_input_ids_from_labels |
2 | 1 | 0 |
attr |
T5GemmaForConditionalGeneration.model |
1 | 0 | 0 |
attr |
T5GemmaForConditionalGeneration.vocab_size |
1 | 0 | 0 |
attr |
T5GemmaForConditionalGeneration.lm_head |
1 | 0 | 0 |
attr |
T5GemmaForConditionalGeneration.loss_type |
1 | 0 | 0 |
meth |
T5GemmaForTokenClassification.init |
3 | 2 | 0 |
meth |
T5GemmaForTokenClassification.get_input_embeddings |
1 | 0 | 0 |
meth |
T5GemmaForTokenClassification.set_input_embeddings |
2 | 0 | 0 |
attr |
T5GemmaForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
T5GemmaForTokenClassification.score |
1 | 0 | 0 |
attr |
T5GemmaForTokenClassification.model |
1 | 0 | 0 |
meth |
T5GemmaPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
T5GemmaPreTrainedModel._shift_right |
2 | 0 | 0 |
attr |
T5GemmaPreTrainedModel._can_record_outputs |
1 | 0 | 0 |
transformers.models.t5gemma2.configuration_t5gemma2 (75 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
T5Gemma2EncoderConfig.init |
10 | 8 | 0 |
attr |
T5Gemma2EncoderConfig.text_config |
1 | 0 | 0 |
attr |
T5Gemma2EncoderConfig.vision_config |
1 | 0 | 0 |
attr |
T5Gemma2EncoderConfig.mm_tokens_per_image |
1 | 0 | 0 |
attr |
T5Gemma2EncoderConfig.boi_token_index |
1 | 0 | 0 |
attr |
T5Gemma2EncoderConfig.eoi_token_index |
1 | 0 | 0 |
attr |
T5Gemma2EncoderConfig.image_token_index |
1 | 0 | 0 |
attr |
T5Gemma2EncoderConfig.initializer_range |
1 | 0 | 0 |
attr |
T5Gemma2EncoderConfig.tie_word_embeddings |
1 | 0 | 0 |
meth |
T5Gemma2DecoderConfig.init |
25 | 23 | 0 |
meth |
T5Gemma2DecoderConfig.convert_rope_params_to_dict |
3 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.pad_token_id |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.bos_token_id |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.eos_token_id |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.vocab_size |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.hidden_size |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.intermediate_size |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.head_dim |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.initializer_range |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.use_cache |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.attention_bias |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.attention_dropout |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.hidden_activation |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.query_pre_attn_scalar |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.sliding_window |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.final_logit_softcapping |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.attn_logit_softcapping |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.layer_types |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.rope_parameters |
1 | 0 | 0 |
meth |
T5Gemma2Config.init |
11 | 9 | 0 |
attr |
T5Gemma2Config.encoder |
1 | 0 | 0 |
attr |
T5Gemma2Config.decoder |
1 | 0 | 0 |
attr |
T5Gemma2Config.classifier_dropout_rate |
1 | 0 | 0 |
attr |
T5Gemma2Config.initializer_range |
1 | 0 | 0 |
attr |
T5Gemma2Config.eoi_token_index |
1 | 0 | 0 |
attr |
T5Gemma2Config.image_token_index |
1 | 0 | 0 |
attr |
T5Gemma2Config.tie_word_embeddings |
1 | 0 | 0 |
meth |
T5Gemma2TextConfig.init |
25 | 23 | 0 |
meth |
T5Gemma2TextConfig.convert_rope_params_to_dict |
3 | 0 | 0 |
attr |
T5Gemma2TextConfig.pad_token_id |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.bos_token_id |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.eos_token_id |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.vocab_size |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.hidden_size |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.intermediate_size |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.head_dim |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.initializer_range |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.use_cache |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.attention_bias |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.attention_dropout |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.hidden_activation |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.query_pre_attn_scalar |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.sliding_window |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.final_logit_softcapping |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.attn_logit_softcapping |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.layer_types |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.rope_parameters |
1 | 0 | 0 |
transformers.models.t5gemma2.modeling_t5gemma2 (51 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
T5Gemma2ForConditionalGeneration.init |
2 | 1 | 0 |
meth |
T5Gemma2ForConditionalGeneration.set_output_embeddings |
2 | 0 | 0 |
meth |
T5Gemma2ForConditionalGeneration.get_output_embeddings |
1 | 0 | 0 |
meth |
T5Gemma2ForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
T5Gemma2ForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
T5Gemma2ForConditionalGeneration.get_encoder |
1 | 0 | 0 |
meth |
T5Gemma2ForConditionalGeneration.get_decoder |
1 | 0 | 0 |
prop |
T5Gemma2ForConditionalGeneration.vision_tower |
1 | 0 | 0 |
attr |
T5Gemma2ForConditionalGeneration.model |
1 | 0 | 0 |
attr |
T5Gemma2ForConditionalGeneration.vocab_size |
1 | 0 | 0 |
attr |
T5Gemma2ForConditionalGeneration.lm_head |
1 | 0 | 0 |
attr |
T5Gemma2ForConditionalGeneration.loss_type |
1 | 0 | 0 |
meth |
T5Gemma2ForTokenClassification.init |
2 | 1 | 0 |
meth |
T5Gemma2ForTokenClassification.get_input_embeddings |
1 | 0 | 0 |
meth |
T5Gemma2ForTokenClassification.set_input_embeddings |
2 | 0 | 0 |
attr |
T5Gemma2ForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
T5Gemma2ForTokenClassification.hidden_size |
1 | 0 | 0 |
attr |
T5Gemma2ForTokenClassification.model |
1 | 0 | 0 |
attr |
T5Gemma2ForTokenClassification.score |
1 | 0 | 0 |
meth |
T5Gemma2Model.init |
2 | 1 | 0 |
meth |
T5Gemma2Model.get_encoder |
1 | 0 | 0 |
meth |
T5Gemma2Model.get_decoder |
1 | 0 | 0 |
meth |
T5Gemma2Model.get_input_embeddings |
1 | 0 | 0 |
meth |
T5Gemma2Model.set_input_embeddings |
2 | 0 | 0 |
attr |
T5Gemma2Model.encoder |
1 | 0 | 0 |
attr |
T5Gemma2Model.decoder |
1 | 0 | 0 |
meth |
T5Gemma2PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
T5Gemma2PreTrainedModel.prepare_decoder_input_ids_from_labels |
2 | 0 | 0 |
attr |
T5Gemma2PreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
T5Gemma2ForSequenceClassification.init |
2 | 1 | 0 |
meth |
T5Gemma2ForSequenceClassification.get_input_embeddings |
1 | 0 | 0 |
meth |
T5Gemma2ForSequenceClassification.set_input_embeddings |
2 | 0 | 0 |
attr |
T5Gemma2ForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
T5Gemma2ForSequenceClassification.hidden_size |
1 | 0 | 0 |
attr |
T5Gemma2ForSequenceClassification.model |
1 | 0 | 0 |
attr |
T5Gemma2ForSequenceClassification.score |
1 | 0 | 0 |
meth |
T5Gemma2Encoder.init |
3 | 2 | 0 |
meth |
T5Gemma2Encoder.get_input_embeddings |
1 | 0 | 0 |
meth |
T5Gemma2Encoder.set_input_embeddings |
2 | 0 | 0 |
meth |
T5Gemma2Encoder.get_image_placeholder_mask |
4 | 3 | 0 |
attr |
T5Gemma2Encoder.text_model |
1 | 0 | 0 |
attr |
T5Gemma2Encoder.vision_tower |
1 | 0 | 0 |
attr |
T5Gemma2Encoder.multi_modal_projector |
1 | 0 | 0 |
transformers.models.t5gemma2.modular_t5gemma2 (110 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
T5Gemma2ForConditionalGeneration.init |
2 | 1 | 0 |
meth |
T5Gemma2ForConditionalGeneration.set_output_embeddings |
2 | 0 | 0 |
meth |
T5Gemma2ForConditionalGeneration.get_output_embeddings |
1 | 0 | 0 |
meth |
T5Gemma2ForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
T5Gemma2ForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
T5Gemma2ForConditionalGeneration.get_encoder |
1 | 0 | 0 |
meth |
T5Gemma2ForConditionalGeneration.get_decoder |
1 | 0 | 0 |
prop |
T5Gemma2ForConditionalGeneration.vision_tower |
1 | 0 | 0 |
attr |
T5Gemma2ForConditionalGeneration.model |
1 | 0 | 0 |
attr |
T5Gemma2ForConditionalGeneration.vocab_size |
1 | 0 | 0 |
attr |
T5Gemma2ForConditionalGeneration.lm_head |
1 | 0 | 0 |
attr |
T5Gemma2ForConditionalGeneration.loss_type |
1 | 0 | 0 |
meth |
T5Gemma2Model.init |
2 | 1 | 0 |
meth |
T5Gemma2Model.get_encoder |
1 | 0 | 0 |
meth |
T5Gemma2Model.get_decoder |
1 | 0 | 0 |
meth |
T5Gemma2Model.get_input_embeddings |
1 | 0 | 0 |
meth |
T5Gemma2Model.set_input_embeddings |
2 | 0 | 0 |
attr |
T5Gemma2Model.encoder |
1 | 0 | 0 |
attr |
T5Gemma2Model.decoder |
1 | 0 | 0 |
meth |
T5Gemma2PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
T5Gemma2PreTrainedModel.prepare_decoder_input_ids_from_labels |
2 | 0 | 0 |
attr |
T5Gemma2PreTrainedModel._can_record_outputs |
1 | 0 | 0 |
meth |
T5Gemma2Encoder.init |
3 | 2 | 0 |
meth |
T5Gemma2Encoder.get_input_embeddings |
1 | 0 | 0 |
meth |
T5Gemma2Encoder.set_input_embeddings |
2 | 0 | 0 |
meth |
T5Gemma2Encoder.get_image_placeholder_mask |
4 | 3 | 0 |
attr |
T5Gemma2Encoder.text_model |
1 | 0 | 0 |
attr |
T5Gemma2Encoder.vision_tower |
1 | 0 | 0 |
attr |
T5Gemma2Encoder.multi_modal_projector |
1 | 0 | 0 |
meth |
T5Gemma2DecoderConfig.init |
25 | 23 | 0 |
attr |
T5Gemma2DecoderConfig.pad_token_id |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.bos_token_id |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.eos_token_id |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.vocab_size |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.hidden_size |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.intermediate_size |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.head_dim |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.initializer_range |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.use_cache |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.attention_bias |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.attention_dropout |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.hidden_activation |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.query_pre_attn_scalar |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.sliding_window |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.final_logit_softcapping |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.attn_logit_softcapping |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.layer_types |
1 | 0 | 0 |
attr |
T5Gemma2DecoderConfig.rope_parameters |
1 | 0 | 0 |
meth |
T5Gemma2Config.init |
11 | 9 | 0 |
attr |
T5Gemma2Config.encoder |
1 | 0 | 0 |
attr |
T5Gemma2Config.decoder |
1 | 0 | 0 |
attr |
T5Gemma2Config.classifier_dropout_rate |
1 | 0 | 0 |
attr |
T5Gemma2Config.initializer_range |
1 | 0 | 0 |
attr |
T5Gemma2Config.eoi_token_index |
1 | 0 | 0 |
attr |
T5Gemma2Config.image_token_index |
1 | 0 | 0 |
attr |
T5Gemma2Config.tie_word_embeddings |
1 | 0 | 0 |
meth |
T5Gemma2TextConfig.init |
25 | 23 | 0 |
attr |
T5Gemma2TextConfig.pad_token_id |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.bos_token_id |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.eos_token_id |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.vocab_size |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.hidden_size |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.intermediate_size |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.head_dim |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.initializer_range |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.use_cache |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.attention_bias |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.attention_dropout |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.hidden_activation |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.query_pre_attn_scalar |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.sliding_window |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.final_logit_softcapping |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.attn_logit_softcapping |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.layer_types |
1 | 0 | 0 |
attr |
T5Gemma2TextConfig.rope_parameters |
1 | 0 | 0 |
meth |
T5Gemma2ForTokenClassification.init |
2 | 1 | 0 |
meth |
T5Gemma2ForTokenClassification.get_input_embeddings |
1 | 0 | 0 |
meth |
T5Gemma2ForTokenClassification.set_input_embeddings |
2 | 0 | 0 |
attr |
T5Gemma2ForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
T5Gemma2ForTokenClassification.hidden_size |
1 | 0 | 0 |
attr |
T5Gemma2ForTokenClassification.model |
1 | 0 | 0 |
attr |
T5Gemma2ForTokenClassification.score |
1 | 0 | 0 |
meth |
T5Gemma2ForSequenceClassification.init |
2 | 1 | 0 |
meth |
T5Gemma2ForSequenceClassification.get_input_embeddings |
1 | 0 | 0 |
meth |
T5Gemma2ForSequenceClassification.set_input_embeddings |
2 | 0 | 0 |
attr |
T5Gemma2ForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
T5Gemma2ForSequenceClassification.hidden_size |
1 | 0 | 0 |
attr |
T5Gemma2ForSequenceClassification.model |
1 | 0 | 0 |
attr |
T5Gemma2ForSequenceClassification.score |
1 | 0 | 0 |
transformers.models.table_transformer.configuration_table_transformer (61 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TableTransformerConfig.init |
32 | 0 | 0 |
attr |
TableTransformerConfig.backbone_config |
1 | 0 | 0 |
attr |
TableTransformerConfig.num_channels |
1 | 0 | 0 |
attr |
TableTransformerConfig.num_queries |
1 | 0 | 0 |
attr |
TableTransformerConfig.d_model |
1 | 0 | 0 |
attr |
TableTransformerConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
TableTransformerConfig.encoder_layers |
1 | 0 | 0 |
attr |
TableTransformerConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
TableTransformerConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
TableTransformerConfig.decoder_layers |
1 | 0 | 0 |
attr |
TableTransformerConfig.decoder_attention_heads |
1 | 0 | 0 |
attr |
TableTransformerConfig.dropout |
1 | 0 | 0 |
attr |
TableTransformerConfig.attention_dropout |
1 | 0 | 0 |
attr |
TableTransformerConfig.activation_dropout |
1 | 0 | 0 |
attr |
TableTransformerConfig.activation_function |
1 | 0 | 0 |
attr |
TableTransformerConfig.init_std |
1 | 0 | 0 |
attr |
TableTransformerConfig.init_xavier_std |
1 | 0 | 0 |
attr |
TableTransformerConfig.encoder_layerdrop |
1 | 0 | 0 |
attr |
TableTransformerConfig.decoder_layerdrop |
1 | 0 | 0 |
attr |
TableTransformerConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
TableTransformerConfig.auxiliary_loss |
1 | 0 | 0 |
attr |
TableTransformerConfig.position_embedding_type |
1 | 0 | 0 |
attr |
TableTransformerConfig.class_cost |
1 | 0 | 0 |
attr |
TableTransformerConfig.bbox_cost |
1 | 0 | 0 |
attr |
TableTransformerConfig.giou_cost |
1 | 0 | 0 |
attr |
TableTransformerConfig.mask_loss_coefficient |
1 | 0 | 0 |
attr |
TableTransformerConfig.dice_loss_coefficient |
1 | 0 | 0 |
attr |
TableTransformerConfig.bbox_loss_coefficient |
1 | 0 | 0 |
attr |
TableTransformerConfig.giou_loss_coefficient |
1 | 0 | 0 |
attr |
TableTransformerConfig.eos_coefficient |
1 | 0 | 0 |
transformers.models.table_transformer.modeling_table_transformer (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TableTransformerPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
TableTransformerForObjectDetection.init |
2 | 1 | 0 |
meth |
TableTransformerForObjectDetection.forward |
12 | 11 | 0 |
attr |
TableTransformerForObjectDetection.model |
1 | 0 | 0 |
attr |
TableTransformerForObjectDetection.class_labels_classifier |
1 | 0 | 0 |
attr |
TableTransformerForObjectDetection.bbox_predictor |
1 | 0 | 0 |
meth |
TableTransformerModel.init |
2 | 1 | 0 |
meth |
TableTransformerModel.freeze_backbone |
1 | 0 | 0 |
meth |
TableTransformerModel.unfreeze_backbone |
1 | 0 | 0 |
meth |
TableTransformerModel.forward |
11 | 10 | 0 |
attr |
TableTransformerModel.backbone |
1 | 0 | 0 |
attr |
TableTransformerModel.input_projection |
1 | 0 | 0 |
attr |
TableTransformerModel.query_position_embeddings |
1 | 0 | 0 |
attr |
TableTransformerModel.encoder |
1 | 0 | 0 |
attr |
TableTransformerModel.decoder |
1 | 0 | 0 |
transformers.models.tapas.configuration_tapas (86 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TapasConfig.init |
44 | 0 | 0 |
attr |
TapasConfig.is_decoder |
1 | 0 | 0 |
attr |
TapasConfig.add_cross_attention |
1 | 0 | 0 |
attr |
TapasConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
TapasConfig.pad_token_id |
1 | 0 | 0 |
attr |
TapasConfig.bos_token_id |
1 | 0 | 0 |
attr |
TapasConfig.eos_token_id |
1 | 0 | 0 |
attr |
TapasConfig.vocab_size |
1 | 0 | 0 |
attr |
TapasConfig.hidden_size |
1 | 0 | 0 |
attr |
TapasConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
TapasConfig.num_attention_heads |
1 | 0 | 0 |
attr |
TapasConfig.hidden_act |
1 | 0 | 0 |
attr |
TapasConfig.intermediate_size |
1 | 0 | 0 |
attr |
TapasConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
TapasConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
TapasConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
TapasConfig.type_vocab_sizes |
1 | 0 | 0 |
attr |
TapasConfig.initializer_range |
1 | 0 | 0 |
attr |
TapasConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
TapasConfig.positive_label_weight |
1 | 0 | 0 |
attr |
TapasConfig.num_aggregation_labels |
1 | 0 | 0 |
attr |
TapasConfig.aggregation_loss_weight |
1 | 0 | 0 |
attr |
TapasConfig.use_answer_as_supervision |
1 | 0 | 0 |
attr |
TapasConfig.answer_loss_importance |
1 | 0 | 0 |
attr |
TapasConfig.use_normalized_answer_loss |
1 | 0 | 0 |
attr |
TapasConfig.huber_loss_delta |
1 | 0 | 0 |
attr |
TapasConfig.temperature |
1 | 0 | 0 |
attr |
TapasConfig.aggregation_temperature |
1 | 0 | 0 |
attr |
TapasConfig.use_gumbel_for_cells |
1 | 0 | 0 |
attr |
TapasConfig.use_gumbel_for_aggregation |
1 | 0 | 0 |
attr |
TapasConfig.average_approximation_function |
1 | 0 | 0 |
attr |
TapasConfig.cell_selection_preference |
1 | 0 | 0 |
attr |
TapasConfig.answer_loss_cutoff |
1 | 0 | 0 |
attr |
TapasConfig.max_num_rows |
1 | 0 | 0 |
attr |
TapasConfig.max_num_columns |
1 | 0 | 0 |
attr |
TapasConfig.average_logits_per_cell |
1 | 0 | 0 |
attr |
TapasConfig.select_one_column |
1 | 0 | 0 |
attr |
TapasConfig.allow_empty_column_selection |
1 | 0 | 0 |
attr |
TapasConfig.init_cell_selection_weights_to_zero |
1 | 0 | 0 |
attr |
TapasConfig.reset_position_index_per_cell |
1 | 0 | 0 |
attr |
TapasConfig.disable_per_token_loss |
1 | 0 | 0 |
attr |
TapasConfig.aggregation_labels |
1 | 0 | 0 |
attr |
TapasConfig.no_aggregation_label_index |
1 | 0 | 0 |
transformers.models.tapas.modeling_tapas (36 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TapasForMaskedLM.init |
2 | 0 | 0 |
meth |
TapasForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
TapasForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
meth |
TapasForMaskedLM.forward |
13 | 12 | 0 |
attr |
TapasForMaskedLM.tapas |
1 | 0 | 0 |
attr |
TapasForMaskedLM.cls |
1 | 0 | 0 |
meth |
TapasPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
TapasForSequenceClassification.init |
2 | 0 | 0 |
meth |
TapasForSequenceClassification.forward |
11 | 10 | 0 |
attr |
TapasForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
TapasForSequenceClassification.tapas |
1 | 0 | 0 |
attr |
TapasForSequenceClassification.dropout |
1 | 0 | 0 |
attr |
TapasForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
TapasForQuestionAnswering.init |
2 | 1 | 0 |
meth |
TapasForQuestionAnswering.forward |
16 | 15 | 0 |
attr |
TapasForQuestionAnswering.tapas |
1 | 0 | 0 |
attr |
TapasForQuestionAnswering.dropout |
1 | 0 | 0 |
attr |
TapasForQuestionAnswering.output_bias |
1 | 0 | 0 |
attr |
TapasForQuestionAnswering.column_output_bias |
1 | 0 | 0 |
attr |
TapasForQuestionAnswering.output_weights |
1 | 0 | 0 |
attr |
TapasForQuestionAnswering.column_output_weights |
1 | 0 | 0 |
attr |
TapasForQuestionAnswering.aggregation_classifier |
1 | 0 | 0 |
meth |
TapasModel.init |
3 | 0 | 0 |
meth |
TapasModel.get_input_embeddings |
1 | 0 | 0 |
meth |
TapasModel.set_input_embeddings |
2 | 0 | 0 |
meth |
TapasModel.forward |
12 | 11 | 0 |
attr |
TapasModel.embeddings |
1 | 0 | 0 |
attr |
TapasModel.encoder |
1 | 0 | 0 |
attr |
TapasModel.pooler |
1 | 0 | 0 |
transformers.models.tapas.tokenization_tapas (164 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TapasTokenizer.init |
24 | 7 | 0 |
meth |
TapasTokenizer.get_vocab |
1 | 0 | 0 |
meth |
TapasTokenizer._tokenize |
2 | 0 | 0 |
meth |
TapasTokenizer._convert_token_to_id |
2 | 0 | 0 |
meth |
TapasTokenizer._convert_id_to_token |
2 | 0 | 0 |
meth |
TapasTokenizer.convert_tokens_to_string |
2 | 0 | 0 |
meth |
TapasTokenizer.call |
20 | 19 | 0 |
meth |
TapasTokenizer.batch_encode_plus |
20 | 19 | 0 |
meth |
TapasTokenizer._get_question_tokens |
2 | 0 | 0 |
meth |
TapasTokenizer._batch_encode_plus |
20 | 18 | 0 |
meth |
TapasTokenizer._batch_prepare_for_model |
22 | 21 | 0 |
meth |
TapasTokenizer.encode |
9 | 8 | 0 |
meth |
TapasTokenizer.encode_plus |
19 | 18 | 0 |
meth |
TapasTokenizer._encode_plus |
19 | 17 | 0 |
meth |
TapasTokenizer.prepare_for_model |
22 | 21 | 0 |
meth |
TapasTokenizer._tokenize_table |
2 | 0 | 0 |
meth |
TapasTokenizer._question_encoding_cost |
2 | 0 | 0 |
meth |
TapasTokenizer._get_token_budget |
3 | 0 | 0 |
meth |
TapasTokenizer._get_table_values |
5 | 1 | 0 |
meth |
TapasTokenizer._get_table_boundaries |
2 | 0 | 0 |
meth |
TapasTokenizer._get_table_cost |
5 | 0 | 0 |
meth |
TapasTokenizer._get_max_num_tokens |
6 | 0 | 0 |
meth |
TapasTokenizer._get_num_columns |
2 | 0 | 0 |
meth |
TapasTokenizer._get_num_rows |
3 | 0 | 0 |
meth |
TapasTokenizer._serialize_text |
2 | 0 | 0 |
meth |
TapasTokenizer._serialize |
6 | 0 | 0 |
meth |
TapasTokenizer._get_column_values |
3 | 0 | 0 |
meth |
TapasTokenizer._get_cell_token_indexes |
5 | 0 | 0 |
meth |
TapasTokenizer._get_numeric_column_ranks |
4 | 0 | 0 |
meth |
TapasTokenizer._get_numeric_sort_key_fn |
3 | 0 | 0 |
meth |
TapasTokenizer._get_numeric_relations |
5 | 0 | 0 |
meth |
TapasTokenizer._get_numeric_values |
4 | 0 | 0 |
meth |
TapasTokenizer._get_numeric_values_scale |
4 | 0 | 0 |
meth |
TapasTokenizer._pad_to_seq_length |
2 | 0 | 0 |
meth |
TapasTokenizer._get_all_answer_ids_from_coordinates |
4 | 0 | 0 |
meth |
TapasTokenizer._get_all_answer_ids |
4 | 0 | 0 |
meth |
TapasTokenizer._find_tokens |
3 | 0 | 0 |
meth |
TapasTokenizer._find_answer_coordinates_from_answer_text |
3 | 0 | 0 |
meth |
TapasTokenizer._find_answer_ids_from_answer_texts |
5 | 0 | 0 |
meth |
TapasTokenizer._get_answer_ids |
4 | 0 | 0 |
meth |
TapasTokenizer.get_answer_ids |
6 | 0 | 0 |
meth |
TapasTokenizer._get_cell_token_probs |
5 | 0 | 0 |
meth |
TapasTokenizer._get_mean_cell_probs |
5 | 0 | 0 |
meth |
TapasTokenizer.convert_logits_to_predictions |
5 | 0 | 0 |
prop |
TapasTokenizer.do_lower_case |
1 | 0 | 0 |
prop |
TapasTokenizer.vocab_size |
1 | 0 | 0 |
attr |
TapasTokenizer.vocab |
1 | 0 | 0 |
attr |
TapasTokenizer.ids_to_tokens |
1 | 0 | 0 |
attr |
TapasTokenizer.do_basic_tokenize |
1 | 0 | 0 |
attr |
TapasTokenizer.wordpiece_tokenizer |
1 | 0 | 0 |
attr |
TapasTokenizer.cell_trim_length |
1 | 0 | 0 |
attr |
TapasTokenizer.max_column_id |
1 | 0 | 0 |
attr |
TapasTokenizer.max_row_id |
1 | 0 | 0 |
attr |
TapasTokenizer.strip_column_names |
1 | 0 | 0 |
attr |
TapasTokenizer.update_answer_coordinates |
1 | 0 | 0 |
attr |
TapasTokenizer.min_question_length |
1 | 0 | 0 |
attr |
TapasTokenizer.max_question_length |
1 | 0 | 0 |
attr |
TapasTokenizer.basic_tokenizer |
1 | 0 | 0 |
attr |
TapasTokenizer.tokens_trie |
1 | 0 | 0 |
transformers.models.textnet.configuration_textnet (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TextNetConfig.init |
15 | 0 | 0 |
attr |
TextNetConfig.stem_kernel_size |
1 | 0 | 0 |
attr |
TextNetConfig.stem_stride |
1 | 0 | 0 |
attr |
TextNetConfig.stem_num_channels |
1 | 0 | 0 |
attr |
TextNetConfig.stem_out_channels |
1 | 0 | 0 |
attr |
TextNetConfig.stem_act_func |
1 | 0 | 0 |
attr |
TextNetConfig.image_size |
1 | 0 | 0 |
attr |
TextNetConfig.conv_layer_kernel_sizes |
1 | 0 | 0 |
attr |
TextNetConfig.conv_layer_strides |
1 | 0 | 0 |
attr |
TextNetConfig.initializer_range |
1 | 0 | 0 |
attr |
TextNetConfig.hidden_sizes |
1 | 0 | 0 |
attr |
TextNetConfig.batch_norm_eps |
1 | 0 | 0 |
attr |
TextNetConfig.depths |
1 | 0 | 0 |
attr |
TextNetConfig.stage_names |
1 | 0 | 0 |
transformers.models.textnet.image_processing_textnet (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TextNetImageProcessor.init |
14 | 13 | 0 |
meth |
TextNetImageProcessor.resize |
7 | 6 | 0 |
meth |
TextNetImageProcessor.preprocess |
18 | 17 | 0 |
attr |
TextNetImageProcessor.do_resize |
1 | 0 | 0 |
attr |
TextNetImageProcessor.size |
1 | 0 | 0 |
attr |
TextNetImageProcessor.size_divisor |
1 | 0 | 0 |
attr |
TextNetImageProcessor.resample |
1 | 0 | 0 |
attr |
TextNetImageProcessor.do_center_crop |
1 | 0 | 0 |
attr |
TextNetImageProcessor.crop_size |
1 | 0 | 0 |
attr |
TextNetImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
TextNetImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
TextNetImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
TextNetImageProcessor.image_mean |
1 | 0 | 0 |
attr |
TextNetImageProcessor.image_std |
1 | 0 | 0 |
attr |
TextNetImageProcessor.do_convert_rgb |
1 | 0 | 0 |
transformers.models.textnet.image_processing_textnet_fast (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TextNetImageProcessorFast.resize |
7 | 6 | 0 |
meth |
TextNetImageProcessorFast._preprocess |
16 | 15 | 0 |
transformers.models.textnet.modeling_textnet (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TextNetBackbone.init |
2 | 0 | 0 |
meth |
TextNetBackbone.forward |
5 | 4 | 0 |
attr |
TextNetBackbone.textnet |
1 | 0 | 0 |
attr |
TextNetBackbone.num_features |
1 | 0 | 0 |
meth |
TextNetForImageClassification.init |
2 | 0 | 0 |
meth |
TextNetForImageClassification.forward |
6 | 5 | 0 |
attr |
TextNetForImageClassification.num_labels |
1 | 0 | 0 |
attr |
TextNetForImageClassification.textnet |
1 | 0 | 0 |
attr |
TextNetForImageClassification.avg_pool |
1 | 0 | 0 |
attr |
TextNetForImageClassification.flatten |
1 | 0 | 0 |
attr |
TextNetForImageClassification.fc |
1 | 0 | 0 |
attr |
TextNetForImageClassification.classifier |
1 | 0 | 0 |
meth |
TextNetModel.init |
2 | 0 | 0 |
meth |
TextNetModel.forward |
5 | 4 | 0 |
attr |
TextNetModel.stem |
1 | 0 | 0 |
attr |
TextNetModel.encoder |
1 | 0 | 0 |
attr |
TextNetModel.pooler |
1 | 0 | 0 |
transformers.models.time_series_transformer.configuration_time_series_transformer (33 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TimeSeriesTransformerConfig.init |
32 | 29 | 0 |
attr |
TimeSeriesTransformerConfig.prediction_length |
1 | 0 | 0 |
attr |
TimeSeriesTransformerConfig.context_length |
1 | 0 | 0 |
attr |
TimeSeriesTransformerConfig.distribution_output |
1 | 0 | 0 |
attr |
TimeSeriesTransformerConfig.loss |
1 | 0 | 0 |
attr |
TimeSeriesTransformerConfig.input_size |
1 | 0 | 0 |
attr |
TimeSeriesTransformerConfig.num_time_features |
1 | 0 | 0 |
attr |
TimeSeriesTransformerConfig.lags_sequence |
1 | 0 | 0 |
attr |
TimeSeriesTransformerConfig.scaling |
1 | 0 | 0 |
attr |
TimeSeriesTransformerConfig.num_dynamic_real_features |
1 | 0 | 0 |
attr |
TimeSeriesTransformerConfig.num_static_real_features |
1 | 0 | 0 |
attr |
TimeSeriesTransformerConfig.num_static_categorical_features |
1 | 0 | 0 |
attr |
TimeSeriesTransformerConfig.num_parallel_samples |
1 | 0 | 0 |
attr |
TimeSeriesTransformerConfig.feature_size |
1 | 0 | 0 |
attr |
TimeSeriesTransformerConfig.d_model |
1 | 0 | 0 |
attr |
TimeSeriesTransformerConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
TimeSeriesTransformerConfig.decoder_attention_heads |
1 | 0 | 0 |
attr |
TimeSeriesTransformerConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
TimeSeriesTransformerConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
TimeSeriesTransformerConfig.encoder_layers |
1 | 0 | 0 |
attr |
TimeSeriesTransformerConfig.decoder_layers |
1 | 0 | 0 |
attr |
TimeSeriesTransformerConfig.dropout |
1 | 0 | 0 |
attr |
TimeSeriesTransformerConfig.attention_dropout |
1 | 0 | 0 |
attr |
TimeSeriesTransformerConfig.activation_dropout |
1 | 0 | 0 |
attr |
TimeSeriesTransformerConfig.encoder_layerdrop |
1 | 0 | 0 |
attr |
TimeSeriesTransformerConfig.decoder_layerdrop |
1 | 0 | 0 |
attr |
TimeSeriesTransformerConfig.activation_function |
1 | 0 | 0 |
attr |
TimeSeriesTransformerConfig.init_std |
1 | 0 | 0 |
attr |
TimeSeriesTransformerConfig.use_cache |
1 | 0 | 0 |
attr |
TimeSeriesTransformerConfig.cardinality |
1 | 0 | 0 |
attr |
TimeSeriesTransformerConfig.embedding_dimension |
1 | 0 | 0 |
transformers.models.time_series_transformer.modeling_time_series_transformer (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TimeSeriesTransformerModel.init |
2 | 1 | 0 |
meth |
TimeSeriesTransformerModel.create_network_inputs |
8 | 7 | 0 |
meth |
TimeSeriesTransformerModel.forward |
17 | 16 | 0 |
attr |
TimeSeriesTransformerModel.encoder |
1 | 0 | 0 |
attr |
TimeSeriesTransformerModel.decoder |
1 | 0 | 0 |
attr |
TimeSeriesTransformerModel.scaler |
1 | 0 | 0 |
attr |
TimeSeriesTransformerModel.embedder |
1 | 0 | 0 |
meth |
TimeSeriesTransformerForPrediction.init |
2 | 1 | 0 |
meth |
TimeSeriesTransformerForPrediction.output_params |
2 | 0 | 0 |
meth |
TimeSeriesTransformerForPrediction.output_distribution |
5 | 1 | 0 |
meth |
TimeSeriesTransformerForPrediction.forward |
18 | 17 | 0 |
attr |
TimeSeriesTransformerForPrediction.model |
1 | 0 | 0 |
attr |
TimeSeriesTransformerForPrediction.parameter_projection |
1 | 0 | 0 |
attr |
TimeSeriesTransformerForPrediction.target_shape |
1 | 0 | 0 |
attr |
TimeSeriesTransformerForPrediction.distribution_output |
1 | 0 | 0 |
attr |
TimeSeriesTransformerForPrediction.loss |
1 | 0 | 0 |
meth |
TimeSeriesTransformerPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.timesfm.configuration_timesfm (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TimesFmConfig.init |
20 | 18 | 0 |
attr |
TimesFmConfig.patch_length |
1 | 0 | 0 |
attr |
TimesFmConfig.context_length |
1 | 0 | 0 |
attr |
TimesFmConfig.horizon_length |
1 | 0 | 0 |
attr |
TimesFmConfig.quantiles |
1 | 0 | 0 |
attr |
TimesFmConfig.pad_val |
1 | 0 | 0 |
attr |
TimesFmConfig.freq_size |
1 | 0 | 0 |
attr |
TimesFmConfig.hidden_size |
1 | 0 | 0 |
attr |
TimesFmConfig.intermediate_size |
1 | 0 | 0 |
attr |
TimesFmConfig.head_dim |
1 | 0 | 0 |
attr |
TimesFmConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
TimesFmConfig.num_attention_heads |
1 | 0 | 0 |
attr |
TimesFmConfig.tolerance |
1 | 0 | 0 |
attr |
TimesFmConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
TimesFmConfig.attention_dropout |
1 | 0 | 0 |
attr |
TimesFmConfig.use_positional_embedding |
1 | 0 | 0 |
attr |
TimesFmConfig.initializer_range |
1 | 0 | 0 |
attr |
TimesFmConfig.min_timescale |
1 | 0 | 0 |
attr |
TimesFmConfig.max_timescale |
1 | 0 | 0 |
transformers.models.timesfm.modeling_timesfm (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TimesFmModel.init |
2 | 1 | 0 |
meth |
TimesFmModel.forward |
7 | 6 | 0 |
attr |
TimesFmModel.input_ff_layer |
1 | 0 | 0 |
attr |
TimesFmModel.freq_emb |
1 | 0 | 0 |
attr |
TimesFmModel.layers |
1 | 0 | 0 |
attr |
TimesFmModel.position_emb |
1 | 0 | 0 |
meth |
TimesFmPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
TimesFmModelForPrediction.init |
2 | 1 | 0 |
meth |
TimesFmModelForPrediction.forward |
11 | 10 | 0 |
attr |
TimesFmModelForPrediction.context_len |
1 | 0 | 0 |
attr |
TimesFmModelForPrediction.horizon_len |
1 | 0 | 0 |
attr |
TimesFmModelForPrediction.decoder |
1 | 0 | 0 |
attr |
TimesFmModelForPrediction.horizon_ff_layer |
1 | 0 | 0 |
transformers.models.timesfm.modular_timesfm (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TimesFmModel.init |
2 | 1 | 0 |
meth |
TimesFmModel.forward |
7 | 6 | 0 |
attr |
TimesFmModel.input_ff_layer |
1 | 0 | 0 |
attr |
TimesFmModel.freq_emb |
1 | 0 | 0 |
attr |
TimesFmModel.layers |
1 | 0 | 0 |
attr |
TimesFmModel.position_emb |
1 | 0 | 0 |
meth |
TimesFmPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
TimesFmModelForPrediction.init |
2 | 1 | 0 |
meth |
TimesFmModelForPrediction.forward |
11 | 10 | 0 |
attr |
TimesFmModelForPrediction.context_len |
1 | 0 | 0 |
attr |
TimesFmModelForPrediction.horizon_len |
1 | 0 | 0 |
attr |
TimesFmModelForPrediction.decoder |
1 | 0 | 0 |
attr |
TimesFmModelForPrediction.horizon_ff_layer |
1 | 0 | 0 |
transformers.models.timesfm2_5.configuration_timesfm2_5 (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TimesFm2_5Config.init |
25 | 23 | 0 |
attr |
TimesFm2_5Config.num_key_value_heads |
1 | 0 | 0 |
attr |
TimesFm2_5Config.attention_bias |
1 | 0 | 0 |
attr |
TimesFm2_5Config.output_quantile_len |
1 | 0 | 0 |
attr |
TimesFm2_5Config.decode_index |
1 | 0 | 0 |
attr |
TimesFm2_5Config.use_bias |
1 | 0 | 0 |
attr |
TimesFm2_5Config.activation |
1 | 0 | 0 |
attr |
TimesFm2_5Config.use_continuous_quantile_head |
1 | 0 | 0 |
attr |
TimesFm2_5Config.force_flip_invariance |
1 | 0 | 0 |
attr |
TimesFm2_5Config.infer_is_positive |
1 | 0 | 0 |
attr |
TimesFm2_5Config.max_position_embeddings |
1 | 0 | 0 |
attr |
TimesFm2_5Config.rope_parameters |
1 | 0 | 0 |
attr |
TimesFm2_5Config.patch_length |
1 | 0 | 0 |
attr |
TimesFm2_5Config.context_length |
1 | 0 | 0 |
attr |
TimesFm2_5Config.horizon_length |
1 | 0 | 0 |
attr |
TimesFm2_5Config.quantiles |
1 | 0 | 0 |
attr |
TimesFm2_5Config.hidden_size |
1 | 0 | 0 |
attr |
TimesFm2_5Config.intermediate_size |
1 | 0 | 0 |
attr |
TimesFm2_5Config.head_dim |
1 | 0 | 0 |
attr |
TimesFm2_5Config.num_hidden_layers |
1 | 0 | 0 |
attr |
TimesFm2_5Config.num_attention_heads |
1 | 0 | 0 |
attr |
TimesFm2_5Config.rms_norm_eps |
1 | 0 | 0 |
attr |
TimesFm2_5Config.attention_dropout |
1 | 0 | 0 |
attr |
TimesFm2_5Config.initializer_range |
1 | 0 | 0 |
transformers.models.timesfm2_5.modeling_timesfm2_5 (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TimesFm2_5Model.init |
2 | 1 | 0 |
attr |
TimesFm2_5Model.tolerance |
1 | 0 | 0 |
attr |
TimesFm2_5Model.input_ff_layer |
1 | 0 | 0 |
attr |
TimesFm2_5Model.layers |
1 | 0 | 0 |
attr |
TimesFm2_5Model.rotary_emb |
1 | 0 | 0 |
attr |
TimesFm2_5Model.gradient_checkpointing |
1 | 0 | 0 |
meth |
TimesFm2_5ModelForPrediction.init |
2 | 1 | 0 |
attr |
TimesFm2_5ModelForPrediction.context_len |
1 | 0 | 0 |
attr |
TimesFm2_5ModelForPrediction.horizon_len |
1 | 0 | 0 |
attr |
TimesFm2_5ModelForPrediction.model |
1 | 0 | 0 |
attr |
TimesFm2_5ModelForPrediction.output_projection_point |
1 | 0 | 0 |
attr |
TimesFm2_5ModelForPrediction.output_projection_quantiles |
1 | 0 | 0 |
meth |
TimesFm2_5PreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.timesfm2_5.modular_timesfm2_5 (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TimesFm2_5Model.init |
2 | 1 | 0 |
attr |
TimesFm2_5Model.config |
1 | 0 | 0 |
attr |
TimesFm2_5Model.tolerance |
1 | 0 | 0 |
attr |
TimesFm2_5Model.input_ff_layer |
1 | 0 | 0 |
attr |
TimesFm2_5Model.layers |
1 | 0 | 0 |
attr |
TimesFm2_5Model.rotary_emb |
1 | 0 | 0 |
attr |
TimesFm2_5Model.gradient_checkpointing |
1 | 0 | 0 |
meth |
TimesFm2_5ModelForPrediction.init |
2 | 1 | 0 |
attr |
TimesFm2_5ModelForPrediction.config |
1 | 0 | 0 |
attr |
TimesFm2_5ModelForPrediction.context_len |
1 | 0 | 0 |
attr |
TimesFm2_5ModelForPrediction.horizon_len |
1 | 0 | 0 |
attr |
TimesFm2_5ModelForPrediction.model |
1 | 0 | 0 |
attr |
TimesFm2_5ModelForPrediction.output_projection_point |
1 | 0 | 0 |
attr |
TimesFm2_5ModelForPrediction.output_projection_quantiles |
1 | 0 | 0 |
meth |
TimesFm2_5Config.init |
25 | 23 | 0 |
attr |
TimesFm2_5Config.num_key_value_heads |
1 | 0 | 0 |
attr |
TimesFm2_5Config.attention_bias |
1 | 0 | 0 |
attr |
TimesFm2_5Config.output_quantile_len |
1 | 0 | 0 |
attr |
TimesFm2_5Config.decode_index |
1 | 0 | 0 |
attr |
TimesFm2_5Config.use_bias |
1 | 0 | 0 |
attr |
TimesFm2_5Config.activation |
1 | 0 | 0 |
attr |
TimesFm2_5Config.use_continuous_quantile_head |
1 | 0 | 0 |
attr |
TimesFm2_5Config.force_flip_invariance |
1 | 0 | 0 |
attr |
TimesFm2_5Config.infer_is_positive |
1 | 0 | 0 |
attr |
TimesFm2_5Config.max_position_embeddings |
1 | 0 | 0 |
attr |
TimesFm2_5Config.rope_parameters |
1 | 0 | 0 |
transformers.models.timesformer.configuration_timesformer (34 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TimesformerConfig.init |
18 | 0 | 0 |
attr |
TimesformerConfig.image_size |
1 | 0 | 0 |
attr |
TimesformerConfig.patch_size |
1 | 0 | 0 |
attr |
TimesformerConfig.num_channels |
1 | 0 | 0 |
attr |
TimesformerConfig.num_frames |
1 | 0 | 0 |
attr |
TimesformerConfig.hidden_size |
1 | 0 | 0 |
attr |
TimesformerConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
TimesformerConfig.num_attention_heads |
1 | 0 | 0 |
attr |
TimesformerConfig.intermediate_size |
1 | 0 | 0 |
attr |
TimesformerConfig.hidden_act |
1 | 0 | 0 |
attr |
TimesformerConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
TimesformerConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
TimesformerConfig.initializer_range |
1 | 0 | 0 |
attr |
TimesformerConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
TimesformerConfig.qkv_bias |
1 | 0 | 0 |
attr |
TimesformerConfig.attention_type |
1 | 0 | 0 |
attr |
TimesformerConfig.drop_path_rate |
1 | 0 | 0 |
transformers.models.timesformer.modeling_timesformer (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TimesformerModel.init |
2 | 0 | 0 |
meth |
TimesformerModel.get_input_embeddings |
1 | 0 | 0 |
meth |
TimesformerModel.forward |
6 | 5 | 0 |
attr |
TimesformerModel.embeddings |
1 | 0 | 0 |
attr |
TimesformerModel.encoder |
1 | 0 | 0 |
attr |
TimesformerModel.layernorm |
1 | 0 | 0 |
meth |
TimesformerPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
TimesformerForVideoClassification.init |
2 | 0 | 0 |
meth |
TimesformerForVideoClassification.forward |
7 | 6 | 0 |
attr |
TimesformerForVideoClassification.num_labels |
1 | 0 | 0 |
attr |
TimesformerForVideoClassification.timesformer |
1 | 0 | 0 |
attr |
TimesformerForVideoClassification.classifier |
1 | 0 | 0 |
transformers.models.timm_backbone.configuration_timm_backbone (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TimmBackboneConfig.init |
8 | 0 | 0 |
prop |
TimmBackboneConfig.out_indices |
2 | 1 | 0 |
prop |
TimmBackboneConfig.out_features |
2 | 1 | 0 |
attr |
TimmBackboneConfig.backbone |
1 | 0 | 0 |
attr |
TimmBackboneConfig.num_channels |
1 | 0 | 0 |
attr |
TimmBackboneConfig.features_only |
1 | 0 | 0 |
attr |
TimmBackboneConfig.out_indices |
1 | 0 | 0 |
attr |
TimmBackboneConfig.output_stride |
1 | 0 | 0 |
attr |
TimmBackboneConfig.freeze_batch_norm_2d |
1 | 0 | 0 |
transformers.models.timm_backbone.modeling_timm_backbone (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TimmBackbone.init |
3 | 0 | 0 |
meth |
TimmBackbone.from_pretrained |
4 | 0 | 0 |
meth |
TimmBackbone.freeze_batch_norm_2d |
1 | 0 | 0 |
meth |
TimmBackbone.unfreeze_batch_norm_2d |
1 | 0 | 0 |
meth |
TimmBackbone._init_weights |
2 | 0 | 0 |
meth |
TimmBackbone.forward |
6 | 5 | 0 |
transformers.models.timm_wrapper.configuration_timm_wrapper (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TimmWrapperConfig.init |
6 | 4 | 0 |
meth |
TimmWrapperConfig.from_dict |
3 | 1 | 0 |
attr |
TimmWrapperConfig.architecture |
1 | 0 | 0 |
attr |
TimmWrapperConfig.initializer_range |
1 | 0 | 0 |
attr |
TimmWrapperConfig.do_pooling |
1 | 0 | 0 |
attr |
TimmWrapperConfig.model_args |
1 | 0 | 0 |
transformers.models.timm_wrapper.image_processing_timm_wrapper (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TimmWrapperImageProcessor.init |
4 | 2 | 0 |
meth |
TimmWrapperImageProcessor.get_image_processor_dict |
3 | 2 | 0 |
meth |
TimmWrapperImageProcessor.save_pretrained |
3 | 0 | 0 |
attr |
TimmWrapperImageProcessor.data_config |
1 | 0 | 0 |
attr |
TimmWrapperImageProcessor.val_transforms |
1 | 0 | 0 |
attr |
TimmWrapperImageProcessor.train_transforms |
1 | 0 | 0 |
transformers.models.timm_wrapper.modeling_timm_wrapper (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TimmWrapperForImageClassification.init |
2 | 1 | 0 |
meth |
TimmWrapperForImageClassification.forward |
7 | 6 | 0 |
attr |
TimmWrapperForImageClassification.timm_model |
1 | 0 | 0 |
attr |
TimmWrapperForImageClassification.num_labels |
1 | 0 | 0 |
meth |
TimmWrapperPreTrainedModel.post_init |
1 | 0 | 0 |
meth |
TimmWrapperPreTrainedModel.load_state_dict |
4 | 0 | 0 |
meth |
TimmWrapperPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
TimmWrapperPreTrainedModel._timm_model_supports_gradient_checkpointing |
1 | 0 | 0 |
meth |
TimmWrapperPreTrainedModel._set_gradient_checkpointing |
4 | 1 | 0 |
meth |
TimmWrapperPreTrainedModel.get_input_embeddings |
1 | 0 | 0 |
meth |
TimmWrapperPreTrainedModel.set_input_embeddings |
2 | 0 | 0 |
meth |
TimmWrapperModel.init |
2 | 1 | 0 |
meth |
TimmWrapperModel.forward |
8 | 7 | 0 |
attr |
TimmWrapperModel.features_only |
1 | 0 | 0 |
attr |
TimmWrapperModel.timm_model |
1 | 0 | 0 |
transformers.models.trocr.configuration_trocr (48 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TrOCRConfig.init |
25 | 0 | 0 |
attr |
TrOCRConfig.cross_attention_hidden_size |
1 | 0 | 0 |
attr |
TrOCRConfig.is_decoder |
1 | 0 | 0 |
attr |
TrOCRConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
TrOCRConfig.vocab_size |
1 | 0 | 0 |
attr |
TrOCRConfig.d_model |
1 | 0 | 0 |
attr |
TrOCRConfig.decoder_layers |
1 | 0 | 0 |
attr |
TrOCRConfig.decoder_attention_heads |
1 | 0 | 0 |
attr |
TrOCRConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
TrOCRConfig.activation_function |
1 | 0 | 0 |
attr |
TrOCRConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
TrOCRConfig.dropout |
1 | 0 | 0 |
attr |
TrOCRConfig.attention_dropout |
1 | 0 | 0 |
attr |
TrOCRConfig.activation_dropout |
1 | 0 | 0 |
attr |
TrOCRConfig.init_std |
1 | 0 | 0 |
attr |
TrOCRConfig.decoder_layerdrop |
1 | 0 | 0 |
attr |
TrOCRConfig.use_cache |
1 | 0 | 0 |
attr |
TrOCRConfig.scale_embedding |
1 | 0 | 0 |
attr |
TrOCRConfig.use_learned_position_embeddings |
1 | 0 | 0 |
attr |
TrOCRConfig.layernorm_embedding |
1 | 0 | 0 |
attr |
TrOCRConfig.pad_token_id |
1 | 0 | 0 |
attr |
TrOCRConfig.bos_token_id |
1 | 0 | 0 |
attr |
TrOCRConfig.eos_token_id |
1 | 0 | 0 |
attr |
TrOCRConfig.decoder_start_token_id |
1 | 0 | 0 |
transformers.models.trocr.modeling_trocr (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TrOCRForCausalLM.init |
2 | 0 | 0 |
meth |
TrOCRForCausalLM.get_input_embeddings |
1 | 0 | 0 |
meth |
TrOCRForCausalLM.set_input_embeddings |
2 | 0 | 0 |
meth |
TrOCRForCausalLM.get_output_embeddings |
1 | 0 | 0 |
meth |
TrOCRForCausalLM.set_output_embeddings |
2 | 0 | 0 |
meth |
TrOCRForCausalLM.forward |
14 | 13 | 0 |
attr |
TrOCRForCausalLM.model |
1 | 0 | 0 |
attr |
TrOCRForCausalLM.output_projection |
1 | 0 | 0 |
transformers.models.trocr.processing_trocr (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TrOCRProcessor.init |
4 | 0 | 0 |
prop |
TrOCRProcessor.model_input_names |
1 | 0 | 0 |
transformers.models.tvp.configuration_tvp (48 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TvpConfig.init |
25 | 0 | 0 |
attr |
TvpConfig.backbone_config |
1 | 0 | 0 |
attr |
TvpConfig.distance_loss_weight |
1 | 0 | 0 |
attr |
TvpConfig.duration_loss_weight |
1 | 0 | 0 |
attr |
TvpConfig.visual_prompter_type |
1 | 0 | 0 |
attr |
TvpConfig.visual_prompter_apply |
1 | 0 | 0 |
attr |
TvpConfig.visual_prompt_size |
1 | 0 | 0 |
attr |
TvpConfig.max_img_size |
1 | 0 | 0 |
attr |
TvpConfig.num_frames |
1 | 0 | 0 |
attr |
TvpConfig.vocab_size |
1 | 0 | 0 |
attr |
TvpConfig.type_vocab_size |
1 | 0 | 0 |
attr |
TvpConfig.hidden_size |
1 | 0 | 0 |
attr |
TvpConfig.intermediate_size |
1 | 0 | 0 |
attr |
TvpConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
TvpConfig.num_attention_heads |
1 | 0 | 0 |
attr |
TvpConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
TvpConfig.max_grid_col_position_embeddings |
1 | 0 | 0 |
attr |
TvpConfig.max_grid_row_position_embeddings |
1 | 0 | 0 |
attr |
TvpConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
TvpConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
TvpConfig.hidden_act |
1 | 0 | 0 |
attr |
TvpConfig.initializer_range |
1 | 0 | 0 |
attr |
TvpConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
TvpConfig.pad_token_id |
1 | 0 | 0 |
transformers.models.tvp.image_processing_tvp (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TvpImageProcessor.init |
17 | 16 | 0 |
meth |
TvpImageProcessor.resize |
7 | 6 | 0 |
meth |
TvpImageProcessor.pad_image |
8 | 6 | 0 |
meth |
TvpImageProcessor._preprocess_image |
20 | 19 | 0 |
attr |
TvpImageProcessor.do_resize |
1 | 0 | 0 |
attr |
TvpImageProcessor.size |
1 | 0 | 0 |
attr |
TvpImageProcessor.do_center_crop |
1 | 0 | 0 |
attr |
TvpImageProcessor.crop_size |
1 | 0 | 0 |
attr |
TvpImageProcessor.resample |
1 | 0 | 0 |
attr |
TvpImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
TvpImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
TvpImageProcessor.do_pad |
1 | 0 | 0 |
attr |
TvpImageProcessor.pad_size |
1 | 0 | 0 |
attr |
TvpImageProcessor.constant_values |
1 | 0 | 0 |
attr |
TvpImageProcessor.pad_mode |
1 | 0 | 0 |
attr |
TvpImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
TvpImageProcessor.do_flip_channel_order |
1 | 0 | 0 |
attr |
TvpImageProcessor.image_mean |
1 | 0 | 0 |
attr |
TvpImageProcessor.image_std |
1 | 0 | 0 |
transformers.models.tvp.image_processing_tvp_fast (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TvpImageProcessorFast.init |
2 | 1 | 0 |
meth |
TvpImageProcessorFast._prepare_images_structure |
3 | 2 | 0 |
meth |
TvpImageProcessorFast.resize |
6 | 5 | 0 |
meth |
TvpImageProcessorFast._preprocess |
20 | 19 | 0 |
transformers.models.tvp.modeling_tvp (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TvpPreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
TvpModel.init |
2 | 0 | 0 |
meth |
TvpModel.get_input_embeddings |
1 | 0 | 0 |
meth |
TvpModel.set_input_embeddings |
2 | 0 | 0 |
meth |
TvpModel.forward |
9 | 8 | 0 |
attr |
TvpModel.vision_model |
1 | 0 | 0 |
attr |
TvpModel.embeddings |
1 | 0 | 0 |
attr |
TvpModel.visual_embeddings |
1 | 0 | 0 |
attr |
TvpModel.encoder |
1 | 0 | 0 |
attr |
TvpModel.pooler |
1 | 0 | 0 |
attr |
TvpModel.text_prompt |
1 | 0 | 0 |
attr |
TvpModel.dropout |
1 | 0 | 0 |
attr |
TvpModel.visual_prompter |
1 | 0 | 0 |
meth |
TvpForVideoGrounding.init |
2 | 0 | 0 |
meth |
TvpForVideoGrounding.forward |
10 | 9 | 0 |
attr |
TvpForVideoGrounding.model |
1 | 0 | 0 |
attr |
TvpForVideoGrounding.video_grounding_head |
1 | 0 | 0 |
transformers.models.tvp.processing_tvp (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TvpProcessor.init |
4 | 0 | 0 |
meth |
TvpProcessor.post_process_video_grounding |
3 | 0 | 0 |
attr |
TvpProcessor.video_processor |
1 | 0 | 0 |
transformers.models.udop.configuration_udop (53 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
UdopConfig.init |
27 | 0 | 0 |
attr |
UdopConfig.is_decoder |
1 | 0 | 0 |
attr |
UdopConfig.add_cross_attention |
1 | 0 | 0 |
attr |
UdopConfig.vocab_size |
1 | 0 | 0 |
attr |
UdopConfig.d_model |
1 | 0 | 0 |
attr |
UdopConfig.d_kv |
1 | 0 | 0 |
attr |
UdopConfig.d_ff |
1 | 0 | 0 |
attr |
UdopConfig.num_layers |
1 | 0 | 0 |
attr |
UdopConfig.num_decoder_layers |
1 | 0 | 0 |
attr |
UdopConfig.num_heads |
1 | 0 | 0 |
attr |
UdopConfig.relative_attention_num_buckets |
1 | 0 | 0 |
attr |
UdopConfig.relative_attention_max_distance |
1 | 0 | 0 |
attr |
UdopConfig.dropout_rate |
1 | 0 | 0 |
attr |
UdopConfig.layer_norm_epsilon |
1 | 0 | 0 |
attr |
UdopConfig.initializer_factor |
1 | 0 | 0 |
attr |
UdopConfig.feed_forward_proj |
1 | 0 | 0 |
attr |
UdopConfig.use_cache |
1 | 0 | 0 |
attr |
UdopConfig.pad_token_id |
1 | 0 | 0 |
attr |
UdopConfig.eos_token_id |
1 | 0 | 0 |
attr |
UdopConfig.max_2d_position_embeddings |
1 | 0 | 0 |
attr |
UdopConfig.image_size |
1 | 0 | 0 |
attr |
UdopConfig.patch_size |
1 | 0 | 0 |
attr |
UdopConfig.num_channels |
1 | 0 | 0 |
attr |
UdopConfig.relative_bias_args |
1 | 0 | 0 |
attr |
UdopConfig.dense_act_fn |
1 | 0 | 0 |
attr |
UdopConfig.is_gated_act |
1 | 0 | 0 |
attr |
UdopConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.udop.modeling_udop (33 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
UdopModel.init |
2 | 0 | 0 |
meth |
UdopModel.get_input_embeddings |
1 | 0 | 0 |
meth |
UdopModel.set_input_embeddings |
2 | 0 | 0 |
meth |
UdopModel.forward |
18 | 17 | 0 |
attr |
UdopModel.shared |
1 | 0 | 0 |
attr |
UdopModel.patch_embed |
1 | 0 | 0 |
attr |
UdopModel.encoder |
1 | 0 | 0 |
attr |
UdopModel.decoder |
1 | 0 | 0 |
meth |
UdopForConditionalGeneration.init |
2 | 0 | 0 |
meth |
UdopForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
UdopForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
UdopForConditionalGeneration.forward |
19 | 18 | 0 |
attr |
UdopForConditionalGeneration.shared |
1 | 0 | 0 |
attr |
UdopForConditionalGeneration.patch_embed |
1 | 0 | 0 |
attr |
UdopForConditionalGeneration.encoder |
1 | 0 | 0 |
attr |
UdopForConditionalGeneration.decoder |
1 | 0 | 0 |
attr |
UdopForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
UdopPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
UdopPreTrainedModel._shift_right |
2 | 0 | 0 |
meth |
UdopEncoderModel.init |
2 | 1 | 0 |
meth |
UdopEncoderModel.get_input_embeddings |
1 | 0 | 0 |
meth |
UdopEncoderModel.set_input_embeddings |
2 | 0 | 0 |
meth |
UdopEncoderModel.forward |
11 | 10 | 0 |
attr |
UdopEncoderModel.shared |
1 | 0 | 0 |
attr |
UdopEncoderModel.patch_embed |
1 | 0 | 0 |
attr |
UdopEncoderModel.encoder |
1 | 0 | 0 |
transformers.models.udop.processing_udop (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
UdopProcessor.init |
3 | 0 | 0 |
meth |
UdopProcessor.get_overflowing_images |
3 | 0 | 0 |
prop |
UdopProcessor.model_input_names |
1 | 0 | 0 |
transformers.models.udop.tokenization_udop (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
UdopTokenizer.init |
12 | 1 | 0 |
meth |
UdopTokenizer.call |
8 | 7 | 0 |
meth |
UdopTokenizer.call_boxes |
21 | 20 | 0 |
meth |
UdopTokenizer.tokenize |
5 | 4 | 0 |
meth |
UdopTokenizer.batch_encode_plus_boxes |
22 | 21 | 0 |
meth |
UdopTokenizer._batch_encode_plus_boxes |
21 | 20 | 0 |
meth |
UdopTokenizer._encode_plus_boxes |
21 | 20 | 0 |
meth |
UdopTokenizer.encode_boxes |
12 | 11 | 0 |
meth |
UdopTokenizer.encode_plus_boxes |
22 | 21 | 0 |
attr |
UdopTokenizer.sep_token_box |
1 | 0 | 0 |
attr |
UdopTokenizer.pad_token_box |
1 | 0 | 0 |
attr |
UdopTokenizer.pad_token_label |
1 | 0 | 0 |
attr |
UdopTokenizer.only_label_first_subword |
1 | 0 | 0 |
transformers.models.umt5.configuration_umt5 (46 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
UMT5Config.init |
23 | 0 | 0 |
attr |
UMT5Config.is_decoder |
1 | 0 | 0 |
attr |
UMT5Config.vocab_size |
1 | 0 | 0 |
attr |
UMT5Config.d_model |
1 | 0 | 0 |
attr |
UMT5Config.d_kv |
1 | 0 | 0 |
attr |
UMT5Config.d_ff |
1 | 0 | 0 |
attr |
UMT5Config.num_layers |
1 | 0 | 0 |
attr |
UMT5Config.num_decoder_layers |
1 | 0 | 0 |
attr |
UMT5Config.num_heads |
1 | 0 | 0 |
attr |
UMT5Config.relative_attention_num_buckets |
1 | 0 | 0 |
attr |
UMT5Config.relative_attention_max_distance |
1 | 0 | 0 |
attr |
UMT5Config.dropout_rate |
1 | 0 | 0 |
attr |
UMT5Config.classifier_dropout |
1 | 0 | 0 |
attr |
UMT5Config.layer_norm_epsilon |
1 | 0 | 0 |
attr |
UMT5Config.initializer_factor |
1 | 0 | 0 |
attr |
UMT5Config.feed_forward_proj |
1 | 0 | 0 |
attr |
UMT5Config.use_cache |
1 | 0 | 0 |
attr |
UMT5Config.dense_act_fn |
1 | 0 | 0 |
attr |
UMT5Config.is_gated_act |
1 | 0 | 0 |
attr |
UMT5Config.tokenizer_class |
1 | 0 | 0 |
attr |
UMT5Config.pad_token_id |
1 | 0 | 0 |
attr |
UMT5Config.eos_token_id |
1 | 0 | 0 |
attr |
UMT5Config.decoder_start_token_id |
1 | 0 | 0 |
attr |
UMT5Config.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.umt5.modeling_umt5 (56 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
UMT5EncoderModel.init |
2 | 0 | 0 |
meth |
UMT5EncoderModel.get_input_embeddings |
1 | 0 | 0 |
meth |
UMT5EncoderModel.set_input_embeddings |
2 | 0 | 0 |
meth |
UMT5EncoderModel.forward |
8 | 7 | 0 |
attr |
UMT5EncoderModel.shared |
1 | 0 | 0 |
attr |
UMT5EncoderModel.encoder |
1 | 0 | 0 |
meth |
UMT5ForTokenClassification.init |
2 | 1 | 0 |
meth |
UMT5ForTokenClassification.forward |
9 | 8 | 0 |
attr |
UMT5ForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
UMT5ForTokenClassification.transformer |
1 | 0 | 0 |
attr |
UMT5ForTokenClassification.dropout |
1 | 0 | 0 |
attr |
UMT5ForTokenClassification.classifier |
1 | 0 | 0 |
meth |
UMT5ForSequenceClassification.init |
2 | 1 | 0 |
meth |
UMT5ForSequenceClassification.forward |
14 | 13 | 0 |
attr |
UMT5ForSequenceClassification.transformer |
1 | 0 | 0 |
attr |
UMT5ForSequenceClassification.classification_head |
1 | 0 | 0 |
meth |
UMT5Model.init |
2 | 0 | 0 |
meth |
UMT5Model.get_input_embeddings |
1 | 0 | 0 |
meth |
UMT5Model.set_input_embeddings |
2 | 0 | 0 |
meth |
UMT5Model.forward |
15 | 14 | 0 |
attr |
UMT5Model.shared |
1 | 0 | 0 |
attr |
UMT5Model.encoder |
1 | 0 | 0 |
attr |
UMT5Model.decoder |
1 | 0 | 0 |
meth |
UMT5PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
UMT5PreTrainedModel._shift_right |
2 | 0 | 0 |
prop |
UMT5PreTrainedModel.dummy_inputs |
1 | 0 | 0 |
meth |
UMT5ForConditionalGeneration.init |
2 | 0 | 0 |
meth |
UMT5ForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
UMT5ForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
UMT5ForConditionalGeneration.forward |
16 | 15 | 0 |
meth |
UMT5ForConditionalGeneration.prepare_decoder_input_ids_from_labels |
2 | 1 | 0 |
attr |
UMT5ForConditionalGeneration.model_dim |
1 | 0 | 0 |
attr |
UMT5ForConditionalGeneration.shared |
1 | 0 | 0 |
attr |
UMT5ForConditionalGeneration.encoder |
1 | 0 | 0 |
attr |
UMT5ForConditionalGeneration.decoder |
1 | 0 | 0 |
attr |
UMT5ForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
UMT5ForQuestionAnswering.init |
2 | 0 | 0 |
meth |
UMT5ForQuestionAnswering.get_input_embeddings |
1 | 0 | 0 |
meth |
UMT5ForQuestionAnswering.set_input_embeddings |
2 | 0 | 0 |
meth |
UMT5ForQuestionAnswering.forward |
15 | 14 | 0 |
attr |
UMT5ForQuestionAnswering.model_dim |
1 | 0 | 0 |
attr |
UMT5ForQuestionAnswering.shared |
1 | 0 | 0 |
attr |
UMT5ForQuestionAnswering.encoder |
1 | 0 | 0 |
attr |
UMT5ForQuestionAnswering.decoder |
1 | 0 | 0 |
attr |
UMT5ForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
UMT5ForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
transformers.models.unispeech.configuration_unispeech (98 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
UniSpeechConfig.init |
49 | 0 | 0 |
prop |
UniSpeechConfig.inputs_to_logits_ratio |
1 | 0 | 0 |
attr |
UniSpeechConfig.pad_token_id |
1 | 0 | 0 |
attr |
UniSpeechConfig.bos_token_id |
1 | 0 | 0 |
attr |
UniSpeechConfig.eos_token_id |
1 | 0 | 0 |
attr |
UniSpeechConfig.hidden_size |
1 | 0 | 0 |
attr |
UniSpeechConfig.feat_extract_norm |
1 | 0 | 0 |
attr |
UniSpeechConfig.feat_extract_activation |
1 | 0 | 0 |
attr |
UniSpeechConfig.conv_dim |
1 | 0 | 0 |
attr |
UniSpeechConfig.conv_stride |
1 | 0 | 0 |
attr |
UniSpeechConfig.conv_kernel |
1 | 0 | 0 |
attr |
UniSpeechConfig.conv_bias |
1 | 0 | 0 |
attr |
UniSpeechConfig.num_conv_pos_embeddings |
1 | 0 | 0 |
attr |
UniSpeechConfig.num_conv_pos_embedding_groups |
1 | 0 | 0 |
attr |
UniSpeechConfig.num_feat_extract_layers |
1 | 0 | 0 |
attr |
UniSpeechConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
UniSpeechConfig.intermediate_size |
1 | 0 | 0 |
attr |
UniSpeechConfig.hidden_act |
1 | 0 | 0 |
attr |
UniSpeechConfig.num_attention_heads |
1 | 0 | 0 |
attr |
UniSpeechConfig.hidden_dropout |
1 | 0 | 0 |
attr |
UniSpeechConfig.attention_dropout |
1 | 0 | 0 |
attr |
UniSpeechConfig.activation_dropout |
1 | 0 | 0 |
attr |
UniSpeechConfig.feat_proj_dropout |
1 | 0 | 0 |
attr |
UniSpeechConfig.final_dropout |
1 | 0 | 0 |
attr |
UniSpeechConfig.layerdrop |
1 | 0 | 0 |
attr |
UniSpeechConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
UniSpeechConfig.initializer_range |
1 | 0 | 0 |
attr |
UniSpeechConfig.num_ctc_classes |
1 | 0 | 0 |
attr |
UniSpeechConfig.vocab_size |
1 | 0 | 0 |
attr |
UniSpeechConfig.do_stable_layer_norm |
1 | 0 | 0 |
attr |
UniSpeechConfig.use_weighted_layer_sum |
1 | 0 | 0 |
attr |
UniSpeechConfig.classifier_proj_size |
1 | 0 | 0 |
attr |
UniSpeechConfig.apply_spec_augment |
1 | 0 | 0 |
attr |
UniSpeechConfig.mask_time_prob |
1 | 0 | 0 |
attr |
UniSpeechConfig.mask_time_length |
1 | 0 | 0 |
attr |
UniSpeechConfig.mask_time_min_masks |
1 | 0 | 0 |
attr |
UniSpeechConfig.mask_feature_prob |
1 | 0 | 0 |
attr |
UniSpeechConfig.mask_feature_length |
1 | 0 | 0 |
attr |
UniSpeechConfig.mask_feature_min_masks |
1 | 0 | 0 |
attr |
UniSpeechConfig.num_codevectors_per_group |
1 | 0 | 0 |
attr |
UniSpeechConfig.num_codevector_groups |
1 | 0 | 0 |
attr |
UniSpeechConfig.contrastive_logits_temperature |
1 | 0 | 0 |
attr |
UniSpeechConfig.feat_quantizer_dropout |
1 | 0 | 0 |
attr |
UniSpeechConfig.num_negatives |
1 | 0 | 0 |
attr |
UniSpeechConfig.codevector_dim |
1 | 0 | 0 |
attr |
UniSpeechConfig.proj_codevector_dim |
1 | 0 | 0 |
attr |
UniSpeechConfig.diversity_loss_weight |
1 | 0 | 0 |
attr |
UniSpeechConfig.ctc_loss_reduction |
1 | 0 | 0 |
attr |
UniSpeechConfig.ctc_zero_infinity |
1 | 0 | 0 |
attr |
UniSpeechConfig.replace_prob |
1 | 0 | 0 |
transformers.models.unispeech.modeling_unispeech (43 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
UniSpeechPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
UniSpeechPreTrainedModel._get_feat_extract_output_lengths |
2 | 1 | 0 |
meth |
UniSpeechPreTrainedModel._get_feature_vector_attention_mask |
3 | 2 | 0 |
meth |
UniSpeechForPreTraining.init |
2 | 1 | 0 |
meth |
UniSpeechForPreTraining.set_gumbel_temperature |
2 | 1 | 0 |
meth |
UniSpeechForPreTraining.freeze_feature_encoder |
1 | 0 | 0 |
meth |
UniSpeechForPreTraining.compute_contrastive_logits |
5 | 4 | 0 |
meth |
UniSpeechForPreTraining.forward |
7 | 6 | 0 |
attr |
UniSpeechForPreTraining.unispeech |
1 | 0 | 0 |
attr |
UniSpeechForPreTraining.dropout_features |
1 | 0 | 0 |
attr |
UniSpeechForPreTraining.quantizer |
1 | 0 | 0 |
attr |
UniSpeechForPreTraining.project_q |
1 | 0 | 0 |
attr |
UniSpeechForPreTraining.project_hid |
1 | 0 | 0 |
attr |
UniSpeechForPreTraining.ctc_proj |
1 | 0 | 0 |
attr |
UniSpeechForPreTraining.dropout |
1 | 0 | 0 |
meth |
UniSpeechForCTC.init |
3 | 1 | 0 |
meth |
UniSpeechForCTC.tie_weights |
2 | 0 | 0 |
meth |
UniSpeechForCTC.freeze_feature_encoder |
1 | 0 | 0 |
meth |
UniSpeechForCTC.freeze_base_model |
1 | 0 | 0 |
meth |
UniSpeechForCTC.forward |
8 | 7 | 0 |
attr |
UniSpeechForCTC.unispeech |
1 | 0 | 0 |
attr |
UniSpeechForCTC.dropout |
1 | 0 | 0 |
attr |
UniSpeechForCTC.target_lang |
1 | 0 | 0 |
attr |
UniSpeechForCTC.lm_head |
1 | 0 | 0 |
meth |
UniSpeechForSequenceClassification.init |
2 | 0 | 0 |
meth |
UniSpeechForSequenceClassification.freeze_feature_encoder |
1 | 0 | 0 |
meth |
UniSpeechForSequenceClassification.freeze_base_model |
1 | 0 | 0 |
meth |
UniSpeechForSequenceClassification.forward |
8 | 7 | 0 |
attr |
UniSpeechForSequenceClassification.unispeech |
1 | 0 | 0 |
attr |
UniSpeechForSequenceClassification.projector |
1 | 0 | 0 |
attr |
UniSpeechForSequenceClassification.classifier |
1 | 0 | 0 |
attr |
UniSpeechForSequenceClassification.layer_weights |
1 | 0 | 0 |
meth |
UniSpeechModel.init |
2 | 1 | 0 |
meth |
UniSpeechModel._mask_hidden_states |
4 | 3 | 0 |
meth |
UniSpeechModel.forward |
8 | 7 | 0 |
attr |
UniSpeechModel.feature_extractor |
1 | 0 | 0 |
attr |
UniSpeechModel.feature_projection |
1 | 0 | 0 |
attr |
UniSpeechModel.masked_spec_embed |
1 | 0 | 0 |
attr |
UniSpeechModel.encoder |
1 | 0 | 0 |
transformers.models.unispeech.modular_unispeech (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
UniSpeechPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
UniSpeechPreTrainedModel._get_feat_extract_output_lengths |
2 | 1 | 0 |
meth |
UniSpeechPreTrainedModel._get_feature_vector_attention_mask |
3 | 2 | 0 |
meth |
UniSpeechForPreTraining.init |
2 | 1 | 0 |
meth |
UniSpeechForPreTraining.set_gumbel_temperature |
2 | 1 | 0 |
meth |
UniSpeechForPreTraining.freeze_feature_encoder |
1 | 0 | 0 |
meth |
UniSpeechForPreTraining.compute_contrastive_logits |
5 | 4 | 0 |
meth |
UniSpeechForPreTraining.forward |
7 | 6 | 0 |
attr |
UniSpeechForPreTraining.unispeech |
1 | 0 | 0 |
attr |
UniSpeechForPreTraining.dropout_features |
1 | 0 | 0 |
attr |
UniSpeechForPreTraining.quantizer |
1 | 0 | 0 |
attr |
UniSpeechForPreTraining.project_q |
1 | 0 | 0 |
attr |
UniSpeechForPreTraining.project_hid |
1 | 0 | 0 |
attr |
UniSpeechForPreTraining.ctc_proj |
1 | 0 | 0 |
attr |
UniSpeechForPreTraining.dropout |
1 | 0 | 0 |
meth |
UniSpeechModel.init |
2 | 1 | 0 |
meth |
UniSpeechModel.freeze_feature_encoder |
1 | 0 | 0 |
meth |
UniSpeechModel.forward |
8 | 7 | 0 |
attr |
UniSpeechModel.feature_extractor |
1 | 0 | 0 |
attr |
UniSpeechModel.feature_projection |
1 | 0 | 0 |
attr |
UniSpeechModel.masked_spec_embed |
1 | 0 | 0 |
attr |
UniSpeechModel.encoder |
1 | 0 | 0 |
transformers.models.unispeech_sat.configuration_unispeech_sat (104 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
UniSpeechSatConfig.init |
52 | 0 | 0 |
prop |
UniSpeechSatConfig.inputs_to_logits_ratio |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.pad_token_id |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.bos_token_id |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.eos_token_id |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.hidden_size |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.feat_extract_norm |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.feat_extract_activation |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.conv_dim |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.conv_stride |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.conv_kernel |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.conv_bias |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.num_conv_pos_embeddings |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.num_conv_pos_embedding_groups |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.num_feat_extract_layers |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.intermediate_size |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.hidden_act |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.num_attention_heads |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.hidden_dropout |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.attention_dropout |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.activation_dropout |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.feat_proj_dropout |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.final_dropout |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.layerdrop |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.initializer_range |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.vocab_size |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.num_clusters |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.do_stable_layer_norm |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.use_weighted_layer_sum |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.apply_spec_augment |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.mask_time_prob |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.mask_time_length |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.mask_time_min_masks |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.mask_feature_prob |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.mask_feature_length |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.mask_feature_min_masks |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.num_codevectors_per_group |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.num_codevector_groups |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.contrastive_logits_temperature |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.feat_quantizer_dropout |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.num_negatives |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.codevector_dim |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.proj_codevector_dim |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.diversity_loss_weight |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.ctc_loss_reduction |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.ctc_zero_infinity |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.classifier_proj_size |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.tdnn_dim |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.tdnn_kernel |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.tdnn_dilation |
1 | 0 | 0 |
attr |
UniSpeechSatConfig.xvector_output_dim |
1 | 0 | 0 |
transformers.models.unispeech_sat.modeling_unispeech_sat (67 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
UniSpeechSatModel.init |
2 | 1 | 0 |
meth |
UniSpeechSatModel._mask_hidden_states |
4 | 3 | 0 |
meth |
UniSpeechSatModel.forward |
8 | 7 | 0 |
attr |
UniSpeechSatModel.feature_extractor |
1 | 0 | 0 |
attr |
UniSpeechSatModel.feature_projection |
1 | 0 | 0 |
attr |
UniSpeechSatModel.masked_spec_embed |
1 | 0 | 0 |
attr |
UniSpeechSatModel.encoder |
1 | 0 | 0 |
meth |
UniSpeechSatForCTC.init |
3 | 1 | 0 |
meth |
UniSpeechSatForCTC.tie_weights |
2 | 0 | 0 |
meth |
UniSpeechSatForCTC.freeze_feature_encoder |
1 | 0 | 0 |
meth |
UniSpeechSatForCTC.freeze_base_model |
1 | 0 | 0 |
meth |
UniSpeechSatForCTC.forward |
8 | 7 | 0 |
attr |
UniSpeechSatForCTC.unispeech_sat |
1 | 0 | 0 |
attr |
UniSpeechSatForCTC.dropout |
1 | 0 | 0 |
attr |
UniSpeechSatForCTC.target_lang |
1 | 0 | 0 |
attr |
UniSpeechSatForCTC.lm_head |
1 | 0 | 0 |
meth |
UniSpeechSatForAudioFrameClassification.init |
2 | 0 | 0 |
meth |
UniSpeechSatForAudioFrameClassification.freeze_feature_encoder |
1 | 0 | 0 |
meth |
UniSpeechSatForAudioFrameClassification.freeze_base_model |
1 | 0 | 0 |
meth |
UniSpeechSatForAudioFrameClassification.forward |
8 | 7 | 0 |
attr |
UniSpeechSatForAudioFrameClassification.unispeech_sat |
1 | 0 | 0 |
attr |
UniSpeechSatForAudioFrameClassification.classifier |
1 | 0 | 0 |
attr |
UniSpeechSatForAudioFrameClassification.num_labels |
1 | 0 | 0 |
attr |
UniSpeechSatForAudioFrameClassification.layer_weights |
1 | 0 | 0 |
meth |
UniSpeechSatForXVector.init |
2 | 0 | 0 |
meth |
UniSpeechSatForXVector.freeze_feature_encoder |
1 | 0 | 0 |
meth |
UniSpeechSatForXVector.freeze_base_model |
1 | 0 | 0 |
meth |
UniSpeechSatForXVector._get_tdnn_output_lengths |
2 | 1 | 0 |
meth |
UniSpeechSatForXVector.forward |
8 | 7 | 0 |
attr |
UniSpeechSatForXVector.unispeech_sat |
1 | 0 | 0 |
attr |
UniSpeechSatForXVector.projector |
1 | 0 | 0 |
attr |
UniSpeechSatForXVector.tdnn |
1 | 0 | 0 |
attr |
UniSpeechSatForXVector.feature_extractor |
1 | 0 | 0 |
attr |
UniSpeechSatForXVector.classifier |
1 | 0 | 0 |
attr |
UniSpeechSatForXVector.objective |
1 | 0 | 0 |
attr |
UniSpeechSatForXVector.layer_weights |
1 | 0 | 0 |
meth |
UniSpeechSatPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
UniSpeechSatPreTrainedModel._get_feat_extract_output_lengths |
2 | 1 | 0 |
meth |
UniSpeechSatPreTrainedModel._get_feature_vector_attention_mask |
3 | 2 | 0 |
meth |
UniSpeechSatForSequenceClassification.init |
2 | 0 | 0 |
meth |
UniSpeechSatForSequenceClassification.freeze_feature_encoder |
1 | 0 | 0 |
meth |
UniSpeechSatForSequenceClassification.freeze_base_model |
1 | 0 | 0 |
meth |
UniSpeechSatForSequenceClassification.forward |
8 | 7 | 0 |
attr |
UniSpeechSatForSequenceClassification.unispeech_sat |
1 | 0 | 0 |
attr |
UniSpeechSatForSequenceClassification.projector |
1 | 0 | 0 |
attr |
UniSpeechSatForSequenceClassification.classifier |
1 | 0 | 0 |
attr |
UniSpeechSatForSequenceClassification.layer_weights |
1 | 0 | 0 |
meth |
UniSpeechSatForPreTraining.init |
2 | 1 | 0 |
meth |
UniSpeechSatForPreTraining.set_gumbel_temperature |
2 | 1 | 0 |
meth |
UniSpeechSatForPreTraining.freeze_feature_encoder |
1 | 0 | 0 |
meth |
UniSpeechSatForPreTraining.compute_contrastive_logits |
5 | 4 | 0 |
meth |
UniSpeechSatForPreTraining.forward |
7 | 6 | 0 |
attr |
UniSpeechSatForPreTraining.unispeech_sat |
1 | 0 | 0 |
attr |
UniSpeechSatForPreTraining.dropout_features |
1 | 0 | 0 |
attr |
UniSpeechSatForPreTraining.quantizer |
1 | 0 | 0 |
attr |
UniSpeechSatForPreTraining.project_q |
1 | 0 | 0 |
attr |
UniSpeechSatForPreTraining.project_hid |
1 | 0 | 0 |
attr |
UniSpeechSatForPreTraining.dropout |
1 | 0 | 0 |
attr |
UniSpeechSatForPreTraining.speaker_proj |
1 | 0 | 0 |
attr |
UniSpeechSatForPreTraining.label_embeddings_concat |
1 | 0 | 0 |
attr |
UniSpeechSatForPreTraining.layer_norm_for_extract |
1 | 0 | 0 |
transformers.models.unispeech_sat.modular_unispeech_sat (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
UniSpeechSatModel.init |
2 | 1 | 0 |
meth |
UniSpeechSatModel.freeze_feature_encoder |
1 | 0 | 0 |
meth |
UniSpeechSatModel.forward |
8 | 7 | 0 |
attr |
UniSpeechSatModel.feature_extractor |
1 | 0 | 0 |
attr |
UniSpeechSatModel.feature_projection |
1 | 0 | 0 |
attr |
UniSpeechSatModel.masked_spec_embed |
1 | 0 | 0 |
attr |
UniSpeechSatModel.encoder |
1 | 0 | 0 |
meth |
UniSpeechSatPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
UniSpeechSatPreTrainedModel._get_feat_extract_output_lengths |
2 | 1 | 0 |
meth |
UniSpeechSatPreTrainedModel._get_feature_vector_attention_mask |
3 | 2 | 0 |
meth |
UniSpeechSatForPreTraining.init |
2 | 1 | 0 |
meth |
UniSpeechSatForPreTraining.set_gumbel_temperature |
2 | 1 | 0 |
meth |
UniSpeechSatForPreTraining.freeze_feature_encoder |
1 | 0 | 0 |
meth |
UniSpeechSatForPreTraining.compute_contrastive_logits |
5 | 4 | 0 |
meth |
UniSpeechSatForPreTraining.forward |
7 | 6 | 0 |
attr |
UniSpeechSatForPreTraining.unispeech_sat |
1 | 0 | 0 |
attr |
UniSpeechSatForPreTraining.dropout_features |
1 | 0 | 0 |
attr |
UniSpeechSatForPreTraining.quantizer |
1 | 0 | 0 |
attr |
UniSpeechSatForPreTraining.project_q |
1 | 0 | 0 |
attr |
UniSpeechSatForPreTraining.project_hid |
1 | 0 | 0 |
attr |
UniSpeechSatForPreTraining.dropout |
1 | 0 | 0 |
attr |
UniSpeechSatForPreTraining.speaker_proj |
1 | 0 | 0 |
attr |
UniSpeechSatForPreTraining.label_embeddings_concat |
1 | 0 | 0 |
attr |
UniSpeechSatForPreTraining.layer_norm_for_extract |
1 | 0 | 0 |
transformers.models.univnet.configuration_univnet (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
UnivNetConfig.init |
14 | 0 | 0 |
attr |
UnivNetConfig.model_in_channels |
1 | 0 | 0 |
attr |
UnivNetConfig.model_hidden_channels |
1 | 0 | 0 |
attr |
UnivNetConfig.num_mel_bins |
1 | 0 | 0 |
attr |
UnivNetConfig.resblock_kernel_sizes |
1 | 0 | 0 |
attr |
UnivNetConfig.resblock_stride_sizes |
1 | 0 | 0 |
attr |
UnivNetConfig.resblock_dilation_sizes |
1 | 0 | 0 |
attr |
UnivNetConfig.kernel_predictor_num_blocks |
1 | 0 | 0 |
attr |
UnivNetConfig.kernel_predictor_hidden_channels |
1 | 0 | 0 |
attr |
UnivNetConfig.kernel_predictor_conv_size |
1 | 0 | 0 |
attr |
UnivNetConfig.kernel_predictor_dropout |
1 | 0 | 0 |
attr |
UnivNetConfig.initializer_range |
1 | 0 | 0 |
attr |
UnivNetConfig.leaky_relu_slope |
1 | 0 | 0 |
transformers.models.univnet.feature_extraction_univnet (31 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
UnivNetFeatureExtractor.init |
23 | 20 | 0 |
meth |
UnivNetFeatureExtractor.normalize |
2 | 0 | 0 |
meth |
UnivNetFeatureExtractor.denormalize |
2 | 0 | 0 |
meth |
UnivNetFeatureExtractor.batch_decode |
3 | 1 | 0 |
attr |
UnivNetFeatureExtractor.do_normalize |
1 | 0 | 0 |
attr |
UnivNetFeatureExtractor.num_mel_bins |
1 | 0 | 0 |
attr |
UnivNetFeatureExtractor.hop_length |
1 | 0 | 0 |
attr |
UnivNetFeatureExtractor.win_length |
1 | 0 | 0 |
attr |
UnivNetFeatureExtractor.win_function |
1 | 0 | 0 |
attr |
UnivNetFeatureExtractor.filter_length |
1 | 0 | 0 |
attr |
UnivNetFeatureExtractor.fmin |
1 | 0 | 0 |
attr |
UnivNetFeatureExtractor.fmax |
1 | 0 | 0 |
attr |
UnivNetFeatureExtractor.mel_floor |
1 | 0 | 0 |
attr |
UnivNetFeatureExtractor.max_length_s |
1 | 0 | 0 |
attr |
UnivNetFeatureExtractor.num_max_samples |
1 | 0 | 0 |
attr |
UnivNetFeatureExtractor.n_freqs |
1 | 0 | 0 |
attr |
UnivNetFeatureExtractor.window |
1 | 0 | 0 |
attr |
UnivNetFeatureExtractor.mel_filters |
1 | 0 | 0 |
attr |
UnivNetFeatureExtractor.center |
1 | 0 | 0 |
attr |
UnivNetFeatureExtractor.compression_factor |
1 | 0 | 0 |
attr |
UnivNetFeatureExtractor.compression_clip_val |
1 | 0 | 0 |
attr |
UnivNetFeatureExtractor.normalize_min |
1 | 0 | 0 |
attr |
UnivNetFeatureExtractor.normalize_max |
1 | 0 | 0 |
attr |
UnivNetFeatureExtractor.model_in_channels |
1 | 0 | 0 |
attr |
UnivNetFeatureExtractor.pad_end_length |
1 | 0 | 0 |
attr |
UnivNetFeatureExtractor.n_fft |
1 | 0 | 0 |
transformers.models.univnet.modeling_univnet (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
UnivNetModel.init |
2 | 1 | 0 |
meth |
UnivNetModel.forward |
7 | 6 | 0 |
meth |
UnivNetModel.apply_weight_norm |
1 | 0 | 0 |
meth |
UnivNetModel.remove_weight_norm |
1 | 0 | 0 |
attr |
UnivNetModel.num_kernels |
1 | 0 | 0 |
attr |
UnivNetModel.leaky_relu_slope |
1 | 0 | 0 |
attr |
UnivNetModel.conv_pre |
1 | 0 | 0 |
attr |
UnivNetModel.resblocks |
1 | 0 | 0 |
attr |
UnivNetModel.conv_post |
1 | 0 | 0 |
transformers.models.upernet.configuration_upernet (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
UperNetConfig.init |
13 | 0 | 0 |
attr |
UperNetConfig.backbone_config |
1 | 0 | 0 |
attr |
UperNetConfig.hidden_size |
1 | 0 | 0 |
attr |
UperNetConfig.initializer_range |
1 | 0 | 0 |
attr |
UperNetConfig.pool_scales |
1 | 0 | 0 |
attr |
UperNetConfig.use_auxiliary_head |
1 | 0 | 0 |
attr |
UperNetConfig.auxiliary_loss_weight |
1 | 0 | 0 |
attr |
UperNetConfig.auxiliary_in_channels |
1 | 0 | 0 |
attr |
UperNetConfig.auxiliary_channels |
1 | 0 | 0 |
attr |
UperNetConfig.auxiliary_num_convs |
1 | 0 | 0 |
attr |
UperNetConfig.auxiliary_concat_input |
1 | 0 | 0 |
attr |
UperNetConfig.loss_ignore_index |
1 | 0 | 0 |
transformers.models.upernet.modeling_upernet (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
UperNetForSemanticSegmentation.init |
2 | 0 | 0 |
meth |
UperNetForSemanticSegmentation.forward |
7 | 6 | 0 |
attr |
UperNetForSemanticSegmentation.backbone |
1 | 0 | 0 |
attr |
UperNetForSemanticSegmentation.decode_head |
1 | 0 | 0 |
attr |
UperNetForSemanticSegmentation.auxiliary_head |
1 | 0 | 0 |
transformers.models.vaultgemma.configuration_vaultgemma (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VaultGemmaConfig.init |
26 | 24 | 0 |
attr |
VaultGemmaConfig.pad_token_id |
1 | 0 | 0 |
attr |
VaultGemmaConfig.bos_token_id |
1 | 0 | 0 |
attr |
VaultGemmaConfig.eos_token_id |
1 | 0 | 0 |
attr |
VaultGemmaConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
VaultGemmaConfig.vocab_size |
1 | 0 | 0 |
attr |
VaultGemmaConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
VaultGemmaConfig.hidden_size |
1 | 0 | 0 |
attr |
VaultGemmaConfig.intermediate_size |
1 | 0 | 0 |
attr |
VaultGemmaConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
VaultGemmaConfig.num_attention_heads |
1 | 0 | 0 |
attr |
VaultGemmaConfig.head_dim |
1 | 0 | 0 |
attr |
VaultGemmaConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
VaultGemmaConfig.initializer_range |
1 | 0 | 0 |
attr |
VaultGemmaConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
VaultGemmaConfig.use_cache |
1 | 0 | 0 |
attr |
VaultGemmaConfig.attention_bias |
1 | 0 | 0 |
attr |
VaultGemmaConfig.attention_dropout |
1 | 0 | 0 |
attr |
VaultGemmaConfig.hidden_activation |
1 | 0 | 0 |
attr |
VaultGemmaConfig.query_pre_attn_scalar |
1 | 0 | 0 |
attr |
VaultGemmaConfig.sliding_window |
1 | 0 | 0 |
attr |
VaultGemmaConfig.final_logit_softcapping |
1 | 0 | 0 |
attr |
VaultGemmaConfig.attn_logit_softcapping |
1 | 0 | 0 |
attr |
VaultGemmaConfig.layer_types |
1 | 0 | 0 |
attr |
VaultGemmaConfig.rope_parameters |
1 | 0 | 0 |
transformers.models.vaultgemma.modeling_vaultgemma (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VaultGemmaModel.init |
2 | 1 | 0 |
attr |
VaultGemmaModel.padding_idx |
1 | 0 | 0 |
attr |
VaultGemmaModel.vocab_size |
1 | 0 | 0 |
attr |
VaultGemmaModel.embed_tokens |
1 | 0 | 0 |
attr |
VaultGemmaModel.layers |
1 | 0 | 0 |
attr |
VaultGemmaModel.norm |
1 | 0 | 0 |
attr |
VaultGemmaModel.rotary_emb |
1 | 0 | 0 |
attr |
VaultGemmaModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
VaultGemmaPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
VaultGemmaForCausalLM.init |
2 | 0 | 0 |
attr |
VaultGemmaForCausalLM.model |
1 | 0 | 0 |
attr |
VaultGemmaForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
VaultGemmaForCausalLM.lm_head |
1 | 0 | 0 |
transformers.models.vaultgemma.modular_vaultgemma (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
VaultGemmaModel |
1 | 0 | 0 |
meth |
VaultGemmaConfig.init |
26 | 24 | 0 |
attr |
VaultGemmaPreTrainedModel |
1 | 0 | 0 |
transformers.models.vibevoice_acoustic_tokenizer.configuration_vibevoice_acoustic_tokenizer (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
prop |
VibeVoiceAcousticTokenizerDecoderConfig.upsampling_ratios |
1 | 0 | 0 |
meth |
VibeVoiceAcousticTokenizerConfig.init |
14 | 0 | 0 |
prop |
VibeVoiceAcousticTokenizerConfig.hop_length |
1 | 0 | 0 |
prop |
VibeVoiceAcousticTokenizerConfig.encoder_config |
1 | 0 | 0 |
prop |
VibeVoiceAcousticTokenizerConfig.decoder_config |
1 | 0 | 0 |
attr |
VibeVoiceAcousticTokenizerConfig.channels |
1 | 0 | 0 |
attr |
VibeVoiceAcousticTokenizerConfig.hidden_size |
1 | 0 | 0 |
attr |
VibeVoiceAcousticTokenizerConfig.hidden_act |
1 | 0 | 0 |
attr |
VibeVoiceAcousticTokenizerConfig.kernel_size |
1 | 0 | 0 |
attr |
VibeVoiceAcousticTokenizerConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
VibeVoiceAcousticTokenizerConfig.layer_scale_init_value |
1 | 0 | 0 |
attr |
VibeVoiceAcousticTokenizerConfig.ffn_expansion |
1 | 0 | 0 |
attr |
VibeVoiceAcousticTokenizerConfig.initializer_range |
1 | 0 | 0 |
attr |
VibeVoiceAcousticTokenizerConfig.num_filters |
1 | 0 | 0 |
attr |
VibeVoiceAcousticTokenizerConfig.downsampling_ratios |
1 | 0 | 0 |
attr |
VibeVoiceAcousticTokenizerConfig.depths |
1 | 0 | 0 |
attr |
VibeVoiceAcousticTokenizerConfig.vae_std |
1 | 0 | 0 |
transformers.models.vibevoice_acoustic_tokenizer.feature_extraction_vibevoice_acoustic_tokenizer (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VibeVoiceAcousticTokenizerFeatureExtractor.init |
8 | 0 | 0 |
attr |
VibeVoiceAcousticTokenizerFeatureExtractor.normalize_audio |
1 | 0 | 0 |
attr |
VibeVoiceAcousticTokenizerFeatureExtractor.target_dB_FS |
1 | 0 | 0 |
attr |
VibeVoiceAcousticTokenizerFeatureExtractor.eps |
1 | 0 | 0 |
transformers.models.vibevoice_acoustic_tokenizer.modeling_vibevoice_acoustic_tokenizer (41 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VibeVoiceAcousticTokenizerEncoderModel.init |
2 | 0 | 0 |
meth |
VibeVoiceAcousticTokenizerEncoderModel.forward |
5 | 0 | 0 |
attr |
VibeVoiceAcousticTokenizerEncoderModel.stem |
1 | 0 | 0 |
attr |
VibeVoiceAcousticTokenizerEncoderModel.conv_layers |
1 | 0 | 0 |
attr |
VibeVoiceAcousticTokenizerEncoderModel.head |
1 | 0 | 0 |
meth |
VibeVoiceAcousticTokenizerDecoderModel.init |
2 | 0 | 0 |
meth |
VibeVoiceAcousticTokenizerDecoderModel.forward |
5 | 0 | 0 |
attr |
VibeVoiceAcousticTokenizerDecoderModel.stem |
1 | 0 | 0 |
attr |
VibeVoiceAcousticTokenizerDecoderModel.conv_layers |
1 | 0 | 0 |
attr |
VibeVoiceAcousticTokenizerDecoderModel.head |
1 | 0 | 0 |
meth |
VibeVoiceAcousticTokenizerPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
VibeVoiceAcousticTokenizerModel.init |
2 | 0 | 0 |
meth |
VibeVoiceAcousticTokenizerModel.encode |
5 | 0 | 0 |
meth |
VibeVoiceAcousticTokenizerModel.decode |
4 | 0 | 0 |
meth |
VibeVoiceAcousticTokenizerModel.forward |
6 | 0 | 0 |
attr |
VibeVoiceAcousticTokenizerModel.encoder |
1 | 0 | 0 |
attr |
VibeVoiceAcousticTokenizerModel.decoder |
1 | 0 | 0 |
transformers.models.vibevoice_acoustic_tokenizer.modular_vibevoice_acoustic_tokenizer (41 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VibeVoiceAcousticTokenizerEncoderModel.init |
2 | 0 | 0 |
meth |
VibeVoiceAcousticTokenizerEncoderModel.forward |
5 | 0 | 0 |
attr |
VibeVoiceAcousticTokenizerEncoderModel.stem |
1 | 0 | 0 |
attr |
VibeVoiceAcousticTokenizerEncoderModel.conv_layers |
1 | 0 | 0 |
attr |
VibeVoiceAcousticTokenizerEncoderModel.head |
1 | 0 | 0 |
meth |
VibeVoiceAcousticTokenizerDecoderModel.init |
2 | 0 | 0 |
meth |
VibeVoiceAcousticTokenizerDecoderModel.forward |
5 | 0 | 0 |
attr |
VibeVoiceAcousticTokenizerDecoderModel.stem |
1 | 0 | 0 |
attr |
VibeVoiceAcousticTokenizerDecoderModel.conv_layers |
1 | 0 | 0 |
attr |
VibeVoiceAcousticTokenizerDecoderModel.head |
1 | 0 | 0 |
meth |
VibeVoiceAcousticTokenizerPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
VibeVoiceAcousticTokenizerModel.init |
2 | 0 | 0 |
meth |
VibeVoiceAcousticTokenizerModel.encode |
5 | 0 | 0 |
meth |
VibeVoiceAcousticTokenizerModel.decode |
4 | 0 | 0 |
meth |
VibeVoiceAcousticTokenizerModel.forward |
6 | 0 | 0 |
attr |
VibeVoiceAcousticTokenizerModel.encoder |
1 | 0 | 0 |
attr |
VibeVoiceAcousticTokenizerModel.decoder |
1 | 0 | 0 |
transformers.models.vibevoice_asr.configuration_vibevoice_asr (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VibeVoiceAsrConfig.init |
9 | 0 | 0 |
attr |
VibeVoiceAsrConfig.acoustic_tokenizer_encoder_config |
1 | 0 | 0 |
attr |
VibeVoiceAsrConfig.semantic_tokenizer_encoder_config |
1 | 0 | 0 |
attr |
VibeVoiceAsrConfig.text_config |
1 | 0 | 0 |
attr |
VibeVoiceAsrConfig.audio_token_id |
1 | 0 | 0 |
attr |
VibeVoiceAsrConfig.audio_bos_token_id |
1 | 0 | 0 |
attr |
VibeVoiceAsrConfig.audio_eos_token_id |
1 | 0 | 0 |
attr |
VibeVoiceAsrConfig.acoustic_tokenizer_chunk_size |
1 | 0 | 0 |
transformers.models.vibevoice_asr.modeling_vibevoice_asr (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VibeVoiceAsrForConditionalGeneration.init |
2 | 1 | 0 |
meth |
VibeVoiceAsrForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
VibeVoiceAsrForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
VibeVoiceAsrForConditionalGeneration.get_output_embeddings |
1 | 0 | 0 |
meth |
VibeVoiceAsrForConditionalGeneration.set_output_embeddings |
2 | 0 | 0 |
meth |
VibeVoiceAsrForConditionalGeneration.set_decoder |
2 | 0 | 0 |
meth |
VibeVoiceAsrForConditionalGeneration.get_decoder |
1 | 0 | 0 |
meth |
VibeVoiceAsrForConditionalGeneration.prepare_inputs_for_generation |
4 | 0 | 0 |
attr |
VibeVoiceAsrForConditionalGeneration.vocab_size |
1 | 0 | 0 |
attr |
VibeVoiceAsrForConditionalGeneration.language_model |
1 | 0 | 0 |
attr |
VibeVoiceAsrForConditionalGeneration.multi_modal_projector |
1 | 0 | 0 |
attr |
VibeVoiceAsrForConditionalGeneration.acoustic_tokenizer_encoder |
1 | 0 | 0 |
attr |
VibeVoiceAsrForConditionalGeneration.semantic_tokenizer_encoder |
1 | 0 | 0 |
meth |
VibeVoiceAsrPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.vibevoice_asr.modular_vibevoice_asr (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VibeVoiceAsrForConditionalGeneration.init |
2 | 1 | 0 |
meth |
VibeVoiceAsrForConditionalGeneration.get_audio_features |
5 | 4 | 0 |
meth |
VibeVoiceAsrForConditionalGeneration.prepare_inputs_for_generation |
4 | 0 | 0 |
attr |
VibeVoiceAsrForConditionalGeneration.acoustic_tokenizer_encoder |
1 | 0 | 0 |
attr |
VibeVoiceAsrForConditionalGeneration.semantic_tokenizer_encoder |
1 | 0 | 0 |
meth |
VibeVoiceAsrConfig.init |
9 | 0 | 0 |
attr |
VibeVoiceAsrConfig.acoustic_tokenizer_encoder_config |
1 | 0 | 0 |
attr |
VibeVoiceAsrConfig.semantic_tokenizer_encoder_config |
1 | 0 | 0 |
attr |
VibeVoiceAsrConfig.text_config |
1 | 0 | 0 |
attr |
VibeVoiceAsrConfig.audio_token_id |
1 | 0 | 0 |
attr |
VibeVoiceAsrConfig.audio_bos_token_id |
1 | 0 | 0 |
attr |
VibeVoiceAsrConfig.audio_eos_token_id |
1 | 0 | 0 |
attr |
VibeVoiceAsrConfig.acoustic_tokenizer_chunk_size |
1 | 0 | 0 |
transformers.models.vibevoice_asr.processing_vibevoice_asr (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VibeVoiceAsrProcessor.init |
8 | 0 | 0 |
meth |
VibeVoiceAsrProcessor.decode |
4 | 0 | 0 |
attr |
VibeVoiceAsrProcessor.audio_token |
1 | 0 | 0 |
attr |
VibeVoiceAsrProcessor.audio_token_id |
1 | 0 | 0 |
attr |
VibeVoiceAsrProcessor.audio_bos_token |
1 | 0 | 0 |
attr |
VibeVoiceAsrProcessor.audio_bos_token_id |
1 | 0 | 0 |
attr |
VibeVoiceAsrProcessor.audio_eos_token |
1 | 0 | 0 |
attr |
VibeVoiceAsrProcessor.audio_eos_token_id |
1 | 0 | 0 |
attr |
VibeVoiceAsrProcessor.audio_duration_token |
1 | 0 | 0 |
transformers.models.video_llama_3.configuration_video_llama_3 (34 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VideoLlama3Config.init |
7 | 0 | 0 |
attr |
VideoLlama3Config.image_token_id |
1 | 0 | 0 |
attr |
VideoLlama3Config.video_token_id |
1 | 0 | 0 |
attr |
VideoLlama3Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
VideoLlama3Config.vision_config |
1 | 0 | 0 |
attr |
VideoLlama3Config.text_config |
1 | 0 | 0 |
meth |
VideoLlama3VisionConfig.init |
12 | 0 | 0 |
attr |
VideoLlama3VisionConfig.hidden_size |
1 | 0 | 0 |
attr |
VideoLlama3VisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
VideoLlama3VisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
VideoLlama3VisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
VideoLlama3VisionConfig.num_channels |
1 | 0 | 0 |
attr |
VideoLlama3VisionConfig.patch_size |
1 | 0 | 0 |
attr |
VideoLlama3VisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
VideoLlama3VisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
VideoLlama3VisionConfig.hidden_act |
1 | 0 | 0 |
attr |
VideoLlama3VisionConfig.initializer_range |
1 | 0 | 0 |
transformers.models.video_llama_3.image_processing_video_llama_3 (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VideoLlama3ImageProcessor.init |
16 | 15 | 0 |
meth |
VideoLlama3ImageProcessor._preprocess |
16 | 15 | 0 |
meth |
VideoLlama3ImageProcessor.preprocess |
20 | 19 | 0 |
meth |
VideoLlama3ImageProcessor.get_number_of_image_patches |
4 | 2 | 0 |
attr |
VideoLlama3ImageProcessor.min_pixels |
1 | 0 | 0 |
attr |
VideoLlama3ImageProcessor.max_pixels |
1 | 0 | 0 |
attr |
VideoLlama3ImageProcessor.size |
1 | 0 | 0 |
attr |
VideoLlama3ImageProcessor.do_resize |
1 | 0 | 0 |
attr |
VideoLlama3ImageProcessor.resample |
1 | 0 | 0 |
attr |
VideoLlama3ImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
VideoLlama3ImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
VideoLlama3ImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
VideoLlama3ImageProcessor.image_mean |
1 | 0 | 0 |
attr |
VideoLlama3ImageProcessor.image_std |
1 | 0 | 0 |
attr |
VideoLlama3ImageProcessor.patch_size |
1 | 0 | 0 |
attr |
VideoLlama3ImageProcessor.temporal_patch_size |
1 | 0 | 0 |
attr |
VideoLlama3ImageProcessor.merge_size |
1 | 0 | 0 |
attr |
VideoLlama3ImageProcessor.do_convert_rgb |
1 | 0 | 0 |
transformers.models.video_llama_3.image_processing_video_llama_3_fast (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VideoLlama3ImageProcessorFast.init |
2 | 1 | 0 |
meth |
VideoLlama3ImageProcessorFast._further_process_kwargs |
5 | 4 | 0 |
meth |
VideoLlama3ImageProcessorFast._preprocess |
16 | 14 | 0 |
meth |
VideoLlama3ImageProcessorFast.get_number_of_image_patches |
4 | 2 | 0 |
transformers.models.video_llama_3.modeling_video_llama_3 (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VideoLlama3Model.init |
2 | 1 | 0 |
meth |
VideoLlama3Model.get_input_embeddings |
1 | 0 | 0 |
meth |
VideoLlama3Model.set_input_embeddings |
2 | 0 | 0 |
meth |
VideoLlama3Model.get_placeholder_mask |
5 | 4 | 0 |
attr |
VideoLlama3Model.vision_model |
1 | 0 | 0 |
attr |
VideoLlama3Model.projector |
1 | 0 | 0 |
attr |
VideoLlama3Model.language_model |
1 | 0 | 0 |
meth |
VideoLlama3VisionModel.init |
2 | 1 | 0 |
meth |
VideoLlama3VisionModel.pixel_unshuffle |
4 | 3 | 0 |
attr |
VideoLlama3VisionModel.rotary_pos_emb |
1 | 0 | 0 |
attr |
VideoLlama3VisionModel.embeddings |
1 | 0 | 0 |
attr |
VideoLlama3VisionModel.encoder |
1 | 0 | 0 |
attr |
VideoLlama3VisionModel.post_layernorm |
1 | 0 | 0 |
meth |
VideoLlama3PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
VideoLlama3ForConditionalGeneration.init |
2 | 1 | 0 |
meth |
VideoLlama3ForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
VideoLlama3ForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
VideoLlama3ForConditionalGeneration.prepare_inputs_for_generation |
17 | 8 | 0 |
meth |
VideoLlama3ForConditionalGeneration._expand_inputs_for_generation |
5 | 4 | 0 |
attr |
VideoLlama3ForConditionalGeneration.model |
1 | 0 | 0 |
attr |
VideoLlama3ForConditionalGeneration.lm_head |
1 | 0 | 0 |
transformers.models.video_llama_3.modular_video_llama_3 (59 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VideoLlama3VideoProcessor._preprocess |
18 | 16 | 0 |
meth |
VideoLlama3VisionModel.init |
2 | 1 | 0 |
meth |
VideoLlama3VisionModel.pixel_unshuffle |
4 | 3 | 0 |
attr |
VideoLlama3VisionModel.rotary_pos_emb |
1 | 0 | 0 |
attr |
VideoLlama3VisionModel.embeddings |
1 | 0 | 0 |
attr |
VideoLlama3VisionModel.encoder |
1 | 0 | 0 |
attr |
VideoLlama3VisionModel.post_layernorm |
1 | 0 | 0 |
meth |
VideoLlama3Model.init |
2 | 1 | 0 |
meth |
VideoLlama3Model.get_rope_index |
1 | 0 | 0 |
meth |
VideoLlama3Model.get_vision_position_ids |
1 | 0 | 0 |
meth |
VideoLlama3Model.compute_3d_position_ids |
1 | 0 | 0 |
attr |
VideoLlama3Model.vision_model |
1 | 0 | 0 |
attr |
VideoLlama3Model.projector |
1 | 0 | 0 |
attr |
VideoLlama3Model.language_model |
1 | 0 | 0 |
meth |
VideoLlama3Processor.model_input_names |
1 | 0 | 0 |
meth |
VideoLlama3PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
VideoLlama3ImageProcessor.init |
16 | 15 | 0 |
meth |
VideoLlama3ImageProcessor.preprocess |
20 | 19 | 0 |
attr |
VideoLlama3ImageProcessor.image_mean |
1 | 0 | 0 |
attr |
VideoLlama3ImageProcessor.image_std |
1 | 0 | 0 |
meth |
VideoLlama3Config.init |
7 | 0 | 0 |
attr |
VideoLlama3Config.image_token_id |
1 | 0 | 0 |
attr |
VideoLlama3Config.video_token_id |
1 | 0 | 0 |
attr |
VideoLlama3Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
VideoLlama3Config.vision_config |
1 | 0 | 0 |
attr |
VideoLlama3Config.text_config |
1 | 0 | 0 |
meth |
VideoLlama3ForConditionalGeneration.init |
2 | 1 | 0 |
meth |
VideoLlama3ForConditionalGeneration.prepare_inputs_for_generation |
17 | 8 | 0 |
meth |
VideoLlama3ForConditionalGeneration._prepare_position_ids_for_generation |
1 | 0 | 0 |
meth |
VideoLlama3ForConditionalGeneration._expand_inputs_for_generation |
5 | 4 | 0 |
meth |
VideoLlama3VisionConfig.init |
12 | 0 | 0 |
attr |
VideoLlama3VisionConfig.initializer_range |
1 | 0 | 0 |
transformers.models.video_llama_3.processing_video_llama_3 (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VideoLlama3Processor.init |
6 | 0 | 0 |
meth |
VideoLlama3Processor._get_num_multimodal_tokens |
4 | 0 | 0 |
meth |
VideoLlama3Processor.post_process_image_text_to_text |
5 | 0 | 0 |
attr |
VideoLlama3Processor.image_token |
1 | 0 | 0 |
attr |
VideoLlama3Processor.video_token |
1 | 0 | 0 |
attr |
VideoLlama3Processor.image_token_id |
1 | 0 | 0 |
attr |
VideoLlama3Processor.video_token_id |
1 | 0 | 0 |
transformers.models.video_llama_3.video_processing_video_llama_3 (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VideoLlama3VideoProcessor.init |
2 | 1 | 0 |
meth |
VideoLlama3VideoProcessor._further_process_kwargs |
5 | 4 | 0 |
meth |
VideoLlama3VideoProcessor.sample_frames |
8 | 6 | 0 |
meth |
VideoLlama3VideoProcessor._preprocess |
18 | 16 | 0 |
meth |
VideoLlama3VideoProcessor.get_num_of_video_patches |
5 | 3 | 0 |
transformers.models.video_llava.configuration_video_llava (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VideoLlavaConfig.init |
13 | 0 | 0 |
attr |
VideoLlavaConfig.image_token_index |
1 | 0 | 0 |
attr |
VideoLlavaConfig.video_token_index |
1 | 0 | 0 |
attr |
VideoLlavaConfig.projector_hidden_act |
1 | 0 | 0 |
attr |
VideoLlavaConfig.vision_feature_select_strategy |
1 | 0 | 0 |
attr |
VideoLlavaConfig.vision_feature_layer |
1 | 0 | 0 |
attr |
VideoLlavaConfig.image_seq_length |
1 | 0 | 0 |
attr |
VideoLlavaConfig.video_seq_length |
1 | 0 | 0 |
attr |
VideoLlavaConfig.multimodal_projector_bias |
1 | 0 | 0 |
attr |
VideoLlavaConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
VideoLlavaConfig.vision_config |
1 | 0 | 0 |
attr |
VideoLlavaConfig.text_config |
1 | 0 | 0 |
transformers.models.video_llava.image_processing_video_llava (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VideoLlavaImageProcessor.init |
13 | 12 | 0 |
meth |
VideoLlavaImageProcessor.resize |
7 | 6 | 0 |
attr |
VideoLlavaImageProcessor.do_resize |
1 | 0 | 0 |
attr |
VideoLlavaImageProcessor.size |
1 | 0 | 0 |
attr |
VideoLlavaImageProcessor.resample |
1 | 0 | 0 |
attr |
VideoLlavaImageProcessor.do_center_crop |
1 | 0 | 0 |
attr |
VideoLlavaImageProcessor.crop_size |
1 | 0 | 0 |
attr |
VideoLlavaImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
VideoLlavaImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
VideoLlavaImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
VideoLlavaImageProcessor.image_mean |
1 | 0 | 0 |
attr |
VideoLlavaImageProcessor.image_std |
1 | 0 | 0 |
attr |
VideoLlavaImageProcessor.do_convert_rgb |
1 | 0 | 0 |
transformers.models.video_llava.modeling_video_llava (29 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VideoLlavaForConditionalGeneration.init |
2 | 1 | 0 |
meth |
VideoLlavaForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
VideoLlavaForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
VideoLlavaForConditionalGeneration.prepare_inputs_for_generation |
11 | 0 | 0 |
attr |
VideoLlavaForConditionalGeneration.model |
1 | 0 | 0 |
attr |
VideoLlavaForConditionalGeneration.lm_head |
1 | 0 | 0 |
meth |
VideoLlavaPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
VideoLlavaModel.init |
2 | 1 | 0 |
meth |
VideoLlavaModel.get_input_embeddings |
1 | 0 | 0 |
meth |
VideoLlavaModel.set_input_embeddings |
2 | 0 | 0 |
meth |
VideoLlavaModel.get_placeholder_mask |
5 | 4 | 0 |
attr |
VideoLlavaModel.video_tower |
1 | 0 | 0 |
attr |
VideoLlavaModel.image_tower |
1 | 0 | 0 |
attr |
VideoLlavaModel.multi_modal_projector |
1 | 0 | 0 |
attr |
VideoLlavaModel.vocab_size |
1 | 0 | 0 |
attr |
VideoLlavaModel.language_model |
1 | 0 | 0 |
transformers.models.video_llava.processing_video_llava (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VideoLlavaProcessor.init |
11 | 0 | 0 |
attr |
VideoLlavaProcessor.patch_size |
1 | 0 | 0 |
attr |
VideoLlavaProcessor.num_additional_image_tokens |
1 | 0 | 0 |
attr |
VideoLlavaProcessor.vision_feature_select_strategy |
1 | 0 | 0 |
attr |
VideoLlavaProcessor.image_token |
1 | 0 | 0 |
attr |
VideoLlavaProcessor.video_token |
1 | 0 | 0 |
attr |
VideoLlavaProcessor.image_token_id |
1 | 0 | 0 |
attr |
VideoLlavaProcessor.video_token_id |
1 | 0 | 0 |
transformers.models.videomae.configuration_videomae (44 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VideoMAEConfig.init |
23 | 0 | 0 |
attr |
VideoMAEConfig.image_size |
1 | 0 | 0 |
attr |
VideoMAEConfig.patch_size |
1 | 0 | 0 |
attr |
VideoMAEConfig.num_channels |
1 | 0 | 0 |
attr |
VideoMAEConfig.num_frames |
1 | 0 | 0 |
attr |
VideoMAEConfig.tubelet_size |
1 | 0 | 0 |
attr |
VideoMAEConfig.hidden_size |
1 | 0 | 0 |
attr |
VideoMAEConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
VideoMAEConfig.num_attention_heads |
1 | 0 | 0 |
attr |
VideoMAEConfig.intermediate_size |
1 | 0 | 0 |
attr |
VideoMAEConfig.hidden_act |
1 | 0 | 0 |
attr |
VideoMAEConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
VideoMAEConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
VideoMAEConfig.initializer_range |
1 | 0 | 0 |
attr |
VideoMAEConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
VideoMAEConfig.qkv_bias |
1 | 0 | 0 |
attr |
VideoMAEConfig.use_mean_pooling |
1 | 0 | 0 |
attr |
VideoMAEConfig.decoder_num_attention_heads |
1 | 0 | 0 |
attr |
VideoMAEConfig.decoder_hidden_size |
1 | 0 | 0 |
attr |
VideoMAEConfig.decoder_num_hidden_layers |
1 | 0 | 0 |
attr |
VideoMAEConfig.decoder_intermediate_size |
1 | 0 | 0 |
attr |
VideoMAEConfig.norm_pix_loss |
1 | 0 | 0 |
transformers.models.videomae.image_processing_videomae (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VideoMAEImageProcessor.init |
12 | 11 | 0 |
meth |
VideoMAEImageProcessor.resize |
7 | 6 | 0 |
attr |
VideoMAEImageProcessor.do_resize |
1 | 0 | 0 |
attr |
VideoMAEImageProcessor.size |
1 | 0 | 0 |
attr |
VideoMAEImageProcessor.do_center_crop |
1 | 0 | 0 |
attr |
VideoMAEImageProcessor.crop_size |
1 | 0 | 0 |
attr |
VideoMAEImageProcessor.resample |
1 | 0 | 0 |
attr |
VideoMAEImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
VideoMAEImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
VideoMAEImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
VideoMAEImageProcessor.image_mean |
1 | 0 | 0 |
attr |
VideoMAEImageProcessor.image_std |
1 | 0 | 0 |
transformers.models.videomae.modeling_videomae (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VideoMAEForVideoClassification.init |
2 | 0 | 0 |
attr |
VideoMAEForVideoClassification.num_labels |
1 | 0 | 0 |
attr |
VideoMAEForVideoClassification.videomae |
1 | 0 | 0 |
attr |
VideoMAEForVideoClassification.fc_norm |
1 | 0 | 0 |
attr |
VideoMAEForVideoClassification.classifier |
1 | 0 | 0 |
meth |
VideoMAEModel.init |
2 | 0 | 0 |
meth |
VideoMAEModel.get_input_embeddings |
1 | 0 | 0 |
attr |
VideoMAEModel.embeddings |
1 | 0 | 0 |
attr |
VideoMAEModel.encoder |
1 | 0 | 0 |
attr |
VideoMAEModel.layernorm |
1 | 0 | 0 |
meth |
VideoMAEForPreTraining.init |
2 | 0 | 0 |
attr |
VideoMAEForPreTraining.videomae |
1 | 0 | 0 |
attr |
VideoMAEForPreTraining.encoder_to_decoder |
1 | 0 | 0 |
attr |
VideoMAEForPreTraining.mask_token |
1 | 0 | 0 |
attr |
VideoMAEForPreTraining.position_embeddings |
1 | 0 | 0 |
attr |
VideoMAEForPreTraining.decoder |
1 | 0 | 0 |
transformers.models.videomae.video_processing_videomae (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VideoMAEVideoProcessor.preprocess |
3 | 0 | 0 |
transformers.models.vilt.configuration_vilt (44 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ViltConfig.init |
23 | 0 | 0 |
attr |
ViltConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
ViltConfig.pad_token_id |
1 | 0 | 0 |
attr |
ViltConfig.vocab_size |
1 | 0 | 0 |
attr |
ViltConfig.type_vocab_size |
1 | 0 | 0 |
attr |
ViltConfig.modality_type_vocab_size |
1 | 0 | 0 |
attr |
ViltConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
ViltConfig.hidden_size |
1 | 0 | 0 |
attr |
ViltConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
ViltConfig.num_attention_heads |
1 | 0 | 0 |
attr |
ViltConfig.intermediate_size |
1 | 0 | 0 |
attr |
ViltConfig.hidden_act |
1 | 0 | 0 |
attr |
ViltConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
ViltConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
ViltConfig.initializer_range |
1 | 0 | 0 |
attr |
ViltConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
ViltConfig.image_size |
1 | 0 | 0 |
attr |
ViltConfig.patch_size |
1 | 0 | 0 |
attr |
ViltConfig.num_channels |
1 | 0 | 0 |
attr |
ViltConfig.qkv_bias |
1 | 0 | 0 |
attr |
ViltConfig.max_image_length |
1 | 0 | 0 |
attr |
ViltConfig.num_images |
1 | 0 | 0 |
transformers.models.vilt.image_processing_vilt (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ViltImageProcessor.init |
12 | 11 | 0 |
meth |
ViltImageProcessor.resize |
8 | 7 | 0 |
attr |
ViltImageProcessor.do_resize |
1 | 0 | 0 |
attr |
ViltImageProcessor.size |
1 | 0 | 0 |
attr |
ViltImageProcessor.size_divisor |
1 | 0 | 0 |
attr |
ViltImageProcessor.resample |
1 | 0 | 0 |
attr |
ViltImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
ViltImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
ViltImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
ViltImageProcessor.image_mean |
1 | 0 | 0 |
attr |
ViltImageProcessor.image_std |
1 | 0 | 0 |
attr |
ViltImageProcessor.do_pad |
1 | 0 | 0 |
transformers.models.vilt.image_processing_vilt_fast (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ViltImageProcessorFast._preprocess |
15 | 14 | 0 |
transformers.models.vilt.modeling_vilt (58 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ViltForQuestionAnswering.init |
2 | 0 | 0 |
meth |
ViltForQuestionAnswering.forward |
13 | 12 | 0 |
attr |
ViltForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
ViltForQuestionAnswering.vilt |
1 | 0 | 0 |
attr |
ViltForQuestionAnswering.classifier |
1 | 0 | 0 |
meth |
ViltForImageAndTextRetrieval.init |
2 | 0 | 0 |
meth |
ViltForImageAndTextRetrieval.forward |
13 | 12 | 0 |
attr |
ViltForImageAndTextRetrieval.vilt |
1 | 0 | 0 |
attr |
ViltForImageAndTextRetrieval.rank_output |
1 | 0 | 0 |
meth |
ViltForImagesAndTextClassification.init |
2 | 0 | 0 |
meth |
ViltForImagesAndTextClassification.forward |
13 | 12 | 0 |
attr |
ViltForImagesAndTextClassification.num_labels |
1 | 0 | 0 |
attr |
ViltForImagesAndTextClassification.vilt |
1 | 0 | 0 |
attr |
ViltForImagesAndTextClassification.classifier |
1 | 0 | 0 |
meth |
ViltForTokenClassification.init |
2 | 0 | 0 |
meth |
ViltForTokenClassification.forward |
13 | 12 | 0 |
attr |
ViltForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
ViltForTokenClassification.vilt |
1 | 0 | 0 |
attr |
ViltForTokenClassification.dropout |
1 | 0 | 0 |
attr |
ViltForTokenClassification.classifier |
1 | 0 | 0 |
meth |
ViltModel.init |
3 | 0 | 0 |
meth |
ViltModel.get_input_embeddings |
1 | 0 | 0 |
meth |
ViltModel.set_input_embeddings |
2 | 0 | 0 |
meth |
ViltModel.forward |
13 | 12 | 0 |
attr |
ViltModel.embeddings |
1 | 0 | 0 |
attr |
ViltModel.encoder |
1 | 0 | 0 |
attr |
ViltModel.layernorm |
1 | 0 | 0 |
attr |
ViltModel.pooler |
1 | 0 | 0 |
meth |
ViltForMaskedLM.init |
2 | 0 | 0 |
meth |
ViltForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
ViltForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
meth |
ViltForMaskedLM.forward |
13 | 12 | 0 |
attr |
ViltForMaskedLM.vilt |
1 | 0 | 0 |
attr |
ViltForMaskedLM.mlm_score |
1 | 0 | 0 |
meth |
ViltLayer.init |
2 | 0 | 0 |
meth |
ViltLayer.forward |
4 | 0 | 0 |
attr |
ViltLayer.chunk_size_feed_forward |
1 | 0 | 0 |
attr |
ViltLayer.seq_len_dim |
1 | 0 | 0 |
attr |
ViltLayer.attention |
1 | 0 | 0 |
attr |
ViltLayer.intermediate |
1 | 0 | 0 |
attr |
ViltLayer.output |
1 | 0 | 0 |
attr |
ViltLayer.layernorm_before |
1 | 0 | 0 |
attr |
ViltLayer.layernorm_after |
1 | 0 | 0 |
meth |
ViltPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.vilt.processing_vilt (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ViltProcessor.init |
4 | 0 | 0 |
transformers.models.vipllava.configuration_vipllava (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VipLlavaConfig.init |
10 | 0 | 0 |
attr |
VipLlavaConfig.image_token_index |
1 | 0 | 0 |
attr |
VipLlavaConfig.projector_hidden_act |
1 | 0 | 0 |
attr |
VipLlavaConfig.projector_layernorm_eps |
1 | 0 | 0 |
attr |
VipLlavaConfig.vision_feature_layers |
1 | 0 | 0 |
attr |
VipLlavaConfig.image_seq_length |
1 | 0 | 0 |
attr |
VipLlavaConfig.vision_config |
1 | 0 | 0 |
attr |
VipLlavaConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
VipLlavaConfig.text_config |
1 | 0 | 0 |
transformers.models.vipllava.modeling_vipllava (26 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VipLlavaModel.init |
2 | 1 | 0 |
meth |
VipLlavaModel.get_input_embeddings |
1 | 0 | 0 |
meth |
VipLlavaModel.set_input_embeddings |
2 | 0 | 0 |
meth |
VipLlavaModel.get_placeholder_mask |
4 | 3 | 0 |
meth |
VipLlavaModel.forward |
14 | 13 | 0 |
attr |
VipLlavaModel.vision_tower |
1 | 0 | 0 |
attr |
VipLlavaModel.multi_modal_projector |
1 | 0 | 0 |
attr |
VipLlavaModel.language_model |
1 | 0 | 0 |
meth |
VipLlavaForConditionalGeneration.init |
2 | 1 | 0 |
meth |
VipLlavaForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
VipLlavaForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
VipLlavaForConditionalGeneration.forward |
16 | 15 | 0 |
meth |
VipLlavaForConditionalGeneration.prepare_inputs_for_generation |
10 | 0 | 0 |
attr |
VipLlavaForConditionalGeneration.model |
1 | 0 | 0 |
attr |
VipLlavaForConditionalGeneration.lm_head |
1 | 0 | 0 |
transformers.models.vipllava.modular_vipllava (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VipLlavaModel.forward |
14 | 13 | 0 |
meth |
VipLlavaForConditionalGeneration.forward |
16 | 15 | 0 |
transformers.models.vision_encoder_decoder.configuration_vision_encoder_decoder (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VisionEncoderDecoderConfig.init |
2 | 0 | 0 |
meth |
VisionEncoderDecoderConfig.from_encoder_decoder_configs |
4 | 3 | 0 |
attr |
VisionEncoderDecoderConfig.encoder |
1 | 0 | 0 |
attr |
VisionEncoderDecoderConfig.decoder |
1 | 0 | 0 |
attr |
VisionEncoderDecoderConfig.is_encoder_decoder |
1 | 0 | 0 |
transformers.models.vision_encoder_decoder.modeling_vision_encoder_decoder (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VisionEncoderDecoderModel.init |
4 | 3 | 0 |
meth |
VisionEncoderDecoderModel.get_input_embeddings |
1 | 0 | 0 |
meth |
VisionEncoderDecoderModel.get_output_embeddings |
1 | 0 | 0 |
meth |
VisionEncoderDecoderModel.set_output_embeddings |
2 | 0 | 0 |
meth |
VisionEncoderDecoderModel.from_encoder_decoder_pretrained |
5 | 3 | 0 |
meth |
VisionEncoderDecoderModel.forward |
14 | 13 | 0 |
meth |
VisionEncoderDecoderModel.prepare_decoder_input_ids_from_labels |
2 | 1 | 0 |
attr |
VisionEncoderDecoderModel.encoder |
1 | 0 | 0 |
attr |
VisionEncoderDecoderModel.decoder |
1 | 0 | 0 |
attr |
VisionEncoderDecoderModel.enc_to_dec_proj |
1 | 0 | 0 |
transformers.models.vision_text_dual_encoder.configuration_vision_text_dual_encoder (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VisionTextDualEncoderConfig.init |
4 | 0 | 0 |
meth |
VisionTextDualEncoderConfig.from_vision_text_configs |
4 | 2 | 0 |
attr |
VisionTextDualEncoderConfig.text_config |
1 | 0 | 0 |
attr |
VisionTextDualEncoderConfig.projection_dim |
1 | 0 | 0 |
attr |
VisionTextDualEncoderConfig.logit_scale_init_value |
1 | 0 | 0 |
attr |
VisionTextDualEncoderConfig.vision_config |
1 | 0 | 0 |
transformers.models.vision_text_dual_encoder.modeling_vision_text_dual_encoder (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VisionTextDualEncoderModel.init |
4 | 3 | 0 |
meth |
VisionTextDualEncoderModel.forward |
11 | 10 | 0 |
meth |
VisionTextDualEncoderModel.from_vision_text_pretrained |
5 | 3 | 0 |
attr |
VisionTextDualEncoderModel.vision_model |
1 | 0 | 0 |
attr |
VisionTextDualEncoderModel.text_model |
1 | 0 | 0 |
attr |
VisionTextDualEncoderModel.vision_embed_dim |
1 | 0 | 0 |
attr |
VisionTextDualEncoderModel.text_embed_dim |
1 | 0 | 0 |
attr |
VisionTextDualEncoderModel.projection_dim |
1 | 0 | 0 |
attr |
VisionTextDualEncoderModel.visual_projection |
1 | 0 | 0 |
attr |
VisionTextDualEncoderModel.text_projection |
1 | 0 | 0 |
attr |
VisionTextDualEncoderModel.logit_scale |
1 | 0 | 0 |
transformers.models.vision_text_dual_encoder.processing_vision_text_dual_encoder (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VisionTextDualEncoderProcessor.init |
4 | 0 | 0 |
transformers.models.visual_bert.configuration_visual_bert (40 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VisualBertConfig.init |
21 | 0 | 0 |
attr |
VisualBertConfig.pad_token_id |
1 | 0 | 0 |
attr |
VisualBertConfig.bos_token_id |
1 | 0 | 0 |
attr |
VisualBertConfig.eos_token_id |
1 | 0 | 0 |
attr |
VisualBertConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
VisualBertConfig.vocab_size |
1 | 0 | 0 |
attr |
VisualBertConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
VisualBertConfig.hidden_size |
1 | 0 | 0 |
attr |
VisualBertConfig.visual_embedding_dim |
1 | 0 | 0 |
attr |
VisualBertConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
VisualBertConfig.num_attention_heads |
1 | 0 | 0 |
attr |
VisualBertConfig.intermediate_size |
1 | 0 | 0 |
attr |
VisualBertConfig.hidden_act |
1 | 0 | 0 |
attr |
VisualBertConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
VisualBertConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
VisualBertConfig.initializer_range |
1 | 0 | 0 |
attr |
VisualBertConfig.type_vocab_size |
1 | 0 | 0 |
attr |
VisualBertConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
VisualBertConfig.bypass_transformer |
1 | 0 | 0 |
attr |
VisualBertConfig.special_visual_initialize |
1 | 0 | 0 |
transformers.models.visual_bert.modeling_visual_bert (62 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VisualBertForQuestionAnswering.__init__ |
2 | 0 | 0 |
meth |
VisualBertForQuestionAnswering.forward |
15 | 14 | 0 |
attr |
VisualBertForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
VisualBertForQuestionAnswering.visual_bert |
1 | 0 | 0 |
attr |
VisualBertForQuestionAnswering.dropout |
1 | 0 | 0 |
attr |
VisualBertForQuestionAnswering.cls |
1 | 0 | 0 |
meth |
VisualBertForRegionToPhraseAlignment.__init__ |
2 | 0 | 0 |
meth |
VisualBertForRegionToPhraseAlignment.forward |
16 | 15 | 0 |
attr |
VisualBertForRegionToPhraseAlignment.visual_bert |
1 | 0 | 0 |
attr |
VisualBertForRegionToPhraseAlignment.dropout |
1 | 0 | 0 |
attr |
VisualBertForRegionToPhraseAlignment.cls |
1 | 0 | 0 |
attr |
VisualBertForRegionToPhraseAlignment.attention |
1 | 0 | 0 |
meth |
VisualBertModel.__init__ |
3 | 0 | 0 |
meth |
VisualBertModel.get_input_embeddings |
1 | 0 | 0 |
meth |
VisualBertModel.set_input_embeddings |
2 | 0 | 0 |
meth |
VisualBertModel.forward |
14 | 13 | 0 |
attr |
VisualBertModel.embeddings |
1 | 0 | 0 |
attr |
VisualBertModel.encoder |
1 | 0 | 0 |
attr |
VisualBertModel.pooler |
1 | 0 | 0 |
attr |
VisualBertModel.bypass_transformer |
1 | 0 | 0 |
attr |
VisualBertModel.additional_layer |
1 | 0 | 0 |
meth |
VisualBertForPreTraining.__init__ |
2 | 0 | 0 |
meth |
VisualBertForPreTraining.get_output_embeddings |
1 | 0 | 0 |
meth |
VisualBertForPreTraining.set_output_embeddings |
2 | 0 | 0 |
meth |
VisualBertForPreTraining.forward |
16 | 15 | 0 |
attr |
VisualBertForPreTraining.visual_bert |
1 | 0 | 0 |
attr |
VisualBertForPreTraining.cls |
1 | 0 | 0 |
meth |
VisualBertLayer.__init__ |
2 | 0 | 0 |
meth |
VisualBertLayer.forward |
4 | 0 | 0 |
meth |
VisualBertLayer.feed_forward_chunk |
2 | 0 | 0 |
attr |
VisualBertLayer.chunk_size_feed_forward |
1 | 0 | 0 |
attr |
VisualBertLayer.seq_len_dim |
1 | 0 | 0 |
attr |
VisualBertLayer.attention |
1 | 0 | 0 |
attr |
VisualBertLayer.intermediate |
1 | 0 | 0 |
attr |
VisualBertLayer.output |
1 | 0 | 0 |
meth |
VisualBertPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
VisualBertForMultipleChoice.__init__ |
2 | 0 | 0 |
meth |
VisualBertForMultipleChoice.forward |
15 | 14 | 0 |
attr |
VisualBertForMultipleChoice.visual_bert |
1 | 0 | 0 |
attr |
VisualBertForMultipleChoice.dropout |
1 | 0 | 0 |
attr |
VisualBertForMultipleChoice.cls |
1 | 0 | 0 |
meth |
VisualBertForVisualReasoning.__init__ |
2 | 0 | 0 |
meth |
VisualBertForVisualReasoning.forward |
15 | 14 | 0 |
attr |
VisualBertForVisualReasoning.num_labels |
1 | 0 | 0 |
attr |
VisualBertForVisualReasoning.visual_bert |
1 | 0 | 0 |
attr |
VisualBertForVisualReasoning.dropout |
1 | 0 | 0 |
attr |
VisualBertForVisualReasoning.cls |
1 | 0 | 0 |
transformers.models.vit.configuration_vit (34 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ViTConfig.__init__ |
18 | 0 | 0 |
attr |
ViTConfig.hidden_size |
1 | 0 | 0 |
attr |
ViTConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
ViTConfig.num_attention_heads |
1 | 0 | 0 |
attr |
ViTConfig.intermediate_size |
1 | 0 | 0 |
attr |
ViTConfig.hidden_act |
1 | 0 | 0 |
attr |
ViTConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
ViTConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
ViTConfig.initializer_range |
1 | 0 | 0 |
attr |
ViTConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
ViTConfig.image_size |
1 | 0 | 0 |
attr |
ViTConfig.patch_size |
1 | 0 | 0 |
attr |
ViTConfig.num_channels |
1 | 0 | 0 |
attr |
ViTConfig.qkv_bias |
1 | 0 | 0 |
attr |
ViTConfig.encoder_stride |
1 | 0 | 0 |
attr |
ViTConfig.pooler_output_size |
1 | 0 | 0 |
attr |
ViTConfig.pooler_act |
1 | 0 | 0 |
transformers.models.vit.image_processing_vit (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ViTImageProcessor.__init__ |
11 | 10 | 0 |
meth |
ViTImageProcessor.resize |
7 | 6 | 0 |
meth |
ViTImageProcessor.preprocess |
14 | 13 | 0 |
attr |
ViTImageProcessor.do_resize |
1 | 0 | 0 |
attr |
ViTImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
ViTImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
ViTImageProcessor.size |
1 | 0 | 0 |
attr |
ViTImageProcessor.resample |
1 | 0 | 0 |
attr |
ViTImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
ViTImageProcessor.image_mean |
1 | 0 | 0 |
attr |
ViTImageProcessor.image_std |
1 | 0 | 0 |
attr |
ViTImageProcessor.do_convert_rgb |
1 | 0 | 0 |
transformers.models.vit.modeling_vit (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ViTForImageClassification.__init__ |
2 | 1 | 0 |
attr |
ViTForImageClassification.num_labels |
1 | 0 | 0 |
attr |
ViTForImageClassification.vit |
1 | 0 | 0 |
attr |
ViTForImageClassification.classifier |
1 | 0 | 0 |
meth |
ViTModel.__init__ |
4 | 3 | 0 |
attr |
ViTModel.embeddings |
1 | 0 | 0 |
attr |
ViTModel.encoder |
1 | 0 | 0 |
attr |
ViTModel.layernorm |
1 | 0 | 0 |
attr |
ViTModel.pooler |
1 | 0 | 0 |
meth |
ViTPreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
ViTForMaskedImageModeling.__init__ |
2 | 1 | 0 |
attr |
ViTForMaskedImageModeling.vit |
1 | 0 | 0 |
attr |
ViTForMaskedImageModeling.decoder |
1 | 0 | 0 |
transformers.models.vit_mae.configuration_vit_mae (40 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ViTMAEConfig.__init__ |
21 | 0 | 0 |
attr |
ViTMAEConfig.hidden_size |
1 | 0 | 0 |
attr |
ViTMAEConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
ViTMAEConfig.num_attention_heads |
1 | 0 | 0 |
attr |
ViTMAEConfig.intermediate_size |
1 | 0 | 0 |
attr |
ViTMAEConfig.hidden_act |
1 | 0 | 0 |
attr |
ViTMAEConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
ViTMAEConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
ViTMAEConfig.initializer_range |
1 | 0 | 0 |
attr |
ViTMAEConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
ViTMAEConfig.image_size |
1 | 0 | 0 |
attr |
ViTMAEConfig.patch_size |
1 | 0 | 0 |
attr |
ViTMAEConfig.num_channels |
1 | 0 | 0 |
attr |
ViTMAEConfig.qkv_bias |
1 | 0 | 0 |
attr |
ViTMAEConfig.decoder_num_attention_heads |
1 | 0 | 0 |
attr |
ViTMAEConfig.decoder_hidden_size |
1 | 0 | 0 |
attr |
ViTMAEConfig.decoder_num_hidden_layers |
1 | 0 | 0 |
attr |
ViTMAEConfig.decoder_intermediate_size |
1 | 0 | 0 |
attr |
ViTMAEConfig.mask_ratio |
1 | 0 | 0 |
attr |
ViTMAEConfig.norm_pix_loss |
1 | 0 | 0 |
transformers.models.vit_mae.modeling_vit_mae (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ViTMAEModel.__init__ |
2 | 0 | 0 |
meth |
ViTMAEModel.get_input_embeddings |
1 | 0 | 0 |
attr |
ViTMAEModel.embeddings |
1 | 0 | 0 |
attr |
ViTMAEModel.encoder |
1 | 0 | 0 |
attr |
ViTMAEModel.layernorm |
1 | 0 | 0 |
meth |
ViTMAELayer.__init__ |
2 | 1 | 0 |
attr |
ViTMAELayer.chunk_size_feed_forward |
1 | 0 | 0 |
attr |
ViTMAELayer.seq_len_dim |
1 | 0 | 0 |
attr |
ViTMAELayer.attention |
1 | 0 | 0 |
attr |
ViTMAELayer.intermediate |
1 | 0 | 0 |
attr |
ViTMAELayer.output |
1 | 0 | 0 |
attr |
ViTMAELayer.layernorm_before |
1 | 0 | 0 |
attr |
ViTMAELayer.layernorm_after |
1 | 0 | 0 |
meth |
ViTMAEForPreTraining.__init__ |
2 | 1 | 0 |
meth |
ViTMAEForPreTraining.get_input_embeddings |
1 | 0 | 0 |
meth |
ViTMAEForPreTraining.patchify |
3 | 1 | 0 |
meth |
ViTMAEForPreTraining.unpatchify |
3 | 1 | 0 |
meth |
ViTMAEForPreTraining.forward_loss |
5 | 1 | 0 |
attr |
ViTMAEForPreTraining.vit |
1 | 0 | 0 |
attr |
ViTMAEForPreTraining.decoder |
1 | 0 | 0 |
meth |
ViTMAEPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.vit_msn.configuration_vit_msn (28 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ViTMSNConfig.__init__ |
15 | 0 | 0 |
attr |
ViTMSNConfig.hidden_size |
1 | 0 | 0 |
attr |
ViTMSNConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
ViTMSNConfig.num_attention_heads |
1 | 0 | 0 |
attr |
ViTMSNConfig.intermediate_size |
1 | 0 | 0 |
attr |
ViTMSNConfig.hidden_act |
1 | 0 | 0 |
attr |
ViTMSNConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
ViTMSNConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
ViTMSNConfig.initializer_range |
1 | 0 | 0 |
attr |
ViTMSNConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
ViTMSNConfig.image_size |
1 | 0 | 0 |
attr |
ViTMSNConfig.patch_size |
1 | 0 | 0 |
attr |
ViTMSNConfig.num_channels |
1 | 0 | 0 |
attr |
ViTMSNConfig.qkv_bias |
1 | 0 | 0 |
transformers.models.vit_msn.modeling_vit_msn (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
ViTMSNForImageClassification.num_labels |
1 | 0 | 0 |
attr |
ViTMSNForImageClassification.vit |
1 | 0 | 0 |
attr |
ViTMSNForImageClassification.classifier |
1 | 0 | 0 |
meth |
ViTMSNModel.__init__ |
3 | 2 | 0 |
attr |
ViTMSNModel.embeddings |
1 | 0 | 0 |
attr |
ViTMSNModel.encoder |
1 | 0 | 0 |
attr |
ViTMSNModel.layernorm |
1 | 0 | 0 |
transformers.models.vitdet.configuration_vitdet (43 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VitDetConfig.__init__ |
23 | 0 | 0 |
attr |
VitDetConfig.hidden_size |
1 | 0 | 0 |
attr |
VitDetConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
VitDetConfig.num_attention_heads |
1 | 0 | 0 |
attr |
VitDetConfig.mlp_ratio |
1 | 0 | 0 |
attr |
VitDetConfig.hidden_act |
1 | 0 | 0 |
attr |
VitDetConfig.dropout_prob |
1 | 0 | 0 |
attr |
VitDetConfig.initializer_range |
1 | 0 | 0 |
attr |
VitDetConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
VitDetConfig.image_size |
1 | 0 | 0 |
attr |
VitDetConfig.pretrain_image_size |
1 | 0 | 0 |
attr |
VitDetConfig.patch_size |
1 | 0 | 0 |
attr |
VitDetConfig.num_channels |
1 | 0 | 0 |
attr |
VitDetConfig.qkv_bias |
1 | 0 | 0 |
attr |
VitDetConfig.drop_path_rate |
1 | 0 | 0 |
attr |
VitDetConfig.window_block_indices |
1 | 0 | 0 |
attr |
VitDetConfig.residual_block_indices |
1 | 0 | 0 |
attr |
VitDetConfig.use_absolute_position_embeddings |
1 | 0 | 0 |
attr |
VitDetConfig.use_relative_position_embeddings |
1 | 0 | 0 |
attr |
VitDetConfig.window_size |
1 | 0 | 0 |
attr |
VitDetConfig.stage_names |
1 | 0 | 0 |
transformers.models.vitdet.modeling_vitdet (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VitDetModel.__init__ |
2 | 1 | 0 |
meth |
VitDetModel.forward |
6 | 5 | 0 |
attr |
VitDetModel.embeddings |
1 | 0 | 0 |
attr |
VitDetModel.encoder |
1 | 0 | 0 |
meth |
VitDetBackbone.__init__ |
2 | 0 | 0 |
meth |
VitDetBackbone.forward |
6 | 5 | 0 |
attr |
VitDetBackbone.embeddings |
1 | 0 | 0 |
attr |
VitDetBackbone.encoder |
1 | 0 | 0 |
attr |
VitDetBackbone.num_features |
1 | 0 | 0 |
transformers.models.vitmatte.configuration_vitmatte (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VitMatteConfig.__init__ |
8 | 6 | 0 |
attr |
VitMatteConfig.backbone_config |
1 | 0 | 0 |
attr |
VitMatteConfig.batch_norm_eps |
1 | 0 | 0 |
attr |
VitMatteConfig.hidden_size |
1 | 0 | 0 |
attr |
VitMatteConfig.initializer_range |
1 | 0 | 0 |
attr |
VitMatteConfig.convstream_hidden_sizes |
1 | 0 | 0 |
attr |
VitMatteConfig.fusion_hidden_sizes |
1 | 0 | 0 |
transformers.models.vitmatte.image_processing_vitmatte (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VitMatteImageProcessor.__init__ |
9 | 8 | 0 |
meth |
VitMatteImageProcessor.preprocess |
13 | 12 | 0 |
attr |
VitMatteImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
VitMatteImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
VitMatteImageProcessor.do_pad |
1 | 0 | 0 |
attr |
VitMatteImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
VitMatteImageProcessor.image_mean |
1 | 0 | 0 |
attr |
VitMatteImageProcessor.image_std |
1 | 0 | 0 |
attr |
VitMatteImageProcessor.size_divisor |
1 | 0 | 0 |
transformers.models.vitmatte.modeling_vitmatte (7 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VitMattePreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
VitMatteForImageMatting.__init__ |
2 | 0 | 0 |
meth |
VitMatteForImageMatting.forward |
7 | 5 | 0 |
attr |
VitMatteForImageMatting.backbone |
1 | 0 | 0 |
attr |
VitMatteForImageMatting.decoder |
1 | 0 | 0 |
transformers.models.vitpose.configuration_vitpose (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VitPoseConfig.__init__ |
6 | 4 | 0 |
attr |
VitPoseConfig.backbone_config |
1 | 0 | 0 |
attr |
VitPoseConfig.initializer_range |
1 | 0 | 0 |
attr |
VitPoseConfig.scale_factor |
1 | 0 | 0 |
attr |
VitPoseConfig.use_simple_decoder |
1 | 0 | 0 |
transformers.models.vitpose.image_processing_vitpose (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VitPoseImageProcessor.__init__ |
9 | 7 | 0 |
meth |
VitPoseImageProcessor.keypoints_from_heatmaps |
5 | 4 | 0 |
meth |
VitPoseImageProcessor.post_process_pose_estimation |
6 | 5 | 0 |
attr |
VitPoseImageProcessor.do_affine_transform |
1 | 0 | 0 |
attr |
VitPoseImageProcessor.size |
1 | 0 | 0 |
attr |
VitPoseImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
VitPoseImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
VitPoseImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
VitPoseImageProcessor.image_mean |
1 | 0 | 0 |
attr |
VitPoseImageProcessor.image_std |
1 | 0 | 0 |
attr |
VitPoseImageProcessor.normalize_factor |
1 | 0 | 0 |
transformers.models.vitpose.image_processing_vitpose_fast (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VitPoseImageProcessorFast._preprocess |
13 | 12 | 0 |
meth |
VitPoseImageProcessorFast.keypoints_from_heatmaps |
5 | 4 | 0 |
meth |
VitPoseImageProcessorFast.post_process_pose_estimation |
6 | 5 | 0 |
transformers.models.vitpose.modeling_vitpose (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VitPoseForPoseEstimation.__init__ |
2 | 1 | 0 |
attr |
VitPoseForPoseEstimation.backbone |
1 | 0 | 0 |
attr |
VitPoseForPoseEstimation.head |
1 | 0 | 0 |
meth |
VitPosePreTrainedModel._init_weights |
2 | 1 | 0 |
transformers.models.vitpose_backbone.configuration_vitpose_backbone (35 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VitPoseBackboneConfig.__init__ |
19 | 0 | 0 |
attr |
VitPoseBackboneConfig.hidden_size |
1 | 0 | 0 |
attr |
VitPoseBackboneConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
VitPoseBackboneConfig.num_attention_heads |
1 | 0 | 0 |
attr |
VitPoseBackboneConfig.mlp_ratio |
1 | 0 | 0 |
attr |
VitPoseBackboneConfig.num_experts |
1 | 0 | 0 |
attr |
VitPoseBackboneConfig.part_features |
1 | 0 | 0 |
attr |
VitPoseBackboneConfig.hidden_act |
1 | 0 | 0 |
attr |
VitPoseBackboneConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
VitPoseBackboneConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
VitPoseBackboneConfig.initializer_range |
1 | 0 | 0 |
attr |
VitPoseBackboneConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
VitPoseBackboneConfig.image_size |
1 | 0 | 0 |
attr |
VitPoseBackboneConfig.patch_size |
1 | 0 | 0 |
attr |
VitPoseBackboneConfig.num_channels |
1 | 0 | 0 |
attr |
VitPoseBackboneConfig.qkv_bias |
1 | 0 | 0 |
attr |
VitPoseBackboneConfig.stage_names |
1 | 0 | 0 |
transformers.models.vitpose_backbone.modeling_vitpose_backbone (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VitPoseBackbonePreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
VitPoseBackbone.__init__ |
2 | 1 | 0 |
meth |
VitPoseBackbone.forward |
5 | 3 | 0 |
attr |
VitPoseBackbone.num_features |
1 | 0 | 0 |
attr |
VitPoseBackbone.embeddings |
1 | 0 | 0 |
attr |
VitPoseBackbone.encoder |
1 | 0 | 0 |
attr |
VitPoseBackbone.layernorm |
1 | 0 | 0 |
transformers.models.vits.configuration_vits (92 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VitsConfig.__init__ |
47 | 0 | 0 |
attr |
VitsConfig.vocab_size |
1 | 0 | 0 |
attr |
VitsConfig.hidden_size |
1 | 0 | 0 |
attr |
VitsConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
VitsConfig.num_attention_heads |
1 | 0 | 0 |
attr |
VitsConfig.window_size |
1 | 0 | 0 |
attr |
VitsConfig.use_bias |
1 | 0 | 0 |
attr |
VitsConfig.ffn_dim |
1 | 0 | 0 |
attr |
VitsConfig.layerdrop |
1 | 0 | 0 |
attr |
VitsConfig.ffn_kernel_size |
1 | 0 | 0 |
attr |
VitsConfig.flow_size |
1 | 0 | 0 |
attr |
VitsConfig.spectrogram_bins |
1 | 0 | 0 |
attr |
VitsConfig.hidden_act |
1 | 0 | 0 |
attr |
VitsConfig.hidden_dropout |
1 | 0 | 0 |
attr |
VitsConfig.attention_dropout |
1 | 0 | 0 |
attr |
VitsConfig.activation_dropout |
1 | 0 | 0 |
attr |
VitsConfig.initializer_range |
1 | 0 | 0 |
attr |
VitsConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
VitsConfig.use_stochastic_duration_prediction |
1 | 0 | 0 |
attr |
VitsConfig.num_speakers |
1 | 0 | 0 |
attr |
VitsConfig.speaker_embedding_size |
1 | 0 | 0 |
attr |
VitsConfig.upsample_initial_channel |
1 | 0 | 0 |
attr |
VitsConfig.upsample_rates |
1 | 0 | 0 |
attr |
VitsConfig.upsample_kernel_sizes |
1 | 0 | 0 |
attr |
VitsConfig.resblock_kernel_sizes |
1 | 0 | 0 |
attr |
VitsConfig.resblock_dilation_sizes |
1 | 0 | 0 |
attr |
VitsConfig.leaky_relu_slope |
1 | 0 | 0 |
attr |
VitsConfig.depth_separable_channels |
1 | 0 | 0 |
attr |
VitsConfig.depth_separable_num_layers |
1 | 0 | 0 |
attr |
VitsConfig.duration_predictor_flow_bins |
1 | 0 | 0 |
attr |
VitsConfig.duration_predictor_tail_bound |
1 | 0 | 0 |
attr |
VitsConfig.duration_predictor_kernel_size |
1 | 0 | 0 |
attr |
VitsConfig.duration_predictor_dropout |
1 | 0 | 0 |
attr |
VitsConfig.duration_predictor_num_flows |
1 | 0 | 0 |
attr |
VitsConfig.duration_predictor_filter_channels |
1 | 0 | 0 |
attr |
VitsConfig.prior_encoder_num_flows |
1 | 0 | 0 |
attr |
VitsConfig.prior_encoder_num_wavenet_layers |
1 | 0 | 0 |
attr |
VitsConfig.posterior_encoder_num_wavenet_layers |
1 | 0 | 0 |
attr |
VitsConfig.wavenet_kernel_size |
1 | 0 | 0 |
attr |
VitsConfig.wavenet_dilation_rate |
1 | 0 | 0 |
attr |
VitsConfig.wavenet_dropout |
1 | 0 | 0 |
attr |
VitsConfig.speaking_rate |
1 | 0 | 0 |
attr |
VitsConfig.noise_scale |
1 | 0 | 0 |
attr |
VitsConfig.noise_scale_duration |
1 | 0 | 0 |
attr |
VitsConfig.sampling_rate |
1 | 0 | 0 |
attr |
VitsConfig.pad_token_id |
1 | 0 | 0 |
transformers.models.vits.modeling_vits (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VitsModel.__init__ |
2 | 1 | 0 |
meth |
VitsModel.forward |
10 | 9 | 0 |
attr |
VitsModel.text_encoder |
1 | 0 | 0 |
attr |
VitsModel.flow |
1 | 0 | 0 |
attr |
VitsModel.decoder |
1 | 0 | 0 |
attr |
VitsModel.posterior_encoder |
1 | 0 | 0 |
attr |
VitsModel.speaking_rate |
1 | 0 | 0 |
attr |
VitsModel.noise_scale |
1 | 0 | 0 |
attr |
VitsModel.noise_scale_duration |
1 | 0 | 0 |
attr |
VitsModel.duration_predictor |
1 | 0 | 0 |
attr |
VitsModel.embed_speaker |
1 | 0 | 0 |
meth |
VitsPreTrainedModel._init_weights |
2 | 1 | 0 |
transformers.models.vits.tokenization_vits (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VitsTokenizer.__init__ |
10 | 1 | 0 |
meth |
VitsTokenizer.get_vocab |
1 | 0 | 0 |
meth |
VitsTokenizer.normalize_text |
2 | 0 | 0 |
meth |
VitsTokenizer._preprocess_char |
2 | 0 | 0 |
meth |
VitsTokenizer.prepare_for_tokenization |
5 | 4 | 0 |
meth |
VitsTokenizer._convert_token_to_id |
2 | 0 | 0 |
meth |
VitsTokenizer._convert_id_to_token |
2 | 0 | 0 |
prop |
VitsTokenizer.vocab_size |
1 | 0 | 0 |
attr |
VitsTokenizer.decoder |
1 | 0 | 0 |
attr |
VitsTokenizer.language |
1 | 0 | 0 |
attr |
VitsTokenizer.add_blank |
1 | 0 | 0 |
attr |
VitsTokenizer.normalize |
1 | 0 | 0 |
attr |
VitsTokenizer.phonemize |
1 | 0 | 0 |
attr |
VitsTokenizer.is_uroman |
1 | 0 | 0 |
attr |
VitsTokenizer.encoder |
1 | 0 | 0 |
transformers.models.vivit.configuration_vivit (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VivitConfig.__init__ |
16 | 0 | 0 |
attr |
VivitConfig.hidden_size |
1 | 0 | 0 |
attr |
VivitConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
VivitConfig.num_attention_heads |
1 | 0 | 0 |
attr |
VivitConfig.intermediate_size |
1 | 0 | 0 |
attr |
VivitConfig.hidden_act |
1 | 0 | 0 |
attr |
VivitConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
VivitConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
VivitConfig.initializer_range |
1 | 0 | 0 |
attr |
VivitConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
VivitConfig.image_size |
1 | 0 | 0 |
attr |
VivitConfig.num_frames |
1 | 0 | 0 |
attr |
VivitConfig.tubelet_size |
1 | 0 | 0 |
attr |
VivitConfig.num_channels |
1 | 0 | 0 |
attr |
VivitConfig.qkv_bias |
1 | 0 | 0 |
transformers.models.vivit.image_processing_vivit (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VivitImageProcessor.__init__ |
13 | 12 | 0 |
meth |
VivitImageProcessor.resize |
7 | 6 | 0 |
meth |
VivitImageProcessor.rescale |
7 | 5 | 0 |
attr |
VivitImageProcessor.do_resize |
1 | 0 | 0 |
attr |
VivitImageProcessor.size |
1 | 0 | 0 |
attr |
VivitImageProcessor.do_center_crop |
1 | 0 | 0 |
attr |
VivitImageProcessor.crop_size |
1 | 0 | 0 |
attr |
VivitImageProcessor.resample |
1 | 0 | 0 |
attr |
VivitImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
VivitImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
VivitImageProcessor.offset |
1 | 0 | 0 |
attr |
VivitImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
VivitImageProcessor.image_mean |
1 | 0 | 0 |
attr |
VivitImageProcessor.image_std |
1 | 0 | 0 |
transformers.models.vivit.modeling_vivit (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VivitPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
VivitForVideoClassification.__init__ |
2 | 1 | 0 |
attr |
VivitForVideoClassification.num_labels |
1 | 0 | 0 |
attr |
VivitForVideoClassification.vivit |
1 | 0 | 0 |
attr |
VivitForVideoClassification.classifier |
1 | 0 | 0 |
meth |
VivitModel.__init__ |
3 | 2 | 0 |
meth |
VivitModel.get_input_embeddings |
1 | 0 | 0 |
attr |
VivitModel.embeddings |
1 | 0 | 0 |
attr |
VivitModel.encoder |
1 | 0 | 0 |
attr |
VivitModel.layernorm |
1 | 0 | 0 |
attr |
VivitModel.pooler |
1 | 0 | 0 |
transformers.models.vjepa2.configuration_vjepa2 (49 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VJEPA2Config.__init__ |
25 | 0 | 0 |
attr |
VJEPA2Config.crop_size |
1 | 0 | 0 |
attr |
VJEPA2Config.frames_per_clip |
1 | 0 | 0 |
attr |
VJEPA2Config.patch_size |
1 | 0 | 0 |
attr |
VJEPA2Config.tubelet_size |
1 | 0 | 0 |
attr |
VJEPA2Config.hidden_size |
1 | 0 | 0 |
attr |
VJEPA2Config.in_chans |
1 | 0 | 0 |
attr |
VJEPA2Config.num_attention_heads |
1 | 0 | 0 |
attr |
VJEPA2Config.num_hidden_layers |
1 | 0 | 0 |
attr |
VJEPA2Config.drop_path_rate |
1 | 0 | 0 |
attr |
VJEPA2Config.mlp_ratio |
1 | 0 | 0 |
attr |
VJEPA2Config.layer_norm_eps |
1 | 0 | 0 |
attr |
VJEPA2Config.qkv_bias |
1 | 0 | 0 |
attr |
VJEPA2Config.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
VJEPA2Config.hidden_act |
1 | 0 | 0 |
attr |
VJEPA2Config.initializer_range |
1 | 0 | 0 |
attr |
VJEPA2Config.image_size |
1 | 0 | 0 |
attr |
VJEPA2Config.attention_dropout |
1 | 0 | 0 |
attr |
VJEPA2Config.num_pooler_layers |
1 | 0 | 0 |
attr |
VJEPA2Config.pred_hidden_size |
1 | 0 | 0 |
attr |
VJEPA2Config.pred_num_attention_heads |
1 | 0 | 0 |
attr |
VJEPA2Config.pred_num_hidden_layers |
1 | 0 | 0 |
attr |
VJEPA2Config.pred_num_mask_tokens |
1 | 0 | 0 |
attr |
VJEPA2Config.pred_zero_init_mask_tokens |
1 | 0 | 0 |
attr |
VJEPA2Config.pred_mlp_ratio |
1 | 0 | 0 |
transformers.models.vjepa2.modeling_vjepa2 (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VJEPA2ForVideoClassification.__init__ |
2 | 1 | 0 |
meth |
VJEPA2ForVideoClassification.forward |
6 | 5 | 0 |
attr |
VJEPA2ForVideoClassification.num_labels |
1 | 0 | 0 |
attr |
VJEPA2ForVideoClassification.vjepa2 |
1 | 0 | 0 |
attr |
VJEPA2ForVideoClassification.pooler |
1 | 0 | 0 |
attr |
VJEPA2ForVideoClassification.classifier |
1 | 0 | 0 |
meth |
VJEPA2Model.__init__ |
2 | 1 | 0 |
meth |
VJEPA2Model.forward |
8 | 7 | 0 |
meth |
VJEPA2Model.get_vision_features |
2 | 1 | 0 |
attr |
VJEPA2Model.encoder |
1 | 0 | 0 |
attr |
VJEPA2Model.predictor |
1 | 0 | 0 |
meth |
VJEPA2PreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.vjepa2.video_processing_vjepa2 (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VJEPA2VideoProcessor.__init__ |
2 | 1 | 0 |
attr |
VJEPA2VideoProcessor.size |
1 | 0 | 0 |
transformers.models.voxtral.configuration_voxtral (38 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VoxtralEncoderConfig.__init__ |
13 | 0 | 0 |
attr |
VoxtralEncoderConfig.vocab_size |
1 | 0 | 0 |
attr |
VoxtralEncoderConfig.hidden_size |
1 | 0 | 0 |
attr |
VoxtralEncoderConfig.intermediate_size |
1 | 0 | 0 |
attr |
VoxtralEncoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
VoxtralEncoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
VoxtralEncoderConfig.scale_embedding |
1 | 0 | 0 |
attr |
VoxtralEncoderConfig.activation_function |
1 | 0 | 0 |
attr |
VoxtralEncoderConfig.num_mel_bins |
1 | 0 | 0 |
attr |
VoxtralEncoderConfig.max_source_positions |
1 | 0 | 0 |
attr |
VoxtralEncoderConfig.initializer_range |
1 | 0 | 0 |
attr |
VoxtralEncoderConfig.dropout |
1 | 0 | 0 |
attr |
VoxtralEncoderConfig.layerdrop |
1 | 0 | 0 |
attr |
VoxtralEncoderConfig.activation_dropout |
1 | 0 | 0 |
attr |
VoxtralEncoderConfig.attention_dropout |
1 | 0 | 0 |
meth |
VoxtralConfig.__init__ |
6 | 0 | 0 |
attr |
VoxtralConfig.audio_config |
1 | 0 | 0 |
attr |
VoxtralConfig.text_config |
1 | 0 | 0 |
attr |
VoxtralConfig.hidden_size |
1 | 0 | 0 |
attr |
VoxtralConfig.audio_token_id |
1 | 0 | 0 |
attr |
VoxtralConfig.projector_hidden_act |
1 | 0 | 0 |
transformers.models.voxtral.modeling_voxtral (36 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VoxtralEncoder.__init__ |
2 | 1 | 0 |
meth |
VoxtralEncoder._freeze_parameters |
1 | 0 | 0 |
meth |
VoxtralEncoder.set_input_embeddings |
2 | 1 | 0 |
meth |
VoxtralEncoder.forward |
4 | 2 | 0 |
meth |
VoxtralEncoder._get_feat_extract_output_lengths |
2 | 1 | 0 |
attr |
VoxtralEncoder.dropout |
1 | 0 | 0 |
attr |
VoxtralEncoder.layerdrop |
1 | 0 | 0 |
attr |
VoxtralEncoder.num_mel_bins |
1 | 0 | 0 |
attr |
VoxtralEncoder.max_source_positions |
1 | 0 | 0 |
attr |
VoxtralEncoder.embed_scale |
1 | 0 | 0 |
attr |
VoxtralEncoder.conv1 |
1 | 0 | 0 |
attr |
VoxtralEncoder.conv2 |
1 | 0 | 0 |
attr |
VoxtralEncoder.embed_positions |
1 | 0 | 0 |
attr |
VoxtralEncoder.layers |
1 | 0 | 0 |
attr |
VoxtralEncoder.layer_norm |
1 | 0 | 0 |
attr |
VoxtralEncoder.avg_pooler |
1 | 0 | 0 |
attr |
VoxtralEncoder.gradient_checkpointing |
1 | 0 | 0 |
meth |
VoxtralForConditionalGeneration.__init__ |
2 | 0 | 0 |
meth |
VoxtralForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
VoxtralForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
VoxtralForConditionalGeneration.get_output_embeddings |
1 | 0 | 0 |
meth |
VoxtralForConditionalGeneration.set_output_embeddings |
2 | 0 | 0 |
meth |
VoxtralForConditionalGeneration.set_decoder |
2 | 0 | 0 |
meth |
VoxtralForConditionalGeneration.get_decoder |
1 | 0 | 0 |
meth |
VoxtralForConditionalGeneration.prepare_inputs_for_generation |
3 | 0 | 0 |
attr |
VoxtralForConditionalGeneration.vocab_size |
1 | 0 | 0 |
attr |
VoxtralForConditionalGeneration.audio_tower |
1 | 0 | 0 |
attr |
VoxtralForConditionalGeneration.language_model |
1 | 0 | 0 |
attr |
VoxtralForConditionalGeneration.multi_modal_projector |
1 | 0 | 0 |
transformers.models.voxtral.modular_voxtral (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VoxtralEncoder.forward |
4 | 2 | 0 |
meth |
VoxtralForConditionalGeneration.__init__ |
2 | 0 | 0 |
meth |
VoxtralForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
VoxtralForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
VoxtralForConditionalGeneration.get_output_embeddings |
1 | 0 | 0 |
meth |
VoxtralForConditionalGeneration.set_output_embeddings |
2 | 0 | 0 |
meth |
VoxtralForConditionalGeneration.set_decoder |
2 | 0 | 0 |
meth |
VoxtralForConditionalGeneration.get_decoder |
1 | 0 | 0 |
meth |
VoxtralForConditionalGeneration.prepare_inputs_for_generation |
3 | 0 | 0 |
attr |
VoxtralForConditionalGeneration.vocab_size |
1 | 0 | 0 |
attr |
VoxtralForConditionalGeneration.audio_tower |
1 | 0 | 0 |
attr |
VoxtralForConditionalGeneration.language_model |
1 | 0 | 0 |
attr |
VoxtralForConditionalGeneration.multi_modal_projector |
1 | 0 | 0 |
transformers.models.voxtral.processing_voxtral (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VoxtralProcessor.__init__ |
3 | 0 | 0 |
meth |
VoxtralProcessor._retrieve_input_features |
4 | 0 | 0 |
meth |
VoxtralProcessor.__call__ |
3 | 2 | 0 |
meth |
VoxtralProcessor.apply_transcription_request |
7 | 6 | 0 |
attr |
VoxtralProcessor.audio_token_id |
1 | 0 | 0 |
attr |
VoxtralProcessor.audio_token |
1 | 0 | 0 |
transformers.models.voxtral_realtime.configuration_voxtral_realtime (47 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VoxtralRealtimeConfig.__init__ |
8 | 0 | 0 |
attr |
VoxtralRealtimeConfig.audio_config |
1 | 0 | 0 |
attr |
VoxtralRealtimeConfig.text_config |
1 | 0 | 0 |
attr |
VoxtralRealtimeConfig.hidden_size |
1 | 0 | 0 |
attr |
VoxtralRealtimeConfig.projector_hidden_act |
1 | 0 | 0 |
attr |
VoxtralRealtimeConfig.audio_length_per_tok |
1 | 0 | 0 |
attr |
VoxtralRealtimeConfig.default_num_delay_tokens |
1 | 0 | 0 |
attr |
VoxtralRealtimeConfig.downsample_factor |
1 | 0 | 0 |
meth |
VoxtralRealtimeEncoderConfig.__init__ |
17 | 1 | 0 |
attr |
VoxtralRealtimeEncoderConfig.vocab_size |
1 | 0 | 0 |
attr |
VoxtralRealtimeEncoderConfig.hidden_size |
1 | 0 | 0 |
attr |
VoxtralRealtimeEncoderConfig.intermediate_size |
1 | 0 | 0 |
attr |
VoxtralRealtimeEncoderConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
VoxtralRealtimeEncoderConfig.num_attention_heads |
1 | 0 | 0 |
attr |
VoxtralRealtimeEncoderConfig.activation_function |
1 | 0 | 0 |
attr |
VoxtralRealtimeEncoderConfig.num_mel_bins |
1 | 0 | 0 |
attr |
VoxtralRealtimeEncoderConfig.initializer_range |
1 | 0 | 0 |
attr |
VoxtralRealtimeEncoderConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
VoxtralRealtimeEncoderConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
VoxtralRealtimeEncoderConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
VoxtralRealtimeEncoderConfig.rope_parameters |
1 | 0 | 0 |
attr |
VoxtralRealtimeEncoderConfig.hidden_act |
1 | 0 | 0 |
attr |
VoxtralRealtimeEncoderConfig.sliding_window |
1 | 0 | 0 |
attr |
VoxtralRealtimeEncoderConfig.head_dim |
1 | 0 | 0 |
attr |
VoxtralRealtimeEncoderConfig.attention_dropout |
1 | 0 | 0 |
transformers.models.voxtral_realtime.feature_extraction_voxtral_realtime (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VoxtralRealtimeFeatureExtractor.__init__ |
9 | 0 | 0 |
meth |
VoxtralRealtimeFeatureExtractor._torch_extract_fbank_features |
4 | 2 | 0 |
meth |
VoxtralRealtimeFeatureExtractor.__call__ |
14 | 13 | 0 |
attr |
VoxtralRealtimeFeatureExtractor.hop_length |
1 | 0 | 0 |
attr |
VoxtralRealtimeFeatureExtractor.n_fft |
1 | 0 | 0 |
attr |
VoxtralRealtimeFeatureExtractor.win_length |
1 | 0 | 0 |
attr |
VoxtralRealtimeFeatureExtractor.mel_filters |
1 | 0 | 0 |
attr |
VoxtralRealtimeFeatureExtractor.global_log_mel_max |
1 | 0 | 0 |
transformers.models.voxtral_realtime.modeling_voxtral_realtime (44 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VoxtralRealtimeForConditionalGeneration.__init__ |
2 | 0 | 0 |
meth |
VoxtralRealtimeForConditionalGeneration.get_input_embeddings |
1 | 0 | 0 |
meth |
VoxtralRealtimeForConditionalGeneration.set_input_embeddings |
2 | 0 | 0 |
meth |
VoxtralRealtimeForConditionalGeneration.get_output_embeddings |
1 | 0 | 0 |
meth |
VoxtralRealtimeForConditionalGeneration.set_output_embeddings |
2 | 0 | 0 |
meth |
VoxtralRealtimeForConditionalGeneration.set_decoder |
2 | 0 | 0 |
meth |
VoxtralRealtimeForConditionalGeneration.get_decoder |
1 | 0 | 0 |
meth |
VoxtralRealtimeForConditionalGeneration.prepare_inputs_for_generation |
4 | 1 | 0 |
meth |
VoxtralRealtimeForConditionalGeneration._update_model_kwargs_for_generation |
5 | 2 | 0 |
meth |
VoxtralRealtimeForConditionalGeneration._prepare_cache_for_generation |
6 | 3 | 0 |
meth |
VoxtralRealtimeForConditionalGeneration._prepare_generation_config |
3 | 0 | 0 |
meth |
VoxtralRealtimeForConditionalGeneration._prepare_generated_length |
7 | 0 | 0 |
attr |
VoxtralRealtimeForConditionalGeneration.vocab_size |
1 | 0 | 0 |
attr |
VoxtralRealtimeForConditionalGeneration.audio_tower |
1 | 0 | 0 |
attr |
VoxtralRealtimeForConditionalGeneration.language_model |
1 | 0 | 0 |
attr |
VoxtralRealtimeForConditionalGeneration.multi_modal_projector |
1 | 0 | 0 |
attr |
VoxtralRealtimeForConditionalGeneration.time_embedding |
1 | 0 | 0 |
meth |
VoxtralRealtimePreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
VoxtralRealtimeEncoder.init |
2 | 0 | 0 |
attr |
VoxtralRealtimeEncoder.embedder |
1 | 0 | 0 |
attr |
VoxtralRealtimeEncoder.layers |
1 | 0 | 0 |
attr |
VoxtralRealtimeEncoder.norm |
1 | 0 | 0 |
attr |
VoxtralRealtimeEncoder.rotary_emb |
1 | 0 | 0 |
attr |
VoxtralRealtimeEncoder.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.voxtral_realtime.modular_voxtral_realtime (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VoxtralRealtimeForConditionalGeneration.init |
2 | 0 | 0 |
meth |
VoxtralRealtimeForConditionalGeneration.prepare_inputs_for_generation |
4 | 1 | 0 |
meth |
VoxtralRealtimeForConditionalGeneration._update_model_kwargs_for_generation |
5 | 2 | 0 |
meth |
VoxtralRealtimeForConditionalGeneration._prepare_cache_for_generation |
6 | 3 | 0 |
meth |
VoxtralRealtimeForConditionalGeneration._prepare_generation_config |
3 | 0 | 0 |
meth |
VoxtralRealtimeForConditionalGeneration._prepare_generated_length |
7 | 0 | 0 |
attr |
VoxtralRealtimeForConditionalGeneration.language_model |
1 | 0 | 0 |
attr |
VoxtralRealtimeForConditionalGeneration.time_embedding |
1 | 0 | 0 |
meth |
VoxtralRealtimePreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
VoxtralRealtimeEncoder.init |
2 | 0 | 0 |
attr |
VoxtralRealtimeEncoder.embedder |
1 | 0 | 0 |
attr |
VoxtralRealtimeEncoder.layers |
1 | 0 | 0 |
attr |
VoxtralRealtimeEncoder.norm |
1 | 0 | 0 |
attr |
VoxtralRealtimeEncoder.rotary_emb |
1 | 0 | 0 |
attr |
VoxtralRealtimeEncoder.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.voxtral_realtime.processing_voxtral_realtime (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VoxtralRealtimeProcessor.init |
3 | 0 | 0 |
meth |
VoxtralRealtimeProcessor.call |
5 | 4 | 0 |
prop |
VoxtralRealtimeProcessor.mistral_common_audio_config |
1 | 0 | 0 |
prop |
VoxtralRealtimeProcessor.num_delay_tokens |
1 | 0 | 0 |
prop |
VoxtralRealtimeProcessor.num_right_pad_tokens |
1 | 0 | 0 |
prop |
VoxtralRealtimeProcessor.audio_length_per_tok |
1 | 0 | 0 |
prop |
VoxtralRealtimeProcessor.raw_audio_length_per_tok |
1 | 0 | 0 |
prop |
VoxtralRealtimeProcessor.num_mel_frames_first_audio_chunk |
1 | 0 | 0 |
transformers.models.wav2vec2.configuration_wav2vec2 (114 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Wav2Vec2Config.init |
57 | 0 | 0 |
prop |
Wav2Vec2Config.inputs_to_logits_ratio |
1 | 0 | 0 |
attr |
Wav2Vec2Config.pad_token_id |
1 | 0 | 0 |
attr |
Wav2Vec2Config.bos_token_id |
1 | 0 | 0 |
attr |
Wav2Vec2Config.eos_token_id |
1 | 0 | 0 |
attr |
Wav2Vec2Config.hidden_size |
1 | 0 | 0 |
attr |
Wav2Vec2Config.feat_extract_norm |
1 | 0 | 0 |
attr |
Wav2Vec2Config.feat_extract_activation |
1 | 0 | 0 |
attr |
Wav2Vec2Config.conv_dim |
1 | 0 | 0 |
attr |
Wav2Vec2Config.conv_stride |
1 | 0 | 0 |
attr |
Wav2Vec2Config.conv_kernel |
1 | 0 | 0 |
attr |
Wav2Vec2Config.conv_bias |
1 | 0 | 0 |
attr |
Wav2Vec2Config.num_conv_pos_embeddings |
1 | 0 | 0 |
attr |
Wav2Vec2Config.num_conv_pos_embedding_groups |
1 | 0 | 0 |
attr |
Wav2Vec2Config.num_feat_extract_layers |
1 | 0 | 0 |
attr |
Wav2Vec2Config.num_hidden_layers |
1 | 0 | 0 |
attr |
Wav2Vec2Config.intermediate_size |
1 | 0 | 0 |
attr |
Wav2Vec2Config.hidden_act |
1 | 0 | 0 |
attr |
Wav2Vec2Config.num_attention_heads |
1 | 0 | 0 |
attr |
Wav2Vec2Config.hidden_dropout |
1 | 0 | 0 |
attr |
Wav2Vec2Config.attention_dropout |
1 | 0 | 0 |
attr |
Wav2Vec2Config.activation_dropout |
1 | 0 | 0 |
attr |
Wav2Vec2Config.feat_proj_dropout |
1 | 0 | 0 |
attr |
Wav2Vec2Config.final_dropout |
1 | 0 | 0 |
attr |
Wav2Vec2Config.layerdrop |
1 | 0 | 0 |
attr |
Wav2Vec2Config.layer_norm_eps |
1 | 0 | 0 |
attr |
Wav2Vec2Config.initializer_range |
1 | 0 | 0 |
attr |
Wav2Vec2Config.vocab_size |
1 | 0 | 0 |
attr |
Wav2Vec2Config.do_stable_layer_norm |
1 | 0 | 0 |
attr |
Wav2Vec2Config.use_weighted_layer_sum |
1 | 0 | 0 |
attr |
Wav2Vec2Config.apply_spec_augment |
1 | 0 | 0 |
attr |
Wav2Vec2Config.mask_time_prob |
1 | 0 | 0 |
attr |
Wav2Vec2Config.mask_time_length |
1 | 0 | 0 |
attr |
Wav2Vec2Config.mask_time_min_masks |
1 | 0 | 0 |
attr |
Wav2Vec2Config.mask_feature_prob |
1 | 0 | 0 |
attr |
Wav2Vec2Config.mask_feature_length |
1 | 0 | 0 |
attr |
Wav2Vec2Config.mask_feature_min_masks |
1 | 0 | 0 |
attr |
Wav2Vec2Config.num_codevectors_per_group |
1 | 0 | 0 |
attr |
Wav2Vec2Config.num_codevector_groups |
1 | 0 | 0 |
attr |
Wav2Vec2Config.contrastive_logits_temperature |
1 | 0 | 0 |
attr |
Wav2Vec2Config.feat_quantizer_dropout |
1 | 0 | 0 |
attr |
Wav2Vec2Config.num_negatives |
1 | 0 | 0 |
attr |
Wav2Vec2Config.codevector_dim |
1 | 0 | 0 |
attr |
Wav2Vec2Config.proj_codevector_dim |
1 | 0 | 0 |
attr |
Wav2Vec2Config.diversity_loss_weight |
1 | 0 | 0 |
attr |
Wav2Vec2Config.ctc_loss_reduction |
1 | 0 | 0 |
attr |
Wav2Vec2Config.ctc_zero_infinity |
1 | 0 | 0 |
attr |
Wav2Vec2Config.add_adapter |
1 | 0 | 0 |
attr |
Wav2Vec2Config.adapter_kernel_size |
1 | 0 | 0 |
attr |
Wav2Vec2Config.adapter_stride |
1 | 0 | 0 |
attr |
Wav2Vec2Config.num_adapter_layers |
1 | 0 | 0 |
attr |
Wav2Vec2Config.output_hidden_size |
1 | 0 | 0 |
attr |
Wav2Vec2Config.adapter_attn_dim |
1 | 0 | 0 |
attr |
Wav2Vec2Config.classifier_proj_size |
1 | 0 | 0 |
attr |
Wav2Vec2Config.tdnn_dim |
1 | 0 | 0 |
attr |
Wav2Vec2Config.tdnn_kernel |
1 | 0 | 0 |
attr |
Wav2Vec2Config.tdnn_dilation |
1 | 0 | 0 |
attr |
Wav2Vec2Config.xvector_output_dim |
1 | 0 | 0 |
transformers.models.wav2vec2.feature_extraction_wav2vec2 (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Wav2Vec2FeatureExtractor.init |
7 | 0 | 0 |
meth |
Wav2Vec2FeatureExtractor.call |
10 | 9 | 0 |
attr |
Wav2Vec2FeatureExtractor.return_attention_mask |
1 | 0 | 0 |
attr |
Wav2Vec2FeatureExtractor.do_normalize |
1 | 0 | 0 |
transformers.models.wav2vec2.modeling_wav2vec2 (71 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Wav2Vec2ForPreTraining.init |
2 | 1 | 0 |
meth |
Wav2Vec2ForPreTraining.set_gumbel_temperature |
2 | 1 | 0 |
meth |
Wav2Vec2ForPreTraining.freeze_feature_encoder |
1 | 0 | 0 |
meth |
Wav2Vec2ForPreTraining.compute_contrastive_logits |
5 | 4 | 0 |
meth |
Wav2Vec2ForPreTraining.forward |
9 | 8 | 0 |
attr |
Wav2Vec2ForPreTraining.wav2vec2 |
1 | 0 | 0 |
attr |
Wav2Vec2ForPreTraining.dropout_features |
1 | 0 | 0 |
attr |
Wav2Vec2ForPreTraining.quantizer |
1 | 0 | 0 |
attr |
Wav2Vec2ForPreTraining.project_hid |
1 | 0 | 0 |
attr |
Wav2Vec2ForPreTraining.project_q |
1 | 0 | 0 |
meth |
Wav2Vec2PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Wav2Vec2PreTrainedModel._get_feat_extract_output_lengths |
3 | 2 | 0 |
meth |
Wav2Vec2PreTrainedModel._get_feature_vector_attention_mask |
4 | 2 | 0 |
meth |
Wav2Vec2PreTrainedModel._get_adapters |
1 | 0 | 0 |
meth |
Wav2Vec2PreTrainedModel.init_adapter_layers |
1 | 0 | 0 |
meth |
Wav2Vec2PreTrainedModel.load_adapter |
4 | 1 | 0 |
meth |
Wav2Vec2ForXVector.init |
2 | 0 | 0 |
meth |
Wav2Vec2ForXVector.freeze_feature_encoder |
1 | 0 | 0 |
meth |
Wav2Vec2ForXVector.freeze_base_model |
1 | 0 | 0 |
meth |
Wav2Vec2ForXVector._get_tdnn_output_lengths |
2 | 1 | 0 |
meth |
Wav2Vec2ForXVector.forward |
8 | 7 | 0 |
attr |
Wav2Vec2ForXVector.wav2vec2 |
1 | 0 | 0 |
attr |
Wav2Vec2ForXVector.projector |
1 | 0 | 0 |
attr |
Wav2Vec2ForXVector.tdnn |
1 | 0 | 0 |
attr |
Wav2Vec2ForXVector.feature_extractor |
1 | 0 | 0 |
attr |
Wav2Vec2ForXVector.classifier |
1 | 0 | 0 |
attr |
Wav2Vec2ForXVector.objective |
1 | 0 | 0 |
attr |
Wav2Vec2ForXVector.layer_weights |
1 | 0 | 0 |
meth |
Wav2Vec2ForSequenceClassification.init |
2 | 0 | 0 |
meth |
Wav2Vec2ForSequenceClassification.freeze_feature_encoder |
1 | 0 | 0 |
meth |
Wav2Vec2ForSequenceClassification.freeze_base_model |
1 | 0 | 0 |
meth |
Wav2Vec2ForSequenceClassification.forward |
8 | 7 | 0 |
attr |
Wav2Vec2ForSequenceClassification.wav2vec2 |
1 | 0 | 0 |
attr |
Wav2Vec2ForSequenceClassification.projector |
1 | 0 | 0 |
attr |
Wav2Vec2ForSequenceClassification.classifier |
1 | 0 | 0 |
attr |
Wav2Vec2ForSequenceClassification.layer_weights |
1 | 0 | 0 |
meth |
Wav2Vec2ForAudioFrameClassification.init |
2 | 0 | 0 |
meth |
Wav2Vec2ForAudioFrameClassification.freeze_feature_encoder |
1 | 0 | 0 |
meth |
Wav2Vec2ForAudioFrameClassification.freeze_base_model |
1 | 0 | 0 |
meth |
Wav2Vec2ForAudioFrameClassification.forward |
8 | 7 | 0 |
attr |
Wav2Vec2ForAudioFrameClassification.wav2vec2 |
1 | 0 | 0 |
attr |
Wav2Vec2ForAudioFrameClassification.classifier |
1 | 0 | 0 |
attr |
Wav2Vec2ForAudioFrameClassification.num_labels |
1 | 0 | 0 |
attr |
Wav2Vec2ForAudioFrameClassification.layer_weights |
1 | 0 | 0 |
meth |
Wav2Vec2Model.init |
2 | 1 | 0 |
meth |
Wav2Vec2Model.freeze_feature_encoder |
1 | 0 | 0 |
meth |
Wav2Vec2Model._mask_hidden_states |
4 | 3 | 0 |
meth |
Wav2Vec2Model.forward |
8 | 7 | 0 |
attr |
Wav2Vec2Model.feature_extractor |
1 | 0 | 0 |
attr |
Wav2Vec2Model.feature_projection |
1 | 0 | 0 |
attr |
Wav2Vec2Model.adapter |
1 | 0 | 0 |
attr |
Wav2Vec2Model.masked_spec_embed |
1 | 0 | 0 |
attr |
Wav2Vec2Model.encoder |
1 | 0 | 0 |
meth |
Wav2Vec2ForCTC.init |
3 | 1 | 0 |
meth |
Wav2Vec2ForCTC.tie_weights |
2 | 0 | 0 |
meth |
Wav2Vec2ForCTC.freeze_feature_encoder |
1 | 0 | 0 |
meth |
Wav2Vec2ForCTC.freeze_base_model |
1 | 0 | 0 |
meth |
Wav2Vec2ForCTC.forward |
8 | 7 | 0 |
attr |
Wav2Vec2ForCTC.wav2vec2 |
1 | 0 | 0 |
attr |
Wav2Vec2ForCTC.dropout |
1 | 0 | 0 |
attr |
Wav2Vec2ForCTC.target_lang |
1 | 0 | 0 |
attr |
Wav2Vec2ForCTC.lm_head |
1 | 0 | 0 |
transformers.models.wav2vec2.processing_wav2vec2 (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Wav2Vec2Processor.init |
3 | 0 | 0 |
meth |
Wav2Vec2Processor.call |
4 | 3 | 0 |
meth |
Wav2Vec2Processor.pad |
3 | 0 | 0 |
prop |
Wav2Vec2Processor.model_input_names |
1 | 0 | 0 |
transformers.models.wav2vec2.tokenization_wav2vec2 (29 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Wav2Vec2CTCTokenizer.init |
11 | 0 | 0 |
meth |
Wav2Vec2CTCTokenizer.set_target_lang |
2 | 1 | 0 |
meth |
Wav2Vec2CTCTokenizer._tokenize |
3 | 0 | 0 |
meth |
Wav2Vec2CTCTokenizer.prepare_for_tokenization |
4 | 0 | 0 |
meth |
Wav2Vec2CTCTokenizer.batch_decode |
7 | 6 | 0 |
meth |
Wav2Vec2CTCTokenizer.decode |
7 | 6 | 0 |
prop |
Wav2Vec2CTCTokenizer.word_delimiter_token |
2 | 1 | 0 |
prop |
Wav2Vec2CTCTokenizer.word_delimiter_token_id |
2 | 1 | 0 |
attr |
Wav2Vec2CTCTokenizer.do_lower_case |
1 | 0 | 0 |
attr |
Wav2Vec2CTCTokenizer.replace_word_delimiter_char |
1 | 0 | 0 |
attr |
Wav2Vec2CTCTokenizer.target_lang |
1 | 0 | 0 |
attr |
Wav2Vec2CTCTokenizer.decoder |
1 | 0 | 0 |
attr |
Wav2Vec2CTCTokenizer.vocab |
1 | 0 | 0 |
attr |
Wav2Vec2CTCTokenizer.encoder |
1 | 0 | 0 |
transformers.models.wav2vec2_bert.configuration_wav2vec2_bert (97 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Wav2Vec2BertConfig.init |
49 | 0 | 0 |
prop |
Wav2Vec2BertConfig.inputs_to_logits_ratio |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.pad_token_id |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.bos_token_id |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.eos_token_id |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.hidden_size |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.intermediate_size |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.hidden_act |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.feature_projection_input_dim |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.hidden_dropout |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.attention_dropout |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.activation_dropout |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.feat_proj_dropout |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.final_dropout |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.layerdrop |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.initializer_range |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.vocab_size |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.use_weighted_layer_sum |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.max_source_positions |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.position_embeddings_type |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.rotary_embedding_base |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.left_max_position_embeddings |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.right_max_position_embeddings |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.conv_depthwise_kernel_size |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.conformer_conv_dropout |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.apply_spec_augment |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.mask_time_prob |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.mask_time_length |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.mask_time_min_masks |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.mask_feature_prob |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.mask_feature_length |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.mask_feature_min_masks |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.ctc_loss_reduction |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.ctc_zero_infinity |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.add_adapter |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.adapter_kernel_size |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.adapter_stride |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.num_adapter_layers |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.adapter_act |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.output_hidden_size |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.use_intermediate_ffn_before_adapter |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.classifier_proj_size |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.tdnn_dim |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.tdnn_kernel |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.tdnn_dilation |
1 | 0 | 0 |
attr |
Wav2Vec2BertConfig.xvector_output_dim |
1 | 0 | 0 |
transformers.models.wav2vec2_bert.modeling_wav2vec2_bert (48 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Wav2Vec2BertPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Wav2Vec2BertPreTrainedModel._get_feat_extract_output_lengths |
3 | 2 | 0 |
meth |
Wav2Vec2BertPreTrainedModel._get_feature_vector_attention_mask |
4 | 2 | 0 |
meth |
Wav2Vec2BertModel.init |
2 | 1 | 0 |
meth |
Wav2Vec2BertModel._mask_hidden_states |
4 | 3 | 0 |
meth |
Wav2Vec2BertModel.forward |
8 | 7 | 0 |
attr |
Wav2Vec2BertModel.feature_projection |
1 | 0 | 0 |
attr |
Wav2Vec2BertModel.encoder |
1 | 0 | 0 |
attr |
Wav2Vec2BertModel.adapter |
1 | 0 | 0 |
attr |
Wav2Vec2BertModel.intermediate_ffn |
1 | 0 | 0 |
attr |
Wav2Vec2BertModel.masked_spec_embed |
1 | 0 | 0 |
meth |
Wav2Vec2BertForCTC.init |
3 | 1 | 0 |
meth |
Wav2Vec2BertForCTC.forward |
8 | 7 | 0 |
attr |
Wav2Vec2BertForCTC.wav2vec2_bert |
1 | 0 | 0 |
attr |
Wav2Vec2BertForCTC.dropout |
1 | 0 | 0 |
attr |
Wav2Vec2BertForCTC.target_lang |
1 | 0 | 0 |
attr |
Wav2Vec2BertForCTC.lm_head |
1 | 0 | 0 |
meth |
Wav2Vec2BertForXVector.init |
2 | 0 | 0 |
meth |
Wav2Vec2BertForXVector.freeze_base_model |
1 | 0 | 0 |
meth |
Wav2Vec2BertForXVector._get_tdnn_output_lengths |
2 | 1 | 0 |
meth |
Wav2Vec2BertForXVector.forward |
8 | 7 | 0 |
attr |
Wav2Vec2BertForXVector.wav2vec2_bert |
1 | 0 | 0 |
attr |
Wav2Vec2BertForXVector.projector |
1 | 0 | 0 |
attr |
Wav2Vec2BertForXVector.tdnn |
1 | 0 | 0 |
attr |
Wav2Vec2BertForXVector.feature_extractor |
1 | 0 | 0 |
attr |
Wav2Vec2BertForXVector.classifier |
1 | 0 | 0 |
attr |
Wav2Vec2BertForXVector.objective |
1 | 0 | 0 |
attr |
Wav2Vec2BertForXVector.layer_weights |
1 | 0 | 0 |
meth |
Wav2Vec2BertForSequenceClassification.init |
2 | 0 | 0 |
meth |
Wav2Vec2BertForSequenceClassification.freeze_base_model |
1 | 0 | 0 |
meth |
Wav2Vec2BertForSequenceClassification.forward |
8 | 7 | 0 |
attr |
Wav2Vec2BertForSequenceClassification.wav2vec2_bert |
1 | 0 | 0 |
attr |
Wav2Vec2BertForSequenceClassification.projector |
1 | 0 | 0 |
attr |
Wav2Vec2BertForSequenceClassification.classifier |
1 | 0 | 0 |
attr |
Wav2Vec2BertForSequenceClassification.layer_weights |
1 | 0 | 0 |
meth |
Wav2Vec2BertForAudioFrameClassification.init |
2 | 0 | 0 |
meth |
Wav2Vec2BertForAudioFrameClassification.freeze_base_model |
1 | 0 | 0 |
meth |
Wav2Vec2BertForAudioFrameClassification.forward |
8 | 7 | 0 |
attr |
Wav2Vec2BertForAudioFrameClassification.wav2vec2_bert |
1 | 0 | 0 |
attr |
Wav2Vec2BertForAudioFrameClassification.classifier |
1 | 0 | 0 |
attr |
Wav2Vec2BertForAudioFrameClassification.num_labels |
1 | 0 | 0 |
attr |
Wav2Vec2BertForAudioFrameClassification.layer_weights |
1 | 0 | 0 |
transformers.models.wav2vec2_bert.modular_wav2vec2_bert (30 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Wav2Vec2BertPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Wav2Vec2BertPreTrainedModel._get_feat_extract_output_lengths |
3 | 2 | 0 |
meth |
Wav2Vec2BertPreTrainedModel._get_feature_vector_attention_mask |
4 | 2 | 0 |
meth |
Wav2Vec2BertModel.init |
2 | 1 | 0 |
meth |
Wav2Vec2BertModel.freeze_feature_encoder |
1 | 0 | 0 |
meth |
Wav2Vec2BertModel.forward |
8 | 7 | 0 |
attr |
Wav2Vec2BertModel.feature_projection |
1 | 0 | 0 |
attr |
Wav2Vec2BertModel.encoder |
1 | 0 | 0 |
attr |
Wav2Vec2BertModel.adapter |
1 | 0 | 0 |
attr |
Wav2Vec2BertModel.intermediate_ffn |
1 | 0 | 0 |
attr |
Wav2Vec2BertModel.masked_spec_embed |
1 | 0 | 0 |
meth |
Wav2Vec2BertForCTC.init |
3 | 1 | 0 |
meth |
Wav2Vec2BertForCTC.freeze_feature_encoder |
1 | 0 | 0 |
meth |
Wav2Vec2BertForCTC.forward |
8 | 7 | 0 |
meth |
Wav2Vec2BertForXVector.init |
2 | 0 | 0 |
meth |
Wav2Vec2BertForXVector.freeze_feature_encoder |
1 | 0 | 0 |
meth |
Wav2Vec2BertForXVector.forward |
8 | 7 | 0 |
meth |
Wav2Vec2BertForSequenceClassification.init |
2 | 0 | 0 |
meth |
Wav2Vec2BertForSequenceClassification.freeze_feature_encoder |
1 | 0 | 0 |
meth |
Wav2Vec2BertForSequenceClassification.freeze_base_model |
1 | 0 | 0 |
meth |
Wav2Vec2BertForSequenceClassification.forward |
8 | 7 | 0 |
meth |
Wav2Vec2BertForAudioFrameClassification.init |
2 | 0 | 0 |
meth |
Wav2Vec2BertForAudioFrameClassification.freeze_feature_encoder |
1 | 0 | 0 |
meth |
Wav2Vec2BertForAudioFrameClassification.forward |
8 | 7 | 0 |
transformers.models.wav2vec2_bert.processing_wav2vec2_bert (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Wav2Vec2BertProcessor.init |
3 | 0 | 0 |
meth |
Wav2Vec2BertProcessor.call |
4 | 3 | 0 |
meth |
Wav2Vec2BertProcessor.pad |
4 | 0 | 0 |
prop |
Wav2Vec2BertProcessor.model_input_names |
1 | 0 | 0 |
transformers.models.wav2vec2_conformer.configuration_wav2vec2_conformer (120 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Wav2Vec2ConformerConfig.init |
60 | 0 | 0 |
prop |
Wav2Vec2ConformerConfig.inputs_to_logits_ratio |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.pad_token_id |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.bos_token_id |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.eos_token_id |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.hidden_size |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.feat_extract_norm |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.feat_extract_activation |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.conv_dim |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.conv_stride |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.conv_kernel |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.conv_bias |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.num_conv_pos_embeddings |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.num_conv_pos_embedding_groups |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.num_feat_extract_layers |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.intermediate_size |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.hidden_act |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.num_attention_heads |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.hidden_dropout |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.attention_dropout |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.activation_dropout |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.feat_proj_dropout |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.final_dropout |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.layerdrop |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.initializer_range |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.vocab_size |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.use_weighted_layer_sum |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.max_source_positions |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.position_embeddings_type |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.rotary_embedding_base |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.conv_depthwise_kernel_size |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.conformer_conv_dropout |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.apply_spec_augment |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.mask_time_prob |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.mask_time_length |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.mask_time_min_masks |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.mask_feature_prob |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.mask_feature_length |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.mask_feature_min_masks |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.num_codevectors_per_group |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.num_codevector_groups |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.contrastive_logits_temperature |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.feat_quantizer_dropout |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.num_negatives |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.codevector_dim |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.proj_codevector_dim |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.diversity_loss_weight |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.ctc_loss_reduction |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.ctc_zero_infinity |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.add_adapter |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.adapter_kernel_size |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.adapter_stride |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.num_adapter_layers |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.output_hidden_size |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.classifier_proj_size |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.tdnn_dim |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.tdnn_kernel |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.tdnn_dilation |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerConfig.xvector_output_dim |
1 | 0 | 0 |
transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer (63 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Wav2Vec2ConformerForCTC.init |
3 | 1 | 0 |
meth |
Wav2Vec2ConformerForCTC.freeze_feature_encoder |
1 | 0 | 0 |
meth |
Wav2Vec2ConformerForCTC.forward |
8 | 7 | 0 |
attr |
Wav2Vec2ConformerForCTC.wav2vec2_conformer |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerForCTC.dropout |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerForCTC.target_lang |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerForCTC.lm_head |
1 | 0 | 0 |
meth |
Wav2Vec2ConformerPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Wav2Vec2ConformerPreTrainedModel._get_feat_extract_output_lengths |
3 | 2 | 0 |
meth |
Wav2Vec2ConformerPreTrainedModel._get_feature_vector_attention_mask |
4 | 2 | 0 |
meth |
Wav2Vec2ConformerForAudioFrameClassification.init |
2 | 0 | 0 |
meth |
Wav2Vec2ConformerForAudioFrameClassification.freeze_feature_encoder |
1 | 0 | 0 |
meth |
Wav2Vec2ConformerForAudioFrameClassification.freeze_base_model |
1 | 0 | 0 |
meth |
Wav2Vec2ConformerForAudioFrameClassification.forward |
8 | 7 | 0 |
attr |
Wav2Vec2ConformerForAudioFrameClassification.wav2vec2_conformer |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerForAudioFrameClassification.classifier |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerForAudioFrameClassification.num_labels |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerForAudioFrameClassification.layer_weights |
1 | 0 | 0 |
meth |
Wav2Vec2ConformerForSequenceClassification.init |
2 | 0 | 0 |
meth |
Wav2Vec2ConformerForSequenceClassification.freeze_feature_encoder |
1 | 0 | 0 |
meth |
Wav2Vec2ConformerForSequenceClassification.freeze_base_model |
1 | 0 | 0 |
meth |
Wav2Vec2ConformerForSequenceClassification.forward |
8 | 7 | 0 |
attr |
Wav2Vec2ConformerForSequenceClassification.wav2vec2_conformer |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerForSequenceClassification.projector |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerForSequenceClassification.classifier |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerForSequenceClassification.layer_weights |
1 | 0 | 0 |
meth |
Wav2Vec2ConformerForPreTraining.init |
2 | 1 | 0 |
meth |
Wav2Vec2ConformerForPreTraining.set_gumbel_temperature |
2 | 1 | 0 |
meth |
Wav2Vec2ConformerForPreTraining.freeze_feature_encoder |
1 | 0 | 0 |
meth |
Wav2Vec2ConformerForPreTraining.compute_contrastive_logits |
5 | 4 | 0 |
meth |
Wav2Vec2ConformerForPreTraining.forward |
9 | 8 | 0 |
attr |
Wav2Vec2ConformerForPreTraining.wav2vec2_conformer |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerForPreTraining.dropout_features |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerForPreTraining.quantizer |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerForPreTraining.project_hid |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerForPreTraining.project_q |
1 | 0 | 0 |
meth |
Wav2Vec2ConformerForXVector.init |
2 | 0 | 0 |
meth |
Wav2Vec2ConformerForXVector.freeze_feature_encoder |
1 | 0 | 0 |
meth |
Wav2Vec2ConformerForXVector.freeze_base_model |
1 | 0 | 0 |
meth |
Wav2Vec2ConformerForXVector._get_tdnn_output_lengths |
2 | 1 | 0 |
meth |
Wav2Vec2ConformerForXVector.forward |
8 | 7 | 0 |
attr |
Wav2Vec2ConformerForXVector.wav2vec2_conformer |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerForXVector.projector |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerForXVector.tdnn |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerForXVector.feature_extractor |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerForXVector.classifier |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerForXVector.objective |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerForXVector.layer_weights |
1 | 0 | 0 |
meth |
Wav2Vec2ConformerModel.init |
2 | 1 | 0 |
meth |
Wav2Vec2ConformerModel.freeze_feature_encoder |
1 | 0 | 0 |
meth |
Wav2Vec2ConformerModel._mask_hidden_states |
4 | 3 | 0 |
meth |
Wav2Vec2ConformerModel.forward |
8 | 7 | 0 |
attr |
Wav2Vec2ConformerModel.feature_extractor |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerModel.feature_projection |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerModel.encoder |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerModel.adapter |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerModel.masked_spec_embed |
1 | 0 | 0 |
transformers.models.wav2vec2_conformer.modular_wav2vec2_conformer (22 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Wav2Vec2ConformerForCTC.init |
3 | 1 | 0 |
meth |
Wav2Vec2ConformerForCTC.tie_weights |
1 | 0 | 0 |
meth |
Wav2Vec2ConformerForCTC.freeze_base_model |
1 | 0 | 0 |
meth |
Wav2Vec2ConformerPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Wav2Vec2ConformerPreTrainedModel._get_feat_extract_output_lengths |
3 | 2 | 0 |
meth |
Wav2Vec2ConformerPreTrainedModel._get_feature_vector_attention_mask |
4 | 2 | 0 |
meth |
Wav2Vec2ConformerForAudioFrameClassification.init |
2 | 0 | 0 |
meth |
Wav2Vec2ConformerForSequenceClassification.init |
2 | 0 | 0 |
meth |
Wav2Vec2ConformerForPreTraining.init |
2 | 1 | 0 |
meth |
Wav2Vec2ConformerForXVector.init |
2 | 0 | 0 |
meth |
Wav2Vec2ConformerModel.init |
2 | 1 | 0 |
attr |
Wav2Vec2ConformerModel.feature_extractor |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerModel.feature_projection |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerModel.encoder |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerModel.adapter |
1 | 0 | 0 |
attr |
Wav2Vec2ConformerModel.masked_spec_embed |
1 | 0 | 0 |
transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Wav2Vec2PhonemeCTCTokenizer.init |
12 | 0 | 0 |
meth |
Wav2Vec2PhonemeCTCTokenizer.init_backend |
2 | 1 | 0 |
meth |
Wav2Vec2PhonemeCTCTokenizer._tokenize |
3 | 0 | 0 |
meth |
Wav2Vec2PhonemeCTCTokenizer.decode |
6 | 5 | 0 |
meth |
Wav2Vec2PhonemeCTCTokenizer.batch_decode |
6 | 5 | 0 |
prop |
Wav2Vec2PhonemeCTCTokenizer.word_delimiter_token |
2 | 1 | 0 |
prop |
Wav2Vec2PhonemeCTCTokenizer.word_delimiter_token_id |
2 | 1 | 0 |
prop |
Wav2Vec2PhonemeCTCTokenizer.phone_delimiter_token |
2 | 1 | 0 |
prop |
Wav2Vec2PhonemeCTCTokenizer.phone_delimiter_token_id |
2 | 1 | 0 |
attr |
Wav2Vec2PhonemeCTCTokenizer.do_phonemize |
1 | 0 | 0 |
attr |
Wav2Vec2PhonemeCTCTokenizer.phonemizer_lang |
1 | 0 | 0 |
attr |
Wav2Vec2PhonemeCTCTokenizer.phonemizer_backend |
1 | 0 | 0 |
attr |
Wav2Vec2PhonemeCTCTokenizer.decoder |
1 | 0 | 0 |
attr |
Wav2Vec2PhonemeCTCTokenizer.encoder |
1 | 0 | 0 |
transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm (20 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Wav2Vec2ProcessorWithLM.init |
4 | 3 | 0 |
meth |
Wav2Vec2ProcessorWithLM.save_pretrained |
2 | 0 | 0 |
meth |
Wav2Vec2ProcessorWithLM.from_pretrained |
3 | 0 | 0 |
meth |
Wav2Vec2ProcessorWithLM._set_language_model_attribute |
4 | 3 | 0 |
meth |
Wav2Vec2ProcessorWithLM.get_missing_alphabet_tokens |
3 | 0 | 0 |
meth |
Wav2Vec2ProcessorWithLM.call |
3 | 0 | 0 |
meth |
Wav2Vec2ProcessorWithLM.pad |
3 | 0 | 0 |
meth |
Wav2Vec2ProcessorWithLM.batch_decode |
15 | 14 | 0 |
meth |
Wav2Vec2ProcessorWithLM.decode |
13 | 12 | 0 |
prop |
Wav2Vec2ProcessorWithLM.language_model |
1 | 0 | 0 |
attr |
Wav2Vec2ProcessorWithLM.decoder |
1 | 0 | 0 |
transformers.models.wavlm.configuration_wavlm (114 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
WavLMConfig.init |
57 | 0 | 0 |
prop |
WavLMConfig.inputs_to_logits_ratio |
1 | 0 | 0 |
attr |
WavLMConfig.pad_token_id |
1 | 0 | 0 |
attr |
WavLMConfig.bos_token_id |
1 | 0 | 0 |
attr |
WavLMConfig.eos_token_id |
1 | 0 | 0 |
attr |
WavLMConfig.hidden_size |
1 | 0 | 0 |
attr |
WavLMConfig.feat_extract_norm |
1 | 0 | 0 |
attr |
WavLMConfig.feat_extract_activation |
1 | 0 | 0 |
attr |
WavLMConfig.conv_dim |
1 | 0 | 0 |
attr |
WavLMConfig.conv_stride |
1 | 0 | 0 |
attr |
WavLMConfig.conv_kernel |
1 | 0 | 0 |
attr |
WavLMConfig.conv_bias |
1 | 0 | 0 |
attr |
WavLMConfig.num_buckets |
1 | 0 | 0 |
attr |
WavLMConfig.max_bucket_distance |
1 | 0 | 0 |
attr |
WavLMConfig.num_conv_pos_embeddings |
1 | 0 | 0 |
attr |
WavLMConfig.num_conv_pos_embedding_groups |
1 | 0 | 0 |
attr |
WavLMConfig.num_feat_extract_layers |
1 | 0 | 0 |
attr |
WavLMConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
WavLMConfig.intermediate_size |
1 | 0 | 0 |
attr |
WavLMConfig.hidden_act |
1 | 0 | 0 |
attr |
WavLMConfig.num_attention_heads |
1 | 0 | 0 |
attr |
WavLMConfig.hidden_dropout |
1 | 0 | 0 |
attr |
WavLMConfig.attention_dropout |
1 | 0 | 0 |
attr |
WavLMConfig.activation_dropout |
1 | 0 | 0 |
attr |
WavLMConfig.feat_proj_dropout |
1 | 0 | 0 |
attr |
WavLMConfig.final_dropout |
1 | 0 | 0 |
attr |
WavLMConfig.layerdrop |
1 | 0 | 0 |
attr |
WavLMConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
WavLMConfig.initializer_range |
1 | 0 | 0 |
attr |
WavLMConfig.num_ctc_classes |
1 | 0 | 0 |
attr |
WavLMConfig.vocab_size |
1 | 0 | 0 |
attr |
WavLMConfig.do_stable_layer_norm |
1 | 0 | 0 |
attr |
WavLMConfig.use_weighted_layer_sum |
1 | 0 | 0 |
attr |
WavLMConfig.classifier_proj_size |
1 | 0 | 0 |
attr |
WavLMConfig.apply_spec_augment |
1 | 0 | 0 |
attr |
WavLMConfig.mask_time_prob |
1 | 0 | 0 |
attr |
WavLMConfig.mask_time_length |
1 | 0 | 0 |
attr |
WavLMConfig.mask_time_min_masks |
1 | 0 | 0 |
attr |
WavLMConfig.mask_feature_prob |
1 | 0 | 0 |
attr |
WavLMConfig.mask_feature_length |
1 | 0 | 0 |
attr |
WavLMConfig.num_codevectors_per_group |
1 | 0 | 0 |
attr |
WavLMConfig.num_codevector_groups |
1 | 0 | 0 |
attr |
WavLMConfig.contrastive_logits_temperature |
1 | 0 | 0 |
attr |
WavLMConfig.num_negatives |
1 | 0 | 0 |
attr |
WavLMConfig.codevector_dim |
1 | 0 | 0 |
attr |
WavLMConfig.proj_codevector_dim |
1 | 0 | 0 |
attr |
WavLMConfig.diversity_loss_weight |
1 | 0 | 0 |
attr |
WavLMConfig.ctc_loss_reduction |
1 | 0 | 0 |
attr |
WavLMConfig.ctc_zero_infinity |
1 | 0 | 0 |
attr |
WavLMConfig.add_adapter |
1 | 0 | 0 |
attr |
WavLMConfig.adapter_kernel_size |
1 | 0 | 0 |
attr |
WavLMConfig.adapter_stride |
1 | 0 | 0 |
attr |
WavLMConfig.num_adapter_layers |
1 | 0 | 0 |
attr |
WavLMConfig.output_hidden_size |
1 | 0 | 0 |
attr |
WavLMConfig.tdnn_dim |
1 | 0 | 0 |
attr |
WavLMConfig.tdnn_kernel |
1 | 0 | 0 |
attr |
WavLMConfig.tdnn_dilation |
1 | 0 | 0 |
attr |
WavLMConfig.xvector_output_dim |
1 | 0 | 0 |
transformers.models.wavlm.modeling_wavlm (56 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
WavLMPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
WavLMPreTrainedModel._get_feat_extract_output_lengths |
3 | 2 | 0 |
meth |
WavLMPreTrainedModel._get_feature_vector_attention_mask |
4 | 2 | 0 |
meth |
WavLMModel.__init__ |
2 | 1 | 0 |
meth |
WavLMModel.freeze_feature_encoder |
1 | 0 | 0 |
meth |
WavLMModel._mask_hidden_states |
4 | 3 | 0 |
meth |
WavLMModel.forward |
8 | 7 | 0 |
attr |
WavLMModel.feature_extractor |
1 | 0 | 0 |
attr |
WavLMModel.feature_projection |
1 | 0 | 0 |
attr |
WavLMModel.adapter |
1 | 0 | 0 |
attr |
WavLMModel.masked_spec_embed |
1 | 0 | 0 |
attr |
WavLMModel.encoder |
1 | 0 | 0 |
meth |
WavLMForXVector.__init__ |
2 | 0 | 0 |
meth |
WavLMForXVector.freeze_feature_encoder |
1 | 0 | 0 |
meth |
WavLMForXVector.freeze_base_model |
1 | 0 | 0 |
meth |
WavLMForXVector._get_tdnn_output_lengths |
2 | 1 | 0 |
meth |
WavLMForXVector.forward |
8 | 7 | 0 |
attr |
WavLMForXVector.wavlm |
1 | 0 | 0 |
attr |
WavLMForXVector.projector |
1 | 0 | 0 |
attr |
WavLMForXVector.tdnn |
1 | 0 | 0 |
attr |
WavLMForXVector.feature_extractor |
1 | 0 | 0 |
attr |
WavLMForXVector.classifier |
1 | 0 | 0 |
attr |
WavLMForXVector.objective |
1 | 0 | 0 |
attr |
WavLMForXVector.layer_weights |
1 | 0 | 0 |
meth |
WavLMForAudioFrameClassification.__init__ |
2 | 0 | 0 |
meth |
WavLMForAudioFrameClassification.freeze_feature_encoder |
1 | 0 | 0 |
meth |
WavLMForAudioFrameClassification.freeze_base_model |
1 | 0 | 0 |
meth |
WavLMForAudioFrameClassification.forward |
8 | 7 | 0 |
attr |
WavLMForAudioFrameClassification.wavlm |
1 | 0 | 0 |
attr |
WavLMForAudioFrameClassification.classifier |
1 | 0 | 0 |
attr |
WavLMForAudioFrameClassification.num_labels |
1 | 0 | 0 |
attr |
WavLMForAudioFrameClassification.layer_weights |
1 | 0 | 0 |
meth |
WavLMForSequenceClassification.__init__ |
2 | 0 | 0 |
meth |
WavLMForSequenceClassification.freeze_feature_encoder |
1 | 0 | 0 |
meth |
WavLMForSequenceClassification.freeze_base_model |
1 | 0 | 0 |
meth |
WavLMForSequenceClassification.forward |
8 | 7 | 0 |
attr |
WavLMForSequenceClassification.wavlm |
1 | 0 | 0 |
attr |
WavLMForSequenceClassification.projector |
1 | 0 | 0 |
attr |
WavLMForSequenceClassification.classifier |
1 | 0 | 0 |
attr |
WavLMForSequenceClassification.layer_weights |
1 | 0 | 0 |
meth |
WavLMForCTC.__init__ |
3 | 1 | 0 |
meth |
WavLMForCTC.tie_weights |
2 | 0 | 0 |
meth |
WavLMForCTC.freeze_feature_encoder |
1 | 0 | 0 |
meth |
WavLMForCTC.freeze_base_model |
1 | 0 | 0 |
meth |
WavLMForCTC.forward |
8 | 7 | 0 |
attr |
WavLMForCTC.wavlm |
1 | 0 | 0 |
attr |
WavLMForCTC.dropout |
1 | 0 | 0 |
attr |
WavLMForCTC.target_lang |
1 | 0 | 0 |
attr |
WavLMForCTC.lm_head |
1 | 0 | 0 |
transformers.models.wavlm.modular_wavlm (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
WavLMPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
WavLMPreTrainedModel._get_adapters |
1 | 0 | 0 |
meth |
WavLMPreTrainedModel.init_adapter_layers |
1 | 0 | 0 |
meth |
WavLMPreTrainedModel.load_adapter |
1 | 0 | 0 |
transformers.models.whisper.configuration_whisper (76 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
WhisperConfig.__init__ |
40 | 0 | 0 |
attr |
WhisperConfig.vocab_size |
1 | 0 | 0 |
attr |
WhisperConfig.num_mel_bins |
1 | 0 | 0 |
attr |
WhisperConfig.d_model |
1 | 0 | 0 |
attr |
WhisperConfig.encoder_layers |
1 | 0 | 0 |
attr |
WhisperConfig.encoder_attention_heads |
1 | 0 | 0 |
attr |
WhisperConfig.decoder_layers |
1 | 0 | 0 |
attr |
WhisperConfig.decoder_attention_heads |
1 | 0 | 0 |
attr |
WhisperConfig.decoder_ffn_dim |
1 | 0 | 0 |
attr |
WhisperConfig.encoder_ffn_dim |
1 | 0 | 0 |
attr |
WhisperConfig.dropout |
1 | 0 | 0 |
attr |
WhisperConfig.attention_dropout |
1 | 0 | 0 |
attr |
WhisperConfig.activation_dropout |
1 | 0 | 0 |
attr |
WhisperConfig.activation_function |
1 | 0 | 0 |
attr |
WhisperConfig.init_std |
1 | 0 | 0 |
attr |
WhisperConfig.encoder_layerdrop |
1 | 0 | 0 |
attr |
WhisperConfig.decoder_layerdrop |
1 | 0 | 0 |
attr |
WhisperConfig.use_cache |
1 | 0 | 0 |
attr |
WhisperConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
WhisperConfig.scale_embedding |
1 | 0 | 0 |
attr |
WhisperConfig.max_source_positions |
1 | 0 | 0 |
attr |
WhisperConfig.max_target_positions |
1 | 0 | 0 |
attr |
WhisperConfig.classifier_proj_size |
1 | 0 | 0 |
attr |
WhisperConfig.use_weighted_layer_sum |
1 | 0 | 0 |
attr |
WhisperConfig.apply_spec_augment |
1 | 0 | 0 |
attr |
WhisperConfig.mask_time_prob |
1 | 0 | 0 |
attr |
WhisperConfig.mask_time_length |
1 | 0 | 0 |
attr |
WhisperConfig.mask_time_min_masks |
1 | 0 | 0 |
attr |
WhisperConfig.mask_feature_prob |
1 | 0 | 0 |
attr |
WhisperConfig.mask_feature_length |
1 | 0 | 0 |
attr |
WhisperConfig.mask_feature_min_masks |
1 | 0 | 0 |
attr |
WhisperConfig.median_filter_width |
1 | 0 | 0 |
attr |
WhisperConfig.pad_token_id |
1 | 0 | 0 |
attr |
WhisperConfig.bos_token_id |
1 | 0 | 0 |
attr |
WhisperConfig.eos_token_id |
1 | 0 | 0 |
attr |
WhisperConfig.decoder_start_token_id |
1 | 0 | 0 |
attr |
WhisperConfig.tie_word_embeddings |
1 | 0 | 0 |
transformers.models.whisper.english_normalizer (43 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
remove_symbols_and_diacritics |
3 | 1 | 0 |
meth |
EnglishSpellingNormalizer.__init__ |
2 | 0 | 0 |
meth |
EnglishSpellingNormalizer.__call__ |
2 | 1 | 0 |
attr |
EnglishSpellingNormalizer.mapping |
1 | 0 | 0 |
func |
remove_symbols |
2 | 1 | 0 |
meth |
EnglishTextNormalizer.__init__ |
2 | 0 | 0 |
meth |
EnglishTextNormalizer.__call__ |
2 | 1 | 0 |
attr |
EnglishTextNormalizer.ignore_patterns |
1 | 0 | 0 |
attr |
EnglishTextNormalizer.replacers |
1 | 0 | 0 |
attr |
EnglishTextNormalizer.standardize_numbers |
1 | 0 | 0 |
attr |
EnglishTextNormalizer.standardize_spellings |
1 | 0 | 0 |
meth |
EnglishNumberNormalizer.__init__ |
1 | 0 | 0 |
meth |
EnglishNumberNormalizer.preprocess |
2 | 1 | 0 |
meth |
EnglishNumberNormalizer.postprocess |
2 | 1 | 0 |
meth |
EnglishNumberNormalizer.__call__ |
2 | 1 | 0 |
attr |
EnglishNumberNormalizer.zeros |
1 | 0 | 0 |
attr |
EnglishNumberNormalizer.ones |
1 | 0 | 0 |
attr |
EnglishNumberNormalizer.ones_plural |
1 | 0 | 0 |
attr |
EnglishNumberNormalizer.ones_ordinal |
1 | 0 | 0 |
attr |
EnglishNumberNormalizer.ones_suffixed |
1 | 0 | 0 |
attr |
EnglishNumberNormalizer.tens |
1 | 0 | 0 |
attr |
EnglishNumberNormalizer.tens_plural |
1 | 0 | 0 |
attr |
EnglishNumberNormalizer.tens_ordinal |
1 | 0 | 0 |
attr |
EnglishNumberNormalizer.tens_suffixed |
1 | 0 | 0 |
attr |
EnglishNumberNormalizer.multipliers |
1 | 0 | 0 |
attr |
EnglishNumberNormalizer.multipliers_plural |
1 | 0 | 0 |
attr |
EnglishNumberNormalizer.multipliers_ordinal |
1 | 0 | 0 |
attr |
EnglishNumberNormalizer.multipliers_suffixed |
1 | 0 | 0 |
attr |
EnglishNumberNormalizer.decimals |
1 | 0 | 0 |
attr |
EnglishNumberNormalizer.preceding_prefixers |
1 | 0 | 0 |
attr |
EnglishNumberNormalizer.following_prefixers |
1 | 0 | 0 |
attr |
EnglishNumberNormalizer.prefixes |
1 | 0 | 0 |
attr |
EnglishNumberNormalizer.suffixers |
1 | 0 | 0 |
attr |
EnglishNumberNormalizer.specials |
1 | 0 | 0 |
attr |
EnglishNumberNormalizer.words |
1 | 0 | 0 |
attr |
EnglishNumberNormalizer.literal_words |
1 | 0 | 0 |
meth |
BasicTextNormalizer.__init__ |
3 | 2 | 0 |
meth |
BasicTextNormalizer.__call__ |
2 | 1 | 0 |
attr |
BasicTextNormalizer.clean |
1 | 0 | 0 |
attr |
BasicTextNormalizer.split_letters |
1 | 0 | 0 |
transformers.models.whisper.feature_extraction_whisper (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
WhisperFeatureExtractor.__init__ |
10 | 0 | 0 |
meth |
WhisperFeatureExtractor.__call__ |
12 | 11 | 0 |
attr |
WhisperFeatureExtractor.n_fft |
1 | 0 | 0 |
attr |
WhisperFeatureExtractor.hop_length |
1 | 0 | 0 |
attr |
WhisperFeatureExtractor.chunk_length |
1 | 0 | 0 |
attr |
WhisperFeatureExtractor.n_samples |
1 | 0 | 0 |
attr |
WhisperFeatureExtractor.nb_max_frames |
1 | 0 | 0 |
attr |
WhisperFeatureExtractor.sampling_rate |
1 | 0 | 0 |
attr |
WhisperFeatureExtractor.dither |
1 | 0 | 0 |
attr |
WhisperFeatureExtractor.mel_filters |
1 | 0 | 0 |
transformers.models.whisper.generation_whisper (177 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
WhisperGenerationMixin._extract_token_timestamps |
6 | 0 | 0 |
meth |
WhisperGenerationMixin.generate |
28 | 26 | 0 |
meth |
WhisperGenerationMixin.generate_with_fallback |
18 | 0 | 0 |
meth |
WhisperGenerationMixin._prepare_segments |
4 | 0 | 0 |
meth |
WhisperGenerationMixin._postprocess_outputs |
8 | 0 | 0 |
meth |
WhisperGenerationMixin._stack_split_outputs |
5 | 0 | 0 |
meth |
WhisperGenerationMixin._need_fallback |
8 | 0 | 0 |
meth |
WhisperGenerationMixin._expand_variables_for_generation |
8 | 0 | 0 |
meth |
WhisperGenerationMixin._setup_no_speech_detection |
5 | 0 | 0 |
meth |
WhisperGenerationMixin._retrieve_total_input_frames |
4 | 0 | 0 |
meth |
WhisperGenerationMixin._maybe_warn_unused_inputs |
7 | 0 | 0 |
meth |
WhisperGenerationMixin._set_return_outputs |
5 | 0 | 0 |
meth |
WhisperGenerationMixin._set_return_timestamps |
4 | 0 | 0 |
meth |
WhisperGenerationMixin._set_language_and_task |
5 | 0 | 0 |
meth |
WhisperGenerationMixin._retrieve_init_tokens |
7 | 0 | 0 |
meth |
WhisperGenerationMixin._check_decoder_input_ids |
2 | 0 | 0 |
meth |
WhisperGenerationMixin._set_num_frames |
5 | 0 | 0 |
meth |
WhisperGenerationMixin._set_thresholds_and_condition |
6 | 0 | 0 |
meth |
WhisperGenerationMixin._set_prompt_condition_type |
3 | 0 | 0 |
meth |
WhisperGenerationMixin._set_condition_on_prev_tokens |
3 | 0 | 0 |
meth |
WhisperGenerationMixin._retrieve_max_frames_and_seek |
5 | 0 | 0 |
meth |
WhisperGenerationMixin._retrieve_logit_processors |
6 | 0 | 0 |
meth |
WhisperGenerationMixin._maybe_reduce_batch |
6 | 0 | 0 |
meth |
WhisperGenerationMixin._get_input_segment |
7 | 0 | 0 |
meth |
WhisperGenerationMixin._prepare_decoder_input_ids |
13 | 0 | 0 |
meth |
WhisperGenerationMixin._set_max_new_tokens_and_length |
4 | 0 | 0 |
meth |
WhisperGenerationMixin._retrieve_compression_ratio |
3 | 0 | 0 |
meth |
WhisperGenerationMixin._retrieve_avg_logprobs |
4 | 0 | 0 |
meth |
WhisperGenerationMixin._retrieve_segment |
13 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.models.whisper.modeling_whisper (40 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
WhisperPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
WhisperPreTrainedModel._get_feat_extract_output_lengths |
2 | 1 | 0 |
meth |
WhisperForConditionalGeneration.__init__ |
2 | 1 | 0 |
meth |
WhisperForConditionalGeneration.get_output_embeddings |
1 | 0 | 0 |
meth |
WhisperForConditionalGeneration.set_output_embeddings |
2 | 0 | 0 |
meth |
WhisperForConditionalGeneration.freeze_encoder |
1 | 0 | 0 |
meth |
WhisperForConditionalGeneration.forward |
16 | 15 | 0 |
attr |
WhisperForConditionalGeneration.model |
1 | 0 | 0 |
attr |
WhisperForConditionalGeneration.proj_out |
1 | 0 | 0 |
attr |
WhisperForConditionalGeneration.max_target_positions |
1 | 0 | 0 |
meth |
WhisperModel.__init__ |
2 | 1 | 0 |
meth |
WhisperModel.get_input_embeddings |
1 | 0 | 0 |
meth |
WhisperModel.set_input_embeddings |
2 | 0 | 0 |
meth |
WhisperModel.freeze_encoder |
1 | 0 | 0 |
meth |
WhisperModel._mask_input_features |
3 | 2 | 0 |
meth |
WhisperModel.forward |
15 | 14 | 0 |
attr |
WhisperModel.encoder |
1 | 0 | 0 |
attr |
WhisperModel.decoder |
1 | 0 | 0 |
meth |
WhisperForAudioClassification.__init__ |
2 | 0 | 0 |
meth |
WhisperForAudioClassification.freeze_encoder |
1 | 0 | 0 |
meth |
WhisperForAudioClassification.set_input_embeddings |
2 | 1 | 0 |
meth |
WhisperForAudioClassification.forward |
8 | 7 | 0 |
attr |
WhisperForAudioClassification.encoder |
1 | 0 | 0 |
attr |
WhisperForAudioClassification.projector |
1 | 0 | 0 |
attr |
WhisperForAudioClassification.classifier |
1 | 0 | 0 |
attr |
WhisperForAudioClassification.layer_weights |
1 | 0 | 0 |
meth |
WhisperForCausalLM.__init__ |
2 | 0 | 0 |
meth |
WhisperForCausalLM.get_output_embeddings |
1 | 0 | 0 |
meth |
WhisperForCausalLM.set_output_embeddings |
2 | 0 | 0 |
meth |
WhisperForCausalLM.set_input_embeddings |
2 | 0 | 0 |
meth |
WhisperForCausalLM.forward |
13 | 12 | 0 |
attr |
WhisperForCausalLM.model |
1 | 0 | 0 |
attr |
WhisperForCausalLM.proj_out |
1 | 0 | 0 |
transformers.models.whisper.processing_whisper (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
WhisperProcessor.__init__ |
3 | 0 | 0 |
meth |
WhisperProcessor.get_decoder_prompt_ids |
4 | 0 | 0 |
meth |
WhisperProcessor.__call__ |
3 | 0 | 0 |
meth |
WhisperProcessor.get_prompt_ids |
3 | 1 | 0 |
transformers.models.whisper.tokenization_whisper (56 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
WhisperTokenizer.__init__ |
12 | 1 | 0 |
meth |
WhisperTokenizer._decode_with_timestamps |
5 | 1 | 0 |
meth |
WhisperTokenizer._compute_offsets |
4 | 0 | 0 |
meth |
WhisperTokenizer.timestamp_ids |
2 | 0 | 0 |
meth |
WhisperTokenizer._preprocess_token_ids |
3 | 1 | 0 |
meth |
WhisperTokenizer._filter_timestamp_ids |
2 | 0 | 0 |
meth |
WhisperTokenizer.decode |
11 | 9 | 0 |
meth |
WhisperTokenizer._decode |
6 | 4 | 0 |
meth |
WhisperTokenizer.normalize |
2 | 0 | 0 |
meth |
WhisperTokenizer.basic_normalize |
3 | 0 | 0 |
meth |
WhisperTokenizer.set_prefix_tokens |
4 | 3 | 0 |
meth |
WhisperTokenizer.build_inputs_with_special_tokens |
3 | 1 | 0 |
meth |
WhisperTokenizer.get_decoder_prompt_ids |
4 | 0 | 0 |
meth |
WhisperTokenizer._decode_asr |
5 | 0 | 0 |
meth |
WhisperTokenizer.get_prompt_ids |
3 | 1 | 0 |
meth |
WhisperTokenizer._strip_prompt |
4 | 3 | 0 |
meth |
WhisperTokenizer._convert_to_list |
2 | 0 | 0 |
attr |
WhisperTokenizer.timestamp_pat |
1 | 0 | 0 |
attr |
WhisperTokenizer.language |
1 | 0 | 0 |
attr |
WhisperTokenizer.task |
1 | 0 | 0 |
attr |
WhisperTokenizer.predict_timestamps |
1 | 0 | 0 |
attr |
WhisperTokenizer.english_spelling_normalizer |
1 | 0 | 0 |
transformers.models.x_clip.configuration_x_clip (91 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
XCLIPConfig.__init__ |
12 | 0 | 0 |
attr |
XCLIPConfig.text_config |
1 | 0 | 0 |
attr |
XCLIPConfig.vision_config |
1 | 0 | 0 |
attr |
XCLIPConfig.projection_dim |
1 | 0 | 0 |
attr |
XCLIPConfig.prompt_layers |
1 | 0 | 0 |
attr |
XCLIPConfig.prompt_alpha |
1 | 0 | 0 |
attr |
XCLIPConfig.prompt_hidden_act |
1 | 0 | 0 |
attr |
XCLIPConfig.prompt_num_attention_heads |
1 | 0 | 0 |
attr |
XCLIPConfig.prompt_attention_dropout |
1 | 0 | 0 |
attr |
XCLIPConfig.prompt_projection_dropout |
1 | 0 | 0 |
attr |
XCLIPConfig.logit_scale_init_value |
1 | 0 | 0 |
attr |
XCLIPConfig.initializer_factor |
1 | 0 | 0 |
meth |
XCLIPTextConfig.__init__ |
16 | 0 | 0 |
attr |
XCLIPTextConfig.pad_token_id |
1 | 0 | 0 |
attr |
XCLIPTextConfig.bos_token_id |
1 | 0 | 0 |
attr |
XCLIPTextConfig.eos_token_id |
1 | 0 | 0 |
attr |
XCLIPTextConfig.vocab_size |
1 | 0 | 0 |
attr |
XCLIPTextConfig.hidden_size |
1 | 0 | 0 |
attr |
XCLIPTextConfig.intermediate_size |
1 | 0 | 0 |
attr |
XCLIPTextConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
XCLIPTextConfig.num_attention_heads |
1 | 0 | 0 |
attr |
XCLIPTextConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
XCLIPTextConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
XCLIPTextConfig.hidden_act |
1 | 0 | 0 |
attr |
XCLIPTextConfig.initializer_range |
1 | 0 | 0 |
attr |
XCLIPTextConfig.initializer_factor |
1 | 0 | 0 |
attr |
XCLIPTextConfig.attention_dropout |
1 | 0 | 0 |
meth |
XCLIPVisionConfig.__init__ |
20 | 0 | 0 |
attr |
XCLIPVisionConfig.hidden_size |
1 | 0 | 0 |
attr |
XCLIPVisionConfig.intermediate_size |
1 | 0 | 0 |
attr |
XCLIPVisionConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
XCLIPVisionConfig.num_attention_heads |
1 | 0 | 0 |
attr |
XCLIPVisionConfig.mit_hidden_size |
1 | 0 | 0 |
attr |
XCLIPVisionConfig.mit_intermediate_size |
1 | 0 | 0 |
attr |
XCLIPVisionConfig.mit_num_hidden_layers |
1 | 0 | 0 |
attr |
XCLIPVisionConfig.mit_num_attention_heads |
1 | 0 | 0 |
attr |
XCLIPVisionConfig.num_channels |
1 | 0 | 0 |
attr |
XCLIPVisionConfig.patch_size |
1 | 0 | 0 |
attr |
XCLIPVisionConfig.num_frames |
1 | 0 | 0 |
attr |
XCLIPVisionConfig.image_size |
1 | 0 | 0 |
attr |
XCLIPVisionConfig.initializer_range |
1 | 0 | 0 |
attr |
XCLIPVisionConfig.initializer_factor |
1 | 0 | 0 |
attr |
XCLIPVisionConfig.attention_dropout |
1 | 0 | 0 |
attr |
XCLIPVisionConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
XCLIPVisionConfig.hidden_act |
1 | 0 | 0 |
attr |
XCLIPVisionConfig.drop_path_rate |
1 | 0 | 0 |
transformers.models.x_clip.modeling_x_clip (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
XCLIPPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
XCLIPTextModel.__init__ |
2 | 1 | 0 |
meth |
XCLIPTextModel.set_input_embeddings |
2 | 0 | 0 |
meth |
XCLIPTextModel.forward |
8 | 7 | 0 |
attr |
XCLIPTextModel.text_model |
1 | 0 | 0 |
meth |
XCLIPVisionModel.__init__ |
2 | 1 | 0 |
meth |
XCLIPVisionModel.forward |
6 | 5 | 0 |
attr |
XCLIPVisionModel.vision_model |
1 | 0 | 0 |
meth |
XCLIPModel.__init__ |
2 | 1 | 0 |
meth |
XCLIPModel.forward |
11 | 10 | 0 |
attr |
XCLIPModel.projection_dim |
1 | 0 | 0 |
attr |
XCLIPModel.text_embed_dim |
1 | 0 | 0 |
attr |
XCLIPModel.vision_embed_dim |
1 | 0 | 0 |
attr |
XCLIPModel.text_model |
1 | 0 | 0 |
attr |
XCLIPModel.vision_model |
1 | 0 | 0 |
attr |
XCLIPModel.visual_projection |
1 | 0 | 0 |
attr |
XCLIPModel.text_projection |
1 | 0 | 0 |
attr |
XCLIPModel.logit_scale |
1 | 0 | 0 |
attr |
XCLIPModel.prompts_visual_layernorm |
1 | 0 | 0 |
attr |
XCLIPModel.prompts_visual_projection |
1 | 0 | 0 |
attr |
XCLIPModel.mit |
1 | 0 | 0 |
attr |
XCLIPModel.prompts_generator |
1 | 0 | 0 |
transformers.models.x_clip.processing_x_clip (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
XCLIPProcessor.__init__ |
4 | 0 | 0 |
attr |
XCLIPProcessor.video_processor |
1 | 0 | 0 |
transformers.models.xcodec.configuration_xcodec (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
XcodecConfig.__init__ |
14 | 10 | 0 |
attr |
XcodecConfig.acoustic_model_config |
1 | 0 | 0 |
attr |
XcodecConfig.semantic_model_config |
1 | 0 | 0 |
attr |
XcodecConfig.target_bandwidths |
1 | 0 | 0 |
attr |
XcodecConfig.sample_rate |
1 | 0 | 0 |
attr |
XcodecConfig.kernel_size |
1 | 0 | 0 |
attr |
XcodecConfig.channel_ratios |
1 | 0 | 0 |
attr |
XcodecConfig.strides |
1 | 0 | 0 |
attr |
XcodecConfig.block_dilations |
1 | 0 | 0 |
attr |
XcodecConfig.unit_kernel_size |
1 | 0 | 0 |
attr |
XcodecConfig.codebook_size |
1 | 0 | 0 |
attr |
XcodecConfig.initializer_range |
1 | 0 | 0 |
attr |
XcodecConfig.codebook_dim |
1 | 0 | 0 |
transformers.models.xcodec.modeling_xcodec (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
XcodecPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
XcodecPreTrainedModel.apply_weight_norm |
1 | 0 | 0 |
meth |
XcodecPreTrainedModel.remove_weight_norm |
1 | 0 | 0 |
meth |
XcodecPreTrainedModel._get_conv1d_layers |
2 | 0 | 0 |
meth |
XcodecPreTrainedModel._get_conv1d_output_lengths |
3 | 0 | 0 |
meth |
XcodecModel.__init__ |
2 | 0 | 0 |
meth |
XcodecModel._adjust_dac_decoder |
2 | 1 | 0 |
attr |
XcodecModel.config |
1 | 0 | 0 |
attr |
XcodecModel.pad |
1 | 0 | 0 |
attr |
XcodecModel.acoustic_encoder |
1 | 0 | 0 |
attr |
XcodecModel.acoustic_decoder |
1 | 0 | 0 |
attr |
XcodecModel.encoder_semantic |
1 | 0 | 0 |
attr |
XcodecModel.decoder_semantic |
1 | 0 | 0 |
attr |
XcodecModel.semantic_model |
1 | 0 | 0 |
attr |
XcodecModel.fc |
1 | 0 | 0 |
attr |
XcodecModel.fc1 |
1 | 0 | 0 |
attr |
XcodecModel.fc2 |
1 | 0 | 0 |
attr |
XcodecModel.quantizer |
1 | 0 | 0 |
transformers.models.xglm.configuration_xglm (42 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
XGLMConfig.__init__ |
22 | 0 | 0 |
attr |
XGLMConfig.add_cross_attention |
1 | 0 | 0 |
attr |
XGLMConfig.vocab_size |
1 | 0 | 0 |
attr |
XGLMConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
XGLMConfig.d_model |
1 | 0 | 0 |
attr |
XGLMConfig.ffn_dim |
1 | 0 | 0 |
attr |
XGLMConfig.num_layers |
1 | 0 | 0 |
attr |
XGLMConfig.attention_heads |
1 | 0 | 0 |
attr |
XGLMConfig.activation_function |
1 | 0 | 0 |
attr |
XGLMConfig.dropout |
1 | 0 | 0 |
attr |
XGLMConfig.attention_dropout |
1 | 0 | 0 |
attr |
XGLMConfig.activation_dropout |
1 | 0 | 0 |
attr |
XGLMConfig.layerdrop |
1 | 0 | 0 |
attr |
XGLMConfig.init_std |
1 | 0 | 0 |
attr |
XGLMConfig.scale_embedding |
1 | 0 | 0 |
attr |
XGLMConfig.use_cache |
1 | 0 | 0 |
attr |
XGLMConfig.pad_token_id |
1 | 0 | 0 |
attr |
XGLMConfig.bos_token_id |
1 | 0 | 0 |
attr |
XGLMConfig.eos_token_id |
1 | 0 | 0 |
attr |
XGLMConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
XGLMConfig.decoder_start_token_id |
1 | 0 | 0 |
transformers.models.xglm.modeling_xglm (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
XGLMPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
XGLMModel.__init__ |
2 | 1 | 0 |
meth |
XGLMModel.forward |
14 | 13 | 0 |
attr |
XGLMModel.dropout |
1 | 0 | 0 |
attr |
XGLMModel.layerdrop |
1 | 0 | 0 |
attr |
XGLMModel.padding_idx |
1 | 0 | 0 |
attr |
XGLMModel.max_target_positions |
1 | 0 | 0 |
attr |
XGLMModel.embed_tokens |
1 | 0 | 0 |
attr |
XGLMModel.embed_positions |
1 | 0 | 0 |
attr |
XGLMModel.layers |
1 | 0 | 0 |
attr |
XGLMModel.layer_norm |
1 | 0 | 0 |
attr |
XGLMModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
XGLMForCausalLM.__init__ |
2 | 0 | 0 |
meth |
XGLMForCausalLM.forward |
16 | 15 | 0 |
attr |
XGLMForCausalLM.model |
1 | 0 | 0 |
attr |
XGLMForCausalLM.lm_head |
1 | 0 | 0 |
transformers.models.xglm.tokenization_xglm (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
XGLMTokenizer.__init__ |
10 | 8 | 0 |
attr |
XGLMTokenizer.num_madeup_words |
1 | 0 | 0 |
attr |
XGLMTokenizer.add_prefix_space |
1 | 0 | 0 |
transformers.models.xlm.configuration_xlm (67 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
XLMConfig.__init__ |
34 | 0 | 0 |
attr |
XLMConfig.vocab_size |
1 | 0 | 0 |
attr |
XLMConfig.emb_dim |
1 | 0 | 0 |
attr |
XLMConfig.n_layers |
1 | 0 | 0 |
attr |
XLMConfig.n_heads |
1 | 0 | 0 |
attr |
XLMConfig.dropout |
1 | 0 | 0 |
attr |
XLMConfig.attention_dropout |
1 | 0 | 0 |
attr |
XLMConfig.gelu_activation |
1 | 0 | 0 |
attr |
XLMConfig.sinusoidal_embeddings |
1 | 0 | 0 |
attr |
XLMConfig.causal |
1 | 0 | 0 |
attr |
XLMConfig.asm |
1 | 0 | 0 |
attr |
XLMConfig.n_langs |
1 | 0 | 0 |
attr |
XLMConfig.use_lang_emb |
1 | 0 | 0 |
attr |
XLMConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
XLMConfig.unk_index |
1 | 0 | 0 |
attr |
XLMConfig.mask_index |
1 | 0 | 0 |
attr |
XLMConfig.is_encoder |
1 | 0 | 0 |
attr |
XLMConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
XLMConfig.embed_init_std |
1 | 0 | 0 |
attr |
XLMConfig.init_std |
1 | 0 | 0 |
attr |
XLMConfig.summary_type |
1 | 0 | 0 |
attr |
XLMConfig.summary_use_proj |
1 | 0 | 0 |
attr |
XLMConfig.summary_activation |
1 | 0 | 0 |
attr |
XLMConfig.summary_proj_to_labels |
1 | 0 | 0 |
attr |
XLMConfig.summary_first_dropout |
1 | 0 | 0 |
attr |
XLMConfig.start_n_top |
1 | 0 | 0 |
attr |
XLMConfig.end_n_top |
1 | 0 | 0 |
attr |
XLMConfig.mask_token_id |
1 | 0 | 0 |
attr |
XLMConfig.lang_id |
1 | 0 | 0 |
attr |
XLMConfig.pad_token_id |
1 | 0 | 0 |
attr |
XLMConfig.bos_token_id |
1 | 0 | 0 |
attr |
XLMConfig.eos_token_id |
1 | 0 | 0 |
attr |
XLMConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
XLMConfig.n_words |
1 | 0 | 0 |
transformers.models.xlm.modeling_xlm (74 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
XLMForQuestionAnsweringSimple.__init__ |
2 | 0 | 0 |
meth |
XLMForQuestionAnsweringSimple.forward |
15 | 14 | 0 |
attr |
XLMForQuestionAnsweringSimple.transformer |
1 | 0 | 0 |
attr |
XLMForQuestionAnsweringSimple.qa_outputs |
1 | 0 | 0 |
meth |
XLMWithLMHeadModel.__init__ |
2 | 0 | 0 |
meth |
XLMWithLMHeadModel.get_output_embeddings |
1 | 0 | 0 |
meth |
XLMWithLMHeadModel.set_output_embeddings |
2 | 0 | 0 |
meth |
XLMWithLMHeadModel.prepare_inputs_for_generation |
4 | 0 | 0 |
meth |
XLMWithLMHeadModel.forward |
16 | 15 | 0 |
attr |
XLMWithLMHeadModel.transformer |
1 | 0 | 0 |
attr |
XLMWithLMHeadModel.pred_layer |
1 | 0 | 0 |
meth |
XLMForSequenceClassification.__init__ |
2 | 0 | 0 |
meth |
XLMForSequenceClassification.forward |
14 | 13 | 0 |
attr |
XLMForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
XLMForSequenceClassification.transformer |
1 | 0 | 0 |
attr |
XLMForSequenceClassification.sequence_summary |
1 | 0 | 0 |
meth |
XLMForMultipleChoice.__init__ |
4 | 0 | 0 |
meth |
XLMForMultipleChoice.forward |
14 | 13 | 0 |
attr |
XLMForMultipleChoice.transformer |
1 | 0 | 0 |
attr |
XLMForMultipleChoice.sequence_summary |
1 | 0 | 0 |
attr |
XLMForMultipleChoice.logits_proj |
1 | 0 | 0 |
meth |
XLMForQuestionAnswering.__init__ |
2 | 0 | 0 |
meth |
XLMForQuestionAnswering.forward |
18 | 17 | 0 |
attr |
XLMForQuestionAnswering.transformer |
1 | 0 | 0 |
attr |
XLMForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
XLMModel.__init__ |
2 | 0 | 0 |
meth |
XLMModel.get_input_embeddings |
1 | 0 | 0 |
meth |
XLMModel.set_input_embeddings |
2 | 0 | 0 |
meth |
XLMModel.forward |
14 | 13 | 0 |
attr |
XLMModel.is_encoder |
1 | 0 | 0 |
attr |
XLMModel.is_decoder |
1 | 0 | 0 |
attr |
XLMModel.causal |
1 | 0 | 0 |
attr |
XLMModel.n_langs |
1 | 0 | 0 |
attr |
XLMModel.use_lang_emb |
1 | 0 | 0 |
attr |
XLMModel.n_words |
1 | 0 | 0 |
attr |
XLMModel.eos_index |
1 | 0 | 0 |
attr |
XLMModel.pad_index |
1 | 0 | 0 |
attr |
XLMModel.dim |
1 | 0 | 0 |
attr |
XLMModel.hidden_dim |
1 | 0 | 0 |
attr |
XLMModel.n_heads |
1 | 0 | 0 |
attr |
XLMModel.n_layers |
1 | 0 | 0 |
attr |
XLMModel.dropout |
1 | 0 | 0 |
attr |
XLMModel.attention_dropout |
1 | 0 | 0 |
attr |
XLMModel.position_embeddings |
1 | 0 | 0 |
attr |
XLMModel.embeddings |
1 | 0 | 0 |
attr |
XLMModel.layer_norm_emb |
1 | 0 | 0 |
attr |
XLMModel.attentions |
1 | 0 | 0 |
attr |
XLMModel.layer_norm1 |
1 | 0 | 0 |
attr |
XLMModel.ffns |
1 | 0 | 0 |
attr |
XLMModel.layer_norm2 |
1 | 0 | 0 |
attr |
XLMModel.lang_embeddings |
1 | 0 | 0 |
meth |
XLMPreTrainedModel._init_weights |
2 | 0 | 0 |
prop |
XLMPreTrainedModel.dummy_inputs |
1 | 0 | 0 |
meth |
XLMForTokenClassification.__init__ |
2 | 0 | 0 |
meth |
XLMForTokenClassification.forward |
14 | 13 | 0 |
attr |
XLMForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
XLMForTokenClassification.transformer |
1 | 0 | 0 |
attr |
XLMForTokenClassification.dropout |
1 | 0 | 0 |
attr |
XLMForTokenClassification.classifier |
1 | 0 | 0 |
transformers.models.xlm.tokenization_xlm (56 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
XLMTokenizer.__init__ |
14 | 0 | 0 |
meth |
XLMTokenizer.moses_punct_norm |
3 | 0 | 0 |
meth |
XLMTokenizer.moses_tokenize |
3 | 0 | 0 |
meth |
XLMTokenizer.moses_pipeline |
3 | 0 | 0 |
meth |
XLMTokenizer.ja_tokenize |
2 | 0 | 0 |
meth |
XLMTokenizer.get_vocab |
1 | 0 | 0 |
meth |
XLMTokenizer.bpe |
2 | 0 | 0 |
meth |
XLMTokenizer._tokenize |
4 | 0 | 0 |
meth |
XLMTokenizer._convert_token_to_id |
2 | 0 | 0 |
meth |
XLMTokenizer._convert_id_to_token |
2 | 0 | 0 |
meth |
XLMTokenizer.convert_tokens_to_string |
2 | 0 | 0 |
meth |
XLMTokenizer.__getstate__ |
1 | 0 | 0 |
meth |
XLMTokenizer.__setstate__ |
2 | 0 | 0 |
prop |
XLMTokenizer.do_lower_case |
1 | 0 | 0 |
prop |
XLMTokenizer.vocab_size |
1 | 0 | 0 |
attr |
XLMTokenizer.sm |
1 | 0 | 0 |
attr |
XLMTokenizer.cache_moses_punct_normalizer |
1 | 0 | 0 |
attr |
XLMTokenizer.cache_moses_tokenizer |
1 | 0 | 0 |
attr |
XLMTokenizer.lang_with_custom_tokenizer |
1 | 0 | 0 |
attr |
XLMTokenizer.do_lowercase_and_remove_accent |
1 | 0 | 0 |
attr |
XLMTokenizer.lang2id |
1 | 0 | 0 |
attr |
XLMTokenizer.id2lang |
1 | 0 | 0 |
attr |
XLMTokenizer.ja_word_tokenizer |
1 | 0 | 0 |
attr |
XLMTokenizer.zh_word_tokenizer |
1 | 0 | 0 |
attr |
XLMTokenizer.decoder |
1 | 0 | 0 |
attr |
XLMTokenizer.bpe_ranks |
1 | 0 | 0 |
attr |
XLMTokenizer.cache |
1 | 0 | 0 |
attr |
XLMTokenizer.encoder |
1 | 0 | 0 |
transformers.models.xlm_roberta.configuration_xlm_roberta (42 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
XLMRobertaConfig.__init__ |
22 | 0 | 0 |
attr |
XLMRobertaConfig.pad_token_id |
1 | 0 | 0 |
attr |
XLMRobertaConfig.bos_token_id |
1 | 0 | 0 |
attr |
XLMRobertaConfig.eos_token_id |
1 | 0 | 0 |
attr |
XLMRobertaConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
XLMRobertaConfig.is_decoder |
1 | 0 | 0 |
attr |
XLMRobertaConfig.add_cross_attention |
1 | 0 | 0 |
attr |
XLMRobertaConfig.vocab_size |
1 | 0 | 0 |
attr |
XLMRobertaConfig.hidden_size |
1 | 0 | 0 |
attr |
XLMRobertaConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
XLMRobertaConfig.num_attention_heads |
1 | 0 | 0 |
attr |
XLMRobertaConfig.hidden_act |
1 | 0 | 0 |
attr |
XLMRobertaConfig.intermediate_size |
1 | 0 | 0 |
attr |
XLMRobertaConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
XLMRobertaConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
XLMRobertaConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
XLMRobertaConfig.type_vocab_size |
1 | 0 | 0 |
attr |
XLMRobertaConfig.initializer_range |
1 | 0 | 0 |
attr |
XLMRobertaConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
XLMRobertaConfig.use_cache |
1 | 0 | 0 |
attr |
XLMRobertaConfig.classifier_dropout |
1 | 0 | 0 |
transformers.models.xlm_roberta.modeling_xlm_roberta (56 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
XLMRobertaPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
XLMRobertaForQuestionAnswering.__init__ |
2 | 0 | 0 |
attr |
XLMRobertaForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
XLMRobertaForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
attr |
XLMRobertaForQuestionAnswering.roberta |
1 | 0 | 0 |
meth |
XLMRobertaForCausalLM.__init__ |
2 | 0 | 0 |
meth |
XLMRobertaForCausalLM.get_output_embeddings |
1 | 0 | 0 |
meth |
XLMRobertaForCausalLM.set_output_embeddings |
2 | 0 | 0 |
attr |
XLMRobertaForCausalLM.lm_head |
1 | 0 | 0 |
attr |
XLMRobertaForCausalLM.roberta |
1 | 0 | 0 |
meth |
XLMRobertaForSequenceClassification.__init__ |
2 | 0 | 0 |
attr |
XLMRobertaForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
XLMRobertaForSequenceClassification.config |
1 | 0 | 0 |
attr |
XLMRobertaForSequenceClassification.classifier |
1 | 0 | 0 |
attr |
XLMRobertaForSequenceClassification.roberta |
1 | 0 | 0 |
meth |
XLMRobertaForMaskedLM.__init__ |
2 | 0 | 0 |
meth |
XLMRobertaForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
XLMRobertaForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
attr |
XLMRobertaForMaskedLM.lm_head |
1 | 0 | 0 |
attr |
XLMRobertaForMaskedLM.roberta |
1 | 0 | 0 |
meth |
XLMRobertaForTokenClassification.__init__ |
2 | 0 | 0 |
attr |
XLMRobertaForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
XLMRobertaForTokenClassification.dropout |
1 | 0 | 0 |
attr |
XLMRobertaForTokenClassification.classifier |
1 | 0 | 0 |
attr |
XLMRobertaForTokenClassification.roberta |
1 | 0 | 0 |
meth |
XLMRobertaModel.__init__ |
3 | 0 | 0 |
meth |
XLMRobertaModel.get_input_embeddings |
1 | 0 | 0 |
meth |
XLMRobertaModel.set_input_embeddings |
2 | 0 | 0 |
meth |
XLMRobertaModel._create_attention_masks |
7 | 0 | 0 |
attr |
XLMRobertaModel.config |
1 | 0 | 0 |
attr |
XLMRobertaModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
XLMRobertaModel.embeddings |
1 | 0 | 0 |
attr |
XLMRobertaModel.encoder |
1 | 0 | 0 |
attr |
XLMRobertaModel.pooler |
1 | 0 | 0 |
meth |
XLMRobertaForMultipleChoice.__init__ |
2 | 0 | 0 |
attr |
XLMRobertaForMultipleChoice.dropout |
1 | 0 | 0 |
attr |
XLMRobertaForMultipleChoice.classifier |
1 | 0 | 0 |
attr |
XLMRobertaForMultipleChoice.roberta |
1 | 0 | 0 |
transformers.models.xlm_roberta.modular_xlm_roberta (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
XLMRobertaForQuestionAnswering.__init__ |
2 | 0 | 0 |
attr |
XLMRobertaForQuestionAnswering.roberta |
1 | 0 | 0 |
meth |
XLMRobertaForCausalLM.__init__ |
2 | 0 | 0 |
attr |
XLMRobertaForCausalLM.roberta |
1 | 0 | 0 |
meth |
XLMRobertaForSequenceClassification.__init__ |
2 | 0 | 0 |
attr |
XLMRobertaForSequenceClassification.roberta |
1 | 0 | 0 |
meth |
XLMRobertaForMaskedLM.__init__ |
2 | 0 | 0 |
attr |
XLMRobertaForMaskedLM.roberta |
1 | 0 | 0 |
meth |
XLMRobertaForTokenClassification.__init__ |
2 | 0 | 0 |
attr |
XLMRobertaForTokenClassification.roberta |
1 | 0 | 0 |
meth |
XLMRobertaForMultipleChoice.__init__ |
2 | 0 | 0 |
attr |
XLMRobertaForMultipleChoice.roberta |
1 | 0 | 0 |
transformers.models.xlm_roberta.tokenization_xlm_roberta (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
XLMRobertaTokenizer.__init__ |
12 | 10 | 0 |
attr |
XLMRobertaTokenizer.add_prefix_space |
1 | 0 | 0 |
transformers.models.xlm_roberta_xl.configuration_xlm_roberta_xl (42 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
XLMRobertaXLConfig.__init__ |
22 | 0 | 0 |
attr |
XLMRobertaXLConfig.is_decoder |
1 | 0 | 0 |
attr |
XLMRobertaXLConfig.add_cross_attention |
1 | 0 | 0 |
attr |
XLMRobertaXLConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
XLMRobertaXLConfig.pad_token_id |
1 | 0 | 0 |
attr |
XLMRobertaXLConfig.bos_token_id |
1 | 0 | 0 |
attr |
XLMRobertaXLConfig.eos_token_id |
1 | 0 | 0 |
attr |
XLMRobertaXLConfig.vocab_size |
1 | 0 | 0 |
attr |
XLMRobertaXLConfig.hidden_size |
1 | 0 | 0 |
attr |
XLMRobertaXLConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
XLMRobertaXLConfig.num_attention_heads |
1 | 0 | 0 |
attr |
XLMRobertaXLConfig.hidden_act |
1 | 0 | 0 |
attr |
XLMRobertaXLConfig.intermediate_size |
1 | 0 | 0 |
attr |
XLMRobertaXLConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
XLMRobertaXLConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
XLMRobertaXLConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
XLMRobertaXLConfig.type_vocab_size |
1 | 0 | 0 |
attr |
XLMRobertaXLConfig.initializer_range |
1 | 0 | 0 |
attr |
XLMRobertaXLConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
XLMRobertaXLConfig.use_cache |
1 | 0 | 0 |
attr |
XLMRobertaXLConfig.classifier_dropout |
1 | 0 | 0 |
transformers.models.xlm_roberta_xl.modeling_xlm_roberta_xl (56 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
XLMRobertaXLPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
XLMRobertaXLForTokenClassification.__init__ |
2 | 0 | 0 |
attr |
XLMRobertaXLForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
XLMRobertaXLForTokenClassification.roberta |
1 | 0 | 0 |
attr |
XLMRobertaXLForTokenClassification.dropout |
1 | 0 | 0 |
attr |
XLMRobertaXLForTokenClassification.classifier |
1 | 0 | 0 |
meth |
XLMRobertaXLForSequenceClassification.__init__ |
2 | 0 | 0 |
attr |
XLMRobertaXLForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
XLMRobertaXLForSequenceClassification.config |
1 | 0 | 0 |
attr |
XLMRobertaXLForSequenceClassification.roberta |
1 | 0 | 0 |
attr |
XLMRobertaXLForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
XLMRobertaXLForQuestionAnswering.__init__ |
2 | 0 | 0 |
attr |
XLMRobertaXLForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
XLMRobertaXLForQuestionAnswering.roberta |
1 | 0 | 0 |
attr |
XLMRobertaXLForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
XLMRobertaXLForCausalLM.__init__ |
2 | 0 | 0 |
meth |
XLMRobertaXLForCausalLM.get_output_embeddings |
1 | 0 | 0 |
meth |
XLMRobertaXLForCausalLM.set_output_embeddings |
2 | 0 | 0 |
attr |
XLMRobertaXLForCausalLM.roberta |
1 | 0 | 0 |
attr |
XLMRobertaXLForCausalLM.lm_head |
1 | 0 | 0 |
meth |
XLMRobertaXLModel.__init__ |
3 | 0 | 0 |
meth |
XLMRobertaXLModel.get_input_embeddings |
1 | 0 | 0 |
meth |
XLMRobertaXLModel.set_input_embeddings |
2 | 0 | 0 |
meth |
XLMRobertaXLModel._create_attention_masks |
7 | 0 | 0 |
attr |
XLMRobertaXLModel.config |
1 | 0 | 0 |
attr |
XLMRobertaXLModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
XLMRobertaXLModel.embeddings |
1 | 0 | 0 |
attr |
XLMRobertaXLModel.encoder |
1 | 0 | 0 |
attr |
XLMRobertaXLModel.pooler |
1 | 0 | 0 |
meth |
XLMRobertaXLForMultipleChoice.__init__ |
2 | 0 | 0 |
attr |
XLMRobertaXLForMultipleChoice.roberta |
1 | 0 | 0 |
attr |
XLMRobertaXLForMultipleChoice.dropout |
1 | 0 | 0 |
attr |
XLMRobertaXLForMultipleChoice.classifier |
1 | 0 | 0 |
meth |
XLMRobertaXLForMaskedLM.__init__ |
2 | 0 | 0 |
meth |
XLMRobertaXLForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
XLMRobertaXLForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
attr |
XLMRobertaXLForMaskedLM.roberta |
1 | 0 | 0 |
attr |
XLMRobertaXLForMaskedLM.lm_head |
1 | 0 | 0 |
transformers.models.xlm_roberta_xl.modular_xlm_roberta_xl (36 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
XLMRobertaXLForTokenClassification.__init__ |
2 | 0 | 0 |
attr |
XLMRobertaXLForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
XLMRobertaXLForTokenClassification.roberta |
1 | 0 | 0 |
attr |
XLMRobertaXLForTokenClassification.dropout |
1 | 0 | 0 |
attr |
XLMRobertaXLForTokenClassification.classifier |
1 | 0 | 0 |
meth |
XLMRobertaXLForSequenceClassification.__init__ |
2 | 0 | 0 |
attr |
XLMRobertaXLForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
XLMRobertaXLForSequenceClassification.config |
1 | 0 | 0 |
attr |
XLMRobertaXLForSequenceClassification.roberta |
1 | 0 | 0 |
attr |
XLMRobertaXLForSequenceClassification.classifier |
1 | 0 | 0 |
meth |
XLMRobertaXLForQuestionAnswering.__init__ |
2 | 0 | 0 |
attr |
XLMRobertaXLForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
XLMRobertaXLForQuestionAnswering.roberta |
1 | 0 | 0 |
attr |
XLMRobertaXLForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
XLMRobertaXLForCausalLM.__init__ |
2 | 0 | 0 |
meth |
XLMRobertaXLForCausalLM.get_output_embeddings |
1 | 0 | 0 |
meth |
XLMRobertaXLForCausalLM.set_output_embeddings |
2 | 0 | 0 |
attr |
XLMRobertaXLForCausalLM.roberta |
1 | 0 | 0 |
attr |
XLMRobertaXLForCausalLM.lm_head |
1 | 0 | 0 |
meth |
XLMRobertaXLForMultipleChoice.__init__ |
2 | 0 | 0 |
attr |
XLMRobertaXLForMultipleChoice.roberta |
1 | 0 | 0 |
attr |
XLMRobertaXLForMultipleChoice.dropout |
1 | 0 | 0 |
attr |
XLMRobertaXLForMultipleChoice.classifier |
1 | 0 | 0 |
meth |
XLMRobertaXLForMaskedLM.__init__ |
2 | 0 | 0 |
meth |
XLMRobertaXLForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
XLMRobertaXLForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
attr |
XLMRobertaXLForMaskedLM.roberta |
1 | 0 | 0 |
attr |
XLMRobertaXLForMaskedLM.lm_head |
1 | 0 | 0 |
transformers.models.xlnet.configuration_xlnet (59 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
XLNetConfig.__init__ |
29 | 0 | 0 |
prop |
XLNetConfig.max_position_embeddings |
2 | 0 | 0 |
attr |
XLNetConfig.vocab_size |
1 | 0 | 0 |
attr |
XLNetConfig.d_model |
1 | 0 | 0 |
attr |
XLNetConfig.n_layer |
1 | 0 | 0 |
attr |
XLNetConfig.n_head |
1 | 0 | 0 |
attr |
XLNetConfig.d_head |
1 | 0 | 0 |
attr |
XLNetConfig.ff_activation |
1 | 0 | 0 |
attr |
XLNetConfig.d_inner |
1 | 0 | 0 |
attr |
XLNetConfig.attn_type |
1 | 0 | 0 |
attr |
XLNetConfig.initializer_range |
1 | 0 | 0 |
attr |
XLNetConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
XLNetConfig.dropout |
1 | 0 | 0 |
attr |
XLNetConfig.mem_len |
1 | 0 | 0 |
attr |
XLNetConfig.reuse_len |
1 | 0 | 0 |
attr |
XLNetConfig.bi_data |
1 | 0 | 0 |
attr |
XLNetConfig.clamp_len |
1 | 0 | 0 |
attr |
XLNetConfig.same_length |
1 | 0 | 0 |
attr |
XLNetConfig.summary_type |
1 | 0 | 0 |
attr |
XLNetConfig.summary_use_proj |
1 | 0 | 0 |
attr |
XLNetConfig.summary_activation |
1 | 0 | 0 |
attr |
XLNetConfig.summary_last_dropout |
1 | 0 | 0 |
attr |
XLNetConfig.start_n_top |
1 | 0 | 0 |
attr |
XLNetConfig.end_n_top |
1 | 0 | 0 |
attr |
XLNetConfig.bos_token_id |
1 | 0 | 0 |
attr |
XLNetConfig.pad_token_id |
1 | 0 | 0 |
attr |
XLNetConfig.eos_token_id |
1 | 0 | 0 |
attr |
XLNetConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
XLNetConfig.use_mems_eval |
1 | 0 | 0 |
attr |
XLNetConfig.use_mems_train |
1 | 0 | 0 |
transformers.models.xlnet.modeling_xlnet (84 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
XLNetForQuestionAnsweringSimple.__init__ |
2 | 0 | 0 |
meth |
XLNetForQuestionAnsweringSimple.forward |
16 | 15 | 0 |
attr |
XLNetForQuestionAnsweringSimple.num_labels |
1 | 0 | 0 |
attr |
XLNetForQuestionAnsweringSimple.transformer |
1 | 0 | 0 |
attr |
XLNetForQuestionAnsweringSimple.qa_outputs |
1 | 0 | 0 |
meth |
XLNetForSequenceClassification.__init__ |
2 | 0 | 0 |
meth |
XLNetForSequenceClassification.forward |
15 | 14 | 0 |
attr |
XLNetForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
XLNetForSequenceClassification.transformer |
1 | 0 | 0 |
attr |
XLNetForSequenceClassification.sequence_summary |
1 | 0 | 0 |
attr |
XLNetForSequenceClassification.logits_proj |
1 | 0 | 0 |
meth |
XLNetModel.__init__ |
2 | 0 | 0 |
meth |
XLNetModel.get_input_embeddings |
1 | 0 | 0 |
meth |
XLNetModel.set_input_embeddings |
2 | 0 | 0 |
meth |
XLNetModel.create_mask |
3 | 0 | 0 |
meth |
XLNetModel.cache_mem |
3 | 0 | 0 |
meth |
XLNetModel.positional_embedding |
4 | 0 | 0 |
meth |
XLNetModel.relative_positional_encoding |
4 | 0 | 0 |
meth |
XLNetModel.forward |
14 | 13 | 0 |
attr |
XLNetModel.mem_len |
1 | 0 | 0 |
attr |
XLNetModel.reuse_len |
1 | 0 | 0 |
attr |
XLNetModel.d_model |
1 | 0 | 0 |
attr |
XLNetModel.same_length |
1 | 0 | 0 |
attr |
XLNetModel.attn_type |
1 | 0 | 0 |
attr |
XLNetModel.bi_data |
1 | 0 | 0 |
attr |
XLNetModel.clamp_len |
1 | 0 | 0 |
attr |
XLNetModel.n_layer |
1 | 0 | 0 |
attr |
XLNetModel.word_embedding |
1 | 0 | 0 |
attr |
XLNetModel.mask_emb |
1 | 0 | 0 |
attr |
XLNetModel.layer |
1 | 0 | 0 |
attr |
XLNetModel.dropout |
1 | 0 | 0 |
meth |
XLNetLMHeadModel.__init__ |
2 | 0 | 0 |
meth |
XLNetLMHeadModel.get_output_embeddings |
1 | 0 | 0 |
meth |
XLNetLMHeadModel.set_output_embeddings |
2 | 0 | 0 |
meth |
XLNetLMHeadModel.prepare_inputs_for_generation |
6 | 0 | 0 |
meth |
XLNetLMHeadModel.forward |
16 | 15 | 0 |
attr |
XLNetLMHeadModel.attn_type |
1 | 0 | 0 |
attr |
XLNetLMHeadModel.same_length |
1 | 0 | 0 |
attr |
XLNetLMHeadModel.transformer |
1 | 0 | 0 |
attr |
XLNetLMHeadModel.lm_loss |
1 | 0 | 0 |
meth |
XLNetForMultipleChoice.__init__ |
2 | 0 | 0 |
meth |
XLNetForMultipleChoice.forward |
15 | 14 | 0 |
attr |
XLNetForMultipleChoice.transformer |
1 | 0 | 0 |
attr |
XLNetForMultipleChoice.sequence_summary |
1 | 0 | 0 |
attr |
XLNetForMultipleChoice.logits_proj |
1 | 0 | 0 |
meth |
XLNetPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
XLNetForQuestionAnswering.__init__ |
2 | 0 | 0 |
meth |
XLNetForQuestionAnswering.forward |
19 | 18 | 0 |
attr |
XLNetForQuestionAnswering.start_n_top |
1 | 0 | 0 |
attr |
XLNetForQuestionAnswering.end_n_top |
1 | 0 | 0 |
attr |
XLNetForQuestionAnswering.transformer |
1 | 0 | 0 |
attr |
XLNetForQuestionAnswering.start_logits |
1 | 0 | 0 |
attr |
XLNetForQuestionAnswering.end_logits |
1 | 0 | 0 |
attr |
XLNetForQuestionAnswering.answer_class |
1 | 0 | 0 |
meth |
XLNetForTokenClassification.__init__ |
2 | 0 | 0 |
meth |
XLNetForTokenClassification.forward |
15 | 14 | 0 |
attr |
XLNetForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
XLNetForTokenClassification.transformer |
1 | 0 | 0 |
attr |
XLNetForTokenClassification.classifier |
1 | 0 | 0 |
transformers.models.xlnet.tokenization_xlnet (16 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
XLNetTokenizer.__init__ |
15 | 2 | 0 |
attr |
XLNetTokenizer.do_lower_case |
1 | 0 | 0 |
attr |
XLNetTokenizer.remove_space |
1 | 0 | 0 |
attr |
XLNetTokenizer.keep_accents |
1 | 0 | 0 |
transformers.models.xlstm.configuration_xlstm (39 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
xLSTMConfig.__init__ |
34 | 32 | 0 |
meth |
xLSTMConfig.to_xlstm_block_config |
1 | 0 | 0 |
prop |
xLSTMConfig.qk_dim |
1 | 0 | 0 |
prop |
xLSTMConfig.v_dim |
1 | 0 | 0 |
prop |
xLSTMConfig.qk_head_dim |
1 | 0 | 0 |
prop |
xLSTMConfig.v_head_dim |
1 | 0 | 0 |
attr |
xLSTMConfig.vocab_size |
1 | 0 | 0 |
attr |
xLSTMConfig.hidden_size |
1 | 0 | 0 |
attr |
xLSTMConfig.embedding_dim |
1 | 0 | 0 |
attr |
xLSTMConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
xLSTMConfig.num_blocks |
1 | 0 | 0 |
attr |
xLSTMConfig.num_heads |
1 | 0 | 0 |
attr |
xLSTMConfig.use_bias |
1 | 0 | 0 |
attr |
xLSTMConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
xLSTMConfig.add_out_norm |
1 | 0 | 0 |
attr |
xLSTMConfig.norm_eps |
1 | 0 | 0 |
attr |
xLSTMConfig.norm_reduction_force_float32 |
1 | 0 | 0 |
attr |
xLSTMConfig.qk_dim_factor |
1 | 0 | 0 |
attr |
xLSTMConfig.v_dim_factor |
1 | 0 | 0 |
attr |
xLSTMConfig.chunkwise_kernel |
1 | 0 | 0 |
attr |
xLSTMConfig.sequence_kernel |
1 | 0 | 0 |
attr |
xLSTMConfig.step_kernel |
1 | 0 | 0 |
attr |
xLSTMConfig.mode |
1 | 0 | 0 |
attr |
xLSTMConfig.chunk_size |
1 | 0 | 0 |
attr |
xLSTMConfig.return_last_states |
1 | 0 | 0 |
attr |
xLSTMConfig.autocast_kernel_dtype |
1 | 0 | 0 |
attr |
xLSTMConfig.eps |
1 | 0 | 0 |
attr |
xLSTMConfig.inference_state_dtype |
1 | 0 | 0 |
attr |
xLSTMConfig.ffn_proj_factor |
1 | 0 | 0 |
attr |
xLSTMConfig.ffn_round_up_to_multiple_of |
1 | 0 | 0 |
attr |
xLSTMConfig.gate_soft_cap |
1 | 0 | 0 |
attr |
xLSTMConfig.output_logit_soft_cap |
1 | 0 | 0 |
attr |
xLSTMConfig.weight_mode |
1 | 0 | 0 |
attr |
xLSTMConfig.use_cache |
1 | 0 | 0 |
attr |
xLSTMConfig.pad_token_id |
1 | 0 | 0 |
attr |
xLSTMConfig.bos_token_id |
1 | 0 | 0 |
attr |
xLSTMConfig.eos_token_id |
1 | 0 | 0 |
attr |
xLSTMConfig.max_inference_chunksize |
1 | 0 | 0 |
transformers.models.xlstm.modeling_xlstm (25 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
xLSTMForCausalLM.__init__ |
2 | 0 | 0 |
meth |
xLSTMForCausalLM.get_output_embeddings |
1 | 0 | 0 |
meth |
xLSTMForCausalLM.set_output_embeddings |
2 | 0 | 0 |
meth |
xLSTMForCausalLM.get_input_embeddings |
1 | 0 | 0 |
meth |
xLSTMForCausalLM.set_input_embeddings |
2 | 0 | 0 |
meth |
xLSTMForCausalLM.forward |
8 | 7 | 0 |
attr |
xLSTMForCausalLM.backbone |
1 | 0 | 0 |
attr |
xLSTMForCausalLM.lm_head |
1 | 0 | 0 |
meth |
xLSTMPreTrainedModel._module_name_map |
2 | 0 | 0 |
meth |
xLSTMPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
xLSTMModel.__init__ |
2 | 0 | 0 |
meth |
xLSTMModel.get_input_embeddings |
1 | 0 | 0 |
meth |
xLSTMModel.set_input_embeddings |
2 | 0 | 0 |
meth |
xLSTMModel.forward |
7 | 6 | 0 |
attr |
xLSTMModel.embeddings |
1 | 0 | 0 |
attr |
xLSTMModel.blocks |
1 | 0 | 0 |
attr |
xLSTMModel.out_norm |
1 | 0 | 0 |
attr |
xLSTMModel.gradient_checkpointing |
1 | 0 | 0 |
transformers.models.xmod.configuration_xmod (56 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
XmodConfig.__init__ |
29 | 0 | 0 |
attr |
XmodConfig.pad_token_id |
1 | 0 | 0 |
attr |
XmodConfig.bos_token_id |
1 | 0 | 0 |
attr |
XmodConfig.eos_token_id |
1 | 0 | 0 |
attr |
XmodConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
XmodConfig.is_decoder |
1 | 0 | 0 |
attr |
XmodConfig.add_cross_attention |
1 | 0 | 0 |
attr |
XmodConfig.vocab_size |
1 | 0 | 0 |
attr |
XmodConfig.hidden_size |
1 | 0 | 0 |
attr |
XmodConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
XmodConfig.num_attention_heads |
1 | 0 | 0 |
attr |
XmodConfig.hidden_act |
1 | 0 | 0 |
attr |
XmodConfig.intermediate_size |
1 | 0 | 0 |
attr |
XmodConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
XmodConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
XmodConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
XmodConfig.type_vocab_size |
1 | 0 | 0 |
attr |
XmodConfig.initializer_range |
1 | 0 | 0 |
attr |
XmodConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
XmodConfig.use_cache |
1 | 0 | 0 |
attr |
XmodConfig.classifier_dropout |
1 | 0 | 0 |
attr |
XmodConfig.pre_norm |
1 | 0 | 0 |
attr |
XmodConfig.adapter_reduction_factor |
1 | 0 | 0 |
attr |
XmodConfig.adapter_layer_norm |
1 | 0 | 0 |
attr |
XmodConfig.adapter_reuse_layer_norm |
1 | 0 | 0 |
attr |
XmodConfig.ln_before_adapter |
1 | 0 | 0 |
attr |
XmodConfig.languages |
1 | 0 | 0 |
attr |
XmodConfig.default_language |
1 | 0 | 0 |
transformers.models.xmod.modeling_xmod (58 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
XmodModel.__init__ |
3 | 0 | 0 |
meth |
XmodModel.get_input_embeddings |
1 | 0 | 0 |
meth |
XmodModel.set_input_embeddings |
2 | 0 | 0 |
meth |
XmodModel._create_attention_masks |
7 | 0 | 0 |
attr |
XmodModel.config |
1 | 0 | 0 |
attr |
XmodModel.gradient_checkpointing |
1 | 0 | 0 |
attr |
XmodModel.embeddings |
1 | 0 | 0 |
attr |
XmodModel.encoder |
1 | 0 | 0 |
attr |
XmodModel.pooler |
1 | 0 | 0 |
meth |
XmodPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
XmodPreTrainedModel.set_default_language |
2 | 1 | 0 |
meth |
XmodPreTrainedModel.freeze_embeddings_and_language_adapters |
1 | 0 | 0 |
meth |
XmodForTokenClassification.__init__ |
2 | 0 | 0 |
attr |
XmodForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
XmodForTokenClassification.roberta |
1 | 0 | 0 |
attr |
XmodForTokenClassification.dropout |
1 | 0 | 0 |
attr |
XmodForTokenClassification.classifier |
1 | 0 | 0 |
meth |
XmodForMultipleChoice.__init__ |
2 | 0 | 0 |
attr |
XmodForMultipleChoice.roberta |
1 | 0 | 0 |
attr |
XmodForMultipleChoice.dropout |
1 | 0 | 0 |
attr |
XmodForMultipleChoice.classifier |
1 | 0 | 0 |
meth |
XmodForCausalLM.__init__ |
2 | 0 | 0 |
meth |
XmodForCausalLM.get_output_embeddings |
1 | 0 | 0 |
meth |
XmodForCausalLM.set_output_embeddings |
2 | 0 | 0 |
attr |
XmodForCausalLM.roberta |
1 | 0 | 0 |
attr |
XmodForCausalLM.lm_head |
1 | 0 | 0 |
meth |
XmodForQuestionAnswering.__init__ |
2 | 0 | 0 |
attr |
XmodForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
XmodForQuestionAnswering.roberta |
1 | 0 | 0 |
attr |
XmodForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
XmodForMaskedLM.__init__ |
2 | 0 | 0 |
meth |
XmodForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
XmodForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
attr |
XmodForMaskedLM.roberta |
1 | 0 | 0 |
attr |
XmodForMaskedLM.lm_head |
1 | 0 | 0 |
meth |
XmodForSequenceClassification.__init__ |
2 | 0 | 0 |
attr |
XmodForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
XmodForSequenceClassification.config |
1 | 0 | 0 |
attr |
XmodForSequenceClassification.roberta |
1 | 0 | 0 |
attr |
XmodForSequenceClassification.classifier |
1 | 0 | 0 |
transformers.models.yolos.configuration_yolos (46 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
YolosConfig.__init__ |
24 | 0 | 0 |
attr |
YolosConfig.hidden_size |
1 | 0 | 0 |
attr |
YolosConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
YolosConfig.num_attention_heads |
1 | 0 | 0 |
attr |
YolosConfig.intermediate_size |
1 | 0 | 0 |
attr |
YolosConfig.hidden_act |
1 | 0 | 0 |
attr |
YolosConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
YolosConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
YolosConfig.initializer_range |
1 | 0 | 0 |
attr |
YolosConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
YolosConfig.image_size |
1 | 0 | 0 |
attr |
YolosConfig.patch_size |
1 | 0 | 0 |
attr |
YolosConfig.num_channels |
1 | 0 | 0 |
attr |
YolosConfig.qkv_bias |
1 | 0 | 0 |
attr |
YolosConfig.num_detection_tokens |
1 | 0 | 0 |
attr |
YolosConfig.use_mid_position_embeddings |
1 | 0 | 0 |
attr |
YolosConfig.auxiliary_loss |
1 | 0 | 0 |
attr |
YolosConfig.class_cost |
1 | 0 | 0 |
attr |
YolosConfig.bbox_cost |
1 | 0 | 0 |
attr |
YolosConfig.giou_cost |
1 | 0 | 0 |
attr |
YolosConfig.bbox_loss_coefficient |
1 | 0 | 0 |
attr |
YolosConfig.giou_loss_coefficient |
1 | 0 | 0 |
attr |
YolosConfig.eos_coefficient |
1 | 0 | 0 |
transformers.models.yolos.image_processing_yolos (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
YolosImageProcessor.__init__ |
14 | 13 | 0 |
meth |
YolosImageProcessor.resize |
7 | 6 | 0 |
meth |
YolosImageProcessor.resize_annotation |
5 | 2 | 0 |
meth |
YolosImageProcessor._update_annotation_for_padded_image |
6 | 4 | 0 |
meth |
YolosImageProcessor.preprocess |
21 | 19 | 0 |
meth |
YolosImageProcessor.post_process_object_detection |
4 | 2 | 0 |
attr |
YolosImageProcessor.format |
1 | 0 | 0 |
attr |
YolosImageProcessor.do_resize |
1 | 0 | 0 |
attr |
YolosImageProcessor.size |
1 | 0 | 0 |
attr |
YolosImageProcessor.resample |
1 | 0 | 0 |
attr |
YolosImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
YolosImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
YolosImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
YolosImageProcessor.do_convert_annotations |
1 | 0 | 0 |
attr |
YolosImageProcessor.image_mean |
1 | 0 | 0 |
attr |
YolosImageProcessor.image_std |
1 | 0 | 0 |
attr |
YolosImageProcessor.do_pad |
1 | 0 | 0 |
attr |
YolosImageProcessor.pad_size |
1 | 0 | 0 |
transformers.models.yolos.image_processing_yolos_fast (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
YolosImageProcessorFast.resize |
5 | 4 | 0 |
meth |
YolosImageProcessorFast.resize_annotation |
6 | 5 | 0 |
meth |
YolosImageProcessorFast._update_annotation_for_padded_image |
6 | 4 | 0 |
meth |
YolosImageProcessorFast.pad |
6 | 5 | 0 |
meth |
YolosImageProcessorFast._preprocess |
19 | 18 | 0 |
meth |
YolosImageProcessorFast.post_process_object_detection |
4 | 2 | 0 |
attr |
YolosImageProcessorFast.size |
1 | 0 | 0 |
attr |
YolosImageProcessorFast.do_convert_annotations |
1 | 0 | 0 |
transformers.models.yolos.modeling_yolos (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
YolosModel.__init__ |
3 | 2 | 0 |
attr |
YolosModel.embeddings |
1 | 0 | 0 |
attr |
YolosModel.encoder |
1 | 0 | 0 |
attr |
YolosModel.layernorm |
1 | 0 | 0 |
attr |
YolosModel.pooler |
1 | 0 | 0 |
meth |
YolosForObjectDetection.__init__ |
2 | 1 | 0 |
meth |
YolosForObjectDetection._set_aux_loss |
3 | 0 | 0 |
attr |
YolosForObjectDetection.vit |
1 | 0 | 0 |
attr |
YolosForObjectDetection.class_labels_classifier |
1 | 0 | 0 |
attr |
YolosForObjectDetection.bbox_predictor |
1 | 0 | 0 |
transformers.models.yolos.modular_yolos (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
YolosImageProcessorFast.post_process_object_detection |
4 | 2 | 0 |
meth |
YolosImageProcessorFast.post_process_instance_segmentation |
1 | 0 | 0 |
meth |
YolosImageProcessorFast.post_process_semantic_segmentation |
1 | 0 | 0 |
meth |
YolosImageProcessorFast.post_process_panoptic_segmentation |
1 | 0 | 0 |
transformers.models.yoso.configuration_yoso (48 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
YosoConfig.__init__ |
25 | 0 | 0 |
attr |
YosoConfig.pad_token_id |
1 | 0 | 0 |
attr |
YosoConfig.bos_token_id |
1 | 0 | 0 |
attr |
YosoConfig.eos_token_id |
1 | 0 | 0 |
attr |
YosoConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
YosoConfig.add_cross_attention |
1 | 0 | 0 |
attr |
YosoConfig.vocab_size |
1 | 0 | 0 |
attr |
YosoConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
YosoConfig.hidden_size |
1 | 0 | 0 |
attr |
YosoConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
YosoConfig.num_attention_heads |
1 | 0 | 0 |
attr |
YosoConfig.intermediate_size |
1 | 0 | 0 |
attr |
YosoConfig.hidden_act |
1 | 0 | 0 |
attr |
YosoConfig.hidden_dropout_prob |
1 | 0 | 0 |
attr |
YosoConfig.attention_probs_dropout_prob |
1 | 0 | 0 |
attr |
YosoConfig.initializer_range |
1 | 0 | 0 |
attr |
YosoConfig.type_vocab_size |
1 | 0 | 0 |
attr |
YosoConfig.layer_norm_eps |
1 | 0 | 0 |
attr |
YosoConfig.use_expectation |
1 | 0 | 0 |
attr |
YosoConfig.hash_code_len |
1 | 0 | 0 |
attr |
YosoConfig.num_hash |
1 | 0 | 0 |
attr |
YosoConfig.conv_window |
1 | 0 | 0 |
attr |
YosoConfig.use_fast_hash |
1 | 0 | 0 |
attr |
YosoConfig.lsh_backward |
1 | 0 | 0 |
transformers.models.yoso.modeling_yoso (56 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
YosoPreTrainedModel._init_weights |
2 | 1 | 0 |
meth |
YosoForMultipleChoice.__init__ |
2 | 0 | 0 |
meth |
YosoForMultipleChoice.forward |
11 | 10 | 0 |
attr |
YosoForMultipleChoice.yoso |
1 | 0 | 0 |
attr |
YosoForMultipleChoice.pre_classifier |
1 | 0 | 0 |
attr |
YosoForMultipleChoice.classifier |
1 | 0 | 0 |
meth |
YosoForMaskedLM.__init__ |
2 | 0 | 0 |
meth |
YosoForMaskedLM.get_output_embeddings |
1 | 0 | 0 |
meth |
YosoForMaskedLM.set_output_embeddings |
2 | 0 | 0 |
meth |
YosoForMaskedLM.forward |
11 | 10 | 0 |
attr |
YosoForMaskedLM.yoso |
1 | 0 | 0 |
attr |
YosoForMaskedLM.cls |
1 | 0 | 0 |
meth |
YosoForTokenClassification.__init__ |
2 | 0 | 0 |
meth |
YosoForTokenClassification.forward |
11 | 10 | 0 |
attr |
YosoForTokenClassification.num_labels |
1 | 0 | 0 |
attr |
YosoForTokenClassification.yoso |
1 | 0 | 0 |
attr |
YosoForTokenClassification.dropout |
1 | 0 | 0 |
attr |
YosoForTokenClassification.classifier |
1 | 0 | 0 |
meth |
YosoLayer.__init__ |
2 | 0 | 0 |
meth |
YosoLayer.forward |
4 | 0 | 0 |
meth |
YosoLayer.feed_forward_chunk |
2 | 0 | 0 |
attr |
YosoLayer.chunk_size_feed_forward |
1 | 0 | 0 |
attr |
YosoLayer.seq_len_dim |
1 | 0 | 0 |
attr |
YosoLayer.attention |
1 | 0 | 0 |
attr |
YosoLayer.add_cross_attention |
1 | 0 | 0 |
attr |
YosoLayer.intermediate |
1 | 0 | 0 |
attr |
YosoLayer.output |
1 | 0 | 0 |
meth |
YosoModel.__init__ |
2 | 0 | 0 |
meth |
YosoModel.get_input_embeddings |
1 | 0 | 0 |
meth |
YosoModel.set_input_embeddings |
2 | 0 | 0 |
meth |
YosoModel.forward |
10 | 9 | 0 |
attr |
YosoModel.embeddings |
1 | 0 | 0 |
attr |
YosoModel.encoder |
1 | 0 | 0 |
meth |
YosoForQuestionAnswering.__init__ |
2 | 0 | 0 |
meth |
YosoForQuestionAnswering.forward |
12 | 11 | 0 |
attr |
YosoForQuestionAnswering.num_labels |
1 | 0 | 0 |
attr |
YosoForQuestionAnswering.yoso |
1 | 0 | 0 |
attr |
YosoForQuestionAnswering.qa_outputs |
1 | 0 | 0 |
meth |
YosoForSequenceClassification.__init__ |
2 | 0 | 0 |
meth |
YosoForSequenceClassification.forward |
11 | 10 | 0 |
attr |
YosoForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
YosoForSequenceClassification.yoso |
1 | 0 | 0 |
attr |
YosoForSequenceClassification.classifier |
1 | 0 | 0 |
transformers.models.youtu.configuration_youtu (29 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
YoutuConfig.__init__ |
27 | 25 | 0 |
attr |
YoutuConfig.vocab_size |
1 | 0 | 0 |
attr |
YoutuConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
YoutuConfig.hidden_size |
1 | 0 | 0 |
attr |
YoutuConfig.intermediate_size |
1 | 0 | 0 |
attr |
YoutuConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
YoutuConfig.num_attention_heads |
1 | 0 | 0 |
attr |
YoutuConfig.kv_lora_rank |
1 | 0 | 0 |
attr |
YoutuConfig.q_lora_rank |
1 | 0 | 0 |
attr |
YoutuConfig.qk_rope_head_dim |
1 | 0 | 0 |
attr |
YoutuConfig.v_head_dim |
1 | 0 | 0 |
attr |
YoutuConfig.qk_nope_head_dim |
1 | 0 | 0 |
attr |
YoutuConfig.qk_head_dim |
1 | 0 | 0 |
attr |
YoutuConfig.head_dim |
1 | 0 | 0 |
attr |
YoutuConfig.rope_interleave |
1 | 0 | 0 |
attr |
YoutuConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
YoutuConfig.hidden_act |
1 | 0 | 0 |
attr |
YoutuConfig.initializer_range |
1 | 0 | 0 |
attr |
YoutuConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
YoutuConfig.use_cache |
1 | 0 | 0 |
attr |
YoutuConfig.attention_bias |
1 | 0 | 0 |
attr |
YoutuConfig.attention_dropout |
1 | 0 | 0 |
attr |
YoutuConfig.rope_parameters |
1 | 0 | 0 |
attr |
YoutuConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
YoutuConfig.pad_token_id |
1 | 0 | 0 |
attr |
YoutuConfig.bos_token_id |
1 | 0 | 0 |
attr |
YoutuConfig.eos_token_id |
1 | 0 | 0 |
attr |
YoutuConfig.embedding_initializer_range |
1 | 0 | 0 |
transformers.models.youtu.modeling_youtu (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
YoutuForCausalLM.__init__ |
2 | 0 | 0 |
attr |
YoutuForCausalLM.model |
1 | 0 | 0 |
attr |
YoutuForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
YoutuForCausalLM.lm_head |
1 | 0 | 0 |
meth |
YoutuModel.__init__ |
2 | 1 | 0 |
attr |
YoutuModel.padding_idx |
1 | 0 | 0 |
attr |
YoutuModel.vocab_size |
1 | 0 | 0 |
attr |
YoutuModel.embed_tokens |
1 | 0 | 0 |
attr |
YoutuModel.layers |
1 | 0 | 0 |
attr |
YoutuModel.norm |
1 | 0 | 0 |
attr |
YoutuModel.rotary_emb |
1 | 0 | 0 |
attr |
YoutuModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
YoutuPreTrainedModel._init_weights |
2 | 0 | 0 |
transformers.models.youtu.modular_youtu (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
YoutuPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
YoutuConfig.__init__ |
27 | 25 | 0 |
meth |
YoutuConfig.convert_rope_params_to_dict |
3 | 1 | 0 |
attr |
YoutuConfig.embedding_initializer_range |
1 | 0 | 0 |
attr |
YoutuConfig.initializer_range |
1 | 0 | 0 |
transformers.models.zamba.configuration_zamba (73 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ZambaConfig.__init__ |
35 | 0 | 0 |
meth |
ZambaConfig._layers_block_type |
4 | 0 | 0 |
attr |
ZambaConfig.vocab_size |
1 | 0 | 0 |
attr |
ZambaConfig.tie_word_embeddings |
1 | 0 | 0 |
attr |
ZambaConfig.hidden_size |
1 | 0 | 0 |
attr |
ZambaConfig.intermediate_size |
1 | 0 | 0 |
attr |
ZambaConfig.num_hidden_layers |
1 | 0 | 0 |
attr |
ZambaConfig.num_attention_heads |
1 | 0 | 0 |
attr |
ZambaConfig.max_position_embeddings |
1 | 0 | 0 |
attr |
ZambaConfig.attention_dropout |
1 | 0 | 0 |
attr |
ZambaConfig.num_key_value_heads |
1 | 0 | 0 |
attr |
ZambaConfig.n_mamba_heads |
1 | 0 | 0 |
attr |
ZambaConfig.hidden_act |
1 | 0 | 0 |
attr |
ZambaConfig.hidden_mamba_act |
1 | 0 | 0 |
attr |
ZambaConfig.initializer_range |
1 | 0 | 0 |
attr |
ZambaConfig.rms_norm_eps |
1 | 0 | 0 |
attr |
ZambaConfig.use_cache |
1 | 0 | 0 |
attr |
ZambaConfig.num_logits_to_keep |
1 | 0 | 0 |
attr |
ZambaConfig.attn_layer_period |
1 | 0 | 0 |
attr |
ZambaConfig.attn_layer_offset |
1 | 0 | 0 |
attr |
ZambaConfig.use_mamba_kernels |
1 | 0 | 0 |
attr |
ZambaConfig.mamba_d_state |
1 | 0 | 0 |
attr |
ZambaConfig.mamba_d_conv |
1 | 0 | 0 |
attr |
ZambaConfig.mamba_expand |
1 | 0 | 0 |
attr |
ZambaConfig.mamba_dt_rank |
1 | 0 | 0 |
attr |
ZambaConfig.time_step_min |
1 | 0 | 0 |
attr |
ZambaConfig.time_step_max |
1 | 0 | 0 |
attr |
ZambaConfig.time_step_floor |
1 | 0 | 0 |
attr |
ZambaConfig.mamba_conv_bias |
1 | 0 | 0 |
attr |
ZambaConfig.mamba_proj_bias |
1 | 0 | 0 |
attr |
ZambaConfig.layers_block_type |
1 | 0 | 0 |
attr |
ZambaConfig.pad_token_id |
1 | 0 | 0 |
attr |
ZambaConfig.bos_token_id |
1 | 0 | 0 |
attr |
ZambaConfig.eos_token_id |
1 | 0 | 0 |
attr |
ZambaConfig.attention_hidden_size |
1 | 0 | 0 |
attr |
ZambaConfig.attention_head_dim |
1 | 0 | 0 |
transformers.models.zamba.modeling_zamba (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ZambaModel.__init__ |
2 | 1 | 0 |
meth |
ZambaModel.forward |
12 | 11 | 0 |
attr |
ZambaModel.padding_idx |
1 | 0 | 0 |
attr |
ZambaModel.vocab_size |
1 | 0 | 0 |
attr |
ZambaModel.embed_tokens |
1 | 0 | 0 |
attr |
ZambaModel.layers_block_type |
1 | 0 | 0 |
attr |
ZambaModel.layers |
1 | 0 | 0 |
attr |
ZambaModel.final_layernorm |
1 | 0 | 0 |
attr |
ZambaModel.gradient_checkpointing |
1 | 0 | 0 |
meth |
ZambaForSequenceClassification.__init__ |
2 | 0 | 0 |
meth |
ZambaForSequenceClassification.forward |
12 | 11 | 0 |
attr |
ZambaForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
ZambaForSequenceClassification.model |
1 | 0 | 0 |
attr |
ZambaForSequenceClassification.score |
1 | 0 | 0 |
meth |
ZambaPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
ZambaForCausalLM.__init__ |
2 | 1 | 0 |
meth |
ZambaForCausalLM.forward |
14 | 13 | 0 |
meth |
ZambaForCausalLM.prepare_inputs_for_generation |
10 | 0 | 0 |
attr |
ZambaForCausalLM.model |
1 | 0 | 0 |
attr |
ZambaForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
ZambaForCausalLM.lm_head |
1 | 0 | 0 |
transformers.models.zamba2.configuration_zamba2 (45 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Zamba2Config.__init__ |
39 | 37 | 0 |
attr |
Zamba2Config.pad_token_id |
1 | 0 | 0 |
attr |
Zamba2Config.bos_token_id |
1 | 0 | 0 |
attr |
Zamba2Config.eos_token_id |
1 | 0 | 0 |
attr |
Zamba2Config.tie_word_embeddings |
1 | 0 | 0 |
attr |
Zamba2Config.vocab_size |
1 | 0 | 0 |
attr |
Zamba2Config.max_position_embeddings |
1 | 0 | 0 |
attr |
Zamba2Config.hidden_size |
1 | 0 | 0 |
attr |
Zamba2Config.hidden_act |
1 | 0 | 0 |
attr |
Zamba2Config.num_hidden_layers |
1 | 0 | 0 |
attr |
Zamba2Config.num_attention_heads |
1 | 0 | 0 |
attr |
Zamba2Config.num_mem_blocks |
1 | 0 | 0 |
attr |
Zamba2Config.attention_hidden_size |
1 | 0 | 0 |
attr |
Zamba2Config.attention_head_dim |
1 | 0 | 0 |
attr |
Zamba2Config.attention_dropout |
1 | 0 | 0 |
attr |
Zamba2Config.use_mem_rope |
1 | 0 | 0 |
attr |
Zamba2Config.use_long_context |
1 | 0 | 0 |
attr |
Zamba2Config.rope_parameters |
1 | 0 | 0 |
attr |
Zamba2Config.mamba_d_state |
1 | 0 | 0 |
attr |
Zamba2Config.mamba_d_conv |
1 | 0 | 0 |
attr |
Zamba2Config.mamba_expand |
1 | 0 | 0 |
attr |
Zamba2Config.add_bias_linear |
1 | 0 | 0 |
attr |
Zamba2Config.mamba_ngroups |
1 | 0 | 0 |
attr |
Zamba2Config.n_mamba_heads |
1 | 0 | 0 |
attr |
Zamba2Config.mamba_headdim |
1 | 0 | 0 |
attr |
Zamba2Config.use_conv_bias |
1 | 0 | 0 |
attr |
Zamba2Config.chunk_size |
1 | 0 | 0 |
attr |
Zamba2Config.time_step_limit |
1 | 0 | 0 |
attr |
Zamba2Config.use_shared_attention_adapter |
1 | 0 | 0 |
attr |
Zamba2Config.adapter_rank |
1 | 0 | 0 |
attr |
Zamba2Config.time_step_min |
1 | 0 | 0 |
attr |
Zamba2Config.time_step_max |
1 | 0 | 0 |
attr |
Zamba2Config.time_step_floor |
1 | 0 | 0 |
attr |
Zamba2Config.num_key_value_heads |
1 | 0 | 0 |
attr |
Zamba2Config.kv_channels |
1 | 0 | 0 |
attr |
Zamba2Config.num_query_groups |
1 | 0 | 0 |
attr |
Zamba2Config.initializer_range |
1 | 0 | 0 |
attr |
Zamba2Config.rms_norm_eps |
1 | 0 | 0 |
attr |
Zamba2Config.use_cache |
1 | 0 | 0 |
attr |
Zamba2Config.num_logits_to_keep |
1 | 0 | 0 |
attr |
Zamba2Config.hybrid_layer_ids |
1 | 0 | 0 |
attr |
Zamba2Config.use_mem_eff_path |
1 | 0 | 0 |
attr |
Zamba2Config.intermediate_size |
1 | 0 | 0 |
attr |
Zamba2Config.layers_block_type |
1 | 0 | 0 |
transformers.models.zamba2.modeling_zamba2 (34 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Zamba2PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Zamba2ForCausalLM.__init__ |
2 | 1 | 0 |
meth |
Zamba2ForCausalLM.forward |
14 | 13 | 0 |
meth |
Zamba2ForCausalLM.prepare_inputs_for_generation |
10 | 0 | 0 |
attr |
Zamba2ForCausalLM.model |
1 | 0 | 0 |
attr |
Zamba2ForCausalLM.vocab_size |
1 | 0 | 0 |
attr |
Zamba2ForCausalLM.lm_head |
1 | 0 | 0 |
meth |
Zamba2Model.__init__ |
2 | 1 | 0 |
meth |
Zamba2Model.forward |
12 | 11 | 0 |
meth |
Zamba2Model.get_layers |
1 | 0 | 0 |
attr |
Zamba2Model.padding_idx |
1 | 0 | 0 |
attr |
Zamba2Model.vocab_size |
1 | 0 | 0 |
attr |
Zamba2Model.embed_tokens |
1 | 0 | 0 |
attr |
Zamba2Model.layers_block_type |
1 | 0 | 0 |
attr |
Zamba2Model.layers |
1 | 0 | 0 |
attr |
Zamba2Model.final_layernorm |
1 | 0 | 0 |
attr |
Zamba2Model.gradient_checkpointing |
1 | 0 | 0 |
attr |
Zamba2Model.rotary_emb |
1 | 0 | 0 |
meth |
Zamba2ForSequenceClassification.__init__ |
2 | 0 | 0 |
meth |
Zamba2ForSequenceClassification.forward |
12 | 11 | 0 |
attr |
Zamba2ForSequenceClassification.num_labels |
1 | 0 | 0 |
attr |
Zamba2ForSequenceClassification.model |
1 | 0 | 0 |
attr |
Zamba2ForSequenceClassification.score |
1 | 0 | 0 |
transformers.models.zamba2.modular_zamba2 (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Zamba2PreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
Zamba2Model.__init__ |
2 | 1 | 0 |
meth |
Zamba2Model.get_layers |
1 | 0 | 0 |
meth |
Zamba2Model.forward |
12 | 11 | 0 |
attr |
Zamba2Model.padding_idx |
1 | 0 | 0 |
attr |
Zamba2Model.vocab_size |
1 | 0 | 0 |
attr |
Zamba2Model.embed_tokens |
1 | 0 | 0 |
attr |
Zamba2Model.layers_block_type |
1 | 0 | 0 |
attr |
Zamba2Model.layers |
1 | 0 | 0 |
attr |
Zamba2Model.final_layernorm |
1 | 0 | 0 |
attr |
Zamba2Model.gradient_checkpointing |
1 | 0 | 0 |
attr |
Zamba2Model.rotary_emb |
1 | 0 | 0 |
transformers.models.zoedepth.configuration_zoedepth (56 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ZoeDepthConfig.__init__ |
29 | 0 | 0 |
attr |
ZoeDepthConfig.backbone_config |
1 | 0 | 0 |
attr |
ZoeDepthConfig.hidden_act |
1 | 0 | 0 |
attr |
ZoeDepthConfig.initializer_range |
1 | 0 | 0 |
attr |
ZoeDepthConfig.batch_norm_eps |
1 | 0 | 0 |
attr |
ZoeDepthConfig.readout_type |
1 | 0 | 0 |
attr |
ZoeDepthConfig.reassemble_factors |
1 | 0 | 0 |
attr |
ZoeDepthConfig.neck_hidden_sizes |
1 | 0 | 0 |
attr |
ZoeDepthConfig.fusion_hidden_size |
1 | 0 | 0 |
attr |
ZoeDepthConfig.head_in_index |
1 | 0 | 0 |
attr |
ZoeDepthConfig.use_batch_norm_in_fusion_residual |
1 | 0 | 0 |
attr |
ZoeDepthConfig.use_bias_in_fusion_residual |
1 | 0 | 0 |
attr |
ZoeDepthConfig.num_relative_features |
1 | 0 | 0 |
attr |
ZoeDepthConfig.add_projection |
1 | 0 | 0 |
attr |
ZoeDepthConfig.bottleneck_features |
1 | 0 | 0 |
attr |
ZoeDepthConfig.num_attractors |
1 | 0 | 0 |
attr |
ZoeDepthConfig.bin_embedding_dim |
1 | 0 | 0 |
attr |
ZoeDepthConfig.attractor_alpha |
1 | 0 | 0 |
attr |
ZoeDepthConfig.attractor_gamma |
1 | 0 | 0 |
attr |
ZoeDepthConfig.attractor_kind |
1 | 0 | 0 |
attr |
ZoeDepthConfig.min_temp |
1 | 0 | 0 |
attr |
ZoeDepthConfig.max_temp |
1 | 0 | 0 |
attr |
ZoeDepthConfig.bin_centers_type |
1 | 0 | 0 |
attr |
ZoeDepthConfig.bin_configurations |
1 | 0 | 0 |
attr |
ZoeDepthConfig.num_patch_transformer_layers |
1 | 0 | 0 |
attr |
ZoeDepthConfig.patch_transformer_hidden_size |
1 | 0 | 0 |
attr |
ZoeDepthConfig.patch_transformer_intermediate_size |
1 | 0 | 0 |
attr |
ZoeDepthConfig.patch_transformer_num_attention_heads |
1 | 0 | 0 |
transformers.models.zoedepth.image_processing_zoedepth (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ZoeDepthImageProcessor.__init__ |
13 | 12 | 0 |
meth |
ZoeDepthImageProcessor.pad_image |
5 | 4 | 0 |
attr |
ZoeDepthImageProcessor.do_rescale |
1 | 0 | 0 |
attr |
ZoeDepthImageProcessor.rescale_factor |
1 | 0 | 0 |
attr |
ZoeDepthImageProcessor.do_pad |
1 | 0 | 0 |
attr |
ZoeDepthImageProcessor.do_normalize |
1 | 0 | 0 |
attr |
ZoeDepthImageProcessor.image_mean |
1 | 0 | 0 |
attr |
ZoeDepthImageProcessor.image_std |
1 | 0 | 0 |
attr |
ZoeDepthImageProcessor.do_resize |
1 | 0 | 0 |
attr |
ZoeDepthImageProcessor.size |
1 | 0 | 0 |
attr |
ZoeDepthImageProcessor.keep_aspect_ratio |
1 | 0 | 0 |
attr |
ZoeDepthImageProcessor.ensure_multiple_of |
1 | 0 | 0 |
attr |
ZoeDepthImageProcessor.resample |
1 | 0 | 0 |
transformers.models.zoedepth.image_processing_zoedepth_fast (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ZoeDepthImageProcessorFast._pad_images |
2 | 1 | 0 |
meth |
ZoeDepthImageProcessorFast._preprocess |
16 | 15 | 0 |
transformers.models.zoedepth.modeling_zoedepth (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ZoeDepthPreTrainedModel._init_weights |
2 | 0 | 0 |
meth |
ZoeDepthForDepthEstimation.__init__ |
2 | 0 | 0 |
meth |
ZoeDepthForDepthEstimation.forward |
7 | 6 | 0 |
attr |
ZoeDepthForDepthEstimation.backbone |
1 | 0 | 0 |
attr |
ZoeDepthForDepthEstimation.neck |
1 | 0 | 0 |
attr |
ZoeDepthForDepthEstimation.relative_head |
1 | 0 | 0 |
attr |
ZoeDepthForDepthEstimation.metric_head |
1 | 0 | 0 |
attr |
ZoeDepthForDepthEstimation.patch_size |
1 | 0 | 0 |
transformers.monkey_patching (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
apply_patches |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.optimization (55 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
get_wsd_schedule |
11 | 10 | 0 |
func |
get_constant_schedule |
3 | 2 | 0 |
func |
get_linear_schedule_with_warmup |
5 | 0 | 0 |
func |
get_constant_schedule_with_warmup |
4 | 3 | 0 |
func |
get_polynomial_decay_schedule_with_warmup |
7 | 0 | 0 |
func |
get_cosine_with_hard_restarts_schedule_with_warmup |
6 | 5 | 0 |
func |
get_scheduler |
6 | 5 | 0 |
func |
get_cosine_with_min_lr_schedule_with_warmup_lr_rate |
9 | 8 | 0 |
meth |
Adafactor.__init__ |
11 | 0 | 0 |
meth |
Adafactor._get_lr |
3 | 0 | 0 |
meth |
Adafactor._get_options |
3 | 0 | 0 |
meth |
Adafactor._rms |
2 | 0 | 0 |
meth |
Adafactor._approx_sq_grad |
3 | 0 | 0 |
meth |
Adafactor.step |
2 | 0 | 0 |
func |
get_inverse_sqrt_schedule |
5 | 4 | 0 |
func |
get_cosine_schedule_with_warmup |
6 | 5 | 0 |
func |
get_cosine_with_min_lr_schedule_with_warmup |
8 | 7 | 0 |
func |
get_reduce_on_plateau_schedule |
3 | 1 | 0 |
meth |
AdafactorSchedule.__init__ |
3 | 0 | 0 |
meth |
AdafactorSchedule.get_lr |
1 | 0 | 0 |
func |
get_adafactor_schedule |
3 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.pipelines (6 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
pipeline |
18 | 18 | 1 |
attr |
PIPELINE_REGISTRY |
1 | 0 | 0 |
func |
get_task |
4 | 3 | 0 |
func |
clean_custom_task |
2 | 0 | 0 |
attr |
SUPPORTED_TASKS |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.pipelines.any_to_any (33 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AnyToAnyPipeline.__init__ |
3 | 0 | 0 |
meth |
AnyToAnyPipeline._sanitize_parameters |
13 | 1 | 0 |
meth |
AnyToAnyPipeline.__call__ |
6 | 6 | 1 |
meth |
AnyToAnyPipeline.preprocess |
5 | 0 | 0 |
meth |
AnyToAnyPipeline._forward |
3 | 0 | 0 |
meth |
AnyToAnyPipeline.postprocess |
6 | 0 | 0 |
attr |
AnyToAnyPipeline._default_generation_config |
1 | 0 | 0 |
meth |
Chat.__init__ |
2 | 1 | 0 |
attr |
Chat.messages |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.pipelines.audio_classification (16 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AudioClassificationPipeline.__init__ |
3 | 0 | 0 |
meth |
AudioClassificationPipeline.__call__ |
3 | 3 | 1 |
meth |
AudioClassificationPipeline._sanitize_parameters |
4 | 0 | 0 |
meth |
AudioClassificationPipeline.preprocess |
2 | 0 | 0 |
meth |
AudioClassificationPipeline._forward |
2 | 0 | 0 |
meth |
AudioClassificationPipeline.postprocess |
4 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.pipelines.audio_utils (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
ffmpeg_microphone |
6 | 5 | 0 |
func |
ffmpeg_microphone_live |
8 | 7 | 0 |
func |
chunk_bytes_iter |
5 | 3 | 0 |
transformers.pipelines.automatic_speech_recognition (37 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AutomaticSpeechRecognitionPipeline.__init__ |
7 | 5 | 0 |
meth |
AutomaticSpeechRecognitionPipeline.__call__ |
3 | 3 | 1 |
meth |
AutomaticSpeechRecognitionPipeline._sanitize_parameters |
8 | 0 | 0 |
meth |
AutomaticSpeechRecognitionPipeline.preprocess |
4 | 0 | 0 |
meth |
AutomaticSpeechRecognitionPipeline._forward |
4 | 0 | 0 |
meth |
AutomaticSpeechRecognitionPipeline.postprocess |
5 | 1 | 0 |
prop |
AutomaticSpeechRecognitionPipeline._align_to |
1 | 0 | 0 |
attr |
AutomaticSpeechRecognitionPipeline._default_generation_config |
1 | 0 | 0 |
attr |
AutomaticSpeechRecognitionPipeline.type |
1 | 0 | 0 |
attr |
AutomaticSpeechRecognitionPipeline.decoder |
1 | 0 | 0 |
func |
chunk_iter |
7 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
rescale_stride |
3 | 0 | 0 |
transformers.pipelines.base (105 missing, 3 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PipedPipelineDataFormat.__iter__ |
1 | 0 | 0 |
meth |
PipedPipelineDataFormat.save |
2 | 1 | 0 |
meth |
PipelineDataFormat.__init__ |
5 | 4 | 0 |
meth |
PipelineDataFormat.__iter__ |
1 | 0 | 0 |
meth |
PipelineDataFormat.save |
2 | 1 | 0 |
meth |
PipelineDataFormat.from_str |
6 | 5 | 0 |
attr |
PipelineDataFormat.output_path |
1 | 0 | 0 |
attr |
PipelineDataFormat.input_path |
1 | 0 | 0 |
attr |
PipelineDataFormat.column |
1 | 0 | 0 |
attr |
PipelineDataFormat.is_multi_columns |
1 | 0 | 0 |
meth |
CsvPipelineDataFormat.__init__ |
5 | 3 | 0 |
meth |
CsvPipelineDataFormat.__iter__ |
1 | 0 | 0 |
meth |
CsvPipelineDataFormat.save |
2 | 1 | 0 |
meth |
JsonPipelineDataFormat.__init__ |
5 | 3 | 0 |
meth |
JsonPipelineDataFormat.__iter__ |
1 | 0 | 0 |
meth |
JsonPipelineDataFormat.save |
2 | 1 | 0 |
meth |
Pipeline.__init__ |
10 | 8 | 0 |
meth |
Pipeline.__repr__ |
1 | 0 | 0 |
meth |
Pipeline.save_pretrained |
3 | 2 | 1 |
meth |
Pipeline.transform |
2 | 0 | 0 |
meth |
Pipeline.predict |
2 | 0 | 0 |
meth |
Pipeline.device_placement |
1 | 0 | 0 |
meth |
Pipeline.ensure_tensor_on_device |
2 | 0 | 0 |
meth |
Pipeline._ensure_tensor_on_device |
3 | 0 | 0 |
meth |
Pipeline.check_model_type |
2 | 1 | 0 |
meth |
Pipeline._sanitize_parameters |
2 | 0 | 0 |
meth |
Pipeline.preprocess |
3 | 3 | 1 |
meth |
Pipeline.postprocess |
3 | 3 | 1 |
meth |
Pipeline.get_inference_context |
1 | 0 | 0 |
meth |
Pipeline.forward |
3 | 0 | 0 |
meth |
Pipeline.get_iterator |
7 | 2 | 0 |
meth |
Pipeline.__call__ |
6 | 0 | 0 |
meth |
Pipeline.run_multi |
5 | 0 | 0 |
meth |
Pipeline.run_single |
5 | 0 | 0 |
meth |
Pipeline.iterate |
5 | 0 | 0 |
attr |
Pipeline.task |
1 | 0 | 0 |
attr |
Pipeline.model |
1 | 0 | 0 |
attr |
Pipeline.tokenizer |
1 | 0 | 0 |
attr |
Pipeline.feature_extractor |
1 | 0 | 0 |
attr |
Pipeline.image_processor |
1 | 0 | 0 |
attr |
Pipeline.processor |
1 | 0 | 0 |
attr |
Pipeline.binary_output |
1 | 0 | 0 |
attr |
Pipeline.call_count |
1 | 0 | 0 |
attr |
Pipeline.device |
1 | 0 | 0 |
attr |
Pipeline.prefix |
1 | 0 | 0 |
attr |
Pipeline.generation_config |
1 | 0 | 0 |
func |
pad_collate_fn |
3 | 0 | 0 |
meth |
ChunkPipeline.run_single |
5 | 0 | 0 |
meth |
ChunkPipeline.get_iterator |
7 | 2 | 0 |
func |
no_collate_fn |
2 | 0 | 0 |
attr |
PIPELINE_INIT_ARGS |
1 | 0 | 0 |
func |
load_model |
6 | 3 | 0 |
meth |
ArgumentHandler.__call__ |
3 | 0 | 0 |
meth |
PipelineRegistry.to_dict |
1 | 0 | 0 |
attr |
PipelineRegistry.supported_tasks |
1 | 0 | 0 |
attr |
PipelineRegistry.task_aliases |
1 | 0 | 0 |
meth |
PipelineException.__init__ |
4 | 3 | 0 |
attr |
PipelineException.task |
1 | 0 | 0 |
attr |
PipelineException.model |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.pipelines.depth_estimation (15 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DepthEstimationPipeline.__init__ |
3 | 0 | 0 |
meth |
DepthEstimationPipeline.__call__ |
3 | 3 | 1 |
meth |
DepthEstimationPipeline._sanitize_parameters |
4 | 0 | 0 |
meth |
DepthEstimationPipeline.preprocess |
3 | 0 | 0 |
meth |
DepthEstimationPipeline._forward |
2 | 0 | 0 |
meth |
DepthEstimationPipeline.postprocess |
2 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.pipelines.document_question_answering (50 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
DocumentQuestionAnsweringPipeline.__init__ |
3 | 0 | 0 |
meth |
DocumentQuestionAnsweringPipeline._sanitize_parameters |
12 | 2 | 0 |
meth |
DocumentQuestionAnsweringPipeline.__call__ |
5 | 5 | 1 |
meth |
DocumentQuestionAnsweringPipeline.preprocess |
9 | 1 | 0 |
meth |
DocumentQuestionAnsweringPipeline._forward |
3 | 0 | 0 |
meth |
DocumentQuestionAnsweringPipeline.postprocess |
4 | 0 | 0 |
meth |
DocumentQuestionAnsweringPipeline.postprocess_encoder_decoder_single |
3 | 0 | 0 |
meth |
DocumentQuestionAnsweringPipeline.postprocess_extractive_qa |
6 | 0 | 0 |
attr |
DocumentQuestionAnsweringPipeline._default_generation_config |
1 | 0 | 0 |
attr |
DocumentQuestionAnsweringPipeline.model_type |
1 | 0 | 0 |
func |
apply_tesseract |
4 | 3 | 0 |
func |
normalize_box |
4 | 0 | 0 |
func |
select_starts_ends |
9 | 4 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.pipelines.feature_extraction (12 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FeatureExtractionPipeline._sanitize_parameters |
5 | 0 | 0 |
meth |
FeatureExtractionPipeline.preprocess |
3 | 1 | 0 |
meth |
FeatureExtractionPipeline._forward |
2 | 0 | 0 |
meth |
FeatureExtractionPipeline.postprocess |
3 | 0 | 0 |
meth |
FeatureExtractionPipeline.__call__ |
3 | 3 | 1 |
transformers.pipelines.fill_mask (18 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FillMaskPipeline.ensure_exactly_one_mask_token |
2 | 1 | 0 |
meth |
FillMaskPipeline.preprocess |
5 | 1 | 0 |
meth |
FillMaskPipeline._forward |
2 | 0 | 0 |
meth |
FillMaskPipeline.postprocess |
4 | 0 | 0 |
meth |
FillMaskPipeline.get_target_ids |
2 | 0 | 0 |
meth |
FillMaskPipeline._sanitize_parameters |
4 | 0 | 0 |
meth |
FillMaskPipeline.__call__ |
3 | 3 | 1 |
attr |
logger |
1 | 0 | 0 |
transformers.pipelines.image_classification (21 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ImageClassificationPipeline.__init__ |
3 | 0 | 0 |
meth |
ImageClassificationPipeline._sanitize_parameters |
4 | 0 | 0 |
meth |
ImageClassificationPipeline.__call__ |
3 | 3 | 1 |
meth |
ImageClassificationPipeline.preprocess |
3 | 0 | 0 |
meth |
ImageClassificationPipeline._forward |
2 | 0 | 0 |
meth |
ImageClassificationPipeline.postprocess |
4 | 0 | 0 |
func |
sigmoid |
2 | 0 | 0 |
func |
softmax |
2 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.pipelines.image_feature_extraction (14 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ImageFeatureExtractionPipeline._sanitize_parameters |
5 | 0 | 0 |
meth |
ImageFeatureExtractionPipeline.preprocess |
4 | 1 | 0 |
meth |
ImageFeatureExtractionPipeline._forward |
2 | 0 | 0 |
meth |
ImageFeatureExtractionPipeline.postprocess |
4 | 0 | 0 |
meth |
ImageFeatureExtractionPipeline.__call__ |
3 | 3 | 1 |
transformers.pipelines.image_segmentation (18 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ImageSegmentationPipeline.__init__ |
3 | 0 | 0 |
meth |
ImageSegmentationPipeline._sanitize_parameters |
2 | 0 | 0 |
meth |
ImageSegmentationPipeline.__call__ |
3 | 3 | 1 |
meth |
ImageSegmentationPipeline.preprocess |
4 | 0 | 0 |
meth |
ImageSegmentationPipeline._forward |
2 | 0 | 0 |
meth |
ImageSegmentationPipeline.postprocess |
6 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.pipelines.image_text_to_text (30 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ImageTextToTextPipeline.__init__ |
3 | 0 | 0 |
meth |
ImageTextToTextPipeline._sanitize_parameters |
12 | 1 | 0 |
meth |
ImageTextToTextPipeline.__call__ |
4 | 4 | 1 |
meth |
ImageTextToTextPipeline.preprocess |
5 | 0 | 0 |
meth |
ImageTextToTextPipeline._forward |
3 | 0 | 0 |
meth |
ImageTextToTextPipeline.postprocess |
6 | 0 | 0 |
attr |
ImageTextToTextPipeline._default_generation_config |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.pipelines.keypoint_matching (13 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
KeypointMatchingPipeline.__init__ |
3 | 0 | 0 |
meth |
KeypointMatchingPipeline._sanitize_parameters |
3 | 0 | 0 |
meth |
KeypointMatchingPipeline.__call__ |
4 | 4 | 1 |
meth |
KeypointMatchingPipeline.preprocess |
3 | 0 | 0 |
meth |
KeypointMatchingPipeline._forward |
2 | 0 | 0 |
meth |
KeypointMatchingPipeline.postprocess |
3 | 1 | 0 |
func |
validate_image_pairs |
2 | 2 | 1 |
transformers.pipelines.mask_generation (21 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MaskGenerationPipeline.__init__ |
2 | 0 | 0 |
meth |
MaskGenerationPipeline._sanitize_parameters |
2 | 0 | 0 |
meth |
MaskGenerationPipeline.__call__ |
4 | 4 | 2 |
meth |
MaskGenerationPipeline.preprocess |
8 | 5 | 0 |
meth |
MaskGenerationPipeline._forward |
8 | 0 | 0 |
meth |
MaskGenerationPipeline.postprocess |
5 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.pipelines.object_detection (14 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ObjectDetectionPipeline.__init__ |
3 | 0 | 0 |
meth |
ObjectDetectionPipeline._sanitize_parameters |
2 | 0 | 0 |
meth |
ObjectDetectionPipeline.__call__ |
4 | 4 | 2 |
meth |
ObjectDetectionPipeline.preprocess |
3 | 0 | 0 |
meth |
ObjectDetectionPipeline._forward |
2 | 0 | 0 |
meth |
ObjectDetectionPipeline.postprocess |
3 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.pipelines.pt_utils (45 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PipelineChunkIterator.__init__ |
5 | 0 | 0 |
meth |
PipelineChunkIterator.__iter__ |
1 | 0 | 0 |
meth |
PipelineChunkIterator.__next__ |
1 | 0 | 0 |
meth |
PipelineIterator.__init__ |
5 | 0 | 0 |
meth |
PipelineIterator.__len__ |
1 | 0 | 0 |
meth |
PipelineIterator.__iter__ |
1 | 0 | 0 |
meth |
PipelineIterator.loader_batch_item |
1 | 0 | 0 |
meth |
PipelineIterator.__next__ |
1 | 0 | 0 |
attr |
PipelineIterator.loader |
1 | 0 | 0 |
attr |
PipelineIterator.infer |
1 | 0 | 0 |
attr |
PipelineIterator.params |
1 | 0 | 0 |
attr |
PipelineIterator.loader_batch_size |
1 | 0 | 0 |
meth |
PipelineDataset.__init__ |
4 | 0 | 0 |
meth |
PipelineDataset.__len__ |
1 | 0 | 0 |
meth |
PipelineDataset.__getitem__ |
2 | 0 | 0 |
attr |
PipelineDataset.dataset |
1 | 0 | 0 |
attr |
PipelineDataset.process |
1 | 0 | 0 |
attr |
PipelineDataset.params |
1 | 0 | 0 |
meth |
KeyDataset.__init__ |
3 | 2 | 0 |
meth |
KeyDataset.__len__ |
1 | 0 | 0 |
meth |
KeyDataset.__getitem__ |
2 | 0 | 0 |
attr |
KeyDataset.dataset |
1 | 0 | 0 |
attr |
KeyDataset.key |
1 | 0 | 0 |
meth |
KeyPairDataset.__init__ |
4 | 3 | 0 |
meth |
KeyPairDataset.__len__ |
1 | 0 | 0 |
meth |
KeyPairDataset.__getitem__ |
2 | 0 | 0 |
attr |
KeyPairDataset.dataset |
1 | 0 | 0 |
attr |
KeyPairDataset.key1 |
1 | 0 | 0 |
attr |
KeyPairDataset.key2 |
1 | 0 | 0 |
meth |
PipelinePackIterator.__iter__ |
1 | 0 | 0 |
meth |
PipelinePackIterator.__next__ |
1 | 0 | 0 |
transformers.pipelines.table_question_answering (32 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TableQuestionAnsweringPipeline.__init__ |
3 | 0 | 0 |
meth |
TableQuestionAnsweringPipeline.batch_inference |
2 | 0 | 0 |
meth |
TableQuestionAnsweringPipeline.sequential_inference |
2 | 0 | 0 |
meth |
TableQuestionAnsweringPipeline.__call__ |
3 | 0 | 0 |
meth |
TableQuestionAnsweringPipeline._sanitize_parameters |
5 | 0 | 0 |
meth |
TableQuestionAnsweringPipeline.preprocess |
4 | 0 | 0 |
meth |
TableQuestionAnsweringPipeline._forward |
4 | 0 | 0 |
meth |
TableQuestionAnsweringPipeline.postprocess |
2 | 0 | 0 |
attr |
TableQuestionAnsweringPipeline._default_generation_config |
1 | 0 | 0 |
attr |
TableQuestionAnsweringPipeline.aggregate |
1 | 0 | 0 |
attr |
TableQuestionAnsweringPipeline.type |
1 | 0 | 0 |
meth |
TableQuestionAnsweringArgumentHandler.call |
4 | 0 | 0 |
transformers.pipelines.text_classification (19 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TextClassificationPipeline.__init__ |
2 | 0 | 0 |
meth |
TextClassificationPipeline._sanitize_parameters |
4 | 0 | 0 |
meth |
TextClassificationPipeline.__call__ |
3 | 3 | 1 |
meth |
TextClassificationPipeline.preprocess |
3 | 1 | 0 |
meth |
TextClassificationPipeline._forward |
2 | 0 | 0 |
meth |
TextClassificationPipeline.postprocess |
5 | 0 | 0 |
func |
softmax |
2 | 0 | 0 |
func |
sigmoid |
2 | 0 | 0 |
transformers.pipelines.text_generation (46 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TextGenerationPipeline.__init__ |
3 | 0 | 0 |
meth |
TextGenerationPipeline._sanitize_parameters |
17 | 0 | 0 |
meth |
TextGenerationPipeline._parse_and_tokenize |
3 | 0 | 0 |
meth |
TextGenerationPipeline.__call__ |
3 | 3 | 1 |
meth |
TextGenerationPipeline.preprocess |
13 | 0 | 0 |
meth |
TextGenerationPipeline._forward |
3 | 0 | 0 |
meth |
TextGenerationPipeline.postprocess |
6 | 0 | 0 |
attr |
TextGenerationPipeline._default_generation_config |
1 | 0 | 0 |
transformers.pipelines.text_to_audio (21 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TextToAudioPipeline.__init__ |
5 | 0 | 0 |
meth |
TextToAudioPipeline.preprocess |
3 | 0 | 0 |
meth |
TextToAudioPipeline._forward |
3 | 0 | 0 |
meth |
TextToAudioPipeline.__call__ |
3 | 3 | 1 |
meth |
TextToAudioPipeline._sanitize_parameters |
4 | 0 | 0 |
meth |
TextToAudioPipeline.postprocess |
2 | 0 | 0 |
attr |
TextToAudioPipeline._default_generation_config |
1 | 0 | 0 |
attr |
TextToAudioPipeline.vocoder |
1 | 0 | 0 |
attr |
TextToAudioPipeline.sampling_rate |
1 | 0 | 0 |
attr |
TextToAudioPipeline.processor |
1 | 0 | 0 |
transformers.pipelines.token_classification (19 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TokenClassificationPipeline.__init__ |
3 | 0 | 0 |
meth |
TokenClassificationPipeline._sanitize_parameters |
7 | 5 | 0 |
meth |
TokenClassificationPipeline.__call__ |
3 | 3 | 1 |
meth |
TokenClassificationPipeline.preprocess |
4 | 0 | 0 |
meth |
TokenClassificationPipeline._forward |
2 | 0 | 0 |
meth |
TokenClassificationPipeline.postprocess |
4 | 0 | 0 |
meth |
TokenClassificationPipeline.aggregate_overlapping_entities |
2 | 0 | 0 |
meth |
TokenClassificationArgumentHandler.__call__ |
3 | 1 | 0 |
transformers.pipelines.video_classification (22 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VideoClassificationPipeline.__init__ |
3 | 0 | 0 |
meth |
VideoClassificationPipeline._sanitize_parameters |
5 | 0 | 0 |
meth |
VideoClassificationPipeline.__call__ |
3 | 3 | 1 |
meth |
VideoClassificationPipeline.preprocess |
4 | 0 | 0 |
meth |
VideoClassificationPipeline._forward |
2 | 0 | 0 |
meth |
VideoClassificationPipeline.postprocess |
4 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
read_video_pyav |
3 | 0 | 0 |
transformers.pipelines.zero_shot_audio_classification (13 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ZeroShotAudioClassificationPipeline.__init__ |
2 | 0 | 0 |
meth |
ZeroShotAudioClassificationPipeline.__call__ |
3 | 3 | 1 |
meth |
ZeroShotAudioClassificationPipeline._sanitize_parameters |
2 | 0 | 0 |
meth |
ZeroShotAudioClassificationPipeline.preprocess |
4 | 0 | 0 |
meth |
ZeroShotAudioClassificationPipeline._forward |
2 | 0 | 0 |
meth |
ZeroShotAudioClassificationPipeline.postprocess |
2 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.pipelines.zero_shot_classification (31 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ZeroShotClassificationPipeline.__init__ |
3 | 0 | 0 |
meth |
ZeroShotClassificationPipeline._parse_and_tokenize |
6 | 0 | 0 |
meth |
ZeroShotClassificationPipeline._sanitize_parameters |
2 | 0 | 0 |
meth |
ZeroShotClassificationPipeline.__call__ |
4 | 1 | 0 |
meth |
ZeroShotClassificationPipeline.preprocess |
4 | 0 | 0 |
meth |
ZeroShotClassificationPipeline._forward |
2 | 0 | 0 |
meth |
ZeroShotClassificationPipeline.postprocess |
3 | 0 | 0 |
prop |
ZeroShotClassificationPipeline.entailment_id |
1 | 0 | 0 |
meth |
ZeroShotClassificationArgumentHandler._parse_labels |
2 | 0 | 0 |
meth |
ZeroShotClassificationArgumentHandler.__call__ |
4 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.pipelines.zero_shot_image_classification (16 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ZeroShotImageClassificationPipeline.__init__ |
2 | 0 | 0 |
meth |
ZeroShotImageClassificationPipeline.__call__ |
4 | 4 | 1 |
meth |
ZeroShotImageClassificationPipeline._sanitize_parameters |
3 | 0 | 0 |
meth |
ZeroShotImageClassificationPipeline.preprocess |
6 | 0 | 0 |
meth |
ZeroShotImageClassificationPipeline._forward |
2 | 0 | 0 |
meth |
ZeroShotImageClassificationPipeline.postprocess |
2 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.pipelines.zero_shot_object_detection (14 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ZeroShotObjectDetectionPipeline.__init__ |
2 | 0 | 0 |
meth |
ZeroShotObjectDetectionPipeline.__call__ |
4 | 4 | 1 |
meth |
ZeroShotObjectDetectionPipeline._sanitize_parameters |
2 | 0 | 0 |
meth |
ZeroShotObjectDetectionPipeline.preprocess |
3 | 0 | 0 |
meth |
ZeroShotObjectDetectionPipeline._forward |
2 | 0 | 0 |
meth |
ZeroShotObjectDetectionPipeline.postprocess |
4 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.processing_utils (59 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ProcessorMixin.__init__ |
3 | 0 | 0 |
meth |
ProcessorMixin.__call__ |
6 | 5 | 0 |
meth |
ProcessorMixin.check_argument_for_proper_class |
3 | 0 | 0 |
meth |
ProcessorMixin.to_json_file |
2 | 1 | 0 |
meth |
ProcessorMixin.__repr__ |
1 | 0 | 0 |
meth |
ProcessorMixin.save_pretrained |
4 | 1 | 0 |
meth |
ProcessorMixin.get_processor_dict |
3 | 2 | 0 |
meth |
ProcessorMixin.from_args_and_dict |
4 | 1 | 0 |
meth |
ProcessorMixin._merge_kwargs |
4 | 3 | 0 |
meth |
ProcessorMixin.from_pretrained |
8 | 7 | 0 |
meth |
ProcessorMixin.get_attributes |
1 | 0 | 0 |
meth |
ProcessorMixin.register_for_auto_class |
2 | 0 | 0 |
meth |
ProcessorMixin._load_tokenizer_from_pretrained |
5 | 0 | 0 |
meth |
ProcessorMixin._get_arguments_from_pretrained |
4 | 0 | 0 |
meth |
ProcessorMixin.get_possibly_dynamic_module |
2 | 0 | 0 |
meth |
ProcessorMixin.batch_decode |
3 | 0 | 0 |
meth |
ProcessorMixin.decode |
3 | 0 | 0 |
meth |
ProcessorMixin.validate_init_kwargs |
3 | 0 | 0 |
meth |
ProcessorMixin.post_process_multimodal_output |
5 | 0 | 0 |
meth |
ProcessorMixin.post_process_image_text_to_text |
4 | 0 | 0 |
meth |
ProcessorMixin._check_special_mm_tokens |
4 | 3 | 0 |
prop |
ProcessorMixin.model_input_names |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
attr |
transformers_module |
1 | 0 | 0 |
attr |
MODALITY_TO_AUTOPROCESSOR_MAPPING |
1 | 0 | 0 |
meth |
MultiModalData.__contains__ |
2 | 0 | 0 |
meth |
MultiModalData.__getitem__ |
2 | 0 | 0 |
transformers.pytorch_utils (27 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
apply_chunking_to_forward |
5 | 4 | 0 |
meth |
Conv1D.__init__ |
3 | 0 | 0 |
meth |
Conv1D.forward |
2 | 0 | 0 |
attr |
Conv1D.nf |
1 | 0 | 0 |
attr |
Conv1D.nx |
1 | 0 | 0 |
attr |
Conv1D.weight |
1 | 0 | 0 |
attr |
Conv1D.bias |
1 | 0 | 0 |
attr |
is_torch_greater_or_equal_than_2_3 |
1 | 0 | 0 |
attr |
is_torch_greater_or_equal_than_1_12 |
1 | 0 | 0 |
func |
softmax_backward_data |
4 | 0 | 0 |
attr |
is_torch_greater_or_equal_than_2_1 |
1 | 0 | 0 |
attr |
is_torch_greater_or_equal_than_2_2 |
1 | 0 | 0 |
attr |
is_torch_greater_or_equal_than_2_4 |
1 | 0 | 0 |
func |
compile_compatible_method_lru_cache |
3 | 0 | 0 |
attr |
is_torch_greater_or_equal_than_2_8 |
1 | 0 | 0 |
attr |
is_torch_greater_or_equal_than_2_6 |
1 | 0 | 0 |
attr |
is_torch_greater_or_equal_than_2_0 |
1 | 0 | 0 |
attr |
is_torch_greater_or_equal_than_1_13 |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.quantizers.auto (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
register_quantization_config |
2 | 1 | 0 |
func |
register_quantizer |
2 | 1 | 0 |
func |
get_hf_quantizer |
6 | 0 | 0 |
meth |
AutoQuantizationConfig.from_dict |
2 | 1 | 0 |
meth |
AutoQuantizationConfig.from_pretrained |
3 | 0 | 0 |
meth |
AutoHfQuantizer.from_config |
3 | 1 | 0 |
meth |
AutoHfQuantizer.from_pretrained |
3 | 0 | 0 |
meth |
AutoHfQuantizer.merge_quantization_configs |
3 | 2 | 0 |
meth |
AutoHfQuantizer.supports_quant_method |
2 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.quantizers.base (44 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HfQuantizer.__init__ |
3 | 1 | 0 |
meth |
HfQuantizer.param_needs_quantization |
4 | 3 | 0 |
meth |
HfQuantizer.validate_environment |
3 | 0 | 0 |
meth |
HfQuantizer.update_tp_plan |
2 | 0 | 0 |
meth |
HfQuantizer.update_ep_plan |
2 | 0 | 0 |
meth |
HfQuantizer._process_model_before_weight_loading |
3 | 0 | 0 |
meth |
HfQuantizer.preprocess_model |
4 | 1 | 0 |
meth |
HfQuantizer._process_model_after_weight_loading |
3 | 1 | 0 |
meth |
HfQuantizer.postprocess_model |
3 | 1 | 0 |
meth |
HfQuantizer.remove_quantization_config |
2 | 0 | 0 |
meth |
HfQuantizer.dequantize |
3 | 0 | 0 |
meth |
HfQuantizer._dequantize |
3 | 0 | 0 |
meth |
HfQuantizer.get_modules_to_not_convert |
5 | 4 | 0 |
meth |
HfQuantizer.get_state_dict_and_metadata |
2 | 0 | 0 |
meth |
HfQuantizer.is_serializable |
1 | 0 | 0 |
meth |
HfQuantizer._convert_model_for_quantization |
2 | 0 | 0 |
meth |
HfQuantizer.get_quantize_ops |
1 | 0 | 0 |
meth |
HfQuantizer.get_weight_conversions |
1 | 0 | 0 |
prop |
HfQuantizer.is_trainable |
1 | 0 | 0 |
attr |
HfQuantizer.quantization_config |
1 | 0 | 0 |
attr |
HfQuantizer.pre_quantized |
1 | 0 | 0 |
func |
get_keys_to_not_convert |
2 | 1 | 0 |
meth |
SequentialLlama4TextExperts.init |
2 | 0 | 0 |
attr |
SequentialLlama4TextExperts.num_experts |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.quantizers.quantizer_aqlm (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AqlmHfQuantizer.init |
3 | 1 | 0 |
meth |
AqlmHfQuantizer.validate_environment |
3 | 0 | 0 |
meth |
AqlmHfQuantizer._process_model_before_weight_loading |
3 | 1 | 0 |
meth |
AqlmHfQuantizer.is_serializable |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.quantizers.quantizer_auto_round (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
AutoRoundQuantizer.init |
3 | 1 | 0 |
meth |
AutoRoundQuantizer.validate_environment |
3 | 0 | 0 |
meth |
AutoRoundQuantizer._process_model_before_weight_loading |
3 | 1 | 0 |
meth |
AutoRoundQuantizer._process_model_after_weight_loading |
3 | 1 | 0 |
meth |
AutoRoundQuantizer.is_serializable |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.quantizers.quantizer_awq (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
AwqQuantizer.init |
3 | 0 | 0 |
meth |
AwqQuantizer.validate_environment |
2 | 0 | 0 |
meth |
AwqQuantizer.update_dtype |
2 | 0 | 0 |
meth |
AwqQuantizer._process_model_before_weight_loading |
3 | 1 | 0 |
meth |
AwqQuantizer._process_model_after_weight_loading |
3 | 0 | 0 |
meth |
AwqQuantizer.is_serializable |
1 | 0 | 0 |
prop |
AwqQuantizer.is_trainable |
1 | 0 | 0 |
transformers.quantizers.quantizer_bitnet (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
BitNetHfQuantizer.init |
3 | 0 | 0 |
meth |
BitNetHfQuantizer.validate_environment |
3 | 0 | 0 |
meth |
BitNetHfQuantizer._process_model_before_weight_loading |
3 | 1 | 0 |
meth |
BitNetHfQuantizer.is_serializable |
1 | 0 | 0 |
meth |
BitNetHfQuantizer.get_weight_conversions |
1 | 0 | 0 |
transformers.quantizers.quantizer_bnb_4bit (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Bnb4BitHfQuantizer.init |
3 | 0 | 0 |
meth |
Bnb4BitHfQuantizer.validate_environment |
3 | 0 | 0 |
meth |
Bnb4BitHfQuantizer.param_needs_quantization |
4 | 3 | 0 |
meth |
Bnb4BitHfQuantizer.update_device_map |
2 | 0 | 0 |
meth |
Bnb4BitHfQuantizer._process_model_before_weight_loading |
4 | 1 | 0 |
meth |
Bnb4BitHfQuantizer._process_model_after_weight_loading |
3 | 1 | 0 |
meth |
Bnb4BitHfQuantizer.is_serializable |
1 | 0 | 0 |
meth |
Bnb4BitHfQuantizer._dequantize |
3 | 0 | 0 |
meth |
Bnb4BitHfQuantizer.get_quantize_ops |
1 | 0 | 0 |
meth |
Bnb4BitHfQuantizer.get_weight_conversions |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.quantizers.quantizer_bnb_8bit (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Bnb8BitHfQuantizer.init |
3 | 0 | 0 |
meth |
Bnb8BitHfQuantizer.validate_environment |
3 | 0 | 0 |
meth |
Bnb8BitHfQuantizer.update_device_map |
2 | 0 | 0 |
meth |
Bnb8BitHfQuantizer.param_needs_quantization |
4 | 3 | 0 |
meth |
Bnb8BitHfQuantizer._process_model_after_weight_loading |
3 | 1 | 0 |
meth |
Bnb8BitHfQuantizer._process_model_before_weight_loading |
4 | 1 | 0 |
meth |
Bnb8BitHfQuantizer.is_serializable |
1 | 0 | 0 |
meth |
Bnb8BitHfQuantizer._dequantize |
3 | 0 | 0 |
meth |
Bnb8BitHfQuantizer.get_quantize_ops |
1 | 0 | 0 |
meth |
Bnb8BitHfQuantizer.get_weight_conversions |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.quantizers.quantizer_compressed_tensors (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
CompressedTensorsHfQuantizer.init |
3 | 1 | 0 |
meth |
CompressedTensorsHfQuantizer.validate_environment |
3 | 0 | 0 |
meth |
CompressedTensorsHfQuantizer._process_model_before_weight_loading |
3 | 0 | 0 |
meth |
CompressedTensorsHfQuantizer._process_model_after_weight_loading |
3 | 0 | 0 |
meth |
CompressedTensorsHfQuantizer.update_tp_plan |
2 | 0 | 0 |
prop |
CompressedTensorsHfQuantizer.is_trainable |
1 | 0 | 0 |
attr |
CompressedTensorsHfQuantizer.compressor |
1 | 0 | 0 |
attr |
CompressedTensorsHfQuantizer.run_compressed |
1 | 0 | 0 |
attr |
CompressedTensorsHfQuantizer.quantization_config |
1 | 0 | 0 |
transformers.quantizers.quantizer_eetq (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EetqHfQuantizer.init |
3 | 0 | 0 |
meth |
EetqHfQuantizer.validate_environment |
3 | 0 | 0 |
meth |
EetqHfQuantizer.param_needs_quantization |
4 | 3 | 0 |
meth |
EetqHfQuantizer._process_model_before_weight_loading |
3 | 1 | 0 |
meth |
EetqHfQuantizer.is_serializable |
1 | 0 | 0 |
meth |
EetqHfQuantizer.get_quantize_ops |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.quantizers.quantizer_fbgemm_fp8 (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
FbgemmFp8HfQuantizer.init |
3 | 0 | 0 |
meth |
FbgemmFp8HfQuantizer.validate_environment |
3 | 0 | 0 |
meth |
FbgemmFp8HfQuantizer.param_needs_quantization |
4 | 3 | 0 |
meth |
FbgemmFp8HfQuantizer._process_model_before_weight_loading |
3 | 1 | 0 |
meth |
FbgemmFp8HfQuantizer._process_model_after_weight_loading |
3 | 0 | 0 |
meth |
FbgemmFp8HfQuantizer.update_tp_plan |
2 | 0 | 0 |
meth |
FbgemmFp8HfQuantizer.is_serializable |
1 | 0 | 0 |
meth |
FbgemmFp8HfQuantizer.get_quantize_ops |
1 | 0 | 0 |
transformers.quantizers.quantizer_finegrained_fp8 (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
FineGrainedFP8HfQuantizer.init |
3 | 0 | 0 |
meth |
FineGrainedFP8HfQuantizer.validate_environment |
3 | 0 | 0 |
meth |
FineGrainedFP8HfQuantizer.param_needs_quantization |
4 | 3 | 0 |
meth |
FineGrainedFP8HfQuantizer._process_model_before_weight_loading |
3 | 1 | 0 |
meth |
FineGrainedFP8HfQuantizer.update_tp_plan |
2 | 0 | 0 |
meth |
FineGrainedFP8HfQuantizer.is_serializable |
1 | 0 | 0 |
meth |
FineGrainedFP8HfQuantizer.get_quantize_ops |
1 | 0 | 0 |
meth |
FineGrainedFP8HfQuantizer.get_weight_conversions |
1 | 0 | 0 |
transformers.quantizers.quantizer_fouroversix (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FourOverSixHfQuantizer.init |
3 | 0 | 0 |
meth |
FourOverSixHfQuantizer.validate_environment |
3 | 0 | 0 |
meth |
FourOverSixHfQuantizer.param_needs_quantization |
4 | 3 | 0 |
meth |
FourOverSixHfQuantizer._process_model_before_weight_loading |
4 | 1 | 0 |
meth |
FourOverSixHfQuantizer._process_model_after_weight_loading |
3 | 1 | 0 |
meth |
FourOverSixHfQuantizer.is_serializable |
1 | 0 | 0 |
meth |
FourOverSixHfQuantizer.get_quantize_ops |
1 | 0 | 0 |
transformers.quantizers.quantizer_fp_quant (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
FPQuantHfQuantizer.init |
3 | 1 | 0 |
meth |
FPQuantHfQuantizer.validate_environment |
3 | 0 | 0 |
meth |
FPQuantHfQuantizer.param_needs_quantization |
4 | 3 | 0 |
meth |
FPQuantHfQuantizer._process_model_before_weight_loading |
3 | 1 | 0 |
meth |
FPQuantHfQuantizer.is_serializable |
1 | 0 | 0 |
meth |
FPQuantHfQuantizer.get_quantize_ops |
1 | 0 | 0 |
meth |
FPQuantHfQuantizer.get_weight_conversions |
1 | 0 | 0 |
prop |
FPQuantHfQuantizer.is_trainable |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.quantizers.quantizer_gptq (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
GptqHfQuantizer.init |
3 | 1 | 0 |
meth |
GptqHfQuantizer.validate_environment |
3 | 0 | 0 |
meth |
GptqHfQuantizer.update_device_map |
2 | 0 | 0 |
meth |
GptqHfQuantizer._process_model_before_weight_loading |
3 | 1 | 0 |
meth |
GptqHfQuantizer._process_model_after_weight_loading |
3 | 1 | 0 |
meth |
GptqHfQuantizer.is_serializable |
1 | 0 | 0 |
attr |
GptqHfQuantizer.optimum_quantizer |
1 | 0 | 0 |
transformers.quantizers.quantizer_higgs (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
HiggsHfQuantizer.init |
3 | 1 | 0 |
meth |
HiggsHfQuantizer.validate_environment |
3 | 0 | 0 |
meth |
HiggsHfQuantizer._process_model_before_weight_loading |
3 | 1 | 0 |
meth |
HiggsHfQuantizer._process_model_after_weight_loading |
3 | 1 | 0 |
meth |
HiggsHfQuantizer.is_serializable |
1 | 0 | 0 |
meth |
HiggsHfQuantizer.param_needs_quantization |
4 | 3 | 0 |
meth |
HiggsHfQuantizer._dequantize |
2 | 0 | 0 |
transformers.quantizers.quantizer_hqq (19 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
HqqHfQuantizer.init |
3 | 0 | 0 |
meth |
HqqHfQuantizer.validate_environment |
3 | 0 | 0 |
meth |
HqqHfQuantizer.param_needs_quantization |
4 | 3 | 0 |
meth |
HqqHfQuantizer._patch_layer_for_multigpu |
2 | 0 | 0 |
meth |
HqqHfQuantizer._process_model_before_weight_loading |
3 | 1 | 0 |
meth |
HqqHfQuantizer._process_model_after_weight_loading |
3 | 1 | 0 |
meth |
HqqHfQuantizer.is_serializable |
1 | 0 | 0 |
attr |
HqqHfQuantizer.dtype |
1 | 0 | 0 |
attr |
HqqHfQuantizer.using_multi_gpu |
1 | 0 | 0 |
attr |
HqqHfQuantizer.hqq_keys |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
prop |
weight |
1 | 0 | 0 |
transformers.quantizers.quantizer_metal (13 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MetalHfQuantizer.init |
3 | 0 | 0 |
meth |
MetalHfQuantizer.validate_environment |
3 | 0 | 0 |
meth |
MetalHfQuantizer.param_needs_quantization |
4 | 3 | 0 |
meth |
MetalHfQuantizer._process_model_before_weight_loading |
3 | 1 | 0 |
meth |
MetalHfQuantizer.is_serializable |
1 | 0 | 0 |
meth |
MetalHfQuantizer.get_quantize_ops |
1 | 0 | 0 |
meth |
MetalHfQuantizer.get_weight_conversions |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.quantizers.quantizer_mxfp4 (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Mxfp4HfQuantizer.init |
3 | 0 | 0 |
meth |
Mxfp4HfQuantizer._lazy_import_kernels |
1 | 0 | 0 |
meth |
Mxfp4HfQuantizer.validate_environment |
3 | 0 | 0 |
meth |
Mxfp4HfQuantizer.param_needs_quantization |
4 | 3 | 0 |
meth |
Mxfp4HfQuantizer._process_model_after_weight_loading |
3 | 1 | 0 |
meth |
Mxfp4HfQuantizer._process_model_before_weight_loading |
4 | 2 | 0 |
meth |
Mxfp4HfQuantizer.update_tp_plan |
2 | 0 | 0 |
meth |
Mxfp4HfQuantizer.update_ep_plan |
2 | 0 | 0 |
meth |
Mxfp4HfQuantizer.get_state_dict_and_metadata |
2 | 0 | 0 |
meth |
Mxfp4HfQuantizer.is_serializable |
1 | 0 | 0 |
meth |
Mxfp4HfQuantizer.get_quantize_ops |
1 | 0 | 0 |
meth |
Mxfp4HfQuantizer.get_weight_conversions |
1 | 0 | 0 |
attr |
Mxfp4HfQuantizer.triton_kernels_hub |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.quantizers.quantizer_quanto (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
QuantoHfQuantizer.init |
3 | 1 | 0 |
meth |
QuantoHfQuantizer.validate_environment |
3 | 0 | 0 |
meth |
QuantoHfQuantizer.param_needs_quantization |
4 | 3 | 0 |
meth |
QuantoHfQuantizer._process_model_before_weight_loading |
3 | 1 | 0 |
meth |
QuantoHfQuantizer.is_serializable |
1 | 0 | 0 |
meth |
QuantoHfQuantizer.get_quantize_ops |
1 | 0 | 0 |
attr |
QuantoHfQuantizer.quantized_param_size |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.quantizers.quantizer_quark (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
QuarkHfQuantizer.init |
3 | 0 | 0 |
meth |
QuarkHfQuantizer.validate_environment |
3 | 0 | 0 |
meth |
QuarkHfQuantizer._process_model_before_weight_loading |
3 | 1 | 0 |
meth |
QuarkHfQuantizer.param_needs_quantization |
4 | 3 | 0 |
meth |
QuarkHfQuantizer.is_serializable |
1 | 0 | 0 |
meth |
QuarkHfQuantizer.get_weight_conversions |
1 | 0 | 0 |
prop |
QuarkHfQuantizer.is_trainable |
1 | 0 | 0 |
attr |
QuarkHfQuantizer.json_export_config |
1 | 0 | 0 |
transformers.quantizers.quantizer_sinq (15 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SinqHfQuantizer.init |
3 | 1 | 0 |
meth |
SinqHfQuantizer.update_device_map |
2 | 0 | 0 |
meth |
SinqHfQuantizer.validate_environment |
3 | 1 | 0 |
meth |
SinqHfQuantizer.param_needs_quantization |
4 | 3 | 0 |
meth |
SinqHfQuantizer.get_quantize_ops |
1 | 0 | 0 |
meth |
SinqHfQuantizer.get_weight_conversions |
1 | 0 | 0 |
meth |
SinqHfQuantizer._process_model_before_weight_loading |
5 | 2 | 0 |
meth |
SinqHfQuantizer._process_model_after_weight_loading |
3 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.quantizers.quantizer_spqr (10 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
SpQRHfQuantizer.init |
3 | 1 | 0 |
meth |
SpQRHfQuantizer.validate_environment |
3 | 0 | 0 |
meth |
SpQRHfQuantizer._process_model_before_weight_loading |
3 | 1 | 0 |
meth |
SpQRHfQuantizer.is_serializable |
1 | 0 | 0 |
prop |
SpQRHfQuantizer.is_trainable |
1 | 0 | 0 |
transformers.quantizers.quantizer_torchao (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
logger |
1 | 0 | 0 |
meth |
TorchAoHfQuantizer.init |
3 | 0 | 0 |
meth |
TorchAoHfQuantizer.validate_environment |
3 | 0 | 0 |
meth |
TorchAoHfQuantizer.update_dtype |
2 | 0 | 0 |
meth |
TorchAoHfQuantizer.get_state_dict_and_metadata |
2 | 0 | 0 |
meth |
TorchAoHfQuantizer._process_model_before_weight_loading |
4 | 1 | 0 |
meth |
TorchAoHfQuantizer.param_needs_quantization |
4 | 3 | 0 |
meth |
TorchAoHfQuantizer._process_model_after_weight_loading |
3 | 0 | 0 |
meth |
TorchAoHfQuantizer.set_metadata |
2 | 1 | 0 |
meth |
TorchAoHfQuantizer.get_quantize_ops |
1 | 0 | 0 |
meth |
TorchAoHfQuantizer.get_weight_conversions |
1 | 0 | 0 |
attr |
TorchAoHfQuantizer.quantized_param_size |
1 | 0 | 0 |
attr |
TORCHAO_VERSION |
1 | 0 | 0 |
transformers.quantizers.quantizer_vptq (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
VptqHfQuantizer.init |
3 | 1 | 0 |
meth |
VptqHfQuantizer.validate_environment |
3 | 0 | 0 |
meth |
VptqHfQuantizer._process_model_before_weight_loading |
3 | 1 | 0 |
meth |
VptqHfQuantizer.is_serializable |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.quantizers.quantizers_utils (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
get_module_from_name |
3 | 2 | 0 |
func |
should_convert_module |
3 | 1 | 0 |
transformers.safetensors_conversion (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
get_conversion_pr_reference |
4 | 2 | 0 |
func |
spawn_conversion |
4 | 3 | 0 |
func |
auto_conversion |
6 | 4 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.testing_utils (409 missing, 3 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
require_numba |
2 | 0 | 0 |
func |
parse_flag_from_env |
3 | 0 | 0 |
func |
pytest_addoption_shared |
2 | 0 | 0 |
func |
get_torch_dist_unique_port |
1 | 0 | 0 |
func |
require_torch_xpu |
2 | 0 | 0 |
attr |
IS_XPU_SYSTEM |
1 | 0 | 0 |
func |
require_torch_mps |
2 | 0 | 0 |
func |
require_torchaudio |
2 | 0 | 0 |
func |
is_staging_test |
2 | 0 | 0 |
func |
require_cv2 |
2 | 0 | 0 |
func |
require_optimum |
2 | 0 | 0 |
func |
require_flash_attn_3 |
2 | 0 | 0 |
meth |
CaptureLogger.init |
2 | 0 | 0 |
meth |
CaptureLogger.enter |
1 | 0 | 0 |
meth |
CaptureLogger.exit |
2 | 0 | 0 |
meth |
CaptureLogger.repr |
1 | 0 | 0 |
attr |
CaptureLogger.logger |
1 | 0 | 0 |
attr |
CaptureLogger.io |
1 | 0 | 0 |
attr |
CaptureLogger.sh |
1 | 0 | 0 |
attr |
CaptureLogger.out |
1 | 0 | 0 |
func |
backend_reset_max_memory_allocated |
2 | 1 | 0 |
func |
require_fouroversix |
2 | 0 | 0 |
func |
require_deepspeed |
2 | 0 | 0 |
func |
require_cython |
2 | 0 | 0 |
func |
assert_screenout |
3 | 0 | 0 |
func |
get_gpu_count |
1 | 0 | 0 |
func |
require_torchao_version_greater_or_equal |
2 | 0 | 0 |
func |
require_torch_large_accelerator |
3 | 1 | 0 |
func |
cleanup |
3 | 1 | 0 |
func |
execute_subprocess_async |
7 | 1 | 0 |
func |
require_accelerate |
3 | 1 | 0 |
meth |
Expectations.get_expectation |
1 | 1 | 1 |
meth |
Expectations.find_expectation |
2 | 2 | 1 |
meth |
Expectations.repr |
1 | 0 | 0 |
func |
nested_simplify |
3 | 0 | 0 |
func |
preprocess_string |
3 | 0 | 0 |
func |
require_torch_multi_hpu |
2 | 0 | 0 |
attr |
device_spec_module |
1 | 0 | 0 |
func |
cmd_exists |
2 | 0 | 0 |
meth |
CaptureStd.init |
4 | 0 | 0 |
meth |
CaptureStd.enter |
1 | 0 | 0 |
meth |
CaptureStd.exit |
2 | 0 | 0 |
meth |
CaptureStd.repr |
1 | 0 | 0 |
attr |
CaptureStd.replay |
1 | 0 | 0 |
attr |
CaptureStd.out_buf |
1 | 0 | 0 |
attr |
CaptureStd.out |
1 | 0 | 0 |
attr |
CaptureStd.err_buf |
1 | 0 | 0 |
attr |
CaptureStd.err |
1 | 0 | 0 |
func |
require_spqr |
2 | 0 | 0 |
func |
require_nltk |
2 | 0 | 0 |
func |
update_mapping_from_spec |
3 | 2 | 0 |
func |
require_torch_tf32 |
2 | 0 | 0 |
func |
require_fbgemm_gpu |
2 | 0 | 0 |
func |
require_torch_bf16 |
2 | 0 | 0 |
func |
require_sudachi_projection |
2 | 0 | 0 |
func |
require_huggingface_hub_greater_or_equal |
2 | 1 | 0 |
func |
require_sudachi |
2 | 0 | 0 |
func |
require_jumanpp |
2 | 0 | 0 |
func |
to_2tuple |
2 | 0 | 0 |
func |
run_test_in_subprocess |
5 | 0 | 0 |
func |
require_non_hpu |
2 | 0 | 0 |
func |
require_torch_greater_or_equal |
2 | 1 | 0 |
meth |
RequestCounter.enter |
1 | 0 | 0 |
meth |
RequestCounter.exit |
3 | 1 | 0 |
func |
require_jinja |
2 | 0 | 0 |
func |
require_torch_multi_gpu |
2 | 0 | 0 |
func |
tooslow |
2 | 0 | 0 |
func |
require_clearml |
2 | 0 | 0 |
func |
require_apex |
2 | 0 | 0 |
func |
backend_torch_accelerator_module |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
require_tensorboard |
2 | 0 | 0 |
func |
hub_retry |
3 | 2 | 0 |
func |
require_optimum_quanto |
2 | 0 | 0 |
func |
require_scipy |
2 | 0 | 0 |
attr |
IS_NPU_SYSTEM |
1 | 0 | 0 |
func |
require_detectron2 |
2 | 0 | 0 |
func |
require_compressed_tensors |
2 | 0 | 0 |
attr |
device_spec_dir |
1 | 0 | 0 |
meth |
CaptureStdout.init |
2 | 0 | 0 |
func |
require_optuna |
2 | 0 | 0 |
func |
get_steps_per_epoch |
2 | 2 | 1 |
func |
require_liger_kernel |
2 | 0 | 0 |
func |
run_test_using_subprocess |
2 | 0 | 0 |
func |
require_pytorch_quantization |
2 | 0 | 0 |
func |
is_training_test |
2 | 0 | 0 |
func |
require_decord |
2 | 0 | 0 |
func |
require_auto_round |
2 | 0 | 0 |
func |
require_peft |
2 | 0 | 0 |
func |
require_natten |
2 | 0 | 0 |
func |
require_torch_non_multi_gpu |
2 | 0 | 0 |
func |
require_trackio |
2 | 0 | 0 |
func |
require_schedulefree |
2 | 0 | 0 |
func |
require_onnx |
2 | 0 | 0 |
func |
require_torch_gpu |
2 | 0 | 0 |
func |
require_large_cpu_ram |
3 | 1 | 0 |
func |
require_librosa |
2 | 0 | 0 |
func |
require_spacy |
2 | 0 | 0 |
func |
require_torch_xla |
2 | 0 | 0 |
func |
require_torch_npu |
2 | 0 | 0 |
func |
require_phonemizer |
2 | 0 | 0 |
func |
check_json_file_has_correct_format |
2 | 0 | 0 |
func |
set_model_for_less_flaky_test |
2 | 0 | 0 |
func |
require_torch_accelerator |
2 | 0 | 0 |
func |
backend_synchronize |
2 | 1 | 0 |
func |
require_non_xpu |
2 | 0 | 0 |
func |
require_openai |
2 | 0 | 0 |
func |
require_fsdp |
3 | 1 | 0 |
func |
require_torch_multi_accelerator |
2 | 0 | 0 |
func |
apply_skip_if_not_implemented |
2 | 0 | 0 |
func |
require_quark |
2 | 0 | 0 |
func |
require_torchvision |
2 | 0 | 0 |
func |
require_av |
2 | 0 | 0 |
func |
require_usr_bin_time |
2 | 0 | 0 |
func |
backend_max_memory_allocated |
2 | 1 | 0 |
func |
require_galore_torch |
2 | 0 | 0 |
func |
get_tests_dir |
2 | 0 | 0 |
func |
require_apollo_torch |
2 | 0 | 0 |
func |
mockenv |
2 | 0 | 0 |
func |
require_tokenizers |
2 | 0 | 0 |
func |
require_torch_multi_npu |
2 | 0 | 0 |
func |
require_torch_non_multi_accelerator |
2 | 0 | 0 |
func |
convert_all_safetensors_to_bins |
2 | 1 | 0 |
meth |
HfDocTestParser.parse |
3 | 0 | 0 |
attr |
HfDocTestParser._EXAMPLE_RE |
1 | 0 | 0 |
func |
backend_empty_cache |
2 | 1 | 0 |
func |
backend_reset_peak_memory_stats |
2 | 1 | 0 |
func |
require_qutlass |
2 | 0 | 0 |
func |
pytest_xdist_worker_id |
1 | 0 | 0 |
func |
parse_int_from_env |
3 | 0 | 0 |
func |
torchrun |
5 | 4 | 0 |
meth |
CPUMemoryMonitor.init |
1 | 0 | 0 |
attr |
CPUMemoryMonitor.device_name |
1 | 0 | 0 |
attr |
CPUMemoryMonitor.total_memory |
1 | 0 | 0 |
attr |
CPUMemoryMonitor.total_memory_gib |
1 | 0 | 0 |
func |
require_timm |
2 | 0 | 0 |
func |
require_g2p_en |
2 | 0 | 0 |
func |
require_essentia |
2 | 0 | 0 |
func |
is_flaky |
4 | 3 | 0 |
func |
require_pandas |
2 | 0 | 0 |
func |
require_deterministic_for_xpu |
2 | 0 | 0 |
func |
require_gptqmodel |
2 | 0 | 0 |
func |
read_json_file |
2 | 0 | 0 |
func |
require_mistral_common |
2 | 0 | 0 |
func |
require_tiktoken |
2 | 0 | 0 |
func |
backend_device_count |
2 | 1 | 0 |
func |
backend_memory_allocated |
2 | 1 | 0 |
meth |
ColoredFormatter.init |
3 | 2 | 0 |
func |
require_triton |
2 | 1 | 0 |
func |
custom_tokenizers |
2 | 0 | 0 |
func |
require_fp_quant |
2 | 0 | 0 |
meth |
CaptureStderr.init |
2 | 0 | 0 |
func |
force_serialization_as_bin_files |
1 | 0 | 0 |
func |
require_torch_fp16 |
2 | 0 | 0 |
func |
slow |
2 | 0 | 0 |
func |
apply_print_resets |
2 | 0 | 0 |
func |
require_pretty_midi |
2 | 0 | 0 |
func |
write_file |
3 | 0 | 0 |
func |
require_sentencepiece |
2 | 0 | 0 |
meth |
TestCasePlus.setUp |
1 | 0 | 0 |
meth |
TestCasePlus.get_env |
1 | 0 | 0 |
meth |
TestCasePlus.get_auto_remove_tmp_dir |
5 | 0 | 0 |
meth |
TestCasePlus.python_one_liner_max_rss |
2 | 0 | 0 |
meth |
TestCasePlus.tearDown |
1 | 0 | 0 |
prop |
TestCasePlus.test_file_path |
1 | 0 | 0 |
prop |
TestCasePlus.test_file_path_str |
1 | 0 | 0 |
prop |
TestCasePlus.test_file_dir |
1 | 0 | 0 |
prop |
TestCasePlus.test_file_dir_str |
1 | 0 | 0 |
prop |
TestCasePlus.tests_dir |
1 | 0 | 0 |
prop |
TestCasePlus.tests_dir_str |
1 | 0 | 0 |
prop |
TestCasePlus.examples_dir |
1 | 0 | 0 |
prop |
TestCasePlus.examples_dir_str |
1 | 0 | 0 |
prop |
TestCasePlus.repo_root_dir |
1 | 0 | 0 |
prop |
TestCasePlus.repo_root_dir_str |
1 | 0 | 0 |
prop |
TestCasePlus.src_dir |
1 | 0 | 0 |
prop |
TestCasePlus.src_dir_str |
1 | 0 | 0 |
func |
is_agent_test |
2 | 0 | 0 |
func |
run_first |
2 | 0 | 0 |
func |
require_hqq |
2 | 0 | 0 |
func |
require_torchcodec |
2 | 0 | 0 |
func |
require_bs4 |
2 | 0 | 0 |
func |
require_grokadamw |
2 | 0 | 0 |
func |
require_torch_optimi |
2 | 0 | 0 |
func |
require_aqlm |
2 | 0 | 0 |
func |
require_vptq |
2 | 0 | 0 |
func |
require_speech |
2 | 0 | 0 |
func |
require_fp8 |
2 | 0 | 0 |
func |
require_torch |
2 | 0 | 0 |
func |
require_seqio |
2 | 0 | 0 |
func |
require_torchao |
2 | 0 | 0 |
func |
is_tensor_parallel_test |
2 | 0 | 0 |
func |
require_jmespath |
2 | 0 | 0 |
func |
require_sacremoses |
2 | 0 | 0 |
func |
require_faiss |
2 | 0 | 0 |
func |
require_flash_attn |
2 | 0 | 0 |
func |
pytest_terminal_summary_main |
3 | 0 | 0 |
func |
compare_pipeline_output_to_hub_spec |
3 | 0 | 0 |
func |
require_bitsandbytes |
2 | 0 | 0 |
func |
require_levenshtein |
2 | 0 | 0 |
meth |
TemporaryHubRepo.enter |
1 | 0 | 0 |
meth |
TemporaryHubRepo.exit |
4 | 0 | 0 |
attr |
TemporaryHubRepo.token |
1 | 0 | 0 |
attr |
TemporaryHubRepo.repo_url |
1 | 0 | 0 |
func |
require_wandb |
2 | 0 | 0 |
func |
require_lomo |
2 | 0 | 0 |
func |
require_torch_up_to_2_accelerators |
2 | 0 | 0 |
func |
require_swanlab |
2 | 0 | 0 |
func |
require_gguf |
3 | 1 | 0 |
func |
require_pyctcdecode |
2 | 0 | 0 |
func |
set_config_for_less_flaky_test |
2 | 0 | 0 |
func |
require_torch_up_to_2_gpus |
2 | 0 | 0 |
func |
patch_testing_methods_to_collect_info |
1 | 0 | 0 |
func |
require_torch_large_gpu |
3 | 1 | 0 |
func |
require_torch_neuroncore |
2 | 0 | 0 |
func |
is_pipeline_test |
2 | 0 | 0 |
func |
skip_if_not_implemented |
2 | 0 | 0 |
func |
require_torch_tensorrt_fx |
2 | 0 | 0 |
func |
require_torch_multi_xpu |
2 | 0 | 0 |
func |
mockenv_context |
3 | 0 | 0 |
func |
run_command |
3 | 1 | 0 |
func |
require_rjieba |
2 | 0 | 0 |
func |
LoggingLevel |
2 | 0 | 0 |
func |
require_ray |
2 | 0 | 0 |
func |
require_pytesseract |
2 | 0 | 0 |
func |
patch_torch_compile_force_graph |
1 | 0 | 0 |
func |
require_kernels |
2 | 0 | 0 |
func |
backend_manual_seed |
3 | 2 | 0 |
func |
require_flute_hadamard |
2 | 0 | 0 |
func |
require_vision |
2 | 0 | 0 |
transformers.time_series_utils (29 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LambdaLayer.init |
2 | 0 | 0 |
meth |
LambdaLayer.forward |
3 | 0 | 0 |
attr |
LambdaLayer.function |
1 | 0 | 0 |
meth |
NormalOutput.domain_map |
3 | 2 | 0 |
meth |
StudentTOutput.domain_map |
4 | 3 | 0 |
meth |
AffineTransformed.init |
5 | 1 | 0 |
prop |
AffineTransformed.mean |
1 | 0 | 0 |
prop |
AffineTransformed.variance |
1 | 0 | 0 |
prop |
AffineTransformed.stddev |
1 | 0 | 0 |
attr |
AffineTransformed.scale |
1 | 0 | 0 |
attr |
AffineTransformed.loc |
1 | 0 | 0 |
meth |
NegativeBinomialOutput.domain_map |
3 | 2 | 0 |
meth |
NegativeBinomialOutput._base_distribution |
2 | 1 | 0 |
meth |
NegativeBinomialOutput.distribution |
4 | 3 | 0 |
meth |
ParameterProjection.init |
5 | 4 | 0 |
attr |
ParameterProjection.args_dim |
1 | 0 | 0 |
attr |
ParameterProjection.proj |
1 | 0 | 0 |
attr |
ParameterProjection.domain_map |
1 | 0 | 0 |
meth |
DistributionOutput._base_distribution |
2 | 0 | 0 |
meth |
DistributionOutput.distribution |
4 | 3 | 0 |
meth |
DistributionOutput.domain_map |
2 | 1 | 0 |
attr |
DistributionOutput.dim |
1 | 0 | 0 |
transformers.tokenization_mistral_common (24 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MistralCommonBackend.init |
9 | 7 | 0 |
meth |
MistralCommonBackend.len |
1 | 0 | 0 |
meth |
MistralCommonBackend.encode |
15 | 14 | 0 |
meth |
MistralCommonBackend._decode |
5 | 4 | 0 |
meth |
MistralCommonBackend.decode |
5 | 4 | 0 |
meth |
MistralCommonBackend.batch_decode |
5 | 4 | 0 |
meth |
MistralCommonBackend.tokenize |
5 | 4 | 0 |
meth |
MistralCommonBackend._encode_plus |
21 | 20 | 0 |
meth |
MistralCommonBackend.prepare_for_model |
21 | 20 | 0 |
meth |
MistralCommonBackend.truncate_sequences |
7 | 6 | 0 |
meth |
MistralCommonBackend.apply_chat_template |
12 | 11 | 0 |
meth |
MistralCommonBackend.call |
21 | 20 | 0 |
meth |
MistralCommonBackend.from_pretrained |
15 | 12 | 0 |
meth |
MistralCommonBackend.save_pretrained |
8 | 7 | 0 |
meth |
MistralCommonBackend.added_tokens_decoder |
1 | 0 | 0 |
meth |
MistralCommonBackend.add_special_tokens |
3 | 2 | 0 |
meth |
MistralCommonBackend.add_tokens |
3 | 2 | 0 |
meth |
MistralCommonBackend.convert_added_tokens |
4 | 3 | 0 |
meth |
MistralCommonBackend.save_chat_templates |
5 | 4 | 0 |
attr |
MistralCommonBackend.init_kwargs |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.tokenization_python (35 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PythonBackend.init |
2 | 0 | 0 |
meth |
PythonBackend.len |
1 | 0 | 0 |
meth |
PythonBackend._update_total_vocab_size |
1 | 0 | 0 |
meth |
PythonBackend._update_trie |
2 | 1 | 0 |
meth |
PythonBackend.tokenize |
3 | 2 | 0 |
meth |
PythonBackend._tokenize |
3 | 0 | 0 |
meth |
PythonBackend._convert_token_to_id_with_added_voc |
2 | 0 | 0 |
meth |
PythonBackend._convert_token_to_id |
2 | 0 | 0 |
meth |
PythonBackend._encode_plus |
19 | 18 | 0 |
meth |
PythonBackend.prepare_for_tokenization |
4 | 3 | 0 |
meth |
PythonBackend._decode |
5 | 4 | 0 |
meth |
PythonBackend.prepare_for_model |
19 | 18 | 0 |
attr |
PythonBackend.tokens_trie |
1 | 0 | 0 |
attr |
PythonBackend.total_vocab_size |
1 | 0 | 0 |
attr |
PythonBackend.token_type_ids_pattern |
1 | 0 | 0 |
attr |
PythonBackend.token_type_ids_include_special_tokens |
1 | 0 | 0 |
attr |
PythonBackend.special_tokens_pattern |
1 | 0 | 0 |
meth |
ExtensionsTrie.init |
2 | 0 | 0 |
meth |
ExtensionsTrie.extensions |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
Trie.init |
2 | 0 | 0 |
meth |
Trie.update |
2 | 0 | 0 |
meth |
Trie.add |
2 | 1 | 0 |
meth |
Trie.cut_text |
3 | 0 | 0 |
attr |
Trie.data |
1 | 0 | 0 |
transformers.tokenization_utils_base (95 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PreTrainedTokenizerBase.init |
2 | 0 | 0 |
meth |
PreTrainedTokenizerBase._set_processor_class |
2 | 1 | 0 |
meth |
PreTrainedTokenizerBase.add_special_tokens |
3 | 2 | 0 |
meth |
PreTrainedTokenizerBase.setattr |
3 | 0 | 0 |
meth |
PreTrainedTokenizerBase.getattr |
2 | 0 | 0 |
meth |
PreTrainedTokenizerBase._set_model_specific_special_tokens |
2 | 1 | 0 |
meth |
PreTrainedTokenizerBase.from_pretrained |
10 | 6 | 0 |
meth |
PreTrainedTokenizerBase._from_pretrained |
12 | 0 | 0 |
meth |
PreTrainedTokenizerBase.convert_to_native_format |
2 | 0 | 0 |
meth |
PreTrainedTokenizerBase.convert_added_tokens |
4 | 1 | 0 |
meth |
PreTrainedTokenizerBase.save_pretrained |
6 | 5 | 0 |
meth |
PreTrainedTokenizerBase.tokenize |
5 | 4 | 0 |
meth |
PreTrainedTokenizerBase.encode |
11 | 10 | 0 |
meth |
PreTrainedTokenizerBase._get_padding_truncation_strategies |
7 | 0 | 0 |
meth |
PreTrainedTokenizerBase.call |
23 | 22 | 0 |
meth |
PreTrainedTokenizerBase._encode_plus |
21 | 20 | 0 |
meth |
PreTrainedTokenizerBase.decode |
4 | 3 | 0 |
meth |
PreTrainedTokenizerBase.batch_decode |
5 | 4 | 0 |
meth |
PreTrainedTokenizerBase._decode |
5 | 4 | 0 |
meth |
PreTrainedTokenizerBase._eventual_warn_about_too_long_sequence |
4 | 3 | 0 |
meth |
PreTrainedTokenizerBase.register_for_auto_class |
2 | 0 | 0 |
meth |
PreTrainedTokenizerBase.apply_chat_template |
16 | 15 | 0 |
meth |
PreTrainedTokenizerBase.encode_message_with_chat_template |
4 | 3 | 0 |
meth |
PreTrainedTokenizerBase.save_chat_templates |
5 | 4 | 0 |
meth |
PreTrainedTokenizerBase.parse_response |
3 | 2 | 0 |
prop |
PreTrainedTokenizerBase.max_len_single_sentence |
2 | 1 | 0 |
prop |
PreTrainedTokenizerBase.max_len_sentences_pair |
2 | 1 | 0 |
attr |
PreTrainedTokenizerBase.init_inputs |
1 | 0 | 0 |
attr |
PreTrainedTokenizerBase.init_kwargs |
1 | 0 | 0 |
attr |
PreTrainedTokenizerBase.name_or_path |
1 | 0 | 0 |
attr |
PreTrainedTokenizerBase.verbose |
1 | 0 | 0 |
attr |
PreTrainedTokenizerBase.model_max_length |
1 | 0 | 0 |
attr |
PreTrainedTokenizerBase.clean_up_tokenization_spaces |
1 | 0 | 0 |
attr |
PreTrainedTokenizerBase.split_special_tokens |
1 | 0 | 0 |
attr |
PreTrainedTokenizerBase.chat_template |
1 | 0 | 0 |
attr |
PreTrainedTokenizerBase.response_schema |
1 | 0 | 0 |
attr |
PreTrainedTokenizerBase.deprecation_warnings |
1 | 0 | 0 |
attr |
PreTrainedTokenizerBase.backend |
1 | 0 | 0 |
attr |
PreTrainedTokenizerBase.files_loaded |
1 | 0 | 0 |
meth |
AddedToken.init |
7 | 1 | 0 |
meth |
AddedToken.getstate |
1 | 0 | 0 |
meth |
AddedToken.str |
1 | 0 | 0 |
meth |
BatchEncoding.init |
6 | 5 | 0 |
meth |
BatchEncoding.getattr |
2 | 1 | 0 |
meth |
BatchEncoding.getstate |
1 | 0 | 0 |
meth |
BatchEncoding.setstate |
2 | 0 | 0 |
meth |
BatchEncoding.convert_to_tensors |
3 | 2 | 0 |
func |
find_sentencepiece_model_file |
3 | 0 | 0 |
attr |
LARGE_INTEGER |
1 | 0 | 0 |
func |
import_protobuf_decode_error |
2 | 0 | 0 |
func |
load_vocab_and_merges |
3 | 0 | 0 |
func |
generate_merges |
4 | 2 | 0 |
attr |
VERY_LARGE_INTEGER |
1 | 0 | 0 |
func |
flatten |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.tokenization_utils_sentencepiece (21 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SentencePieceBackend.init |
2 | 0 | 0 |
meth |
SentencePieceBackend.get_vocab |
1 | 0 | 0 |
meth |
SentencePieceBackend._update_trie |
2 | 1 | 0 |
meth |
SentencePieceBackend._tokenize |
3 | 0 | 0 |
meth |
SentencePieceBackend._convert_token_to_id |
2 | 0 | 0 |
meth |
SentencePieceBackend._convert_id_to_token |
2 | 0 | 0 |
meth |
SentencePieceBackend._decode |
6 | 5 | 0 |
attr |
SentencePieceBackend.vocab_file |
1 | 0 | 0 |
attr |
SentencePieceBackend.legacy |
1 | 0 | 0 |
attr |
SentencePieceBackend.sp_model_kwargs |
1 | 0 | 0 |
attr |
SentencePieceBackend.sp_model |
1 | 0 | 0 |
attr |
SentencePieceBackend.total_vocab_size |
1 | 0 | 0 |
meth |
SentencePieceExtractor.init |
2 | 1 | 0 |
meth |
SentencePieceExtractor.extract |
2 | 1 | 0 |
attr |
SentencePieceExtractor.sp |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.tokenization_utils_tokenizers (38 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TokenizersBackend.convert_to_native_format |
3 | 0 | 0 |
meth |
TokenizersBackend.init |
3 | 0 | 0 |
meth |
TokenizersBackend.update_post_processor |
1 | 0 | 0 |
meth |
TokenizersBackend._post_init |
1 | 0 | 0 |
meth |
TokenizersBackend._add_tokens |
3 | 2 | 0 |
meth |
TokenizersBackend.tokenize |
5 | 4 | 0 |
meth |
TokenizersBackend.set_truncation_and_padding |
7 | 6 | 0 |
meth |
TokenizersBackend._encode_plus |
21 | 20 | 0 |
meth |
TokenizersBackend._decode |
5 | 4 | 0 |
meth |
TokenizersBackend.train_new_from_iterator |
7 | 0 | 0 |
meth |
TokenizersBackend._patch_mistral_regex |
11 | 0 | 0 |
prop |
TokenizersBackend.add_eos_token |
2 | 0 | 0 |
prop |
TokenizersBackend.add_bos_token |
2 | 0 | 0 |
attr |
TokenizersBackend.add_prefix_space |
1 | 0 | 0 |
attr |
TokenizersBackend.vocab_file |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.trainer (72 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Trainer.init |
14 | 13 | 0 |
meth |
Trainer._build_accelerator_args |
2 | 1 | 0 |
meth |
Trainer.create_optimizer |
2 | 1 | 0 |
meth |
Trainer._init_training_state |
6 | 1 | 0 |
meth |
Trainer._prepare_for_training |
4 | 0 | 0 |
meth |
Trainer._run_epoch |
12 | 0 | 0 |
meth |
Trainer._finalize_training |
4 | 0 | 0 |
meth |
Trainer._update_auto_batch_size |
2 | 0 | 0 |
meth |
Trainer._track_num_input_tokens |
2 | 0 | 0 |
meth |
Trainer._clip_grad_norm |
2 | 0 | 0 |
meth |
Trainer._get_grad_norm |
3 | 0 | 0 |
meth |
Trainer._issue_warnings_after_load |
2 | 2 | 1 |
meth |
Trainer.push_to_hub |
6 | 5 | 0 |
meth |
Trainer.hyperparameter_search |
8 | 7 | 0 |
attr |
Trainer.args |
1 | 0 | 0 |
attr |
Trainer.deepspeed |
1 | 0 | 0 |
attr |
Trainer.model |
1 | 0 | 0 |
attr |
Trainer.is_model_parallel |
1 | 0 | 0 |
attr |
Trainer.is_fsdp_xla_enabled |
1 | 0 | 0 |
attr |
Trainer.model_wrapped |
1 | 0 | 0 |
attr |
Trainer.label_names |
1 | 0 | 0 |
attr |
Trainer.can_return_loss |
1 | 0 | 0 |
attr |
Trainer.data_collator |
1 | 0 | 0 |
attr |
Trainer.train_dataset |
1 | 0 | 0 |
attr |
Trainer.eval_dataset |
1 | 0 | 0 |
attr |
Trainer.processing_class |
1 | 0 | 0 |
attr |
Trainer.neftune_noise_alpha |
1 | 0 | 0 |
attr |
Trainer.compute_loss_func |
1 | 0 | 0 |
attr |
Trainer.compute_metrics |
1 | 0 | 0 |
attr |
Trainer.preprocess_logits_for_metrics |
1 | 0 | 0 |
attr |
Trainer.optimizer_cls_and_kwargs |
1 | 0 | 0 |
attr |
Trainer.callback_handler |
1 | 0 | 0 |
attr |
Trainer.hub_model_id |
1 | 0 | 0 |
attr |
Trainer.control |
1 | 0 | 0 |
attr |
Trainer.state |
1 | 0 | 0 |
attr |
Trainer.is_in_train |
1 | 0 | 0 |
attr |
Trainer.hp_name |
1 | 0 | 0 |
attr |
Trainer.hp_search_backend |
1 | 0 | 0 |
attr |
Trainer.current_flos |
1 | 0 | 0 |
attr |
Trainer.is_fsdp_xla_v2_enabled |
1 | 0 | 0 |
attr |
Trainer.is_fsdp_xla_v1_enabled |
1 | 0 | 0 |
attr |
Trainer.place_model_on_device |
1 | 0 | 0 |
attr |
Trainer.model_accepts_loss_kwargs |
1 | 0 | 0 |
attr |
Trainer.model_init |
1 | 0 | 0 |
attr |
Trainer.label_smoother |
1 | 0 | 0 |
attr |
IS_XLA_FSDPV2_POST_2_2 |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.trainer_callback (167 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PrinterCallback.on_log |
6 | 0 | 0 |
meth |
EarlyStoppingCallback.init |
3 | 2 | 0 |
meth |
EarlyStoppingCallback.check_metric_value |
5 | 0 | 0 |
meth |
EarlyStoppingCallback.on_train_begin |
5 | 0 | 0 |
meth |
EarlyStoppingCallback.on_evaluate |
6 | 0 | 0 |
attr |
EarlyStoppingCallback.early_stopping_patience |
1 | 0 | 0 |
attr |
EarlyStoppingCallback.early_stopping_threshold |
1 | 0 | 0 |
attr |
EarlyStoppingCallback.early_stopping_patience_counter |
1 | 0 | 0 |
meth |
TrainerCallback.on_init_end |
5 | 3 | 0 |
meth |
TrainerCallback.on_train_begin |
5 | 3 | 0 |
meth |
TrainerCallback.on_train_end |
5 | 3 | 0 |
meth |
TrainerCallback.on_epoch_begin |
5 | 3 | 0 |
meth |
TrainerCallback.on_epoch_end |
5 | 3 | 0 |
meth |
TrainerCallback.on_step_begin |
5 | 3 | 0 |
meth |
TrainerCallback.on_pre_optimizer_step |
5 | 3 | 0 |
meth |
TrainerCallback.on_optimizer_step |
5 | 3 | 0 |
meth |
TrainerCallback.on_substep_end |
5 | 3 | 0 |
meth |
TrainerCallback.on_step_end |
5 | 3 | 0 |
meth |
TrainerCallback.on_evaluate |
5 | 3 | 0 |
meth |
TrainerCallback.on_predict |
6 | 3 | 0 |
meth |
TrainerCallback.on_save |
5 | 3 | 0 |
meth |
TrainerCallback.on_log |
5 | 3 | 0 |
meth |
TrainerCallback.on_prediction_step |
5 | 3 | 0 |
meth |
TrainerCallback.on_push_begin |
5 | 3 | 0 |
meth |
DefaultFlowCallback.on_step_end |
5 | 3 | 0 |
meth |
DefaultFlowCallback.on_epoch_end |
5 | 3 | 0 |
meth |
TrainerControl._new_training |
1 | 0 | 0 |
meth |
TrainerControl._new_epoch |
1 | 0 | 0 |
meth |
TrainerControl._new_step |
1 | 0 | 0 |
meth |
TrainerState.post_init |
1 | 0 | 0 |
meth |
TrainerState.save_to_json |
2 | 1 | 0 |
meth |
TrainerState.load_from_json |
2 | 1 | 0 |
meth |
TrainerState.compute_steps |
3 | 0 | 0 |
meth |
TrainerState.init_training_references |
5 | 0 | 0 |
meth |
ProgressCallback.init |
2 | 1 | 0 |
meth |
ProgressCallback.on_train_begin |
5 | 0 | 0 |
meth |
ProgressCallback.on_step_end |
5 | 0 | 0 |
meth |
ProgressCallback.on_prediction_step |
6 | 0 | 0 |
meth |
ProgressCallback.on_evaluate |
5 | 0 | 0 |
meth |
ProgressCallback.on_predict |
5 | 0 | 0 |
meth |
ProgressCallback.on_log |
6 | 0 | 0 |
meth |
ProgressCallback.on_train_end |
5 | 0 | 0 |
attr |
ProgressCallback.training_bar |
1 | 0 | 0 |
attr |
ProgressCallback.prediction_bar |
1 | 0 | 0 |
attr |
ProgressCallback.max_str_len |
1 | 0 | 0 |
meth |
ExportableState.from_state |
2 | 0 | 0 |
meth |
CallbackHandler.init |
6 | 0 | 0 |
meth |
CallbackHandler.add_callback |
2 | 0 | 0 |
meth |
CallbackHandler.pop_callback |
2 | 0 | 0 |
meth |
CallbackHandler.remove_callback |
2 | 0 | 0 |
meth |
CallbackHandler.on_init_end |
4 | 3 | 0 |
meth |
CallbackHandler.on_train_begin |
4 | 3 | 0 |
meth |
CallbackHandler.on_train_end |
4 | 3 | 0 |
meth |
CallbackHandler.on_epoch_begin |
4 | 3 | 0 |
meth |
CallbackHandler.on_epoch_end |
4 | 3 | 0 |
meth |
CallbackHandler.on_step_begin |
4 | 3 | 0 |
meth |
CallbackHandler.on_pre_optimizer_step |
4 | 3 | 0 |
meth |
CallbackHandler.on_optimizer_step |
4 | 3 | 0 |
meth |
CallbackHandler.on_substep_end |
4 | 3 | 0 |
meth |
CallbackHandler.on_step_end |
4 | 3 | 0 |
meth |
CallbackHandler.on_evaluate |
5 | 3 | 0 |
meth |
CallbackHandler.on_predict |
5 | 3 | 0 |
meth |
CallbackHandler.on_save |
4 | 3 | 0 |
meth |
CallbackHandler.on_log |
5 | 3 | 0 |
meth |
CallbackHandler.on_prediction_step |
4 | 3 | 0 |
meth |
CallbackHandler.on_push_begin |
5 | 3 | 0 |
meth |
CallbackHandler.call_event |
6 | 0 | 0 |
prop |
CallbackHandler.callback_list |
1 | 0 | 0 |
attr |
CallbackHandler.callbacks |
1 | 0 | 0 |
attr |
CallbackHandler.model |
1 | 0 | 0 |
attr |
CallbackHandler.processing_class |
1 | 0 | 0 |
attr |
CallbackHandler.optimizer |
1 | 0 | 0 |
attr |
CallbackHandler.lr_scheduler |
1 | 0 | 0 |
attr |
CallbackHandler.train_dataloader |
1 | 0 | 0 |
attr |
CallbackHandler.eval_dataloader |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.trainer_jit_checkpoint (41 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
CheckpointManager.init |
3 | 1 | 0 |
meth |
CheckpointManager.setup_signal_handler |
1 | 0 | 0 |
meth |
CheckpointManager._sigterm_handler |
3 | 0 | 0 |
meth |
CheckpointManager._enable_checkpoint |
1 | 0 | 0 |
meth |
CheckpointManager.execute_jit_checkpoint |
1 | 0 | 0 |
attr |
CheckpointManager.trainer |
1 | 0 | 0 |
attr |
CheckpointManager.is_checkpoint_requested |
1 | 0 | 0 |
attr |
CheckpointManager.kill_wait |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
JITCheckpointCallback.init |
1 | 0 | 0 |
meth |
JITCheckpointCallback.set_trainer |
2 | 0 | 0 |
meth |
JITCheckpointCallback.on_pre_optimizer_step |
5 | 0 | 0 |
meth |
JITCheckpointCallback.on_step_begin |
5 | 0 | 0 |
meth |
JITCheckpointCallback.on_step_end |
5 | 0 | 0 |
meth |
JITCheckpointCallback.on_epoch_end |
5 | 0 | 0 |
meth |
JITCheckpointCallback.on_train_end |
5 | 0 | 0 |
attr |
JITCheckpointCallback.trainer |
1 | 0 | 0 |
transformers.trainer_optimizer (1 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
is_optimizer_factory |
2 | 2 | 1 |
attr |
logger |
1 | 0 | 0 |
transformers.trainer_pt_utils (171 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
torch_distributed_zero_first |
2 | 1 | 0 |
func |
get_model_param_count |
3 | 0 | 0 |
func |
torch_pad_and_concatenate |
4 | 0 | 0 |
func |
atleast_1d |
2 | 1 | 0 |
func |
numpy_pad_and_concatenate |
4 | 0 | 0 |
func |
nested_detach |
2 | 0 | 0 |
func |
save_state |
2 | 0 | 0 |
func |
log_metrics |
4 | 0 | 0 |
func |
nested_truncate |
3 | 0 | 0 |
meth |
LabelSmoother.call |
4 | 0 | 0 |
meth |
DistributedSamplerWithLoop.init |
4 | 0 | 0 |
meth |
DistributedSamplerWithLoop.iter |
1 | 0 | 0 |
attr |
DistributedSamplerWithLoop.batch_size |
1 | 0 | 0 |
func |
expand_like |
4 | 0 | 0 |
func |
nested_numpify |
2 | 0 | 0 |
func |
get_length_grouped_indices |
5 | 0 | 0 |
func |
get_num_trainable_parameters |
2 | 1 | 0 |
func |
smp_nested_concat |
2 | 0 | 0 |
func |
reissue_pt_warnings |
2 | 0 | 0 |
func |
get_optimizer_group |
3 | 1 | 0 |
func |
nested_new_like |
4 | 0 | 0 |
meth |
AcceleratorConfig.from_json_file |
2 | 0 | 0 |
meth |
AcceleratorConfig.to_dict |
1 | 0 | 0 |
meth |
AcceleratorConfig.pop |
3 | 0 | 0 |
func |
nested_gather |
4 | 0 | 0 |
meth |
IterableDatasetShard.init |
7 | 6 | 0 |
meth |
IterableDatasetShard.set_epoch |
2 | 0 | 0 |
meth |
IterableDatasetShard.iter |
1 | 0 | 0 |
meth |
IterableDatasetShard.len |
1 | 0 | 0 |
attr |
IterableDatasetShard.dataset |
1 | 0 | 0 |
attr |
IterableDatasetShard.batch_size |
1 | 0 | 0 |
attr |
IterableDatasetShard.drop_last |
1 | 0 | 0 |
attr |
IterableDatasetShard.num_processes |
1 | 0 | 0 |
attr |
IterableDatasetShard.process_index |
1 | 0 | 0 |
attr |
IterableDatasetShard.seed |
1 | 0 | 0 |
attr |
IterableDatasetShard.epoch |
1 | 0 | 0 |
attr |
IterableDatasetShard.num_examples |
1 | 0 | 0 |
func |
smp_gather |
2 | 0 | 0 |
func |
distributed_concat |
3 | 3 | 2 |
meth |
LayerWiseDummyScheduler.init |
3 | 0 | 0 |
meth |
LayerWiseDummyScheduler.get_lr |
1 | 0 | 0 |
meth |
LayerWiseDummyScheduler._get_closed_form_lr |
1 | 0 | 0 |
attr |
LayerWiseDummyScheduler.default_lr |
1 | 0 | 0 |
func |
is_attention_mask_causal |
2 | 0 | 0 |
meth |
DistributedLengthGroupedSampler.init |
9 | 8 | 0 |
attr |
DistributedLengthGroupedSampler.batch_size |
1 | 0 | 0 |
attr |
DistributedLengthGroupedSampler.num_replicas |
1 | 0 | 0 |
attr |
DistributedLengthGroupedSampler.rank |
1 | 0 | 0 |
attr |
DistributedLengthGroupedSampler.epoch |
1 | 0 | 0 |
attr |
DistributedLengthGroupedSampler.drop_last |
1 | 0 | 0 |
attr |
DistributedLengthGroupedSampler.lengths |
1 | 0 | 0 |
attr |
DistributedLengthGroupedSampler.total_size |
1 | 0 | 0 |
attr |
DistributedLengthGroupedSampler.seed |
1 | 0 | 0 |
attr |
DistributedLengthGroupedSampler.num_samples |
1 | 0 | 0 |
func |
nested_concat |
4 | 0 | 0 |
func |
save_metrics |
5 | 0 | 0 |
func |
set_rng_state_for_device |
5 | 0 | 0 |
func |
smp_forward_backward |
4 | 0 | 0 |
meth |
LengthGroupedSampler.init |
6 | 4 | 0 |
meth |
LengthGroupedSampler.len |
1 | 0 | 0 |
meth |
LengthGroupedSampler.iter |
1 | 0 | 0 |
attr |
LengthGroupedSampler.batch_size |
1 | 0 | 0 |
attr |
LengthGroupedSampler.lengths |
1 | 0 | 0 |
attr |
LengthGroupedSampler.generator |
1 | 0 | 0 |
func |
get_learning_rates |
2 | 1 | 0 |
func |
get_parameter_names |
4 | 0 | 0 |
func |
remove_dummy_checkpoint |
4 | 0 | 0 |
func |
get_dataloader_sampler |
2 | 0 | 0 |
func |
nested_xla_mesh_reduce |
3 | 0 | 0 |
func |
smp_forward_only |
3 | 0 | 0 |
meth |
EvalLoopContainer.init |
3 | 2 | 0 |
meth |
EvalLoopContainer.add |
2 | 1 | 0 |
meth |
EvalLoopContainer.get_arrays |
1 | 0 | 0 |
attr |
EvalLoopContainer.do_nested_concat |
1 | 0 | 0 |
attr |
EvalLoopContainer.padding_index |
1 | 0 | 0 |
attr |
EvalLoopContainer.tensors |
1 | 0 | 0 |
attr |
EvalLoopContainer.arrays |
1 | 0 | 0 |
func |
safe_globals |
1 | 0 | 0 |
meth |
LayerWiseDummyOptimizer.init |
3 | 0 | 0 |
meth |
LayerWiseDummyOptimizer.step |
2 | 1 | 0 |
attr |
LayerWiseDummyOptimizer.optimizer_dict |
1 | 0 | 0 |
func |
find_batch_size |
2 | 0 | 0 |
meth |
ShardSampler.init |
6 | 5 | 0 |
meth |
ShardSampler.iter |
1 | 0 | 0 |
meth |
ShardSampler.len |
1 | 0 | 0 |
attr |
ShardSampler.dataset |
1 | 0 | 0 |
attr |
ShardSampler.batch_size |
1 | 0 | 0 |
attr |
ShardSampler.drop_last |
1 | 0 | 0 |
attr |
ShardSampler.num_processes |
1 | 0 | 0 |
attr |
ShardSampler.process_index |
1 | 0 | 0 |
attr |
ShardSampler.total_batch_size |
1 | 0 | 0 |
attr |
ShardSampler.total_num_samples |
1 | 0 | 0 |
func |
get_module_class_from_name |
3 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
get_tpu_sampler |
3 | 2 | 0 |
transformers.trainer_seq2seq (8 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Seq2SeqTrainer.init |
13 | 12 | 0 |
meth |
Seq2SeqTrainer.evaluate |
5 | 4 | 0 |
meth |
Seq2SeqTrainer.predict |
5 | 4 | 0 |
meth |
Seq2SeqTrainer.prediction_step |
6 | 5 | 0 |
meth |
Seq2SeqTrainer._pad_tensors_to_max_len |
3 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.trainer_utils (82 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
EvalPrediction.init |
5 | 4 | 0 |
meth |
EvalPrediction.iter |
1 | 0 | 0 |
meth |
EvalPrediction.getitem |
2 | 0 | 0 |
attr |
EvalPrediction.predictions |
1 | 0 | 0 |
attr |
EvalPrediction.label_ids |
1 | 0 | 0 |
attr |
EvalPrediction.inputs |
1 | 0 | 0 |
attr |
EvalPrediction.losses |
1 | 0 | 0 |
attr |
EvalPrediction.elements |
1 | 0 | 0 |
func |
enable_full_determinism |
3 | 2 | 0 |
func |
set_seed |
3 | 2 | 0 |
func |
number_of_arguments |
2 | 0 | 0 |
func |
is_main_process |
2 | 0 | 0 |
func |
speed_metrics |
6 | 0 | 0 |
func |
check_target_module_exists |
4 | 2 | 0 |
func |
has_length |
2 | 2 | 1 |
meth |
RemoveColumnsCollator.init |
6 | 2 | 0 |
meth |
RemoveColumnsCollator.call |
2 | 1 | 0 |
attr |
RemoveColumnsCollator.data_collator |
1 | 0 | 0 |
attr |
RemoveColumnsCollator.signature_columns |
1 | 0 | 0 |
attr |
RemoveColumnsCollator.logger |
1 | 0 | 0 |
attr |
RemoveColumnsCollator.description |
1 | 0 | 0 |
attr |
RemoveColumnsCollator.model_name |
1 | 0 | 0 |
attr |
RemoveColumnsCollator.message_logged |
1 | 0 | 0 |
func |
default_hp_space_wandb |
2 | 1 | 0 |
func |
suppress_progress_bars |
1 | 0 | 0 |
func |
denumpify_detensorize |
2 | 0 | 0 |
func |
seed_worker |
4 | 3 | 0 |
func |
default_hp_space_ray |
2 | 1 | 0 |
func |
unwrap_peft_model |
2 | 0 | 0 |
func |
default_hp_space_optuna |
2 | 1 | 0 |
func |
find_executable_batch_size |
4 | 3 | 0 |
func |
load_sharded_checkpoint |
5 | 0 | 0 |
func |
validate_quantization_for_training |
2 | 0 | 0 |
func |
align_special_tokens |
3 | 0 | 0 |
func |
total_processes_number |
2 | 0 | 0 |
meth |
TrainerMemoryTracker.init |
2 | 0 | 0 |
meth |
TrainerMemoryTracker.derive_stage |
1 | 0 | 0 |
meth |
TrainerMemoryTracker.cpu_mem_used |
1 | 0 | 0 |
meth |
TrainerMemoryTracker.peak_monitor_func |
1 | 0 | 0 |
meth |
TrainerMemoryTracker.start |
1 | 0 | 0 |
meth |
TrainerMemoryTracker.stop |
2 | 0 | 0 |
meth |
TrainerMemoryTracker.update_metrics |
3 | 0 | 0 |
meth |
TrainerMemoryTracker.stop_and_update_metrics |
2 | 0 | 0 |
attr |
TrainerMemoryTracker.skip_memory_metrics |
1 | 0 | 0 |
attr |
TrainerMemoryTracker.process |
1 | 0 | 0 |
attr |
TrainerMemoryTracker.cur_stage |
1 | 0 | 0 |
attr |
TrainerMemoryTracker.cpu |
1 | 0 | 0 |
attr |
TrainerMemoryTracker.init_reported |
1 | 0 | 0 |
attr |
TrainerMemoryTracker.torch |
1 | 0 | 0 |
attr |
TrainerMemoryTracker.gpu |
1 | 0 | 0 |
func |
get_last_checkpoint |
2 | 0 | 0 |
func |
compare_trainer_and_checkpoint_args |
3 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.training_args (33 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TrainingArguments.post_init |
1 | 0 | 0 |
meth |
TrainingArguments._validate_args |
1 | 0 | 0 |
meth |
TrainingArguments.str |
1 | 0 | 0 |
meth |
TrainingArguments.repr |
1 | 0 | 0 |
meth |
TrainingArguments.get_process_log_level |
1 | 0 | 0 |
meth |
TrainingArguments.main_process_first |
3 | 0 | 0 |
meth |
TrainingArguments.get_warmup_steps |
2 | 1 | 0 |
meth |
TrainingArguments.to_dict |
1 | 0 | 0 |
meth |
TrainingArguments.to_json_string |
1 | 0 | 0 |
meth |
TrainingArguments.set_training |
9 | 8 | 0 |
meth |
TrainingArguments.set_evaluate |
7 | 6 | 0 |
meth |
TrainingArguments.set_testing |
3 | 2 | 0 |
meth |
TrainingArguments.set_save |
5 | 4 | 0 |
meth |
TrainingArguments.set_logging |
9 | 8 | 0 |
meth |
TrainingArguments.set_push_to_hub |
7 | 6 | 0 |
meth |
TrainingArguments.set_optimizer |
8 | 7 | 0 |
meth |
TrainingArguments.set_lr_scheduler |
6 | 5 | 0 |
meth |
TrainingArguments.set_dataloader |
11 | 10 | 0 |
meth |
TrainingArguments._process_fsdp_args |
1 | 0 | 0 |
prop |
TrainingArguments.n_gpu |
1 | 0 | 0 |
prop |
TrainingArguments.parallel_mode |
1 | 0 | 0 |
prop |
TrainingArguments.world_size |
1 | 0 | 0 |
prop |
TrainingArguments.process_index |
1 | 0 | 0 |
prop |
TrainingArguments.local_process_index |
1 | 0 | 0 |
prop |
TrainingArguments.should_log |
1 | 0 | 0 |
prop |
TrainingArguments.should_save |
1 | 0 | 0 |
prop |
TrainingArguments._no_sync_in_gradient_accumulation |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
str_to_bool |
3 | 2 | 0 |
attr |
trainer_log_levels |
1 | 0 | 0 |
attr |
log_levels |
1 | 0 | 0 |
transformers.training_args_seq2seq (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Seq2SeqTrainingArguments.to_dict |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.utils (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
check_min_version |
2 | 0 | 0 |
transformers.utils.attention_visualizer (18 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
generate_attention_matrix_from_mask |
7 | 0 | 0 |
meth |
AttentionMaskVisualizer.init |
2 | 1 | 0 |
meth |
AttentionMaskVisualizer.call |
3 | 1 | 0 |
meth |
AttentionMaskVisualizer.visualize_attention_mask |
3 | 1 | 0 |
attr |
AttentionMaskVisualizer.image_token |
1 | 0 | 0 |
attr |
AttentionMaskVisualizer.mapped_cls |
1 | 0 | 0 |
attr |
AttentionMaskVisualizer.model |
1 | 0 | 0 |
attr |
AttentionMaskVisualizer.repo_id |
1 | 0 | 0 |
attr |
AttentionMaskVisualizer.config |
1 | 0 | 0 |
attr |
AttentionMaskVisualizer.sliding_window |
1 | 0 | 0 |
transformers.utils.auto_docstring (43 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
auto_docstring |
5 | 0 | 0 |
func |
find_sig_line |
3 | 0 | 0 |
func |
parse_shape |
2 | 0 | 0 |
func |
auto_class_docstring |
5 | 0 | 0 |
func |
get_indent_level |
2 | 0 | 0 |
func |
auto_method_docstring |
7 | 0 | 0 |
attr |
PATH_TO_TRANSFORMERS |
1 | 0 | 0 |
func |
get_checkpoint_from_config_class |
2 | 0 | 0 |
func |
process_type_annotation |
3 | 2 | 0 |
func |
add_intro_docstring |
4 | 0 | 0 |
func |
get_model_name |
2 | 0 | 0 |
func |
parse_default |
2 | 0 | 0 |
func |
contains_type |
3 | 1 | 0 |
func |
generate_processor_intro |
2 | 1 | 0 |
func |
parse_docstring |
4 | 0 | 0 |
transformers.utils.chat_parsing_utils (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
recursive_parse |
3 | 2 | 0 |
transformers.utils.chat_template_utils (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Chat.init |
2 | 1 | 0 |
attr |
Chat.messages |
1 | 0 | 0 |
attr |
args_re |
1 | 0 | 0 |
attr |
BASIC_TYPES |
1 | 0 | 0 |
func |
render_jinja_template |
9 | 8 | 0 |
attr |
description_re |
1 | 0 | 0 |
func |
is_valid_message |
2 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
attr |
args_split_re |
1 | 0 | 0 |
attr |
returns_re |
1 | 0 | 0 |
transformers.utils.deprecation (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
deprecate_kwarg |
8 | 7 | 0 |
transformers.utils.doc (33 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
add_start_docstrings |
2 | 0 | 0 |
func |
add_end_docstrings |
2 | 0 | 0 |
func |
replace_return_docstrings |
3 | 0 | 0 |
func |
get_docstring_indentation_level |
2 | 0 | 0 |
func |
add_start_docstrings_to_model_forward |
2 | 0 | 0 |
attr |
PIPELINE_TASKS_TO_SAMPLE_DOCSTRINGS |
1 | 0 | 0 |
attr |
MODELS_TO_PIPELINE |
1 | 0 | 0 |
func |
add_code_sample_docstrings |
15 | 0 | 0 |
func |
filter_outputs_from_example |
3 | 0 | 0 |
func |
copy_func |
2 | 0 | 0 |
transformers.utils.dummy_detectron2_objects (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
LayoutLMv2Model.init |
3 | 0 | 0 |
meth |
LayoutLMv2Model.from_pretrained |
3 | 0 | 0 |
transformers.utils.dummy_essentia_and_librosa_and_pretty_midi_and_scipy_and_torch_objects (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Pop2PianoProcessor.init |
3 | 0 | 0 |
meth |
Pop2PianoFeatureExtractor.init |
3 | 0 | 0 |
meth |
Pop2PianoTokenizer.init |
3 | 0 | 0 |
transformers.utils.dummy_mistral_common_objects (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MistralCommonBackend.init |
3 | 0 | 0 |
transformers.utils.dummy_music_objects (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Pop2PianoFeatureExtractor.init |
3 | 0 | 0 |
meth |
Pop2PianoTokenizer.init |
3 | 0 | 0 |
transformers.utils.dummy_pt_objects (243 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
SynthIDTextWatermarkLogitsProcessor.init |
3 | 0 | 0 |
func |
get_wsd_schedule |
3 | 0 | 0 |
meth |
EncoderDecoderCache.init |
3 | 0 | 0 |
func |
get_constant_schedule |
3 | 0 | 0 |
func |
get_linear_schedule_with_warmup |
3 | 0 | 0 |
func |
convert_and_export_with_cache |
3 | 0 | 0 |
func |
get_constant_schedule_with_warmup |
3 | 0 | 0 |
func |
get_polynomial_decay_schedule_with_warmup |
3 | 0 | 0 |
meth |
TopKLogitsWarper.init |
3 | 0 | 0 |
meth |
GlueDataTrainingArguments.init |
3 | 0 | 0 |
meth |
MinPLogitsWarper.init |
3 | 0 | 0 |
meth |
SuppressTokensAtBeginLogitsProcessor.init |
3 | 0 | 0 |
meth |
TopPLogitsWarper.init |
3 | 0 | 0 |
meth |
LogitsProcessor.init |
3 | 0 | 0 |
meth |
SequenceBiasLogitsProcessor.init |
3 | 0 | 0 |
meth |
WhisperTimeStampLogitsProcessor.init |
3 | 0 | 0 |
meth |
Trainer.init |
3 | 0 | 0 |
meth |
LogitsProcessorList.init |
3 | 0 | 0 |
meth |
DynamicCache.init |
3 | 0 | 0 |
meth |
PrefixConstrainedLogitsProcessor.init |
3 | 0 | 0 |
meth |
GenerationMixin.init |
3 | 0 | 0 |
meth |
GlueDataset.init |
3 | 0 | 0 |
meth |
MaxLengthCriteria.init |
3 | 0 | 0 |
meth |
MaxTimeCriteria.init |
3 | 0 | 0 |
meth |
DisjunctiveConstraint.init |
3 | 0 | 0 |
meth |
AttentionInterface.init |
3 | 0 | 0 |
meth |
StopStringCriteria.init |
3 | 0 | 0 |
meth |
ConstraintListState.init |
3 | 0 | 0 |
meth |
BeamScorer.init |
3 | 0 | 0 |
meth |
EosTokenCriteria.init |
3 | 0 | 0 |
meth |
GradientCheckpointingLayer.init |
3 | 0 | 0 |
meth |
StoppingCriteria.init |
3 | 0 | 0 |
meth |
NoBadWordsLogitsProcessor.init |
3 | 0 | 0 |
meth |
EtaLogitsWarper.init |
3 | 0 | 0 |
meth |
ForcedEOSTokenLogitsProcessor.init |
3 | 0 | 0 |
func |
get_cosine_with_hard_restarts_schedule_with_warmup |
3 | 0 | 0 |
meth |
ConstrainedBeamSearchScorer.init |
3 | 0 | 0 |
meth |
Cache.init |
3 | 0 | 0 |
meth |
BayesianDetectorModel.init |
3 | 0 | 0 |
meth |
ForcedBOSTokenLogitsProcessor.init |
3 | 0 | 0 |
meth |
StoppingCriteriaList.init |
3 | 0 | 0 |
func |
get_scheduler |
3 | 0 | 0 |
meth |
Constraint.init |
3 | 0 | 0 |
meth |
ExponentialDecayLengthPenalty.init |
3 | 0 | 0 |
meth |
WatermarkDetector.init |
3 | 0 | 0 |
meth |
UnbatchedClassifierFreeGuidanceLogitsProcessor.init |
3 | 0 | 0 |
meth |
SquadDataset.init |
3 | 0 | 0 |
meth |
MinNewTokensLengthLogitsProcessor.init |
3 | 0 | 0 |
meth |
Seq2SeqTrainer.init |
3 | 0 | 0 |
meth |
BayesianDetectorConfig.init |
3 | 0 | 0 |
meth |
AttentionMaskInterface.init |
3 | 0 | 0 |
meth |
StaticCache.init |
3 | 0 | 0 |
meth |
SynthIDTextWatermarkDetector.init |
3 | 0 | 0 |
meth |
PhrasalConstraint.init |
3 | 0 | 0 |
meth |
MinLengthLogitsProcessor.init |
3 | 0 | 0 |
meth |
EpsilonLogitsWarper.init |
3 | 0 | 0 |
meth |
Adafactor.init |
3 | 0 | 0 |
meth |
SynthIDTextWatermarkingConfig.init |
3 | 0 | 0 |
meth |
TemperatureLogitsWarper.init |
3 | 0 | 0 |
meth |
WatermarkLogitsProcessor.init |
3 | 0 | 0 |
func |
torch_distributed_zero_first |
3 | 0 | 0 |
meth |
ClassifierFreeGuidanceLogitsProcessor.init |
3 | 0 | 0 |
meth |
EncoderNoRepeatNGramLogitsProcessor.init |
3 | 0 | 0 |
meth |
LogitNormalization.init |
3 | 0 | 0 |
meth |
EncoderRepetitionPenaltyLogitsProcessor.init |
3 | 0 | 0 |
func |
get_inverse_sqrt_schedule |
3 | 0 | 0 |
meth |
QuantizedCache.init |
3 | 0 | 0 |
meth |
SquadDataTrainingArguments.init |
3 | 0 | 0 |
func |
model_addition_debugger_context |
3 | 0 | 0 |
func |
dynamic_rope_update |
3 | 0 | 0 |
meth |
RepetitionPenaltyLogitsProcessor.init |
3 | 0 | 0 |
func |
get_cosine_schedule_with_warmup |
3 | 0 | 0 |
meth |
NoRepeatNGramLogitsProcessor.init |
3 | 0 | 0 |
func |
apply_chunking_to_forward |
3 | 0 | 0 |
meth |
AlternatingCodebooksLogitsProcessor.init |
3 | 0 | 0 |
meth |
Conv1D.init |
3 | 0 | 0 |
meth |
TorchExportableModuleWithStaticCache.init |
3 | 0 | 0 |
meth |
SuppressTokensLogitsProcessor.init |
3 | 0 | 0 |
meth |
PreTrainedModel.init |
3 | 0 | 0 |
meth |
TypicalLogitsWarper.init |
3 | 0 | 0 |
meth |
InfNanRemoveLogitsProcessor.init |
3 | 0 | 0 |
transformers.utils.dummy_sentencepiece_and_tokenizers_objects (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
convert_slow_tokenizer |
3 | 0 | 0 |
transformers.utils.dummy_speech_objects (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
Speech2TextFeatureExtractor.init |
3 | 0 | 0 |
meth |
ASTFeatureExtractor.init |
3 | 0 | 0 |
transformers.utils.dummy_timm_and_torchvision_objects (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TimmWrapperImageProcessor.init |
3 | 0 | 0 |
transformers.utils.dummy_tokenizers_objects (3 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
PreTrainedTokenizerFast.init |
3 | 0 | 0 |
transformers.utils.dummy_torchaudio_objects (12 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
MusicgenMelodyFeatureExtractor.init |
3 | 0 | 0 |
meth |
GraniteSpeechFeatureExtractor.init |
3 | 0 | 0 |
meth |
GraniteSpeechProcessor.init |
3 | 0 | 0 |
meth |
MusicgenMelodyProcessor.init |
3 | 0 | 0 |
transformers.utils.dummy_torchvision_objects (6 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BaseVideoProcessor.init |
3 | 0 | 0 |
meth |
BaseImageProcessorFast.init |
3 | 0 | 0 |
transformers.utils.dummy_vision_objects (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
ImageProcessingMixin.init |
3 | 0 | 0 |
meth |
ImageFeatureExtractionMixin.init |
3 | 0 | 0 |
meth |
BaseImageProcessor.init |
3 | 0 | 0 |
transformers.utils.generic (91 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
expand_dims |
3 | 0 | 0 |
func |
torch_float |
2 | 0 | 0 |
func |
infer_framework_from_repr |
2 | 1 | 0 |
func |
is_numpy_array |
2 | 1 | 0 |
func |
is_torch_tensor |
2 | 1 | 0 |
func |
can_return_loss |
2 | 0 | 0 |
meth |
ContextManagers.init |
2 | 1 | 0 |
meth |
ContextManagers.enter |
1 | 0 | 0 |
meth |
ContextManagers.exit |
3 | 0 | 0 |
attr |
ContextManagers.context_managers |
1 | 0 | 0 |
attr |
ContextManagers.stack |
1 | 0 | 0 |
func |
is_mlx_array |
2 | 1 | 0 |
func |
strtobool |
2 | 1 | 0 |
func |
can_return_tuple |
2 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
is_torch_dtype |
2 | 1 | 0 |
func |
find_labels |
2 | 0 | 0 |
func |
torch_int |
2 | 0 | 0 |
func |
merge_with_config_defaults |
2 | 0 | 0 |
func |
to_numpy |
2 | 0 | 0 |
func |
tensor_size |
2 | 0 | 0 |
meth |
ModelOutput.init |
3 | 0 | 0 |
meth |
ModelOutput.post_init |
1 | 0 | 0 |
meth |
ModelOutput.delitem |
3 | 0 | 0 |
meth |
ModelOutput.setdefault |
3 | 0 | 0 |
meth |
ModelOutput.pop |
3 | 0 | 0 |
meth |
ModelOutput.update |
3 | 0 | 0 |
meth |
ModelOutput.getitem |
2 | 0 | 0 |
meth |
ModelOutput.setattr |
3 | 0 | 0 |
meth |
ModelOutput.setitem |
3 | 0 | 0 |
meth |
ModelOutput.reduce |
1 | 0 | 0 |
meth |
ExplicitEnum.missing |
2 | 0 | 0 |
func |
reshape |
3 | 0 | 0 |
func |
is_tensor |
2 | 1 | 0 |
func |
flatten_dict |
4 | 3 | 0 |
func |
set_attribute_for_modules |
4 | 3 | 1 |
func |
transpose |
3 | 0 | 0 |
func |
is_flash_attention_requested |
3 | 2 | 0 |
func |
squeeze |
3 | 0 | 0 |
func |
is_torch_device |
2 | 1 | 0 |
func |
del_attribute_from_modules |
3 | 2 | 0 |
func |
maybe_autocast |
5 | 4 | 0 |
func |
safe_load_json_file |
2 | 1 | 0 |
meth |
GeneralInterface.init |
1 | 0 | 0 |
meth |
GeneralInterface.getitem |
2 | 0 | 0 |
meth |
GeneralInterface.setitem |
3 | 0 | 0 |
meth |
GeneralInterface.delitem |
2 | 0 | 0 |
meth |
GeneralInterface.iter |
1 | 0 | 0 |
meth |
GeneralInterface.len |
1 | 0 | 0 |
meth |
GeneralInterface.register |
3 | 2 | 0 |
func |
to_py_obj |
2 | 0 | 0 |
func |
filter_out_non_signature_kwargs |
2 | 1 | 0 |
transformers.utils.hp_naming (17 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
TrialShortNamer.set_defaults |
3 | 0 | 0 |
meth |
TrialShortNamer.shortname_for_word |
3 | 0 | 0 |
meth |
TrialShortNamer.shortname_for_key |
3 | 0 | 0 |
meth |
TrialShortNamer.add_new_param_name |
3 | 0 | 0 |
meth |
TrialShortNamer.build_naming_info |
1 | 0 | 0 |
meth |
TrialShortNamer.shortname |
2 | 0 | 0 |
meth |
TrialShortNamer.parse_repr |
2 | 0 | 0 |
transformers.utils.hub (29 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
convert_file_size_to_int |
2 | 1 | 0 |
func |
define_sagemaker_information |
1 | 0 | 0 |
func |
get_checkpoint_shard_files |
13 | 0 | 0 |
func |
cached_file |
4 | 3 | 0 |
attr |
HF_MODULES_CACHE |
1 | 0 | 0 |
meth |
PushInProgress.is_done |
1 | 0 | 0 |
meth |
PushInProgress.wait_until_done |
1 | 0 | 0 |
attr |
PushInProgress.jobs |
1 | 0 | 0 |
func |
has_file |
10 | 8 | 0 |
meth |
PushToHubMixin._get_files_timestamps |
2 | 1 | 0 |
meth |
PushToHubMixin._upload_modified_files |
9 | 8 | 0 |
meth |
PushToHubMixin.save_pretrained |
3 | 0 | 0 |
func |
cached_files |
17 | 16 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.utils.import_utils (37 missing, 1 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
is_torch_musa_available |
2 | 1 | 0 |
func |
is_torch_neuroncore_available |
2 | 1 | 0 |
func |
is_torch_npu_available |
2 | 1 | 0 |
func |
is_torch_xla_available |
3 | 1 | 0 |
func |
spread_import_structure |
2 | 0 | 0 |
func |
is_torchdistx_available |
1 | 0 | 0 |
meth |
DummyObject.getattribute |
2 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
func |
create_import_structure_from_path |
2 | 0 | 0 |
attr |
BACKENDS_MAPPING |
1 | 0 | 0 |
func |
requires_backends |
3 | 0 | 0 |
meth |
Backend.__init__ |
2 | 1 | 0 |
prop |
Backend.error_message |
1 | 0 | 0 |
attr |
PACKAGE_DISTRIBUTION_MAPPING |
1 | 0 | 0 |
attr |
USE_TORCH_XLA |
1 | 0 | 0 |
func |
fetch__all__ |
2 | 1 | 0 |
func |
is_fake_tensor |
2 | 1 | 0 |
func |
is_qutlass_available |
1 | 0 | 0 |
attr |
ENV_VARS_TRUE_AND_AUTO_VALUES |
1 | 0 | 0 |
func |
is_tracing |
2 | 1 | 0 |
func |
requires |
2 | 0 | 0 |
func |
is_jax_jitting |
2 | 0 | 0 |
func |
is_fp_quant_available |
1 | 0 | 0 |
attr |
BASE_FILE_REQUIREMENTS |
1 | 0 | 0 |
func |
split_package_version |
2 | 1 | 0 |
func |
is_optimum_quanto_available |
1 | 0 | 0 |
func |
direct_transformers_import |
3 | 2 | 0 |
func |
is_torch_fx_proxy |
2 | 1 | 0 |
func |
torch_compilable_check |
4 | 4 | 1 |
func |
is_flash_linear_attention_available |
1 | 0 | 0 |
transformers.utils.kernel_config (34 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
KernelConfig.__init__ |
3 | 0 | 0 |
meth |
KernelConfig.update_kernel |
7 | 0 | 0 |
meth |
KernelConfig.store_registered_layer_names |
2 | 0 | 0 |
meth |
KernelConfig.sanitize_kernel_mapping |
2 | 0 | 0 |
meth |
KernelConfig.create_compatible_mapping |
3 | 0 | 0 |
attr |
KernelConfig.kernel_mapping |
1 | 0 | 0 |
attr |
KernelConfig.registered_layer_names |
1 | 0 | 0 |
attr |
KernelConfig.use_local_kernel |
1 | 0 | 0 |
func |
add_to_mapping |
6 | 0 | 0 |
func |
infer_device |
2 | 0 | 0 |
func |
add_to_mapping_local |
6 | 0 | 0 |
transformers.utils.loading_report (4 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
log_state_dict_report |
6 | 4 | 0 |
meth |
LoadStateDictInfo.missing_and_mismatched |
1 | 0 | 0 |
meth |
LoadStateDictInfo.to_dict |
1 | 0 | 0 |
transformers.utils.logging (33 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
captureWarnings |
2 | 0 | 0 |
func |
disable_progress_bar |
1 | 0 | 0 |
meth |
EmptyTqdm.__init__ |
3 | 0 | 0 |
meth |
EmptyTqdm.__iter__ |
1 | 0 | 0 |
meth |
EmptyTqdm.__getattr__ |
2 | 0 | 0 |
meth |
EmptyTqdm.__enter__ |
1 | 0 | 0 |
meth |
EmptyTqdm.__exit__ |
4 | 0 | 0 |
func |
set_verbosity_error |
1 | 0 | 0 |
attr |
tqdm |
1 | 0 | 0 |
func |
warning_advice |
4 | 0 | 0 |
func |
warning_once |
4 | 0 | 0 |
func |
set_verbosity_warning |
1 | 0 | 0 |
func |
info_once |
4 | 0 | 0 |
func |
enable_progress_bar |
1 | 0 | 0 |
func |
set_verbosity_info |
1 | 0 | 0 |
func |
set_verbosity_debug |
1 | 0 | 0 |
func |
get_log_levels_dict |
1 | 0 | 0 |
transformers.utils.metrics (11 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
traced |
5 | 1 | 0 |
meth |
ContinuousBatchProcessorMetrics.__init__ |
2 | 1 | 0 |
meth |
ContinuousBatchProcessorMetrics._setup_metrics |
1 | 0 | 0 |
meth |
ContinuousBatchProcessorMetrics.record_kv_cache_memory_metrics |
2 | 1 | 0 |
attr |
ContinuousBatchProcessorMetrics.max_batch_tokens |
1 | 0 | 0 |
func |
attach_tracer |
2 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.utils.notebook (82 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
NotebookTrainingTracker.__init__ |
3 | 0 | 0 |
meth |
NotebookTrainingTracker.display |
1 | 0 | 0 |
meth |
NotebookTrainingTracker.write_line |
2 | 0 | 0 |
meth |
NotebookTrainingTracker.add_child |
4 | 0 | 0 |
meth |
NotebookTrainingTracker.remove_child |
1 | 0 | 0 |
attr |
NotebookTrainingTracker.inner_table |
1 | 0 | 0 |
attr |
NotebookTrainingTracker.child_bar |
1 | 0 | 0 |
meth |
NotebookProgressCallback.__init__ |
1 | 0 | 0 |
meth |
NotebookProgressCallback.on_train_begin |
5 | 0 | 0 |
meth |
NotebookProgressCallback.on_step_end |
5 | 0 | 0 |
meth |
NotebookProgressCallback.on_prediction_step |
6 | 0 | 0 |
meth |
NotebookProgressCallback.on_predict |
5 | 0 | 0 |
meth |
NotebookProgressCallback.on_log |
6 | 0 | 0 |
meth |
NotebookProgressCallback.on_evaluate |
6 | 0 | 0 |
meth |
NotebookProgressCallback.on_train_end |
5 | 0 | 0 |
attr |
NotebookProgressCallback.training_tracker |
1 | 0 | 0 |
attr |
NotebookProgressCallback.prediction_bar |
1 | 0 | 0 |
meth |
NotebookProgressBar.__init__ |
6 | 5 | 0 |
meth |
NotebookProgressBar.update |
4 | 3 | 0 |
meth |
NotebookProgressBar.update_bar |
3 | 0 | 0 |
meth |
NotebookProgressBar.display |
1 | 0 | 0 |
meth |
NotebookProgressBar.close |
1 | 0 | 0 |
attr |
NotebookProgressBar.update_every |
1 | 0 | 0 |
attr |
NotebookProgressBar.total |
1 | 0 | 0 |
attr |
NotebookProgressBar.prefix |
1 | 0 | 0 |
attr |
NotebookProgressBar.leave |
1 | 0 | 0 |
attr |
NotebookProgressBar.parent |
1 | 0 | 0 |
attr |
NotebookProgressBar.width |
1 | 0 | 0 |
attr |
NotebookProgressBar.last_value |
1 | 0 | 0 |
attr |
NotebookProgressBar.comment |
1 | 0 | 0 |
attr |
NotebookProgressBar.output |
1 | 0 | 0 |
attr |
NotebookProgressBar.value |
1 | 0 | 0 |
attr |
NotebookProgressBar.label |
1 | 0 | 0 |
func |
html_progress_bar |
6 | 0 | 0 |
func |
text_to_html_table |
2 | 0 | 0 |
func |
format_time |
2 | 0 | 0 |
transformers.utils.output_capturing (14 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
capture_outputs |
3 | 0 | 0 |
meth |
CompileableContextVar.__init__ |
3 | 0 | 0 |
meth |
CompileableContextVar.get |
1 | 0 | 0 |
meth |
CompileableContextVar.set |
2 | 0 | 0 |
meth |
CompileableContextVar.reset |
2 | 0 | 0 |
attr |
CompileableContextVar.context_var |
1 | 0 | 0 |
attr |
CompileableContextVar.global_var |
1 | 0 | 0 |
attr |
CompileableContextVar.compiling |
1 | 0 | 0 |
transformers.utils.pytest_helpers (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
main |
1 | 0 | 0 |
func |
summarize |
2 | 1 | 0 |
transformers.utils.quantization_config (143 missing, 2 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
QuantoConfig.__init__ |
5 | 1 | 0 |
meth |
QuantoConfig.post_init |
1 | 0 | 0 |
meth |
BitNetQuantConfig.__init__ |
7 | 5 | 0 |
meth |
BitNetQuantConfig.post_init |
1 | 0 | 0 |
meth |
FineGrainedFP8Config.__init__ |
6 | 4 | 0 |
meth |
FineGrainedFP8Config.post_init |
1 | 0 | 0 |
meth |
FineGrainedFP8Config.get_loading_attributes |
1 | 0 | 0 |
meth |
VptqConfig.__init__ |
6 | 4 | 0 |
meth |
VptqConfig.post_init |
1 | 0 | 0 |
meth |
QuarkConfig.__init__ |
2 | 0 | 0 |
attr |
QuarkConfig.custom_mode |
1 | 0 | 0 |
attr |
QuarkConfig.legacy |
1 | 0 | 0 |
attr |
QuarkConfig.quant_method |
1 | 0 | 0 |
attr |
QuarkConfig.quant_config |
1 | 0 | 0 |
attr |
QuarkConfig.json_export_config |
1 | 0 | 0 |
meth |
FbgemmFp8Config.__init__ |
4 | 2 | 0 |
meth |
FbgemmFp8Config.get_loading_attributes |
1 | 0 | 0 |
meth |
BitsAndBytesConfig.__init__ |
12 | 0 | 0 |
meth |
BitsAndBytesConfig.post_init |
1 | 0 | 0 |
meth |
BitsAndBytesConfig.is_quantizable |
1 | 0 | 0 |
meth |
BitsAndBytesConfig.quantization_method |
1 | 0 | 0 |
meth |
BitsAndBytesConfig.__repr__ |
1 | 0 | 0 |
prop |
BitsAndBytesConfig.load_in_4bit |
2 | 1 | 0 |
prop |
BitsAndBytesConfig.load_in_8bit |
2 | 1 | 0 |
meth |
GPTQConfig.__init__ |
22 | 20 | 1 |
meth |
GPTQConfig.get_loading_attributes |
1 | 0 | 0 |
meth |
GPTQConfig.post_init |
1 | 0 | 0 |
meth |
GPTQConfig.to_dict_optimum |
1 | 0 | 0 |
meth |
GPTQConfig.from_dict_optimum |
2 | 0 | 0 |
meth |
MetalConfig.__init__ |
6 | 4 | 0 |
meth |
MetalConfig.post_init |
1 | 0 | 0 |
meth |
MetalConfig.get_loading_attributes |
1 | 0 | 0 |
attr |
MetalConfig.quant_method |
1 | 0 | 0 |
attr |
MetalConfig.bits |
1 | 0 | 0 |
attr |
MetalConfig.group_size |
1 | 0 | 0 |
attr |
MetalConfig.modules_to_not_convert |
1 | 0 | 0 |
attr |
MetalConfig.dequantize |
1 | 0 | 0 |
meth |
SinqConfig.__init__ |
7 | 6 | 1 |
meth |
SinqConfig.post_init |
1 | 0 | 0 |
attr |
SinqConfig.quant_method |
1 | 0 | 0 |
attr |
SinqConfig.nbits |
1 | 0 | 0 |
attr |
SinqConfig.group_size |
1 | 0 | 0 |
attr |
SinqConfig.tiling_mode |
1 | 0 | 0 |
attr |
SinqConfig.method |
1 | 0 | 0 |
attr |
SinqConfig.modules_to_not_convert |
1 | 0 | 0 |
meth |
AwqConfig.__init__ |
7 | 5 | 0 |
meth |
AwqConfig.post_init |
1 | 0 | 0 |
meth |
CompressedTensorsConfig.__init__ |
11 | 9 | 0 |
meth |
CompressedTensorsConfig.post_init |
1 | 0 | 0 |
meth |
CompressedTensorsConfig.from_dict |
4 | 0 | 0 |
meth |
CompressedTensorsConfig.get_loading_attributes |
1 | 0 | 0 |
prop |
CompressedTensorsConfig.is_quantized |
1 | 0 | 0 |
prop |
CompressedTensorsConfig.is_quantization_compressed |
1 | 0 | 0 |
prop |
CompressedTensorsConfig.is_sparsification_compressed |
1 | 0 | 0 |
attr |
CompressedTensorsConfig.quantization_config |
1 | 0 | 0 |
attr |
CompressedTensorsConfig.sparsity_config |
1 | 0 | 0 |
attr |
CompressedTensorsConfig.run_compressed |
1 | 0 | 0 |
attr |
CompressedTensorsConfig.quant_method |
1 | 0 | 0 |
meth |
HqqConfig.__init__ |
8 | 6 | 0 |
meth |
HqqConfig.post_init |
1 | 0 | 0 |
meth |
HqqConfig.from_dict |
2 | 1 | 0 |
meth |
HqqConfig.__repr__ |
1 | 0 | 0 |
meth |
AutoRoundConfig.__init__ |
6 | 4 | 0 |
meth |
AutoRoundConfig.post_init |
1 | 0 | 0 |
meth |
AutoRoundConfig.get_loading_attributes |
1 | 0 | 0 |
meth |
AutoRoundConfig.to_dict |
1 | 0 | 0 |
meth |
AutoRoundConfig.from_dict |
4 | 0 | 0 |
meth |
SpQRConfig.__init__ |
7 | 5 | 0 |
meth |
SpQRConfig.post_init |
1 | 0 | 0 |
meth |
AqlmConfig.__init__ |
7 | 5 | 0 |
meth |
AqlmConfig.post_init |
1 | 0 | 0 |
meth |
FPQuantConfig.__init__ |
10 | 8 | 0 |
meth |
FPQuantConfig.post_init |
1 | 0 | 0 |
meth |
TorchAoConfig.__init__ |
6 | 4 | 0 |
meth |
TorchAoConfig.post_init |
1 | 0 | 0 |
meth |
TorchAoConfig._validate_string_quant_type |
1 | 0 | 0 |
meth |
TorchAoConfig._get_torchao_quant_type_to_method |
1 | 0 | 0 |
meth |
TorchAoConfig.get_apply_tensor_subclass |
1 | 0 | 0 |
meth |
TorchAoConfig.to_dict |
1 | 0 | 0 |
meth |
TorchAoConfig.from_dict |
4 | 0 | 0 |
meth |
HiggsConfig.__init__ |
8 | 6 | 0 |
meth |
HiggsConfig.post_init |
1 | 0 | 0 |
meth |
FourOverSixConfig.__init__ |
14 | 12 | 0 |
meth |
EetqConfig.__init__ |
4 | 2 | 0 |
meth |
EetqConfig.post_init |
1 | 0 | 0 |
meth |
QuantizationConfigMixin.from_dict |
4 | 0 | 0 |
meth |
QuantizationConfigMixin.to_json_file |
2 | 1 | 0 |
meth |
QuantizationConfigMixin.__iter__ |
1 | 0 | 0 |
meth |
QuantizationConfigMixin.__repr__ |
1 | 0 | 0 |
meth |
QuantizationConfigMixin.update |
2 | 0 | 0 |
meth |
Mxfp4Config.__init__ |
4 | 2 | 0 |
meth |
Mxfp4Config.get_loading_attributes |
1 | 0 | 0 |
meth |
VptqLayerConfig.__init__ |
14 | 12 | 0 |
meth |
VptqLayerConfig.post_init |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.utils.sentencepiece_model_pb2 (5 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
TrainerSpec |
1 | 0 | 0 |
attr |
DESCRIPTOR |
1 | 0 | 0 |
attr |
SelfTestData |
1 | 0 | 0 |
attr |
NormalizerSpec |
1 | 0 | 0 |
attr |
ModelProto |
1 | 0 | 0 |
transformers.utils.sentencepiece_model_pb2_new (1 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
attr |
DESCRIPTOR |
1 | 0 | 0 |
transformers.utils.type_validators (9 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
tensor_type_validator |
2 | 1 | 0 |
func |
padding_validator |
2 | 1 | 0 |
func |
positive_int |
2 | 1 | 0 |
func |
video_metadata_validator |
2 | 1 | 0 |
func |
positive_any_number |
2 | 1 | 0 |
func |
truncation_validator |
2 | 1 | 0 |
func |
image_size_validator |
2 | 1 | 0 |
func |
device_validator |
2 | 1 | 0 |
func |
resampling_validator |
2 | 1 | 0 |
transformers.utils.versions (2 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
require_version_core |
2 | 0 | 0 |
transformers.video_processing_utils (23 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
meth |
BaseVideoProcessor.__call__ |
3 | 1 | 0 |
meth |
BaseVideoProcessor.sample_frames |
5 | 3 | 0 |
meth |
BaseVideoProcessor._preprocess |
15 | 14 | 0 |
meth |
BaseVideoProcessor.from_pretrained |
8 | 6 | 0 |
meth |
BaseVideoProcessor.save_pretrained |
4 | 2 | 0 |
meth |
BaseVideoProcessor.get_video_processor_dict |
3 | 2 | 0 |
meth |
BaseVideoProcessor.from_dict |
3 | 1 | 0 |
meth |
BaseVideoProcessor.to_json_file |
2 | 1 | 0 |
meth |
BaseVideoProcessor.__repr__ |
1 | 0 | 0 |
meth |
BaseVideoProcessor.from_json_file |
2 | 1 | 0 |
meth |
BaseVideoProcessor.register_for_auto_class |
2 | 0 | 0 |
meth |
BaseVideoProcessor.fetch_videos |
3 | 1 | 0 |
attr |
BaseVideoProcessor.size |
1 | 0 | 0 |
attr |
BaseVideoProcessor.crop_size |
1 | 0 | 0 |
attr |
BaseVideoProcessor.model_valid_processing_keys |
1 | 0 | 0 |
attr |
logger |
1 | 0 | 0 |
transformers.video_utils (34 missing, 0 any)
| Symbol | Typable | Typed | Any | |
|---|---|---|---|---|
func |
get_uniform_frame_indices |
3 | 2 | 0 |
func |
is_valid_video_frame |
2 | 0 | 0 |
func |
read_video_pyav |
4 | 2 | 0 |
func |
is_valid_video |
2 | 0 | 0 |
func |
read_video_decord |
4 | 2 | 0 |
func |
valid_videos |
2 | 0 | 0 |
func |
read_video_torchcodec |
4 | 2 | 0 |
func |
default_sample_indices_fn |
5 | 1 | 0 |
func |
read_video_opencv |
4 | 3 | 0 |
func |
is_batched_video |
2 | 0 | 0 |
func |
make_batched_videos |
2 | 1 | 0 |
attr |
logger |
1 | 0 | 0 |
meth |
VideoMetadata.__iter__ |
1 | 0 | 0 |
meth |
VideoMetadata.__len__ |
1 | 0 | 0 |
meth |
VideoMetadata.__getitem__ |
2 | 0 | 0 |
meth |
VideoMetadata.__setitem__ |
3 | 0 | 0 |
meth |
VideoMetadata.update |
2 | 0 | 0 |
func |
load_video |
7 | 6 | 0 |
func |
read_video_torchvision |
4 | 2 | 0 |
Type-Ignore Comments
| Flavor | Count |
|---|---|
type: ignore[unresolved-attribute] |
3 |
type: ignore |
2 |
type: ignore[override] |
1 |