ai-content-maker/.venv/Lib/site-packages/torchaudio/models/__init__.py

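"""Public API of ``torchaudio.models``.

This package re-exports the model classes (e.g. Conformer, Emformer, Tacotron2,
Wav2Vec2Model, HDemucs, RNNT, SquimObjective) and the factory functions that
build preconfigured instances of them (e.g. wav2vec2_base, hubert_large,
emformer_rnnt_base), together with the RNN-T beam-search decoder utilities
(Hypothesis, RNNTBeamSearch). The ``__all__`` list below defines the names
importable via ``from torchaudio.models import *``.
"""
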
from ._hdemucs import HDemucs, hdemucs_high, hdemucs_low, hdemucs_medium
from .conformer import Conformer
from .conv_tasnet import conv_tasnet_base, ConvTasNet
from .deepspeech import DeepSpeech
from .emformer import Emformer
from .rnnt import emformer_rnnt_base, emformer_rnnt_model, RNNT
from .rnnt_decoder import Hypothesis, RNNTBeamSearch
from .squim import (
    squim_objective_base,
    squim_objective_model,
    squim_subjective_base,
    squim_subjective_model,
    SquimObjective,
    SquimSubjective,
)
from .tacotron2 import Tacotron2
from .wav2letter import Wav2Letter
from .wav2vec2 import (
    hubert_base,
    hubert_large,
    hubert_pretrain_base,
    hubert_pretrain_large,
    hubert_pretrain_model,
    hubert_pretrain_xlarge,
    hubert_xlarge,
    HuBERTPretrainModel,
    wav2vec2_base,
    wav2vec2_large,
    wav2vec2_large_lv60k,
    wav2vec2_model,
    wav2vec2_xlsr_1b,
    wav2vec2_xlsr_2b,
    wav2vec2_xlsr_300m,
    Wav2Vec2Model,
    wavlm_base,
    wavlm_large,
    wavlm_model,
)
from .wavernn import WaveRNN

__all__ = [
    "Wav2Letter",
    "WaveRNN",
    "ConvTasNet",
    "conv_tasnet_base",
    "DeepSpeech",
    "Wav2Vec2Model",
    "HuBERTPretrainModel",
    "wavlm_model",
    "wavlm_base",
    "wavlm_large",
    "wav2vec2_model",
    "wav2vec2_base",
    "wav2vec2_large",
    "wav2vec2_large_lv60k",
    "hubert_base",
    "hubert_large",
    "hubert_xlarge",
    "hubert_pretrain_model",
    "hubert_pretrain_base",
    "hubert_pretrain_large",
    "hubert_pretrain_xlarge",
    "wav2vec2_xlsr_300m",
    "wav2vec2_xlsr_1b",
    "wav2vec2_xlsr_2b",
    "Tacotron2",
    "Conformer",
    "Emformer",
    "Hypothesis",
    "RNNT",
    "RNNTBeamSearch",
    "emformer_rnnt_base",
    "emformer_rnnt_model",
    "HDemucs",
    "hdemucs_low",
    "hdemucs_medium",
    "hdemucs_high",
    "squim_objective_base",
    "squim_objective_model",
    "squim_subjective_base",
    "squim_subjective_model",
    "SquimObjective",
    "SquimSubjective",
]
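
# Minimal usage sketch (not part of the original module; assumes a working
# torchaudio install). Factory functions such as ``wav2vec2_base`` return a
# randomly initialized model; pretrained weights are distributed separately
# (e.g. via torchaudio.pipelines).
#
#     import torch
#     from torchaudio.models import wav2vec2_base
#
#     model = wav2vec2_base()           # Wav2Vec2Model with the "base" configuration
#     waveform = torch.randn(1, 16000)  # batch of one 1-second, 16 kHz waveform
#     features, lengths = model.extract_features(waveform)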