# Word-audio ZSL training
Using the code snippets prepared in the previous stages, we can now train our model. (Note that when training gradient-based metric-learning models, we often find that convergence is quite sensitive to the hyperparameters.)
from zsl.model import WordAudioSiameseNetwork
from zsl.loss import TripletLoss
from zsl.dataset import WordAudioDataset
from zsl.data_prep import prepare_zsl_split_word_audio
from zsl.transforms import get_transforms
from zsl.model_manager import ModelManager
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import ReduceLROnPlateau
# Build the zero-shot split: word classes seen during training vs. held-out
# (unseen) classes, each with their own train/test audio lists and labels,
# plus a dict mapping word classes to embedding vectors.
# NOTE(review): judging by the kwarg names used below (audio_path_list),
# the audio_X_* entries appear to be file paths — confirm in zsl.data_prep.
(
seen_word_classes,
seen_audio_X_train,
seen_audio_y_train,
seen_audio_X_test,
seen_audio_y_test,
unseen_word_classes,
unseen_audio_X_train,
unseen_audio_y_train,
unseen_audio_X_test,
unseen_audio_y_test,
inst_word_emb_dict
) = prepare_zsl_split_word_audio()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Siamese network mapping word embeddings and audio into a shared space,
# trained with a triplet loss; both modules are moved to the target device.
# NOTE(review): 0.5 is presumably the triplet margin — confirm in zsl.loss.
siamese_network = WordAudioSiameseNetwork().to(device)
triplet_loss = TripletLoss(0.5).to(device)
# get_transforms() returns a pair; only the second element (the
# mel-spectrogram transform, per its name) is used in this stage.
_, mel_transform = get_transforms()
# Train and test datasets over the SEEN word classes. The transform,
# class list, and embedding dict are identical for both splits, so they
# are factored into one shared kwargs dict.
_shared_dataset_kwargs = dict(
    audio_transform=mel_transform,
    curr_word_classes=seen_word_classes,
    word_emb_dict=inst_word_emb_dict,
)
seen_word_audio_dataset_tr = WordAudioDataset(
    audio_path_list=seen_audio_X_train,
    audio_label_list=seen_audio_y_train,
    **_shared_dataset_kwargs,
)
seen_word_audio_dataset_ts = WordAudioDataset(
    audio_path_list=seen_audio_X_test,
    audio_label_list=seen_audio_y_test,
    **_shared_dataset_kwargs,
)
# Train/test loaders over the seen-class datasets. Both use the same
# batching configuration, including shuffle=True on the test split
# (reproduced from the original setup).
_loader_kwargs = dict(batch_size=16, num_workers=8, shuffle=True)
seen_word_audio_dataloaders = {
    'train': DataLoader(seen_word_audio_dataset_tr, **_loader_kwargs),
    'test': DataLoader(seen_word_audio_dataset_ts, **_loader_kwargs),
}
We include below the hyperparameters of one training strategy that worked in practice.
# Adam over all network parameters; lr=0.01 is the initial rate that the
# plateau scheduler below decays.
optimizer_siamese = optim.Adam(siamese_network.parameters(), lr=0.01)
# Cut the learning rate to 20% (factor=0.2) whenever the monitored metric
# (mode='min', i.e. a quantity to minimize) fails to improve for 5 epochs.
# NOTE(review): `verbose=` is deprecated in recent PyTorch releases and
# removed in newer ones — confirm the pinned torch version still accepts it.
exp_lr_scheduler = ReduceLROnPlateau(
optimizer_siamese,
mode='min',
factor=0.2,
patience=5,
verbose=True
)
# Bundle model, loss, optimizer, scheduler, and device behind the project's
# training driver.
# NOTE(review): the positional order (model, criterion, optimizer,
# scheduler, device) is assumed — verify against zsl.model_manager.
curr_model = ModelManager(
siamese_network,
triplet_loss,
optimizer_siamese,
exp_lr_scheduler,
device
)
# Train on seen classes only (zero-shot: unseen classes are held out for
# evaluation). 'random' selects the validation strategy implemented by
# train_model — see zsl.model_manager for its semantics.
curr_model.train_model(
exp='word_audio',
dataloaders=seen_word_audio_dataloaders,
validation='random',
num_epochs=200
)