# MIT License
#
# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import numpy as np
import torch
from torch.utils.data import DataLoader, Sampler

def _collate_fn(batch, pad_id: int = 0):
    r"""
    Pads a batch of samples to the maximum sequence length.

    Args:
        batch (list): list of (input tensor, target sequence) pairs
        pad_id (int): id of the pad token

    Returns:
        seqs (torch.FloatTensor): tensor containing the padded input sequences.
        targets (torch.LongTensor): tensor containing the padded target sequences.
        seq_lengths (torch.IntTensor): tensor containing the input sequence lengths
        target_lengths (torch.IntTensor): tensor containing the target sequence lengths
    """
    def seq_length_(p):
        return len(p[0])

    def target_length_(p):
        return len(p[1])

    # sort by sequence length (descending) for rnn.pack_padded_sequence()
    batch = sorted(batch, key=lambda sample: sample[0].size(0), reverse=True)

    seq_lengths = [len(s[0]) for s in batch]
    # the final token of each target is excluded from the reported length
    target_lengths = [len(s[1]) - 1 for s in batch]

    max_seq_sample = max(batch, key=seq_length_)[0]
    max_target_sample = max(batch, key=target_length_)[1]

    max_seq_size = max_seq_sample.size(0)
    max_target_size = len(max_target_sample)

    feat_size = max_seq_sample.size(1)
    batch_size = len(batch)

    # zero-filled buffer for inputs; pad_id-filled buffer for targets
    seqs = torch.zeros(batch_size, max_seq_size, feat_size)
    targets = torch.zeros(batch_size, max_target_size).to(torch.long)
    targets.fill_(pad_id)

    for x in range(batch_size):
        sample = batch[x]
        tensor = sample[0]
        target = sample[1]
        seq_length = tensor.size(0)

        seqs[x].narrow(0, 0, seq_length).copy_(tensor)
        targets[x].narrow(0, 0, len(target)).copy_(torch.LongTensor(target))

    seq_lengths = torch.IntTensor(seq_lengths)
    target_lengths = torch.IntTensor(target_lengths)

    return seqs, targets, seq_lengths, target_lengths
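
# A minimal usage sketch for _collate_fn (illustrative only; the feature
# dimension and token ids below are made up):
#
#   >>> batch = [
#   ...     (torch.randn(80, 40), [1, 5, 9, 2]),   # 80 frames, 40-dim features
#   ...     (torch.randn(50, 40), [1, 7, 2]),
#   ... ]
#   >>> seqs, targets, seq_lengths, target_lengths = _collate_fn(batch)
#   >>> seqs.shape          # torch.Size([2, 80, 40]) -- zero-padded to 80 frames
#   >>> targets.shape       # torch.Size([2, 4])      -- padded with pad_id
#   >>> seq_lengths         # tensor([80, 50], dtype=torch.int32)
#   >>> target_lengths      # tensor([3, 2], dtype=torch.int32)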


class AudioDataLoader(DataLoader):
    r"""
    Audio Data Loader

    Args:
        dataset (torch.utils.data.Dataset): dataset from which to load the data.
        num_workers (int): how many subprocesses to use for data loading.
        batch_sampler (torch.utils.data.sampler.Sampler): defines the strategy to draw samples from the dataset.
    """
    def __init__(
            self,
            dataset: torch.utils.data.Dataset,
            num_workers: int,
            batch_sampler: torch.utils.data.sampler.Sampler,
            **kwargs,
    ) -> None:
        super(AudioDataLoader, self).__init__(
            dataset=dataset,
            num_workers=num_workers,
            batch_sampler=batch_sampler,
            **kwargs,
        )
        # pad every drawn batch with the _collate_fn defined above
        self.collate_fn = _collate_fn
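
# A minimal construction sketch, assuming `dataset` yields
# (FloatTensor[frames, features], list-of-token-ids) pairs as _collate_fn
# expects; PyTorch's stock samplers are used here in place of the
# BucketingSampler defined below:
#
#   >>> from torch.utils.data import BatchSampler, SequentialSampler
#   >>> batch_sampler = BatchSampler(SequentialSampler(dataset), batch_size=4, drop_last=False)
#   >>> loader = AudioDataLoader(dataset, num_workers=2, batch_sampler=batch_sampler)
#   >>> seqs, targets, seq_lengths, target_lengths = next(iter(loader))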


class BucketingSampler(Sampler):
    r"""
    Samples batches assuming they are in order of size, to batch similarly sized samples together.

    Args:
        data_source (torch.utils.data.Dataset): dataset to sample from
        batch_size (int): size of batch
        drop_last (bool): flag indicating whether to drop the last incomplete batch
    """
    def __init__(self, data_source, batch_size: int = 32, drop_last: bool = False) -> None:
        super(BucketingSampler, self).__init__(data_source)
        self.batch_size = batch_size
        self.data_source = data_source
        self.drop_last = drop_last
        ids = list(range(0, len(data_source)))
        self.bins = [ids[i:i + batch_size] for i in range(0, len(ids), batch_size)]
        # honor drop_last by discarding a trailing incomplete bin
        if self.drop_last and self.bins and len(self.bins[-1]) < batch_size:
            self.bins = self.bins[:-1]

    def __iter__(self):
        for ids in self.bins:
            # shuffle indices within each bin so sample order varies across epochs
            np.random.shuffle(ids)
            yield ids

    def __len__(self):
        return len(self.bins)

    def shuffle(self, epoch):
        # shuffle the order of the bins themselves; `epoch` is accepted for
        # interface compatibility but does not seed the shuffle
        np.random.shuffle(self.bins)
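
# A typical training-loop wiring sketch, assuming `dataset` is sorted by
# duration (shortest first) so each bin holds similarly sized samples;
# `num_epochs` and the inner loop body are hypothetical:
#
#   >>> sampler = BucketingSampler(dataset, batch_size=32, drop_last=True)
#   >>> loader = AudioDataLoader(dataset, num_workers=4, batch_sampler=sampler)
#   >>> for epoch in range(num_epochs):
#   ...     sampler.shuffle(epoch)   # reorder the bins each epoch
#   ...     for seqs, targets, seq_lengths, target_lengths in loader:
#   ...         ...                  # forward / backward pass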