#include <torch/csrc/jit/mobile/train/random.h>
#include <torch/types.h>

#include <algorithm>
#include <cstddef>
#include <vector>

namespace torch {
namespace jit {
namespace mobile {

RandomSampler::RandomSampler(int64_t size, Dtype index_dtype)
    : indices_(torch::randperm(size, index_dtype)) {}

RandomSampler::~RandomSampler() = default;

// Re-shuffles the stored index permutation and rewinds the sampler. If
// `new_size` is provided, the permutation is regenerated at that size.
void RandomSampler::reset(optional<size_t> new_size) {
  // This allocates a new chunk of memory every time (just FYI). It should be
  // amortized over the entire epoch hopefully.
  const auto size = new_size.value_or(static_cast<size_t>(indices_.numel()));
  indices_ = torch::randperm(size, indices_.options());
  index_ = 0;
}

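// Returns the next batch of up to `batch_size` indices from the shuffled
// permutation, or nullopt once the current epoch has been exhausted.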
optional<std::vector<size_t>> RandomSampler::next(size_t batch_size) {
  AT_ASSERT(index_ <= indices_.numel());
  const size_t remaining_indices = indices_.numel() - index_;
  if (remaining_indices == 0) {
    return nullopt;
  }
  std::vector<size_t> index_batch(std::min(batch_size, remaining_indices));
  auto slice = indices_.slice(/*dim=*/0, index_, index_ + index_batch.size());
  // You may want to store your indices with 32 bits or less, but here we need
  // to upcast to 64-bit. A batch itself won't hold too many indices, so that
  // should be ok. Note that if this indeed results in a type promotion, there
  // will be two allocations: one for the upcast slice, and one for the
  // returned `index_batch` vector.
  slice = slice.to(torch::kInt64);
  const auto* data = slice.const_data_ptr<int64_t>();
  std::copy(data, data + index_batch.size(), index_batch.begin());
  index_ += index_batch.size();
  return index_batch;
}

void RandomSampler::save(serialize::OutputArchive& archive) const {
  TORCH_CHECK(false, "Serialization of RandomSampler not supported on mobile.");
}

void RandomSampler::load(serialize::InputArchive& archive) {
  TORCH_CHECK(false, "Serialization of RandomSampler not supported on mobile.");
}

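// Returns how many indices have been consumed so far in the current epoch.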
size_t RandomSampler::index() const noexcept {
  return index_;
}

} // namespace mobile
} // namespace jit
} // namespace torch
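
// Example usage (a minimal sketch, not part of the original file; the dataset
// size of 100 and batch size of 16 are illustrative assumptions):
//
//   RandomSampler sampler(/*size=*/100, torch::kInt64);
//   while (auto batch = sampler.next(/*batch_size=*/16)) {
//     for (size_t i : *batch) {
//       // look up example `i` in the dataset
//     }
//   }
//   sampler.reset(); // reshuffle and rewind before the next epoch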