Skip to content

Commit 155edb6

Browse files
r-barnes authored and facebook-github-bot committed
c10::optional -> std::optional in pytorch/audio/src/libtorio/ffmpeg/stream_reader/stream_processor.h +20
Summary: X-link: pytorch/audio#3792 `c10::optional` was switched to be `std::optional` after PyTorch moved to C++17. Let's eliminate `c10::optional`, if we can. Reviewed By: albanD Differential Revision: D57294285 fbshipit-source-id: b29f8f3d7ac8aee0546dcad71c1b0197278fa0e5
1 parent 9877559 commit 155edb6

File tree

3 files changed

+59
-59
lines changed

3 files changed

+59
-59
lines changed

torchcsprng/csrc/csprng.cpp

Lines changed: 19 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ static const auto TENSOR_DEVICE_TYPE_IS_NOT_SUPPORTED = "tensor device type is n
2626

2727
// ==================================================== Random ========================================================
2828

29-
Tensor& random_(Tensor& self, c10::optional<Generator> gen) {
29+
Tensor& random_(Tensor& self, std::optional<Generator> gen) {
3030
if (self.device().type() == DeviceType::CPU) {
3131
return cpu::random_(self, gen);
3232
#ifdef WITH_CUDA
@@ -39,7 +39,7 @@ Tensor& random_(Tensor& self, c10::optional<Generator> gen) {
3939
}
4040

4141
Tensor& random_from_to(Tensor& self, int64_t from, optional<int64_t> to,
42-
c10::optional<Generator> gen) {
42+
std::optional<Generator> gen) {
4343
if (self.device().type() == DeviceType::CPU) {
4444
return cpu::random_from_to(self, from, to, gen);
4545
#ifdef WITH_CUDA
@@ -52,7 +52,7 @@ Tensor& random_from_to(Tensor& self, int64_t from, optional<int64_t> to,
5252
}
5353

5454
Tensor& random_to(Tensor& self, int64_t to,
55-
c10::optional<Generator> gen) {
55+
std::optional<Generator> gen) {
5656
if (self.device().type() == DeviceType::CPU) {
5757
return cpu::random_to(self, to, gen);
5858
#ifdef WITH_CUDA
@@ -66,7 +66,7 @@ Tensor& random_to(Tensor& self, int64_t to,
6666

6767
// ==================================================== Uniform =======================================================
6868

69-
Tensor& uniform_(Tensor& self, double from, double to, c10::optional<Generator> gen) {
69+
Tensor& uniform_(Tensor& self, double from, double to, std::optional<Generator> gen) {
7070
if (self.device().type() == DeviceType::CPU) {
7171
return cpu::uniform_(self, from, to, gen);
7272
#ifdef WITH_CUDA
@@ -80,7 +80,7 @@ Tensor& uniform_(Tensor& self, double from, double to, c10::optional<Generator>
8080

8181
// ==================================================== Normal ========================================================
8282

83-
Tensor& normal_(Tensor& self, double mean, double std, c10::optional<Generator> gen) {
83+
Tensor& normal_(Tensor& self, double mean, double std, std::optional<Generator> gen) {
8484
if (self.device().type() == DeviceType::CPU) {
8585
return cpu::normal_(self, mean, std, gen);
8686
#ifdef WITH_CUDA
@@ -92,7 +92,7 @@ Tensor& normal_(Tensor& self, double mean, double std, c10::optional<Generator>
9292
}
9393
}
9494

95-
Tensor& normal_Tensor_float_out(const Tensor& mean, double std, c10::optional<Generator> gen, Tensor& output) {
95+
Tensor& normal_Tensor_float_out(const Tensor& mean, double std, std::optional<Generator> gen, Tensor& output) {
9696
if (output.device().type() == DeviceType::CPU) {
9797
return cpu::normal_Tensor_float_out(output, mean, std, gen);
9898
#ifdef WITH_CUDA
@@ -104,7 +104,7 @@ Tensor& normal_Tensor_float_out(const Tensor& mean, double std, c10::optional<Ge
104104
}
105105
}
106106

107-
Tensor& normal_float_Tensor_out(double mean, const Tensor& std, c10::optional<Generator> gen, Tensor& output) {
107+
Tensor& normal_float_Tensor_out(double mean, const Tensor& std, std::optional<Generator> gen, Tensor& output) {
108108
if (output.device().type() == DeviceType::CPU) {
109109
return cpu::normal_float_Tensor_out(output, mean, std, gen);
110110
#ifdef WITH_CUDA
@@ -116,7 +116,7 @@ Tensor& normal_float_Tensor_out(double mean, const Tensor& std, c10::optional<Ge
116116
}
117117
}
118118

119-
Tensor& normal_Tensor_Tensor_out(const Tensor& mean, const Tensor& std, c10::optional<Generator> gen, Tensor& output) {
119+
Tensor& normal_Tensor_Tensor_out(const Tensor& mean, const Tensor& std, std::optional<Generator> gen, Tensor& output) {
120120
if (output.device().type() == DeviceType::CPU) {
121121
return cpu::normal_Tensor_Tensor_out(output, mean, std, gen);
122122
#ifdef WITH_CUDA
@@ -128,7 +128,7 @@ Tensor& normal_Tensor_Tensor_out(const Tensor& mean, const Tensor& std, c10::opt
128128
}
129129
}
130130

131-
Tensor normal_Tensor_float(const Tensor& mean, double std, c10::optional<Generator> gen) {
131+
Tensor normal_Tensor_float(const Tensor& mean, double std, std::optional<Generator> gen) {
132132
if (mean.device().type() == DeviceType::CPU) {
133133
return cpu::normal_Tensor_float(mean, std, gen);
134134
#ifdef WITH_CUDA
@@ -140,7 +140,7 @@ Tensor normal_Tensor_float(const Tensor& mean, double std, c10::optional<Generat
140140
}
141141
}
142142

143-
Tensor normal_float_Tensor(double mean, const Tensor& std, c10::optional<Generator> gen) {
143+
Tensor normal_float_Tensor(double mean, const Tensor& std, std::optional<Generator> gen) {
144144
if (std.device().type() == DeviceType::CPU) {
145145
return cpu::normal_float_Tensor(mean, std, gen);
146146
#ifdef WITH_CUDA
@@ -152,7 +152,7 @@ Tensor normal_float_Tensor(double mean, const Tensor& std, c10::optional<Generat
152152
}
153153
}
154154

155-
Tensor normal_Tensor_Tensor(const Tensor& mean, const Tensor& std, c10::optional<Generator> gen) {
155+
Tensor normal_Tensor_Tensor(const Tensor& mean, const Tensor& std, std::optional<Generator> gen) {
156156
if (mean.device().type() == DeviceType::CPU) {
157157
return cpu::normal_Tensor_Tensor(mean, std, gen);
158158
#ifdef WITH_CUDA
@@ -166,7 +166,7 @@ Tensor normal_Tensor_Tensor(const Tensor& mean, const Tensor& std, c10::optional
166166

167167
// ==================================================== Cauchy ========================================================
168168

169-
Tensor& cauchy_(Tensor& self, double median, double sigma, c10::optional<Generator> gen) {
169+
Tensor& cauchy_(Tensor& self, double median, double sigma, std::optional<Generator> gen) {
170170
if (self.device().type() == DeviceType::CPU) {
171171
return cpu::cauchy_(self, median, sigma, gen);
172172
#ifdef WITH_CUDA
@@ -180,7 +180,7 @@ Tensor& cauchy_(Tensor& self, double median, double sigma, c10::optional<Generat
180180

181181
// ================================================== LogNormal =======================================================
182182

183-
Tensor& log_normal_(Tensor& self, double mean, double std, c10::optional<Generator> gen) {
183+
Tensor& log_normal_(Tensor& self, double mean, double std, std::optional<Generator> gen) {
184184
if (self.device().type() == DeviceType::CPU) {
185185
return cpu::log_normal_(self, mean, std, gen);
186186
#ifdef WITH_CUDA
@@ -194,7 +194,7 @@ Tensor& log_normal_(Tensor& self, double mean, double std, c10::optional<Generat
194194

195195
// ================================================== Geometric =======================================================
196196

197-
Tensor& geometric_(Tensor& self, double p, c10::optional<Generator> gen) {
197+
Tensor& geometric_(Tensor& self, double p, std::optional<Generator> gen) {
198198
if (self.device().type() == DeviceType::CPU) {
199199
return cpu::geometric_(self, p, gen);
200200
#ifdef WITH_CUDA
@@ -208,7 +208,7 @@ Tensor& geometric_(Tensor& self, double p, c10::optional<Generator> gen) {
208208

209209
// ================================================== Exponential =====================================================
210210

211-
Tensor& exponential_(Tensor& self, double lambda, c10::optional<Generator> gen) {
211+
Tensor& exponential_(Tensor& self, double lambda, std::optional<Generator> gen) {
212212
if (self.device().type() == DeviceType::CPU) {
213213
return cpu::exponential_(self, lambda, gen);
214214
#ifdef WITH_CUDA
@@ -248,7 +248,7 @@ namespace {
248248
}
249249

250250
template <typename scalar_t, typename RNG>
251-
void randperm(Tensor& result, int64_t n, c10::optional<at::Generator> generator) {
251+
void randperm(Tensor& result, int64_t n, std::optional<at::Generator> generator) {
252252
auto gen = at::check_generator<RNG>(generator);
253253
scalar_t *r__data = result.data_ptr<scalar_t>();
254254

@@ -271,7 +271,7 @@ namespace {
271271
}
272272
} // namespace
273273

274-
Tensor& randperm_generator_out(int64_t n, c10::optional<Generator> generator, Tensor& result) {
274+
Tensor& randperm_generator_out(int64_t n, std::optional<Generator> generator, Tensor& result) {
275275
TORCH_CHECK(n >= 0, "n must be non-negative, got", n);
276276
check_supported_max_int_with_precision(n, result);
277277
if (result.device().type() == at::kCUDA) {
@@ -317,15 +317,15 @@ Tensor decrypt_pybind(Tensor input, Tensor output, Tensor key, const std::string
317317

318318
// ====================================================================================================================
319319

320-
Generator create_random_device_generator(c10::optional<std::string> token = c10::nullopt) {
320+
Generator create_random_device_generator(std::optional<std::string> token = c10::nullopt) {
321321
if (token.has_value()) {
322322
return make_generator<CSPRNGGeneratorImpl>(*token);
323323
} else {
324324
return make_generator<CSPRNGGeneratorImpl>(true);
325325
}
326326
}
327327

328-
Generator create_mt19937_generator(c10::optional<uint64_t> seed = c10::nullopt) {
328+
Generator create_mt19937_generator(std::optional<uint64_t> seed = c10::nullopt) {
329329
if (seed.has_value()) {
330330
return make_generator<CSPRNGGeneratorImpl>(*seed);
331331
} else {

torchcsprng/csrc/kernels_body.inc

Lines changed: 25 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111
// using `generator`, which must be an instance of `at::CPUGeneratorImpl`
1212
// and passes it to the `device`.
1313
template<typename RNG>
14-
at::Tensor key_tensor(size_t block_t_size, c10::optional<at::Generator> generator) {
14+
at::Tensor key_tensor(size_t block_t_size, std::optional<at::Generator> generator) {
1515
std::lock_guard<std::mutex> lock(generator->mutex());
1616
auto gen = at::check_generator<RNG>(generator);
1717
auto key = torch::empty({static_cast<signed long>(block_t_size)}, torch::kUInt8);
@@ -114,7 +114,7 @@ template <> struct UIntType<bool> { using type = uint32_t; };
114114

115115
template<typename RNG>
116116
struct RandomKernel {
117-
void operator()(TensorIterator& iter, c10::optional<Generator> generator) {
117+
void operator()(TensorIterator& iter, std::optional<Generator> generator) {
118118
const Tensor key_t = aes128_key_tensor<RNG>(*generator).to(iter.device());
119119
const auto key = key_t.data_ptr<uint8_t>();
120120
AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "random_kernel", [&] {
@@ -150,7 +150,7 @@ void random_full_range_kernel_helper(TensorIterator& iter, const uint8_t* key) {
150150

151151
template<typename RNG>
152152
struct RandomFromToKernel {
153-
void operator()(TensorIterator& iter, uint64_t range, int64_t base, c10::optional<Generator> generator) {
153+
void operator()(TensorIterator& iter, uint64_t range, int64_t base, std::optional<Generator> generator) {
154154
const Tensor key_t = aes128_key_tensor<RNG>(*generator).to(iter.device());
155155
const auto key = key_t.data_ptr<uint8_t>();
156156
AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "random_from_to_kernel", [&] {
@@ -166,7 +166,7 @@ struct RandomFromToKernel {
166166
}
167167
});
168168
}
169-
void operator()(TensorIterator& iter, c10::optional<Generator> generator) {
169+
void operator()(TensorIterator& iter, std::optional<Generator> generator) {
170170
const Tensor key_t = aes128_key_tensor<RNG>(*generator).to(iter.device());
171171
const auto key = key_t.data_ptr<uint8_t>();
172172
AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "random_full_64_bits_range_kernel", [&] {
@@ -183,23 +183,23 @@ struct RandomFromToKernel {
183183
}
184184
};
185185

186-
at::Tensor& random_(at::Tensor& self, c10::optional<at::Generator> generator) {
186+
at::Tensor& random_(at::Tensor& self, std::optional<at::Generator> generator) {
187187
return at::native::templates::random_impl<RandomKernel, CSPRNGGeneratorImpl>(self, generator);
188188
}
189189

190-
at::Tensor& random_from_to(at::Tensor& self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator) {
190+
at::Tensor& random_from_to(at::Tensor& self, int64_t from, std::optional<int64_t> to, c10::optional<at::Generator> generator) {
191191
return at::native::templates::random_from_to_impl<RandomFromToKernel, CSPRNGGeneratorImpl>(self, from, to, generator);
192192
}
193193

194-
at::Tensor& random_to(at::Tensor& self, int64_t to, c10::optional<at::Generator> generator) {
194+
at::Tensor& random_to(at::Tensor& self, int64_t to, std::optional<at::Generator> generator) {
195195
return random_from_to(self, 0, to, generator);
196196
}
197197

198198
// ==================================================== Uniform =======================================================
199199

200200
template<typename RNG>
201201
struct UniformKernel {
202-
void operator()(TensorIterator& iter, double from, double to, c10::optional<Generator> generator) {
202+
void operator()(TensorIterator& iter, double from, double to, std::optional<Generator> generator) {
203203
const Tensor key_t = aes128_key_tensor<RNG>(*generator).to(iter.device());
204204
const auto key = key_t.data_ptr<uint8_t>();
205205
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "uniform_kernel", [&] {
@@ -213,15 +213,15 @@ struct UniformKernel {
213213
}
214214
};
215215

216-
at::Tensor& uniform_(at::Tensor& self, double from, double to, c10::optional<at::Generator> generator) {
216+
at::Tensor& uniform_(at::Tensor& self, double from, double to, std::optional<at::Generator> generator) {
217217
return at::native::templates::uniform_impl_<UniformKernel, CSPRNGGeneratorImpl>(self, from, to, generator);
218218
}
219219

220220
// ==================================================== Normal ========================================================
221221

222222
template<typename RNG>
223223
struct NormalKernel {
224-
void operator()(Tensor& self, double mean, double std, c10::optional<Generator> generator) {
224+
void operator()(Tensor& self, double mean, double std, std::optional<Generator> generator) {
225225
auto iter = TensorIterator::nullary_op(self);
226226
const Tensor key_t = aes128_key_tensor<RNG>(*generator).to(iter.device());
227227
const auto key = key_t.data_ptr<uint8_t>();
@@ -236,39 +236,39 @@ struct NormalKernel {
236236
}
237237
};
238238

239-
at::Tensor& normal_(at::Tensor& self, double mean, double std, c10::optional<at::Generator> generator) {
239+
at::Tensor& normal_(at::Tensor& self, double mean, double std, std::optional<at::Generator> generator) {
240240
return at::native::templates::normal_impl_<NormalKernel, CSPRNGGeneratorImpl>(self, mean, std, generator);
241241
}
242242

243-
at::Tensor& normal_Tensor_float_out(at::Tensor& output, const at::Tensor& mean, double std, c10::optional<at::Generator> gen) {
243+
at::Tensor& normal_Tensor_float_out(at::Tensor& output, const at::Tensor& mean, double std, std::optional<at::Generator> gen) {
244244
return at::native::templates::normal_out_impl<NormalKernel, CSPRNGGeneratorImpl>(output, mean, std, gen);
245245
}
246246

247-
at::Tensor& normal_float_Tensor_out(at::Tensor& output, double mean, const at::Tensor& std, c10::optional<at::Generator> gen) {
247+
at::Tensor& normal_float_Tensor_out(at::Tensor& output, double mean, const at::Tensor& std, std::optional<at::Generator> gen) {
248248
return at::native::templates::normal_out_impl<NormalKernel, CSPRNGGeneratorImpl>(output, mean, std, gen);
249249
}
250250

251-
at::Tensor& normal_Tensor_Tensor_out(at::Tensor& output, const at::Tensor& mean, const at::Tensor& std, c10::optional<at::Generator> gen) {
251+
at::Tensor& normal_Tensor_Tensor_out(at::Tensor& output, const at::Tensor& mean, const at::Tensor& std, std::optional<at::Generator> gen) {
252252
return at::native::templates::normal_out_impl<NormalKernel, CSPRNGGeneratorImpl>(output, mean, std, gen);
253253
}
254254

255-
at::Tensor normal_Tensor_float(const at::Tensor& mean, double std, c10::optional<at::Generator> gen) {
255+
at::Tensor normal_Tensor_float(const at::Tensor& mean, double std, std::optional<at::Generator> gen) {
256256
return at::native::templates::normal_impl<NormalKernel, CSPRNGGeneratorImpl>(mean, std, gen);
257257
}
258258

259-
at::Tensor normal_float_Tensor(double mean, const at::Tensor& std, c10::optional<at::Generator> gen) {
259+
at::Tensor normal_float_Tensor(double mean, const at::Tensor& std, std::optional<at::Generator> gen) {
260260
return at::native::templates::normal_impl<NormalKernel, CSPRNGGeneratorImpl>(mean, std, gen);
261261
}
262262

263-
at::Tensor normal_Tensor_Tensor(const at::Tensor& mean, const at::Tensor& std, c10::optional<at::Generator> gen) {
263+
at::Tensor normal_Tensor_Tensor(const at::Tensor& mean, const at::Tensor& std, std::optional<at::Generator> gen) {
264264
return at::native::templates::normal_impl<NormalKernel, CSPRNGGeneratorImpl>(mean, std, gen);
265265
}
266266

267267
// ==================================================== Cauchy ========================================================
268268

269269
template<typename RNG>
270270
struct CauchyKernel {
271-
void operator()(TensorIterator& iter, double median, double sigma, c10::optional<Generator> generator) {
271+
void operator()(TensorIterator& iter, double median, double sigma, std::optional<Generator> generator) {
272272
const Tensor key_t = aes128_key_tensor<RNG>(*generator).to(iter.device());
273273
const auto key = key_t.data_ptr<uint8_t>();
274274
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "cauchy_kernel", [&] {
@@ -282,15 +282,15 @@ struct CauchyKernel {
282282
}
283283
};
284284

285-
at::Tensor& cauchy_(at::Tensor& self, double median, double sigma, c10::optional<at::Generator> generator) {
285+
at::Tensor& cauchy_(at::Tensor& self, double median, double sigma, std::optional<at::Generator> generator) {
286286
return at::native::templates::cauchy_impl_<CauchyKernel, CSPRNGGeneratorImpl>(self, median, sigma, generator);
287287
}
288288

289289
// ================================================== LogNormal =======================================================
290290

291291
template<typename RNG>
292292
struct LogNormalKernel {
293-
void operator()(TensorIterator& iter, double mean, double std, c10::optional<Generator> generator) {
293+
void operator()(TensorIterator& iter, double mean, double std, std::optional<Generator> generator) {
294294
const Tensor key_t = aes128_key_tensor<RNG>(*generator).to(iter.device());
295295
const auto key = key_t.data_ptr<uint8_t>();
296296
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "log_normal", [&] {
@@ -304,15 +304,15 @@ struct LogNormalKernel {
304304
}
305305
};
306306

307-
at::Tensor& log_normal_(at::Tensor& self, double mean, double std, c10::optional<at::Generator> gen) {
307+
at::Tensor& log_normal_(at::Tensor& self, double mean, double std, std::optional<at::Generator> gen) {
308308
return at::native::templates::log_normal_impl_<LogNormalKernel, CSPRNGGeneratorImpl>(self, mean, std, gen);
309309
}
310310

311311
// ================================================== Geometric =======================================================
312312

313313
template<typename RNG>
314314
struct GeometricKernel {
315-
void operator()(TensorIterator& iter, double p, c10::optional<Generator> generator) {
315+
void operator()(TensorIterator& iter, double p, std::optional<Generator> generator) {
316316
const Tensor key_t = aes128_key_tensor<RNG>(*generator).to(iter.device());
317317
const auto key = key_t.data_ptr<uint8_t>();
318318
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "geometric_kernel", [&] {
@@ -326,15 +326,15 @@ struct GeometricKernel {
326326
}
327327
};
328328

329-
at::Tensor& geometric_(at::Tensor& self, double p, c10::optional<at::Generator> gen) {
329+
at::Tensor& geometric_(at::Tensor& self, double p, std::optional<at::Generator> gen) {
330330
return at::native::templates::geometric_impl_<GeometricKernel, CSPRNGGeneratorImpl>(self, p, gen);
331331
}
332332

333333
// ================================================== Exponential =====================================================
334334

335335
template<typename RNG>
336336
struct ExponentialKernel {
337-
void operator()(TensorIterator& iter, double lambda, c10::optional<Generator> generator) {
337+
void operator()(TensorIterator& iter, double lambda, std::optional<Generator> generator) {
338338
const Tensor key_t = aes128_key_tensor<RNG>(*generator).to(iter.device());
339339
const auto key = key_t.data_ptr<uint8_t>();
340340
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "exponential_kernel", [&] {
@@ -348,7 +348,7 @@ struct ExponentialKernel {
348348
}
349349
};
350350

351-
at::Tensor& exponential_(at::Tensor& self, double lambda, c10::optional<at::Generator> gen) {
351+
at::Tensor& exponential_(at::Tensor& self, double lambda, std::optional<at::Generator> gen) {
352352
return at::native::templates::exponential_impl_<ExponentialKernel, CSPRNGGeneratorImpl>(self, lambda, gen);
353353
}
354354

0 commit comments

Comments (0)