diff --git a/.gitignore b/.gitignore
index bab8676e1..edc340f28 100644
--- a/.gitignore
+++ b/.gitignore
@@ -246,7 +246,7 @@ __pycache__/
 # OpenCover UI analysis results
 OpenCover/
 
-# Azure Stream Analytics local run output 
+# Azure Stream Analytics local run output
 ASALocalRun/
 
 # MSBuild Binary and Structured Log
@@ -271,4 +271,5 @@ packages/
 /src/Native/out/build/x64-Debug
 *.code-workspace
 /.idea
+/.vscode
 /test/TorchSharpTest/exportsd.py
diff --git a/src/Native/LibTorchSharp/THSTensor.cpp b/src/Native/LibTorchSharp/THSTensor.cpp
index df13658ee..ae076eb4a 100644
--- a/src/Native/LibTorchSharp/THSTensor.cpp
+++ b/src/Native/LibTorchSharp/THSTensor.cpp
@@ -2200,3 +2200,18 @@ Tensor THSTensor_unflatten_names(Tensor tensor, const char** names, const int64_
 
     return nullptr;
 }
+
+Tensor THSTensor_fake_quantize_per_channel_affine(Tensor tensor, Tensor scale, Tensor zero_point, int64_t axis, int64_t quant_min, int64_t quant_max)
+{
+    at::Tensor res;
+    CATCH(res = at::fake_quantize_per_channel_affine(*tensor, *scale, *zero_point, axis, quant_min, quant_max);)
+    return ResultTensor(res);
+}
+
+Tensor THSTensor_fake_quantize_per_channel_affine_cachemask(Tensor tensor, Tensor scale, Tensor zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, Tensor* mask)
+{
+    std::tuple<at::Tensor, at::Tensor> res;
+    CATCH(res = at::fake_quantize_per_channel_affine_cachemask(*tensor, *scale, *zero_point, axis, quant_min, quant_max);)
+    *mask = ResultTensor(std::get<1>(res));
+    return ResultTensor(std::get<0>(res));
+}
\ No newline at end of file
diff --git a/src/Native/LibTorchSharp/THSTensor.h b/src/Native/LibTorchSharp/THSTensor.h
index 7dfb27b7e..f25cd25fe 100644
--- a/src/Native/LibTorchSharp/THSTensor.h
+++ b/src/Native/LibTorchSharp/THSTensor.h
@@ -1712,3 +1712,9 @@ EXPORT_API(Tensor) THSTensor_kaiser_window(const int64_t len, bool periodic, dou
 EXPORT_API(Tensor) THSTensor_stft(const Tensor x, int64_t n_fft, int64_t hop_length, int64_t win_length, const Tensor window, bool normalized, int64_t onesided, bool return_complex);
 
 EXPORT_API(Tensor) THSTensor_istft(const Tensor x, int64_t n_fft, int64_t hop_length, int64_t win_length, const Tensor window, bool center, bool normalized, int64_t onesided, int64_t length, bool return_complex);
+
+
+// Pointwise Ops
+
+EXPORT_API(Tensor) THSTensor_fake_quantize_per_channel_affine(const Tensor tensor, const Tensor scale, const Tensor zero_point, int64_t axis, int64_t quant_min, int64_t quant_max);
+EXPORT_API(Tensor) THSTensor_fake_quantize_per_channel_affine_cachemask(const Tensor tensor, const Tensor scale, const Tensor zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, Tensor* mask);
\ No newline at end of file
diff --git a/src/TorchSharp/PInvoke/LibTorchSharp.THSTensor.cs b/src/TorchSharp/PInvoke/LibTorchSharp.THSTensor.cs
index b41b7864c..caf08e1f5 100644
--- a/src/TorchSharp/PInvoke/LibTorchSharp.THSTensor.cs
+++ b/src/TorchSharp/PInvoke/LibTorchSharp.THSTensor.cs
@@ -2071,9 +2071,16 @@ internal static extern IntPtr THSTensor_upsample_nearest3d(IntPtr input,
 
         [DllImport("LibTorchSharp")]
         internal static extern IntPtr THSTensor_searchsorted_t(IntPtr sorted_sequence, IntPtr values, bool out_int32, bool right, IntPtr sorter);
+
         [DllImport("LibTorchSharp")]
         internal static extern IntPtr THSTensor_searchsorted_s(IntPtr sorted_sequence, IntPtr values, bool out_int32, bool right, IntPtr sorter);
 
+        [DllImport("LibTorchSharp")]
+        internal static extern IntPtr THSTensor_fake_quantize_per_channel_affine(IntPtr tensor, IntPtr scale, IntPtr zero_point, long axis, long quant_min, long quant_max);
+
+        [DllImport("LibTorchSharp")]
+        internal static extern IntPtr THSTensor_fake_quantize_per_channel_affine_cachemask(IntPtr tensor, IntPtr scale, IntPtr zero_point, long axis, long quant_min, long quant_max, out IntPtr mask);
+
         [DllImport("LibTorchSharp")]
         internal static extern IntPtr THSTensor_histogram_t(IntPtr input, IntPtr bins, IntPtr weight, bool density, out IntPtr r_bin_edges);
         [DllImport("LibTorchSharp")]
diff --git a/src/TorchSharp/Tensor/Tensor.PointwiseOps.cs b/src/TorchSharp/Tensor/Tensor.PointwiseOps.cs
new file mode 100644
index 000000000..944cebfcd
--- /dev/null
+++ b/src/TorchSharp/Tensor/Tensor.PointwiseOps.cs
@@ -0,0 +1,50 @@
+// Copyright (c) .NET Foundation and Contributors. All Rights Reserved. See LICENSE in the project root for license information.
+#nullable enable
+using System;
+using static TorchSharp.PInvoke.LibTorchSharp;
+
+namespace TorchSharp
+{
+    public static partial class torch
+    {
+        public partial class Tensor
+        {
+            // https://pytorch.org/docs/stable/generated/torch.fake_quantize_per_channel_affine
+            /// <summary>
+            /// Returns a new tensor with the data in this fake quantized per channel using
+            /// <paramref name="scale"/>, <paramref name="zero_point"/>, <paramref name="quant_min"/> and <paramref name="quant_max"/>,
+            /// across the channel specified by <paramref name="axis"/>.
+            /// </summary>
+            /// <param name="scale">quantization scale, per channel (float32)</param>
+            /// <param name="zero_point">quantization zero_point, per channel (torch.int32, torch.half, or torch.float32)</param>
+            /// <param name="axis">channel axis</param>
+            /// <param name="quant_min">lower bound of the quantized domain</param>
+            /// <param name="quant_max">upper bound of the quantized domain</param>
+            /// <returns>A newly fake_quantized per channel torch.float32 tensor</returns>
+            public Tensor fake_quantize_per_channel_affine(Tensor scale, Tensor zero_point, long axis, long quant_min, long quant_max)
+            {
+                var res = THSTensor_fake_quantize_per_channel_affine(
+                    Handle, scale.Handle, zero_point.Handle,
+                    axis, quant_min, quant_max);
+
+                if (res == IntPtr.Zero)
+                    CheckForErrors();
+
+                return new Tensor(res);
+            }
+
+            // see: aten/src/ATen/native/quantized/FakeQuantPerChannelAffine.cpp
+            internal (Tensor res, Tensor mask) fake_quantize_per_channel_affine_cachemask(Tensor scale, Tensor zero_point, long axis, long quant_min, long quant_max)
+            {
+                var res = THSTensor_fake_quantize_per_channel_affine_cachemask(
+                    Handle, scale.Handle, zero_point.Handle,
+                    axis, quant_min, quant_max, out IntPtr mask);
+
+                if (res == IntPtr.Zero || mask == IntPtr.Zero)
+                    CheckForErrors();
+
+                return (new Tensor(res), new Tensor(mask));
+            }
+        }
+    }
+}
\ No newline at end of file
diff --git a/src/TorchSharp/Tensor/torch.PointwiseOps.cs b/src/TorchSharp/Tensor/torch.PointwiseOps.cs
index 0fccbd8ce..54deed818 100644
--- a/src/TorchSharp/Tensor/torch.PointwiseOps.cs
+++ b/src/TorchSharp/Tensor/torch.PointwiseOps.cs
@@ -741,9 +741,9 @@ public static Tensor addcmul_(Tensor input, Tensor tensor1, Tensor tensor2, Scal
         /// <param name="quant_min">lower bound of the quantized domain</param>
         /// <param name="quant_max">upper bound of the quantized domain</param>
         /// <returns>A newly fake_quantized per channel torch.float32 tensor</returns>
-        [Pure, Obsolete("not implemented", true)]
+        [Pure]
         public static Tensor fake_quantize_per_channel_affine(Tensor input, Tensor scale, Tensor zero_point, int axis, long quant_min, long quant_max)
-            => throw new NotImplementedException();
+            => input.fake_quantize_per_channel_affine(scale, zero_point, axis, quant_min, quant_max);
 
         // https://pytorch.org/docs/stable/generated/torch.fake_quantize_per_tensor_affine
         /// <summary>
diff --git a/src/TorchVision/AdjustGamma.cs b/src/TorchVision/AdjustGamma.cs
index 89cd0b5ae..ca0e16da5 100644
--- a/src/TorchVision/AdjustGamma.cs
+++ b/src/TorchVision/AdjustGamma.cs
@@ -1,4 +1,5 @@
 // Copyright (c) .NET Foundation and Contributors. All Rights Reserved. See LICENSE in the project root for license information.
+#nullable enable
 using System;
 
 using static TorchSharp.torch;
@@ -19,16 +20,16 @@ internal AdjustGamma(double gamma, double gain = 1.0)
             public Tensor call(Tensor img)
             {
                 var dtype = img.dtype;
-                if (!torch.is_floating_point(img))
-                    img = transforms.ConvertImageDtype(torch.float32).call(img);
+                if (!is_floating_point(img))
+                    img = transforms.ConvertImageDtype(float32).call(img);
 
                 img = (gain * img.pow(gamma)).clamp(0, 1);
 
-                return transforms.ConvertImageDtype(dtype).call(img); ;
+                return transforms.ConvertImageDtype(dtype).call(img);
             }
 
-            private double gamma;
-            private double gain;
+            private readonly double gamma;
+            private readonly double gain;
         }
 
         public static partial class transforms
@@ -44,10 +45,7 @@
             /// <param name="gain">
             /// The constant multiplier in the gamma correction equation.
             /// </param>
-            static public ITransform AdjustGamma(double gamma, double gain = 1.0)
-            {
-                return new AdjustGamma(gamma);
-            }
+            public static ITransform AdjustGamma(double gamma, double gain = 1.0) => new AdjustGamma(gamma, gain);
         }
     }
 }
diff --git a/test/TorchSharpTest/TestTorchTensor.cs b/test/TorchSharpTest/TestTorchTensor.cs
index eda466b65..508275514 100644
--- a/test/TorchSharpTest/TestTorchTensor.cs
+++ b/test/TorchSharpTest/TestTorchTensor.cs
@@ -7724,6 +7724,7 @@ public void TestCartesianProd()
         }
 
         [Fact]
+        [TestOf(nameof(torch.combinations))]
        public void TestCombinations()
        {
            var t = torch.arange(5);
@@ -7735,6 +7736,7 @@ public void TestCombinations()
         }
 
         [Fact]
+        [TestOf(nameof(torch.cdist))]
        public void TestCDist()
        {
            var a = torch.randn(3, 2);
@@ -7746,6 +7748,7 @@ public void TestCDist()
         }
 
         [Fact]
+        [TestOf(nameof(torch.rot90))]
        public void TestRot90()
        {
            var a = torch.arange(8).view(2, 2, 2);
@@ -7756,6 +7759,7 @@ public void TestRot90()
         }
 
         [Fact]
+        [TestOf(nameof(torch.diag_embed))]
        public void TestDiagembed()
        {
            var a = torch.randn(2, 3);
@@ -7768,6 +7772,7 @@ public void TestDiagembed()
         }
 
         [Fact]
+        [TestOf(nameof(torch.searchsorted))]
        public void TestSearchSorted()
        {
            var ss = torch.from_array(new long[] { 1, 3, 5, 7, 9, 2, 4, 6, 8, 10 }).reshape(2, -1);
@@ -7782,6 +7787,7 @@ public void TestSearchSorted()
         }
 
         [Fact]
+        [TestOf(nameof(torch.histogram))]
        public void TestHistogram()
        {
            // https://pytorch.org/docs/stable/generated/torch.histogram.html
@@ -7804,6 +7810,17 @@ public void TestHistogram()
             Assert.True(bin_edges.allclose(torch.tensor(new double[] { 0, 1, 2, 3 }), 0.001));
         }
 
+        [Fact]
+        [TestOf(nameof(torch.fake_quantize_per_channel_affine))]
+        public void TestFakeQuantizePerChannelAffine()
+        {
+            var x = torch.rand(2, 2, 2);
+            var scales = (torch.randn(2) + 1d) * 0.05d;
+            var zero_points = torch.zeros(2).to(torch.int32);
+            var result = torch.fake_quantize_per_channel_affine(x, scales, zero_points, axis: 0, quant_min: 0, quant_max: 255);
+            Assert.True(true);
+        }
+
        [Fact]
        public void TestHistogramOptimBinNums()
        {
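
Reviewer note: a minimal usage sketch of the API this patch wires up, for context only; it is not part of the diff. It assumes a TorchSharp build containing this change, and uses only factory calls that appear elsewhere in the patch (torch.randn, torch.from_array, torch.zeros(...).to(...)). Semantically this should follow PyTorch's fake_quantize_per_channel_affine: out = (clamp(round(input / scale + zero_point), quant_min, quant_max) - zero_point) * scale, applied per channel along `axis`.

// Usage sketch (C#, illustrative only).
using TorchSharp;
using static TorchSharp.torch;

var x = torch.randn(2, 2, 2);                               // float32 input
var scales = torch.from_array(new float[] { 0.10f, 0.05f }); // one scale per channel (dim 0)
var zero_points = torch.zeros(2).to(torch.int32);            // one zero_point per channel
var y = torch.fake_quantize_per_channel_affine(
    x, scales, zero_points, axis: 0, quant_min: 0, quant_max: 255);
// y keeps x's shape and float32 dtype; each channel's values are snapped
// to that channel's quantization grid.

The _cachemask variant stays internal here, which matches ATen: per the referenced FakeQuantPerChannelAffine.cpp, the mask it returns exists so the backward pass can be computed cheaply, and callers of the public API do not need it.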