I was looking at this file in the PyTorch repo: https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/quantized/cpu/conv_packed_params.h
#pragma once

#include <ATen/ATen.h>
#include <ATen/core/ivalue.h>

template <int kSpatialDim = 2>
struct ConvPackedParamsBase : public torch::jit::CustomClassHolder {
  virtual at::Tensor apply(
      const at::Tensor& input,
      double output_scale,
      int64_t output_zero_point) = 0;
  virtual at::Tensor apply_relu(
      const at::Tensor& input,
      double output_scale,
      int64_t output_zero_point) = 0;
  virtual at::Tensor apply_dynamic(
      const at::Tensor& input,
      bool reduce_range) = 0;

  virtual std::tuple<at::Tensor, c10::optional<at::Tensor>> unpack() = 0;

  virtual torch::List<int64_t> stride() const = 0;
  virtual torch::List<int64_t> padding() const = 0;
  virtual torch::List<int64_t> output_padding() const = 0;
  virtual torch::List<int64_t> dilation() const = 0;
  virtual int64_t groups() const = 0;
  virtual bool transpose() const = 0;
};
This appears to be a class template, but the template parameter kSpatialDim is never used anywhere in the class body. What is the purpose of this kind of design?
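To illustrate what I find puzzling: even though the parameter never appears in the body, each value of kSpatialDim still produces a distinct, unrelated type, so my only guess is that it exists purely to tag the type (e.g. to keep 2D and 3D packed params apart). Here is a minimal toy sketch of my own (PackedParamsBase, Packed2d, and Packed3d are made-up names, not PyTorch code) showing that behavior:

#include <iostream>
#include <type_traits>

// Toy analogue of ConvPackedParamsBase: the parameter is unused in the body,
// yet each instantiation is a separate C++ type.
template <int kSpatialDim = 2>
struct PackedParamsBase {
  virtual ~PackedParamsBase() = default;
  virtual int groups() const = 0;
};

// Hypothetical concrete subclasses for 2D and 3D cases.
struct Packed2d : PackedParamsBase<2> {
  int groups() const override { return 1; }
};
struct Packed3d : PackedParamsBase<3> {
  int groups() const override { return 1; }
};

int main() {
  // The two instantiations are distinct types, so overloads, registrations,
  // or containers keyed on the type can tell 2D and 3D params apart.
  static_assert(!std::is_same<PackedParamsBase<2>, PackedParamsBase<3>>::value,
                "distinct types despite the unused parameter");
  Packed2d p2;
  Packed3d p3;
  std::cout << p2.groups() + p3.groups() << "\n";  // prints 2
  return 0;
}

Is type tagging along these lines really all the parameter is for, or is there something else going on?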