
C++-Based BNN Inference

FPGA硅农 · Published 2021-02-22 15:34:21

The BNN is trained in PyTorch, with both weights and activations quantized to -1 and +1, and padding done with -1. To deploy it on hardware, inference applies the following transformation: -1 is encoded as 0, +1 as 1, and multiplication is replaced by XNOR. Since this changes the accumulated sum (previously (-1)×(-1)=1, 1×1=1, 1×(-1)=-1, whereas now 1 xnor 1 = 1, 0 xnor 0 = 1, 1 xnor 0 = 0), the final convolution result must be corrected: each ±1 product equals 2·(xnor result) − 1, so summing over the $ch\_in \cdot k \cdot k$ positions gives

$$conv\_out = 2 \cdot xnor\_conv(x, w) - ch\_in \cdot k \cdot k + bias$$

The BN layer then computes

$$bn\_out = \frac{conv\_out - \mu}{\sigma} \cdot \gamma + \beta$$

To speed up inference, we fuse the BN layer with this correction of the convolution output:

$$\gamma' = \frac{2\gamma}{\sigma}, \qquad \beta' = \frac{bias - ch\_in \cdot k \cdot k - \mu}{\sigma} \cdot \gamma + \beta$$

After fusion, the convolution plus BN computation simplifies to

$$xnor\_conv\_out = xnor\_conv(x, w)$$
$$bn\_out = \gamma' \cdot xnor\_conv\_out + \beta'$$
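As a quick sanity check (an illustrative sketch, not from the original post), the XNOR identity and the BN fusion can be verified numerically; N and the scalar BN parameters below are arbitrary stand-ins:

import torch

torch.manual_seed(0)
N = 16 * 5 * 5                        # ch_in * k * k accumulation length
x_pm = torch.sign(torch.randn(N))     # activations in {-1, +1}
w_pm = torch.sign(torch.randn(N))     # weights in {-1, +1}

# {0,1} encoding: -1 -> 0, +1 -> 1
x01, w01 = (x_pm + 1) / 2, (w_pm + 1) / 2
xnor_count = (x01 == w01).sum().item()    # number of XNOR matches

dot = (x_pm * w_pm).sum().item()          # true +-1 dot product
assert dot == 2 * xnor_count - N          # each product equals 2*xnor - 1

# fusion check with arbitrary scalar BN parameters
mu, sigma, gamma, beta, bias = 0.3, 1.7, 1.2, -0.5, 0.1
conv_out = dot + bias
bn_out = (conv_out - mu) / sigma * gamma + beta

gamma_p = 2 * gamma / sigma
beta_p = (bias - N - mu) / sigma * gamma + beta
assert abs(bn_out - (gamma_p * xnor_count + beta_p)) < 1e-6

The following PyTorch code trains the network, stores the weights, and simulates inference: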

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import torch.nn as nn
import torch
import torch.nn.functional as F
from torch.autograd import Function

# ********************* Binary (+-1) ***********************
# A
class Binary_a(Function):

    @staticmethod
    def forward(self, input):
        self.save_for_backward(input)
        output = torch.sign(input)
        return output

    @staticmethod
    def backward(self, grad_output):
        input, = self.saved_tensors
        # *******************ste*********************
        grad_input = grad_output.clone()
        # ****************saturate_ste***************
        grad_input[input.ge(1)] = 0
        grad_input[input.le(-1)] = 0
        return grad_input
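# Note (illustrative): Binary_a.apply(torch.tensor([-0.3, 0.7])) gives
# tensor([-1., 1.]). The backward pass is a saturating straight-through
# estimator: sign() has zero gradient almost everywhere, so the incoming
# gradient is passed through unchanged where |input| <= 1 and zeroed
# where |input| > 1.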


# W
class Binary_w(Function):

    @staticmethod
    def forward(self, input):
        output = torch.sign(input)
        return output

    @staticmethod
    def backward(self, grad_output):
        # *******************ste*********************
        grad_input = grad_output.clone()
        return grad_input


# ********************* Ternary (+-1, 0) ***********************
class Ternary(Function):

    @staticmethod
    def forward(self, input):
        # **************** per-output-channel E(|W|) ****************
        E = torch.mean(torch.abs(input), (3, 2, 1), keepdim=True)
        # **************** threshold ****************
        threshold = E * 0.7
        # ************** W -> +-1, 0 **************
        output = torch.sign(
            torch.add(torch.sign(torch.add(input, threshold)), torch.sign(torch.add(input, -threshold))))
        return output, threshold

    @staticmethod
    def backward(self, grad_output, grad_threshold):
        # *******************ste*********************
        grad_input = grad_output.clone()
        return grad_input
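# Note: the 0.7 * E(|W|) threshold is the heuristic from Ternary Weight
# Networks (Li & Liu, 2016). The nested sign() expression maps weights above
# +threshold to +1, below -threshold to -1, and everything in between to 0.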


# ********************* A (activation) quantization (binary) ***********************
class activation_bin(nn.Module):
    def __init__(self, A):
        super().__init__()
        self.A = A
        self.relu = nn.ReLU(inplace=True)

    def binary(self, input):
        output = Binary_a.apply(input)
        return output

    def forward(self, input):
        if self.A == 2:
            output = self.binary(input)
            # ******************** A -> 1, 0 *********************
            # a = torch.clamp(a, min=0)
        else:
            output = self.relu(input)
        return output


# ********************* W (weight) quantization (ternary/binary) ***********************
def meancenter_clampConvParams(w):
    mean = w.data.mean(1, keepdim=True)
    w.data.sub_(mean)  # center W along the input-channel dimension (in place)
    w.data.clamp_(-1.0, 1.0)  # clamp W to [-1, 1] (in place)
    return w


class weight_tnn_bin(nn.Module):
    def __init__(self, W):
        super().__init__()
        self.W = W

    def binary(self, input):
        output = Binary_w.apply(input)
        return output

    def ternary(self, input):
        output = Ternary.apply(input)
        return output

    def forward(self, input):
        if self.W == 2 or self.W == 3:
            # **************************************** binary W *****************************************
            if self.W == 2:
                output = meancenter_clampConvParams(input)  # center and clamp W
                # **************** per-output-channel E(|W|) ****************
                E = torch.mean(torch.abs(output), (3, 2, 1), keepdim=True)
                # **************** alpha (scaling factor) ****************
                alpha = E
                # ************** W -> +-1 **************
                output = self.binary(output)
                # ************** W * alpha **************
                # output = output * alpha  # uncomment to apply the scaling factor alpha
                # **************************************** ternary W *****************************************
            elif self.W == 3:
                output_fp = input.clone()
                # ************** W -> +-1, 0 **************
                output, threshold = self.ternary(input)
                # **************** alpha (scaling factor) ****************
                output_abs = torch.abs(output_fp)
                mask_le = output_abs.le(threshold)
                mask_gt = output_abs.gt(threshold)
                output_abs[mask_le] = 0
                output_abs_th = output_abs.clone()
                output_abs_th_sum = torch.sum(output_abs_th, (3, 2, 1), keepdim=True)
                mask_gt_sum = torch.sum(mask_gt, (3, 2, 1), keepdim=True).float()
                alpha = output_abs_th_sum / mask_gt_sum  # alpha (scaling factor)
                # *************** W * alpha ****************
                output = output * alpha  # comment out if the scaling factor alpha is not needed
        else:
            output = input
        return output


# ********************* Quantized convolution (quantizes A and W, then convolves) ***********************
class Conv2d_Q(nn.Conv2d):
    def __init__(
            self,
            in_channels,
            out_channels,
            kernel_size,
            stride=1,
            padding=0,
            dilation=1,
            groups=1,
            bias=True,
            A=2,
            W=2
    ):
        super().__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias
        )
        # instantiate the A and W quantizers
        self.activation_quantizer = activation_bin(A=A)
        self.weight_quantizer = weight_tnn_bin(W=W)

    def forward(self, input):
        # quantize A and W
        bin_input = self.activation_quantizer(input)
        tnn_bin_weight = self.weight_quantizer(self.weight)
        # pad with -1
        padding_tuple = (self.padding[0], self.padding[0], self.padding[0], self.padding[0])
        bin_input_pad = F.pad(input=bin_input, pad=padding_tuple, mode='constant', value=-1)
        # convolve with the quantized A and W
        output = F.conv2d(
            input=bin_input_pad,
            weight=tnn_bin_weight,
            bias=self.bias,
            stride=self.stride,
            padding=0,
            dilation=self.dilation,
            groups=self.groups)
        return output
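# Note: padding with value=-1 in the +-1 domain corresponds to padding with 0
# in the {0,1} encoding used at inference time (-1 -> 0), which is why my_conv
# below pads with padding_value=0. F.conv2d is called with padding=0 because
# the input has already been padded explicitly above.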


# ********************* Quantized (ternary/binary) conv + BN block *********************
class Tnn_Bin_Conv2d(nn.Module):
    # parameter last_relu: apply ReLU after the final conv layer
    def __init__(self, input_channels, output_channels,
                 kernel_size=-1, stride=-1, padding=-1, groups=1, last_relu=0, A=2, W=2):
        super(Tnn_Bin_Conv2d, self).__init__()
        self.A = A
        self.W = W
        self.last_relu = last_relu

        # ********************* quantized (ternary/binary) convolution *********************
        self.tnn_bin_conv = Conv2d_Q(input_channels, output_channels,
                                     kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, A=A, W=W)
        self.bn = nn.BatchNorm2d(output_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.tnn_bin_conv(x)
        x = self.bn(x)
        if self.last_relu:
            x = self.relu(x)
        return x


class Net(nn.Module):
    def __init__(self, cfg=None, A=2, W=2):
        super(Net, self).__init__()
        # model definition
        if cfg is None:
            cfg = [16, 32, 64, 10]
        self.tnn_bin = nn.Sequential(
            nn.Conv2d(1, cfg[0], kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(cfg[0]),
            nn.MaxPool2d(kernel_size=2, stride=2),

            Tnn_Bin_Conv2d(cfg[0], cfg[1], kernel_size=5, stride=1, padding=2, A=A, W=W),
            Tnn_Bin_Conv2d(cfg[1], cfg[1], kernel_size=5, stride=1, padding=2, A=A, W=W),
            nn.MaxPool2d(kernel_size=2, stride=2),

            Tnn_Bin_Conv2d(cfg[1], cfg[2], kernel_size=5, stride=1, padding=2, A=A, W=W),
            Tnn_Bin_Conv2d(cfg[2], cfg[3], kernel_size=5, stride=1, padding=2, last_relu=1, A=A, W=W),
            nn.AvgPool2d(kernel_size=7, stride=1, padding=0),
        )

    def forward(self, x):
        x = self.tnn_bin(x)
        x = x.view(x.size(0), -1)
        return x


import numpy as np
import torch.optim as optim
from torch.autograd import Variable
import torchvision
import torchvision.transforms as transforms

device = torch.device('cuda:0')


# random seed -- makes training results reproducible
def setup_seed(seed):
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    torch.backends.cudnn.deterministic = True


# learning-rate schedule for training
def adjust_learning_rate(optimizer, epoch):
    update_list = [10, 20, 30, 40, 50]
    if epoch in update_list:
        for param_group in optimizer.param_groups:
            param_group['lr'] = param_group['lr'] * 0.2
    return


# train the model
def train(epoch):
    model.train()

    for batch_idx, (data, target) in enumerate(train_loader):
        # forward pass
        data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)
        output = model(data)
        loss = criterion(output, target)

        # backward pass
        optimizer.zero_grad()
        loss.backward()  # compute gradients
        optimizer.step()  # update parameters

        # print the training loss every 100 batches
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tLR: {}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                       100. * batch_idx / len(train_loader), loss.data.item(),
                optimizer.param_groups[0]['lr']))
    return


# test the model
def test():
    global best_acc
    model.eval()
    test_loss = 0
    correct = 0

    for data, target in test_loader:
        data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)
        # forward pass
        output = model(data)
        test_loss += criterion(output, target).data.item()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()
    # test accuracy
    acc = 100. * float(correct) / len(test_loader.dataset)

    print(acc)


def my_conv(x, w, kernel_size, stride, padding, padding_value):
    batch_size, channel_in, height, width = x.size()
    channel_out, channel_in, kx, ky = w.size()
    x_pad = torch.nn.functional.pad(input=x, pad=(padding, padding, padding, padding), mode='constant',
                                        value=padding_value)
    h_out = int((height + 2 * padding - kernel_size) / stride + 1)
    w_out = int((width + 2 * padding - kernel_size) / stride + 1)
    out = torch.zeros((int(batch_size), int(channel_out), int(h_out), int(w_out)))
    for b in range(batch_size):
        for ch in range(channel_out):
            for i in range(h_out):
                for j in range(w_out):
                    out[b,ch,i,j]=torch.sum(torch.eq(x_pad[b,:,i*stride:i*stride+kernel_size,
                                                     j*stride:j*stride+kernel_size],w[ch,:,:,:]))
    return out
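# Note: torch.eq counts positions where the {0,1} activation equals the {0,1}
# weight, i.e. it accumulates XNOR matches (padding included). By the identity
# derived above, the +-1-domain convolution equals 2*count - ch_in*k*k; that
# affine correction is folded into gamma'/beta' by param_gen, so my_conv
# returns the raw match count.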

def param_gen(gamma,beta,mean,var,bias,channel,kernel_size):
    gamma_1=2*gamma/torch.sqrt(var)
    beta_1=(bias-mean-channel*kernel_size*kernel_size)/torch.sqrt(var)*gamma+beta
    return gamma_1,beta_1
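# Note: sigma is taken as sqrt(running_var), while nn.BatchNorm2d normalizes by
# sqrt(running_var + eps) with eps=1e-5; the tiny mismatch is negligible here.
# gamma_1 and beta_1 are the fused gamma' = 2*gamma/sigma and
# beta' = (bias - ch_in*k*k - mu)/sigma * gamma + beta derived above.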

if __name__ == '__main__':
    setup_seed(1)  # random seed -- makes training results reproducible

    train_dataset = torchvision.datasets.MNIST(root='../../data',
                                               train=True,
                                               transform=transforms.ToTensor(),
                                               download=True)

    test_dataset = torchvision.datasets.MNIST(root='../../data',
                                              train=False,
                                              transform=transforms.ToTensor())

    # Data loader
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=128,
                                               shuffle=True)

    test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                              batch_size=128,
                                              shuffle=False)

    print('******Initializing model******')
    # ******************** quantize both A (activations) and W (weights) in the model's quantized conv layers ************************
    model = Net(A=2, W=2)
    best_acc = 0
    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            nn.init.xavier_uniform_(m.weight.data)
            m.bias.data.zero_()
        elif isinstance(m, nn.Linear):
            m.weight.data.normal_(0, 0.01)
            m.bias.data.zero_()

    # move the model to the device
    model.to(device)
    print(model)
    # loss function
    criterion = nn.CrossEntropyLoss()
    # optimizer
    optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=0.0)

    # train the model
    for epoch in range(1, 20):
        adjust_learning_rate(optimizer, epoch)
        train(epoch)
        test()

    param = model.state_dict()

    WeightBin = weight_tnn_bin(2)

    # floating-point conv layer
    Wc1 = param['tnn_bin.0.weight'].cpu()
    bc1 = param['tnn_bin.0.bias'].cpu()
    # BN layer
    bn1_mean = param['tnn_bin.1.running_mean'].cpu()
    bn1_var = param['tnn_bin.1.running_var'].cpu()
    bn1_gamma = param['tnn_bin.1.weight'].cpu()
    bn1_beta = param['tnn_bin.1.bias'].cpu()
    # binary conv layers 1 and 2
    BWc1 = WeightBin.forward(param['tnn_bin.3.tnn_bin_conv.weight']).cpu()
    BWc1 = (BWc1+1)/2  # map {-1, +1} -> {0, 1}
    Bbc1 = param['tnn_bin.3.tnn_bin_conv.bias'].cpu()
    bn2_mean = param['tnn_bin.3.bn.running_mean'].cpu()
    bn2_var = param['tnn_bin.3.bn.running_var'].cpu()
    bn2_gamma = param['tnn_bin.3.bn.weight'].cpu()
    bn2_beta = param['tnn_bin.3.bn.bias'].cpu()

    gamma2,beta2=param_gen(gamma=bn2_gamma,beta=bn2_beta,mean=bn2_mean,var=bn2_var,bias=Bbc1,channel=16,kernel_size=5)

    BWc2 = WeightBin.forward(param['tnn_bin.4.tnn_bin_conv.weight']).cpu()
    BWc2 = (BWc2+1)/2
    Bbc2 = param['tnn_bin.4.tnn_bin_conv.bias'].cpu()
    bn3_mean = param['tnn_bin.4.bn.running_mean'].cpu()
    bn3_var = param['tnn_bin.4.bn.running_var'].cpu()
    bn3_gamma = param['tnn_bin.4.bn.weight'].cpu()
    bn3_beta = param['tnn_bin.4.bn.bias'].cpu()

    gamma3, beta3 = param_gen(gamma=bn3_gamma, beta=bn3_beta, mean=bn3_mean, var=bn3_var, bias=Bbc2, channel=32,
                              kernel_size=5)
    # binary conv layers 3 and 4
    BWc3 = WeightBin.forward(param['tnn_bin.6.tnn_bin_conv.weight']).cpu()
    BWc3 = (BWc3+1)/2
    Bbc3 = param['tnn_bin.6.tnn_bin_conv.bias'].cpu()
    bn4_mean = param['tnn_bin.6.bn.running_mean'].cpu()
    bn4_var = param['tnn_bin.6.bn.running_var'].cpu()
    bn4_gamma = param['tnn_bin.6.bn.weight'].cpu()
    bn4_beta = param['tnn_bin.6.bn.bias'].cpu()

    gamma4, beta4 = param_gen(gamma=bn4_gamma, beta=bn4_beta, mean=bn4_mean, var=bn4_var, bias=Bbc3, channel=32,
                              kernel_size=5)

    BWc4 = WeightBin.forward(param['tnn_bin.7.tnn_bin_conv.weight']).cpu()
    BWc4 = (BWc4+1)/2
    Bbc4 = param['tnn_bin.7.tnn_bin_conv.bias'].cpu()
    bn5_mean = param['tnn_bin.7.bn.running_mean'].cpu()
    bn5_var = param['tnn_bin.7.bn.running_var'].cpu()
    bn5_gamma = param['tnn_bin.7.bn.weight'].cpu()
    bn5_beta = param['tnn_bin.7.bn.bias'].cpu()

    gamma5, beta5 = param_gen(gamma=bn5_gamma, beta=bn5_beta, mean=bn5_mean, var=bn5_var, bias=Bbc4, channel=64,
                              kernel_size=5)

    Wc1.numpy().tofile("Wc1.bin")
    bc1.numpy().tofile("bc1.bin")
    bn1_gamma.numpy().tofile("bn1_gamma.bin")
    bn1_beta.numpy().tofile("bn1_beta.bin")
    bn1_mean.numpy().tofile("bn1_mean.bin")
    bn1_var.numpy().tofile("bn1_var.bin")
    BWc1.numpy().tofile("BWc1.bin")
    BWc2.numpy().tofile("BWc2.bin")
    BWc3.numpy().tofile("BWc3.bin")
    BWc4.numpy().tofile("BWc4.bin")
    gamma2.numpy().tofile("gamma2.bin")
    beta2.numpy().tofile("beta2.bin")
    gamma3.numpy().tofile("gamma3.bin")
    beta3.numpy().tofile("beta3.bin")
    gamma4.numpy().tofile("gamma4.bin")
    beta4.numpy().tofile("beta4.bin")
    gamma5.numpy().tofile("gamma5.bin")
    beta5.numpy().tofile("beta5.bin")
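    # Note: .tofile writes raw float32 values in C (row-major) order with no
    # shape header, so the reading program must reshape each array itself,
    # e.g. np.fromfile("BWc1.bin", dtype=np.float32) reshaped to (32, 16, 5, 5).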

    correct = 0
    for batch_idx, (data, target) in enumerate(train_loader):

        x = F.conv2d(data, Wc1, bias=bc1, stride=1, padding=2)
        x = F.batch_norm(x, running_mean=bn1_mean, running_var=bn1_var, weight=bn1_gamma,bias=bn1_beta)
        x = F.max_pool2d(x, kernel_size=2, stride=2)

        x = (torch.sign(x)+1)/2
        x = my_conv(x,BWc1,kernel_size=5,stride=1,padding=2,padding_value=0)
        x=x*gamma2.view(1,-1,1,1)+beta2.view(1,-1,1,1)

        x = (torch.sign(x)+1)/2
        x = my_conv(x, BWc2, kernel_size=5, stride=1, padding=2, padding_value=0)
        x = x * gamma3.view(1, -1, 1, 1) + beta3.view(1, -1, 1, 1)
        x = F.max_pool2d(x, kernel_size=2, stride=2)

        x = (torch.sign(x)+1)/2
        x = my_conv(x, BWc3, kernel_size=5, stride=1, padding=2, padding_value=0)
        x = x * gamma4.view(1, -1, 1, 1) + beta4.view(1, -1, 1, 1)

        x = (torch.sign(x)+1)/2
        x = my_conv(x, BWc4, kernel_size=5, stride=1, padding=2, padding_value=0)
        x = x * gamma5.view(1, -1, 1, 1) + beta5.view(1, -1, 1, 1)
        x=torch.relu(x)

        x = F.avg_pool2d(x, kernel_size=7)

        output = torch.argmax(x, axis=1)
        for i in range(data.size(0)):
            if target[i] == output[i]:
                correct += 1
        print(correct/data.size(0))
        correct=0
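Since the stored weights and activations are single bits in {0, 1}, they can be packed into machine words before being shipped to the FPGA, so that the hardware evaluates one word of XNORs per popcount. Below is a minimal illustrative sketch of such packing with NumPy (not part of the original flow; the bit order and word width must match what the hardware design expects):

import numpy as np

def pack_binary_weights(w01):
    # w01: array of 0.0/1.0 values, e.g. BWc1.bin reshaped to (ch_out, ch_in, k, k)
    flat = w01.astype(np.uint8).reshape(w01.shape[0], -1)  # one bit row per output channel
    return np.packbits(flat, axis=1)  # 8 bits per byte, MSB first, zero-padded at the end

# Example: BWc1 has 32 output channels of 16*5*5 = 400 bits -> 32 rows of 50 bytes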







A second Python program reads the stored weights back and runs inference:

import torch
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import numpy as np

def my_conv(x, w, kernel_size, stride, padding, padding_value):
    batch_size, channel_in, height, width = x.size()
    channel_out, channel_in, kx, ky = w.size()
    x_pad = torch.nn.functional.pad(input=x, pad=(padding, padding, padding, padding), mode='constant',
                                        value=padding_value)
    h_out = int((height + 2 * padding - kernel_size) / stride + 1)
    w_out = int((width + 2 * padding - kernel_size) / stride + 1)
    out = torch.zeros((int(batch_size), int(channel_out), int(h_out), int(w_out)))
    for b in range(batch_size):
        for ch in range(channel_out):
            for i in range(h_out):
                for j in range(w_out):
                    out[b,ch,i,j]=torch.sum(torch.eq(x_pad[b,:,i*stride:i*stride+kernel_size,
                                                     j*stride:j*stride+kernel_size],w[ch,:,:,:]))
    return out

Wc1=torch.from_numpy(np.fromfile("Wc1.bin",dtype=np.float32)).view(16,1,5,5)
bc1=torch.from_numpy(np.fromfile("bc1.bin",dtype=np.float32))
bn1_gamma=torch.from_numpy(np.fromfile("bn1_gamma.bin",dtype=np.float32))
bn1_beta=torch.from_numpy(np.fromfile("bn1_beta.bin",dtype=np.float32))
bn1_mean=torch.from_numpy(np.fromfile("bn1_mean.bin",dtype=np.float32))
bn1_var=torch.from_numpy(np.fromfile("bn1_var.bin",dtype=np.float32))
BWc1=torch.from_numpy(np.fromfile("BWc1.bin",dtype=np.float32)).view(32,16,5,5)
BWc2=torch.from_numpy(np.fromfile("BWc2.bin",dtype=np.float32)).view(32,32,5,5)
BWc3=torch.from_numpy(np.fromfile("BWc3.bin",dtype=np.float32)).view(64,32,5,5)
BWc4=torch.from_numpy(np.fromfile("BWc4.bin",dtype=np.float32)).view(10,64,5,5)
gamma2=torch.from_numpy(np.fromfile("gamma2.bin",dtype=np.float32))
beta2=torch.from_numpy(np.fromfile("beta2.bin",dtype=np.float32))
gamma3=torch.from_numpy(np.fromfile("gamma3.bin",dtype=np.float32))
beta3=torch.from_numpy(np.fromfile("beta3.bin",dtype=np.float32))
gamma4=torch.from_numpy(np.fromfile("gamma4.bin",dtype=np.float32))
beta4=torch.from_numpy(np.fromfile("beta4.bin",dtype=np.float32))
gamma5=torch.from_numpy(np.fromfile("gamma5.bin",dtype=np.float32))
beta5=torch.from_numpy(np.fromfile("beta5.bin",dtype=np.float32))




test_dataset = torchvision.datasets.MNIST(root='../../data',
                                              train=False,
                                              transform=transforms.ToTensor())
# Data loader
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                              batch_size=100,
                                              shuffle=False)

correct = 0
for batch_idx, (data, target) in enumerate(test_loader):

    x = F.conv2d(data, Wc1, bias=bc1, stride=1, padding=2)
    x = F.batch_norm(x, running_mean=bn1_mean, running_var=bn1_var, weight=bn1_gamma,bias=bn1_beta)
    x = F.max_pool2d(x, kernel_size=2, stride=2)

    x = (torch.sign(x)+1)/2
    x = my_conv(x,BWc1,kernel_size=5,stride=1,padding=2,padding_value=0)
    x = x * gamma2.view(1,-1,1,1)+beta2.view(1,-1,1,1)

    x = (torch.sign(x)+1)/2
    x = my_conv(x, BWc2, kernel_size=5, stride=1, padding=2, padding_value=0)
    x = x * gamma3.view(1, -1, 1, 1) + beta3.view(1, -1, 1, 1)
    x = F.max_pool2d(x, kernel_size=2, stride=2)

    x = (torch.sign(x)+1)/2
    x = my_conv(x, BWc3, kernel_size=5, stride=1, padding=2, padding_value=0)
    x = x * gamma4.view(1, -1, 1, 1) + beta4.view(1, -1, 1, 1)

    x = (torch.sign(x)+1)/2
    x = my_conv(x, BWc4, kernel_size=5, stride=1, padding=2, padding_value=0)
    x = x * gamma5.view(1, -1, 1, 1) + beta5.view(1, -1, 1, 1)
    x=torch.relu(x)

    x = F.avg_pool2d(x, kernel_size=7)

    output = torch.argmax(x, axis=1)
    for i in range(data.size(0)):
        if target[i] == output[i]:
            correct += 1
    print(correct/data.size(0))
    correct=0
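The loop above prints a separate accuracy for each batch and then resets the counter; to report a single figure over the whole test set, the per-batch bookkeeping at the end of the loop can be replaced by an accumulating variant (a small sketch, not in the original):

pred = x.view(x.size(0), -1).argmax(dim=1)  # class index per sample
correct += (pred == target).sum().item()
total += data.size(0)

with correct = total = 0 initialized before the loop and print('test accuracy:', correct / total) after it.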

Inference in C++:

#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <fstream>
#include <cmath>
#pragma GCC optimize(3,"Ofast","inline")
using namespace std;

float img[10000][28][28];
int label[10000];

inline int xnor(int a,int b){
    return (a==b)?1:0;
}

void bconv(int ch_in,int ch_out,int pad,int stride,int k,int h,int w,int *in,int *weight,int *out){
    int h_o,w_o;
    int i,j,n,m;
    int kx,ky;
    h_o=(h-k+2*pad)/stride+1;
    w_o=(w-k+2*pad)/stride+1;
    for(i=0;i<h_o;i++){
        for(j=0;j<w_o;j++){
            for(n=0;n<ch_out;n++){
                int acc=0;
                for(m=0;m<ch_in;m++){
                    for(kx=0;kx<k;kx++){
                        for(ky=0;ky<k;ky++){
                            int x=i*stride+kx-pad;
                            int y=j*stride+ky-pad;
                            // positions outside the input are the 0-padding (encoding -1)
                            int v=(x<0||x>=h||y<0||y>=w)?0:in[(m*h+x)*w+y];
                            acc+=xnor(v,weight[((n*ch_in+m)*k+kx)*k+ky]);
                        }
                    }
                }
                // raw XNOR match count, as in my_conv; the affine correction
                // is applied afterwards via the fused gamma'/beta'
                out[(n*h_o+i)*w_o+j]=acc;
            }
        }
    }
}
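// Example call (illustrative): convolve the 16x14x14 {0,1} activation map
// after the first pooling stage with the 32x16x5x5 binary weights BWc1,
// using 'same' padding of 2:
//   bconv(16, 32, 2, 1, 5, 14, 14, act_in, BWc1, act_out);
// Each output is then scaled per channel, out = gamma2[ch]*out + beta2[ch],
// as in the Python simulation above.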