
Today I'm sharing what I've been learning about neural networks, using the PyTorch framework.

This is mostly a learning log for myself, so if it's not to your taste, feel free to skip it.

There isn't much prose or theory here; it's almost all code. That said, if your fundamentals are shaky, typing every example out yourself really does make a difference!

There are also two companion posts with supplementary material that are worth a look:

From Shallow to Deep: A Walk into Deep Learning (Supplement: Neural Network Basics) - CSDN Blog

From Shallow to Deep: A Walk into Deep Learning (Supplement: Neural Network Layer Basics) - CSDN Blog

Main contents

Contents

Part 6: Convolution fundamentals, convolutional layers, and applying convolutions to images

Part 7: Max pooling layers

Part 8: Nonlinear activations

Part 9: Linear layers and other layers

Part 10: Hands-on: building a small neural network


Now for the main content.

Part 6: Convolution fundamentals, convolutional layers, and applying convolutions to images

import torch
import torch.nn.functional as F

input = torch.tensor([[1, 2, 0, 3, 1],
                      [0, 1, 2, 3, 1],
                      [1, 2, 1, 0, 0],
                      [5, 6, 2, 2, 1],
                      [3, 2, 3, 5, 1]])

kernel = torch.tensor([[1, 2, 1],
                       [2, 3, 1],
                       [3, 0, 1]])

print(input.shape)
print(kernel.shape)

# F.conv2d expects 4-D tensors: (batch, channels, height, width)
input = torch.reshape(input, (1, 1, 5, 5))
kernel = torch.reshape(kernel, (1, 1, 3, 3))
print(input.shape)
print(input)
print(kernel.shape)
print(kernel)

output = F.conv2d(input, kernel, stride = 1)
print(output.shape)
print(output)

# Same input and kernel, but now the window moves two steps at a time
output = F.conv2d(input, kernel, stride = 2)
print(output.shape)
print(output)

# Stride and padding
# Stride: the sampling interval at which the kernel slides over the input feature map.
# A larger stride shrinks the output and reduces the amount of computation.
# Padding: rows/columns added around the input feature map, so that every input element
# can serve as the center of a convolution window, or so that the output feature map
# keeps the same height/width as the input.
# For an a * a feature map, a b * b kernel, stride c and padding d:
# with d = 0 (no padding), the output size = (a - b) / c + 1;
# with d != 0, the output size = (a + 2d - b) / c + 1.
# Padding one ring of zeros around the same 5 * 5 input keeps the output at 5 * 5
output = F.conv2d(input, kernel, stride = 1, padding = 1)
print(output.shape)
print(output)
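As a sanity check on the formula above: here a = 5, b = 3, c = 1, d = 1, so the output should be (5 + 2*1 - 3) / 1 + 1 = 5, i.e. 5 * 5, matching the printout. A minimal helper for this arithmetic (the name conv_out_size is my own, not from the original post):

def conv_out_size(a, b, c = 1, d = 0):
    # output size for an a*a input, b*b kernel, stride c, padding d
    return (a + 2 * d - b) // c + 1

print(conv_out_size(5, 3, c = 1, d = 0))  # 3 - the stride-1, no-padding case
print(conv_out_size(5, 3, c = 2, d = 0))  # 2 - the stride-2 case
print(conv_out_size(5, 3, c = 1, d = 1))  # 5 - the padded case above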
# Part 6 continued: the convolutional layer
# Conv1d is one-dimensional convolution, Conv2d two-dimensional, Conv3d three-dimensional
# kernel_size = 3 defines a 3 * 3 kernel; note that what training actually adjusts
# are the kernel's weights, not kernel_size itself

import torch
from torch import nn
from torch.nn import Conv2d
from torch.utils.data import DataLoader
import torchvision

# dataset = torchvision.datasets.CIFAR10("dataset", train=False, transform=torchvision.transforms.ToTensor(), download=True)
# dataloader = DataLoader(dataset, batch_size = 64)

class net(nn.Module):
    def __init__(self):
        super(net, self).__init__()
        # RGB input has 3 channels; we want 6 output channels from a 3 * 3 kernel
        self.conv1 = Conv2d(in_channels = 3, out_channels = 6, kernel_size = 3, stride = 1, padding = 0)

    def forward(self, x):
        x = self.conv1(x)
        return x

model = net()
print(model)
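Since it is the kernel weights that training updates, it can help to look at them directly. A quick inspection (my own addition, not from the original post):

import torch
from torch.nn import Conv2d

conv = Conv2d(in_channels = 3, out_channels = 6, kernel_size = 3)
print(conv.weight.shape)  # torch.Size([6, 3, 3, 3]) - one 3*3 kernel per input channel, for each of 6 output channels
print(conv.bias.shape)    # torch.Size([6]) - one bias per output channel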
# Applying the convolutional layer to images
import torch
from torch import nn
from torch.nn import Conv2d
from torch.utils.data import DataLoader
import torchvision

dataset = torchvision.datasets.CIFAR10("dataset", train=False, transform=torchvision.transforms.ToTensor(), download=True)
dataloader = DataLoader(dataset, batch_size = 64)

class net(nn.Module):
    def __init__(self):
        super(net, self).__init__()
        self.conv1 = Conv2d(in_channels = 3, out_channels = 6, kernel_size = 3, stride = 1, padding = 0)

    def forward(self, x):
        x = self.conv1(x)
        return x

model = net()
for data in dataloader:
    img, targets = data
    output = model(img)
    # print(img.shape)     # input: 64 images, 3 channels, 32 * 32
    # print(output.shape)  # output: 64 images, 6 channels, 30 * 30
    # print(targets.shape)
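The 6-channel feature maps cannot be displayed directly as RGB images; one common workaround (my own suggestion, not from the post) is to fold the extra channels into the batch dimension before visualizing:

import torch

output = torch.ones((64, 6, 30, 30))               # the shape produced by the layer above
viewable = torch.reshape(output, (-1, 3, 30, 30))  # -1 lets torch infer the new batch size
print(viewable.shape)                              # torch.Size([128, 3, 30, 30])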

Part 7: Max pooling layers

# Max pooling is sometimes called downsampling; the dilation argument gives a dilated ("atrous") window
# ceil_mode = True: when the window runs past the edge of the input, the partial window is still pooled (the output size rounds up)
# Pooling shrinks the data, e.g. from 5 * 5 down to 3 * 3 or even 1 * 1, which greatly cuts
# the amount of computation. By analogy: downscaling a 1080p video to 720p or 360p lets it
# play smoothly at the same network speed.
import torch
from torch import nn
from torch.nn import MaxPool2d

input = torch.tensor([[3, 4, 6, 1, 8],
                      [4, 0, 8, 0, 1],
                      [1, 2, 4, 5, 1],
                      [2, 3, 1, 5, 1],
                      [3, 3, 1, 5, 0]], dtype = torch.float32)  # MaxPool2d needs a float tensor

input = torch.reshape(input, (-1, 1, 5, 5))
print(input.shape)

class net(nn.Module):
    def __init__(self):
        super(net, self).__init__()
        self.maxpool = MaxPool2d(kernel_size = 3, ceil_mode = True)

    def forward(self, x):
        x = self.maxpool(x)
        return x

model = net()
output = model(input)
print(output.shape)
print(output)

import torch
import torchvision
from torch import nn
from torch.nn import MaxPool2d
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10("dataset", train=False, transform=torchvision.transforms.ToTensor(), download=True)
dataloader = DataLoader(dataset, batch_size = 64)

class net(nn.Module):
    def __init__(self):
        super(net, self).__init__()
        self.maxpool = MaxPool2d(kernel_size = 3, ceil_mode = True)

    def forward(self, x):
        x = self.maxpool(x)
        return x

model = net()
epoch = 0
for data in dataloader:
    img, targets = data
    # print('input', img, epoch)
    output = model(img)
    # print('output', output, epoch)
    epoch = epoch + 1
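To make the ceil_mode behavior concrete, here is a quick comparison on a 5 * 5 input (my own addition): with kernel_size = 3 the stride defaults to 3, so floor mode drops the partial windows at the edges while ceil mode keeps them.

import torch
from torch.nn import MaxPool2d

x = torch.arange(25, dtype = torch.float32).reshape(1, 1, 5, 5)
print(MaxPool2d(kernel_size = 3, ceil_mode = True)(x).shape)   # torch.Size([1, 1, 2, 2]) - partial windows kept
print(MaxPool2d(kernel_size = 3, ceil_mode = False)(x).shape)  # torch.Size([1, 1, 1, 1]) - partial windows dropped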

Part 8: Nonlinear activations

# inplace = True replaces the input tensor's values in place; inplace = False (the default)
# leaves the input unchanged and returns the result as a new tensor
import torch
from torch import nn
from torch.nn import ReLU

input = torch.tensor([[1, -2],
                      [-0.7, 3]])

input = torch.reshape(input, (-1, 1, 2, 2))
print(input.shape)

class net(nn.Module):
    def __init__(self):
        super(net, self).__init__()
        self.relu = ReLU()

    def forward(self, x):
        x = self.relu(x)
        return x

model = net()
output = model(input)
print(output.shape)
print(output)
print(output[0][0][1][1])

import torch
import torchvision
from torch import nn
from torch.nn import ReLU, Sigmoid
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10("dataset", train=False, transform=torchvision.transforms.ToTensor(), download=True)
dataloader = DataLoader(dataset, batch_size = 64)

class net(nn.Module):
    def __init__(self):
        super(net, self).__init__()
        self.relu = ReLU()
        self.sigmoid = Sigmoid()

    def forward(self, x):
        x1 = self.relu(x)
        x2 = self.sigmoid(x1)
        return x2

model = net()
epoch = 0
for data in dataloader:
    imgs, targets = data
    output = model(imgs)
    # print(output.shape)
    epoch = epoch + 1
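A tiny demonstration of the inplace flag described above (my own addition, not from the original post):

import torch
from torch.nn import ReLU

x = torch.tensor([1.0, -2.0])
y = ReLU(inplace = False)(x)  # result goes into a new tensor
print(x)                      # tensor([ 1., -2.]) - original unchanged
ReLU(inplace = True)(x)       # result overwrites the input
print(x)                      # tensor([1., 0.])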

Part 9: Linear layers and other layers

# Flattening with reshape
import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10("dataset", train=False, transform=torchvision.transforms.ToTensor(), download=True)
dataloader = DataLoader(dataset, batch_size = 64)

for data in dataloader:
    imgs, targets = data
    # print(imgs.shape)
    output = torch.reshape(imgs, (1, 1, 1, -1))
    # print(output.shape)
# Linear layer
import torch
import torchvision
from torch import nn
from torch.nn import Linear
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10("dataset", train=False, transform=torchvision.transforms.ToTensor(), download=True)
dataloader = DataLoader(dataset, batch_size = 64, drop_last=True)
# drop_last=True: if the dataset size is not divisible by batch_size, the final short batch is discarded
# drop_last=False (the default): the final batch keeps the leftover samples and may be smaller than batch_size

class net(nn.Module):
    def __init__(self):
        super(net, self).__init__()
        self.linear = Linear(196608, 10)  # 196608 = 64 * 3 * 32 * 32, one full batch flattened

    def forward(self, x):
        x = self.linear(x)
        return x

model = net()
epoch = 0
for data in dataloader:
    imgs, targets = data
    # print(imgs.shape)
    imgs_reshape = torch.reshape(imgs, (1, 1, 1, -1))  # method 1: flatten with reshape
    # print(imgs_reshape.shape)
    output = model(imgs_reshape)
    # print(output.shape)
    # epoch = epoch + 1
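The effect of drop_last is easy to check on CIFAR-10's 10000 test images (my own check, not in the original post): 10000 = 156 * 64 + 16, so the final batch of 16 images is either dropped or kept.

import torchvision
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10("dataset", train=False, transform=torchvision.transforms.ToTensor(), download=True)
print(len(DataLoader(dataset, batch_size = 64, drop_last = True)))   # 156
print(len(DataLoader(dataset, batch_size = 64, drop_last = False)))  # 157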
# Linear layer, flattening with torch.flatten
import torch
import torchvision
from torch import nn
from torch.nn import Linear
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10("dataset", train=False, transform=torchvision.transforms.ToTensor(), download=True)
dataloader = DataLoader(dataset, batch_size = 64, drop_last=True)

class net(nn.Module):
    def __init__(self):
        super(net, self).__init__()
        self.linear = Linear(196608, 20)

    def forward(self, x):
        x = self.linear(x)
        return x

model = net()
epoch = 0
for data in dataloader:
    imgs, targets = data
    # print(imgs.shape)
    imgs_flatten = torch.flatten(imgs)  # method 2: flatten to a one-dimensional tensor
    # print(imgs_flatten.shape)
    output = model(imgs_flatten)
    # print(output.shape)
    # epoch = epoch + 1
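The two flattening methods produce differently shaped results; a side-by-side comparison (my own sketch):

import torch

imgs = torch.ones((64, 3, 32, 32))
print(torch.reshape(imgs, (1, 1, 1, -1)).shape)  # torch.Size([1, 1, 1, 196608]) - still 4-D
print(torch.flatten(imgs).shape)                 # torch.Size([196608]) - 1-D
print(torch.flatten(imgs, start_dim = 1).shape)  # torch.Size([64, 3072]) - keeps the batch dimension, the usual choice before a Linear layer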

Part 10: Hands-on: building a small neural network

# Putting the layers inside Sequential keeps the code concise and easy to read
# The size at each layer (and the padding each layer needs) can be worked out
# with the output-size formula from Part 6: (a + 2d - b) / c + 1
import torch
import torchvision
from torch import nn
from torch.nn import Linear, Conv2d, MaxPool2d, Flatten
from torch.utils.data import DataLoader

# dataset = torchvision.datasets.CIFAR10("dataset", train=False, transform=torchvision.transforms.ToTensor(), download=True)
# dataloader = DataLoader(dataset, batch_size = 64, drop_last=True)

class net(nn.Module):
    def __init__(self):
        super(net, self).__init__()
        self.conv1 = Conv2d(in_channels = 3, out_channels = 32, kernel_size = 5, stride = 1, padding = 2)
        self.maxpool1 = MaxPool2d(kernel_size = 2, ceil_mode = True)
        self.conv2 = Conv2d(in_channels = 32, out_channels = 32, kernel_size = 5, stride = 1, padding = 2)
        self.maxpool2 = MaxPool2d(kernel_size = 2, ceil_mode = True)
        self.conv3 = Conv2d(in_channels = 32, out_channels = 64, kernel_size = 5, stride = 1, padding = 2)
        self.maxpool3 = MaxPool2d(kernel_size = 2, ceil_mode = True)
        self.flatten = Flatten()
        self.linear1 = Linear(1024, 64)
        self.linear2 = Linear(64, 10)

    def forward(self, x):
        x = self.conv1(x)
        print(x.shape)
        x = self.maxpool1(x)
        print(x.shape)
        x = self.conv2(x)
        print(x.shape)
        x = self.maxpool2(x)
        print(x.shape)
        x = self.conv3(x)
        print(x.shape)
        x = self.maxpool3(x)
        print(x.shape)
        x = self.flatten(x)
        print(x.shape)
        x = self.linear1(x)
        print(x.shape)
        x = self.linear2(x)
        print(x.shape)
        return x

model = net()
print(model)

input = torch.ones((64, 3, 32, 32))
output = model(input)
print(output.shape)
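Tracing the sizes with the Part 6 formula explains both the padding = 2 and the 1024 in Linear(1024, 64) (my own derivation, standing in for the figure the original post referenced):

# conv 5 * 5, stride 1: to keep 32 -> 32 we need (32 + 2d - 5) / 1 + 1 = 32, so d = 2
# 32 * 32 --conv1--> 32 * 32 --pool/2--> 16 * 16 --conv2--> 16 * 16 --pool--> 8 * 8
#         --conv3-->  8 * 8  --pool-->    4 * 4
# flatten: 64 channels * 4 * 4 = 1024, which is exactly the in_features of Linear(1024, 64)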
# The same network written with Sequential
import torch
import torchvision
from torch import nn
from torch.nn import Linear, Conv2d, MaxPool2d, Flatten, Sequential
from torch.utils.data import DataLoader

class net(nn.Module):
    def __init__(self):
        super(net, self).__init__()
        self.model = Sequential(
            Conv2d(in_channels = 3, out_channels = 32, kernel_size = 5, stride = 1, padding = 2),
            MaxPool2d(kernel_size = 2, ceil_mode = True),
            Conv2d(in_channels = 32, out_channels = 32, kernel_size = 5, stride = 1, padding = 2),
            MaxPool2d(kernel_size = 2, ceil_mode = True),
            Conv2d(in_channels = 32, out_channels = 64, kernel_size = 5, stride = 1, padding = 2),
            MaxPool2d(kernel_size = 2, ceil_mode = True),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        x = self.model(x)
        return x

model = net()
print(model)

input = torch.ones((64, 3, 32, 32))
output = model(input)
print(output.shape)  # torch.Size([64, 10]) - one score per CIFAR-10 class
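The comment at the top of this part mentioned computing the network's parameters from the layer sizes; PyTorch can confirm the hand count (my own addition, reusing the Sequential net class defined just above):

# assumes the Sequential `net` class defined above is in scope
model = net()
print(sum(p.numel() for p in model.parameters()))
# 145578 = (3*32*25 + 32) + (32*32*25 + 32) + (32*64*25 + 64) + (1024*64 + 64) + (64*10 + 10)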

Note: the content above is based on the video series by Bilibili creator 我是土堆!
