卷积神经网络

Posted semen

tags:

篇首语:本文由小常识网(cha138.com)小编为大家整理,主要介绍了卷积神经网络相关的知识,希望对你有一定的参考价值。

function just_one_CNN()
load mnist_uint8;
train_x=double(reshape(train_x\',28,28,60000))/255;
test_x=double(reshape(test_x\',28,28,10000))/255;
train_y=double(train_y\')/255;
test_y=double(test_y\')/255;
cnn.layers={struct(\'type\',\'i\');
            struct(\'type\',\'c\',\'outputmaps\',6,\'kernelsize\',5);
            struct(\'type\',\'s\',\'scale\',2);
            struct(\'type\',\'c\',\'outputmaps\',16,\'kernelsize\',5);
            struct(\'type\',\'s\',\'scale\',2);};
cnn=cnnsetup(cnn,train_x,train_y);
opts.alpha=1;
opts.batchsizes=2;
opts.numepoches=2;
cnn=cnntrain(cnn,train_x,train_y,opts);
preimage=predict();
cnntest(cnn,preimage);
end
function net=cnnsetup(net,x,y)
inputmaps=1;
mapsize=size(x(:,:,1));
for l=1:numel(net.layers)
    if strcmp(net.layers{l}.type,\'s\')
        mapsize=mapsize/net.layers{l}.scale;
        for j=1:inputmaps
            net.layers{l}.b{j}=0;
        end
    end
    if strcmp(net.layers{l}.type,\'c\')
        mapsize=mapsize-net.layers{l}.kernelsize+1;
        fan_out=net.layers{l}.outputmaps*net.layers{l}.kernelsize^2;
        for j=1:net.layers{l}.outputmaps
            fan_in=inputmaps*net.layers{l}.kernelsize^2;
            for i=1:inputmaps
                net.layers{l}.k{i}{j}=(rand(net.layers{l}.kernelsize)- 0.5) * 2 * sqrt(6 / (fan_in + fan_out));
            end
            net.layers{l}.b{j}=0;
        end
        inputmaps=net.layers{l}.outputmaps;
    end
end
fvnum=prod(mapsize)*inputmaps;
onum=size(y,1);
net.ffb=zeros(onum,1);
net.ffW=(rand(onum, fvnum) - 0.5) * 2 * sqrt(6 / (onum + fvnum));
end
function net=cnntrain(net,x,y,opts)
m=size(x,3);
numbatchs=m/opts.batchsizes;
if rem(numbatchs, 1) ~= 0
        error(\'numbatches not integer\');
end
net.rL=[];
for i=1:opts.numepoches
    tic;
    kk=randperm(m);
    for l=1:numbatchs
        batch_x=x(:,:,kk((l-1)*opts.batchsizes+1:l*opts.batchsizes));
        batch_y=y(:,kk((l-1)*opts.batchsizes+1:l*opts.batchsizes));
        net=cnnff(net,batch_x);
        net=cnnbp(net,batch_y);
        net = cnnapplygrads(net, opts);  
        if isempty(net.rL)  
           net.rL(1) = net.L;
        end  
        net.rL(end + 1) = 0.99 * net.rL(end) + 0.01 * net.L;
    end
    toc
end
end

function net=cnnff(net,x)
n=numel(net.layers);
net.layers{1}.a{1}=x;
inputmaps=1;
for l=2:n
    if strcmp(net.layers{l}.type,\'c\')
        for j=1:net.layers{l}.outputmaps
            z=zeros(size(net.layers{l-1}.a{1})-[net.layers{l}.kernelsize-1 net.layers{l}.kernelsize-1 0]);
            for i=1:inputmaps
                z=z+convn(net.layers{l-1}.a{i},net.layers{l}.k{i}{j},\'valid\');
            end
            net.layers{l}.a{j}=sigm(z+net.layers{l}.b{j});
        end
        inputmaps=net.layers{l}.outputmaps;
    elseif strcmp(net.layers{l}.type,\'s\')
        for j=1:inputmaps
            z=convn(net.layers{l-1}.a{j},ones(net.layers{l}.scale)/(net.layers{l}.scale^2),\'valid\');
            net.layers{l}.a{j} = z(1 : net.layers{l}.scale : end, 1 : net.layers{l}.scale : end, :);
        end
    end
end
net.fv=[];
for j=1:numel(net.layers{n}.a)
    sa=size(net.layers{n}.a{j});
    net.fv=[net.fv;reshape(net.layers{n}.a{j},sa(1)*sa(2),sa(3))];
end
net.o = sigm(net.ffW * net.fv + repmat(net.ffb, 1, size(net.fv, 2)));
end

function [out]=sigm(in)
out=1./(1+exp(-in));
end

function net=cnnbp(net,y)
n=numel(net.layers);
net.e=net.o-y;
net.L=1/2*sum(net.e(:).^2)/size(net.e,2);
net.od=net.e.*(net.o.*(1-net.o));
net.fvd=(net.ffW\'*net.od);
if strcmp(net.layers{n}.type,\'c\')
    net.fvd=net.fv.*(netfv.*(1-net.fv));
end
sa=size(net.layers{n}.a{1});
fvnum=sa(1)*sa(2);
for j = 1 : numel(net.layers{n}.a)
    net.layers{n}.d{j} = reshape(net.fvd(((j - 1) * fvnum + 1) : j * fvnum, :), sa(1), sa(2), sa(3));  
end 
for l = (n - 1) : -1 : 1                   
    if strcmp(net.layers{l}.type, \'c\')      
        for j = 1 : numel(net.layers{l}.a) 
           
            net.layers{l}.d{j} = net.layers{l}.a{j} .* (1 - net.layers{l}.a{j}) .* (expand(net.layers{l + 1}.d{j}, [net.layers{l + 1}.scale net.layers{l + 1}.scale 1]) / net.layers{l + 1}.scale ^ 2);  
        end  
          
    elseif strcmp(net.layers{l}.type, \'s\')          
     
        for i = 1 : numel(net.layers{l}.a)           
            z = zeros(size(net.layers{l}.a{1}));  
            for j = 1 : numel(net.layers{l + 1}.a)   
                z = z + convn(net.layers{l + 1}.d{j}, rot180(net.layers{l + 1}.k{i}{j}), \'full\');  
            end  
            net.layers{l}.d{i} = z;  
        end  
    end  
end 
for l = 2 : n  
    if strcmp(net.layers{l}.type, \'c\')  
        for j = 1 : numel(net.layers{l}.a)  
            for i = 1 : numel(net.layers{l - 1}.a)  
                net.layers{l}.dk{i}{j} = convn(flipall(net.layers{l - 1}.a{i}), net.layers{l}.d{j}, \'valid\') / size(net.layers{l}.d{j}, 3);  
            end  
            net.layers{l}.db{j} = sum(net.layers{l}.d{j}(:)) / size(net.layers{l}.d{j}, 3);  
        end  
    end  
end
net.dffW = net.od * (net.fv)\' / size(net.od, 2);
net.dffb = mean(net.od, 2);

end

function X = rot180(X)
    X = flipdim(flipdim(X, 1), 2);
end

function net = cnnapplygrads(net, opts)  
    for l = 2 : numel(net.layers)  
        if strcmp(net.layers{l}.type, \'c\')  
            for j = 1 : numel(net.layers{l}.a)  
                for ii = 1 : numel(net.layers{l - 1}.a)  
                    net.layers{l}.k{ii}{j} = net.layers{l}.k{ii}{j} - opts.alpha * net.layers{l}.dk{ii}{j};  
                end  
            end  
            net.layers{l}.b{j} = net.layers{l}.b{j} - opts.alpha * net.layers{l}.db{j};  
        end  
    end  
  
    net.ffW = net.ffW - opts.alpha * net.dffW;  
    net.ffb = net.ffb - opts.alpha * net.dffb;  
end    

function cnntest(net, x)  
  
    net = cnnff(net, x);
    [~, h] = max(net.o);
    disp(\'the image data is\');
    disp(h-1);
end 

function [guige]=predict()
ff=imread(\'seven.png\');
tgray=rgb2gray(ff);
tgray(1:7,:)=[];
tgray(end-3:end,:)=[];
tgray(:,1)=[];
gg=imread(\'eight.png\');
eg=rgb2gray(gg);
eg(1:5,:)=[];
eg(end-4:end,:)=[];
eg(:,1)=[];
eg(:,end)=[];
guige=[tgray;eg];
guige=double(reshape(guige\',28,28,2))/255;
end

function B = expand(A, S)
if nargin < 2
    error(\'Size vector must be provided.  See help.\');
end
SA = size(A); 
if length(SA) ~= length(S)
   error(\'Length of size vector must equal ndims(A).  See help.\')
elseif any(S ~= floor(S))
   error(\'The size vector must contain integers only.  See help.\')
end

T = cell(length(SA), 1);
for ii = length(SA) : -1 : 1
    H = zeros(SA(ii) * S(ii), 1);  
    H(1 : S(ii) : SA(ii) * S(ii)) = 1;  
    T{ii} = cumsum(H);   
end
B = A(T{:}); 
end

function X=flipall(X)
    for i=1:ndims(X)
        X = flipdim(X,i);
    end
end

上面代码只需要放在一个just_one_CNN函数里面就能运行。

可以任意拓展网络的层数,只需更新相应的参数就可以

对下面两张rgb图片能够正确识别,但是要进行处理,要将图片转为灰度图,分割成大小为28x28的图片,在展开成两行向量

       

以上是关于卷积神经网络的主要内容,如果未能解决你的问题,请参考以下文章

卷积神经网络之GAN(附完整代码)

卷积神经网络:反向传播过程的代码实现

卷积神经网络(原理与代码实现)

卷积神经网络(原理与代码实现)

深度卷积生成对抗网络(DCGAN)简介及图像生成仿真(附代码)

卷积神经网络结构——LeNet-5(卷积神经网络入门,Keras代码实现)