Loading .npy data for training with PyTorch

Posted by Mario cai

Opening note: this post, compiled by the editors of cha138.com, covers code for loading .npy data and training on it with PyTorch; hopefully it is of some reference value to you.

UTILS

import os
import sys
import json
import pickle
import random
import imageio
import numpy as np
import torch
from tqdm import tqdm


def train_one_epoch(model, optimizer, data_loader, device, epoch):
    model.train()
    loss_function = torch.nn.CrossEntropyLoss()
    # from loss_function import FocalLoss1
    # loss_function = FocalLoss1().cuda()
    accu_loss = torch.zeros(1).to(device)  # accumulated loss
    accu_num = torch.zeros(1).to(device)   # accumulated number of correct predictions
    optimizer.zero_grad()
    k = 0
    sample_num = 0
    data_loader = tqdm(data_loader)
    for step, data in enumerate(data_loader):
        k = k+1
        images, labels = data
        sample_num += images.shape[0]
        # print(images[0].shape)
        # c = np.array(images[0])
        # c = c.transpose((1, 2, 0))
        # imageio.imwrite('data' + '%d.jpg' % (k + 1), c)


        pred = model(images.to(device))
        # print(images.shape)
        pred_classes = torch.max(pred, dim=1)[1]
        accu_num += torch.eq(pred_classes, labels.to(device)).sum()

        loss = loss_function(pred, labels.to(device))
        loss.backward()
        accu_loss += loss.detach()

        data_loader.desc = "[train epoch ] loss: :.3f, acc: :.3f".format(epoch,
                                                                               accu_loss.item() / (step + 1),
                                                                               accu_num.item() / sample_num)

        if not torch.isfinite(loss):
            print('WARNING: non-finite loss, ending training ', loss)
            sys.exit(1)

        optimizer.step()
        optimizer.zero_grad()

    return accu_loss.item() / (step + 1), accu_num.item() / sample_num

@torch.no_grad()
def evaluate(model, data_loader, device, epoch):
    # from loss_function import FocalLoss1
    # loss_function = FocalLoss1().cuda()
    loss_function = torch.nn.CrossEntropyLoss()
    model.eval()

    accu_num = torch.zeros(1).to(device)   # accumulated number of correct predictions
    accu_loss = torch.zeros(1).to(device)  # accumulated loss

    sample_num = 0
    data_loader = tqdm(data_loader)
    for step, data in enumerate(data_loader):
        images, labels = data
        sample_num += images.shape[0]

        pred = model(images.to(device))
        pred_classes = torch.max(pred, dim=1)[1]
        accu_num += torch.eq(pred_classes, labels.to(device)).sum()

        loss = loss_function(pred, labels.to(device))
        accu_loss += loss

        data_loader.desc = "[valid epoch ] loss: :.3f, acc: :.3f".format(epoch,
                                                                               accu_loss.item() / (step + 1),
                                                                               accu_num.item() / sample_num)

    return accu_loss.item() / (step + 1), accu_num.item() / sample_num

TRAIN
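
The training entry point below is a minimal sketch of how the pieces fit together: it builds the Dataset classes from the DATASET section, wraps them in DataLoaders, and drives train_one_epoch / evaluate from the UTILS section. The module names (my_dataset, utils), data paths, batch size, class count, and the torchvision resnet18 backbone are illustrative assumptions, not the original configuration.

import torch
from torch.utils.data import DataLoader
from torchvision.models import resnet18

from my_dataset import MyDataSet, valDataset  # Dataset classes from the DATASET section (assumed module name)
from utils import train_one_epoch, evaluate   # helpers from the UTILS section (assumed module name)


def main():
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Directories holding train.npy / label.npy and val.npy / val_label.npy (placeholder paths)
    data_root = "./data"
    label_root = "./data"

    train_dataset = MyDataSet(data_root, label_root, transform=None)
    val_dataset = valDataset(data_root, label_root, transform=None)

    train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True, num_workers=2)
    val_loader = DataLoader(val_dataset, batch_size=16, shuffle=False, num_workers=2)

    # Any image classifier works here; resnet18 with an assumed two-class head is only an example
    model = resnet18(num_classes=2).to(device)
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=0.9)

    for epoch in range(10):
        train_loss, train_acc = train_one_epoch(model, optimizer, train_loader, device, epoch)
        val_loss, val_acc = evaluate(model, val_loader, device, epoch)
        print("epoch {}: train_acc={:.3f}, val_acc={:.3f}".format(epoch, train_acc, val_acc))


if __name__ == "__main__":
    main()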


DATASET

import os

import numpy as np
import torch
from torch.utils.data import Dataset

class MyDataSet(Dataset):
    """Custom dataset that serves the training split from a single .npy file."""
    def __init__(self, data, label, transform=None):
        self.data = data
        self.label = label
        # train.npy holds the image array of shape (N, H, W); label.npy holds the N integer labels
        self.images_path = torch.tensor(np.load(os.path.join(data, "train.npy")))
        self.images_class = torch.tensor(np.load(os.path.join(label, "label.npy")))
        self.transform = transform

    def __len__(self):
        return self.images_path.shape[0]  # total number of samples

    def __getitem__(self, index):
        img = self.images_path[index, :, :]   # one (H, W) slice of the npy array
        label = self.images_class[index]
        img = img.unsqueeze(0).float()            # add a channel dimension -> (1, H, W)
        img = torch.cat([img, img, img], dim=0)   # replicate to three channels -> (3, H, W)
        label = label.type(torch.long)

        if self.transform is not None:
            img = self.transform(img)
        return img, label  # return the image and its label

    # def collate_fn(batch):
    #     # See the official default_collate implementation:
    #     # https://github.com/pytorch/pytorch/blob/67b7e751e6b5931a9f45274653f4f653a4e6cdf6/torch/utils/data/_utils/collate.py
    #     images, labels = tuple(zip(*batch))
    #     images = torch.stack(images, dim=0)
    #     labels = torch.as_tensor(labels)
    #     return images, labels


class valDataset(Dataset):
    """Validation dataset: same layout as MyDataSet, but reads val.npy / val_label.npy."""
    def __init__(self, data, label, transform=None):
        self.data = data
        self.label = label
        self.images_path = torch.tensor(np.load(os.path.join(data, "val.npy")))
        self.images_class = torch.tensor(np.load(os.path.join(label, "val_label.npy")))
        self.transform = transform

    def __len__(self):
        return self.images_path.shape[0]  # total number of samples

    def __getitem__(self, index):
        img = self.images_path[index, :, :]   # one (H, W) slice of the npy array
        label = self.images_class[index]
        img = img.unsqueeze(0).float()            # add a channel dimension -> (1, H, W)
        img = torch.cat([img, img, img], dim=0)   # replicate to three channels -> (3, H, W)
        label = label.type(torch.long)

        if self.transform is not None:
            img = self.transform(img)
        return img, label  # return the image and its label

    # def collate_fn(batch):
    #     # See the official default_collate implementation:
    #     # https://github.com/pytorch/pytorch/blob/67b7e751e6b5931a9f45274653f4f653a4e6cdf6/torch/utils/data/_utils/collate.py
    #     images, labels = tuple(zip(*batch))
    #     images = torch.stack(images, dim=0)
    #     labels = torch.as_tensor(labels)
    #     return images, labels
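
Both Dataset classes assume the .npy files store an entire split as one array: images of shape (N, H, W) (single-channel, replicated to three channels in __getitem__) and labels of shape (N,) holding integer class indices. As a quick sanity check, dummy files in that layout can be written like this (shapes, class count, and the data/ directory are illustrative assumptions):

import numpy as np

# Illustrative sizes: 100 grayscale 64x64 images, 2 classes; assumes a data/ directory already exists
images = np.random.rand(100, 64, 64).astype(np.float32)      # (N, H, W)
labels = np.random.randint(0, 2, size=100).astype(np.int64)  # (N,)

np.save("data/train.npy", images)
np.save("data/label.npy", labels)
np.save("data/val.npy", images[:20])
np.save("data/val_label.npy", labels[:20])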
