Image Classification with PaddlePaddle - DistResNet

Project Overview

This project implements training and inference for the ResNet image-classification model with PaddlePaddle. Running on a GPU is recommended; details follow. For the dynamic-graph version, see: Image Classification with PaddlePaddle - DistResNet (dygraph edition).
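Since GPU execution is recommended, it is worth confirming that the installed PaddlePaddle package was built with CUDA support before enabling the GPU later in the training configuration. A minimal check (a sketch; only the is_compiled_with_cuda call matters here):

import paddle.fluid as fluid

# True only if this PaddlePaddle build can use a CUDA GPU
print("CUDA support compiled in:", fluid.is_compiled_with_cuda())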

Download and installation commands

## CPU installation command
pip install -f https://paddlepaddle.org.cn/pip/oschina/cpu paddlepaddle

## GPU installation command
pip install -f https://paddlepaddle.org.cn/pip/oschina/gpu paddlepaddle-gpu

Model Overview

ResNet (Residual Neural Network) was proposed by Kaiming He and three colleagues at Microsoft Research. Using residual units they successfully trained a 152-layer network that won ILSVRC 2015 with a top-5 error rate of 3.57%, while using fewer parameters than VGGNet. The residual structure speeds up training considerably and also brings a clear gain in accuracy. ResNet also generalizes well: its residual connections can even be applied directly in Inception-style networks.

The main idea of ResNet is to add shortcut (identity) connections to the network. Earlier architectures simply apply a nonlinear transformation to their input, whereas Highway Networks allow a fraction of the previous layer's output to be carried forward. ResNet follows a very similar idea: the original input is passed directly to later layers. The basic residual structure, the Residual Block, is shown in the figure below.
(Figure: the basic Residual Block)
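In code, a basic Residual Block boils down to y = F(x) + x: the stacked convolutions only have to learn the residual F(x), and the block input is added back before the final activation. A minimal sketch using the same fluid API as the training code below (batch normalization is omitted here for brevity):

import paddle.fluid as fluid

def basic_residual_block(x, channels):
    # F(x): two 3x3 convolutions; the second one has no activation yet
    conv = fluid.layers.conv2d(x, num_filters=channels, filter_size=3, padding=1, act='relu')
    conv = fluid.layers.conv2d(conv, num_filters=channels, filter_size=3, padding=1, act=None)
    # y = F(x) + x, then ReLU; assumes x already has `channels` channels
    return fluid.layers.elementwise_add(x=x, y=conv, act='relu')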
Considering the computational cost in practice, the authors use two different Residual Block designs for ResNets of different depths, as shown in the figure below.
(Figure: the two Residual Block variants)
For deeper networks, the block on the right of the figure is used: a 1×1 convolution first reduces the number of channels to cut the computational cost, a normal 3×3 convolution follows, and another 1×1 convolution restores the channel dimension, achieving an effect equivalent to the structure on the left (a channel-flow sketch follows the references below). The detailed configurations of ResNets at different depths are listed in the table below.
(Table: ResNet configurations at different depths)
References:
[1] An introduction to ResNet
[2] Deep Residual Learning for Image Recognition
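The channel flow of the bottleneck variant can be illustrated as follows for a 256-channel input with num_filters = 64 (a simplified sketch: batch normalization and the projection shortcut are omitted; the complete version is DistResNet.bottleneck_block in the training code below):

import paddle.fluid as fluid

def bottleneck_sketch(x, num_filters=64):
    # 1x1 reduce: 256 -> 64, 3x3 conv: 64 -> 64, 1x1 expand: 64 -> 256
    conv = fluid.layers.conv2d(x, num_filters=num_filters, filter_size=1, act='relu')
    conv = fluid.layers.conv2d(conv, num_filters=num_filters, filter_size=3, padding=1, act='relu')
    conv = fluid.layers.conv2d(conv, num_filters=num_filters * 4, filter_size=1, act=None)
    # add the identity shortcut; assumes x already has num_filters * 4 channels
    return fluid.layers.elementwise_add(x=x, y=conv, act='relu')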

Dataset

We use a public flowers dataset. The archive contains five folders, one per flower category: daisy, dandelion, roses, sunflowers, and tulips, with roughly 690-890 images per category.

In[ ]

# Unzip the flower dataset
!cd data/data2815 && unzip -q flower_photos.zip 
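After extraction, a quick listing shows the five class folders and how many images each one contains (a sketch, assuming the archive was unzipped into data/data2815 as above):

import os

data_dir = 'data/data2815'
for cls in sorted(os.listdir(data_dir)):
    cls_dir = os.path.join(data_dir, cls)
    if os.path.isdir(cls_dir):
        # each class folder holds the image files for one flower category
        print(cls, len(os.listdir(cls_dir)))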

 

Preprocess the data and convert it into the required format

In[ ]

# Preprocess the data into a standard format, and split it into two parts for training and for estimating accuracy
import codecs 
import os 
import random 
import shutil 
from PIL import Image 
 
train_ratio = 4.0 / 5 
 
all_file_dir = 'data/data2815' 
class_list = [c for c in os.listdir(all_file_dir) if os.path.isdir(os.path.join(all_file_dir, c)) and not c.endswith('Set') and not c.startswith('.')] 
class_list.sort()
print(class_list) 
train_image_dir = os.path.join(all_file_dir, "trainImageSet") 
if not os.path.exists(train_image_dir): 
    os.makedirs(train_image_dir) 
     
eval_image_dir = os.path.join(all_file_dir, "evalImageSet") 
if not os.path.exists(eval_image_dir): 
    os.makedirs(eval_image_dir) 
 
train_file = codecs.open(os.path.join(all_file_dir, "train.txt"), 'w') 
eval_file = codecs.open(os.path.join(all_file_dir, "eval.txt"), 'w') 
 
with codecs.open(os.path.join(all_file_dir, "label_list.txt"), "w") as label_list:
    label_id = 0
    for class_dir in class_list:
        label_list.write("{0}\t{1}\n".format(label_id, class_dir))
        image_path_pre = os.path.join(all_file_dir, class_dir)
        for file in os.listdir(image_path_pre):
            try:
                img = Image.open(os.path.join(image_path_pre, file))
                if random.uniform(0, 1) <= train_ratio:
                    shutil.copyfile(os.path.join(image_path_pre, file), os.path.join(train_image_dir, file))
                    train_file.write("{0}\t{1}\n".format(os.path.join(train_image_dir, file), label_id))
                else:
                    shutil.copyfile(os.path.join(image_path_pre, file), os.path.join(eval_image_dir, file))
                    eval_file.write("{0}\t{1}\n".format(os.path.join(eval_image_dir, file), label_id))
            except Exception as e:
                # a few files cannot be opened, so skip them as a light data-cleaning step
                pass
        label_id += 1
             
train_file.close() 
eval_file.close()
['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips']
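To verify the split, the two generated list files can simply be counted; the ratio should be roughly 4:1, matching train_ratio above (a sketch):

# count the samples written to the train and eval lists
with open('data/data2815/train.txt') as f:
    print('train samples:', len(f.readlines()))
with open('data/data2815/eval.txt') as f:
    print('eval samples:', len(f.readlines()))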

 

Main training code

In[1]

# -*- coding: UTF-8 -*- 
""" 
训练常用视觉基础网络,用于分类任务 
需要将训练图片,类别文件 label_list.txt 放置在同一个文件夹下 
程序会先读取 train.txt 文件获取类别数和图片数量 
""" 
from __future__ import absolute_import 
from __future__ import division 
from __future__ import print_function 
import os 
import numpy as np 
import time 
import math 
import paddle 
import paddle.fluid as fluid 
import codecs 
import logging 
from paddle.fluid.initializer import MSRA 
from paddle.fluid.initializer import Uniform 
from paddle.fluid.param_attr import ParamAttr 
from PIL import Image 
from PIL import ImageEnhance 
train_parameters = {
    "input_size": [3, 224, 224],
    "class_dim": -1,   # number of classes, filled in when the custom reader is initialized
    "image_count": -1, # number of training images, filled in when the custom reader is initialized
    "label_dict": {},
    "data_dir": "data/data2815",  # directory of the training data
    "train_file_list": "train.txt",
    "label_file": "label_list.txt",
    "save_freeze_dir": "./freeze-model",
    "save_persistable_dir": "./persistable-params",
    "continue_train": True,   # whether to continue from the last saved parameters; takes priority over the pretrained model
    "pretrained": False,      # whether to use a pretrained model; no pretrained model is provided for now
    "pretrained_dir": "data/data6487/ResNet50_pretrained",
    "mode": "train",
    "num_epochs": 5,
    "train_batch_size": 30,
    "mean_rgb": [127.5, 127.5, 127.5],  # per-channel mean; ideally computed from the training data, here simply the mid value
    "use_gpu": True,
    "dropout_seed": None,
    "image_enhance_strategy": {  # image augmentation settings
        "need_distort": True,    # enable color distortion
        "need_rotate": True,     # enable random rotation
        "need_crop": True,       # enable random cropping
        "need_flip": True,       # enable random horizontal flipping
        "hue_prob": 0.5,
        "hue_delta": 18,
        "contrast_prob": 0.5,
        "contrast_delta": 0.5,
        "saturation_prob": 0.5,
        "saturation_delta": 0.5,
        "brightness_prob": 0.5,
        "brightness_delta": 0.125
    },
    "early_stop": {
        "sample_frequency": 50,
        "successive_limit": 3,
        "good_acc1": 0.92
    },
    "rsm_strategy": {
        "learning_rate": 0.001,
        "lr_epochs": [20, 40, 60, 80, 100],
        "lr_decay": [1, 0.5, 0.25, 0.1, 0.01, 0.002]
    },
    "momentum_strategy": {
        "learning_rate": 0.001,
        "lr_epochs": [20, 40, 60, 80, 100],
        "lr_decay": [1, 0.5, 0.25, 0.1, 0.01, 0.002]
    },
    "sgd_strategy": {
        "learning_rate": 0.001,
        "lr_epochs": [20, 40, 60, 80, 100],
        "lr_decay": [1, 0.5, 0.25, 0.1, 0.01, 0.002]
    },
    "adam_strategy": {
        "learning_rate": 0.002
    }
}
class DistResNet():
    def __init__(self, layers=50, is_train=True):
        self.params = train_parameters
        self.layers = layers
        self.is_train = is_train
        self.weight_decay = 1e-4

    def net(self, input, class_dim=1000):
        layers = self.layers
        supported_layers = [50, 101, 152]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(supported_layers, layers)
        if layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
        num_filters = [64, 128, 256, 512]

        conv = self.conv_bn_layer(
            input=input, num_filters=64, filter_size=7, stride=2, act='relu')
        conv = fluid.layers.pool2d(
            input=conv,
            pool_size=3,
            pool_stride=2,
            pool_padding=1,
            pool_type='max')

        for block in range(len(depth)):
            for i in range(depth[block]):
                conv = self.bottleneck_block(
                    input=conv,
                    num_filters=num_filters[block],
                    stride=2 if i == 0 and block != 0 else 1)

        pool = fluid.layers.pool2d(
            input=conv, pool_size=7, pool_type='avg', global_pooling=True)
        stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
        out = fluid.layers.fc(
            input=pool,
            size=class_dim,
            act="softmax",
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv),
                regularizer=fluid.regularizer.L2Decay(self.weight_decay)),
            bias_attr=fluid.ParamAttr(
                regularizer=fluid.regularizer.L2Decay(self.weight_decay)))
        return out

    def conv_bn_layer(self,
                      input,
                      num_filters,
                      filter_size,
                      stride=1,
                      groups=1,
                      act=None,
                      bn_init_value=1.0):
        conv = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=groups,
            act=None,
            bias_attr=False,
            param_attr=fluid.ParamAttr(regularizer=fluid.regularizer.L2Decay(self.weight_decay)))
        return fluid.layers.batch_norm(
            input=conv, act=act, is_test=not self.is_train,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(bn_init_value),
                regularizer=None))

    def shortcut(self, input, ch_out, stride):
        ch_in = input.shape[1]
        if ch_in != ch_out or stride != 1:
            return self.conv_bn_layer(input, ch_out, 1, stride)
        else:
            return input

    def bottleneck_block(self, input, num_filters, stride):
        conv0 = self.conv_bn_layer(
            input=input, num_filters=num_filters, filter_size=1, act='relu')
        conv1 = self.conv_bn_layer(
            input=conv0,
            num_filters=num_filters,
            filter_size=3,
            stride=stride,
            act='relu')
        # NOTE: default bias is 0.0 already
        conv2 = self.conv_bn_layer(
            input=conv1, num_filters=num_filters * 4, filter_size=1, act=None, bn_init_value=0.0)
        short = self.shortcut(input, num_filters * 4, stride)
        return fluid.layers.elementwise_add(x=short, y=conv2, act='relu')
def init_log_config():
    """
    Initialize logging configuration.
    :return:
    """
    global logger
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    log_path = os.path.join(os.getcwd(), 'logs')
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    log_name = os.path.join(log_path, 'train.log')
    sh = logging.StreamHandler()
    fh = logging.FileHandler(log_name, mode='w')
    fh.setLevel(logging.DEBUG)
    formatter = logging.Formatter("%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s")
    fh.setFormatter(formatter)
    sh.setFormatter(formatter)
    logger.addHandler(sh)
    logger.addHandler(fh)
def init_train_parameters():
    """
    Initialize the training parameters, mainly the number of images and the number of classes.
    :return:
    """
    train_file_list = os.path.join(train_parameters['data_dir'], train_parameters['train_file_list'])
    label_list = os.path.join(train_parameters['data_dir'], train_parameters['label_file'])
    index = 0
    with codecs.open(label_list, encoding='utf-8') as flist:
        lines = [line.strip() for line in flist]
        for line in lines:
            parts = line.strip().split()
            train_parameters['label_dict'][parts[1]] = int(parts[0])
            index += 1
        train_parameters['class_dim'] = index
    with codecs.open(train_file_list, encoding='utf-8') as flist:
        lines = [line.strip() for line in flist]
        train_parameters['image_count'] = len(lines)
def resize_img(img, target_size):
    """
    Force-resize an image to the target size.
    :param img:
    :param target_size: [C, H, W] shape list, e.g. train_parameters['input_size']
    :return:
    """
    img = img.resize((target_size[1], target_size[2]), Image.BILINEAR)
    return img
def random_crop(img, size=None, scale=[0.08, 1.0], ratio=[3. / 4., 4. / 3.]):
    # `size` lets the caller pass train_parameters['input_size']; it only controls the
    # final resize, while the crop area itself is sampled from `scale` and `ratio`
    if size is None:
        size = train_parameters['input_size']
    aspect_ratio = math.sqrt(np.random.uniform(*ratio))
    w = 1. * aspect_ratio
    h = 1. / aspect_ratio

    bound = min((float(img.size[0]) / img.size[1]) / (w**2),
                (float(img.size[1]) / img.size[0]) / (h**2))
    scale_max = min(scale[1], bound)
    scale_min = min(scale[0], bound)

    target_area = img.size[0] * img.size[1] * np.random.uniform(scale_min, scale_max)
    target_size = math.sqrt(target_area)
    w = int(target_size * w)
    h = int(target_size * h)

    i = np.random.randint(0, img.size[0] - w + 1)
    j = np.random.randint(0, img.size[1] - h + 1)

    img = img.crop((i, j, i + w, j + h))
    img = img.resize((size[1], size[2]), Image.BILINEAR)
    return img
def rotate_image(img):
    """
    Augmentation: rotate the image by a random angle.
    """
    angle = np.random.randint(-14, 15)
    img = img.rotate(angle)
    return img
def random_brightness(img):
    """
    Augmentation: adjust brightness.
    :param img:
    :return:
    """
    prob = np.random.uniform(0, 1)
    if prob < train_parameters['image_enhance_strategy']['brightness_prob']:
        brightness_delta = train_parameters['image_enhance_strategy']['brightness_delta']
        delta = np.random.uniform(-brightness_delta, brightness_delta) + 1
        img = ImageEnhance.Brightness(img).enhance(delta)
    return img

def random_contrast(img):
    """
    Augmentation: adjust contrast.
    :param img:
    :return:
    """
    prob = np.random.uniform(0, 1)
    if prob < train_parameters['image_enhance_strategy']['contrast_prob']:
        contrast_delta = train_parameters['image_enhance_strategy']['contrast_delta']
        delta = np.random.uniform(-contrast_delta, contrast_delta) + 1
        img = ImageEnhance.Contrast(img).enhance(delta)
    return img

def random_saturation(img):
    """
    Augmentation: adjust saturation.
    :param img:
    :return:
    """
    prob = np.random.uniform(0, 1)
    if prob < train_parameters['image_enhance_strategy']['saturation_prob']:
        saturation_delta = train_parameters['image_enhance_strategy']['saturation_delta']
        delta = np.random.uniform(-saturation_delta, saturation_delta) + 1
        img = ImageEnhance.Color(img).enhance(delta)
    return img

def random_hue(img):
    """
    Augmentation: adjust hue.
    :param img:
    :return:
    """
    prob = np.random.uniform(0, 1)
    if prob < train_parameters['image_enhance_strategy']['hue_prob']:
        hue_delta = train_parameters['image_enhance_strategy']['hue_delta']
        delta = np.random.uniform(-hue_delta, hue_delta)
        img_hsv = np.array(img.convert('HSV'))
        img_hsv[:, :, 0] = img_hsv[:, :, 0] + delta
        img = Image.fromarray(img_hsv, mode='HSV').convert('RGB')
    return img

def distort_color(img):
    """
    Apply the color augmentations with some probability.
    :param img:
    :return:
    """
    prob = np.random.uniform(0, 1)
    # Apply different distort order
    if prob < 0.35:
        img = random_brightness(img)
        img = random_contrast(img)
        img = random_saturation(img)
        img = random_hue(img)
    elif prob < 0.7:
        img = random_brightness(img)
        img = random_saturation(img)
        img = random_hue(img)
        img = random_contrast(img)
    return img
def custom_image_reader(file_list, data_dir, mode):
    """
    Custom image reader; the number of classes and images has been initialized beforehand.
    :param file_list:
    :param data_dir:
    :param mode:
    :return:
    """
    with codecs.open(file_list) as flist:
        lines = [line.strip() for line in flist]

    def reader():
        np.random.shuffle(lines)
        for line in lines:
            if mode == 'train' or mode == 'val':
                img_path, label = line.split()
                img = Image.open(img_path)
                try:
                    if img.mode != 'RGB':
                        img = img.convert('RGB')
                    if train_parameters['image_enhance_strategy']['need_distort']:
                        img = distort_color(img)
                    if train_parameters['image_enhance_strategy']['need_rotate']:
                        img = rotate_image(img)
                    if train_parameters['image_enhance_strategy']['need_crop']:
                        img = random_crop(img, train_parameters['input_size'])
                    if train_parameters['image_enhance_strategy']['need_flip']:
                        mirror = int(np.random.uniform(0, 2))
                        if mirror == 1:
                            img = img.transpose(Image.FLIP_LEFT_RIGHT)
                    # HWC ---> CHW and normalize
                    img = np.array(img).astype('float32')
                    img -= train_parameters['mean_rgb']
                    img = img.transpose((2, 0, 1))  # HWC to CHW
                    img *= 0.007843                 # scale pixel values
                    yield img, int(label)
                except Exception as e:
                    pass                            # skip images that fail to load or process
            elif mode == 'test':
                img_path = os.path.join(data_dir, line)
                img = Image.open(img_path)
                if img.mode != 'RGB':
                    img = img.convert('RGB')
                img = resize_img(img, train_parameters['input_size'])
                # HWC ---> CHW and normalize
                img = np.array(img).astype('float32')
                img -= train_parameters['mean_rgb']
                img = img.transpose((2, 0, 1))  # HWC to CHW
                img *= 0.007843  # scale pixel values
                yield img

    return reader
def optimizer_momentum_setting():
    """
    A piecewise (step) learning-rate schedule suits relatively large training sets.
    """
    learning_strategy = train_parameters['momentum_strategy']
    batch_size = train_parameters["train_batch_size"]
    iters = train_parameters["image_count"] // batch_size
    lr = learning_strategy['learning_rate']
    boundaries = [i * iters for i in learning_strategy["lr_epochs"]]
    values = [i * lr for i in learning_strategy["lr_decay"]]
    learning_rate = fluid.layers.piecewise_decay(boundaries, values)
    optimizer = fluid.optimizer.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9)
    return optimizer

def optimizer_rms_setting():
    """
    A piecewise (step) learning-rate schedule suits relatively large training sets.
    """
    batch_size = train_parameters["train_batch_size"]
    iters = train_parameters["image_count"] // batch_size
    learning_strategy = train_parameters['rsm_strategy']
    lr = learning_strategy['learning_rate']
    boundaries = [i * iters for i in learning_strategy["lr_epochs"]]
    values = [i * lr for i in learning_strategy["lr_decay"]]
    optimizer = fluid.optimizer.RMSProp(
        learning_rate=fluid.layers.piecewise_decay(boundaries, values))
    return optimizer

def optimizer_sgd_setting():
    """
    The loss decreases relatively slowly, but the final result is good; a piecewise
    learning-rate schedule suits relatively large training sets.
    """
    learning_strategy = train_parameters['sgd_strategy']
    batch_size = train_parameters["train_batch_size"]
    iters = train_parameters["image_count"] // batch_size
    lr = learning_strategy['learning_rate']
    boundaries = [i * iters for i in learning_strategy["lr_epochs"]]
    values = [i * lr for i in learning_strategy["lr_decay"]]
    learning_rate = fluid.layers.piecewise_decay(boundaries, values)
    optimizer = fluid.optimizer.SGD(learning_rate=learning_rate)
    return optimizer

def optimizer_adam_setting():
    """
    Drives the loss down quickly, but tends to run out of steam later in training.
    """
    learning_strategy = train_parameters['adam_strategy']
    learning_rate = learning_strategy['learning_rate']
    optimizer = fluid.optimizer.Adam(learning_rate=learning_rate)
    return optimizer
def load_params(exe, program):
    if train_parameters['continue_train'] and os.path.exists(train_parameters['save_persistable_dir']):
        logger.info('load params from retrain model')
        fluid.io.load_persistables(executor=exe,
                                   dirname=train_parameters['save_persistable_dir'],
                                   main_program=program)
    elif train_parameters['pretrained'] and os.path.exists(train_parameters['pretrained_dir']):
        logger.info('load params from pretrained model')

        def if_exist(var):
            return os.path.exists(os.path.join(train_parameters['pretrained_dir'], var.name))

        fluid.io.load_vars(exe, train_parameters['pretrained_dir'], main_program=program,
                           predicate=if_exist)
def train():
    train_prog = fluid.Program()
    train_startup = fluid.Program()
    logger.info("create prog success")
    logger.info("train config: %s", str(train_parameters))
    logger.info("build input custom reader and data feeder")
    file_list = os.path.join(train_parameters['data_dir'], "train.txt")
    mode = train_parameters['mode']
    batch_reader = paddle.batch(custom_image_reader(file_list, train_parameters['data_dir'], mode),
                                batch_size=train_parameters['train_batch_size'],
                                drop_last=False)
    batch_reader = paddle.reader.shuffle(batch_reader, train_parameters['train_batch_size'])
    place = fluid.CUDAPlace(0) if train_parameters['use_gpu'] else fluid.CPUPlace()
    # placeholders for the input data
    img = fluid.layers.data(name='img', shape=train_parameters['input_size'], dtype='float32')
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    feeder = fluid.DataFeeder(feed_list=[img, label], place=place)

    # choose the network
    logger.info("build newwork")
    model = DistResNet()
    out = model.net(input=img, class_dim=train_parameters['class_dim'])
    cost = fluid.layers.cross_entropy(out, label)
    avg_cost = fluid.layers.mean(x=cost)
    acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)

    # choose the optimizer
    optimizer = optimizer_rms_setting()
    # optimizer = optimizer_momentum_setting()
    # optimizer = optimizer_sgd_setting()
    # optimizer = optimizer_adam_setting()
    optimizer.minimize(avg_cost)
    exe = fluid.Executor(place)

    main_program = fluid.default_main_program()
    exe.run(fluid.default_startup_program())
    train_fetch_list = [avg_cost.name, acc_top1.name, out.name]
    load_params(exe, main_program)

    # main training loop
    stop_strategy = train_parameters['early_stop']
    successive_limit = stop_strategy['successive_limit']
    sample_freq = stop_strategy['sample_frequency']
    good_acc1 = stop_strategy['good_acc1']
    successive_count = 0
    stop_train = False
    total_batch_count = 0
    for pass_id in range(train_parameters["num_epochs"]):
        logger.info("current pass: %d, start read image", pass_id)
        batch_id = 0
        for step_id, data in enumerate(batch_reader()):
            t1 = time.time()
            # logger.info("data size:{0}".format(len(data)))
            loss, acc1, pred_ot = exe.run(main_program,
                                          feed=feeder.feed(data),
                                          fetch_list=train_fetch_list)
            t2 = time.time()
            batch_id += 1
            total_batch_count += 1
            period = t2 - t1
            loss = np.mean(np.array(loss))
            acc1 = np.mean(np.array(acc1))
            if batch_id % 10 == 0:
                logger.info("Pass {0}, trainbatch {1}, loss {2}, acc1 {3}, time {4}".format(pass_id, batch_id, loss, acc1,
                                                                                            "%2.2f sec" % period))
            # simple early stopping: stop once the target accuracy is reached several times in a row
            if acc1 >= good_acc1:
                successive_count += 1
                logger.info("current acc1 {0} meets good {1}, successive count {2}".format(acc1, good_acc1, successive_count))
                fluid.io.save_inference_model(dirname=train_parameters['save_freeze_dir'],
                                              feeded_var_names=['img'],
                                              target_vars=[out],
                                              main_program=main_program,
                                              executor=exe)
                if successive_count >= successive_limit:
                    logger.info("end training")
                    stop_train = True
                    break
            else:
                successive_count = 0

            # periodic checkpointing to limit the loss from an unexpected interruption
            if total_batch_count % sample_freq == 0:
                logger.info("temp save {0} batch train result, current acc1 {1}".format(total_batch_count, acc1))
                fluid.io.save_persistables(dirname=train_parameters['save_persistable_dir'],
                                           main_program=main_program,
                                           executor=exe)
        if stop_train:
            break
    logger.info("training till last epcho, end training")
    fluid.io.save_persistables(dirname=train_parameters['save_persistable_dir'],
                               main_program=main_program,
                               executor=exe)
    fluid.io.save_inference_model(dirname=train_parameters['save_freeze_dir'],
                                  feeded_var_names=['img'],
                                  target_vars=[out],
                                  main_program=main_program,
                                  executor=exe)
if __name__ == '__main__':
    init_log_config()
    init_train_parameters()
    train()
2020-01-15 20:05:43,148-INFO: create prog success
2020-01-15 20:05:43,151-INFO: train config: {'input_size': [3, 224, 224], 'class_dim': 5, 'image_count': 2960, 'label_dict': {'daisy': 0, 'dandelion': 1, 'roses': 2, 'sunflowers': 3, 'tulips': 4}, 'data_dir': 'data/data2815', 'train_file_list': 'train.txt', 'label_file': 'label_list.txt', 'save_freeze_dir': './freeze-model', 'save_persistable_dir': './persistable-params', 'continue_train': True, 'pretrained': False, 'pretrained_dir': 'data/data6487/ResNet50_pretrained', 'mode': 'train', 'num_epochs': 5, 'train_batch_size': 30, 'mean_rgb': [127.5, 127.5, 127.5], 'use_gpu': True, 'dropout_seed': None, 'image_enhance_strategy': {'need_distort': True, 'need_rotate': True, 'need_crop': True, 'need_flip': True, 'hue_prob': 0.5, 'hue_delta': 18, 'contrast_prob': 0.5, 'contrast_delta': 0.5, 'saturation_prob': 0.5, 'saturation_delta': 0.5, 'brightness_prob': 0.5, 'brightness_delta': 0.125}, 'early_stop': {'sample_frequency': 50, 'successive_limit': 3, 'good_acc1': 0.92}, 'rsm_strategy': {'learning_rate': 0.001, 'lr_epochs': [20, 40, 60, 80, 100], 'lr_decay': [1, 0.5, 0.25, 0.1, 0.01, 0.002]}, 'momentum_strategy': {'learning_rate': 0.001, 'lr_epochs': [20, 40, 60, 80, 100], 'lr_decay': [1, 0.5, 0.25, 0.1, 0.01, 0.002]}, 'sgd_strategy': {'learning_rate': 0.001, 'lr_epochs': [20, 40, 60, 80, 100], 'lr_decay': [1, 0.5, 0.25, 0.1, 0.01, 0.002]}, 'adam_strategy': {'learning_rate': 0.002}}
2020-01-15 20:05:43,152-INFO: build input custom reader and data feeder
2020-01-15 20:05:43,154-INFO: build newwork
2020-01-15 20:05:46,667-INFO: load params from retrain model
2020-01-15 20:05:47,299-INFO: current pass: 0, start read image
2020-01-15 20:05:57,508-INFO: Pass 0, trainbatch 10, loss 0.8796854615211487, acc1 0.6000000238418579, time 0.16 sec
2020-01-15 20:05:59,203-INFO: Pass 0, trainbatch 20, loss 0.987860381603241, acc1 0.6000000238418579, time 0.17 sec
2020-01-15 20:06:01,056-INFO: Pass 0, trainbatch 30, loss 0.7599517107009888, acc1 0.699999988079071, time 0.33 sec
2020-01-15 20:06:11,107-INFO: Pass 0, trainbatch 40, loss 1.2075215578079224, acc1 0.6000000238418579, time 0.17 sec
2020-01-15 20:06:12,817-INFO: Pass 0, trainbatch 50, loss 0.957575261592865, acc1 0.5666666626930237, time 0.17 sec
2020-01-15 20:06:12,819-INFO: temp save 50 batch train result, current acc1 0.5666666626930237
2020-01-15 20:06:17,525-INFO: Pass 0, trainbatch 60, loss 1.161435842514038, acc1 0.5666666626930237, time 0.16 sec
2020-01-15 20:06:27,925-INFO: Pass 0, trainbatch 70, loss 0.9199239015579224, acc1 0.6666666865348816, time 0.17 sec
2020-01-15 20:06:29,842-INFO: Pass 0, trainbatch 80, loss 0.9726725816726685, acc1 0.699999988079071, time 0.18 sec
2020-01-15 20:06:31,548-INFO: Pass 0, trainbatch 90, loss 0.8365088701248169, acc1 0.7333333492279053, time 0.17 sec
2020-01-15 20:06:35,782-INFO: current pass: 1, start read image
2020-01-15 20:06:44,383-INFO: temp save 100 batch train result, current acc1 0.699999988079071
2020-01-15 20:06:48,776-INFO: Pass 1, trainbatch 10, loss 0.9679884314537048, acc1 0.6000000238418579, time 0.18 sec
2020-01-15 20:06:50,672-INFO: Pass 1, trainbatch 20, loss 0.9205846786499023, acc1 0.6333333253860474, time 0.18 sec
2020-01-15 20:06:52,387-INFO: Pass 1, trainbatch 30, loss 0.8219196200370789, acc1 0.6333333253860474, time 0.17 sec
2020-01-15 20:07:02,447-INFO: Pass 1, trainbatch 40, loss 1.0034916400909424, acc1 0.6333333253860474, time 0.16 sec
2020-01-15 20:07:04,123-INFO: Pass 1, trainbatch 50, loss 0.9310668706893921, acc1 0.6333333253860474, time 0.16 sec
2020-01-15 20:07:04,288-INFO: temp save 150 batch train result, current acc1 0.699999988079071
2020-01-15 20:07:08,879-INFO: Pass 1, trainbatch 60, loss 1.1847180128097534, acc1 0.6666666865348816, time 0.17 sec
2020-01-15 20:07:18,843-INFO: Pass 1, trainbatch 70, loss 0.7221229672431946, acc1 0.7333333492279053, time 0.17 sec
2020-01-15 20:07:20,725-INFO: Pass 1, trainbatch 80, loss 0.7030007839202881, acc1 0.7333333492279053, time 0.17 sec
2020-01-15 20:07:22,443-INFO: Pass 1, trainbatch 90, loss 0.9687625765800476, acc1 0.6000000238418579, time 0.17 sec
2020-01-15 20:07:26,540-INFO: current pass: 2, start read image
2020-01-15 20:07:35,332-INFO: temp save 200 batch train result, current acc1 0.6333333253860474
2020-01-15 20:07:39,516-INFO: Pass 2, trainbatch 10, loss 0.8318621516227722, acc1 0.7333333492279053, time 0.17 sec
2020-01-15 20:07:41,362-INFO: Pass 2, trainbatch 20, loss 0.9822636842727661, acc1 0.5, time 0.17 sec
2020-01-15 20:07:43,018-INFO: Pass 2, trainbatch 30, loss 1.0782616138458252, acc1 0.46666666865348816, time 0.16 sec
2020-01-15 20:07:52,731-INFO: Pass 2, trainbatch 40, loss 0.8365978598594666, acc1 0.7333333492279053, time 0.17 sec
2020-01-15 20:07:54,416-INFO: Pass 2, trainbatch 50, loss 0.9710181951522827, acc1 0.6666666865348816, time 0.17 sec
2020-01-15 20:07:54,754-INFO: temp save 250 batch train result, current acc1 0.6666666865348816
2020-01-15 20:07:59,101-INFO: Pass 2, trainbatch 60, loss 0.8595811128616333, acc1 0.6666666865348816, time 0.17 sec
2020-01-15 20:08:09,177-INFO: Pass 2, trainbatch 70, loss 0.6333511471748352, acc1 0.7333333492279053, time 0.17 sec
2020-01-15 20:08:11,021-INFO: Pass 2, trainbatch 80, loss 0.9489333629608154, acc1 0.6333333253860474, time 0.16 sec
2020-01-15 20:08:12,720-INFO: Pass 2, trainbatch 90, loss 0.7598487138748169, acc1 0.699999988079071, time 0.17 sec
2020-01-15 20:08:16,948-INFO: current pass: 3, start read image
2020-01-15 20:08:25,828-INFO: temp save 300 batch train result, current acc1 0.7333333492279053
2020-01-15 20:08:29,844-INFO: Pass 3, trainbatch 10, loss 0.6988822817802429, acc1 0.699999988079071, time 0.17 sec
2020-01-15 20:08:31,688-INFO: Pass 3, trainbatch 20, loss 0.8830958604812622, acc1 0.6000000238418579, time 0.17 sec
2020-01-15 20:08:33,375-INFO: Pass 3, trainbatch 30, loss 0.6778205633163452, acc1 0.7333333492279053, time 0.16 sec
2020-01-15 20:08:43,447-INFO: Pass 3, trainbatch 40, loss 1.0915954113006592, acc1 0.6333333253860474, time 0.33 sec
2020-01-15 20:08:45,154-INFO: Pass 3, trainbatch 50, loss 0.5070432424545288, acc1 0.8333333134651184, time 0.18 sec
2020-01-15 20:08:45,668-INFO: temp save 350 batch train result, current acc1 0.6333333253860474
2020-01-15 20:08:49,909-INFO: Pass 3, trainbatch 60, loss 0.845004141330719, acc1 0.6000000238418579, time 0.35 sec
2020-01-15 20:09:00,139-INFO: Pass 3, trainbatch 70, loss 0.6369796991348267, acc1 0.800000011920929, time 0.17 sec
2020-01-15 20:09:01,846-INFO: Pass 3, trainbatch 80, loss 0.5189567804336548, acc1 0.8333333134651184, time 0.18 sec
2020-01-15 20:09:03,939-INFO: Pass 3, trainbatch 90, loss 0.7193944454193115, acc1 0.7333333492279053, time 0.18 sec
2020-01-15 20:09:07,939-INFO: current pass: 4, start read image
2020-01-15 20:09:17,060-INFO: temp save 400 batch train result, current acc1 0.699999988079071
2020-01-15 20:09:20,939-INFO: Pass 4, trainbatch 10, loss 0.8663693070411682, acc1 0.7333333492279053, time 0.16 sec
2020-01-15 20:09:22,619-INFO: Pass 4, trainbatch 20, loss 1.1480329036712646, acc1 0.5, time 0.16 sec
2020-01-15 20:09:24,480-INFO: Pass 4, trainbatch 30, loss 0.8260254263877869, acc1 0.7666666507720947, time 0.17 sec
2020-01-15 20:09:34,614-INFO: Pass 4, trainbatch 40, loss 0.8281996846199036, acc1 0.699999988079071, time 0.17 sec
2020-01-15 20:09:36,479-INFO: Pass 4, trainbatch 50, loss 1.1023882627487183, acc1 0.5333333611488342, time 0.18 sec
2020-01-15 20:09:37,167-INFO: temp save 450 batch train result, current acc1 0.800000011920929
2020-01-15 20:09:41,004-INFO: Pass 4, trainbatch 60, loss 0.6029046177864075, acc1 0.8333333134651184, time 0.17 sec
2020-01-15 20:09:50,941-INFO: Pass 4, trainbatch 70, loss 0.7502086758613586, acc1 0.699999988079071, time 0.17 sec
2020-01-15 20:09:52,606-INFO: Pass 4, trainbatch 80, loss 1.2345538139343262, acc1 0.6333333253860474, time 0.16 sec
2020-01-15 20:09:54,427-INFO: Pass 4, trainbatch 90, loss 0.5379651188850403, acc1 0.8666666746139526, time 0.17 sec
2020-01-15 20:09:58,252-INFO: training till last epcho, end training

 

 

In[2]

from __future__ import absolute_import     
from __future__ import division     
from __future__ import print_function     
import os     
import numpy as np     
import random     
import time     
import codecs     
import sys     
import functools     
import math     
import paddle     
import paddle.fluid as fluid     
from paddle.fluid import core     
from paddle.fluid.param_attr import ParamAttr     
from PIL import Image, ImageEnhance     
target_size = [3, 224, 224]     
mean_rgb = [127.5, 127.5, 127.5]     
data_dir = "data/data2815"     
eval_file = "eval.txt"     
use_gpu = True     
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()     
exe = fluid.Executor(place)     
save_freeze_dir = "./freeze-model"     
[inference_program, feed_target_names, fetch_targets] = fluid.io.load_inference_model(dirname=save_freeze_dir, executor=exe)     
# print(fetch_targets)     
def crop_image(img, target_size):
    width, height = img.size
    w_start = (width - target_size[2]) / 2
    h_start = (height - target_size[1]) / 2
    w_end = w_start + target_size[2]
    h_end = h_start + target_size[1]
    img = img.crop((w_start, h_start, w_end, h_end))
    return img

def resize_img(img, target_size):
    ret = img.resize((target_size[1], target_size[2]), Image.BILINEAR)
    return ret

def read_image(img_path):
    img = Image.open(img_path)
    if img.mode != 'RGB':
        img = img.convert('RGB')
    img = crop_image(img, target_size)
    img = np.array(img).astype('float32')
    img -= mean_rgb
    img = img.transpose((2, 0, 1))  # HWC to CHW
    img *= 0.007843
    img = img[np.newaxis, :]
    return img

def infer(image_path):
    tensor_img = read_image(image_path)
    label = exe.run(inference_program, feed={feed_target_names[0]: tensor_img}, fetch_list=fetch_targets)
    return np.argmax(label)

def eval_all():
    eval_file_path = os.path.join(data_dir, eval_file)
    total_count = 0
    right_count = 0
    with codecs.open(eval_file_path, encoding='utf-8') as flist:
        lines = [line.strip() for line in flist]
    t1 = time.time()
    for line in lines:
        total_count += 1
        parts = line.strip().split()
        result = infer(parts[0])
        # print("infer result:{0} answer:{1}".format(result, parts[1]))
        if str(result) == parts[1]:
            right_count += 1
    period = time.time() - t1
    print("total eval count:{0} cost time:{1} predict accuracy:{2}".format(total_count, "%2.2f sec" % period, right_count / total_count))

if __name__ == '__main__':
    eval_all()
total eval count:710 cost time:22.78 sec predict accuracy:0.6563380281690141
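Because infer() returns the predicted label id, the same frozen model can also be used to classify a single image and map the result back to a class name via label_list.txt. A usage sketch (the image path below is a hypothetical example):

# map label ids back to class names using the label file written during preprocessing
label_names = {}
with codecs.open(os.path.join(data_dir, "label_list.txt"), encoding='utf-8') as f:
    for line in f:
        idx, name = line.strip().split()
        label_names[int(idx)] = name

sample_path = "data/data2815/evalImageSet/some_flower.jpg"  # hypothetical path
print("predicted class:", label_names[infer(sample_path)])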

Click the link to try this project hands-on on AI Studio: https://aistudio.baidu.com/aistudio/projectdetail/169503


>> Visit the PaddlePaddle official website to learn more
