#
# Author: 韦访
# Blog: https://blog.csdn.net/rookie_wei
# WeChat: 1007895847
# When adding me on WeChat, please mention that you came from CSDN
# Everyone is welcome to learn together
#
------韦访 20190118
1. Overview
I want to build a fatigue detector with TensorFlow, so where should I start? Common sense says that when you are tired your eyes keep wanting to close and you yawn. So let's start with the eyes. The first task is to locate them, which is facial keypoint detection. In the face recognition tutorial (https://blog.csdn.net/rookie_wei/article/details/81676177) we saw that MTCNN can detect a face and locate 5 keypoints (left eye, right eye, nose, left mouth corner, right mouth corner). Once the eye positions are known, we can crop the eye regions and classify each crop as open or closed. For yawning we would likewise need to locate the mouth first; what to do after that will come later. In this post we only classify eyes as open or closed. One note up front: I may not publish a complete fatigue detection system tutorial, and even if I build one I may not open-source all of it. This post is meant as a partial reference and as a review of the earlier tutorials.
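To make that pipeline concrete, here is a minimal sketch (not code from this series) of cropping a square eye patch around one of MTCNN's eye keypoints; the patch size and the way the keypoint coordinates are obtained are assumptions for illustration only.

# Minimal sketch: crop a square patch centered on an eye keypoint (x, y)
# returned by MTCNN, so the patch can be fed to an open/closed-eye classifier.
# patch_size is an arbitrary illustrative default, not a value from this post.
def crop_eye(frame, eye_x, eye_y, patch_size=24):
    """frame is an HxWxC numpy image; returns the cropped eye region."""
    half = patch_size // 2
    h, w = frame.shape[:2]
    # Clamp the box so it stays inside the image.
    x1, y1 = max(int(eye_x) - half, 0), max(int(eye_y) - half, 0)
    x2, y2 = min(int(eye_x) + half, w), min(int(eye_y) + half, h)
    return frame[y1:y2, x1:x2]

# Usage: (eye_x, eye_y) would be the left- or right-eye landmark from MTCNN,
# e.g. eye_patch = crop_eye(frame, left_eye_x, left_eye_y)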
2. Downloading the dataset
Download link:
http://parnec.nuaa.edu.cn/xtan/data/datasets/dataset_B_Eye_Images.rar
The dataset is tiny, only about 2.5 MB. After downloading, unzip it and you get the directory shown below.
Judging from the folder names, the eyes are divided into four states: left eye open, left eye closed, right eye open, right eye closed. Have a look inside each folder to check whether the contents match that guess.
They do. So the next question is: how do we classify them? If you have followed my TensorFlow series you will know that in lectures 16 and 20 we used the slim models to classify images; if not, the links are:
Lecture 16: https://blog.csdn.net/rookie_wei/article/details/80639490
Lecture 20: https://blog.csdn.net/rookie_wei/article/details/80796009
This post is really just another application of those two lectures, so if you have already read them, you can stop here and build the open/closed-eye classifier yourself from that material.
3. Converting the dataset to TFRecord format
I won't repeat how to download and verify the slim models or how they are structured; let's convert the dataset to TFRecord format. I tried training on the dataset's default four categories (left eye open, left eye closed, right eye open, right eye closed), and the results were poor: the accuracy was only a little over 50%, which is clearly not good enough. Since all we need is to tell open eyes from closed eyes, such fine-grained classes are unnecessary. So cut all the images from the closedRightEyes folder into closedLeftEyes, cut all the images from openRightEyes into openLeftEyes, and delete the two now-empty folders. What remains is shown below.
Put these two folders under the slim/images_data/eye_open_and_close folder.
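If you would rather not shuffle the files around by hand, a short script along these lines (a sketch; the dataset root path is an assumption, adjust it to wherever you unpacked the rar) performs the same merge:

# Merge the right-eye folders into the left-eye folders and remove the empty
# ones, so only two class folders (closed / open) remain.
# Assumes file names do not collide between the folders being merged.
import os
import shutil

root = 'dataset_B_Eye_Images'  # assumed path of the unpacked dataset
merges = [('closedRightEyes', 'closedLeftEyes'),
          ('openRightEyes', 'openLeftEyes')]

for src_name, dst_name in merges:
    src = os.path.join(root, src_name)
    dst = os.path.join(root, dst_name)
    for filename in os.listdir(src):
        shutil.move(os.path.join(src, filename), os.path.join(dst, filename))
    os.rmdir(src)  # the source folder is empty now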
Now let's modify the code, following the same pattern as lecture 20. This is the benefit of writing blog posts or notes: you can pick them up and get straight to work. First, copy download_and_convert_flowers.py and rename the copy to convert_eye.py. The modified source is as follows:
# encoding:utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math
import os
import random
import sys

import tensorflow as tf

from datasets import dataset_utils

# The URL where the Flowers data can be downloaded.
_DATA_URL = 'http://download.tensorflow.org/example_images/flower_photos.tgz'

# The number of images in the validation set.
_NUM_VALIDATION = 350

# Seed for repeatability.
_RANDOM_SEED = 0

# The number of shards per dataset split.
_NUM_SHARDS = 4


class ImageReader(object):
  """Helper class that provides TensorFlow image coding utilities."""

  def __init__(self):
    # Initializes function that decodes RGB JPEG data.
    self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
    self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)

  def read_image_dims(self, sess, image_data):
    image = self.decode_jpeg(sess, image_data)
    return image.shape[0], image.shape[1]

  def decode_jpeg(self, sess, image_data):
    image = sess.run(self._decode_jpeg,
                     feed_dict={self._decode_jpeg_data: image_data})
    assert len(image.shape) == 3
    assert image.shape[2] == 3
    return image


def _get_filenames_and_classes(dataset_dir):
  """Returns a list of filenames and inferred class names.

  Args:
    dataset_dir: A directory containing a set of subdirectories representing
      class names. Each subdirectory should contain PNG or JPG encoded images.

  Returns:
    A list of image file paths, relative to `dataset_dir` and the list of
    subdirectories, representing class names.
  """
  # Changed 'flower_photos' to 'eye_open_and_close'.
  flower_root = os.path.join(dataset_dir, 'eye_open_and_close')
  directories = []
  class_names = []
  for filename in os.listdir(flower_root):
    path = os.path.join(flower_root, filename)
    if os.path.isdir(path):
      directories.append(path)
      class_names.append(filename)

  photo_filenames = []
  for directory in directories:
    for filename in os.listdir(directory):
      path = os.path.join(directory, filename)
      photo_filenames.append(path)

  return photo_filenames, sorted(class_names)


def _get_dataset_filename(dataset_dir, split_name, shard_id):
  # Changed the output file name prefix from 'flowers' to 'eye'.
  output_filename = 'eye_%s_%05d-of-%05d.tfrecord' % (
      split_name, shard_id, _NUM_SHARDS)
  return os.path.join(dataset_dir, output_filename)


def _convert_dataset(split_name, filenames, class_names_to_ids, dataset_dir):
  """Converts the given filenames to a TFRecord dataset.

  Args:
    split_name: The name of the dataset, either 'train' or 'validation'.
    filenames: A list of absolute paths to png or jpg images.
    class_names_to_ids: A dictionary from class names (strings) to ids (integers).
    dataset_dir: The directory where the converted datasets are stored.
  """
  assert split_name in ['train', 'validation']

  num_per_shard = int(math.ceil(len(filenames) / float(_NUM_SHARDS)))

  with tf.Graph().as_default():
    image_reader = ImageReader()

    with tf.Session('') as sess:
      for shard_id in range(_NUM_SHARDS):
        output_filename = _get_dataset_filename(
            dataset_dir, split_name, shard_id)

        with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
          start_ndx = shard_id * num_per_shard
          end_ndx = min((shard_id + 1) * num_per_shard, len(filenames))
          for i in range(start_ndx, end_ndx):
            sys.stdout.write('\r>> Converting image %d/%d shard %d' % (
                i + 1, len(filenames), shard_id))
            sys.stdout.flush()

            # Read the filename:
            image_data = tf.gfile.FastGFile(filenames[i], 'rb').read()
            height, width = image_reader.read_image_dims(sess, image_data)

            class_name = os.path.basename(os.path.dirname(filenames[i]))
            class_id = class_names_to_ids[class_name]

            example = dataset_utils.image_to_tfexample(
                image_data, b'jpg', height, width, class_id)
            tfrecord_writer.write(example.SerializeToString())

  sys.stdout.write('\n')
  sys.stdout.flush()


def _clean_up_temporary_files(dataset_dir):
  """Removes temporary files used to create the dataset.

  Args:
    dataset_dir: The directory where the temporary files are stored.
  """
  filename = _DATA_URL.split('/')[-1]
  filepath = os.path.join(dataset_dir, filename)
  tf.gfile.Remove(filepath)

  # Changed 'flower_photos' to 'eye_photos'.
  tmp_dir = os.path.join(dataset_dir, 'eye_photos')
  tf.gfile.DeleteRecursively(tmp_dir)


def _dataset_exists(dataset_dir):
  for split_name in ['train', 'validation']:
    for shard_id in range(_NUM_SHARDS):
      output_filename = _get_dataset_filename(
          dataset_dir, split_name, shard_id)
      if not tf.gfile.Exists(output_filename):
        return False
  return True


def run(dataset_dir):
  """Runs the download and conversion operation.

  Args:
    dataset_dir: The dataset directory where the dataset is stored.
  """
  if not tf.gfile.Exists(dataset_dir):
    tf.gfile.MakeDirs(dataset_dir)

  if _dataset_exists(dataset_dir):
    print('Dataset files already exist. Exiting without re-creating them.')
    return

  # We do not need to download anything, so this line is commented out.
  # dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)
  photo_filenames, class_names = _get_filenames_and_classes(dataset_dir)
  class_names_to_ids = dict(zip(class_names, range(len(class_names))))

  # Divide into train and test:
  random.seed(_RANDOM_SEED)
  random.shuffle(photo_filenames)
  training_filenames = photo_filenames[_NUM_VALIDATION:]
  validation_filenames = photo_filenames[:_NUM_VALIDATION]

  # First, convert the training and validation sets.
  _convert_dataset('train', training_filenames, class_names_to_ids,
                   dataset_dir)
  _convert_dataset('validation', validation_filenames, class_names_to_ids,
                   dataset_dir)

  # Finally, write the labels file:
  labels_to_class_names = dict(zip(range(len(class_names)), class_names))
  dataset_utils.write_label_file(labels_to_class_names, dataset_dir)

  # Commented out, otherwise the original images would be deleted after conversion.
  # _clean_up_temporary_files(dataset_dir)
  print('\nFinished converting the Flowers dataset!')
Then modify download_and_convert_data.py: add
from datasets import convert_eye
and, after
elif FLAGS.dataset_name == 'mnist':
download_and_convert_mnist.run(FLAGS.dataset_dir)
add
elif FLAGS.dataset_name == 'eye':
convert_eye.run(FLAGS.dataset_dir)
Then run the following command:
python download_and_convert_data.py --dataset_name=eye
--dataset_dir=images_data/eye_open_and_close
The output:
Instructions for updating:
Use tf.gfile.GFile.
>> Converting image 143/4498 shard 0Traceback (most recent call last):
  File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/client/session.py", line 1335, in _do_call
    return fn(*args)
  File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/client/session.py", line 1320, in _run_fn
    options, feed_dict, fetch_list, target_list, run_metadata)
  File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/client/session.py", line 1408, in _call_tf_sessionrun
    run_metadata)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Expected image (JPEG, PNG, or GIF), got unknown format starting with '\320\317\021\340\241\261\032\341\000\000\000\000\000\000\000\000'
	 [[{{node DecodeJpeg}}]]
It fails with an error. The key line is this one:
tensorflow.python.framework.errors_impl.InvalidArgumentError: Expected image (JPEG, PNG, or GIF), got unknown format starting with
The first thing to suspect is that the dataset contains some file that is not an image.
Sure enough, there is exactly such a file in the class folders (judging from the byte signature \320\317\021\340 in the error message, an OLE compound-document header, it is most likely a Windows Thumbs.db thumbnail cache). Just delete it, and note that both folders contain one.
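A quick way to hunt these files down (a small helper sketch, not part of the original workflow; the dataset root is an assumption) is to check the first bytes of every file against the JPEG/PNG/GIF signatures that decode_jpeg accepts:

# Report files whose header is not JPEG, PNG, or GIF (e.g. a stray Thumbs.db).
# Nothing is deleted automatically; the script only prints the offenders.
import os

root = 'images_data/eye_open_and_close'  # assumed dataset location
signatures = (b'\xff\xd8\xff',            # JPEG
              b'\x89PNG\r\n\x1a\n',       # PNG
              b'GIF87a', b'GIF89a')       # GIF

for dirpath, _, filenames in os.walk(root):
    for name in filenames:
        path = os.path.join(dirpath, name)
        with open(path, 'rb') as f:
            head = f.read(8)
        if not head.startswith(signatures):
            print('Not an image:', path)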
Run the command above again, and this time the conversion completes. Admittedly, the final message still says it finished converting the Flowers dataset; if that bothers you, go ahead and change the string. Check the images_data/eye_open_and_close/ folder to confirm that the TFRecord files were generated.
You can also use the code from lecture 20 to display an image and verify that the TFRecords are correct; I won't do that here.
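If you do want a quick sanity check without pulling in the lecture 20 code, a minimal sketch like the one below reads one record back and writes the encoded image to disk (the shard file name must match one that was actually generated, and the output name is arbitrary):

# Read one example from a generated TFRecord and dump the encoded image
# back to a file as a sanity check.
import tensorflow as tf

record_file = 'images_data/eye_open_and_close/eye_train_00000-of-00004.tfrecord'

for record in tf.python_io.tf_record_iterator(record_file):
    example = tf.train.Example()
    example.ParseFromString(record)
    features = example.features.feature
    label = features['image/class/label'].int64_list.value[0]
    encoded = features['image/encoded'].bytes_list.value[0]
    with open('check_label_%d.jpg' % label, 'wb') as f:
        f.write(encoded)
    print('label:', label, 'encoded bytes:', len(encoded))
    break  # one example is enough for a quick check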
4. Defining the datasets file
Continue modifying the code: copy datasets/flowers.py, rename the copy to eye.py, and change
_FILE_PATTERN = 'flowers_%s_*.tfrecord'
to
_FILE_PATTERN = 'eye_%s_*.tfrecord'
and change
SPLITS_TO_SIZES = {'train': 3320, 'validation': 350}
to
SPLITS_TO_SIZES = {'train': 4496, 'validation': 350}
Here train is the number of training images and validation is the number of validation images. Since we now have only two classes, _NUM_CLASSES is also set to 2. The complete code is as follows:
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides data for the flowers dataset.

The dataset scripts used to create the dataset can be found at:
tensorflow/models/research/slim/datasets/download_and_convert_flowers.py
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import tensorflow as tf

from datasets import dataset_utils

slim = tf.contrib.slim

_FILE_PATTERN = 'eye_%s_*.tfrecord'

SPLITS_TO_SIZES = {'train': 4496, 'validation': 350}

_NUM_CLASSES = 2

_ITEMS_TO_DESCRIPTIONS = {
    'image': 'A color image of varying size.',
    'label': 'A single integer between 0 and 4',
}


def get_split(split_name, dataset_dir, file_pattern=None, reader=None):
  """Gets a dataset tuple with instructions for reading flowers.

  Args:
    split_name: A train/validation split name.
    dataset_dir: The base directory of the dataset sources.
    file_pattern: The file pattern to use when matching the dataset sources.
      It is assumed that the pattern contains a '%s' string so that the split
      name can be inserted.
    reader: The TensorFlow reader type.

  Returns:
    A `Dataset` namedtuple.

  Raises:
    ValueError: if `split_name` is not a valid train/validation split.
  """
  if split_name not in SPLITS_TO_SIZES:
    raise ValueError('split name %s was not recognized.' % split_name)

  if not file_pattern:
    file_pattern = _FILE_PATTERN
  file_pattern = os.path.join(dataset_dir, file_pattern % split_name)

  # Allowing None in the signature so that dataset_factory can use the default.
  if reader is None:
    reader = tf.TFRecordReader

  keys_to_features = {
      'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
      'image/format': tf.FixedLenFeature((), tf.string, default_value='png'),
      'image/class/label': tf.FixedLenFeature(
          [], tf.int64, default_value=tf.zeros([], dtype=tf.int64)),
  }

  items_to_handlers = {
      'image': slim.tfexample_decoder.Image(),
      'label': slim.tfexample_decoder.Tensor('image/class/label'),
  }

  decoder = slim.tfexample_decoder.TFExampleDecoder(
      keys_to_features, items_to_handlers)

  labels_to_names = None
  if dataset_utils.has_labels(dataset_dir):
    labels_to_names = dataset_utils.read_label_file(dataset_dir)

  return slim.dataset.Dataset(
      data_sources=file_pattern,
      reader=reader,
      decoder=decoder,
      num_samples=SPLITS_TO_SIZES[split_name],
      items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,
      num_classes=_NUM_CLASSES,
      labels_to_names=labels_to_names)
5. Training
Next, modify the datasets/dataset_factory.py file, changing
from datasets import cifar10
from datasets import flowers
from datasets import imagenet
from datasets import mnist
datasets_map = {
    'cifar10': cifar10,
    'flowers': flowers,
    'imagenet': imagenet,
    'mnist': mnist,
}
to
from datasets import cifar10
from datasets import flowers
from datasets import imagenet
from datasets import mnist
from datasets import eye
datasets_map = {
    'cifar10': cifar10,
    'flowers': flowers,
    'imagenet': imagenet,
    'mnist': mnist,
    'eye': eye,
}
Then run the following command to start training:
python train_image_classifier.py --train_dir=saver/inv3_eye_open_and_close
--dataset_name=eye --dataset_split_name=train
--dataset_dir=images_data/eye_open_and_close --model_name=inception_v3
--learning_rate_decay_type=fixed --save_interval_secs=60
--save_summaries_secs=60 --log_every_n_steps=10 --optimizer=rmsprop
--learning_rate=0.0001
The training starts running. OK, as long as it keeps running, we're good.
6. Testing accuracy
Once the loss no longer seems to be decreasing much, test the model's accuracy with the following command:
python eval_image_classifier.py
--checkpoint_path=saver/inv3_eye_open_and_close/
--eval_dir=saver/inv3_eye_open_and_close/ --dataset_name=eye
--dataset_split_name=validation --dataset_dir=images_data/eye_open_and_close
--model_name=inception_v3 --batch_size=64
The accuracy on the validation set is 93.49%, which is acceptable when extreme precision is not required.
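With the trained checkpoint you can already classify a single cropped eye image. The following is only a rough sketch under assumptions: it relies on the slim repo layout (nets/ and preprocessing/ on the import path), the checkpoint directory from the training command above, a placeholder test image path, and the alphabetical label order written by the conversion script (closedLeftEyes = 0, openLeftEyes = 1).

# Sketch: classify one eye crop with the Inception V3 checkpoint trained above.
import tensorflow as tf
from nets import inception
from preprocessing import inception_preprocessing

slim = tf.contrib.slim

image_size = inception.inception_v3.default_image_size  # 299 for Inception V3

with tf.Graph().as_default():
    # Decode and preprocess one eye crop ('test_eye.jpg' is a placeholder path).
    image_data = tf.gfile.FastGFile('test_eye.jpg', 'rb').read()
    image = tf.image.decode_jpeg(image_data, channels=3)
    processed = inception_preprocessing.preprocess_image(
        image, image_size, image_size, is_training=False)
    images = tf.expand_dims(processed, 0)

    # Build the same network that was trained, with 2 classes.
    with slim.arg_scope(inception.inception_v3_arg_scope()):
        logits, _ = inception.inception_v3(images, num_classes=2, is_training=False)
    probabilities = tf.nn.softmax(logits)

    checkpoint = tf.train.latest_checkpoint('saver/inv3_eye_open_and_close')
    saver = tf.train.Saver(slim.get_model_variables('InceptionV3'))

    with tf.Session() as sess:
        saver.restore(sess, checkpoint)
        probs = sess.run(probabilities)[0]
        # Label ids come from labels.txt written during conversion
        # (class names sorted alphabetically).
        print('closed: %.3f, open: %.3f' % (probs[0], probs[1]))

In a real fatigue detector this would run frame by frame on the eye crops produced by MTCNN.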
If you found this post helpful, please open Alipay and grab a red packet to support me. I hope you land the 99 yuan one. Thanks!