Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Y
yoloOneTouch
Project
Project
Details
Activity
Releases
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
1
Issues
1
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
bitsoko services
yoloOneTouch
Commits
6cf11eee
Commit
6cf11eee
authored
Apr 25, 2017
by
bitsoko
Committed by
GitHub
Apr 25, 2017
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Update runCpu.sh
parent
b432f4de
Changes
1
Hide whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
28 additions
and
101 deletions
+28
-101
runCpu.sh
vision/runCpu.sh
+28
-101
No files found.
vision/runCpu.sh
View file @
6cf11eee
'''This script goes along the blog post
"Building powerful image classification models using very little data"
from blog.keras.io.
It uses data that can be downloaded at:
https://www.kaggle.com/c/dogs-vs-cats/data
In our setup, we:
- created a data/ folder
- created train/ and validation/ subfolders inside data/
- created cats/ and dogs/ subfolders inside train/ and validation/
- put the cat pictures index 0-999 in data/train/cats
- put the cat pictures index 1000-1400 in data/validation/cats
- put the dogs pictures index 12500-13499 in data/train/dogs
- put the dog pictures index 13500-13900 in data/validation/dogs
So that we have 1000 training examples for each class, and 400 validation examples for each class.
In summary, this is our directory structure:
```
data/
train/
dogs/
dog001.jpg
dog002.jpg
...
cats/
cat001.jpg
cat002.jpg
...
validation/
dogs/
dog001.jpg
dog002.jpg
...
cats/
cat001.jpg
cat002.jpg
...
```
'''
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dropout, Flatten, Dense
from keras import applications
# Directory that caches the downloaded pre-trained Inception v3 checkpoint.
PRETRAINED_CHECKPOINT_DIR="/tflow/tmp/checkpoints"
# Input image dimensions (width, height) that every image is resized to.
img_width, img_height = 150, 150
# Layout of the data and output directories used by the preprocessing step.
OUTPUT_DIR=output
DATA_DIR=data
CAPTIONS_DIR="${DATA_DIR}/captions"
# NOTE(review): IMG_TRAIN_DIR points at the val split, same as IMG_VAL_DIR —
# confirm training on the validation images is intentional.
IMG_TRAIN_DIR="${DATA_DIR}/val"
IMG_VAL_DIR="${DATA_DIR}/val"
# Paths and hyper-parameters for the bottleneck-feature training run.
top_model_weights_path = '/tflow/vgg16_weights.h5'  # where the trained top weights go
train_data_dir = 'data/train'
validation_data_dir = 'data/validation'
nb_train_samples = 2000       # total samples; split evenly over the two classes below
nb_validation_samples = 800   # total samples; split evenly over the two classes below
epochs = 1
batch_size = 16
# Where the training (fine-tuned) checkpoint and logs will be saved to.
TRAIN_DIR="models/inception_v3"
def save_bottlebeck_features():
    """Run all images through the VGG16 base and cache bottleneck activations.

    NOTE(review): the "bottlebeck" typo in the name is kept for
    compatibility with the existing call site at the bottom of the file.
    """
    # Rescaling only — no augmentation — so the cached features are
    # deterministic and line up with the label arrays built later.
    datagen = ImageDataGenerator(rescale=1. / 255)

    # VGG16 convolutional base only (no classifier head), ImageNet weights.
    model = applications.VGG16(include_top=False, weights='imagenet')
# Download the pre-trained Inception v3 checkpoint if it is not cached yet.
# Fixes: expansions are quoted (word-splitting/globbing safety) and mkdir
# uses -p so a missing parent directory does not abort the script.
if [ ! -d "${PRETRAINED_CHECKPOINT_DIR}" ]; then
  mkdir -p "${PRETRAINED_CHECKPOINT_DIR}"
fi
if [ ! -f "${PRETRAINED_CHECKPOINT_DIR}/inception_v3.ckpt" ]; then
  wget http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz
  tar -xvf inception_v3_2016_08_28.tar.gz
  mv inception_v3.ckpt "${PRETRAINED_CHECKPOINT_DIR}/inception_v3.ckpt"
  rm inception_v3_2016_08_28.tar.gz
fi
generator
=
datagen.flow_from_directory
(
train_data_dir,
target_size
=(
img_width, img_height
)
,
batch_size
=
batch_size,
class_mode
=
None,
shuffle
=
False
)
bottleneck_features_train
=
model.predict_generator
(
generator, nb_train_samples // batch_size
)
np.save
(
open
(
'bottleneck_features_train.npy'
,
'w'
)
,
bottleneck_features_train
)
generator
=
datagen.flow_from_directory
(
validation_data_dir,
target_size
=(
img_width, img_height
)
,
batch_size
=
batch_size,
class_mode
=
None,
shuffle
=
False
)
bottleneck_features_validation
=
model.predict_generator
(
generator, nb_validation_samples // batch_size
)
np.save
(
open
(
'bottleneck_features_validation.npy'
,
'w'
)
,
bottleneck_features_validation
)
#prepare data
#python prepro.py --train_image_dir="${IMG_TRAIN_DIR}" --val_image_dir="${IMG_VAL_DIR}" --train_captions_file="${CAPTIONS_DIR}/val.json" --val_captions_file="${CAPTIONS_DIR}/val.json" --output_dir="${OUTPUT_DIR}/tf" --word_counts_output_file="${OUTPUT_DIR}/word_counts.txt"
def train_top_model():
    """Train a small dense classifier on the cached VGG16 bottleneck features."""
    # FIX: pass the filename — np.load opens the file itself (binary mode).
    # The original wrapped it in a text-mode open(), which breaks on Python 3.
    train_data = np.load('bottleneck_features_train.npy')
    # First half of the (unshuffled) samples is class 0, second half class 1.
    # FIX: '//' keeps the repeat count an int; on Python 3 '/' yields a
    # float and list * float raises TypeError.
    train_labels = np.array(
        [0] * (nb_train_samples // 2) + [1] * (nb_train_samples // 2))
# Launch training on the preprocessed TFRecords.
# NOTE(review): MSCOCO_DIR and INCEPTION_CHECKPOINT are not defined anywhere
# in this script — confirm they are exported by the calling environment.
python train.py \
  --input_file_pattern="${MSCOCO_DIR}/tf/val-?????-of-00256" \
  --inception_checkpoint_file="${INCEPTION_CHECKPOINT}" \
  --train_dir="${OUTPUT_DIR}/tf/val" \
  --train_inception=false \
  --number_of_steps=1000000
validation_data
=
np.load
(
open
(
'bottleneck_features_validation.npy'
))
validation_labels
=
np.array
(
[
0]
*
(
nb_validation_samples / 2
)
+
[
1]
*
(
nb_validation_samples / 2
))
model
=
Sequential
()
model.add
(
Flatten
(
input_shape
=
train_data.shape[1:]
))
model.add
(
Dense
(
256,
activation
=
'relu'
))
model.add
(
Dropout
(
0.5
))
model.add
(
Dense
(
1,
activation
=
'sigmoid'
))
model.compile
(
optimizer
=
'rmsprop'
,
loss
=
'binary_crossentropy'
,
metrics
=[
'accuracy'
])
model.fit
(
train_data, train_labels,
epochs
=
epochs,
batch_size
=
batch_size,
validation_data
=(
validation_data, validation_labels
))
model.save_weights
(
top_model_weights_path
)
# Guard the entry point so importing this module does not trigger the
# full feature-extraction and training run as a side effect.
if __name__ == '__main__':
    save_bottlebeck_features()
    train_top_model()
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment