Commit 94b559a8 authored by Konstantinos Bousmalis, committed by Neal Wu

Domain Separation Networks changes.

parent b4537839
konstantinos
nsilberman
dilipkay
dumitru
\ No newline at end of file
......@@ -26,14 +26,6 @@ py_library(
],
)
py_binary(
name = "download_and_convert_mnist_m",
srcs = ["download_and_convert_mnist_m.py"],
deps = [
"//slim:dataset_utils",
],
)
py_binary(
name = "mnist_m",
srcs = ["mnist_m.py"],
......
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Downloads and converts MNIST-M data to TFRecords of TF-Example protos.
This module downloads the MNIST-M data, uncompresses it, reads the files
that make up the MNIST-M data and creates two TFRecord datasets: one for train
and one for test. Each TFRecord dataset consists of a set of TF-Example
protocol buffers, each of which contains a single image and label.
The script should take about a minute to run.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import sys
import google3
import numpy as np
from six.moves import urllib
import tensorflow as tf
from google3.third_party.tensorflow_models.slim.datasets import dataset_utils
tf.app.flags.DEFINE_string(
'dataset_dir', None,
'The directory where the output TFRecords and temporary files are saved.')
FLAGS = tf.app.flags.FLAGS
# The URL where the MNIST-M data can be downloaded.
_DATA_URL = 'http://yann.lecun.com/exdb/mnist/'
_TRAIN_DATA_DIR = 'mnist_m_train'
_TRAIN_LABELS_FILENAME = 'mnist_m_train_labels'
_TEST_DATA_DIR = 'mnist_m_test'
_TEST_LABELS_FILENAME = 'mnist_m_test_labels'
_IMAGE_SIZE = 32
_NUM_CHANNELS = 3
# The number of images in the training set.
_NUM_TRAIN_SAMPLES = 59001
# The number of images to be kept from the training set for the validation set.
_NUM_VALIDATION = 1000
# The number of images in the test set.
_NUM_TEST_SAMPLES = 9001
# Seed for repeatability.
_RANDOM_SEED = 0
# The names of the classes.
_CLASS_NAMES = [
'zero',
'one',
'two',
'three',
'four',
'five',
'six',
'seven',
'eight',
'nine',
]
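# These class names are written out as the labels file at the end of run();
# i.e. integer label 0 maps to 'zero', 1 to 'one', ..., 9 to 'nine'.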
class ImageReader(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Initializes function that decodes RGB PNG data.
self._decode_png_data = tf.placeholder(dtype=tf.string)
self._decode_png = tf.image.decode_png(self._decode_png_data, channels=3)
def read_image_dims(self, sess, image_data):
image = self.decode_png(sess, image_data)
return image.shape[0], image.shape[1]
def decode_png(self, sess, image_data):
image = sess.run(
self._decode_png, feed_dict={self._decode_png_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
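# A minimal sketch of using the helper above to decode one PNG, assuming
# `png_path` points at an existing image file:
#
#   with tf.Graph().as_default(), tf.Session('') as sess:
#     reader = ImageReader()
#     png_bytes = tf.gfile.FastGFile(png_path, 'rb').read()
#     height, width = reader.read_image_dims(sess, png_bytes)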
def _convert_dataset(split_name, filenames, filename_to_class_id, dataset_dir):
"""Converts the given filenames to a TFRecord dataset.
Args:
split_name: The name of the split: 'train', 'valid' or 'test'.
filenames: A list of absolute paths to png images.
filename_to_class_id: A dictionary from filenames (strings) to class ids
(integers).
dataset_dir: The directory where the converted datasets are stored.
"""
print('Converting the {} split.'.format(split_name))
# Train and validation splits are both in the train directory.
if split_name in ['train', 'valid']:
png_directory = os.path.join(dataset_dir, 'mnist-m', 'mnist_m_train')
elif split_name == 'test':
png_directory = os.path.join(dataset_dir, 'mnist-m', 'mnist_m_test')
with tf.Graph().as_default():
image_reader = ImageReader()
with tf.Session('') as sess:
output_filename = _get_output_filename(dataset_dir, split_name)
with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
for filename in filenames:
# Read the filename:
image_data = tf.gfile.FastGFile(
os.path.join(png_directory, filename), 'rb').read()
height, width = image_reader.read_image_dims(sess, image_data)
class_id = filename_to_class_id[filename]
example = dataset_utils.image_to_tfexample(image_data, 'png', height,
width, class_id)
tfrecord_writer.write(example.SerializeToString())
sys.stdout.write('\n')
sys.stdout.flush()
def _extract_labels(label_filename):
"""Extract the labels into a dict of filenames to int labels.
Args:
labels_filename: The filename of the MNIST-M labels.
Returns:
A dictionary of filenames to int labels.
"""
print('Extracting labels from: ', label_filename)
label_file = tf.gfile.FastGFile(label_filename, 'r').readlines()
label_lines = [line.rstrip('\n').split() for line in label_file]
labels = {}
for line in label_lines:
assert len(line) == 2
labels[line[0]] = int(line[1])
return labels
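# The labels file parsed above is expected to hold one "<filename> <label>"
# pair per line, e.g. a (hypothetical) line "00000000.png 5" maps the image
# 00000000.png to class 5.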
def _get_output_filename(dataset_dir, split_name):
"""Creates the output filename.
Args:
dataset_dir: The directory where the temporary files are stored.
split_name: The name of the train/test split.
Returns:
An absolute file path.
"""
return '%s/mnist_m_%s.tfrecord' % (dataset_dir, split_name)
def _get_filenames(dataset_dir):
"""Returns a list of filenames and inferred class names.
Args:
dataset_dir: A directory containing a set PNG encoded MNIST-M images.
Returns:
A list of image file paths, relative to `dataset_dir`.
"""
photo_filenames = []
for filename in os.listdir(dataset_dir):
photo_filenames.append(filename)
return photo_filenames
def run(dataset_dir):
"""Runs the download and conversion operation.
Args:
dataset_dir: The dataset directory where the dataset is stored.
"""
if not tf.gfile.Exists(dataset_dir):
tf.gfile.MakeDirs(dataset_dir)
train_filename = _get_output_filename(dataset_dir, 'train')
testing_filename = _get_output_filename(dataset_dir, 'test')
if tf.gfile.Exists(train_filename) and tf.gfile.Exists(testing_filename):
print('Dataset files already exist. Exiting without re-creating them.')
return
#TODO(konstantinos): Add download and cleanup functionality
train_validation_filenames = _get_filenames(
os.path.join(dataset_dir, 'mnist-m', 'mnist_m_train'))
test_filenames = _get_filenames(
os.path.join(dataset_dir, 'mnist-m', 'mnist_m_test'))
# Divide into train and validation:
random.seed(_RANDOM_SEED)
random.shuffle(train_validation_filenames)
train_filenames = train_validation_filenames[_NUM_VALIDATION:]
validation_filenames = train_validation_filenames[:_NUM_VALIDATION]
train_validation_filenames_to_class_ids = _extract_labels(
os.path.join(dataset_dir, 'mnist-m', 'mnist_m_train_labels.txt'))
test_filenames_to_class_ids = _extract_labels(
os.path.join(dataset_dir, 'mnist-m', 'mnist_m_test_labels.txt'))
# Convert the train, validation, and test sets.
_convert_dataset('train', train_filenames,
train_validation_filenames_to_class_ids, dataset_dir)
_convert_dataset('valid', validation_filenames,
train_validation_filenames_to_class_ids, dataset_dir)
_convert_dataset('test', test_filenames, test_filenames_to_class_ids,
dataset_dir)
# Finally, write the labels file:
labels_to_class_names = dict(zip(range(len(_CLASS_NAMES)), _CLASS_NAMES))
dataset_utils.write_label_file(labels_to_class_names, dataset_dir)
print('\nFinished converting the MNIST-M dataset!')
def main(_):
run(FLAGS.dataset_dir)
if __name__ == '__main__':
tf.app.run()
......@@ -13,9 +13,6 @@
# limitations under the License.
# ==============================================================================
"""Provides data for the MNIST-M dataset.
The script used to create the dataset can be found at:
tensorflow_models/domain_adaptation/datasets/download_and_convert_mnist_m.py
"""
from __future__ import absolute_import
......
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DSN components."""
import numpy as np
import tensorflow as tf
import models
class SharedEncodersTest(tf.test.TestCase):
def _testSharedEncoder(self,
input_shape=[5, 28, 28, 1],
model=models.dann_mnist,
is_training=True):
images = tf.to_float(np.random.rand(*input_shape))
with self.test_session() as sess:
logits, _ = model(images)
sess.run(tf.global_variables_initializer())
logits_np = sess.run(logits)
return logits_np
def testBuildGRLMnistModel(self):
logits = self._testSharedEncoder(model=getattr(models,
'dann_mnist'))
self.assertEqual(logits.shape, (5, 10))
self.assertTrue(np.any(logits))
def testBuildGRLSvhnModel(self):
logits = self._testSharedEncoder(model=getattr(models,
'dann_svhn'))
self.assertEqual(logits.shape, (5, 10))
self.assertTrue(np.any(logits))
def testBuildGRLGtsrbModel(self):
logits = self._testSharedEncoder([5, 40, 40, 3],
getattr(models, 'dann_gtsrb'))
self.assertEqual(logits.shape, (5, 43))
self.assertTrue(np.any(logits))
def testBuildPoseModel(self):
logits = self._testSharedEncoder([5, 64, 64, 4],
getattr(models, 'dsn_cropped_linemod'))
self.assertEqual(logits.shape, (5, 11))
self.assertTrue(np.any(logits))
def testBuildPoseModelWithBatchNorm(self):
images = tf.to_float(np.random.rand(10, 64, 64, 4))
with self.test_session() as sess:
logits, _ = getattr(models, 'dsn_cropped_linemod')(
images, batch_norm_params=models.default_batch_norm_params(True))
sess.run(tf.global_variables_initializer())
logits_np = sess.run(logits)
self.assertEqual(logits_np.shape, (10, 11))
self.assertTrue(np.any(logits_np))
class EncoderTest(tf.test.TestCase):
def _testEncoder(self, batch_norm_params=None, channels=1):
images = tf.to_float(np.random.rand(10, 28, 28, channels))
with self.test_session() as sess:
end_points = models.default_encoder(
images, 128, batch_norm_params=batch_norm_params)
sess.run(tf.global_variables_initializer())
private_code = sess.run(end_points['fc3'])
self.assertEqual(private_code.shape, (10, 128))
self.assertTrue(np.any(private_code))
self.assertTrue(np.all(np.isfinite(private_code)))
def testEncoder(self):
self._testEncoder()
def testEncoderMultiChannel(self):
self._testEncoder(None, 4)
def testEncoderIsTrainingBatchNorm(self):
self._testEncoder(models.default_batch_norm_params(True))
def testEncoderBatchNorm(self):
self._testEncoder(models.default_batch_norm_params(False))
class DecoderTest(tf.test.TestCase):
def _testDecoder(self,
height=64,
width=64,
channels=4,
batch_norm_params=None,
decoder=models.small_decoder):
codes = tf.to_float(np.random.rand(32, 100))
with self.test_session() as sess:
output = decoder(
codes,
height=height,
width=width,
channels=channels,
batch_norm_params=batch_norm_params)
sess.run(tf.global_variables_initializer())
output_np = sess.run(output)
self.assertEqual(output_np.shape, (32, height, width, channels))
self.assertTrue(np.any(output_np))
self.assertTrue(np.all(np.isfinite(output_np)))
def testSmallDecoder(self):
self._testDecoder(28, 28, 4, None, getattr(models, 'small_decoder'))
def testSmallDecoderThreeChannels(self):
self._testDecoder(28, 28, 3)
def testSmallDecoderBatchNorm(self):
self._testDecoder(28, 28, 4, models.default_batch_norm_params(False))
def testSmallDecoderIsTrainingBatchNorm(self):
self._testDecoder(28, 28, 4, models.default_batch_norm_params(True))
def testLargeDecoder(self):
self._testDecoder(32, 32, 4, None, getattr(models, 'large_decoder'))
def testLargeDecoderThreeChannels(self):
self._testDecoder(32, 32, 3, None, getattr(models, 'large_decoder'))
def testLargeDecoderBatchNorm(self):
self._testDecoder(32, 32, 4,
models.default_batch_norm_params(False),
getattr(models, 'large_decoder'))
def testLargeDecoderIsTrainingBatchNorm(self):
self._testDecoder(32, 32, 4,
models.default_batch_norm_params(True),
getattr(models, 'large_decoder'))
def testGtsrbDecoder(self):
self._testDecoder(40, 40, 3, None, getattr(models, 'gtsrb_decoder'))
def testGtsrbDecoderBatchNorm(self):
self._testDecoder(40, 40, 4,
models.default_batch_norm_params(False),
getattr(models, 'gtsrb_decoder'))
def testGtsrbDecoderIsTrainingBatchNorm(self):
self._testDecoder(40, 40, 4,
models.default_batch_norm_params(True),
getattr(models, 'gtsrb_decoder'))
if __name__ == '__main__':
tf.test.main()
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
r"""Evaluation for Domain Separation Networks (DSNs).
To build locally for CPU:
blaze build -c opt --copt=-mavx \
third_party/tensorflow_models/domain_adaptation/domain_separation:dsn_eval
To build locally for GPU:
blaze build -c opt --copt=-mavx --config=cuda_clang \
third_party/tensorflow_models/domain_adaptation/domain_separation:dsn_eval
To run locally:
$ ./blaze-bin/third_party/tensorflow_models/domain_adaptation/domain_separation/dsn_eval \
    --alsologtostderr
"""
# pylint: enable=line-too-long
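# For example (flag values and paths below are illustrative), assuming a
# checkpoint was written to /tmp/da/ by the training job:
#
#   ./blaze-bin/third_party/tensorflow_models/domain_adaptation/domain_separation/dsn_eval \
#     --dataset=pose_real --portion=valid --num_examples=1000 \
#     --checkpoint_dir=/tmp/da/ --eval_dir=/tmp/da/ --alsologtostderr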
import math
import google3
import numpy as np
import tensorflow as tf
from google3.robotics.cad_learning.domain_adaptation.fnist import data_provider
from google3.third_party.tensorflow_models.domain_adaptation.domain_separation import models
slim = tf.contrib.slim
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('batch_size', 50,
'The number of images in each batch.')
tf.app.flags.DEFINE_string('master', 'local',
'BNS name of the TensorFlow master to use.')
tf.app.flags.DEFINE_string('checkpoint_dir', '/tmp/da/',
'Directory where the model was written to.')
tf.app.flags.DEFINE_string(
'eval_dir', '/tmp/da/',
'Directory where we should write the tf summaries to.')
tf.app.flags.DEFINE_string(
'dataset', 'pose_real',
'Which dataset to test on: "pose_real", "pose_synthetic".')
tf.app.flags.DEFINE_string('portion', 'valid',
'Which portion to test on: "valid", "test".')
tf.app.flags.DEFINE_integer('num_examples', 1000, 'Number of test examples.')
tf.app.flags.DEFINE_string('basic_tower', 'pose_mini',
'The basic tower building block.')
tf.app.flags.DEFINE_bool('use_logging', False, 'Debugging messages.')
def quaternion_metric(predictions, labels):
product = tf.multiply(predictions, labels)
internal_dot_products = tf.reduce_sum(product, [1])
logcost = tf.log(1e-4 + 1 - tf.abs(internal_dot_products))
return tf.contrib.metrics.streaming_mean(logcost)
def to_degrees(predictions, labels):
"""Converts a log quaternion distance to an angle.
Args:
log_quaternion_loss: The log quaternion distance between two
unit quaternions (or a batch of pairs of quaternions).
Returns:
The angle in degrees of the implied angle-axis representation.
"""
product = tf.multiply(predictions, labels)
internal_dot_products = tf.reduce_sum(product, [1])
log_quaternion_loss = tf.log(1e-4 + 1 - tf.abs(internal_dot_products))
angle_loss = tf.acos(-(tf.exp(log_quaternion_loss) - 1)) * 2 * 180 / math.pi
return tf.contrib.metrics.streaming_mean(angle_loss)
def main(_):
g = tf.Graph()
with g.as_default():
images, labels = data_provider.provide(FLAGS.dataset, FLAGS.portion,
FLAGS.batch_size)
num_classes = labels['classes'].shape[1]
# Define the model:
with tf.variable_scope('towers'):
basic_tower = models.provide(FLAGS.basic_tower)
predictions, endpoints = basic_tower(
images, is_training=False, num_classes=num_classes)
names_to_values = {}
names_to_updates = {}
# Define the metrics:
if 'quaternions' in labels: # Also have to evaluate pose estimation!
quaternion_loss = quaternion_metric(labels['quaternions'],
endpoints['quaternion_pred'])
metric_name = 'Angle Mean Error'
names_to_values[metric_name], names_to_updates[metric_name] = to_degrees(
labels['quaternions'], endpoints['quaternion_pred'])
metric_name = 'Log Quaternion Error'
names_to_values[metric_name], names_to_updates[
metric_name] = quaternion_metric(labels['quaternions'],
endpoints['quaternion_pred'])
metric_name = 'Accuracy'
names_to_values[metric_name], names_to_updates[
metric_name] = tf.contrib.metrics.streaming_accuracy(
tf.argmax(predictions, 1), tf.argmax(labels['classes'], 1))
# Create the summary ops such that they also print out to std output:
summary_ops = []
for metric_name, metric_value in names_to_values.iteritems():
op = tf.contrib.deprecated.scalar_summary(metric_name, metric_value)
op = tf.Print(op, [metric_value], metric_name)
summary_ops.append(op)
# This ensures that we make a single pass over all of the data.
num_batches = math.ceil(FLAGS.num_examples / float(FLAGS.batch_size))
# Setup the global step.
slim.get_or_create_global_step()
slim.evaluation.evaluation_loop(
FLAGS.master,
checkpoint_dir=FLAGS.checkpoint_dir,
logdir=FLAGS.eval_dir,
num_evals=num_batches,
eval_op=names_to_updates.values(),
summary_op=tf.contrib.deprecated.merge_summary(summary_ops))
if __name__ == '__main__':
tf.app.run()
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
r"""Evaluation for Domain Separation Networks (DSNs).
To build locally for CPU:
blaze build -c opt --copt=-mavx \
third_party/tensorflow_models/domain_adaptation/domain_separation:dsn_eval
To build locally for GPU:
blaze build -c opt --copt=-mavx --config=cuda_clang \
third_party/tensorflow_models/domain_adaptation/domain_separation:dsn_eval
To run locally:
$ ./blaze-bin/third_party/tensorflow_models/domain_adaptation/domain_separation/dsn_eval \
    --alsologtostderr
"""
# pylint: enable=line-too-long
import math
import google3
import numpy as np
import tensorflow as tf
from google3.robotics.cad_learning.domain_adaptation.fnist import data_provider
from google3.third_party.tensorflow_models.domain_adaptation.domain_separation import losses
from google3.third_party.tensorflow_models.domain_adaptation.domain_separation import models
slim = tf.contrib.slim
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('batch_size', 32,
'The number of images in each batch.')
tf.app.flags.DEFINE_string('master', 'local',
'BNS name of the TensorFlow master to use.')
tf.app.flags.DEFINE_string('checkpoint_dir', '/tmp/da/',
'Directory where the model was written to.')
tf.app.flags.DEFINE_string(
'eval_dir', '/tmp/da/',
'Directory where we should write the tf summaries to.')
tf.app.flags.DEFINE_string(
'dataset', 'pose_real',
'Which dataset to test on: "pose_real", "pose_synthetic".')
tf.app.flags.DEFINE_string('portion', 'valid',
'Which portion to test on: "valid", "test".')
tf.app.flags.DEFINE_integer('num_examples', 1000, 'Number of test examples.')
tf.app.flags.DEFINE_string('basic_tower', 'dsn_cropped_linemod',
'The basic tower building block.')
tf.app.flags.DEFINE_bool('enable_precision_recall', False,
'If True, precision and recall for each class will '
'be added to the metrics.')
tf.app.flags.DEFINE_bool('use_logging', False, 'Debugging messages.')
def quaternion_metric(predictions, labels):
params = {'batch_size': FLAGS.batch_size, 'use_logging': False}
logcost = losses.log_quaternion_loss_batch(predictions, labels, params)
return slim.metrics.streaming_mean(logcost)
def angle_diff(true_q, pred_q):
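"""Returns the per-example angular error, in degrees, between unit quaternions.
For unit quaternions the rotation angle between true_q and pred_q is
2 * arccos(|<true_q, pred_q>|), which is what the expression below computes.
"""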
angles = 2 * (
180.0 /
np.pi) * np.arccos(np.abs(np.sum(np.multiply(pred_q, true_q), axis=1)))
return angles
def main(_):
g = tf.Graph()
with g.as_default():
images, labels = data_provider.provide(FLAGS.dataset, FLAGS.portion,
FLAGS.batch_size)
num_classes = labels['classes'].get_shape().as_list()[1]
# Define the model:
with tf.variable_scope('towers'):
basic_tower = getattr(models, FLAGS.basic_tower)
predictions, endpoints = basic_tower(
images,
num_classes=num_classes,
is_training=False,
batch_norm_params=None)
metric_names_to_values = {}
# Define the metrics:
if 'quaternions' in labels: # Also have to evaluate pose estimation!
quaternion_loss = quaternion_metric(labels['quaternions'],
endpoints['quaternion_pred'])
angle_errors, = tf.py_func(
angle_diff, [labels['quaternions'], endpoints['quaternion_pred']],
[tf.float32])
metric_names_to_values[
'Angular mean error'] = slim.metrics.streaming_mean(angle_errors)
metric_names_to_values['Quaternion Loss'] = quaternion_loss
accuracy = tf.contrib.metrics.streaming_accuracy(
tf.argmax(predictions, 1), tf.argmax(labels['classes'], 1))
predictions = tf.argmax(predictions, 1)
labels = tf.argmax(labels['classes'], 1)
metric_names_to_values['Accuracy'] = accuracy
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map(
metric_names_to_values)
# Create the summary ops such that they also print out to std output:
summary_ops = []
for metric_name, metric_value in names_to_values.iteritems():
op = tf.summary.scalar(metric_name, metric_value)
op = tf.Print(op, [metric_value], metric_name)
summary_ops.append(op)
# This ensures that we make a single pass over all of the data.
num_batches = math.ceil(FLAGS.num_examples / float(FLAGS.batch_size))
# Setup the global step.
slim.get_or_create_global_step()
slim.evaluation.evaluation_loop(
FLAGS.master,
checkpoint_dir=FLAGS.checkpoint_dir,
logdir=FLAGS.eval_dir,
num_evals=num_batches,
eval_op=names_to_updates.values(),
summary_op=tf.summary.merge(summary_ops))
if __name__ == '__main__':
tf.app.run()
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
r"""Evaluation for Domain Separation Networks (DSNs).
To build locally for CPU:
blaze build -c opt --copt=-mavx \
third_party/tensorflow_models/domain_adaptation/domain_separation:dsn_eval
To build locally for GPU:
blaze build -c opt --copt=-mavx --config=cuda_clang \
third_party/tensorflow_models/domain_adaptation/domain_separation:dsn_eval
To run locally:
$ ./blaze-bin/third_party/tensorflow_models/domain_adaptation/domain_separation/dsn_eval \
    --alsologtostderr
"""
# pylint: enable=line-too-long
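# For example (illustrative values; $DSN_DATA_DIR is assumed to contain the
# converted MNIST-M TFRecords), to evaluate a checkpoint on the MNIST-M
# validation split:
#
#   ./blaze-bin/third_party/tensorflow_models/domain_adaptation/domain_separation/dsn_eval \
#     --dataset=mnist_m --split=valid --dataset_dir=$DSN_DATA_DIR \
#     --checkpoint_dir=/tmp/da/ --eval_dir=/tmp/da/ --alsologtostderr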
import math
import google3
import numpy as np
import tensorflow as tf
from google3.third_party.tensorflow_models.domain_adaptation.datasets import dataset_factory
from google3.third_party.tensorflow_models.domain_adaptation.domain_separation import losses
from google3.third_party.tensorflow_models.domain_adaptation.domain_separation import models
slim = tf.contrib.slim
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('batch_size', 32,
'The number of images in each batch.')
tf.app.flags.DEFINE_string('master', '',
'BNS name of the TensorFlow master to use.')
tf.app.flags.DEFINE_string('checkpoint_dir', '/tmp/da/',
'Directory where the model was written to.')
tf.app.flags.DEFINE_string(
'eval_dir', '/tmp/da/',
'Directory where we should write the tf summaries to.')
tf.app.flags.DEFINE_string('dataset_dir', None,
'The directory where the dataset files are stored.')
tf.app.flags.DEFINE_string('dataset', 'mnist_m',
'Which dataset to test on: "mnist", "mnist_m".')
tf.app.flags.DEFINE_string('split', 'valid',
'Which portion to test on: "valid", "test".')
tf.app.flags.DEFINE_integer('num_examples', 1000, 'Number of test examples.')
tf.app.flags.DEFINE_string('basic_tower', 'dann_mnist',
'The basic tower building block.')
tf.app.flags.DEFINE_bool('enable_precision_recall', False,
'If True, precision and recall for each class will '
'be added to the metrics.')
tf.app.flags.DEFINE_bool('use_logging', False, 'Debugging messages.')
def quaternion_metric(predictions, labels):
params = {'batch_size': FLAGS.batch_size, 'use_logging': False}
logcost = losses.log_quaternion_loss_batch(predictions, labels, params)
return slim.metrics.streaming_mean(logcost)
def angle_diff(true_q, pred_q):
angles = 2 * (
180.0 /
np.pi) * np.arccos(np.abs(np.sum(np.multiply(pred_q, true_q), axis=1)))
return angles
def provide_batch_fn():
"""Returns the provide_batch function to use."""
return dataset_factory.provide_batch
def main(_):
g = tf.Graph()
with g.as_default():
# Load the data.
images, labels = provide_batch_fn()(
FLAGS.dataset, FLAGS.split, FLAGS.dataset_dir, 4, FLAGS.batch_size, 4)
num_classes = labels['classes'].get_shape().as_list()[1]
tf.summary.image('eval_images', images, max_outputs=3)
# Define the model:
with tf.variable_scope('towers'):
basic_tower = getattr(models, FLAGS.basic_tower)
predictions, endpoints = basic_tower(
images,
num_classes=num_classes,
is_training=False,
batch_norm_params=None)
metric_names_to_values = {}
# Define the metrics:
if 'quaternions' in labels: # Also have to evaluate pose estimation!
quaternion_loss = quaternion_metric(labels['quaternions'],
endpoints['quaternion_pred'])
angle_errors, = tf.py_func(
angle_diff, [labels['quaternions'], endpoints['quaternion_pred']],
[tf.float32])
metric_names_to_values[
'Angular mean error'] = slim.metrics.streaming_mean(angle_errors)
metric_names_to_values['Quaternion Loss'] = quaternion_loss
accuracy = tf.contrib.metrics.streaming_accuracy(
tf.argmax(predictions, 1), tf.argmax(labels['classes'], 1))
predictions = tf.argmax(predictions, 1)
labels = tf.argmax(labels['classes'], 1)
metric_names_to_values['Accuracy'] = accuracy
# Optionally add per-class precision/recall metrics.
if FLAGS.enable_precision_recall:
for i in xrange(num_classes):
index_map = tf.one_hot(i, depth=num_classes)
name = 'PR/Precision_{}'.format(i)
metric_names_to_values[name] = slim.metrics.streaming_precision(
tf.gather(index_map, predictions), tf.gather(index_map, labels))
name = 'PR/Recall_{}'.format(i)
metric_names_to_values[name] = slim.metrics.streaming_recall(
tf.gather(index_map, predictions), tf.gather(index_map, labels))
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map(
metric_names_to_values)
# Create the summary ops such that they also print out to std output:
summary_ops = []
for metric_name, metric_value in names_to_values.iteritems():
op = tf.summary.scalar(metric_name, metric_value)
op = tf.Print(op, [metric_value], metric_name)
summary_ops.append(op)
# This ensures that we make a single pass over all of the data.
num_batches = math.ceil(FLAGS.num_examples / float(FLAGS.batch_size))
# Setup the global step.
slim.get_or_create_global_step()
slim.evaluation.evaluation_loop(
FLAGS.master,
checkpoint_dir=FLAGS.checkpoint_dir,
logdir=FLAGS.eval_dir,
num_evals=num_batches,
eval_op=names_to_updates.values(),
summary_op=tf.summary.merge(summary_ops))
if __name__ == '__main__':
tf.app.run()
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
r"""Evaluation for Domain Separation Networks (DSNs).
To build locally for CPU:
blaze build -c opt --copt=-mavx \
third_party/tensorflow_models/domain_adaptation/domain_separation:dsn_eval
To build locally for GPU:
blaze build -c opt --copt=-mavx --config=cuda_clang \
third_party/tensorflow_models/domain_adaptation/domain_separation:dsn_eval
To run locally:
$ ./blaze-bin/third_party/tensorflow_models/domain_adaptation/domain_separation/dsn_eval \
    --alsologtostderr
"""
# pylint: enable=line-too-long
import math
import google3
import numpy as np
import tensorflow as tf
from google3.third_party.tensorflow_models.domain_adaptation.datasets import dataset_factory
from google3.third_party.tensorflow_models.domain_adaptation.domain_separation import losses
from google3.third_party.tensorflow_models.domain_adaptation.domain_separation import models
slim = tf.contrib.slim
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('batch_size', 32,
'The number of images in each batch.')
tf.app.flags.DEFINE_string('master', '',
'BNS name of the TensorFlow master to use.')
tf.app.flags.DEFINE_string('checkpoint_dir', '/tmp/da/',
'Directory where the model was written to.')
tf.app.flags.DEFINE_string(
'eval_dir', '/tmp/da/',
'Directory where we should write the tf summaries to.')
tf.app.flags.DEFINE_string('dataset_dir', None,
'The directory where the dataset files are stored.')
tf.app.flags.DEFINE_string('dataset', 'mnist_m',
'Which dataset to test on: "mnist", "mnist_m".')
tf.app.flags.DEFINE_string('split', 'valid',
'Which portion to test on: "valid", "test".')
tf.app.flags.DEFINE_integer('num_examples', 1000, 'Number of test examples.')
tf.app.flags.DEFINE_string('basic_tower', 'dann_mnist',
'The basic tower building block.')
tf.app.flags.DEFINE_bool('enable_precision_recall', False,
'If True, precision and recall for each class will '
'be added to the metrics.')
tf.app.flags.DEFINE_bool('use_logging', False, 'Debugging messages.')
def quaternion_metric(predictions, labels):
params = {'batch_size': FLAGS.batch_size, 'use_logging': False}
logcost = losses.log_quaternion_loss_batch(predictions, labels, params)
return slim.metrics.streaming_mean(logcost)
def angle_diff(true_q, pred_q):
angles = 2 * (
180.0 /
np.pi) * np.arccos(np.abs(np.sum(np.multiply(pred_q, true_q), axis=1)))
return angles
def provide_batch_fn():
"""Returns the provide_batch function to use."""
return dataset_factory.provide_batch
def main(_):
g = tf.Graph()
with g.as_default():
# Load the data.
images, labels = provide_batch_fn()(
FLAGS.dataset, FLAGS.split, FLAGS.dataset_dir, 4, FLAGS.batch_size, 4)
num_classes = labels['classes'].get_shape().as_list()[1]
tf.summary.image('eval_images', images, max_outputs=3)
# Define the model:
with tf.variable_scope('towers'):
basic_tower = getattr(models, FLAGS.basic_tower)
predictions, endpoints = basic_tower(
images,
num_classes=num_classes,
is_training=False,
batch_norm_params=None)
metric_names_to_values = {}
# Define the metrics:
if 'quaternions' in labels: # Also have to evaluate pose estimation!
quaternion_loss = quaternion_metric(labels['quaternions'],
endpoints['quaternion_pred'])
angle_errors, = tf.py_func(
angle_diff, [labels['quaternions'], endpoints['quaternion_pred']],
[tf.float32])
metric_names_to_values[
'Angular mean error'] = slim.metrics.streaming_mean(angle_errors)
metric_names_to_values['Quaternion Loss'] = quaternion_loss
accuracy = tf.contrib.metrics.streaming_accuracy(
tf.argmax(predictions, 1), tf.argmax(labels['classes'], 1))
predictions = tf.argmax(predictions, 1)
labels = tf.argmax(labels['classes'], 1)
metric_names_to_values['Accuracy'] = accuracy
# Optionally add per-class precision/recall metrics.
if FLAGS.enable_precision_recall:
for i in xrange(num_classes):
index_map = tf.one_hot(i, depth=num_classes)
name = 'PR/Precision_{}'.format(i)
metric_names_to_values[name] = slim.metrics.streaming_precision(
tf.gather(index_map, predictions), tf.gather(index_map, labels))
name = 'PR/Recall_{}'.format(i)
metric_names_to_values[name] = slim.metrics.streaming_recall(
tf.gather(index_map, predictions), tf.gather(index_map, labels))
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map(
metric_names_to_values)
# Create the summary ops such that they also print out to std output:
summary_ops = []
for metric_name, metric_value in names_to_values.iteritems():
op = tf.summary.scalar(metric_name, metric_value)
op = tf.Print(op, [metric_value], metric_name)
summary_ops.append(op)
# This ensures that we make a single pass over all of the data.
num_batches = math.ceil(FLAGS.num_examples / float(FLAGS.batch_size))
# Setup the global step.
slim.get_or_create_global_step()
slim.evaluation.evaluation_loop(
FLAGS.master,
checkpoint_dir=FLAGS.checkpoint_dir,
logdir=FLAGS.eval_dir,
num_evals=num_batches,
eval_op=names_to_updates.values(),
summary_op=tf.summary.merge(summary_ops))
if __name__ == '__main__':
tf.app.run()
......@@ -60,10 +60,8 @@ py_library(
],
deps = [
":grl_op_grads_py",
#":grl_op_kernels",
":grl_op_shapes_py",
":grl_ops",
#":grl_ops_py",
":losses",
":models",
":utils",
......@@ -148,28 +146,6 @@ py_library(
srcs = ["grl_ops.py"],
data = ["_grl_ops.so"],
)
#cc_library(
# name = "grl_ops",
# srcs = ["grl_ops.cc"],
# deps = ["//tensorflow/core:framework"],
# alwayslink = 1,
#)
#tf_gen_op_wrapper_py(
# name = "grl_ops_py",
# out = "grl_ops.py",
# deps = [":grl_ops"],
#)
#cc_library(
# name = "grl_op_kernels",
# srcs = ["grl_op_kernels.cc"],
# deps = [
# "//tensorflow/core:framework",
# "//tensorflow/core:protos_all",
# ],
# alwayslink = 1,
#)
py_test(
name = "grl_ops_test",
......@@ -177,9 +153,7 @@ py_test(
srcs = ["grl_ops_test.py"],
deps = [
":grl_op_grads_py",
# ":grl_op_kernels",
":grl_op_shapes_py",
":grl_ops",
#":grl_ops_py",
],
)
......@@ -24,6 +24,11 @@ Then you need to build the binaries with Bazel:
$ bazel build -c opt domain_adaptation/domain_separation/...
Add models and models/slim to your $PYTHONPATH:
$ export PYTHONPATH=$PYTHONPATH:$PWD/slim
$ export PYTHONPATH=$PYTHONPATH:$PWD
You can then train with the following command:
$ ./bazel-bin/domain_adaptation/domain_separation/dsn_train \
......