From 42cf87da1c320a7a1d382e876433105d94fcbf6f Mon Sep 17 00:00:00 2001 From: Wangyida Date: Thu, 4 Jun 2015 22:31:06 +0800 Subject: [PATCH 01/82] triplet data generation and network update --- .../triplet/convert_mnist_triplet_data.cpp | 127 +++++ examples/triplet/create_mnist_triplet.sh | 21 + examples/triplet/mnist_siamese.ipynb | 154 ++++++ examples/triplet/mnist_triplet.prototxt | 113 ++++ .../triplet/mnist_triplet_solver.prototxt | 25 + .../triplet/mnist_triplet_train_test.prototxt | 498 ++++++++++++++++++ examples/triplet/readme.md | 179 +++++++ examples/triplet/train_mnist_triplet.sh | 5 + 8 files changed, 1122 insertions(+) create mode 100644 examples/triplet/convert_mnist_triplet_data.cpp create mode 100755 examples/triplet/create_mnist_triplet.sh create mode 100644 examples/triplet/mnist_siamese.ipynb create mode 100644 examples/triplet/mnist_triplet.prototxt create mode 100644 examples/triplet/mnist_triplet_solver.prototxt create mode 100644 examples/triplet/mnist_triplet_train_test.prototxt create mode 100644 examples/triplet/readme.md create mode 100755 examples/triplet/train_mnist_triplet.sh diff --git a/examples/triplet/convert_mnist_triplet_data.cpp b/examples/triplet/convert_mnist_triplet_data.cpp new file mode 100644 index 00000000000..d1eed30cba6 --- /dev/null +++ b/examples/triplet/convert_mnist_triplet_data.cpp @@ -0,0 +1,127 @@ +// This script converts the MNIST dataset to the leveldb format used +// by caffe to train siamese network. 
+// Usage: +// convert_mnist_data input_image_file input_label_file output_db_file +// The MNIST dataset could be downloaded at +// http://yann.lecun.com/exdb/mnist/ +#include // NOLINT(readability/streams) +#include + +#include "glog/logging.h" +#include "google/protobuf/text_format.h" +#include "leveldb/db.h" +#include "stdint.h" + +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/math_functions.hpp" + +uint32_t swap_endian(uint32_t val) { + val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); + return (val << 16) | (val >> 16); +} + +void read_image(std::ifstream* image_file, std::ifstream* label_file, + uint32_t index, uint32_t rows, uint32_t cols, + char* pixels, char* label) { + image_file->seekg(index * rows * cols + 16); + image_file->read(pixels, rows * cols); + label_file->seekg(index + 8); + label_file->read(label, 1); +} + +void convert_dataset(const char* image_filename, const char* label_filename, + const char* db_filename) { + // Open files + std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); + std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); + CHECK(image_file) << "Unable to open file " << image_filename; + CHECK(label_file) << "Unable to open file " << label_filename; + // Read the magic and the meta data + uint32_t magic; + uint32_t num_items; + uint32_t num_labels; + uint32_t rows; + uint32_t cols; + + image_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + CHECK_EQ(magic, 2051) << "Incorrect image file magic."; + label_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + CHECK_EQ(magic, 2049) << "Incorrect label file magic."; + image_file.read(reinterpret_cast(&num_items), 4); + num_items = swap_endian(num_items); + label_file.read(reinterpret_cast(&num_labels), 4); + num_labels = swap_endian(num_labels); + CHECK_EQ(num_items, num_labels); + image_file.read(reinterpret_cast(&rows), 4); + rows = swap_endian(rows); + 
image_file.read(reinterpret_cast(&cols), 4); + cols = swap_endian(cols); + + // Open leveldb + + leveldb::DB* db; + leveldb::Options options; + options.create_if_missing = true; + options.error_if_exists = true; + leveldb::Status status = leveldb::DB::Open( + options, db_filename, &db); + CHECK(status.ok()) << "Failed to open leveldb " << db_filename + << ". Is it already existing?"; + + char label_i; + char label_j; + char label_k; + char* pixels = new char[3 * rows * cols]; + const int kMaxKeyLength = 10; + char key[kMaxKeyLength]; + std::string value; + + caffe::Datum datum; + datum.set_channels(3); // one channel for each image in the pair + datum.set_height(rows); + datum.set_width(cols); + LOG(INFO) << "A total of " << num_items << " items."; + LOG(INFO) << "Rows: " << rows << " Cols: " << cols; + for (int itemid = 0; itemid < num_items; ++itemid) { + int i = caffe::caffe_rng_rand() % num_items; // pick a random pair + int j = caffe::caffe_rng_rand() % num_items; + int k = caffe::caffe_rng_rand() % num_items; + read_image(&image_file, &label_file, i, rows, cols, + pixels, &label_i); + read_image(&image_file, &label_file, j, rows, cols, + pixels + (rows * cols), &label_j); + read_image(&image_file, &label_file, k, rows, cols, + pixels + (2 * rows * cols), &label_k); + datum.set_data(pixels, 3*rows*cols); + if (label_i == label_j && label_i == label_k) { + datum.set_label(1); + } else { + datum.set_label(0); + } + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", itemid); + db->Put(leveldb::WriteOptions(), std::string(key), value); + } + + delete db; + delete pixels; +} + +int main(int argc, char** argv) { + if (argc != 4) { + printf("This script converts the MNIST dataset to the leveldb format used\n" + "by caffe to train a triplet network.\n" + "Usage:\n" + " convert_mnist_data input_image_file input_label_file " + "output_db_file\n" + "The MNIST dataset could be downloaded at\n" + " http://yann.lecun.com/exdb/mnist/\n" + "You should 
gunzip them after downloading.\n"); + } else { + google::InitGoogleLogging(argv[0]); + convert_dataset(argv[1], argv[2], argv[3]); + } + return 0; +} diff --git a/examples/triplet/create_mnist_triplet.sh b/examples/triplet/create_mnist_triplet.sh new file mode 100755 index 00000000000..f404f2aa255 --- /dev/null +++ b/examples/triplet/create_mnist_triplet.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env sh +# This script converts the mnist data into leveldb format. + +EXAMPLES=./build/examples/triplet +DATA=./data/mnist + +echo "Creating leveldb..." + +rm -rf ./examples/triplet/mnist_siamese_train_leveldb +rm -rf ./examples/triplet/mnist_siamese_test_leveldb + +$EXAMPLES/convert_mnist_triplet_data.bin \ + $DATA/train-images-idx3-ubyte \ + $DATA/train-labels-idx1-ubyte \ + ./examples/triplet/mnist_triplet_train_leveldb +$EXAMPLES/convert_mnist_triplet_data.bin \ + $DATA/t10k-images-idx3-ubyte \ + $DATA/t10k-labels-idx1-ubyte \ + ./examples/triplet/mnist_triplet_test_leveldb + +echo "Done." diff --git a/examples/triplet/mnist_siamese.ipynb b/examples/triplet/mnist_siamese.ipynb new file mode 100644 index 00000000000..8e076663ca6 --- /dev/null +++ b/examples/triplet/mnist_siamese.ipynb @@ -0,0 +1,154 @@ +{ + "metadata": { + "description": "Extracting features and plotting the Siamese network embedding.", + "example_name": "Siamese network embedding", + "include_in_docs": true, + "priority": 6, + "signature": "sha256:845bb18929f96543ba2611eb5eca744fd98939cbef876df6bc319c29f616fc64" + }, + "nbformat": 3, + "nbformat_minor": 0, + "worksheets": [ + { + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Setup\n", + "\n", + "Import Caffe and the usual modules." 
+ ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "%matplotlib inline\n", + "\n", + "# Make sure that caffe is on the python path:\n", + "caffe_root = '../../' # this file is expected to be in {caffe_root}/examples/siamese\n", + "import sys\n", + "sys.path.insert(0, caffe_root + 'python')\n", + "\n", + "import caffe" + ], + "language": "python", + "metadata": {}, + "outputs": [], + "prompt_number": 1 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Load the trained net\n", + "\n", + "Load the model definition and weights and set to CPU mode TEST phase computation with input scaling." + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "MODEL_FILE = 'mnist_siamese.prototxt'\n", + "# decrease if you want to preview during training\n", + "PRETRAINED_FILE = 'mnist_siamese_iter_50000.caffemodel' \n", + "caffe.set_mode_cpu()\n", + "net = caffe.Net(MODEL_FILE, PRETRAINED_FILE, caffe.TEST)" + ], + "language": "python", + "metadata": {}, + "outputs": [], + "prompt_number": 2 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Load some MNIST test data" + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "TEST_DATA_FILE = '../../data/mnist/t10k-images-idx3-ubyte'\n", + "TEST_LABEL_FILE = '../../data/mnist/t10k-labels-idx1-ubyte'\n", + "n = 10000\n", + "\n", + "with open(TEST_DATA_FILE, 'rb') as f:\n", + " f.read(16) # skip the header\n", + " raw_data = np.fromstring(f.read(n * 28*28), dtype=np.uint8)\n", + "\n", + "with open(TEST_LABEL_FILE, 'rb') as f:\n", + " f.read(8) # skip the header\n", + " labels = np.fromstring(f.read(n), dtype=np.uint8)" + ], + "language": "python", + "metadata": {}, + "outputs": [], + "prompt_number": 3 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Generate the Siamese features" + ] + }, + { + "cell_type": "code", + "collapsed": false, + 
"input": [ + "# reshape and preprocess\n", + "caffe_in = raw_data.reshape(n, 1, 28, 28) * 0.00390625 # manually scale data instead of using `caffe.io.Transformer`\n", + "out = net.forward_all(data=caffe_in)" + ], + "language": "python", + "metadata": {}, + "outputs": [], + "prompt_number": 4 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Visualize the learned Siamese embedding" + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "feat = out['feat']\n", + "f = plt.figure(figsize=(16,9))\n", + "c = ['#ff0000', '#ffff00', '#00ff00', '#00ffff', '#0000ff', \n", + " '#ff00ff', '#990000', '#999900', '#009900', '#009999']\n", + "for i in range(10):\n", + " plt.plot(feat[labels==i,0].flatten(), feat[labels==i,1].flatten(), '.', c=c[i])\n", + "plt.legend(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'])\n", + "plt.grid()\n", + "plt.show()" + ], + "language": "python", + "metadata": {}, + "outputs": [ + { + "metadata": {}, + "output_type": "display_data", + "png": 
"iVBORw0KGgoAAAANSUhEUgAAA54AAAIXCAYAAAD0R4FDAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvXtwXOWZr/usvurWUktqGdmxaawEHEMuthGXITiIyMaJ\nwbEMFmCTDMkkoyqSyTnZMwdqpmYyzEyS2ruKue2ZqSTHO/vYGQbhCxdjwI637ViWMEEEMJhgB4MB\ngSRLsizJkiypuyX1+WP1Wlp971YvSd3y+1S5rF69Lt/6+lOrf/2+v/dVgsEggiAIgiAIgiAIgjBT\nWOZ6AIIgCIIgCIIgCML8RoSnIAiCIAiCIAiCMKOI8BQEQRAEQRAEQRBmFBGegiAIgiAIgiAIwowi\nwlMQBEEQBEEQBEGYUUR4CoIgCIIgCIIgCDNKRsJTUZQ8RVFaFUV5U1GUU4qi/HezBiYIgiAIgiAI\ngiDMD5RM+3gqilIQDAZHFEWxAS8B/08wGHzJlNEJgiAIgiAIgiAIOU/GqbbBYHAk9KMDsAJ9mZ5T\nEARBEARBEARBmD9kLDwVRbEoivIm0A0cDQaDpzIfliAIgiAIgiAIgjBfMCPiORkMBlcAi4EvK4pS\nk/GoBEEQBEEQBEEQhHmDzawTBYPBi4qivAhUA03adkVRMjORCoIgCIIgCIIgCFlNMBhUEj2fkfBU\nFMUDjAeDwQFFUfKBtcDfxxhEJpcRhDC+9a1vsWPHjrkehjCPkDUlmImsJ8FsZE0JZiNrSjAbRUmo\nOYHMI54LgV8pimJBTdt9PBgMHsnwnIIgCIIgCIIgCMI8IiPhGQwG3wZWmTQWQUiJq666aq6HIMwz\nZE0JZiLrSTAbWVOC2ciaEuaCjIsLCcJsU1NTM9dDEOYZsqYEM5H1JJiNrCnBbGRNCXOBCE9BEARB\nEARBEARhRjGtqq0gCIIgCIIgCIIQTSrFd3KF6RaOVWa64qyiKEGpaisIgiAIgiAIwuWKoijzotNH\nvPsIbU+oriXVVhAEQRAEQRAEQZhRRHgKOUdTU9NcD0GYZ8iaEsxE1pNgNrKmBLORNSXMBSI8BUEQ\nBEEQBEEQhBlFPJ6CIAiCIAiCIAgziHg8JeIpCIIgCIIgCIJwWdPX18emTZsoKiriqquu4sknnzT9\nGiI8hZxDfAmC2ciaEsxE1pNgNrKmBLORNSVE8v3vf5+8vDx6enp44okneOihhzh16pSp1xDhKQiC\nIAiCIAiCcJly6dIlnnnmGX784x9TUFDAl770JTZu3Mjjjz9u6nXE4ykIgiAIgiAIgjCDJPV4NjTA\nmTNQUACNjeB2p3eBDI4/ceIEt956K5cuXdK3/fM//zNNTU3s27cvpfsQj6cgCIIgCIIgCEK2c+YM\nHDsGBw6oInIWjx8eHqa4uDhsm8vlYmhoKP1xJECEp5BziC9BMBtZU4KZyHoSzEbWlGA2sqaykIIC\n9f/qati2bVaPLyoqYnBwMGzbxYsXcblc6Y8jASI8BUEQBEEQBEEQ5pLGRqivh0OH0k+zzfD4a665\nhvHxcd5//31921tvvcXnPve59MeRAPF4CoIgCIIgCIIgzCDZ3sdzy5YtKIrCL3/5S9544w3uuusu\nfvvb37J8+fKw/cTjKQiCIAiCIAiCIEyLn/3sZ4yOjrJgwQK+8Y1v8Itf/CJKdGaKCE8h5xBfgmA2\nsqYEM5H1JJiNrCnBbGRNCZGUlpby7LPPMjw8zEcffcT9999v+jVEeAqCIAiCIAiCIAgzing8BUEQ\nBEEQBEEQZpBs93iming8BUEQBEEQBEEQhKxFhKeQc4gvQTAbWVOCmch6EsxG1pRgNrKmhLlAhKcg\nCIIgCIIgCIIwo4jHUxAEQRAEQRAEYQYRj6dEPAVBEARBEARBEIQZRoSnkHOIL0EwG1lTgpnIehLM\nRtaUYDaypoS5QISnIAiCIAiCIAiCMKOIx1MQBEEQBEEQBGEGy
WaP53/8x3+wY8cOfv/737Nlyxa2\nb98ed99MPJ62zIcqCIIgCIIgCIIg5CKf+tSn+NGPfsTBgwcZHR2dsetIqq2Qc4gvQTAbWVOCmch6\nEsxG1pRgNrKmBCObNm1i48aNlJeXz+h1RHgKgiAIgiAIgiDMKQ1ADbAeGJiD45nxVGDxeAqCIAiC\nIAiCIMwgyT2eNcCx0M/1wO40r5Dp8fCjH/2I9vZ28XgKgiAIgiBoNDc3MDBwBputgNraRpxO91wP\nSRAEIQMKQv9XA9vm4PiZj3hKqq2Qc4gvQTAbWVOCmch6mh0GBs7Q1XWM9vYDtLQ0zPVwZhRZU4LZ\nyJrKRhpRI5WHgOl8kZbp8WrUciaRiKcgCIIgCDmHzaZ+u+/xVLN69fS+3RcEQcge3EwnPdaM4ycm\nJggEAoyPjzMxMYHP58Nms2G1WjMYTzTi8RQEQRAEIefw+QZoaWlg9eptkmYrCELWk819PP/u7/6O\nf/iHf4ja9rd/+7dR+2bi8RThKQiCIAiCIAiCMINks/BMh0yEp3g8hZxDfAmC2ciaEswkcj01Nzew\nb18N+/evx+ebXon7TMmGMQjTR96jBLORNSXMBSI8BUEQBGEGyYYiONkwBkEQBOHyRlJtBUEQBGEG\n2b9/Pe3tB/B4qrnzzkNz4kfMhjEIgiBczkiqrQhPQRAEQZhRsqEITjaMQRAE4XJGhKek2go5iPgS\nBLORNSWYSeR6cjrdrFmze04FnxljEJ/o3CHvUYLZyJoS5gLp4ykIgiAIOUZzcwMDA2ew2QqorW2c\nFVGr+UQBWloaWLMmk35zU8zFvQiCIAizj6TaCoIgCEKOsW9fjS4Cq6rqTROBiUjHJ5qOmJyLexEE\nQZhtJNVWUm0FQRAEIeew2QoA8HiqWb16W9hzM5USW1vbSFVVfUrFidKpopvoXgRBEIT5gwhPIecQ\nX4JgNrKmBDOZjfWUSATOVOuUdHyi6YjJdATt5Yq8RwlmI2tKmAtEeAqCIAhCDmCMZAJxRWA2RBDz\n8ytwOj0pCclsKL4kCIJwOeP3+/nOd77DVVddRXFxMStXruTXv/616dcRj6cgCIJw2ZMLBW5S9UJm\nQ+sU8W0KgiCEk80ez5GRER577DG+/e1vc+WVV/Liiy+yZcsW3n77bbxeb9i+mXg8paqtIAiCcNkz\nUxVbzSTVSKYWQYyFWQI72XmyIeqqkQtfKgiCIMwlBQUFPProo/rjO++8k6VLl/LGG29ECc9MkFRb\nIecQX4JgNrKmBDOF0kytJzO8kGb5P5OdJ5t8mzPleZ1N5D1KMBtZU9lHAw3UUMN61jNA+oXhMj3e\nSHd3N2fOnOG6667L6DyRSMRTEARBuOyprW2c8/TUZLS2PsLISA9HjmxNO3KnRf36+98BUhPYiSKF\nyYR6oqhrJuza9VlGRrqwWOzcffdruFzJv4nPpuirIAhCPM5whmOomTcNNLCb9N5DMz1eIxAI8MAD\nD/Ctb32La665ZlrniId4PAVBEAQhy4gl+tLxTUYef/BgnX5sYeFiNm9+O6lwTXS9ufKRbt/uJhC4\nCKj38cADnyQ9Jhs8r4IgCMk8nutZzwEOUE01hziEm/TerzI9HmBycpKtW7cyPDzMc889h9VqTfk+\nxOMpCIIgCFlMvKhiLM9prMhdqscbj001/TVRpHCmIprJsFjsAFitBXz96y/F3U+bl6GhsxQWenE4\nimdriIIgCNOikUYaaGAb26YlGjM9PhgM8p3vfIfz58+zf//+mKIzU8TjKeQc4ksQzEbWlADh7Up8\nvun7Y9JZT/H8h4ODZ0M/WRkd7cHnG4jpm4x3fKRonI7nMpt8mhp33/0ahYWLuffeUwnTbLV5uXSp\nnZ6e4znt7wR5jxLMR9ZU9uHGzW52T0s0mnH8Qw89xB/+8Af27duH0+mc1jmSIcJTEARBuGxIJC7n\nogiNJjDt9mJuuukxffvkp
D/00wTnzh2jpaUhZr/LeFHJSNE4nV6Z2dhf0+Xy8sADnyT1dmrzYreX\nAOLvFARBSERbWxvbtm3jrbfeorKyEpfLhcvl4sknnzT1OuLxFARBEC4bEvkW9+9fT3v7gbTSUdNF\nSwEdHDxLMBhgdLQXmIgaz44dZfj9/YAqnrZu/SjmeDL1L2baaiRbW5Vo83LTTY/R2vqw+DsFQZhz\nsrmPZzpk4vEU4SkIgiBcNiQSl7NRhMYofI1YLA48nhtwOIqprW3k0KF6OjsPY7eXsGTJ1xgZOYfN\nVsDg4HuMjp5PqaprKqIwlhBvbm6gre15JiZ8eDzXs3btnrjzkU7BI0EQhMsZEZ6SaivkIOJLEMxG\n1tTlQyLfolmppYnW01QK6FSxG4fDTVnZyjAv4tq1e6iqqmfr1o84d65JTwEeHPyIQOAiPl8vu3Zd\nE9eP2tzcwNmzu5OmDsdK1R0YOMPoaBd+fz+dnYdpaWngf/0vB9u2KWzbZuHcuZcSHp8uZnlr5zPy\nHiWYjawpYS4Q4SkIgiBcNqQjLmdCEGnCd/Pmk3i9G/F669iy5UPy8soAVcBZrfns3r2c9vbDHDpU\nz8TEmOEM4/pPk5P+uKJyYOCM3nYELHz88UF+9asKhobawu7twoW3cDrLcTiifaMAZWUrWL16G8Fg\nILQlyPPP36afIxAYJD+/krVrn5q2YJ8Lb60gCIIw+0iqrSAIgpDVzJWPcDbTSI8efZCPP96Px7OC\nCxdOMjbWoz+nKHaD8FOxWBxMTvqx20vYvPktPeVWm6uenleYnPShKDYsljwmJoaBqd6XsVJ+tXv0\n+QZoavo2EKSmZgdOp5tt2yyA+rd8w4YWFi681bT5mQ1vrSAIwlwjqbbSx1MQBEHIcmL1tJwNUk0j\nzUQYa8f29Z3E7++no+MwimIP20cVnQqa8AO1yTdAIHCRZ56ppqLiBmprG8PmSj12XK+Qa7UWYLE4\n2L7dzfj4SNg1HI5SrNZ89u2riXkfGzY08/zzt7FhwzEWLrw1rflJRm1t44x7awVBEIS5RyKeQs7R\n1NRETU3NXA9DmEfImspuzI6IpSoUUy02ZIz8uVxL+eCDYlauXER+vprammpxH1BFnN1eyLlz0QWI\nNCyWPK644ibOnTuGzVbE+LgazayqqsfvH6a9/YC+r93uprj40/T3/55Nm15l374vG1JwVRTFxt13\nv87x4/+XPpaioqUUFV2ZcOyx5idbq9zmOvIeJZiNrKnZRyKe4vEUBEEQspxEBYESEc+jmaqnMFU/\nqDHyV1CwiL6+t2hvP8DHHx9IubhPWdkKvN46Skuvpb//dNR+ZWVf4FOfWktBwSKuuqqOyclx8vMr\nqai4PnSeIsbG+lm9+udhxwUCA1y48DqTkz5eeGGNIdJpwWp1AWpU9MUX12G1qpFWRbExPNyWdOxO\npxuHw83Bg3X6HItfUxAEQYiHRDwFQRCEeYkxmpifX8m9957G6XSbHkE1Rv6OHNmqn9vhcNPZeRib\nrYgrrriZNWvUtiS7dn2WkZEuLBY7d911hBMnfqJHDR9/fCGjo11h53c4SrHZ8pmcHGdyMkAgMGzw\nfNqwWCx6Oq3XW0db296oMRojo/HwejfS1fUyPt/5sGtv2fJBVERzaOgsExMBxsbC+5BqEdd05lai\npIIgXA5IxFOEpyAIgjBP0QSmhrF4jlmeQqNoys+v4OLFs1y48DplZV/Ebndx4cIJfL4LAOTlVeJw\nuBgcfE8/vrBwMUuWfE0/R0/Pb/H7p19B1+vdSFvb88Ckvq2gYBFu97V0dh4O29fpLMfvv0gwOE5Z\n2Qo2bDiqC2dQ27zcc8+bnDjxU318vb2vhxU+0tAEKpD23EovUEEQLgdEeIrwFHIQ8SUIZiNran7i\n8w2we/dyRke7oiJwZkXZjKLJ6fTg8/Xy7ruwbJn6fH5+ZVgEU1FsBINTLVEWLryNiYlxenq
OT/Mu\nw1Er4E6iRSGt1gIqK2/hy1/+Jbt3f5aJiTHsdheVlas5f/41XUTabIWhCrg2PJ5V2GyF9Pf/ntHR\n84yPXzKMObzIkXbNpUvvYWTk3LTmczaq2uZ6VFXeowSzkTU1+2S78PzGN77BkSNHuHTpEh6Ph+98\n5zv89V//ddR+4vEUBEEQhAicTjf33ns6pj/ULC+i0d/p8awIe87hKGXhwhq9Sm1Z2QocjpKwfc6d\nO0ZPz8sJrpDen2k1BVcVnYpiZ2JihI6Ow7S2Poz2OSEQGAJgcnKqRcv4+BiBwEV8vgt0dBzmllv+\nldHR8wQCF8OEcqTotNuLuf/+9xgZOTft+dQ8vKWl14b5Rc1EvKeCIAiJ+au/+is+/PBDBgcHOXDg\nAP/+7//Or3/9a1OvIcJTyDnkGzrBbGRNzV/iFQjKtBWIVrgoGAxQVOTFanUCasXZZcvUyOaGDU2M\njJzT/ZiXLn1CWdnnKChYRH5+ZehMViLF3BQKxpRZu70ktC05ijLVLc1ud3HTTY8xOTmmb+vtfQOP\nRy1MVFa2ImJ+guzbdysWiyqYrdYCnE4PAOXlK1myZD1LlqynqMhLWdnnaWl5SN83cj7jFXgyor1G\nQ0PJCxpNF7Nav8wV8h4lmI2sKSGS6667jry8PP2xzWZjwYIFpl5DhKcgCIJw2THdSrkaWgSto+Mw\ngcAluruP09FxGLu9EFArxf6f/1NHX99J/Rif7wLnzh2jouIGios/Hdo6keAqU4JUUewsWfI1Uv2z\nHQyO64I3EBjihRduD3ve41nF2rV7qKqqZ8OGo9x99+toolZRrIyP+5icDKAoDurqfsv9979HUdFS\nrNYC+vpO4vNdxO8fpLv7OO3tB7DbizKOLM+kOMz09RYEQZhpGoAaYD0wnZyPTI8H+N73vkdhYSHX\nXXcdf/M3f8OqVaumeabYiMdTyDnElyCYjawpIV2MvkSn001Hx2G9ku3Ro4f50peqsVqddHdHezeX\nLFnPhQsnGRlpT+laDoebxYu/yocfPm2oZps6dnsJZWWf08eiKHYqKm7k/PlXCAaDKIqVu+9+DYej\nhH37bqWg4FOcP9+qH+90VlBRUY3fPxjTi5rIm5mOfzPdok/NzQ20tT3PxIQPj+d61q7dM29FpbxH\nCWYja2r2SebxrAG0Ds71QLpl1jI9XiMYDHLs2DE2b97M/v37ufHGG8Oez8TjaUv0pCAIgiBc7kRW\nrh0aasNqteP1bqSmZgcwVckV4N1367jzzr0cObI15vk6OtQWK6mgKDbKy79IZ+dvpiU6QSEYnKSn\n51VA9Z0WF18TJiCDwUmefnolDz54gQce+IT9+9eHHe/znae9/QB5eZVEUlCwKGHRptraRp55ZhVW\nq5MjR7YmLOyjpdymysDAGb1wU2fnYVpaGnA43DldREgQhMuXgtD/1cB0cj4yPV5DURRqamqor6/n\nySefjBKemSART0EQBEEwoImnwcGzuFxe+vtP4ff3A1rVWFUAxmr9YRReq1f/nGefvZmxsa6oa4Rj\nBSawWBx6P04zyMtbwPj4GOPjg/p1Fiy4md7e15ic9EWPwppPefkqbLZ8rFYH3d2t+P1qKxiHo5R7\n7jnBM8/coPf5dDjcbNnyYZi4i9UaxbitqGgpRUVXRgnDVKrORu5jbP2itYM5eLBOWrMIgpCVJIt4\nDqCmy24DpvOVWabHR/Ld736XyspKfvKTn4Rtl6q2giAIgmASmi9xZKSd7u7juugEdNEZz4do9DS2\ntj7Mffed1gvzxGPDhiZcrqVYrXkJ90uXsbEeg+i0UF6+gp6e4zFFJ8DExCg9Pcfp7DxMd/crKIr6\n+cFicVBScg0tLQ9RXv5FQBOib6ZUtMm4raBgUUzPZype0Mh9amsb8Xrr8Ho3smHDUZxOd84XERLm\nK2a474T5jhs1PXa6ojGT48+fP8/OnTu5dOkSExMTHDx
4kD179rBx48ZpjiY2IjyFnKOpqWmuhyDM\nM2RNXd5EVl7VxIvdXhy1b1nZCrzeurh+RZutgHffnRI+TqebioobEl7/nXf+jcLCKwkEBhPulxmT\n9PefSnlvv78Pn68Xi8VJeflKzp9v1YsIuVxLKS29lpaWh6Iq1cYq4mPc5nCocxopDCMFo/aaPPHE\nEvbuvZX9+9dHVc51Ot2sW/cs69btjXmt+ZRmK+9Ruc4ZVPfdAVQROvfImhKMKIrCL37xCxYvXkx5\neTk/+tGPePzxx7nhhsR/v9JFPJ6CIAhCzpFKamaqaJE0QI+ktbQ0cNNNj7F793ImJkax211UVq7m\nK195Iupakem1p08/SGmpl4MH6/Rte/fezOhoFzZbEePjw/qximJncPBjhobOTnv8qRLej1NDTfMF\nsNkKGR+/hN3uMvT69DE01AaA0+lhdPQ8Pt8AQ0MfAvDMM6soLAxPnXU43Pq9a55YTVhqcxtZQChy\nu/E1uXRJLcLk9dZRVLQUiyW+XzRdn6ggzA5mue8EYWbweDyz8mVERh5PRVGWAP8JLECt+74tGAz+\nW8Q+4vEUBEEQTCWWl3C6JKq8unfvrXohHqfTQ0XFDdTWNtLa+oguNgOBQb1irOZh7Os7qafoVlXV\nY7Xm8/HHBwgEBuOmuloszrjPzSRWax6LF3+VW275V1pbH+ammx4LE8oVFdczOPghPt+AIXUXbLYi\nJiZ8evqx11vHunXPhr02TmeF7gnNy6vkvvtOh81dvC8NtNfEbi8hELiovzbi4RRyE7Pdd0Iukszj\nmSvMZVXbAPDfgsHgm4qiFAGvK4pyKBgMns7wvIIgCIIQFzO9fPn5FTidnpgCSEsNtdmK8Pl6dX/h\nyEiPLoDy8yv1sVgsTn07qGJ1eLiTgYFTYV7RWMy86JyKbhqpqLiJmprtYdHCe+89zeOPVzI+Psy5\nc8dwOsvDRCcQFrkF9MJIxtfG4XDT2XkYgLGxrqi5i1eJ1hh1bm19GKs1n927lzM21guoKc/i4RRy\nB819l000AM8DPuB6YA8iioWZJiOPZzAY7AoGg2+Gfh4GTgOLzBiYIMRDfAmC2ciayj3M9PINDbXh\n8/XS0XE4qrCNdp0rrrgZmBK6mrjS2qIoioOLF9/j4sU/8O67YLUWkpe3AL9/iJ6e44aquOr3vQ6H\nO2nRIQ2bzWV4pKAKyPQxFt8x0tV1TL9vzVt55MhWrNb8qasqqV/T+NqsXbtHb8MSOXfaY2PRoJ07\nr9HbuaxZsxuXy8uaNbsZGmpjdLRLTxd2ua6aVx7OZMh7lGA2TU2vAl1AP3CYbPGeCvMb0zyeiqJc\nBawEWhPvKQiCIAiZYaaXL1H0VLuOzzcQ5kHMz69AUWyMjw/rkb9AQI34KYqViYlLTExcirpWMDiO\nzVaA232d7pNMRFnZ9fT1nTCegVhRy1Tw+S7E3K4oNnp732T7dneowJGaQuVweEJjWMG6dXtpbX2Y\njo7f4PNdwGrNZ9Gi22lvP6Sn2p4//zt8voGo1+a++06HzV2kn9Mo4rWeoTt3XkNFRbUeATV6YMvK\nvkBNzfZpzYEgCBpOw88rEe+pMBuY0sczlGbbBPwkGAzujXgu+OCDD3LVVVcB4Ha7WbFiBTU1NcDU\nt3jyWB7LY3ksj+XxXDz+oz9aQUtLA5OTf4zDURT1vMXSyMDAGVpb3yE/v5JVq5YQCAzS3Kz6Opct\nA4B331X//+IXFzA21kNX1zWMjHRSVTWsP2+x5HH11T4gqO8febz2+IMPigkEBuM+b9bjlSsXMTLS\nzbvvToQ9399/EzZbHn/2Z2rV2KamJkZGuujvf5ivf/0lXn/9Q158cS1XXz2un2/Rotv4i79omtb8\n/+53Z+ntfYNlyyzAZNj59u69lZYWdb7vuGMj69btzZr1I4/ny+O7gHZqahYBjTQ1vZll44v1+B+p\nqRkGCmhq+h4Q/f4
V//ELwP+gpqYC2J4j95vbj2+//fZ54/E8evQob775JgMDamXzjz76iF/96ldJ\nPZ4ZC09FUezAC8CBYDD4rzGel+JCgiAIQk7S3NzA2bO7CQQuhm3Pz69kdLQLh6OU0tJr9eJCpaWf\nZ3z8UiiaacHjqaav74Tuf0wVq1Ut6mP0i84csb2fDkcpixevY2TkXNxCQGqU9GJofzdbtnw47RRY\nn2+AnTuv0YsRORylbNnyAU6nO2EBqHQwsxqyMF9oQG13chI17RSgnuzzZMaiBrVNC+TOmC9fpLhQ\nhh5PRe0u/b+BU7FEpyDMBNq3SIJgFrKmhHgMDJzRhZXmz/R4qqmre4Wqqnq2bPmAr371BbzeOgoL\nr2R0tIfXXvsALSW2t7cVu10VN1N9QRP+XQZgYmKYrq7mNEdr0ceYHrFTd/3+fj74YLfuv2xq+nbU\nPgsWqD3eHA4399zzZlwhF9krNRZq2q2W/qdQXFzFkSNb8fkGTPP0Gv2kkX7ebEbeo2YSrcemJjpz\nqeXJ9Nu0TG9NNaCK3fWolXoFIT0yEp7Al4BvALcrinIi9O+rJoxLEARBEOYEo0gaHHwPUEXnXXcd\n1cWPVvTG6XTjdLpZt+5ZJif9jI11R51vwYJqqqrq2bz5JFVV9TgcpWHPxxeL6XwzbmHLlg/Iz1+Q\nxjGxiBTFxsfh42lubiAQGAWsTE5O8Mwz1XrPz0hSFXyFhV79Wr29r+v7a77RTCOUZlZDFuYLmnhb\nAdQBh8id6q6NqJHO2RqzJtIPIMWIhOlgiscz4QUk1VYQBEGYYcxMoQzvQ1luKMqjsHDhl7njjr0x\nz//LXzqjUmq1VNzh4TYKC704HMV0d78clbprJP1+nqrodLm8YX1H06WsbAWjo12MjnZFPWe3u9i8\n+W1OnPgpbW3P4/P1MTk5QWS0tLBwMQ888EnU8ammyk7171T9rZmm1kYSWSRKEOa+x6aW6luAKiTN\nHoOZ51+PKjqryS2Bnh1Iqm3mEU9BEARBmBOMkcm+vlOmpVAao2Iez0rDM0HOnTsW9/zGViWKYiMv\nbwElJdfQ3X2cS5fa6ek5Tnv7AaxWZ8zjVSw4naWk2jJFUWxcdVUdR48+yP7967HZ8pMfFPGn32Yr\nZMmS9ZSXf0Hvk2l8DiAQGKK19WEGBs4wOtoVEtjhotNqLeDrX38p5hXVXqkVOByJP6hqKbVadDhS\ndKaSspsIsyKnwnxC67E5V2vCGEW8mvTTWJOlv5oZpZztCKswF7z33nvk5eXxzW9+0/Rzi/AUcg7x\nughmI2u1e9m8AAAgAElEQVQqNzGmbw4Oqu02pptCaRQ0q1f/XBc9a9bsQVEc+n6lpZ9n9eptMQVQ\nRUU1AO+/n0cwOM7YWA+9vW8AasQQ1JYhbvdyLJZ44nMyFHFM1jJFFbb33/8+Y2MX9Hm4cOFE2Hhj\noSjhf/rHxy/R0XGE999v1Ptkqvs59HtSW530Y7Xaw46120vYsKGFwsLF3HvvKVwuL7FQe6Wep7Mz\nvFdq5DxqwtCYymwkVz2amZJb71HiA0wP7QurIqCX9AWiUViuInruY/tAp7em5lqkC7PB97//fW68\n8UbUUj7mYlofT0EQBEGYTYyRybVrn6K19eGUUygjU3M1QQPQ2vpwWB/KpUvv5sMPn8HhKOarX30e\np9Mdtn9LSwMOh5tAYJS8vEqCwT79WK3HpcXixOFw4PerIlEtCK+hkJ6fE0AVti+//H9H9MGM3avT\niCoup65ptRYwMTESYz8/Docbp9ODz9dLR8dhFMXOpz61FovFjsVip6ZmB06nmwce+CRhunM8b2Xk\nPCbrzTpdj6ZUs51NNCEEqoCajUqrM52uaiaRY20MbesHDqMKxHxUAXkW8ALFxL8vTVh6gAuA1h94\nOXDacP65SiUWcomdO3dSWlrKtddey/vvv2/6+SXiKeQcWk8kQTCLy3VNZZq2ONcYK
52eOPFTRkZ6\n9Cqoye4tMnKWSNCMjJwjGPTj8/XS2vowEC2ABgbO0NNznLGxLq65ZjLqej5fLxbLlNjUBGnoERZL\n4ihlOMY/3Qr5+RVYLE7Gx0dTOtpmK+aee97EYsnH4SiLm57rcJRSU7ODioobwsbd3/8OX/vai+Tn\nL+DgwTp9jo1zunPnNWFzH68qbbpCcrrVbXM9Uppb71HTr7Q6fXKp6M3zTI3128AjQE/oOa24UVto\nn3bgOInvS0t/tQCDhu1doWNiRymj15REqueahgaoqYH162FgGi9BpscPDg7y6KOP8i//8i8z5kUV\n4SkIwmVBQ3MzNfv2sX7/fgZ86RRumb9k84fxVNtvaOmYkfeiPf7v7eXcuPP/jXrdjYLHas3H7x/E\nas1DUay6eI21ryaOIgWQcR+HowQARbGiJRaVl69k06ZXyM+vDJ3VmMJkxWo1ir/o9Can04PDUUpe\n3gKuuOKPQtvK6e5+mfffbwwVI0qWnhu6mtVJUdGVLFhwI35/X8woqcNRyj33nMDpdFNb2xj2XHn5\nCiC+eFcjr+fD1lWkt1J7fScnA3i9dSkLyel6NKWa7WwyFz7AuRC706EBVRBq+JkSzYcBO+qcafej\nfVlVAjwWca7PAg6gAlW49kc8n+5c5JJ4n5+cOQPHjsGBA6qInO3jf/SjH/Hd736XRYsWzUiaLYjw\nFHKQ3PK6CNnCmYEBjnV1caC9nYaWlrDnLtc1lc0fxtMVxUbRMzY25UXss32ak75S/XXXBE8wGMDr\n3ciddx5iaKiNnp7jTEyMcf58a9Q1a2sbcbmWYrU6dVEaKYCMQrSi4j8oLFxMeXk1oHomh4ba2Lfv\n1lC7kMjU2gm9yq0qQKMLC/l8vfj9/YyN9dDd/dvQtgHGxnrCfJnxcDrLDec6T1PTt8KKIRlRFDsV\nFdfrAtrpdLNw4W2A6nHNy/Owb18N/f3vAFPrR5uDBQtu1rdbrfkxv0DQXt/OzsNYrfYZT301qw/o\nXJFb71Fz4QPMlaI3ZyIe24ktmrX7WRV6fBG4nfCIZBcQQH2POUb4e8oiEs9FA01NK0jFCyrMHgWh\nl6C6GrZN4yXI5Pg333yTI0eO8MMf/hBAIp6CIAiZUGBTI0/VHg/bVq+e49FkB9n8YTxSFCeLgKq+\nvQrGx4fp7DyMzVZIVVU9i69QP7hpr7smeDo6DmO1OsKilXZ7cdg1NZxON+Pjo3R3q1Vpn3zy01Hj\nMArRgoJKHnjgE/LyykL3UoTf38elS+309rYS6ee0Wl36ddUiRPGFpM1WBGipvKlFODdsaGFiIhCx\nVdFf/8g+osFggI6O8CJAd9yxl6qqejyelXz00XN0dR3D5+ulsHCxvn60OVi7do++roaG2vQvEHbv\nXq7P2Wx/6SHVbLMZM1I8s7nojfH+jN7uzwE7mBKZF4Ey1C+mqlAjnGWhfatRxaQxImk81xeAL4V+\nXgGsQU3bjTWnDaHrvhU617LQPrki3ucvjY1QXw+HDoF7Gi9BJscfO3aMjz76iCuvvJKFCxfyT//0\nTzz99NNUV1enP5AESB9PQRBynobmZs4MDFBgs9FYW4vbGV0xdMDno6GlhW2rV8d8XsguIvstGntr\nVlXVxyxCE6tXZOTrHmsf7Vo33fRYVIEirShNd/fxqMhiVVU9Doc7btEa7bw+X79emEf1dlpRRaON\ngoIrmJjw4/cPUFl5C11dL0f4P9V9S0quxe2+hkBAFdbxsNmKGB8fDtvmci1laOjDsG2LFq1h7do9\nUXOrEa9/ZuS+DkcZ99zzBi6XV5+roaGzFBZ6GR5uY3x8GL9/6oOv9tql0k9TCgLlItMp8lPDVDGi\nemanGNFsUsPU/S1AFZG/B5agFg2qQPV0vkT4l0mLgHdQo56LgNcAX+iYk6F9bkEVmk+EHmtFhOoM\n16xELTKkvRbG8WjMx3nPPrK5j+fo6ChDQ0OAG
u38x3/8Rz766CN+8YtfUF5eHrZvJn08RXgKgpDz\n1Ozbx7Eu1TdTX1XF7jVr5nhEgtnEEoyRpCJmUtnHSKTQsttdBAJD+jgOHqxLKIibmxvo6zvF4OBZ\nCgs/xYULrwNgsThYuPDLnDt3nMnJ5EWBvN6NrFu3F59vgJ07r8bn68VudxMIDGH8sLpkyXo++WR/\nxNFWYkVHi4qupKhoKRaLnXPnjhEMBrBa81m06Ha+8pUnosS3zVbA5GQgSvhaLE6++c2usLmIRaLX\nLhapfNkQOb54AtW4T35+BUNDbSJoZ4Qa0heR61Ejb9XkVrQtmcjWnn8FVTBqeFCLAPlDj50Rz2tY\nUFusjBCdBVEJ3IEqWGNdX5tTDeNrEfncCuBojPELZpPNwjOSv//7v+fs2bP853/+Z9RzmQhPSbUV\nco7c8roIs0GmabSyprKfVNKCW1sfCatsG4t0Uy61lNCyshV4vXVs3vx23KJCWsqocT0Zq90ODJwK\nbbUyOemno+NwSqLTYrHT3/8O27e72bnzaiorvxxKK76EUVDa7S5uvfVnRBcnMorOqeeGhz/WfZYO\nRwlWawF2exHd3b/l8OF6fQ6Nflu7vYiqqvowz+jkpI9du5brVXvt9pLQ/2rqcnn5St1Pm2zejSnV\n2vmSpeOm4gc27vPxxweytqhWPHLnPWo6PsFcTfE0FuOpRE2LXctUaqtWvdYoKq2ovTr9hm2xfz/V\nlPpBYqfedwFPEr8YUGNoTBD9WjQCG2lqugnYiIhOIRaPPvpoTNGZKSI8BUHIeRpra6mvquLQnXdK\nGu08JRXBmG5Boli+0chtmuAtL/8CPl8/LS0PhUVLkwliTZg6nR6Dl3IixjYj4cWFNm16jdHR8wQC\nF/H5emlrexaf73xESi4EAkO0tj4c8oFGoyhqlDUWPl8vExOjjI2dx+/vD/N4GsV1Tc121qzZzd13\nv47FMvW7NjbWpYvSzZvfCv1/kqqqeu666zesW7c3JbEfKXIjizrFIhW/qHGf8vIvJt1fmC7TEZHZ\n7M+MJJZf04YqLrU+nNp7T6woZnSrpaltVuBOwr2bidB+/2NVvHUDrtDYPkT1jxqf2wv8D9TU30Re\n0Jo4zwnC9JBUW0EQBGFekEo6LkylXfb1ncTvV1sQaKmc8dI7jdudzgoqKqqTpmka02xdLi/nz7cC\n4HCoVWLHxnrp7j6u719evpKioisZH79ER8dhLBY7mza9Rnn5F/jVryrw+XrDzq+l/Uam/2qpuEYs\nlnzuu+80DkcJO3deg893Puz5SG+oxeLA47kBh6OY1at/zvPP305BwSIcjmL9vn2+AXbtWs7YWJd+\n7dbWRzLyZUa+hslSmSH9FGsgrXRr4XLls6iRxTHUdNQy4HWmem6WoqbJjjDVP9OKWn12D6oAj+/H\nVlkGdAJDMZ6zMyUujdiIjoIuCe3rA64PXf8qpgTnYuCTGOeqIX5qdKLnhOmQS6m2iRCPpyAIOUMq\nhYCE7CJXiryk6t+M9G0aRdMHHzyF399PeflKyso+r3sBNW+jUaAl8h1GXic/v5LR0S69P6bL5dVF\nliY4a2p2hBU7Mt7H0FAbu3YtY3LSp+9/yy3/kxdeuJ28vAUMDbWxadMruFxehobaeO65W1AUK4HA\nCH5/H1dccQvFxZ9maKiN/v538Pl6sVjs3Hnnb3jnnX9jbKxf927a7SWUlHw2VIFXjcwGgxNRIj3W\nnCfzZSZbS5HnS/XLhFjkyroVspEG4P8jtcrRsTzUTtRIZjD0L955FFRBG91LN1pg5qOmMmuCNNYx\nGvWoKbS9ofFVAx2AF7U4keYJTeSvzVXvbfYiwlNSbYUcJHe8LkIsEvXTnCuycU01NDdTs28f6/fv\nZ8AXK2Vr9kg3hXWuSNW/Genb1CvgDpzRxVVR0ZVhrUD6+k7i9W7Ue1Rq/UJjpX9q68mY3llX9wpV\nVfVs2fIBL
pcXmErTjUxFjXUfJ078FIejBEWx43AU43CUcPTog/h8A5w/38rYWBetrQ8D4HJ5+cY3\nOnC5qvD7LwBBuruP09b2ot4GxWJxct9977Fw4a16CxSvdyNebx1bt36kt4IBi95DVLuXyFYzxrEm\nS3uNtZaM6c1A3P6o6QrHXFm3qZCN71HzD2Nq6SkSi04tHd5D7PRZH1M9NhOdJxjneIiOavpRxWYX\niUXnClRP52uokc5qoBVoB46jeULVNZUoNTpXvbdCNiPCUxCEWUX6aaZGNgn02e65ONNoYmbDhqOs\nW/dsTNFUU7NDfwwwNtaD1eoItSCZ6heaSNDU1jbici3l0qVPePrplfh8/WHPp1PoaGDgDGNjPQSD\nAc6dO8b77z9JV9exuIJQTfM9GXYOi2XKOzo56dOFqjaWdev2sm7ds7S2PkIgMIii2NE+FCuKjSVL\n1idNYfb7B8nLq2Tt2qcSel6N400kEDPpvznf1q0w0xiLBZ1Nsm9F6N9FIvvypk9/jG2xgkbJoq/F\nqKL5C6i+zYeAt5nqBVoS+t9YbCiRvzaXvLdCriCptoIgzCrSTzM11u/fz4H2dqo9njkvmpRuC5Jc\nITIVE6a8f62tj9DXd4re3t8xOekPS/VMJ/0zMq03WXpuvPRQ7ZqRlJWtwO/vZ3x8lMnJABUV11NQ\nsIiPPnqOQGCqoEh5+UruuONZdu1azuTkKIpix+NZhdNZFtVeJF5bFK2lS7wxx/LMRhJrLaXrzU01\ndXa+rlshVRK1O9GeO8tU+mkA1ZPpCe0T7pMOJ57/ci7ZiFo0qIZwb+Y21Pt9DHg49Fh+H+YCSbUV\n4SkIgpCViECfeeL5EZubGzh7drcu3AoLF7N589u6eElH0BgFY3n5Su666zcpC9W8vEruu++07vts\navo23d0vMzbWE+YLjRSKTqdHLy5kt5ewaNHt1NRsp7X1Ec6e3UUgMBh2TaezQi825HRWAMGo4kQA\nXm8d69Y9m3DMkH6/zul4c5MJeCPi9cxmkvXCzIQawgWY23CtQdS0UyN1qIKyM8ZzELuoT7bgQo1u\n/hR4CjWKWgTcjFpoSNZ8NiDCU1JthRxEvC5CLDLxRJq5pszyZrqdTnavWSOic4YwpqKWla0IS8Uc\nGDiji06HozRMdELy9E9tPWmpp07nApYsWZ9UdAIR6b1d7Nz5Gd37uG7ds9x337u4XEux2QqYmPBH\nHVNevhKPZ4Vh7G/p6cTqfamiU2vjYrMVhQlRn++87gEFtXKudt6amu0Jx2zs19na+khUq5p4pOvN\nTTd1dj54Pefv3z1jeqtZr43m1Xwn9FhLLTVe69XQcy7DPsWoFWtfi3FOK9kpOrXWK0Oo0cwzTKXu\nDhPe3iWc+bumhGxGhKcgCPOCbPFEZss4hMQYCwmNjHSGPacJHK0C7XQjZAMDZ+jpOY7P14PdXpjS\neWprG0PeShWf7wLt7QfYufMaXYAWFl5Jd/dxfXswGGDJkvV4vXXcdddvWLNmj17IaP/+dWzf7uZX\nv6pACX0P7XCUcvfdr+N0ehgfH2ZyMvwLEoejlPvuezfUi/NtvQBSvPFrntmyss/j8w1w5MhW+vtP\nZdxTNd510i00NJ+8nqnMU26hfWli9B1miiYwe1GL62jFcbRrFTGVJrsaNRp6LfBc6LhYXximUt12\nJlmAGnE14gZuC/2szZ92j8UR2wUhO5BUW0EQ5gXZ4onMlnEIiYn0TBYVLaWo6EpstgJWr/45ra0P\nZ+wNnG4rkBdfXEtHx2G9P2dkCxe/f5j29gMptXbZvt2tR28VxU5eXjl1da+EtXMxoig27r//fb3y\nbjoYU2Gt1nwmJkax24vZvPlk0vNNN402FeaT13Mm52luGECNyJnpO4zXBkS7Vj9qJND4fA1TabnZ\nSDmq8OwOPbYCbwBXEj5/2j2KnzMbyfZU25qaGlpbW7GFikAuXryY06dPR+0
nHk9BEC57ssUTmS3j\nEBLj8w2wa9dyxsa68HiqsVic9PSovi6zPtD7fAM888wqCgoWYbcXR/kLY3kPm5sb6O8/RW/vG5SU\nXMvISAelpcs4d+6YLmBBLYKk9d50Oj1YLFYmJvxUVFzPmjV7aG19hIGBM3R3v0wwGECtkhkMuz/j\nHIDqB928+a24IjFyvNo1tMdHjmzVhbaiWDl/vjXl+cykX+flhMyTRiJvaDIxG/l8A1O+yGxm6ndY\nZSmq8JwJf6wwE2S78Lz99tv55je/yZ/8yZ8k3E88nsJlhfgShFhk4onMZE1FejrFm5kbOJ1u7rvv\ntJ666XCoqWlmpGNq68npdIelxUamnMbyHqpi8TgTE6P09b3O2FgXfX2/Jz+/kuLiz3DwYB1Hjmxl\n9eptrF2rptS63csYHe3G7++no0Nt8aKdOxgMYLXmsXDhl6PuT5sDr3cjRUVeyso+R0vLQ3FTOCPH\nG/k4PBW2LK35zKRfpxGzUlGzqY8uTK0ps+YpOzH20Uz22kV6Q43HQuI2IMY2IQ2hn7NddFoJF502\n1F6eifyxiedTPksJsZhpYRyZMC4IgnDZ8Y9vvcXfDQ5SYLPRWFsbJRobmps5MzAQ8/nn29roGh0F\n4NtNTTy7bt2sjj1byYVKolpRG1A/0E8nHVO7z8HBs7hcXuz2Ymy27+nPJ/IXDg6qvQLt9mJuuumx\niN6bFmASq7UQn09tFt/RcUSvPtvS0oDD4ebcuRbGxqYq0JaXr2T16m0cObJVv64xShp5f1r/TmMK\nZ0tLQ8wIZeS97Nnz+bDxZzKfxmMj5zadNaSJ4UT3kQqaVxugoaWF3WvWTOs8ZhNrnsxhJqvLpoom\nJrXxJLrPSG9oXZJjY7VPaQSeR+3Fma0UovbfXAK0GrYXMSUmS4nt40xnPoWsINNfQxN+jf/qr/6K\nv/zLv2TZsmX89Kc/5bbbbkt+UBpIqq0gCFlJPLGXSATGe17bdnZwEK/LRbHdHnZszb59+ofM+qoq\ndq9ZE3aewUCA492qt6YyP5/T996rH1u2Ywf9frW66JVFRfgnJvBNTHC9x8OetWsv28inUcg4nRVU\nVFRnrQBNF6Mg+tfea/loLIgDP9/lf1PAaFhqqeYvtFrzw3plOp1unnvuVrq7p9J7R0Z6ovpn5uUt\nYGysB4+nGofDTWfnYTyeakpLr43q1VlQsIj6+nf09iuJhF+kqDOmyRqjacb9Ir2vkeM3QxAZr+f3\nD6ad/qylojqdHkpKluFwRKc4p8L892pHfkI1Crd65kakxPNmQvR4tW1aumyyY3cQ3XfTgxo1zObP\nqG7gj4C3UNu8aCxArcBbCpxAFdORaHPiAZYxJbZz/z04V0maaltDZr+GGR7/6quvct111+FwOHjy\nySf5sz/7M958802qqqrC9pNUW0EQ5h3xqsMat6965pmodLhYx2nb2kdGON7dHXXOgpCRvtrjYdvq\n1VHnOTs41W6ia3Q07NjrPWqz8UKrlUG/n67RUfr9fg53dl7WVW216JjNVoTPd35GWllkklaZybHG\nFNOPfXbeYxnv8Hn+i29ERTa1CNXQUFtUWq3dHp7eq82Z3V6ib9+06VU9tVJLrb3zzkMMDbWFic7y\n8pW66DReN57oPHt2d4I02aljjPfa2vpw2Dkjx28GxutpEeF0zq/dR0nJMnp6Yqc4p0JjbS31VVXz\nVHRCdKpq8uqyxt+Zo0cfnIHquo2on5YjhWOs8WrpsjeHfn4VVWjFEp27iRadCmrV27kUnZH3GOvz\n+gDqPY8ZtpWg3m898AGxRSdMzacVtS/pAeBb0x+uMPNkWuQ5w+NvvPFGCgsLsdvt/PEf/zFf+tKX\n2L9//zQGEh8RnkLOIb6E+UmkpyqWGIRwkbiooCBKZGrPF9ls9Pt8YefSKHU4ws75PZst6kOm8Tqv\n1NVRmZ8fczx71q7F43RyaWKCgVDkE2B
FWVnYfmbMyVwxnXFoAmDBgpuBmWllkUl/xkyONaacXll5\nAwCryor5G+8wd955iN/+9s2Ex2jzECn2tMebN79FX1U9P7/zEPe5vFSHxJ5RTE61fHGn3CPUeO/G\nPqVaBDOWUE2UKjwTfkPj9TZteiXt82v3kalnd7a92sm+CDH/717kJ9REok8l7AuXj/fPQG/UR1Cj\neFuZSiON15NTows1VfYCcDLG2OOl0mZDlDPydS6JeKwJ0ULDz6XA14AHUft0JkIT537DtilxK5+l\nspDkv4Yze/wsIMJTEISsIDJSGS/iYNxebFf7HRrFYGNtLR6nk+HxcQ53dOjnyrdaAbApCk0bNoSd\ns8jhiPqQabyO1+Xi9L33xhyP2+nkhooKAFaWl7OksJBypxNPSKiaNSfLd+9OKPpmUqROpzepJgCM\nUTqz02wz6c+YybFGwbX7jjupr6riyIZN1K2Ln9IZS6RFij3tscvl5ddrdnPY6Y5bNkQ735YtH/K1\nr704rb6Wxj6l8YSPcdw/aD0Ztsa08ba2PmJa9Cs/vwKnswKHw43DURI3apuMXCvCk8kXIdMj8hOq\nseBObIy/Mx7PCv1n875QioxqGrdF9uTU0HreFgAvhY5bCJQBawmPFEZ+5E2YETgHXIp4HDRsv4B6\n/xtQ5ydRUaFIrg/9vxLYnvkwhZkj+a/hjB1/8eJFDh48yNjYGOPj4zzxxBO0tLTw1a9+dZqDiY14\nPAVByAqm46mK17ok1rlu3buX4z09wJSPM1WS+UoHfD5WPf00iwoKODUwoHs+66uqcDsc+rEV+fm0\nDQ3FPU+8OdGIHHeYD9Xvj7q/ZONOlWz1u2XSn9Hs3o6ZFlOKPH5TSHTGcqxlSqx7T6U/ZCwvdKrH\npsr861OZGpm1SZmdwkDGdQOxi1VlRiyfZiLvJkAbcCuq6Pwp6qduY4RT80LagMnQv2zAguq97Elx\n/xLgI8K9uA7gBuJ7N7V1YUctRrQ9xj7CbJLN7VR6e3tZv349f/jDH7BarSxfvpwf//jH1NbWRu2b\nicdTqtoKgjCrxBNDjbW1afW/NJ4nkljnKnY4gOhU2VTGF6/CpXHfRQUFuvAzXqfu4EH9WKfFgm9S\n/eCTSgXcxtpalu/eTdfoaMxxG8dVmZcXdX9mVeZM97WZLTKp8JnKsemIyUyrqUYe/98cbm4bOMNy\nWwH5tY2Q5MN9OmONde+pRIDjpb9nEj2OxMxz5RLTraqsMjvVSyPXjXlfChgFUh3hAqmR+D05teM+\njyrMzhAuOhXgfOjncZPGahZfBl5JY/8bUe9fS5EuBa5B9W5C7NfduC48qCnMUlxIiI3H4+HVV1+d\n8etIqq2Qc4gvIbeJl7aZyFMVK4001nm0/bYeORIlkhIVCzGuqcjzNjQ3c7KvD4Ayh4NjnZ2U7djB\n2hde4FR/f1QBohVlZdR5vfp1jB/W8w0iOZXvPB9pbeXTxcVU5ufzVIwKuWE+1E2b4vpUPU4nncPD\n007DNb422eI7nQ1STX80tkEpL1/J5OQfJ9w3VlpqpOAaHThDadcxulJMvcw0VTOV1NR4v0NmprXm\nWoqsWSQqBgXJ/u5lWpFkrtEE0mFU8Wmcg8jcQWNvylPELpCkESQ7vJyxOA7Ee/+0od5fsWGbdm9a\nivQHqOnEMPW6R/bt1I4pQk1VDk/Nlc9SwlwgEU9BEGaMWNHDWFGTRC1QItuZLN+1i9P33ZewEi3A\noscfZ1VFhd465ZHWVnpGRth65EjCtNMwsXbpEofb2/XUWUVR6BlTPUOHOzv1gkMepxOvywWKwt51\n69SfQxijhfWHDnG4s5MVZWXsqKlJOn/GHqE/fPnlqAhpZCQyMhJrt1rZ6PXSOzqqR2Mz7Uk4nSiq\nWSm/s02q0beBgTP4/WoD+qKiK3E4ihLuGysyGhnxSjfyl2mkMJUIsHGNpXusmeMQIkkUFcwFEgln\nYxp
xBfAcU1HNyhjHLUctOFQNvBbjWjayI/oZWWW3HNXHaWyPshZVjK9AbQcDU0Icol/3yMi39nx/\n6Dy5+sWEMJ8Qj6cgCDNGLE9YLF9mrP2M2yrz83UBBqrQW+HxUGizsaOmRj9PpCdSo76qip6RkZj+\ntEi08XVeuqSLXVAFrtvh4HCn2kttRVkZe9et4/bnn+fC2BiD4+Nxz20UgpFjbmhu5vm2NrX3Z0UF\newxRX2OP0I1eL3sTpOYm8nsO+/2meTSn4/eM5w1Mh0w9lNMhVR9oOv68VPdN14Nqhmc1kzmei9fH\nbF/t5RRhnXuMgvLnwMPEFs41TImpCqZSZzWBZjzus8A51IJCdwH7yA6RCWqCoZUpwWlFFYKtwBdQ\nxxo5BwOk94WC5octQm0zsyd0XLrnEWaKbPZ4poP08RQEISuJFZWMlVIba7+odiYhD2ORzUavz8fh\njg4cVmtUOq22n1bxVotcvtPfrx+vtVmJhTY+7fhyp5NyhwO3w8Evb7uNOq+XjV4vRzds4KcnTtDn\n8ySAV+QAACAASURBVOmiM7JNi4YWJTSOWUtZfeqDD6Z6f3Z0cPXOnXoaq9YjNJUIaay+o9p8mtmT\ncDrniucNTIfZr/qZPP1RI5300FT3TfXaxv0dDjcHD9ZNu7rsXLWnmS7Ga+7ceU3a9z0XY85+ItM1\nZwpjBduHiV+K0xgN/WLoZ2NU0HhcFzCI2j7kWbJHdK5FjWYaP48XAi6migVF3gukX6K0EdXLOYwa\n4dTWdKalUgXBPCTVVsg5mpqaqEkhTVGYe2IVpdEic2eHhvAWFlLscFDicFDhdOIOFQAyHptvtfLg\n0aN8rqyMm+12vU1KpIjRzvu58nJustn4n7fcwsOtrWGRSwvox3/xqadY6nJRYLPxPZuNu+64I+bY\nO4eHOd7Tw+HOTh5ubQ1Ldz0zMMDFwFTK1BNf+UpMMZYsLVijMCSqNX/pnrVrUy7qY7zGU2vX8nBr\nK9tWr+aR1ta4RZimQ7x0y0SYUZwom4vORKaHJnqPmslU0kwLHM1Vexoj6UQhtWvabEX4fOd1AZnq\nfWfzmorErL97yed3dgoVpe5LNaaTamPKR43o+VBbhWiRPWNrlQDR6azTwYIqwl+I87xWNTeSItQ2\nKM2oVXcJjVvrqTmIKg4row+dNm7UKrdaFeDEa1o+SwlzgaTaCjmHvFnmNsa0Sw2P00lvKAIZmYoZ\nmaa5bfXqmCImXjqnMTX0/YsXGQgJxXKnkwuha942NETTX/wFn921i66REcYmJii22xkPBrEoChd8\nPpwWC5PBIEHgS5WV7L3jDrYeORKW2msFbq2sjPIyxkov1sa1oqyMRYWFOCyWMFGdbnQyXmsZM9Jc\ns4FEqaTZljI5V+9RqabxxpuveHOcyvymkuqbynnSaaeiXXNsrJ/OzsNptyIxu6XOTGLWmko+v8na\nl5hFvPTPVFrD1DAljkEttrObqdYqt4Yem9U6xYNanCcSK3AWeDBiPEa0scGUZ9MFDMXZJ5J0W+Wk\nnlYrn6VmH0m1FeEpCMIsowmuErudi4GA6p10OuMKLm3/IpuNm6+4gkUFBTF7YUbut2fNGh5pbeVU\nXx9nBwd5ZdMmvtvczOGODlaWl9M9MkLn6CjFdjsnN2/G63Lh3r49LIKpoQBWRWHc8F5W5/VS7HDw\nn++9p2+zMPVRR+vhqfs3PR72rF3LzXv30jUygs1i4aYFC8KipPHEoxnznaqYzcVCQJdr78dIjELq\nB0533I+r6c5XuvvHE5jJztPc3MAHHzyF399PeflK7rrrN7Pmb71cSP7lhFl+wOn2Fq1hSsTFE2Sa\nOAa1sutypnpZPkJ0L89MsKPeQ+T5lNC1J1E9mu8DHRH7lKJWn53ybDawijMsoIATNOLHnVTg15B8\nPoRcQYSneDwFQZhlNI/gW5s3617BPWvWxPUNNtbW4nE69Wjgk++/H
7MdS0V+PjZF0fdraGnhzMAA\nx3t66Bob4+HWVv06v7nrLpYWq6XqBwMBlu3aRdmOHYyMx/YEBSFMdAI0nzvHzrNnw7ZpolNLqT0z\nMDDl3+zspKGlha6RES4GAlwI+VS3Hjmi+01j+V8zbV+SriczXrubbCaXUiZnEqMv1Oigi3Qvpjtf\nQ0PqOrfbS7jppseS7h/PO5nsupHVgdPxt6bjh72cSe4xTtS+JB3PZ6IVqBHr3Mkq3NagptCuBzai\nOsaOh67zbcJ7edpRo4vTQUvbDRAtOhcBt6D6NvtR7zNWRHQQ+AxqJBbAzRmu5BitHMBPA4tJHlU2\no1XObPl2BSE5IjyFnEN6T+U2mrjyuly6yErUw9PtdHJDRYX+OBASgJq404TZ821tujjUivyE9dC0\nWqk7eJDhUJXYtiE11ckK+E6fpt/vJxAM4rRYuL68POl99Pn9+CfDU7lcdjvrlyzh2tJS6g4e1Asa\ngVogaNvq1dgt6tuuBfBPTnKgvZ2rd+5kyX/9F7c+91yUwMxUCCaa21iYUQhotsm23o+pvEfN9EfB\nRB9X052vwkIvAIHARVpbH066fzyBmey6xuNqanYkvc7lhFl/96JFerKVmIqAjEUqginWubU+lbEE\nmbHf5+9Q/ZLGL+N+DbwV+tkOrCLc95kMLVCzErgt9LM1Yp+VwDuE99gsI7yQkXbNCVRxugxtbgtC\n46jGwza8wFYSvwMkmg8jiV7H2K+hfJYS5gIRnoIgZDUNzc0MBgI4QoJtZXk5VxYW4rRY2HrkCPva\n2jjW1aW3HSl1ODhxzz24nU4q8vPxhITt2YsXdQG36umnGQztPxFxva8tWcKCUH/OVLEp6geWodA4\n24aGONbVRa/Px6KCAr0Krtvp5LW772ZxYSGrFy7Uj+31+WgfGeF4d/eUEH3iCW7du1cXr5p4ziT6\nmQpmVsCdLcyKeM1mXGC6H+dTJdHH1XTny+FQP2SnGiGNJzCTXTfbvkC4PEi2EqcbcYu3Ao2/ZZpA\nM547UQVWbSxFqG1VDgDG96gxpn5zi1FblfQBi1EjlLEwCkstq+VK1KimhymB+TnUCrS/CY2tMfR4\nI2qqr/aXpIQp0arhQ5vbRhqpp55DLMOtR2qvJv67TqoVaRO9jmZETYXLhZ07d7J8+XKKior4zGc+\nw0svvWTq+cXjKQhCVhHpMVy+e7few3NRQQHv1NdTd/CgXjDHrih6FNSCWjRoPBjkeo+H0YkJvaJt\nZV4eXWNjVHs8OC0Wvc+l2+HQxd/K8nJ+c9ddACzftYuusTF9XEU2G8MxUnEVpj6uACxwOvmCx8Ph\njg48TifLSkoodjioyM8P86YCNLS0cKi9nQG/Xz++0Grl0kS4HF5cWMjbmzeH3XesQkHZVmQn16hh\n9txUs1XCxQw0D6XVms/QUFvY+pI1l+skW4lm94CsYeq3rA5VfCY7dwPwPDCKKjSXh85RDTyFWuG2\nK3SuAKqYLEZNg60GrkUtAvQuagQyiCrGalArzx4L7T/I1DxobU5AFa5vJxijNodam5cS4AHgCKro\njDW3xp6bw6FtmbzrJHodpY9ntpDtHs9Dhw7xp3/6p+zevZsbb7yRc+fOEQwGWbRoUdh+4vEUBGHe\nEJla6jOIsPFQaqvWp7LYbufGBQv05yeB8z6f7qk09rOsWbRI7ek5MsKr59Um5FbgKwsXsqykhDyr\nFYfFwuefeoq7Dhzgc+XlrF+yhCWh6Gq8N8vIt94en49jnZ2sX7IEi6JwvKeHA+3tvBCKzGr3paXA\nVhvSiAHyQqmuJaE+otUeD29v3ozb6UyaBit9CTNjNuMCqSbQZQNapHJoqC1qfcmay3WSrcRMekAm\n83BuT/HcZ1CF5UXU1iWnUYXhIdT+l6dD97AqtP8EqujUPJRtqD7QXuBroe2ngBeBvaFjTxI+D8Zx\nJhKdMDWHH4TG4w6du4v4c6sdc
7PhOpm86yR6HaWPp5Aajz76KI8++ig33ngjAAsXLowSnZkiwlPI\nOcSXML+JFFfXG4RZz9gYDS0teF1qwYjBQID3Ll5kQV4eoApRjRVlZbxSV6enjZ4bGaHX56NzZESP\nkE4A+z7+mOMtLYxNTNB6/jztly6pfTs7Oii026lyufBNTjIYp/BQLALBIEc6OsI8oFq01ON00jk8\nrKfL7omIWg4FAtR5vWHFl7SUV2Ma7COtrVFpt5pHbo/zh/zDpftZv38/Dx49OuPpufMFs8RgKu9R\nufhRMJZ383Iq7NTc3MC+fTXs378eny+zZOx0zzVzf/fMWomxRKYx/XMVU4WBNpLeb1mkP7MHNbr5\nCLAQqEIVlu8a9lnJlGDUisAVAz8DPkEViDB1/17C5yGdd4N4c5hobrXn9qRxnemMIT7yWSr7aG5o\nYF9NDfvXr8c3kP57TCbHT0xM8Prrr9PT08PVV1/NkiVL+MEPfsCYIfPLDER4CoKQVUR6DPesWUNl\nyHOpidG24WF9/56xMW654grqq6o4uXkzdV6v7qnUChjdvHcvL4VSVItCwhbUiGdktVoNh8XCsc5O\nPVU35j5K/IwS3+QkYwax6p+cxGGxMD45qUdBtchnucFL6Z+cxG618tMTJ+gZGYlb9dYYGb5m507W\n79/P9at3UFVVj8+9mpbuXg60t3Pg449zrkrtXHGyuYFv7KvhZROERTLMFDGxMd+xGsuDORu+zJmf\nq9QwRnd37rw6o/HMv0hxLI+hMWqopbQeRjUopLNWGlHFqpbdokUH/ws1qtgPdDK1ziuZ8mLClMgc\nBD6L2ucz2e/FbH01lItfQQkzxcCZM3QdO0b7gQO0NKT/vpDJ8d3d3QQCAZ5++mleeukl3nzzTU6c\nOMFPfvKTtMeRCBGeQs4hDY/nB/HahERWYXU7nZy+994wMeotLNT3L3U42F5Tg9vh4Oa9ezl27hyX\nDL04G5qbef/iRb30Q5HdrotTlxYhXbYsanzjk5P0jI3pwjRSYlqBP6qsxB5HfHqcTr2Crba/f3KS\ngdDYtCq3AK/ffTfO0L42ReHQJ5+w++xZXTBeHRKWxnnSIsNFNhvnfT4OtLfzg9aTrFmzmyK7GgGu\n9nj4YqhC70xUqc201Uu2YZYYSOU9amDgDI91LeGH7ctZvftnMzB/5pcvilUcaDZamWSLSNOiuzZb\nET5fb0bjSTdSnP1/92IlqmtRw2tRi/xopOtxc6OmxL5LeHQwuueyyirChZyxAu0YpFTUZ/6T/Wvq\n8sNWEHpfqK5m9bb0M0gyOT4/9AX/D37wA6644grKy8v58z//c/bv35/2OBIhwlMQhDkhnTYhRjHa\n0NzMqVAKiRX4QkhYRfbM/OLTT1Ozbx9PffCBLjqtwCt1dTy7bh17162jwJCaG8lkxOMg6OLQqihM\nAMfOnWPt4sUsLiykZcMGFhcWcrfXi8fp5ILPx1Ao4mlTFL0qr8ZVLpcurr0uF13f/CaeUGGkgUCA\niwbx3BsSlvcePqxvq8jPp8Lp1M9rFJbGqPGetWtnrEptLvb8TMRspo3abAV0s4D3WMbvRj0zMH/Z\nVckyk6il8XWxWvPnLPqpRXevuOJmfTzTXSfzr4JvrNRULZrXxlS7kRJgxzSvERkd1ASlhfB+nY4Y\nY6s0XB/Uoj69qAJ0OdHiM52MAemTKZhDbWMjVfX13HnoEE53+u8LmRxfWlrK4sWL075mukhVWyHn\naGpqkm/q5gHr9+/nQHs71R5PWqKoZt8+vbKrxtL/n723D2/ivNNGb1lf/rZsy8QhBgU3hKYfCU7c\n0ha81tZOKSbUboKSJu1F0rO1djdtt/tuN+w53bNnu233fa/T9Lq63Z7Tbjh9NyRN/YKTNIEU3BQT\n/FGSOk1DIF+NuyTQGjDGIGHjD9mY3/lj5hk9Gs1IM9JIlsxzc+nCmo9nnueZkTT33L/f/SstRXh
u\nTimpAsS7zTptNmxZuRI9IyOYW1iAx+3G9SUl+N2FC8A772iqnjxYmZaHhoYQmp1F7+nTcX0PDgyg\n+/jxGNIIALVFRTg/O6vkljptNtx7ww0xLrcetxsrfvYzjExNAZBupdTk111QgNkvfSluHpjrrVli\nuXv3+zE9PYqCAifuvPMVlJX5lHVqd+FjQ1+Ncy9N9RzmKph7a1PTjrTIgJHvqEgkjKbuH+G3M15l\n/rTmOHXklpPl3r1+jI5KLqH19QG0thp37+TPy/PPd6TcjlWw6joxg/z+3VO7vvoSb24YJyGFzf4a\nwJcghfE2IDbMFpA+B29ByvV8HsB3IIXn9nLbqB1l/TDucW1m29xBfl9T+Ylcd7X953/+Z/T09GDf\nvn1wOBz4zGc+g09+8pP4l3/5l5jt0nG1dSRaKSAgIGA1GKFx2u1o9/mw0+83RViKOdfXi/Pzkro4\nOxtn/sN/JfJlWGZkl9zzkQguGAxvdNhs6ONyRsORCIKDg9jR1KSosMPhMI5duBBHOovtdvymowM3\n7NqlLFtWVIQ9J04o2960ezfevuce+EpKFOKpJp0A8PJnPxs3D+mQvunpUczPXwQA7N27AZ///J+U\ndUzNBKSyL1+YHlZu+AcHg2ht7UZXS0vMPKihJq+5TkxZ2Gi2jvWru78eM38spBSIznHqkNShXDkH\n6ajJ/HnJBTMjq66TIIIYxjCKUYwudMGTAw8IMoMuZOYhiA+SURAgmfSojxGEFHJ+DBLRBCTS2Q3p\nwcxNkHJEtaICzEQM5FZ0gYBAqvinf/onjI+P48Ybb0RhYSHuuece/OM//qOlxxCKp4CAQFbBK3Va\ntSjVUN84f+3FF9Hzxz/iA5WVqHS7cW52VjEAsgMokOt68ornNYWFIADnZmeVZWpFlEeVy4Wpy5cR\n4Vxp230+PLtxY1x/tg8NYec77yhqJoPDZsNlIqnkS02NUlP05qoqjE5PY0zlFBeor8dLZ88qxPOD\nHg9WV1Tg6zffjNv378dQR4cSVgwgjvxqzZWaZKjX79lVh0hkHHZ7Me6++60YxVOtZr548LMYGemB\n19toODzQ7LnOZaRD4NjtbzGkW3C9mdu/v830HCdDrpwDq1TCxVAbMwU//OiXlbIAAujOE6Usf+BH\nVIkE4mtcakUFsE+rE0AJpLDgZNdZutEFRr8hjG4nkKvIdcXTKITiKSAgkJPQullPVotSDbXyNjY9\njXORCPpHRxGor1dKqNhtNiwQYYEIdSUluMjlWJ7VsAP3ut04F4koyimP8NxcnOI4cOYM2vbvx8Tc\nHA6PjSn9+cXJk3GkE4i65U7Mz6P39GksKyyEr7QUZQ4H3lIprWwu7ujpUYjnDRUVeGbjRgDAzF/8\nRcz2/Lzy749duKCEGwcHB+NIhnou/+edr2Dv3g04cM0j+Omhoyh2vKmcJ7Wa2dLSZfqG3+y5zmWo\n584MgWM2P4B066hHL1KZ42TIlXNglUqYTVU60yiWlbJGNGLHoiplmSY0VrQf28bAwHYDYelMiVwL\n4HpIdUP57VjOKA/+0xow2FetdszA6DeE0e0EBHIXwlxIIO8gak/lD7TMZ9TlUpJBfeN8fGICgFSz\n8+F165T2/vzaa5XtXt+6NaaGpvqLrqG6Gr+9804E6utxdOtWOP7wB2WdQ8elNjQ3h56REYV0Mlfa\nuYUFze3VGJudxclLl3B4bEwhpaUOB9pWrFDmotzlUsZQ4XLpOsby83pTdzfeunAB/aOjCumskOdG\nDfXclZX58PnP/wknpi/HnSe1u3Aq7qVmz3UuwwyBU39HGQ3Ey4RD7FI6B0sNXehCAAEcwIGkYbaZ\n/d1L7ICcfjkbKxyWY9sw5nTMDI8OAXgGxkjkYoTNGj2mke2MGx2JeymBxYAgngICAhmD1s26mtAk\nA3Nv9cikzFcmuRdOzM/joaEhpb0nb78dq0pL4S4owH0HD+J
DVVVKG7x6aYNEwD6xZw/6T5/Gjbt3\n4zJHUi8TaeZXqnHswgXUPP44ktHOKpfaYVEKCQaAS5cvo/fUKVyUCWNXSwtWlZXBbbfjuZMnFTJ4\n689/HkNCi7lapKMzMwqhLLFLLV+U50YN9dwx6JEqM+VStLY1e65zGekQODOl6K1GqueAkY3v7m/D\n7ZFw3vh15krNTyPwwINudOdAbmdiQpN+ORsryFxsG8ZyfVOpkbkYn1ajxzSynfVllAQErITI8RQQ\nEMgYtPIQjUIrfDRQX49LsvKoZarD57PVFhVhdGYG5U4nJubnUepw4JLKgCgTYLmdDO0+H4bOnsWo\nHO5rB1DucsU48BYVFOCjy5bh+MQEJubnMcGF/jZUV6PY4VDyWGvcbtzi9eLY+fMYm51Fo9eLp26/\nXXLbjUTQe+oUGr1eXJybw9jMDJwFBXjlzjvhKyvTdaHVO09m8gNzJZdQwBrwLrSv1Afw/7V254Vf\nZzruuVcvEucopp97bIXDcmwbkcj9GBzsQVPTLXC7n0yj3aUG5iCszmcVyAWIHE+heAoICGQQ6She\nLJyUEbRGrxdFdjsm5uZQW1iIp26/Pa5dXrn7TUcHAvX1OLZ1KwL19fjYsmUx25Y6MpPi/ufLl6NI\nVh6dNhtGp6bwoepqtK1YAXdBARaAGNIJQKoJOjqKkenpGNIJSPMwJIf3ljgcOCeTy49fc42i8G7Y\nswcDZ87gt2NjWFZYiBvKy/HuxAQuzs9jPBLBhr17AeirdnrnyUx4KdvW63bj9NSUIZVUIHfBFKWQ\ntxFPNO3IG7/OXHC9XSykrvYmVgbN1xxVh3smat9oaGhsG273SbS2noPb3Quh7PFYzPgKAYHkEIqn\nQN5B1J66OsDUuYbqalyIRFBXUoK3QiGFtBXZ7bjV60W506kY4oQjEdz6859jeXExyp1O1BQVKbUy\nf9zUhBt37cKc/H10+3XXoffUKcnZ1kAdTyP4QEUFGpctw7PvvRdX3qXa7cbFubkYNRSQyKmWOREg\nhelqGR1Vu914v8cTMx88vG43xmXSZwdw/N57lTBbI1BK3hQUoNTpxKMGSt4w1fT01JSizl6tyudS\n+I5i7rE3N+3AV9weU1rVwEDQwnqk5pCK620+lDUxck3ljtrrR+ZrYAplL10she+pfINQPIWrrYCA\nQI6Cd1XteP55JYyTgZUnAYA1u3fjnXvuwfahIVyYncV7k5MApLDUczIB+8jPfx5D4EocDlQ4nQir\nFMZUUVtUhMMdHVj+xBNKrVAe5zn1rwBS3qmroADuggLM64QAT12+rJtvysYOQAknBqTQ3OrCQvSe\nOgWnzaaE2ZoB7+AaqK/H9qGhpKVEmGratn8/gFiV1EgpksUkKwLx4N1jzdIXa+uRmkMqrrfDGFbK\nmgQRzNuyJrmj9maqBibvbPtjAA/B+tqgySBKmggIpAMRaiuQdxBP6K4O8OGfLIyz2u1WnpbZuW3H\nZmcRHBzEcydPKqVRKpxO3CLXvSyVQ1SZ2thQXY1ylytKOtNUO20A3r77bmwfGtIknWowMrlw5YpS\n8oX1k2FtVRWucE8UPXLZGK/bjQV5+YcrK9Hu8+HY1q1o9/nQ4fPhhTvuwJOtrQjU12Ns27aY2p9G\noQ6x1XIn1oNWOK+R/dM3MEkPVprSsO+ofDK6sRKJCJBxz03rEEQQfvjRhjaENY6aO2VN9GHkd898\nSGymYCbc08y2vHHOhwD8CsBqACfT6axJLB3zHnEvJbAYEMRTQEAg58HIjMNmA6NpFTIRA4AyhwMP\nr1uHCEf67HJZlA6fT8nvLLHbsaywEM986lM4KauiPHgyW66TA1pss6FtxYqYZZUuF+7o6cFT775r\neEwFQIwj7u3XXYc3AgF0+Hxo9/lwaMsWlHB9KHO54HW7cZkIYTm8tnj6GIILP8C3XvkNwpGIMv7t\nQ0MYm57GfQcPKnmWZhx
q1eTRTK6nVr6okf0XW62xjPgGg4DfD7S1ITz+VrTNbwWzy7YWEYkI0GLc\ntjNFswc9CGoc1UxZk1xGJsrxpAYzbrJmtuXV0QIAFwGMA9iA7D3SyGa5lcV4TCMgkFmIHE+BvIPI\nS8gv8GGWfM6lXshlonb+8/e/V8ia02ZDicOhqJZetxsEKaSVd7AN1NdjR1MTan/6U0TksikdPh9e\nOXcOI9PTUmOqHM+bystxXWkpek+fjutHAYCm2lq8eu4cJg3W8DSClSUlmLtyBZGFBdxWU4PlxcXY\ne/IkwnNzuLmqCmUOh1JDFAAKMYfr8Qd4cBG/wcch6a5A24oVmJqfj3OY5V1na9xuNNbUxJyDROGw\n6bgTG90/ldw8K5G+c6cMvx99/f3wA9j/r7UY8Y7Ce74Rm799AO4Zj7k0tiWIxcjMa0MbetCDRjTm\nLbm8un739MJZeWfb1QDGMTBgRzjcCIdjGC0tIUhfL5n8kFnh0GsUfqSW/2oMV9c1lRvI5RzP0tJS\n2Lg65jMzM3jwwQfx7//+73HbihxPAQGBnMVzJ09idGYGAFDtcuG8rNYFBwcV4xkjOYDD4XCMQjhP\nhGmZXJY6HIqZTl1JCd5fUYHe06cVh9X7Dh7EHFers+/MmYR9/v3EBMpdLk3jnytAXL6pHZJ6yXI3\nU8HU5ctKHmjvqVOocbsVZfOtCxdQLtcDXVtVhT9NTeF8BPg9PogyzICRTgB47fx53CLXMOUVRqY6\nsrBjFvbKzgGf18kvB6IqZqowsn8quXlWoqWlyxriWywrIo2NaPnSUxg89hCa9u2QSGe+WMNaAD3q\n0IXs3bazPjixB+3oxE78W16SzqsPTBcHpLPIvhc83N+vANiAcPg6jI5KNYkHB4HW1kx/yPg+ZBrZ\nVFcFrnZcunRJ+Xtqagq1tbW4++67LT+OUDwFBAQsg5pAbh8ailEplxUWKrUn+RxAIzUgmcstj2q3\nGxtqa/Hbc+dwenoa5U4njm3digqXC7c+/TTOz87GucuqcXNVFd4OhXSdZY2gyuXCBQ13WQZXQUEM\n8dXcxmZTHHdtALR6U1dSgte3bsV9Bw+iZ2QEN7ou4rrq1Th0RlJCCwsK8Pt77kGFy6UojMwYyGm3\no8ThwNT8PHpPn445B+/fvRv/dfEiFgB8yOPBYHt7QmXTyIOCqxbhsBRuu2MH4JFJTjZFEg0shnGT\nH/FaTbZtWbT6IKCP3DH4Mq6LRyMVGrB580q43TsTbp8vGAgGER5+C47i42jp+g3cHt9id0nAAuSy\n4snjsccew7e//W3813/9l+Z6oXgKCAjkBNSq2dj0tEI6PS4XXv7sZ/HQ0FBcyKVWDqCa3DCXW75c\nx/lIBIdHRxXToIn5edy4ezeG77kHK0tL8Z78BM9hs8WVMWGoKynBssJCzbBaI3DYbLipshKHz55F\nid2OKY3wW5fNBp6Waimpc9x7rZ6udk3hmyVP4sWDP0OV629Q43ZjZfVN+ElzMx789a/x2vnzeLG9\nXXGw1VIyA/X12On3x4W9jnLn6cLcXFIiqT7PHpdLEFEGjwfoVlGcbIokGlgMl1ktrUZPx7IKauJU\nLBMnoRcZw2/Dz6FM/lwfHPwi2lqfWaSeGNfFLYtUyDGEh4cx2n8YADAYfAit6u8UgSWJdB/+WPXw\n6LHHHsO2bdtS2jcZhLmQQN6hr69vsbsgoAM1gWTvK10uvHbXXfCVlcUZzwCxZjbbh4bg37sXMEr3\nVgAAIABJREFUT737ruKEeuOuXbjv4EHsaGrCLz79aRTZJRsgO4DxSEQJSQWAuStXsGHv3phjr7/m\nGmV9md0ec2xXQQEK/vCHpGMrQNRZlsdlIrw6Pg4boEk6AeCSarndFv9AkPWKN00qtNlwXXExql0u\nFNFFjI0dxv8YqcYz7x3HuUgEvadP46GhIezbtAmnvvCFmLIpzEzozVAIQPScaJn/zMr9K
wDQs2lT\n0rlIx/X2akCufUcthnGTlldppgMH1QZR6j4EB4Lw7/WjbX8bwnnmMpyNa2rCIYX6v+cFnmhaTFXG\nuOHQ4hoqZc78xyGH7HsbG9G0IzOf2Vz7nhJI3+TOCpO8kydPYmBgAPfff39K+yeDIJ4CAgKWQe2G\nyt6/e++9CWtJ8mSIkZiQTCbVOYketxu3yiVCGJ1rqK6GUyZzxXY7fv2ZzyjHri4sxJHxcYk4ulyw\nq4hnKBLBb8+di+vT8uJiLCssVN5fAXRrfi5cuaKpUlbJeZnqZc6C+K/eBUjq69GtW9G2YgWK7Hbc\n4vVi+vJlnJ+bw7H55XgCX8AFx/swTRI5rXS5dF1i2TyORyKoKymJIfVqZ9u1ck7oFQDfOXJEsz0g\nSmbnr1xBh8+XkuutQPZRVFQDt9ub1ZtzLepgpnBGKlATbHUfhsPD6B/tR89ID4KLULJnMZCslAyP\n11puwyv1wMDmtfiRe2fGerR0nFqfQ9Sj+QFLW27p6kJ9IIDNBw7A7Vk6Sq5AYqT7kNCKh4w//elP\n0dTUBJ8vM+HdIsdTQEBgUcBCaY9PTsJXUoKTly7BV1aGd8JhjEciWFtVhevLynBJzklkxPHVu+7C\nXw8OomdkBCV2O0qcTrz82c8CADbs3YsN11yDM9PTStjn9V1dSm1PPahDcddWVeHQli0AgJt278bo\n7KyyzobYUigFAApU+zttNthtNly+cgV8hul1xcWYnJuLyTtlDrylDgc+ds01eFIm4HzeKwDcVl2J\n/7P0GXzl3Cacmp6Bw2bD7+68U7dOJ8uJ5XM59XJptbbVgt7+6breZgq5k7O2uNi716+E2tbXBxbV\nxCmTSOaM3La/DT0jPWj0NuLA5gPwXAXXgx9+9MsBzgEE0J0gwDmMMIIIYgd2ZNCEyY/sZ95mKru4\nCkBI/rsDwGKFJgvkC5LleKbr7m6FO/yNN96Ib3zjG3jggQd0t0knx1MQTwEBgZQQDALDw5KJZ1dX\n1Ecl6X4y4Tx24YKiaqrBTHQ8bjfCkQhqHn9cIXZs3epduxQnW54EqcnRk6ramg3V1Tg1NYUxjkzW\nFBbiHPfeV1qK60tLcXxyEtcVFWFofFxZV2a3JyyjoudsawdQ6nQqJNhhs+FTdXX40YYNuGX347h4\nRVIx7/TV4emNbQoZbKiuxsrSUuz0++Fxu7Fhzx4lx1XPiAnQJoN6BJPflpkRaeVrGiWouQKjhGup\nE1TLSsXkOcKRMIKDQexo2nFVkE4gF0vJpFtQJxUS6UdmyO7tAHoBNAB4wWBfBK5m5Lq50IsvvohP\nfepTOHv2LEpKSnS3E+ZCAlcVRO2p3MDwMNAv/5YHg/F+Knrgy6sAUk7jxfl5lDudmJifR6nDgfd7\nPPjaiy/iVyMjiCwsKMVCWBitx+3GR2pqFBLEh3eysE+v243TnD04Q3VhIZ751Kfw0WeewdjsLNZW\nVeHs0aPAihUAJHLI18EcmZqK2Z+RTlZChY2hwGZDaG5Ot5zKAqCQzgqnE0e3blXCj9/nGMOrc9fB\nh/cwef4U/Hsv4w8XL6La7Ua1262QTgAol3NA2bjVynG5y6UQRjUpZQZNjIxqudMmKqui3j/XYTTs\nyGrzHfYdlcj9NxnZtZIMLyUDlnS0K4/bg+48VXuN/e7Fz04XurKgYppBugV1UrGoylR28ZNYVLvq\nNCHupQTUePzxx3HXXXclJJ3pQuR4CggIpASuXCHMeB9EOLXQXVCAgc98BoH6ehzbuhVetxuXLl9G\n76lT6PnjHzE6M4PQ3BzmiVBot+Otu+/Gvx45ouQZ+kpL4S4owH0HDyo5i10tLVhVWoq5hQUcHhuL\nO37vqVN4aGgI79xzDwL19Ti0ZQvGOCJ8aX4eFxOURgGAQrsdf37ddQAkc6L3V1QoNUWdNptiFGTX\n2f/Ply+PyXn9B+9ruA2v4NtVAzi2UI/+0VGcnpnBe
dlAqObxx1H4k5/gY888g3kitHP5lYwojkxN\n4fDYWEKDH4/bDY/LhY7nn0fb/v1468KFOFOgRPmaamMilvOpzhnNFbS0dKG+PpBU5cuU+U4i06Vk\nJhBWmEQwLK4Bi7VgtKMH0i1/MqgzCpdShmE84mfHAw+60Z0C6czUTBk3DtJGKiQyU9nF6Y5FQCC3\n8B//8R947LHHMnoMoXgK5B3EE7rcQFdXfLlCPfDKz83V1eg/cwYAELlyBd85ckRR1XgV0+NyKSVO\nGqqr8cIdd8Qpcl63WyGXK372M9htNjgLCvC+8vJoKRVIamOFy4Xw3BwavV4U2e3oeP55hWTZ1qwB\n5LARp82GIocD8/PzUt1LjTqgQx0dWFlaiuDgIPpPn44JxeXLpGgF5DZ6vXhUdQ0/X/IVzLtfxRNF\ndyBy6ULcPpeJcJkIQ7IJUm1RkbKOjYEpx8kMfvj5q5XNk7xuNwZOn0bVzp24uboa7T5fjMpqpC2m\njuZSjU9GuJLBakWQfUclIvHJyO5iONFaiUxl1ZmlHWp9bAyZLemSKah/97QVcSuVvUwXv0n1CklF\nMV3kekY5CnEvJbAYEIqngMASwGIoT6xcoZHcTl758bhcCnFS35DzrrhP3n472n0+dPh8CukEYm/m\nXbJDrdNmw/Tly7g4P4/xSASvnT8PQFIjFyDVxQzPzaG2qAgHNm/GycnJGCXKKxMwO4Cbq6owkcSM\naMsvf4mburvROzKCC6r5ZuVQtNTO5cXFmrmRxydncCxSiV+dGoVLdry9uaoK7T4fHBqlV0ZnZhQF\njc3Z0a1bYxyF9a6J4xMTAKSQ3ec3b0agvh5rKipwdnYWobk59J85A5fdbogwahGrfCytkilFUO3y\nzCOZGmtUreWRS+VCzCqTRmFWu1JTsePy+woAD1vYr+xBUiLD4ac0FHErlb3MFb+RnHZ3og39CJt2\nhBUqo4BAPkMQT4G8g6g9FY9cv9nnCcpOvx9v33235g05H8rpcbvx7MaNeGbjxpht+Jv5VXK46jyR\nkltpB/DyZz+LQH09PlJTE1PmpMBmiyn/wfJAJ994A4CkUL4qk1YboKl2AsDpqSklDJiZHhXb7VhW\nWKiEDm+49lrpmNx+H6mp0SxpwvpT6nCg4Gwlqv/kw7KfbsHOdRtjapDyGJueRjgSwfahIYxNT+Ov\nVbmXz508qVwTD/T1KUT0kkyqJ+bn0fqLX+DS3ByKHNHgl4bqasMlUbSIlSitEv2O0qqZypCM7KZC\nhnOpXEimaItZ2qGmYj4AXxgI4i/2+vGz/W2I5Ek9z+jvnkTpHQ7JTTVWEbeSlGWu+M0whtGPee6h\nREIvEoEMQdxLCSwGBPEUEEiAYBDw+4G2NiCcw/cnuX6zryYoiW7Ik4HflxntMNgAvHrXXfh/3nwT\nY9PTeIc7aYUFBXixvT2mP2sqKnB4bCyGYJLqfy3wdJQplNMLCxibncV3jhzBsfPnldqh7Eu21OHA\nDz7xCc2HBF0tLUp+62jFGZw/a0fvXjeCQeDZjRtjQmsZ+kdHERwc1H3owCuxg2fO4Kl330X/6KhS\ni5Svj1rqdGqqy8mgdR4TqXwCEjIVoVAsh+c2ehuxY5HDczNds9Mo1FSsHMCy8DDWjPbDa0H+rFUY\nGAhi714/9uuQ4e/he3I9zjcRBtDSshb19R0ZdCnOnLJYLD+WkB5KfBjAo5YfwzyWdvavgECuQJRT\nERBIAL8/6twaCBh3bs02crWOYqYRjkRwU3c3RmdmUOly4chdd8FXVhZTUqW2qAgFNhtebG+PMfQB\nouVB1lZV4Y1QKKYWp91mw4LGdxdzs61wOrG+tha/PnNGqcvptNlw7w034Gd/+INmfqdDru8ZuXIF\ndgAbamvx7MaN2D40hKfefRehuTmUhasx+c074P3LIaxpCqO80IF3Ll7Eu5OTMW1VOJ04cd99uO/g\nQc0SJ5WPPqqQT
B7q+qj5UhplKUGvHmq6SKVciFamXabyM5P3JYhhDKMYxehCV0ZcWMMAfrS/Dd4c\nKy+TrPRPbD3OOnTjdeRruKlUL/SL2AGCBzuRG+PwI/v1RQWuNuR6ORWjEHU8BQQyhLY2oKdHcm49\ncMB4rUqB5EhmQmPUpMZMvcpE+wZ6e9F76hQKIJHOPRs34rO/+hUiV2ILpNx+3XXwuN3K8Woeewzj\nkQhsAG71evHuxIRufVItOGSCy74l3QUFKL1Qg4WaEMLzc8o2PCkuANB07bV49lOfkuZK46HD7b/4\nBXpPn0a5w4GJy5fj6oHqPawIDgzg5ZO/hX1hAh77AubLb0Wps9BSo6BUa8DmMsyYKuVSPVQ/4m+3\ntZZlpy88uQqgO0NHNlNkPVskPFmt1dyrx7nUkG590Vhk4yGKQP5BEE9BPAXyENmsPRUOG3duFTCH\nZKrPtT/9qVLvs8PnwzMbNxpum5GqIrsdJycnY8gATxBqiopwcnISM2+9he4vfxk37NqlEDxXQQH+\n7Npr4SoowMFTpxC5cgVlTide5+pvAsDJyUls2LsX1xUXK66zDOu83hjHW0AijXq1PrXQ6PXivclJ\nnJdDMvn6oYnUMjYHD69bh4eGhgyr4fx5KcUELqE86bHMIt1IArWj51eHji26ky4/b82Tk+j7+td1\nt82lCAWt221rb8GNIYggnsJTCCGEBjTgBbyQlZv1ZMTSj+yQ8GRk+Bd9v8Dj/sdzqB7nUgMrtmNN\nTc5sPURJB6KOZ/YhiKfI8RQQSAgzzq0C2tDLZ0uWl8rX+zT7Nc1yD9XutUCsEVPPH/+I/tFRvHzu\nHB4aGoKdc5Cdu3IFvadOocTpRGNNDQBgcn4eDw0NxRxr4/79mJybw6sywWyorka1ywUAGBofh1Nu\n0wbA43Jpkk69L2Lmgvu7O+9Esd2OMrtdIZ0Omw0Pr1uXdA58ZWWm8mkVoyNM4gqkcVS5XDg9NZVW\nTiJ/HTgrpDbM1oBlUNe4zAVzLf56/vsPfzjhtunkOFsNrVxMftn2LDlmD2MYIUiGOSuxMmvkKpn7\nrtUmSXqZhMnMpEpRmmI9TgFjsDanNZrH2ogdFrsCCwjkM9Imnjab7T9tNttZm832uhUdEhBIBvGE\nLreQzChFjxQkM6G5zesFIOUkVrhcMccwas6iRW75ZbdUV0t/r1+PHU1NWCu/Z/C4XNjR1KSYGGmR\n5NHpaVycn8c8EWwAqt1uNMh997rduLm6Gu6CArx21134+LJlAKIlVz7k8WB5cTE6fL64osoN1dV4\nMxCAx+2Gr6wMH6mpwSRHxi8TxZFgK9DV0oI7fXXwuRYwDanMzNTlyzh89ix6RkbwRZNOiOxcMXOj\nnpERlP7lIAKB1MPX1TUu2Tm9wXEeW2d/uChOpfz1fIccAp0LSGaZonW7zS/LFqnnb9R3YmfGjhN/\nXMjHjRJLfs5+DGtNklItM5Pq755UusSPNrQhLExzsoYudCGAQE6HRYt7KYHFgBWK56MAPm1BOwIC\nAnmIZDemespmMtXnydtvR6C+Hoe2bIlRLmsefxyPvvOO8n71rl26BFSL3Kprha4qLYW7oAAffuop\n/F5lXfyJa66R8jiLilDjdsMjK5k8nLKrbQEkZbb39GmUOp0I1NejwGbD78bHEblyBf/8yitKO2u9\nXrT7fBhsb8epL3wB5yMRxSm3wunUdJdl88jqelrlYKwm8R63G09vbMPKZR9SjsOXW0mmPqvbY9cH\ny3ttqK7Goy1NSiRBKg6v6hqXXS0tWO8+iS9f/jbCp/cuilNpLqmYPNKtp5kJx2yteqOJbtQz6Teq\nVnyDkEg3m7NGAJcsOA4bw5vye+urY2pDKl3Sjx70IGhpRVUjiD1zi0WCkzkGZwIeeIRCLZB3GBkZ\nwZYtW1BdXY1rr70WX/3qV7GwoGWVmDrSJp5ENAjI8TECAlmAqD2VW0h2Y5pqeQ3
+Rp4dowCS0sfy\nMO0AxuWSIFpKnBYZUNcKXVlaisODgxiZmsJFzgV2bVUVfvbJTwKQ8jjPRSLoPX06jly/cuedqCsp\nwTK55Em504lCux1j09MxJU2Ia6f/zBm47Pa42peVLhc2rViBUCSC+w4ejCFibB7/63Ofs7RcCV/v\nc83u3cox+fPWyKnPO5M8JecfRGzZ/d/htseuX1laGtNvvQcXiQipOizR43bjGzVHUIwZVV3DxUEu\nfUelGyqayuc3GVHUqjea6EY9XfKcCEzd3S73+SkAF+V1dgDjFh2XjWEcQB3iFdRkc5bqNbW4IZ+x\nZy4bJFiL3KpD8wUk5NL3lEBu4G/+5m/g9Xpx5swZvPbaa+jv78ePfvQjS48hcjwFBATSQrIbUyuU\nIHaMSlUbBVxOJq/E6ZEWreWM9LHw17VVVejw+XBoy5Y4YqhFrn1lZfjT5z+P95VLJjwT8/PoPXUK\n/aOjCkEusdsxdfmyoo6q22Hje/fee3FmelohYjdxRNBszqZ6rHpzwufSjs3OKuSPHW/70BBmLl9G\nbWEhnt24Melx2Vz58B7umn0Yf+3YhdrCQmXcjLiy/rwZCmnOidkQT7UKKiAh3XqaqXx+k+ZNquqN\napEutmwFgKPysrXInErI+syeolcCqJH/rgDwcJrt8w8AtAqhZIpcL27IZ+xjj2yQYC1yqw7NF+Ah\n6pcKRPHmm2/innvugcvlwjXXXINPf/rTePPNN5PvaALqtKKM4IEHHsD1118PAPB4PFi7dq0SW86e\nuIj34r2Z9wy50p9cfX/HD36AkUuXsLyhAV0tLXjtpZcycjzmdmpm/+DAAF4eHITbbsfz/+2/weN2\nJ9ze43Jh2cmTOH/xIrBmDRq9Xhz/7W+l2pcf/CB+8IlPKNsPT0xIDqPvvIOOt99WHEZfHhzE0QsX\ngDVrEBwcxIMOBxbeeQc1N98Me0EB6J13UHD+PB79u7+L6U9XSwuCg4O4+Prr8H/ve3Hz2VVQgLdC\nIeCdd/C+8nKsamxE76lTKHv3XUxdvoypG29E76lTWB8Oo9lux7P33x833u7WVvT19WHmrbeAqioA\nwOjRo+g4d07pv5n5HQ6H0S9bxwZdLoxNT8e8Z8dbdeYMQnJu6w1nzmCb/F3N2nv58GEclc2V7t+x\nA9+87TZ0FRRgOBzGzFtv4Z9uvVXJaezr68ODDgcmC0/hrtkfYGJ0BYqvvw9v33M7goOD2HblCl57\n6aW4/tXdeisObN4cc30WOxzAO+/gxooK7Lj//qTjdbs9cDgexEsvvZYznz+t998DcMnvRzGAB/v6\nUJqF43dnuP0uvx/DAGb6+vBPAIrl9Tf29WGbtEPs9i1dCA4Gse3KNrz20msY9vsl/8++PnQA6JPb\n62ff9/L+JX19+IKB+VP35w4D4ymWj38DgA/6/dgJoKmvD6MALvr9eEg+Xqrz1QWgo68Pfw/Ak+D4\nNwLYobHe7/encf67TffXmvcPApiG3/8sAA8e7HsQ05jGs/5n4YEnI8efwQzgl8jttr5t6EMfWlq6\nMDgYxJUr23L++yGb76VlL8PvPyr/3QHgmznTv6X6PhGCA0EMh4dR7ChGV0uX4XrMVu2/ceNGdHV1\nobm5GRcuXEBPTw++853vaG7b19eH1157DWE5RenEiROGjmFJORWbzXY9gOeIKM7KT5RTERBYPGSq\nUL0VMNs3fvu6khK8vnUr7vjlL3H47Nm4NvTqJGot59tl0OsPv+2q0lKsLC1FscOBifl5pR9Omw2f\nuOYaVLrdODczg8NjYwCkMNp37703qXIUjkRw0+7dGJ2djet/OrUi7zt4UHNOwpEIHujrgw3Ao35/\nXJusHa97Cmsq9qPc5cTE/Jdw+Oy47lwZqZOYrJalVsmR4MAAnjt5EpGFBdxWU4MnczCnMhn8yE55\njiCsqz/Jt1UD4KSqXT+iY6oF8BsADwEoUm2
7XadPiUq6MNgB/DmAGQCH5WXq+WP9PIaocml0jrWK\naWSzrIy1xTz0YOVVkZsIy7mkouyMUSxG8aSrF8nKqfj3+tE/KpfhqQ+gu9XcL0S6+1+4cAGtra14\n/fXXsbCwgAceeAD/+Z//GbedKKcicFXByFMjAQks7NE75cXp/7sJbW1SbdJcgFnTEn7717duhcft\n1nWbVYf/srDO+StX0OHzxRAdpqwlcq5lOD45CUAKy11WVKSEgh6fmFC2mSdC/+gonHY7jly4oCzf\nu3Ejtg8NJTXS8bjdePueezTDl82En6rnQC8k2uN249mNG/GMThgt229NxX4cHutFz0gPjk/8Xneu\n3r97N67pegb3ntqM0Tl7XHt6/dOaB3WI53A4jNGZGYTm5tB76tSilU5JhkTfUVaX59CDlaGbfFv/\nS6PdYm7bUUiksxsS6eS3fQJh5f1qXFEC+7qgXdKllmt3AUAvgOPyey+A04gNEFSHy5bDeIislruv\nVr9SgRFTnWTFPKz53ctktmxuQJj6GId0TVl1lQtYAXUaQjb3JyJs3LgRgUAA09PTGB8fx4ULF/AP\n//APpvuRCGkTT5vN9r8AvAjgRpvN9iebzfbF9LslICBgBRTSsH8zDve60dMDBDN0vxEMShFxK74x\ngA0/T+5S2tXSojjKqo109LZP5FDLq2I3dXejd2QEgQMHEI5EFAOd3tOnQUAMmelqaUHz8uVoW7Ei\nzrlWnRfpKykBAFycn8fL584BkBTO64qL4SqIfp2W2O0IRSKwc08E733hhRgjnwcS3Ejq5dWZIese\neSw3dXejaudOBA4cUNRDM06yrC/lLkbMG/Gbji/pkkZWXmY8EsGGvXtNjzERijl33YbqastcVrOJ\nbN3mpUtwg5CUzDYATm45s98qhUTwwogliV5I1KYKUi4j34c57pZjHAW4Sd5fi3RtB/A+1bFdkAio\nTT72YWgTYHaUCfnYibLX+HGqt7GqsuPiOsvyyNxjD2scaxOdDYHMwNr6pQLpoaulC4H6AA5sPmA6\nTDbd/cfHx/G73/0OX/nKV+B0OlFVVYUHHngA+/fvN92PRLAk1DbhAUSorYDAoqOtDejpARobU6+d\nmAx+P9DfD+Dv9gJrjIXQZiIUWB06G6ivR+/IiFLOo8PnwzMbNxrqi3rZpbk5qQ6lw4FLly/HtbG8\nuBiRhQWcl8mcDZLpUbHdjrfuvhsNTz+dtB+AfkitVvip2bnwuFzoPn5ccfA1Ou/hSBjBwSB2NO1I\n+INW89hjGI9ElDH7ysqStm0UycKC9TAwEEQ4PAyHoxgtLV15bT5kNFgy1dBNrXDVlQDOQCKdHkjl\nRdjVz0JZ2fFOIxoKC0gOrsxMx4tXcR63xhyvBhINqgHwKwARALchNqQWkEinG8Ckqr+VAKoBnIMU\njgsALM7ADomo8v1Uw4/Mhz63oQ096EEjGhe5rmPmAnr98KNfnskAAuhOaSb9yE4guoDA4iBZqO1i\ngohQV1eHr33ta/j617+OyclJfPGLX0RJSQmeeOKJmG1FqK2AgEBCdHUBgUB6pDOZSnb8EwPA3+2F\nfaW2S6kWMlEjkFfF1lZVYUdTE26TzXEaqqvxKGeskKwv6mVMYf3YNdco+5VxIbpvBgL4aE2Nso4g\nfcm+1NEBX1mZoX4A+iG1TMXseP75mPOgd2605mI4HFZIZ6XLZXjePW4Pulu7kz5FZeVlrCadUh8S\nhwXrYSmVUzAaLJmqjqEOV22E5CzLlE47oqSzElHdjB3vJNfWhxDr4Po7vA/FGIND9qAuhUQYewDs\nhxSmG0JsSG0DgHa5DTXp9AA4IrdxERLhnOL6toEbA+snr6ndD4lgA8Ycc1PV46xwlrWmFmXm1C1r\nHGuzFYguICCghs1mw89//nM899xz8Hq9WL16NdxuN77//e9behxBPAXyDiLH0zw8HqC7Oz2lM1l+\noa8xDKw
ZxUJRBHUlJYbq/tUUFcWFt6aLrpYWdPh8aOdKojzZ2opAfT1euOMOzT7d8YMfYGJ+HrVF\nRXjq9tt1Q3m3f9WNse+0Ajta0bbchw6fD5tXrIBXrgnK9mHlQwDgCoDvHDkCAEn7wZCIkGudB71z\nozUXfM3QI3fdZbk5DysvYzXpTAfZLqeQye+oTN+aHx8YAPbuRdn+/VgRicAN4B3umA3y35WQSJ+6\nFuUE9/4GROtjtgGoQAXKsQyXIT0QvyRv9yFIxI+hDMAnIKmg1QB2Ikp8AWAZAB8kFbQBwLS8vBjA\ny5C0sncBPItoWDNfp5PPV2UE+3qNsfghke4Ncv/fQmoZklbkHQ4Ovqz78MSaMNf0YE3ZFpFvmE2I\neykBNdatW4fBwUGEQiGcO3cOu3btQg33MN0KZKWcioCAQP4jmTpZXhhdb7TY/MnJSZyLRNB7+jSC\ng4OWhNp63O64EFaWT8iDD2f90+Qk3pBdaR8aGlK2Ve83PCyHE8ONVbcUYWVjGMcuXIgxu+lubcXb\n99wT40zL5otXLFkY7fahIQyHwzg+MQFfWRnKnU78uKkJDw0NaYbUJlJmSx0OhCIRhCMR6Vgac8FK\nw+iF65pxzs0XsHIKiVx28wVdsDZYUh266wuHMTI6ikkAhYODOCxf/3UAPgBJiWTOtT5VO92IEs9K\nAI8C6EA0ePJWSOqkGhcADEIiquchKZuD8ra98n6MpJZCIpf3c+0yPA/gZkQDNIMAxgDcB+B38t88\nGJllobor5DGVy+Ngob4j8v8sjzUR6TcSCp2Kt6zdLn0OtR6esBxSqe1gimGuZnoZv46R6/TAFFkB\nAYGlCpHjKSAgYAjJ8gvN5h8CyUtqWA1Gqo5PTmIiEsGEnKdZW1SE0ZmZpP3gc2Xd/8deHB6P5k/y\n+wYHBvBWKITjExP4jRxmy6DOGx2bnjZczgXQnudwJILVu3ZhXA6zTSdfVi/vNhiUiHfnNT18AAAg\nAElEQVRxsRS6nYk8YYHsw4/YrLpL3GfSs3kzet1updDCTZDCYQGJUD4D7ZxQJ4A/QCJxrFhDKaLk\nkUcxJCXxXwE8BmBOXs7yM9cCKEFsvqcbUiQBr4Kytj4CiRz75HZZn1i+NSAppXcPBLEsPIw5RzEe\na+nCpOqBRK081gpIYbyNkNTSh5CY9PuRPEvRyDZqJCpRlJkc0kS9TLRucRBEEMMYRjGK0YUu4Wor\nkJPI5RxPMxA5ngICArow42Cqub/sVnvfZ93Y0ajvQpqKS6le2ZNU+5oMLCR1ZGpKIZ2VLhd+09GR\nsLSHUo7lr/aj/d4IDhyIKrxrq6riSrQMh8M4fPYsRmdm8NDQUExbasWSvTdSzgXQnmeP242PyOEw\n6ebL6inbTO3NpDPyUkC++XKqQ3f5z+STbjcCkJROnnQCURKnzgmtBHAXJEWyDcCPIRFFLdIJSGZC\nHwOwG1HSCURNgV5HfGhWBPGks0DuYz8khfIw16dSxN7stAKoCw9jzWg/PjzSg8/JoavMnKgBkqIb\nAHAU0eBPH5JnSBoJhU4lXNrt9qC1tRtDQ9vjcj2tCXM108vcy8XMHedgAQGBRBDEUyDvIPISzMFM\n7UfN/TNIONQkKt2+JgMjVRUyybMDcNvtuOMHP8CluTnd/Vi/ekdH4PrfBuHxRG/QD23ZgmdUNTr/\nINf1rHA68fC6dTFtsf0+UFmJjuefxzwRfKWluMnjicsxNQOj5WkSkfvgwIBmrisgKZ2ApPbuyI17\nzZzEMID+vr6crJSoJsUsJ7MWkprnAbDd7cZYayvuk889MwziSacTwDhiS600QHK//QCkkFeWC7kG\nElHUw4Lc9kSC9YMAEj5Ch6SAHtFYboNEehmRXQvgZwA+Luf9vudtxM+bdsDBbbMSUZJphGzyMJKl\nmEomI/vd0zLKykztykS9zJ1cTJbf+ibeBKBtbpQLObC5CHEvJbAYEMRTQ
GCJI13n2GwSjuOTkm+l\nFmEzAz1yxUjf0a1b4XW7pZvemRm8EQolJLtac5iINEcWpFvYi/PzcYon2+/k5KREZk+dwtT8PIbO\nndNUSOPGJivQbW1AmLuH8rjdWFlaisNjYwnHwvdz9a5dMXOkp9QGg8DEBFBbCzz1lAiz5aG+1qys\nn2n1LbLaEXcYkjI4CimEVGsbIKpvARLN8CBaQ9MFYBWAU4iWUuHDW62IW7iCqMKqhwpVP/n+AlF3\n3EOQjIZ+2NKFN+oD+H83H8B5t0dx6m2U2/IjtXNgxDc2HW/Z7BllJepl7tR+ZErnOMZRhzpN1Tdf\n1VBBmAWWIgTxFMg7+BOUoRCIhzqc1fT+cimWD/z3AXQMZC4MFgB8JSUAtAmbGTz32yi5+uLB2HIk\n3a2t8JWVKaGpFU4nsGZNQmJuZA55ctpQXa38rdcmv/1arzfp9gyJFGi+zaKnmzQJKm9ENB6JxJDU\nRGG2hw8Do6PAQw9BgINape8CEPD7U9aCjJZLSQVqUqx+H0S0vEgVgAH5/2lI5kLV8rbjsvMt9u/H\n85EILkAy7lFXtk01k6k0hX0uIlpKhUcIQCEkYjwA4IOQwnp73R78sLVbye30QCKmByApvJk6B6mC\n/e61tHShvj6AzZsP5L1RFo9USRZfxuV1vK6p+lpT6iX9vppFpgmzuJcSWAwIcyEBgTxHtkxf9Exn\nrIQRsyEjrqtV/7wfoetGgPe8aD+5Gc926ZshPbxuna6DrBnwpj8Akhotmd2egTc4Utdl5dvs2OiW\nHXilBwfd3bHbhCIR9J46FTPXauMiNtdvHnVg/HgRSq+fxMdudeDJjUvD7dYKWG2Qxcx4mKGPlR9n\nFl7LzHHY+yJIZIs3CHIglkjy5jzYu1d6CgEA9fWAxd8FyxDvQJsMMf0zCVYahrn0ZvIcLDYGBoII\nh4fhcBSjpaUrZ8irH37FmTeAgGGH3DDCCCKIHdihG2psZJts9NUsMmMaJbCYEOZCgngK5CH6+vrE\nkzoOfj80CYbVyIYDrRFnXCME+Pb2CHqvGcTaN5twaJ87KRnPp2sqHJYeNuzYkfghQyKCCsTPtRah\n5+caBCXRrsPniyvTcrVC65pN53pSk0OjYKGzxyGZ9MwDuA3AckikMlHpDj/iS5PwKIAU7qpg/35g\nZATweoHNmwELvwtskGp4HtZZH9eXNFEHycCInxc1ITdT9sQKaBUyseo7au9eP0ZHpbNdXx9Aa+vi\nO9IC2SFZVjnfZosQahFmK9178+l3b6lAEE8RaisgkPfIVg5muiG7RmDEGddIzuqTj7kRCLcaIp35\nBo9HeriQbFwsRFqLdALGjJ3YXAOIcXex+mcz027GmUQqbs4J20Nq2XMsRHcEkloYglQDczeiYaMP\naOzHh9d+GJIDrRpxRK+lRVI6LSadgHRt6ZFOzb4YAH+jUw5JUQWksU4AWA2JYDKwc7BYIbeZDLfO\nXo6oOWTGmTcWRkJXjYTRZqOvgLZplJnwW5EjKpCLEIqngECew6gClktIJzw4lXqhAsmhpWiHIxHc\n1N2N0ZkZlDmdmJyfx9qqKhzassXSuc9GGPdSBVPH3oTkNFuOqENsFaTcR+bWympv8vAjqnZ2ILY0\nihFUAbhgttMyqgGcT3HfVOCEVOrlT5CU4SkAk/K6Onk5j8UKuc3kcRPVAzUKI6pbLtbVNKJUZiuM\nNlWYUVtzfSxXI3Jd8Xz77bfx5S9/Ga+++ipqamrw8MMPo6OjI247EWorICCQV8hUeLCR/M+rCWbm\nQ4/QW50Lq4VMhnFrhS0m3SdPrqMgJGXuovy+DsCvAfwtJOVwHFH10APgPcSPX01yApCUUjuihFUP\ntZAUSLP5mJmGOj+VwQ7JuIjNlwtSWHIxgLcQzfFk14wTQAmAnchunmeq4dbZghFCs5ikR4/0Gsn1\nzPW8SjP5qrk+lqsRuUw8L1++jA984
AN48MEH8bWvfQ19fX3YsmULjhw5gtWrV8dsK4inwFWFxcxL\nyJcb0lSQ7tjM7J8s/zBVpKKcBYPAyy/3YflyvyXmTGbmIdG26ZyP4MAAnjt5EudmZhTykMtKohbp\nteqz5kdU0QsAhm5/01Vgs/Ud5Ud0bJUA3kUsUWGkUm2ew4PPZ/wVogpkCRKXErHJLyvzLdOBHUAZ\nJDJ5AMBnEBs+q0YFgF8AuBcSWefnxg/9a4aRmuP4B/hwO8rhQBekEi1mH3CYQS7l4xkhNItJetIh\nvVYbES0G2DXqhBMlKMFO7NQcSy5dU1cLcpl4vvHGG/j4xz+OyclJZdnGjRuxbt06fOtb34rZVuR4\nCghkCVp5cEsF/Nhu/f6gZikOo/snm5tk+YepIpWapcPDwNGj2uVJUsFzJ08q83DL008nzF1MNGda\n64zmQg6HwxjlSGely5VSDddsQStP0qrPWip1NdOtfZstsLExYqn+KHVBIk7vAvhXaNem5PMZRyGZ\nEs0jef1KQmZIZ8I7FhXs3N8FkPo8BuD/AnB9kvYvAvh3AJsAfAxSyPB1ADYA+I28TTmAh1X7sxy7\nERThMBwxNVHVeZmsJusKud1M1GZdDBjJccxkHuTAQBB79/qxf38bIpH4GU2nfIpWXmW+gV2jveiF\nCy7dsXwP3xM5oDmGdP0OrPZLuHLlCt5444202+HhSL6JgEBuIZNP6JLlHubLDalR8ON1/lV0bO4n\nm5RQ2GDQWCismblhBjnpgil7kYUF3Ob14ifNzYbCQXk1zVnRAsBvmTlTZCEaoDg1P68oZ8HBwTjl\nLNGcaa1jZEyvPfW+AOBxuXDkrrvyTp3XmxuzSmgXzIctdrW0pJVHbPV3lDpcmKlrTki1J3dCe2yM\nVAJRYgQAtwJYKbdXA4l0vmlpj1PH5weCWBYexpyjGP+zpQszCfIQ+VBgngTbECXlgERK/wySey1T\ndB2QSOXHIBFuQMptPc3tNyGvfxvR+WWkphxOTCD6QOM+eT3/gIOf8xH5fxYebRaZ+N1LJQwdiJKz\ndLdJFeHwsOLMOzgYjHPm7UJXTqmWeqG/mcqDNUq8L/kvKcpwEEGRA5oDMPobn4n916xZg2XLluHh\nhx/G3/7t3+LQoUMYGBjAJz/5SVN9SAaheAoIcBgelnIP9dQvI86uwSBMqYVmt7cS/HhLdkXHVu6U\nxmaUjAWDwMT3W1B7qh5Pbcic660aTNkLzc2h9/RpPDQ0ZMhhlFfTSv9y0FL19baaGgBAQ3U1Gqqr\nAeiT8a6WFqwqK4Pbbsd9Bw/GPKFUX2vBIHDsdxIZa6hMTO67WlrQ7vOhw+fDe/feC19ZWfoDyzL0\nPmtmldBUXGKtdqpNB0FIxJJ3pmWEphdSaKne2Jji1gbgD/Iy5urK2ntc/nscUrhtJcypjkk7zzpg\n8LttWXgYa0b78eGRHnxhsNPwodgcVAM4B0m1vQ4SOW+CZKr0UW77ywAeAsBrAuxxTTm3bBSxzrJM\nyTuGDyGAqPkPU5d5MyBGfp1cu2oFdTFh1j03V1xSkznz5ppqqedEa8ah1gyMqs3pKMMCmUG64kY6\n+zudTjz77LPYt28frr32Wnz/+9/H3Xffjbq6OtP9SAgiyuhLOoSAgHU4dOhQxtretIkIIGpsJAqF\nUmujuVlqAyAKBKzf3gqsWUNUUUHkdMaOt7NT6k9rK1FHh/E5WIwxEBFt2reP8MgjhEceobVPPkmh\n2VlT+zU+/TSFZmctvaZCs7MUOHCAQrOzMX/roXnPHmUMgQMH9LdrJkLRLKHzALXfa2yci4nO/n5q\n3rOHNu3bZ/i8GIH63OUirLyemin2R7WDiDbJfzcSEfuIdsrbbuKW8fs6ub9dlPzHu8DANklfzdHv\nBQSM7fOVfZvokUdA//vTjVQ0GzJ8LJc8xnKd9W3yvNSq5q6Vm5/b5PVHNbZLBSEiChDROq4fqX49\nG
r2mtK4DPWhdR4nQTM0E+V8g5ZGkhk7qpFqqpUqqpE2zzbTvQAfNzsb3upM6qZmaaRNtolDKZ858\n3xIdcxNtIhCokRpj1ustzxaeO/QcBSiwKMe+WpGMExm5Z8jk/mp8/OMfpx07dsQt1xuHvDwxL0y2\nQbovQTwFzIARn02b9ElPJolnKCQRp1RJJ5F58mp2eyNzlAwVFdEbwsLCaDupEkgrCHsqCM3OUscv\nf0ntv/xl0i9angidmJiI+XLe/G//lhGSpHX8uieeoPXPPKMcyyiRWqw5ThXJCHWqxNTqH9ZMwMrv\nKEYOQEQfJokgMELDXwbN3HYBjX3NEMsGIjpBREXcstVEVJVgH82XfM2ikQghY/sUzYao80DAFOks\n0VhWqnrvlOfjBDd3nUS0jOIJK1uvnmMz6O/vpD17mmnfvk30GXks6ZBYo9dUMxknuUbG2N/ZSXua\nm2nfpk30mVBr1oiSmszxpDcR8V0McpzsmCEKaRI8veXZQibvpQS0keuc6NixYzQzM0NTU1P08MMP\nU319Pc3NzcVtlw7xFK62AjmFTJXZyCbM1tU0u70Vc1RTA4yPS7mdb70F+GRLx1TdZtOtJZpOXU+j\nSORUmo06kvwxGAL19djR1GQonzDf6rUmK5EiancaQxjAFyGZ+eyEflitVu3HMICbIIWLNgA4Bcl8\npxGSMc+QTltVANZBqs/JtukA8AqiuYqGO5+huiCrAcwCmIY0N6yWaDGkkik3IZpfyWMVovmtE4iW\nm2EwUzszUY7k3r1+JQ+xrj6Ana3dWSmPYnUN0L1+P0blH5y6QAd2djtjciczlaeodqa9hEvoQQ8A\noAENeAEv5IybrihbImAUuexqCwDbt2/HT37yE8zPz+PP/uzP8MMf/hD19fVx26XjaivMhQRyCsVy\nUoxVRi9WwwhBSmaco9WGGfJoxRy98gqwYQPw619HSScg9UeL3AwEgwgPD8NRXIyWri645ZV682GW\nSLJcU7ZvJh44mDXyydTxK5xOXJyfV47F8gmTwSpDJjNIp6RJMoOepWbUlQqMmLt4IOUnJgMzUSqC\nRBKZcdD75PXPQCohwnggb4YzD+Ao19YFSOSlVn7vALBvIIgr4WHAUQy0dAEq058KROtjxnQ+Q9fs\nJIA1iCeX0/LrEwC8kHJXSwFcgjRWN7dPLbffhwHUQyL3Rkuj6Bk2dSE2D7GlaQfazA8xJfOfVMy0\nEsEh/+B4GxvRsuNRtKlaZXmKUn9TM6jRIq9a+YcP4AHYYMOjeDSG3PH7/xg/xkN4KGvGQgMDQXwp\nPIENjlp8qeUpeFSfi1SJeaL9MkX2BQS++93v4rvf/W5mD5JMEk33hRyXlQVyC0ZCXQ3nuqQZkqre\nv7MzNkQ11VxGo+Gsev1PNkdWhOKqsae5mR4B6BGADnCd5seyalX0uOvXm5unbISRbnvhBfLu3Emt\nv/hFXJjmc88/n/HwTRYiqg7zzWUYzT9NBfkQMpsqMhEWaRR8m16N9jtJCqG1ycvXUzTPkX9VkhSW\nWsOW7WkmPALpdSCQ2RsHAy87Nwb1y8uNq4Niw2v5vMYT8vp2Sh62rEanPEcgKTR5vWqf2dkQHTgQ\n0MxD5NtoJv18TL4fzYsUFjkbCtGBQIBmdb6YrchT1ApVNROGyu9fS7VZDV3ds6eZHnkE9MgjoP9x\nYFVcrmeyMFy9/NBE+1kVTixCbbOPpcKJ9MYBA6G2QvEUyClYqeqkq6Kp9x8bAy7Kj/QrK1NXG5Mp\nlkwtPHYMCIXi+59sjsyM26iixT/1buI6zY/F7Y4et7Y28RjV0FNarcTJyUmMRyLoPXUqzma81OVC\nd4YLafPKZr6ElWZSlTSq9OYjvnf0KL45MZH0c5WsxqhRxYvfjjmoNsrb96rafwLADLfvYQBc0IOC\nmyGpmI2Q1E/ICh68jYCGk2i2saCxzAWgFZLyykJonQBehhRiex+
AH0Nys2WKoJaabKT26zCk8iuA\npHTOqfZxuz1xZT602mCKabTMSvRsFmMPACcaAfx9wpYyB7fHg9YEPyJWlC7RUjfNlGMpVs4YMIpR\n3IpbsRIrs6II8sr2k03uOPVXzzmWqZYv4SXMyVfPA3gAz+LZmDFpOc7mixutUGYFNJGMmab7whJh\n9wL5h3RVNPX+7H1lJdGJE6n3S0ux5FVKXi1Mpf9mxm1U0dJ76s2PhT/uiRPpmzRZjXxwQ801LGVV\nMpMw+rlKZu7STMYUUX67Dq5NdfudFP8jXUX6TrBs33YiqpwNSUqnCdMfvZfX5PZOInJTcqfdFfJc\nuIkI/Z2SSrtvU0yfjehDRkx31I6wqZgR8W1sI6Z+vkQhqiAiUIjuT8vgKF9gVN3UUgc7qZPW03py\nkUtRXtfTeksUQSPglW0t9VdvbGqzJBCogzqU9Wy/bbQtbsyLbUpkFEaU2cVwIV5MLBVOpDcOCFdb\ngaWIRKGk/Lp0yY+aIOqFuFoR2sqHrNbWSv83NBC1t5tv04wzr5VkzApH4EziaiNRmQi5FjAGs58r\nPYdfo+Uu1NutIaIKkgge/4ysmWJ/oGsonnR6KEoO11M0DJQd40OUfqmVExRbYiTRS8uxFiSF2fLr\nKik23JUPDXbIocGJ5tFMGRKi9F1v1W00c30P0C6d3prtZXaRaRKhRWT4ZXVURyHSJoCZ6iPf3gk6\noUsI1cdlfSyjMgKBGqhBcz9+fF7y5hVBMxKGfbWR06XCiQTxFFhSSHbDfMsth3TzB9X5k9m4+bai\nhuViqYWMjG37q1nNedKbv6VGapZirsti1Va1ApmqAZotJMoZ1qy3qaOQbiOJALZSYpqhJkGSXia9\n3BRV09RKo149z+UUS+K88rIquS/JSKNe7iXkNowS1zqK5p8yglxBUk1Ovn9OksgsI8d2IknpfARU\n8XQjHZ0NJSWJzVx7i/FxiT48mKcQ3U/q3krfUc20uL1MjEyVMmHEw0veOCJjRmXMRB+NtqfejvUx\nEVnlx1dKpaYJWjLCls7vnhEyaESZtYqc5guWCicSxFNgSSHZDfNHP3pIN5RUHWaq15aVxKmuTmq/\noiL1ENxU1EIrSaHePJldnq/IBvHMNplKJ9Q8o301INpk0tQoG0h0PTVTPHXQU0i1ttWCekrVBJN/\nz0hhMRHVkvYPdztFiZC6HiYS7Gflq5QkMrmN/oZq6AVqpm3UQRHlkmH9YyZIJI9dMUOaDdHyAwEK\nceY+iS49I+pyJvXGZAqqdE0Z1cAlZFspStVoKFk/tVRNBrNhp6yPXvLSelqf9twYHXOqc8PG10rJ\na6iqCRr/3k1uqqRKaqVWZX/+e8rstVJLtUrbfIiwWVhFTvMFS4UTCeIpsGTQ2SnlULJQU60b5lBI\nclBdvz654yu7+fZ6Y7dXh7amQz7NOrjyY02H/FpJCvVIitnlVoLNT12d9rnOZWidW6vJVDJyyH8W\nzBLJjBK/ZkrKpqwKAefHrafqZxta1EEvDNwozeCJYDtJRMxNRI90Er3STHRwE1FFSFILB0lSEk+Q\nKjRVfn2AYnMW1Y635RR1ia3Q2N/Kl5eIKuklAlUoN9OM/LUSkY+IlpFEPpkqzBNmtVLczLXNu/yy\n9tqTzLPW/gzZCYI1F+CbbaUo1dxDvX4mUjrT7WO6eaCsb63USh3Uodsvre06qZNqqTaOCKr30cvr\n1COJalLN5o1XS/XGq3UOEpHRSqpUtm+ndtPzZwb5ktNqBJDKDi+Jl974SBBPgXwCT5raE3yXGSVX\n7OZbTQ4ZcbJCtUuVhKWrGlpJCvUUV7PL04GarPHzk+ghQS6G/WqdW6vNjcyQQ7NEku/rthdesEb9\nZHfmTH5LwKasysflx13zjQM5odKboQ5Gt2VlPUCkaA8hInqjObpiVyCeMG2i+B/t5Rp94NcvI+lU\nGlU97RQlgmZuHopj3u9SSAc
3pDhll82VVhkZfrz8pdess60WEj0I4NtZRbmRiWm1UpQpBVWvn4mU\nTjN91iJ56c5NqiG26mVa+ydrW289I2jLaJmyfjktV9RSEGgtrY0ZbyJyn6gfrM0SKtEkzwJXJwTx\nFMg7GCFNhw4dMk2u1NuHQlETn3RVu1RJGOtTaSlRa6t1JkK5bvKjBzVZY/NTXp74IYEVYb9Wh9pq\nXZ9Wmht19vdT5aOPEh55hNY++WTSNs2SXr6vlqmfzRT9ZaijrNyR8+Nu/cxsxlV6hmznDC8naVqd\nJOVfKoRHZkpHGiXFk6mVRBJ5XE8SkXRSlOydoHj1jpFHkJRf2UzxP/YOjWXLKaqOnlCts9EsNdM2\naqMILdPYl4XMNtA8tdP9tI1m455b8GosPzY9gqhF5M0Er4ZIIpW86ZJWO+qanlYglWvKaqUoEwoq\nc6WtpVo6EWOFZX2NUL7f6c5Nsr5pETrmUMuW8USQJ/VaYbXJ1vNQq5HbaBtVUzUto2Uxc3zo0KGE\n5D7RGEMUihnHKoqvYZoqlpKZ0NUGQTwF8g4x4YE6StahQ4dMkyut7a3Mq0wFoZAUAsyTJr79bdsy\nq+Rl+lhac5Vo/rQeDgQCUt5soocEVoT9Wk0UMkn+OzuJKr4VJYMdv/xl8v6kQXotU2rNpafpwkzY\nMD/ubD6QySTx1Arp1AqZDRApTGtjKJ4INXPbekgKzT2qsS7AvS8hiey1EsWUKymYDSnmP4ykrqX4\n0xxLTs8RaB+10/0UIkmpdXLr2yiWJPJ9Ys8tQnK/2XIWJJOuqpzoxpfvRy23H9+O3qXe2d9JzXua\nadO+TTG5p0aQCwZomVBQK+Qwai0yawVxZn0G6TvHpoJkfdMidPyy5bSc2qldU11cSSuphmpilER+\nfTu1Jzw2I6aM1Oo9MDh06FBScmnE+MjqEjZLyUzoaoMgngJ5DSuULKthdZ/UxkR8+2pSajX4Yzkc\n1h9La64SzV8iYpDquqWI5mYifEUig5XfzXxNUsuUWjNsIAHy3XgoHXRSbF6lYk4kv2fr1IRHTYQ6\nKRqey5ckYURKnSd5gmKdaJ1EMeVKquRyJSBJqewg7dMcDc+9rGzfQRFlPVMwtUirHpnTCjNOF4lu\nfNXhyTz5ZNC71Gu5Oes4kCM/aiaQSQW1kipTbncNraEKqiA3uWkdrYvLjWyn9pg8zGwoalqETs/Y\nqJM6FZWygRpiSFwN1VAd1ZGHPIbIs5aCbIRcbqNtVERFZCc7VVN1nPqsFbLMXw9WPpRYSmZCVxsE\n8RTIa2TDwMYsrO6TXu5pY6MUfssfy+pcRj7Ul/WhslK77TVrJHLs9Rp37tWaq1w8p/mGTZuIUDRL\nldsP0Imz+VdqJF1YnSubVZhwoNEsu0LRH9dKbjkLAV1HEhFSf0TVRIjPz1TnSbL9a7hlAYoNtwWR\nUq6k8ulGWj4bilmnNu5hY1Arsw00HzMNfD/V++qRueX0JoGICmiKmmnO8G2qun3+fSttTXCjHp/f\napRCVspzhqcbqd2k4rkUwQhGJVXS5+hzKZNBXjXlCZtWW2qVdRWtSmj0YwTJzIDYNowQrqN1MQ82\n1GqmVgkVfr3WMdl7PszWTFixOiS5juoSrs+EOp2JtgSyC0E8BfIaekqWkZAjK0iaVhtWq2t64aXq\nv4ni1dB0CShzB/Z4KEZ15cHmgFdE6+o0m9Ns34rwZiuRKHx7sZAwdFSDfSz2HC42rMyVzRQObT6k\nTTCbyTBb0dpUq4SI2aY7KT5nU4tIsWN5SSKMcbU5Z0PkPBCgdbMh8nDLeUKs7lOd/HeZfNxEl7DR\n8Syj/QSKxGxrhN+r2+ffd1Ak4Y0vTz55FTnZMVtnQ4QDAVo7a/6WOhdCba0AT5j4GpbphFeyXMMC\nKogjbImMeyqpMkZdNHJsLZKpV1qE35Y/Dtte7T7LHnSoS6gwoszWa4Uoq4lhIzXSalpNFVRBX
vIq\nCibfp+cOPaf0lQ9JLqIi3XxbPoTX7DwZhcjxzF8I4imwqMiE22hnJ9EttxxK2GZnp0Si1CGd6v4k\n6x/LKwSIOkzGcBkdu1ESwZeZKSmJH1uq4Mms1hj59QBRcXHqtUpzAXqhvmkV0k6z5mXC0NFmMi+r\n5BhSmZ9s1zxNB1p9PXTLIe3zZiLPVbPsCulHKxttupmiXfNQVE1UEyl2LK380ZGHiMoAACAASURB\nVOUk5VOq19nl9tnx1X0yYrpjwvyYiIgq6Sg3ngUKkf7HJlbVjG3fbAqy+lzoHTPRPmawqA/HEhAB\nsyRBTTD1XFXNtHuCTlAd1dFROhpD2LQUa15lPUEnEuaA8n1gxkBaiqLazIft5yKXsryQChUSyfrJ\nk1E3uWPIHq+Qsu218j0d5IgZRwM1KLmjPDllCmYM8T4UDW8OUYjaqI2W0/I40snWd1AHraSVhuqf\npvMgQeR45i8E8RRYVGQiR9NIm+rcRUaU1PtqtcUTRp68Jirtkmo/9aBZA5Jrb9kySilcVasupjqc\nVw2myN58M9Hy5eZIZy6WOclEqG+6OYcJQ0ctMuRZTKQyP2b3yU4NRW1o9tWMraoOkm1qNBRVDS3V\nlDncaoXpMpWSKZ5a7rFriaiaoj/8Xnk/deivVq4pPwaiWALnovhanGq00pxCOvWOw8Aru2rzonRI\nYaJj5iPU5yURETBLEtT5e1omPKm0yyNRqCaf09hMUn3NNmrTrMXJ96GGapS/Wf9ZG1VURUwJ3Ebb\nNEN/WY4mPx6e9IKiYb8ucpGNbMpyJzljyGAxFcfsx8ZaSqVUTuVKrquTnAQCFVOxEsrMO9GCQD7y\npfXggEj74YNenqaRBwp8qLEo1ZJfEMRTwDDSJQla+6dyk5+sH0ba1KvRqS5fokW6tAje2rXax0rk\nCsv306xjrBZp5dv73OeIamrMl2BRq5eMUCdSXNMJ68zEg4d0kYkw1XRzDhOGjqZ7N5wDSGV+zO7T\nTNEfHe1LLQE1TZO1avY1C+etmZKNWRtaXUvUlpbi2UHxZJU3JFJvz9pMpBJqGRsZGZ/WeEJEVEpn\nqZyOkZdephMUJqJYIyKTzxKTIh8/qnqXfjPFzn0isxezRjBqUqi3v5F2UwnJTJQLqQbfB94p1kc+\nWk/rY9oopuK4ZexfBVVoqrAhCilht2pnWPU/plh2UqcSUsz+HZX9qBnR5P8xJZUnjDypraZq5W+9\nkijJSrlokVE98q+neKvzY3mCLFTP/IEgngKGkS5J0NrfTBgpI2Zqsx01QiGi5uZDScNXtcpvhEJE\nbne0/ba2+NItzEm2sVFS99T95/vKiClAVF0d22+WP7l+fTRE1ujcataA5OaSn+tVq4yTWtYuU3L1\nyLtVSmU+GQmlE8aWDzmHhpEB6TCV+TG7T3K1qZl0aUyCVUawbXaWag4coNbZWeXY/PVk1ZSq27FS\nYePb2qY6DlM8+Vc7xU8bI16tqm35nE+94yZqJ9XxVdARpd06OkxEiV1zGbKlnps9jjWhtrFHbSbt\nS199bRlREFNVpfT2N2uIo2cmxKBFOJMRW74PvFKqVjS1SCMjdw5y0FE6SttoG3nJG6fgrabV5CAH\nVVN1TK6o+l8zNccpxOyfi1xxKij710ZtRBRLolkb7zv0vhgiqVcShe9XG7XFnRczDx8SKd78MYWz\nbX5CEE8Bw0iXJKSzP0+kEtVrZND7AeYJkxZpJIolgcuWRUknH1ZbV6d/bC3VUC/8Vb0tv05PLd22\nTSKrtbXaYa18rmdDQzxRT0Qa+bqYeg8E9PJj9eY5lfzVXAzBXSrGHWmjmdIiYYuF5GpTApqWJoNr\npvgp468nrfWpQN1OojEnIzVsfR1JqmUrSWQypHEcXvF8pJPot81Ec5uItoa0yeoJig1p1TJCYghR\nfG4pwzaSnHWThdrqwUsvE4iomN5QFE8jqmQzZecjYPY41
nxHxR7VgojwjCKZoqnl/qqnjqkJG58L\naaYP6vzKNmqjEIWojuoIBCqjMmqjthjn2lW0Ks4MiLVrJ7uynFcs+fxQfj92HPZPrX6q/7Gc0/W0\nngqpkNbROmqlVmqndnru0HMxhFhLzeykTnKQI649fk7MPHwwqngLZ9v8hCCeAoaRbghiOvvzpJWR\nIrPhqUTGVFsWXquX66lXTkTdV74EidNJtG5dPFlk27rdRHa7pIqy9bxxEa+WJqvdyfe1vT2e8GvN\ngVZupxFizeZCTRQzoY4L5AgskNE6+zupeU8zbdq3iUI5UzIiwa10mnfZyaYs1SlNR+FspsSkhl+v\n3k59HPa+gYhe53aMBKLTpj5eiIgc3LI6jfEw6E1/sjEkwwkKUx0dVkinUWjNs7UqqNTaJpkYZzcn\nNHZ0uUIw9ZAsz1Pt/qquj8mDN99hobJGQnTVfVDnZTrIQV7y0m10WwyBZCGsaiXRTnZqpVbNsFpG\nQhuoQXH8VZNBfj8b2RQFlyew7F8Jlegei82nlprJclfVbrwVVJFQpUwFIQrRKlpF62k91VGd4fMi\nkJsQxFPAFLKlRKmJUGur5Kj6/7P39tFtnfed55cEQIgvIgG+GaYp03QiK87YLhmxcRLGBVpT9ZB2\nQ9QTbhRvDtOzO+DO+GS3ezqxN+2cnHZ3JzOd05w5090507VmWuXNTCNbtWVFVhwqAWlVSezaieg0\nTc02Cd3IDi1LASVLFqm33/7x4Ln3dx889w24AEHpfnFwSAD3Pm/3Eryf+3vjffqBE9VNtrvbHrCm\npwUoSoshj8eMREQ7dq61vMRJX5/YJxol2rlTP1a5bXu7+bksRcItrxxg5batraIPdR5yrHKOY2MC\nQCWoc1dhO8ur05rK9pNJfVKmwUFz7F5iX8uN0w21QarkSrR4dZ7+XFrUKnwMNDV37d9ZcFsyr0u6\ng4g6SCTmWSZ/Fk5VXmG4XbOd2o/ltabhHJmxk8PFt3NkgmezzXwqnYNfGMxRjlL0DCVpkcYc6n3q\n1pmPfZD+xndcoVWitQJ10BQdq/Hldb2jplVO7pa6six2rqJEVgsaB6cttMUWdnKUM8Cui7polEap\nj/oMCyCPlYxTvATu+qiPClSgVmot+ayXeg04VD/roi4jk67MbCuTC6ngK/tZpMWShETy92ZqJgnJ\ncj0lXPJ9pFsuXx8JprzWqpxrO7VrM+C6ycmKHBTQhtoYheAZypdqZYnSgZBal9IJTqTLkQQcDnES\nZnXzUN1IJyfFe6OjJoyqlkL+Pi83wvuQ2wwNEW3fLl5HoybEShjkpUhUy6vbU42b5fGl2ax1TVVX\nYTXZkpNVV2e55seCz7urSw+XbueRrg+nRE1uCuKGSehqWyrfJU3SRASi8U+NEx4DjewfKbF4Vlom\npT6tqaUq53zqIPMfZz85g5cbdLnhxTQJwE2TSBTk2bKnaTjNxj2peW+i+F6l5UqsylGaxXB6+Xcl\nLm7zYp+FHHX7OI94wqMOepFQdJss7+K4PBt4kN9R1aiTWI02ndwtdfGdTmVUuHTwJsFMxmCqtTJ5\nEh75kG6ujdRIR+loSYxmL/XSNE1r3WFvpBstLqx8DLo+4xSnJmqidmov2UeCqlyTIRqiPuoz4JBb\nY2Xm4DSlCXlrO5PFv2AO/Ha1Vvm4kpT0lH3WLrGT7E/OLcxmu3kVgmcoX1KtadWyfKpJbrjbqkyW\no7OCSt1/f74EODmkcndYbjXk0NTQIPrnYOlmKZQxoXwO0aj5+cSEFWwleO3eTdTUZLWmTk+XwmUk\nYl0Xaf3UwTefu87lVo13la693JLpVRwUda7GKlw6jcWLi29PjzO4Ou1b7g2TEDxL5VbSpAQii9fT\nhQ8VaOrQlPaCvtLSM+kD6U1hTS3nfOomAUQNB9KUPjROy2sFW/BKk/lPtpv8u4Dy/acc3ucgqiYd\nktJhlO69YG1taRqnQ
8U+/sGjFXicUNyn9cCvOZ5HulI1PcQvbv6ygqQn/lciRzn6lfyvBAZ1Xlwl\n/VqUy3W/LBdYdfGdWcqWuIryWEVuIdVlgOXj5/ORYMXhaIRGLEAnrZLSkikfavkS+eD9N1ADdVAH\nbaEtFkiVbekAVffopE5KUYp2027LPvL3IRoy1qOf+i3geQfdYXymAr9aa5UDorpuXs8RuYbcqrtI\ni2E2202uEDxDeZIEA+m26ZZZtlKpSW54WRPed0+PsN7dcIMAJlk+RAXCoSErpHIrI39K+JKApz77\n+pwthdzKJ8eeSJifxWJWILzrLrJk2AWIBgZKrbT8GY8TLS7qkwBxF2UJpXfcYXUB1kFzLCZeT06W\ntuX35oLsx67+p7Qg83hXL2DIYdWttqjTvqHrbnByK2lSApEerqcrLT0zfsjemrrZtUxETQyse+am\ntBf93LW1lfQA6SY7m5v6fpq1H7PpS3fYvaCVDmy8w8540VX1m1TwGMNZoAJN0icpS+s05nIepal0\nrmas6yWapE/W1BrjDRQFHHiJk/OSMTRNzueWCozlZiH1A6y8z920m3qox6ih6VbeQ3Ufla9VeBqm\nYct8JHgu0iIN0iDdTXcbkKmrwzlKo0ZioBEasWSb3UpbCSRcX50y2Eq42027tS68bg872OU1O3ny\nI7kOHdRB3dRNy7RsWWvuwtxCLcYa8DWV6+YkHmcrEzvZxdCGVs/NqRA8Q3mSCga1uJC3y0Db3+8M\nh3x8w8MmTKkgpSsdooIuf27daoISB/GJiVKrKAcoO5fZjg4TlDlk8kRCdk/pdjw9LQBOQjd3r5XP\naNRaz1ONd9WNWXfM/Wp6WvTR12e9MaC7aeHlfOLg7DdRVTVqc4Yimv72v6KeL/wBjR38iPbivByI\nrLT0TGGtQFNzemvqtSAJ1m37RwhrBeIX/RLKeC3KXtIDpJq11mtCH/m+tG52s77k064vvwCZZm3K\nOaYWcoQDacKhcZp0PMaV2U/dzqPqW2z9yRsomhfwbiBX6sJaesRM9+K/ozH6qOF+qoMR2ZbqFuvF\nmsnnprNU2s2Rw5WsoamDYNmmCmO91EtZytIyLVOWsrSNtlEXddEYjRlWOL59IzVa3FwlfPI6nFto\ni+Xz7bS95Jg0UAMdpaOONTtBoAQlSkq/RClqicm0e/BtZNKhDuqwwCa3uLZRm8XS2kiNlmRFco7d\n1G1Zg17qpQmaoCxlPQGi7hxRz+0CFaiHelzPYT83Wpz2DxMZBasQPEN5kgoGlV7I+3Wt5ODDwYW7\nmwIi4c7YGNFXv5ovGZ/anlPpkELB6iKrjkNtSyYS4k/pNlsoEDU22kMkB92hIevvW7aY20nQ5i6s\n3OVUQqZTP+rYec1SmUjJ7pjbaccOAdHd3VYXXdXqLJ929VPrHQyr4mpbq4KAVZKbW+umqF/q9RgE\nfKzKPZ8kEI0VoZODT5pKAXCZ3DPCqnDnRXz/ePHnMJklV/hy8XIrU5r97frVwV2SnXPZDXSl3kjI\n1Gmapqkj3+FoAaosTi5N6hET7sXfJh7PqloNdTDsBKc62SX90W3PIcWp/iQvEaJmgVVBTq4Rt0Dq\n4jl1D2nhbKZmCyzJ9VHrfcpHH/UZc0lSsiRuUyYDUh8TNKGN8XR6SIuwtt186fbqGsmkQj3Uo3VP\n5sfJDuacIM8LjOrPWO83Wtz2D116g1MInqE8iYNBEIla/LpW6oCoq0s802lhdeSWwnQ6b2yfywnY\nkVCmZlq1m48EwK1bS8fB4xjHxkSpFCfLJM9qC5ggOjJC9O53C3huahIutHKtp6etUN3dLdyFJeTy\nDLfqUwXdri4zjlXOq61NzHvbNvH52Jg1aY9TLU8utb6pepz4c3jYe7v1pqqAZ5rKu+rfQPG/l7ED\nG+fWWmkSIkNp8nYMvG7nUZWeTzrwkaA2RNaEQDrJbTuoFO68iEOhDm7TZC6Xrg6nl/Q
5qnV1nIjS\nRYvv8DXoSl2J0pQ2IMEN4JZpuYw4Of0RUwHALulMyVjJhC83gLCOwhk4OKTw39X9dGNQXWr5Y4AG\ntO9voS22FsYRGimJ53QCOG5BnKAJCxyrbrcS8Pg+7dRO0zTtyeIpH13UZbS1lbZaYlJBKAHPOMUt\na5egBO2m3bYArbrX2sGcX3dqNZOvDlzlMZdj8+viXa5reChnheB5naoSeKzUBZPI2ZrG3Vh1yYMk\nmHHL5+CgADcJXdLaqI4XEJDqZT6yn927hWWRu6uqMaLSisctjq2t5u+qW/CuXWLMo6PWz+Jxsw8e\n98nHp1p8nZ6plFhDvk82ax27zuXW6diq544uIy+RtSxNY6NYQ79Ji+pVgUFPADUx7RTYGBXxv5fJ\nj2+cW2ulSYgMeT0GVTxWfuRkePVjhZPb6qDRi9z6cgPTAhENkrCG2rn7SqXJvGCYvMZdqcuV34tk\n/xfV+iOuWqOcsszq+raDU/tRuLevQkgHdVAjNRourHZjkBZS3cMteY+M2ZSPPurTutHaPSZowoCv\nO+nOEjjWWVjjFLdAZoYyWiufXZIk1TUYhBLA1Y1TxEJP0gANUC/1auuDykeEIkZMqLruXiyYO2iH\nJa6UyD0+d4qmLHC6SIu+zjE/51oo/wrB8zpVJfDoN75TB7mFggleKvx6HZtdCQ91X9XyFo1a3ULt\n5qMrxdLTY45XxprGYsKimUoJi2U2K+JKeQIcaTWV26sJhXTjdsvIq85Hvnay0Kpt8EQ9/Kkrp6Jb\nD+mq3N9fCpU6V9tqluCpVY1ZogChp4r+euWO0WuN1f3dRJecaKHKqjQJkSGHY2CB9zfX6sK3Mk3m\nP896NpJ7ObXTVHpBoJtTnTB/XcvvRfJGXlTbZUStVkZeDkZbaIt2DPI9p0y2TnCluuumKKUtkWIH\ngKq1NUYxo80RGjFKnzg9uHuu20Pn+ttIjTRKozRBE7SNtlGCEhSnuJHwCCQAWMZMqvGlTo9+6rdd\nd50FU4pbUmUbOkhV3+MAnaUshaofheB5naqS5EB+4/HsQFJ9X016Yzc2nUVUhbFbbskbbqNjY9ZY\nRvlsahJWOZ5hlccrqu6zlpIun12g9v/zAOFThwjNayVgytu99VZzv8ZGAae5nD45UiRC9OCDYtzS\ngtvWJqy0HNp57Ke0Yk5Oip/Ly6VQrx4zNVEPh+CODr1lUgVJt5I68pg4lXwJUkFY4p3EXSMDg54q\nynGMDmYzt3WUSaOOJ8gXAQVtga1F/GhgNxg08utqKw9ZNxE9liN6KU10kR+/HST8ZruJyqjXbtuf\n032FSsNeJVC2kzNYVvH+zDUlr+dUPSRN8Rvnqe7j1aJaoILFKigtnnaSVs8EJShWfIBEmREeCykB\nUwU3O/AqcWH18YhQRBu32U3dBlRL2L2b7naF50ZqpJ2009aKa4nVzcOSEMnro4EajLE1U7MFKFUr\nppObrXQJb6EWow27mwb8PQ7Fk0b1YH/nUajqKATP61S1TOYiLYPt7VagUeGXX/D299uPTXdhXChY\nYzwTibzFCiohTn3ybLRqoh4VVmXG2JERotH95gUpZuZKwJRbILn7bSoloFOXBddprKmUdT343HTW\nSb5GsZjVTVin6Wmxfr299u6w8ngNDYmSLzy+VNZWVa3adsmbpIK0UlY70zK/qNsMSXMcx5gmW2h0\nW0d5bh2S+3s0Q1UT4qqlat5g8AueaTIP2Xf4ix4SBNfO3uvXNlF2f3Yo4GUbJ1Xq7hvKKq/nlO5C\nv9YX417jPMsBVBVCOPQN0mDJPHn2U1kGhGd3lRlxJQQ5xYKqjxjFaJEWPVsivTyaqIkiFHF0//UT\n58kfUYoSj4m9LX9bSVkVdXu1LzU77gRZ45tUK6bOgimPSZrS1Ed9JZZQN8kbCLwuqe7c8xLfHIJq\nsArBM1SJOAzwZDPlXsyrsZh2yWu8goPcTrW4qVZ
PCXdDQ86gJy2R6ns33mju19oqxj0wIPqMf1pc\nkOIP9hsWTwmYXV1m7c7hYaLOTvG7jIHUuaByi6f8XE0cxK1Pcq5NTcIyK9fAa6kU9XjzBEHcasuP\nPb9ZwefQ1GQdqx9rY5BWys2QGddOtXQTJiJH30XtOjLT1keLrtmZIaJ1tww2vMtNYCVWFeQNhkou\nXnhdziEqWjpBRG1k/idtKv5sIcPiWYlF0ot767M5onya6K/HiVY34d/d9SrdhX6tM3h6jfO0SwIk\nS5o4/U3JvzkJjLrstmofur4SlLB8Zgd1W2mr4b56F91l1KEkElmHvbjx3k63G5ZT7iLcRm10E92k\nrdnpFGPpNOZO6ix5f5EWiciE92ma1pZsAYFaqVW7bk41W3OUM/aXllCdBdPufPT6PerkSu43vrnW\nfxvXukLwvMYUdMZZDjBBJBLS1XCU4+Yur06ySy40OmpNMCQ/y2ZNEFSf0WhpLCWHQPW9hobi781r\nhNycxc1WPrnltbdXuNbyGEhdtlf5HB01gXx5WV96ZMcOMwsuz5Y7NWU9dk6lUqRLcTxutcjyOXML\nsHrs5RySSatLMre+ejkXg7JS1hzcAlY5AF7RnP36LqbJ+MZezxLNDfqP79wMVuJqyrx4eYy66W99\nwWCazH+YWSLz+I2RSYeLJCydy/r9/H59ezlFLlXSQagNU7nlKao9Bp34uHbTbouVz62WIweGOMVp\nmZapn/oNWEtT2gJJEgxjFKOdtNN3vOdO2kljNEZt1Ebt1G6bEMfp0U/9xto8SA9aPnMaj5OFM0KR\nkqyzcYqXWDKTlDSATgKeXRxnX/Eh2weZGWydYjb5OqiWULvjbgekTtZrJzD1G98cZrcNViF4XmMK\nwoKkSzwjy4b4gQopbkFRy5DoMs+qQGpnfbUDWgGfeQMsl5f1CXTk0y7Jj/rkrrR2z85OPeRGIgJI\nl5fF2FU3XgNoYU1gpFqfcjnrfrIdCW7crXlx0Yz7VI8Rt3Cq41ePPYdCp/jaZNK+jqfduejXSml3\n3lU7vpOoSuVUiioHwGsxZ0Oq+StNIXD4lLx4aaOXxNLl856Xztb66EKH1UrKIy2pL1Y7608AtVOD\nLpWbW8hR+kCaxg+NVy2zbrnW8Uq+o+o1g6ddDc8kJS11Op0sWxxOucVTwouEJLs4TJ1lz+sjTnEL\n3PJEPeqjkRrpFrrFiH90cnH1+miiJtt27GC1mZrNmNK8eE+1qk7SpGUtpTtyP/Vb4lHVGwJeIY5b\nXPnfAt/fzXodlHWyXv82NqtC8LzGFIQFSU08wy1fEorsLJde2tZZ0uzGzS+u1f14vUtptRwelsCU\nN7aVgCQBk1tDYzEzGQ+3/KnPeNw6ltZWe0up01PWueT7NjSYbXO4jcfFdmNjRNu3C1jkgAoQ3XST\nsEr39YljwqGXx4WqwKZLtgSIJEb82KtQaBdfq8tQXI2YSzvYqnZ8J1F1wbMcN+FazNmQCjjXcppR\nN1Ipk2TkxcsYXSQQ0W35vOfdp0nkDBrz16Utl1YKY2kSh7+jQHSsmsGZsqMKbnAE0IS1vQNpwmMg\nPAaamqvOXZdyL56r+R1ViYKKkZPQIWFqjMYoS9mSNmV/YzRm1NFU64xKoORutNK9VloH26iNeqlX\na62MUMSSTMjJ4ihrcd5MN9PddHdJ6RW7h86t1u8jRjHP/ekerfnWklIuHdRhWctGatTGmyYpabFE\npihFHdRBvdTrOWZT/Vtwqs3Kz5GNtE7KucqbIyGwWhWC5zWmasS5cSul1apoXvT6sYCqF8xObrY6\n66uM7ezvFz85xE1OijZ5TOfNN5tWuslJqyvs6GhpzCJA1NxcCqLqvioE8ufWraVxlg0NZkZbbnHs\n7RXuqlu2mLGSvAao3bOx0d6FmMOZ2t/UlNU9VkKo6o7r5dhJ2QFpENZML/1v5vjOcrWhc/brqltN\nFSlq4fdzdGB
/mg4dGqe1MixREsYW0+RMKm6fu6icpauwS9/tuYFpze47BNCR1ya8wvj4oXHCY6CR\n/SNVs3jWw8VzkAoqmYuEDrckQ7y/ARowwG+apmmURqmXektAkceaLtOyxY13kiZLMtruol1G7GiE\nInSUjlIzNTtCHG/TS6mVXbSrpOSJ7M8NNu0+S1DCyFIr20lS0tbau4t2WWC9gzos2WVV4JTQnqSk\nBS5VeFePm90xd/pb8JLddiPkNtfrXSF4hnKVvMBV3VV55lmvbn86yFT3zeXE5zJpjcy0qovt5E8O\nI3KsQ0PW7WWtTZ45trvbhMTOTgGt6bR1XFu3ijHoINzumc2WWhYnJ52TC3mBWgmd/LVdPOrOnVYw\nlzGYHOCcss7anQvqtkFY33TngQqi1yNghnJRmohAdOD30vTYY6DHHgPNlWGJKjZDh9xIxQPJ6CCm\n2ol+gmwvTc5gWrP7DkpH5bi5eh1rmrzBfWGtQFNzU1WDTqL6uHgmKt9SqR4nGVfZTu2eLF1uoCph\nJE7xklhK/rnqjilBUX1Id1g+bxnbKOMWVbfdSZo0YkaXaZlylNOWPOHxjzImsp3ataDHYbid2mmU\nRn0nE2qmZlqkRUsdS/mIUpSWaZluoBuM9/qoT5tAiEPsIi1SlrJGsiR+XkhraDM10wRNWBJF8WzB\nPMZ0iIZKXGjtjjn/W9gs2WX5uSLPn1CmQvAM5VncXXVkxBpzqVoj7axWOriQYDQ0pLc+qjAr4xgl\nnG3daq1zSUR08GC+JK6Uw58OIKUFlYMjt3BKd9JUyjpGaaWMxcz95fqoVtOJCefkQl6fst22Nqul\n1OnJYzCle+wNN5juvF7Knehg0E9iKCc5ldepegxjUU61Ju+/P1+7BEZBB6ZdyypS1KF/O06PPQba\nv3+kLIunhLFMgWjdiVQ8kEyaSiFGfc+PW2TQoOfWXr16UlfTzbVe5+xHQbvaluvyqx4nr2VQpCSo\ncusaV4EKNEiDFqthP/VbXGylCy6HUB4Tyl1sG6mRuqhLmwhI1oAsUMFw2+2kThqlUQsAuSUPmqRJ\nC/DJtviji7pogiboZrpZC7FOjwZqoDjFLVlpB2jAso1M5sMhM0tZ57HnRdvSZTRHOQtETtCEAd9S\nTomJ+qhPC5perPzViN+shgpUoEma1LqBhwrBM5RP2ZXU6OwU4MFdOHWw4AQX2ayAGLWOZTQqXEol\nHOksnmpf/B/w9LR1295eot27rRldOzsFhMnX0u1UQm4kYoW7m282614++KAA7rEx03o4Pa1P4HPz\nzdbsu8mktV272Evd0642qe7Z11cKS2pSJd3xUuFPB4O8nWw2mHNLPVeCKOvjRbpakxK229rytQPh\nNJWSy/WqHFHu0wuU/twBGj9QekNAUtTamwWam5sqCzpZM74uE+wscDqIHj3k7QAAIABJREFUUd+r\n13g8ovrypOby4+bq995Nvc7Zj4I+p8p1+VWPk992dKCqWrs4hEQoQsu0bHlPlvUoUIHaqM2oNxml\nKKUpTYu0WBL72ERNNE7jtkmLnFx9dXU6pWVStsNBbIImaJImSwC0h3psrY8gZzdaPm8islg9pbWT\niCwW06N0VDt2Dp58rmqCJ102WTWBkwRVuQ47aIcxhjvoDuM4qVZ+bjHldVT9nI+bxUp6PSkEz1Bl\nS2c11JX/4HKCC25RtXtyWJTupmqGXFU6CypPVARYLZuAsIoS6SFXzaLL40mlFVdXN7S93bqfdFWW\n1uLhYQGuXuAzEjH3c4sLvfNO/dpwF9xEQr+Nenx0LrUcgCfss6P7lt1NjmpCn67WpHr+1CSZz7Vg\nfglKaaL075XeEKgHqZYdCaJjh8Zpcq1Aa4x+VgtWsAmN2v7lx801TaX3bsKLUH+yc3N0q5+pHiev\n7pK6ups6i+IgDRpwFqUoLdKixT1WhUK1lIgENOn6CrK6uU7SpKOrswS1buo2M
rn2UE8JFMYoRgM0\nYFhH5RyGadhYwz7qc6yLyduyi/lUH73UWwK6EmrHabwkvvVBerAkVvNd9C7L6wQlLC65shyNnAfv\nSwJvK7VSL/XSIi1a1pMfjz7qsz3/dJZYWW7GqzaLlfR6UgieoRzllPBFjf30Gy8o2+AZUe3KfKiA\nq3uqQCLHLuM3pWtuR4cVJoaHhUVQvr7rLnP80uKpWg7V9zmQ2MVwdnaaUBmJiO102WNVS2ZLS2lb\nN91kurcuLpbG4N5xh4BAmWxJJ7l9ImG6yMr4Wul+qx5PXYwlT3DELZ5uyYL8fK4r7eKkcmtc6mpN\nStgeGtKXpqmKrgXzi07l0NY40finijcE9u33Vwe0ynSnWnZKXEHTVEo/RTl8VKvhbwqpoOI5CRCV\n3rtxvwgNV9xOfO14rKTfi3m7Y6C6cKqAYRe3mRWVbUsghceT2sV28mytchsJk17qQKqWPd2Dx2hO\n0IS2NIx8SCBspEZby6Yue+xtdFvJ9iKD9pjxuo3aXMfKH1nKGvsnKFFiUZYPtV+ZpImveYpSlpsV\nMlFTC7U4xvzKY65aTP3oWkvUdS0oBM9QjvJiaao04Qvvo7dXD209Pc5JeWS9Tql8Pm/ZXiYq4hbN\nyUmigQERI8nb2rLFBGHuOsz7UPeRGWbHxqwQK2HXzhoZi5mlUCQkqZlqJyas69LWZh3X4CAZWXud\nYFOFMbdyKV6ti9xia9eWHCMHQbdzS3XD9nOOBWkhlet08GC+soY2mcqFd0elyZ22VBWICh9fo6lD\nc/6gs9z+fEi17JS4gjpYrt+fz9NjOaKX0kQXbTinysPfFFJBJU3e1kR378b9ItRr6/Upe1fbyoHa\nLlYyKBDgbqEJSpS067WMBgcsCbbLtEx91Ee7aJelvAqfx27aTXGKW9xQdVDNb4RIiynfp5VaDRhT\nY0kbqIESlKCx4oNDle7RT/1a0OSPCEVojMYsfycyoQ2PNZT9eQHQERoxMgAn82Z2WhUE+WOYhi3J\nh1RrKwfRbbSNGqiBOqjDsdyIPOYyYZGbpd2pjRA660cheIZyVC1qBaoZVgcG9Flao1EBoNJNVk3c\nE4uZLrf33583XEnV7Vpbze2cYFYHwm1tzlZZoNRtV93faV9dW6OjYrzcNZaXs+HuuzrAk+JuzNKV\nWEq1DutA0k5eMt3yMcpasBLQ29v1SYn8nnuVWEid2pL7u8VPVQXUypRTkiSvqop7cz+Jb/12Io9l\n3CpTNV2WlWv53EKORp8apdQXU7R8tjg5B8v1wXye/jZNjpxT9vADNNw5NVUL11UVMio5pO4XoZvb\nx93+OypNlQB1jnI0SqOUohQt03JFF/N2+3JQSVLSk8VRF3/pBsV8X/67as2TtSpV8e04FHLLX5ay\nRrvc6sgf0vq5TMvaDLQRilCi+ODv30V3GRDHXXNl1lme0Ib/fUqwlmP+Z/TPjBjXrbSVQKA76U4D\nHo155k0An6Zp6qEeSlHKaOcOuoPaqM2SXVhdyxEaMSC9gzrobrrb8rkb4OvcrYN2mw3d8GunEDxD\nOapa5Sv4RbrqzukGg5OTArDuvlufYEdNgmP3bG52r4M5MWGN19QBMf/8rrtM6NHFecZiRNu2lcKw\n7iktofK1Gv8qY0TtAC+Vsh43vladnfbWx74+/y6lbqVP5Bj5vDlI68BGdcN2Go9aq3RyMjgrvFfo\n0u1TVRh1IAJdkiS/qspNp1Fyvv5V51QpQFXgsszhfXptrXQYaSICUe4TOUr/cZqSe5P+M666cE7Z\nwy+OrUzO8NxU0BeCumRNKmRU1wv9WvVxrwyoaxEnJwGNw5TXWo9c8nxRrWNu+6oJdiZoQruPLhFP\nkpKOCYl0YCmz5eYoZ8l2207tJdbCFmqhJCWpl3ot4M8trmlKl8xL5xrLQVW1KHJglWNoozbDKqlr\nL0tZC3T3UZ+xRsM0TDfTzTRKoxYrKV+PI
RpyBXw1gZO0yAYJimEsaO0UgmcoV1Vy8Wy3r5P1TS03\nwoFJJhLigMVrXnZ0WEHHCRL5Mx4XsZLc4ifb0sVY2j1lzVEny6bdGPizrc1aN3RkRGTilfvyhEo6\nwNNBkw6ypQVX164fOYEaL7fC4VBak2UJHlnOxo87rq5/Wau0EpUDXbp9qmI1lEqTLRHokiT5VVVu\nOvktIKm+rqE4vPfMzZUOoziX9B+mDeD0mnHV0DQR9RDRGAXLOgEa7pyaCjp+qprlUjazKrfGVAbU\ntYiTU2FKV0rFCQ6cst6q2VgHabBkPXm5FAlDuv4KVLBYOhuowYDBQRrUxocWqGCJJ+XWVBXmeqnX\nYiUdpmHbcjRqjKm6JnbZanlyI905pQNMXvJElnqR5wNfD2n1tLMkt1EbpSltZPX1k2DKzkIdBCgG\ndY6HllN3heAZylWVXDzb7cutXTIhjYTUsTErnMm4Re7CKsGVgyJAdPSoaOs3fzNvAGlbG9GuXaIN\nHhspP+eJfiQ8yJqXuZyZPdfrs6fHCsPlPmVdTHnxr8v4K7Pocuux3E6FSDUL7siINe6Vj1l3nNXE\nQxxInECNnwMSNmUG36kp5/I4XgFQPW6VSgddbq62un2q6qruQAS6JEl1IbfrX+mK20HCFXcDPR/H\n/2MR3v94P33kzbXSYRTnMn5AxHUOPzlMk9+Y9Ayd+Xy+emAdoOHOqamg46f8lEvxKruSN26f1ZO8\nXmRXq0RPLePknGp+6uBAVzNSVxfSLjkR70Odp6wnyhMVEZnJiiIUMepmEjkfJ9l/kpKWtnRw2Eu9\nNEET2lqk3HrL4yZlXCfXNE1b2o1SlCZowhXcLLGcebNfuT67aTf1UI+tJbSbug3wkm0N0ZAFvu3O\nY96WUwbboG+GBHWOh5ZTd4XgGcpVlVw82+2rS0ijA5TOTtMKpsueq0JLf79oK5nMW96XcMsBZedO\n676yFidPzuPkshuJlJY+0Vk6t261utfq3HXtnhzK1f10WXQl+HAglxAnwYjDrNyupcVshx8rDrXq\nWnM4dbKOqTG8dnDGgdgui66dBb1aLuFc5VzUyXGtTVNgMXdm47T5vAJzRJQioiQR9ZFwvR0nyn2z\n6Nb62UNUaF4zQczrHKuQjLQwtkZTuTkqNK/RlRTRJws28LVWoMHHB2n0qVFfAJPP5zd7SGHg8lMu\nxaucrKibxcLq9SK7nmvDepXdXNU4UyldPKEav0lkBQuv62kHqMu0TP3Ub4zDzkrHrV+qO6uUTACk\n1vPk/cnYSgl63FU1RSlLXCcXX5sYxSzrxqF6N+22WOm4C246X+rCy/uXVkv5Hk9eJMcst/Gy7l6P\nTb0mDQqz6LorBM9Qrqrkot5uX/n+9LQ+IYwOLHt7S2MPuWtpczPRrbfau7K2t4u+BgZEuxzOeNZZ\nHhtp57KrezY2Wi2IgJkJ160+KX9yy6N0Q+Zw2d5uhWM5RlnjNBo1LcpuNwuWlwWsLy/rjxXvl89h\naKi03XKhUAfEdqqq62o1lSbzG28zjTsoSTBMkva/QPrfspjU3Jx/EEuT4/qWZdmSUGjXLoPd9P4y\nAaZGNw9yCzlKfSlFyb1JGjs4VtfWvaDlZEWthoWVKyi3u6AusjeDG6DdXDnsyBIqRMxiuADCAVD7\noXYaW9NnSpXz5zDkJAlnOrdfLg54TdSktQS6Wb84vKnQorbDrbsyVlQnbmXdTbspRSkjHpUn+OG1\nQb1Y6Xj/smaodDWWyZB02YW9nMf1CpRetdnHXwuF4HkdqxqJT3Rt2vWjJoTRlcxQwU9CIXfL3bVL\nJMRZXnbPOCthUP4uE+nwGpgc+NRapX7cbrkLL3ct5gApLbtyTCMjJlwNDQkwT6XMz3nNTSk5RhV6\nm5rKi9fkUq2V2awJvepx1UGhn/PB73g2OnOsL13vlq00lX7zbyVjTcY/W3Rr/cx+KrxrrXSN3Cya\n6voq2
5dl2SqQsM7aHTc2p/HPVRdgvMgJrvn8a2LdCzKrboXusE5WVDcLayV95xZy1HGgg3AIhLX6\ncLvbrG6AOcpZ4gg5bBkxhgfM8xtz+vnp5u8E405uv1x2pVzsXGT7qd82FlRN8qOzpMo42K20tcRa\nyeWUtZdDrt/yOGqmXrk2vA9etiaEsFBcIXhex+Kg4FSGo9w2JXzYWan4+3YJYXSxjWrWWt6macXM\na2GQu8LyupyFgtVS2tVlXYvpafdMtPLz4WFrQqQtW0wglv0nkyJZkEy6s7hoQje3BqsgzRMxqQDH\n3X6bm/Xr41fcWukGml6T61RitayFS62dKnJj24xusQ4yMr7+x0NUGFtzBwwJhkNEtI2IukiAyaTY\nr/CRolvrhzTQSaS3aHK4WSaiQTJcd9XsuY6WLe7+y5P85IrtpEhf+kXOqY2oMF6gqUP+XEQt51MA\noOYE13L+eAw09MRQ9eE4TaXHq9ymNtAdtpK++b7JuWRNLr5131EcrCqpv1lLOSUK0pU56aZuAfiP\ngbAfNLSmz5Sqy4qqxobabe/FSqeurwqSdkl7dHNWt+fxjhyI3ayVunjQLuqiu+luT+Vx8vl8ydjs\nrLN8vexci0OFCsHzOpZdGQ5dVlIvUJrLmZY9HrtpZ6Xi8Za7d9v3weFzZEQAmewnEhFWQGnZW14W\nVsydO/M0OSmArq9PWEWzWSuQybnK+cnkRRxOJZDrLJa6pyxxYrd9U5MA3HTaWiNUTbCki6lU3VtV\ngJP1TQETouWaB2HddgNNr8l1amG1rIY1//7787Wt01lSJ7Ly2pxBjSe9X3GNdQOMaSLqJgF2HApT\nJECrQFZwVNdXZzFOs3bUDLiKpdLRssX34/NQ21dVIJGRVreNB5C0QIJbXzopfTjBdWGtQNlvZH0l\nP6pIQWbVrUbCIY8up5X0LfdN7k/S8tpyTRIZ6cCTw8skTXqGgY10y1Utk3aJeaSWaZn61vpo19wu\nmlyzd6F1sgB2U3eJFVIHZE7r4uZmaUna4wGA5fZ8bNM0bWw7TMMW2JVtcYsqh9Q+6qMsZT1bconE\nOaUeDx5vyy2uEjaDLnUS6tpSCJ7XsXidRGkpdMtK6rWkBbfM2dVjtLPs2dV0lFDD+1EhkqgUOvjr\nrVutcKa2199vjTXVZVyVTw56gABgOUfdGNXEQ9yCqovllJAcjQpwVt2UJdxKIFVhV0Kwn2PoJC+g\n6XTcnN7zIj8wGcR8a9Gmc4dkgZEganP6FV/zSwwYxz9nZnwtNK+5A0ba3NeAQj+gp7MYq3DDXy9r\nti+ZXLHPbtZvJ5nwO6a0r5MdYDnNxU87TlL68J2YpwoJmQwFaOGvSsIhjy6n5SaOkvvycfuxngYF\nfQu5HH0unaRPjYM+VNBbAe3k1y21XDnVyrSzHPppy06yj67iQ8YmSpfZDuowSoNwleOuLMfVR33U\nRV2UprSREEgFYF35EA6KquWSx6vqLKrywbPe+k2Ao27P++HjaaZmGqVRRytyqFAheF7n4hfTjY2i\n3Ih6Ye+3pIUfeFXjPLnLLb/o1SUh4jGN3Bqo9sVfSxfYSERYQ/m4ZfkRnuSmv9+Ev2TSatFdXrbC\nI3evVcu/qJ8DJuzzsfM15GCbNXMplMxxYMBqsQUEYPNYULdj6AXq/ABjNSyOfsCvGlbVmseXKjBi\nV5uzGmstxdf8ZQmMbUSF8TWaOjRHhTfXvAGGCoW62Ek3+FJBSYUbJ9jRQVYzmf+FeologIja2XsD\nZFpp7eZn16dfkCy2Y2T39WLVrtSqmCbPcFxNRt0I+bnwDsrV14/1tBy40elAOk2PAfQYQIemsu47\n8PFq1iiocXHp2iw3QYuf8emgTs5X1qmULq5cXhMO2Y2LAxt3fx6mYduER9zyKQG5kRpL5qrW2eQP\nNS7Wz/qq2/NzQ433tINoVZsh0VWo6igEz+tcdllbufVwdFRY33RQyuW
1pIadu2gsZoUl/hmHsMlJ\n0c/u3QK2GhoEaHV3i/cEHOaNUizcmru4aGZx5ePm7XOo0Vk8uSVRth2JmCDc2ioAVk1Y1Nlp/t7R\noc/iyteQWzClRVRCBp8THyOPU/Va7kRda/XGQDlQE3R7RP7ArxqxoAcP5stus6x5K1BjV5uzmpZY\nvuary2R1LZVusl7E56LGTkqqGSOirEObHBQnfE6EW1nl9VeEvddHVhBLUkmcqC95sPhp3SL9WLUr\ntSr6ANc0lb8U9Si7JC66i+CgXH2dLLcq2JdbkkE9pw6Nj9NjAO0fGaE1n19cOjipRqmIctq0O17l\ntCX34eAnrXgt1FIClxxUB2nQm8u24mLLkxDp3J91MZU6SAYJ92PVQrqbdlOMYsY2uhqfXqX7nuLn\nBo/3lMDrBNFSXm4ShHB6baom4AngnwP4ewD/AOD/0Hxek8mGKhWPn5SWR7vkMJVc3NqBgLywjUbJ\nyACrfjYyYnV/dRqbaVXMWyyAHBZ1cotD5TUmZabZZFJAX1+fgHJ1LBMT1qy1sg6nhE439fUR4RML\n1PjoAUrvP0TT/2rNYh2Wc3JbJy/ycmPAz3EPuj2i6sCkH1WSXKhWcOhpbXyYr0rW3K3EiBellf3V\n13aKsu36HLbTzY+XcZGGn67i6xYSACznllReczDjbVdYm1V3PtlZtasiH+B6rSdldroIroarb2n/\n1j+Bci1+6jm1VijQ3NSUb+hUxcuQ2NWM5NtxUHCDh3Lmane8ymlLdxPibrqb4hSnRVos2Z7DrddY\nSdmHjIF0S/LES8d0UqexdqpF0y7mla9PH/VVBG1e/u+p8yvHfVenaljY60HXO1BXHTwBRAD8I4Bb\nAMQAHAdwu7JNjaYbSienOoryolYHparKseo4WRv5Ra9T4hqd+6pfCLODGt3aqMDLE+2o4KnOT+c2\na6fRUSL8nmkB6fmDOQtgy3jS5WUzhnZsrLTWqU7qsXK7MeAXZCttb8cOcc51d3uD9JqpTJ/Darrp\n+gbyNOlBz8vcCmRaD9vI2Q3VTk6xmbwtOZ5+EtZHCZ7NpM8yK/fpoNL5yXjN4WIfO0iUc2kgoqNs\nblNkAuUYGVl3DaVZ29z6202B+KHaWbX9qBoXNZUaV+tdG130vd7B3isA6LarBjx4OV7l/h24jZeD\narnnjRsg8xhJPhavgFeOO3Ct5eUmwUb/XVZL1ypQe1UtwPODAL7BXn8GwGeUbWoy2VD+5QSlqoK2\njnkdm3Q1veMO6ziDiEnUvc8hU8ZSTk9by5lw91jd9p7X5VPCAtL1+f3UceOacROAW1jVONaentK4\n2HITRvEEUEHEEXo9Jqplt26UJj20uWijrbUWVZoQp0DeXW51MMspxqlkCR+PfG5h2+na5vskbfok\nssKpen7xNtR14GsnYbbNYXsnVSlwkl/UDC4MBp5JNcjsrG5tldNXOcBRroUxCOUWcjR6IE2pQ+O0\nvEE1YN1kV4/Si6trNeDBy/Eq9+Lez3irdd5Ii+hW2lrW2vnJWstVb5a4jfy7rKauVaD2qlqA50cB\n/Df2+hMA/l9lm5pMNlT1ZFdKxYt0F+V+QFC3v3QP8dqOHYjp3i8UrPGa3d2lGWUjEWuNUO72K8HQ\nixV28uNrlD00R723rFksqSqs8wRJvB87uNTBvpPF2glUq5HcRlquW1ocQL3GGU/y+Xz9mya8yM58\n5WZ55Ovs1eU2zbbpodJjxT8fJGs9TQl2EhKdQHmw+FpmqG0iors1/UnJ7aSbrZd1ILKunfxdzX7r\n8bzM/0q+PGB1kcUV8MCokRhn8PFgIDTIuppubZXT12axJsiL/OSBZGDrWVGtYQepAODH1XWj4KFa\n1shaqBzXVa5y5647rtU6p65n1cM5tpGqBXj+ixA8rz05gRsvpVKu/ICgTvLL0ms7bjGeaj1MCUZq\niRTVBVeulQRTvr1d+Red1ERDKmz
L19y9WP4us/W6lTThayLrl8oxlZOxuBItLwtLp1N913Ktj+WC\ncj6fv7Z9DnmtTTvAky6lu4koVnyvtXQfucYvSsCzswr2F99rJwGK/L9HlES22UVyB2WeCKi/uJ98\nLV3bORAuFrfT3dTwe4zV7dM2c1WUf3++KjcxLK6ALDHO6FOjtoDjx7IYZF1Nt7bK6WuzWBOMi/xD\nCGw97//P91e9VijR5ljjal3clxPHWkvxGpt+3Wx1xzUEz1BBywt4RlGZXgewjb3eBuCEutHv/M7v\n4JZbbgEAJBIJDA0NIZPJAADm5+cBIHxdR69ffBFYXBSvs9l5XLgAABmMjAD/8l/OY37euv3nPw+c\nO5dBSwvw8MPzaGsTn8/MAC++OI94HHjuuQwSCbE9b296WrQ3O5vBK68AwDze9S5gzx7n8c7MwNj+\n3e+2bq+2DwBtbRns2QMcP262Nzsr5vfpTwOJRAZLS8DCgvi8vz+D97wHOHJEtL+6msGpU6X9vfji\nPAoF0d/amv7zxUXx+cyMWB91PoODQKGQwdCQWN/jx4F9+6zz3bcvg9VVc7wf/nAG27cDp07N48gR\n4PbbM/jxj835qfu3tIjXt902j6YmYGHBPL6f/rR+fQDgwgXxemQkg+ZmYGio9Hjqjo/b65//PINM\nxlzvmZkM9u1j2xfHO3/bPDANZOCtfS/r7fj64Xng+Mb8/dn9vQBAZjYDLAHzP5oHUkBmWwaYBeaP\nzwOfBzLnMkBLcfz/n/K6Dci8lgFOAfNH5oEskJkv9l88vpm24ueH54EOIHOp+Pn5eeAIkLktA4wA\n81fmcf+3gP9wJYMLAA5F59HaUDw+I8D89DwwX5zfADB/Yh44C2S+X2wPxf4uZ4CTwPz/Ng/8EZB5\nVJlfKgNkgfn/eR74v1n7fzgPfJydD9+ZB4aAzD9lgEKx/XeAzM9t1vv4PPAwkEl4PD7q9nK9RjLA\nHof9n8sAM8X1CPB8Oj5/HA/jYSQyCczeO4vsf8ni07d8Gv/18n8FANy2chumb5mG1Pz8PF489iIW\nexYBANn/ksUf7fwj2/Yfjj6Md95+B09/8mkk4omKxsvHl4gnfH+uHd/8w3gH7+DpzNNIIIEH/vQB\nnDh3An3DfZi9dxbHv3u8ovUN6nVLpgUA8K7ou5B6O4Wvf/LrFa/nucFzWFhYAADMNM1g39i+qoz/\nYTyMv8/8PeKI4775+/BZfBYPZB6oyno9MP8ATuAE+jJ9mMUsjs97P377YD//2cwslrCEC/MXfI3/\nxfkXsYhFIAPMYAYPzz+MF/EiFjPFv5/5LP4I9n8/1X7Nx/cIHsHD8w973n8Ws8jOZ/FpfBqJjPh7\nk9tsxPHbLK8/j8/jXOYcWtCCh+cfRhva6mp8G/36+PHjWF1dBQAsLy/Dk9zI1OkJIArgJxDJhZoQ\nJheqa3m1BqkWsELBTHDjx1XT7n03i5xM0OPVPVS1wpYbc8fnPT0t5ptKCQtdoSD6UZP76NxgeXkU\nLy7K09PCdVa1XHodr594TjcLp9N+QVs/ZR3V9naNy22Z1sea1+MMUI7rm6bSb+ApzWd2mWSl9bGD\nrJZAnUup6gbbyNrrptJxgIjiZO/Wyi2iTez3rWwfp/mp54Ic3xBZraHlWBj9unRXySpeqWe5U3bW\nIK2YGyl1jXILOer4i47AXFmDVDUscrU8jrVyafbaj1+rY5AxoPVkAa6nsRBtHtf3SnQ9zDFIodqu\ntqIPjAN4FSK77e9rPq/JZEO5y2/SGa+lMry6sjpJt61dn/l83ti+u1sAYn8/0Q03CNDzC3C6eftd\nK7eSME4uyuUCHS+X4we01ONb7g2JSsVrlNrN26/rbLk3HerB5chxfSXE6WIinTLJyiyucj8OdFwc\nqKRbboqs9TBBlrInV9X/BmoiomkSQNpAJmguklnqhO/jND+nscr9hsi5Tqid0uS8LnZyIcWS88ll\
ne2MYCwvUfeAAjR86FFjJlWqUDAkyCZFXpUm5v8JiRKN7orR8Vr3zUXtVc10OPnew6qVfpGoFOF77\n8XvxH2QM6DRNUzd10xiNbQjsceguNy7UTpX+36s3EK6Groc5BqmagKdrByF41o0qAQenfe0son4g\nwKmkitpnPp8vyXprF4NZrvyulZ/xV9qXW79+VckNiUrkZd7ViDHVqR7A03F9JWwtU6nFTbXC8ddp\nsn4jDyv75sia9Ee3j58nLz2ia2eSSpMXDZKZ/dYu5tNOfK7lmA3LTSiVJkdgLTmfXLY3jMMHzBJL\nU3NzPgZUW7klBqoGgJXcXylaAIO2eFYy9iCTM6kK4jvKq+WwVglSvPbj9+I/yPFvtMWrmv1Xek5d\nD4l0roc5BqkQPENZVAk4uO0rLW/cVdar7KxaXsar1iJ1c2v1YkHL5axutuXK63pvdDmOoC2ZXq2U\nfo6vtGwHmV3XrxznVY30v5UqRwIo+TdykpivIlktoSBhjZTutPIz1erZSPpv+1b2+6Cmb5CwSr6b\nvW4hyv1POUr/XprGPzVOhY9r1q7Ex5L0gJlm7Xq9PlOh3av8AqvL9nIYY4dEiaWR/fsDs3hWQxL6\n2v68jca+PlYCaNUAsJL7K2sFSn0pZet+Wi5AljN22Vf3F7rr2q2gjvvyAAAgAElEQVR5oyHKTm5A\nvJEX/xtt8apV/0ElUaqnZEyhaq8QPEP5VrnXz2pmVrUdv+U8/MLL7t2irElvrwmLuja8WND8lBfR\n9VGPDGKnHTtEjGVTE9HiYjBtBmml1Fm21ay8VRWDnPudXIMtaYPnaloKRjdWV8ulGguqPmVWWB7/\nKZ8xm33k+2omWv7cWfpe+vfYhf4fTJWuGR+nXQwrkb+SMZXKL7B63L6wtkZTc3PeoXODvmwKawUD\nsvAYqPsL3RbAq1U8opMbMQfI1FzK80Wwl7GrF9e8r/6v9FdtzpVakmsNUV7HW69ATLTxFq9a9R/U\nMajnYxmq+grBM5RvlQsNMsZxaEgfI+k3RtRpe517iG573Xt2JVT4dZuf8iJe+3XSRoIqtxT393vb\nx6l+aipFFI1az4UgxI+Jl9hQv7J1OUqT8W224BRPaxngJT0YBakdJCyS3STKn6TJamGcIhPEtpIV\nDGMkypvE2fYgUfYEJJIB9RHRDcU202zbDjKhspUsMZ8EEvGcaSqFVYfn+P9avND/zAgVmgti7BwW\n1VqadoBpB3dpqv7xUFQz1+1a+aJrxK2eqoXQa1ypDkyCctM1XHH3g7Dm7SI4t5Cj0adHKfWllGPM\nqHpxXQvQzufzWmus03qpgFxriPJqPd5oq6IfXUsWPf49FdQx2EzHMlTwCsEzlG+V63apuk2q7bjF\niPqJj9Rd1Om2172n9qW7bnNyAfUyLz9rmMtZ4a/G145GzdKWFu9uxXZu1Xwty3G5dpJbVt5K4d0W\nFMaJcp9YoPQfHqCx/Ydo8uNrNjGYfICkB6NKxWFTwqSEPf6tK/uVILZc/KnW0lSfSSJKlL6f+0SO\n0v+m6ArbXCiFTd1ThVqQ6aIrf24RPwvNBZrKTYm2ZQxqmu2XJfsYVjdxd2M1vrWKqhl4bmAaZwmX\nY18fc3S7dZIOTPSwkqPcQorSB5I0fshbH4W1AqXmUoQ194tgCW/JvcmyQKkaCZxU5fN5LeA6wZ0K\nyLVOCuUVyDfaquhH1bLobQTQ8u+poI7BZjqWoYJXCJ6hfCuoeEO1Hb/tBrG9nxjCcpMIeenXq5tx\nMul9vkFZSZeXhaXTTywrd6vu7S0Fbrc420qlW/OqGX4KROk/9pnwxa8bpk7c4icz03Lgk7DZQtY4\nzRgJC+E02397cRs7F9lGTbvsaXGFzU25f+tr2qBeInpQmUNv8WcrCVDtJDPBUVDwnmb93czW5Fq5\nJtroAHEqdbv1E9OpAxM9rKQpfcBfIqHcQo5GD4xS6lCKltecv
+A4vEnX4dGnRm0BTXdxXQuo0wGu\nE9ypgFzN5Edex7vZVS2LXuiiGupaUAieoTa1JFz191cvsUwtrtvsoIjX+ezo8Ad/G+hhZ7hV6yzF\nulqntZCvGwg+Y/7Giwlf2v79fhr7iI3FM2ilyfwWVWtnthDRUTLjMKUraqvDPk5POyAtPsc/pbjC\nSlj1Yvm0eyaLY9eNc5Lc4d3rMeS1RLk1N7yuC1TluprqwEQPK+M0fgjFPoY99SETD+ExUPYbzu4X\ncvxDTwxR9htZGn1q1Deg1RrqpJzgTgXka6Wm60aqWha90EU11LWgEDxDbapEN6pU100JOUG6sflZ\nn3LX0g6K+PyyWX/tO4FWtY95oVBe/VA3VTJuXzcQ0lQCIE7nVGFtjXr+YI7QvFZ90JdAJYFshEyw\nvItE7KV6g0JC2phmnwDA0+IKK9/XWTXVZ5fN+xI6mRvsFfb5+i4P65Rm7dkdjxyJOFW1/6BdoDVy\n+47aiDqY1ZQKP8HPr0CFtUmamst6bo+7zU5+Y1I7rtxCjlJfSlHHX3RQ7xd7jbhOP4DmlNE2yHUo\n9/+epQ7k2vI1Z4GsZ/lxn90IF9V6KCMW6tpSCJ6hNtQy5iY30JBw1d5uhZxKvyx5v06JatTxlbuW\ndlCkwqOf9p1AqxbHvFJLsVvG4VSqijdKNG6cJeeUYlHzHUrnN5Oq3J4nCOov7jtNRD0koHPUoU1u\nJSyQqItZ9W94h2cXETVr3lsujjdtvn+RbfNCn4f18uKKy9o3ni3kvIYBye07aqOsYxWDkMe7Q3x+\nasbbcsfhd5+xg2OGFdMuHlJ1sfWbHEltU81oG+RxLvf/XujCuXGq97UPwTNU0ArBM9RG5p5wlRsg\nSbhZXg7WHVYFHC8ZbFMp08U0qLV0S8hUrur5mEs5ZRyu+o0SLzGYaTK/xabKAG1lf1cQ5durQKV+\n5tRmjgTsRYioSbOf16edRdOre61drU+QAGIex9lGtFps9++aiVYlmNrNL03Copu1WUsp2b583knW\nMi/VOL883nDYKJdHJxCyAzvL+2PmnbrcZwdtQdAp463bOMoZu3UiRJQmKnykQFOH9PGQ3V/opt4v\n9lJ0T9Roc/hJby68qhxjLFl/o0/bx4zaTiUAi+lmceGsZXKdEst3FfrOUY6SlCQQaJiG63rtQ4UK\nSiF4hqqH3BO26u8nw6LpJ76xUnEwc4JaFYSy2equZbUSO9Wj7DIOB+XCW7G7caXJbdT90+QMPHL7\nISoFKhWgkpo2e0hYSNupgm9r0ma1tTy3FPuy+zzio68kWeB4rYFoldeS1a2Z2zpyFUjEi06QGTda\nrYzDUh7Ht1FJV5wgyQ7sLLGSf9hr/IGm99vHQaoZb9X+dONwgyxP9TUXcpT+v1gGZuUYFNYKNPj4\nILX/ebvF0tn35b6yj4VjjKWmPz+WzyAsppsly2gtrYMllu8q9M3bnKTJQNoMFareFYLnJtBmjsGs\nVOXWY6zUPcQrmFUrlnHTKsCT1e4YBAXNft2NS84pL1ZRJ+uWun8ReH7aTXS/LlGWU38FMl1Wo0S0\nWOy7Eoum3dMtdjPoPtX++LHSQaJXcLQ7Nl6OayUqji9/Wz64Pvy6bTtIgpAuY6sd2FliJQ9OGH+g\nfPvpb09rodEOynTvu0GWE+DpyqGk/lOKCm+WQi1PHITHQJ17Ox2tkeVaHXVjSu5N+or/5Gt88LmD\nnvv2q3qoTVlLy6x6rlej71pbmss5hqGrbaigFYLnJlA9x2BWW+W6hNbyy3IzWA+rpRLO3ICTNeiE\nTka7yj/pss6pNOmBSaci8By+gSgPokMgyjuV98iRcElNkojt5HU7p5S+y4VI3fZbfe7j9PRi/eSu\nu3eQOyR6BUe+Pj1UuxIqxfHlD+aDazNN3s8zL83ZAJ4d2OliJdXty3HhVVWO+7EO7nQxm3x8qS8K\nC27HX3TQxLMTNPq0CaKpL
6U8W4LtxiLnqcaPJvcmjeRFXtvla1zN/3v1EItYS8useq5Xo+9aW5rL\nOYYheIYKWl7As0FsVz01NDRQtfvYzJqYAA4fBkZGgLk5IJHY6BHVTqurwMwMsGePOe+ZGWBpCWhp\nAWZn62M96nFMQclpbpkMsLAgfp+aAvadq/3JWjKGfd72051blnaRwQJEw1OYwj54bJhrAsBhACMA\n5gB4WI5XOoG7CuL3q11A4+niB1PF/ZcAtAA4C+CYTSMdAKIATtt8Xom6fLTbDOCC5v1WAE0ACprP\nOgH80qa9LIA8gHMAGgH8VnEsLQBm4Wl9Dclj01ZsDxBrXMZh3nCVcZ7pNPP8DJZWl/Cjwo9wav0U\nRrpHMHf/HBJx5wZX11cxc3QGe+7ZY9lWttcSbcEluoQjrx+xtDmDGSxhCa888woKK+JkmLp1Comm\nhLHf7L2zRpt2/fD+Dr52EL9c/yVaIi24ePUi1q6s4SquGtsMdw3j9fOv4+TaSWMsj77wKJ786ZMo\nXCxgqHMIT9/3NB554RGjn4lnJ3D4xGGjjalbp7BvzDxR5Odu65V5JoOFFfGd0hPvAYFwav0U2qJt\naIm24MXffhEDWwd8t+tX/Ljw9XXSBCZwGIcxghHMYQ6Jck+yUBum8BiGqgc1NDSAiBoctwnBc2Pl\ndoF8valc0Kim6nFMQclpbiU3RVD7kzWQGzMzMIGuCDCB/JNeLba9B95gYAa4+gTQuApcvhOI3gDg\nCARQvBfAAQBnitumAKwo+0cBXGavGwA4fbW2A0gC6AbwsofxyT6uuLQLCDD8VQAv2HweA3CpuN1V\nm224hgF8G2KsV4rv8flxaHwPxNqsARiCgFkVTOWxKcBc4wqgbUPl9zyzEQej/tZ+/PCjP6wIdnh7\nkwOTaIo0Yc89e/DoC49iaXUJr0RfQeHeAvAtACeAtmgbPnDDB3Dh0gUcOynuqkjI8wJLN375Rqxc\nUP8ohKINUdzUehP6W/rRHG1GW6wNezN78egLj2LfT/bhzCXxh5UdyOKp+56y7Lu6vorb992OlQsr\nWgjkQPzoC4/i4GsHsX5lHTt7duKJsSeMbbc9vg0nzp9ABBFcKZ7ETY1NuHj1omWuunaDgk7AelzU\nPu20ilXMYAZ7sCcElk2q8BiGqgd5Ac9orQYTSq9E4toCmUrV0iJ+jowIvtFpfn4emUymrsa0WeU0\nt9lZlTNrf7KWjsGjOGxy6+EMgH3ALGYt/6TLOqcS8GdBWxLQCQDRdwHYC+B9AOIADsKEziSA7wF4\nP4CTEBBHsEKnnbWR63xxv9d8jPGy+yYABEy+aPNZFAI6ATEXaUHdCuBtzfYpCOhMQIDqFQjo7IGY\nfweACIAMxPH8BcQxBUzwLR5XQ/LY6KBNcyMiaAX6HeXzPLODuJao+GMPysLG2/tC5gtGe0urSwb4\n4CjQ2dyJsw1nce7yORx5/QhSW1LGfnvu2VOyz7bHtyHSEEGsMYaXHnzJsBKuX1m39M8BrzXailRz\nygK0ibiwrEroTDYlsTezt2QeiXgCP/4ffoyZozNojjQj+1wWLdEW9DT34LW3X7Os49LqkgG/R14/\ngtSXU2iJtmBn907c1HwTTpw/YYxppHsEiaYEjrxxxDJXqUdfeBQn3zmJh771kCfLpO6c0h1rflzU\nPu2UQKI8r49QdaNyjmGtr6VChQLEv/lQoepGs7PC8lZPbsf1OKag5DQ3eVNkQ+Y8MwNkMkg8NIF9\ne1b9j2EJwAKEi+JPiu+NQAAIzH/SNbkzPAMBTT9i49gL4FEIt9NjMN1SkwB+AGAAwKsQlr5mlAKh\nA3Q+jxk8gwyevTKB9bdXncfW5XkWpeJWUf6fhI/1fRAutJMAfojSW52tAO5gr18CsAXAcQDbi++d\ngbCayeO5pvTJjqtFM8W+zynv83NjRrOf2szzM8g8k8HEsxNYXXdZzzqQhLjDJw5j5qg5wdl
7ZzF1\n61Rgbp127UnwaY22one9F9vPbsdlEidFsimJ7/3290r247DUiEacuXQGp9ZPYcfXdhhrvrN7JwCg\nPdaOba3bMNQ1ZPR55tIZHD993GhDAtdP3hZ//A1owK1tt+Khbz2kPYaJeAL7xvbhmye+aazdV//x\nq8bvt++7Havrq8Y4AWHBXb+6jsLFAo68cQSvnRN3eIY6h5AdyGLu/jk8sesJ2zW3O06A93NO10bQ\nxzlUqFChglToahuqItVr/ONGjKte12JTqlL/Zh4X9ySAR1Cxq6Krpczu8wxQDCcF+iEALKG83wRh\nERwG8ITSdg+AU96H+QwyWCk2fCumMOZ0F1x139VJ59LLXWgjxedFzb6TAJ4u/i6tkmcg5neO9Z0C\n8GNY582PYQKmy+zNAJ6CWK8tEJbXAZQqA3N9uauuz5jJclwXN1J+YwdlLGYLWjCL2YpvxqyuryL1\n5RTWrwoLZe+WXpxcO4lkUxL39d+HX7zzC8f4zu1/uR2n1s0TXq453yb7XNa0qgJoibTgu9nv4t/9\n4N/h+KnjOHnhJNaurCHWGMO5y9Y7D3ZxpjPPz2Dvq3sNSFY1desUmiPNOPTaIUQaI7g9eTsWfiHG\noIsddZN6nKSLcku0BWcvncWxN63uyDpJ996OWAcWP7poiSENFSpUqFordLUNVXUtLZl8MDNTP27D\nGzGuel2LcrThEO3Vv9luoLOwulgGcSykpQwode10+lwaSVTQke8nAdwG4TZ6RNP2SwA+DOAd2Cfm\nkeoComdbgEtAN0Zwj9YUyOTFtTai2Y7HbV4BZj4+g6XeJbRcbMHsn88icSEhrJl/yrbj7sRnlTZW\nYM5bAnwMwmIpvSPl8cxCgPDZ4vN3YcItl1zfNgiL8irE2qvnhovKcV3cSM3eO+srdnAJS0airRnM\n+HbXs3P3XL8owPPq1avIDmSxN7PXAowzR2e0APjSgy9hx9d2YP3qumXNpVUSsFoyCYR4JI6Opg7s\nG9uHxN6E4V4r4TfaEMVluoxGNOKZ5WfQ1NCEt68Iv+/eL/Ui3ZfGhcsXLNB5V+ddWHlnBSfXTqIt\n2obCWgFvXHkDpy8K3/Erp6+gd0svRnpG8PhvPI5HX3gUR39xFLd+9daS+M+SNcMMzt57FqmjKTx5\nz5OGG69cm+ZIMwCgI9aBP7n7T2zXfqB1ACfOn8CZS2fwyAuP1P1NkVChQoUKXW1DVaSNiH+cn593\n3WYjxnUtxYJKiD58WLBdzeXVv9luoBI2PQKzl3PKApC641v8/MdtwFRBJA4DIEBnClbonIGAphSE\na21n8X0OSVL3QcRGcuiM2YzxNHBvxyxu7ZzC/ZhDPAhXYg9wunTDEhZ2LODwnYcx84nicTgPYWmW\n4uNXEw51AXgDwhr5dxAAfwRinglYj2eLsq/dvdVZiGRF52ACPeD73CjHddHT+RSAnp+ZwTOZDJ6d\nmMB68YSTgOZlrDPPz+CVZ14BngWG1oewx+1GhUY6d0/pFgsApy6eQiwS08Yf6vb93A8+h46mDsQa\nYmiNtRrtvOdr70FibwI9X+zBDVtuAABQ0RRfuFjAh5/5MAAg1mj944g1xvDygy8j2hDFVVzF+tV1\nAzoBGBl5v3/q+wBE7Oium3Zh4bcW8OrHXkVPvEfEp75xBD85K4C3LdqG0xdP4+TaSbTGWi3xn4WL\nBRx5/Qhmjs7YuswuYQnH4sew0rSCkedGMPHsBGKRmLE2dyXvAgADKAH9OdXe1G7s0xxp3lQu4aE2\nXrX6ngoViisEz1AVqZ7iH4thgZiYAP7sz2o/rmqsBZ/Tag2vJTYcohMJzCT2IZNNOM+9lgPVAaTy\neb4H+OA54MkjjIN1oLMEEdu5AgFnKiTdBgFhq8VtzsCqXcWx3Fd8zeArfiqBsXP7vEGnCm3N7ruU\nKAa0bCkCxekR7PlK8Ti0ohSipSKa947BNibXolkAvcX
fh2FaRFUlIDLvOrVlJxmXOwEkLngHuVpr\ndWkJKwsLOHH4MI6WcYdoaXVJlDo5Adxy9Jay3GxVmJx5fgYXrlxAU0OT5X1AQPzg1kHEI3E89K2H\nDEhUEw2dXDuJS3QJC79YMIB05Z0VI/bzbwt/CwCINIgTqSXSgr/+yF8DAF568CXEG+MARFbZ93W+\nD5954TPoaOqwnUNXvAvRBuEAdgVX8N03v4tbZm9B6sspIyvtUOcQvpcV8akS+BrRiJMXTmJ1fdWw\nwgJAW6wNf3L3n1jAevtfbjegsKV496RttQ2nVk7h8InDaI22Gjc4Ord0lqyLTvymyGtvv2YbMxoq\nVKhQ9aIwxjPUNaNrsezJRs2pHsr8eJp7PQyUqaT8y6PQx32qcYYfgACuyxDAdr64XQrCUsjjJ+8A\ncBSlcaKq+iGAVZdJlisGkTm2ubitHeQPF+dxDNa4zwZgdcsqZj4xgz3P7UHijoRwG5bZbFMAfhPA\n4zBLpXQW57gOsQZvFJ/txbn9Ozi7wrqVGOHuum0QcOrn9MhAHx9aZ3p2YgInDh9G98gI7p+bQ9zn\n30AQtSTVsiBOZVtmnp8pKW8Si8QsbsG8ruZQ5xDyv5XHoy88asRfNjc2Y3zbOI6uHMVtidvws7d/\nhu9MfscS3yjH9Ma5N4xMtxPbJnDk9SMGSBprsG0CL731Ek6unQQg3FuJCGcvn7Vs1xXvwvt73o/Z\ne2fxwDceMGIwARGHyfuS73135bs48c4JNKLRqDea2pLC9z72PTwSfwTHnj2GN068gY7uDizev4iB\n+IB2Tb1IdyzLqekZKlSoUOXKS4xnaPEMdc1ow610VdBGzWlDM9oW5WnuNRqoV8tzidVbzaAqLWmX\nIBLixAE8BJHBVrq08uviFQhw4vp7CBhahel2OgTTXRcQcaM/hD7hj6pLEMmLfg576ERxLj+E+K/B\n/3NQ0Sr43/Yh8U/F2M73K3P4GkzoBARM//PiPN4LM/PsWQjodHOFdXOXlevO3XX9yM2tuk507+ws\nbp2aKgs6Z56fwdmLZ5HaksKTu54sG0pU115uAVVrherKm6jW5J7mHnTFu9C7pRdP3/e04cYq4y9/\n/aZfx+n103hr/S0ce/MYzl48i1958lfQ80VR/gQwS5W8euZVAMI19uLVi/i1G3/NMvYHtj2A85fO\n45frph/4aGoUTZGmknmeXj+NwycO47a/vA2vrr5qvD/cNYw99+wxrKD8PQnDV5lv+craCn59/6/j\n5DMncf7qeWAAOHP/GTwSFy61M8/PIPtcFucuqumYnaVzCXfKnBsqVKhQG6EQPENtOtnFJdST229Q\nuhbn5FW1nLtbrIvXmNcSDlYB5iBMIHodpnspF0FkuZX7vU/5/HJx/9sB/BkEvOVhlhkBgGcgYKsd\nztK5vOqULG4rkwJdcdhuD4R1N1V8bwQi+yxXG4TFcw9EnVFpXIoCsMulwtxfHQEZqBwc3dyqXVSr\n2Kl4IoGxfft8QycgoOTYyWNYWVvBB576gOe4QKdSHyrMPvrCo5ZtJZQmm5L4wb/4QQnsvudr78Hj\n//A4Tq+L+EkZ3yj3645348z6GfyoIGoTjXSPYO3ymuGC+6EDHwIAfOUfvoKFlQWcWj+FKKJGDdFX\nTr2CRJPZ5wsnX8DCyoIBta2RVly8ehHfeuBb2rk3ohFvrb+FU+un0NTQhIltE/j2A99GIp7A7L2z\nmByYRHYga7zXHms32o01xIw2fn7+51hYWcCZN84AJ4Gt2Io/KZ74drDodk7pYnvLSYy12coHhSpf\nYYxnqI1QCJ6hrhnVg5UuaF2Lc/Kqepp72ZbnWQCDMC2bvP6mtHB2wATEhuL7FyHgKV58fwJmXKPU\nCoB3AXgOouYl/zZPQ2TCLcBZV2CfnEeqCSLm1KF2qDH2H8BMBvRjmPAmYy3vhEgkJGNZ+wAssjYu\nw5qQCDCB80l4r79
ZITj6TUBUkfwAdYDiNSlX1lYMyJHgse0r2/DhAx8uTYyjAaOZ52dw45dvxF/8\n/V8YMPvIC49Ytn3fX70PZy+dRXOkGdGGKIb3D2PX13dZ2l55ZwVXinc1Yo0xS2zo1K1T2NGxA8dO\nHsOp9VPob+3H3P1zlvM30hDBjV++EReumCfrFXaX5OS6KLMCCJfa9ybfC0CAIQCcv3IeR14/gt9/\n8feNGFUubrm8SBfx49UfI/tcFhPPTgAAnr7vaTx131MG/M3eO4vueDfOXzmPS3TJaMNSsuUC8PbX\n3sbvrv+u5bi4waIXQCwnMVZoJQ0VKlQ1FYJnqE2nTCaz0UMI5UMblSDJj9zOKd/WVwkTD0HAlbRs\nxjXbnoGAxH4A0hNwBCKm8hgEoLVCuON2KvtegbAWnoIZFwoIq+QxuGekbURpjU6md1reAW0hEbN5\nyaWt4zDrac5AWGSPQABgd/F5A8S8pC7AClvDELGmGZggJt1mJUR7sWJK+M2i5kAH+PyOUt2xayBp\nmWxqLE0AJMHjxDsncOzNYyUAwsGoOdJsAOfKBRMak01J7Llnj2XbvpY+HHvzGC5cuYC31t8S2V/f\nOILbv3a7AU4y2VAEEbz02y8ZcYrS9bQ5JrJfNaIRFy5fwL8++q/REhF9vLfjvbh49SJWLqxY5krK\nCX71qoDHM5fO4Kdv/xTRhijOXzlv2eb7p76P4e5hy3vSeik11DmEvpY+R0hLxBP41Z5fLXm/OdKM\nni095htrQMNRQdCz985isG0Q8UaRgEmujTynJHA++dMnXQHRT4Zjqc1WPihU+QqvpUJthELwDBUq\nVFW14aVZApCr9VW1WnGY4Flaf0Oz7wgElL0LZu3KOZhWUAlaCQB3F98zKjAXL6ob1oBDZ0yw9epC\nKw04sr1GGBaky42XEb0YRcPZhtI22TUzUBzrv4EJeEsQFtkCBHwegYDjIxButnblYG6GcL3lICYN\nc8MAJuHdirkBQFeWNiCeVLrZXrx60bAcqjGajcXLg6HOIQuA8My0B187aAFOAEg0JQw3Wm5xk+Cm\nAtzK2gre/dV3Y+LZCXzrgW+hv7UfP/n4T3BX111GMiIJWPNvzAMQVsPT66fxV8t/ZSQB2p7YjsK6\ns4k/2hDFFTLH+sb5N6zWx6Le1/0+dMbFXZ7hrmFMDkzilY++gp64eeL/ePXHeOHkC8Y2KqRJQLx0\n9RJ6twh3hc6mTsQaYnh/7/vxN7/9N8b7w93D2HuPSM+ciCdwc9vNOHayFPoB88ZA4aKYa9CAWI6V\nNFSoUKG8KgTPUJtOYVzC5tJmSPpU8TnFIWc7gB8V3x8B8D2Ybp+/YPu0wwQpCVs8GY7OXbQHAlJH\nUYTFIhTSL4G9OQFugD4GU01SJNUB4OViX6cB/BK4HL2M6NUomi4X3Q2TMGEzBtFPb/F9QFhdea1M\nXmtzqPiU67EXwhUYEBl6e9lnX0ApiMl1+DaAp+Hd/XUDEwT5Op8qdQv2INUtk1u1fqPvNwx30dX1\nVcM9VLqV3rL1FguAJOIJ3Nx6M469ecyAH0AA5cS2Cfzs4z8zkupIi9ujLzyKl0+9jFhDDHcm78S2\n1m2W8Z2+KBL33HfoPvzwoz/EwNaBkgy4ACyQ2BJpMeCwPdaOP/3QnxrWTztdpssGJDei0QLMHTFR\nbiWCCL6z8h384NQPEG+M42dnf4bzl8+jo6kD8YjpsrB+dd0Yz9LqEm6ZvQVb/vsWfOCpD2Db49vw\ntX/8GhZWFnDkjSP44A0fxNStU7g9ebtRJmb7X27H7cnbRUzo/d92jc2U55T8TAJxJYCoc9ctx0oa\nanMqvJYKtREKwTNUqFBV1XWRIEle77ZBWPZOQbjOzkG4n15tJGsAACAASURBVMp4QbldEsI6+gKA\nWyGyxQJWSNLFGX6z2PYCGFyeA+4olpQ5aTO+LRAlWwABmlH2WSuAu4p9PQogC0QuC
/MmgYQl96cQ\nFllAuNy+DNEXNzANsbHPinYwUGwfMC25CQBPQMDWUQgXYg5eKoiVG29ZA6ALRDWIJ1Xj9pzqP3L3\n0JHuEezNlBZK5fAzsW0C2YEsXnvoNRwaP2QBFu4WKmtzHjt5DG9eeFM7zpW1Fdy+T7je8gy4XM2R\nZqSaU/jIwEcMt9qzl84i93zOErfqpq54l+W17OsKrmD10ipW1lawfnUdq5dWceT1I+j5Uk+JGy8g\ngDXSEMGZS2ewfnUdL7z1Ak6cP2FYYiOI4Pyl89hzzx68du41Yz8JoAAMl2IJgX92z59Z6p3yGE55\n7L79wLeNrL/lKoznDBUqVK0V1vEMFSpUIJqZEW61LS0CNq8pyJR1IdV6nFKyrqR0LZX1OdXttgNY\nhrB2bocAT6l+CAB1WrdOWGEvfhW491PA4/9eLHgPBJhyNUMk+umAWftye3G7SHGsncV53Q7hIsvV\nCGFl/SVEzOhWiHqfjTBddbdAWHPVsWdgrYcpS8z8BAJKXyv+bId+XUMFIqd6nbrP3OpIrq6v4n37\n34e+lj60N7Xb1ojkNT25Yg0xI9mOTk0NTfhg6oMGmAFAAxoQa4zhgW0P4PT6abzyy1csFtcGNKCj\nqQOrF70F9EYQwVVcLYkB9aNIQwTff/D7uPfr9+LUuvmHF22I4jJdNn4C+lqfANC7pRevfuxVZJ/L\nGms1desUTr5z0ngdb4yjJdqCnd078cSuJyx1USup0xlEHddQoUKFkvJSxzMEz1ChQgWiTEbEcgLC\nwrlv34YOJ1hlYAUou7lJAJUxmaoSEMmEABGPuV78fQiiJIrbdd8uCLCVygJ4qvj7DIBXIJL8HAHw\nH4q/fwfA5wB8BSKZTxTC4roKAdJyPFPF/dwy4U5AWDG/DeGaG4EA18sAdkJYMxNs28MwQTwLcx1V\nOa1rHer5mRmsLi0h2tKCe2dnyyppUitpQbJ4p2i1I4aZ/6UNe+7d6ws8bvzyjYYFMDuQxVP3PVWy\njQSboc4hvP7O63hr7S0Mdw2jPdaOhZUFNKDBFvzijXG0xdpwev205X0Oc3aKNkSRfyCPe79+Ly6S\nl2K25Wvq1im8/NbL+OnbPy2ZT7IpicLFAhrRiGQ8iVhjTGs1HWwbxOrFVRQuFtAWbcMHej8ANABH\nXj+Ctmgbzl0+Z+lv35j4Q+Fgz9/3KrcbDKH0msEMlrCEFrRgFrNIhHfMQoUC4A08Q1fbUJtOYVxC\nfWozxHLayfWc8hov6OY2KZPqtAB4EQLEbv4B0PoA8JBN2l+euOi/w6yJuRXAf2bbLUFYUNcB/D8A\nDkHUCh0ofiYrTFwG8BaEy6yETjmvncXXt8NMVMRzwQxBWD9PQsRn9kFA51swrb3cY091d5Xr2K78\n9BKHuUFlR+y0urSElYUFnDh8GEeVrFn19h2ljdsrZv1KPHME+74Q04KHU8mO9Svrxu928CjdQvO/\nlcfSx5YsLqLd8W7LfhElI9b61XX8cv2Xlvca0egKnYCI5dz17C7c2XknGlzrBZWvtmgbCmsF/NO5\nfwJQug7SIiuTIZ1eMyFajqs10orCesHY9tzlczjyhqg5mh3I4gM3FH3kXxWJnpojzcYxiUXEF0q5\nCYbCeM7ytIQlLGABh3EYM3WducxZ9fY9Fer6UAieoUKFCkTXdCxnUPGCL0G41P4dRFzlUwAungCO\nfRo4/Cngd/730n144qJHYMLh28XXEsh4QiP1GtQu9K0NAuTU2MvvAPgYTPiMQ9TzXAbwTHE8CxCu\nttxjskPpWwVxuY6vABh8Cbj9Y0DqeeDJM+7rapeldoOANFq809I9MoJ7NtudFsDTnSK1DieH0J3d\n4kRsjbTi/OXzFjCVwPrQtx4yrGmJeAKJpgSyz2Xx0LcewlC3yDiVaEpgYtsEIg1W8GyLtpWAXKOP\nS5a1K2t4+fTLIBCaI83oineVwK1Ue6wdu2/d7
bntCCKINkQNSOSZcrk6Yh1GPdBGNOL9ve8HYJ3b\n+SvnsXpp1RiH1Mn1k4hFYnhi7AlkB7IYvWEU+d/K45snvmkck6bGJl8ZaL3U/gzlrpbiF+oIRrCn\n1pnLQoXa5ApdbUOFClXfulaDR2Xc6LGzwOXiBWf2ItDTZI2DfBUiHlO6q94J4ASEtfAVAJ+E6b7a\nBFELlLu7AgLI/kcA34ewiHJ3WuniqsaxOrnFcm0BsAbhcvt9CKB2mq/Rfsafb7bqtivnl4E3N+iA\ntb66iqMzM7hnz566crP1HPe3uir+tvbssfxN8f0v0SUcef0IRrpHEI/EcexNEZ+Yak7he9nvYeSv\nRozYxsG2QVy4cgHr/z97bx8U13nne377HZoGGmhkhJBakkvWSyIZJBzJsRS1IyleEyd0XshcM3cs\nu2rdU8luJffurrh3tu7O3Jqb3Joqp27NTO2uK9pkxEzingQpkWLZZhRhCSThGFu2XhxJMQ6KiRBC\nvIgWIKBpoPePp5/T55w+p885/QIN+n1UlOjz8pznvAD97d/Ldy6MKKJCraVaami9tx5XR64KdaKd\ndzsxFmENeRxmB3Y9sktS4+k0OzE5P8nWmxwIR+MRVy2SpeceWHUALftbJDWWRpBfG47NZMN2z3a8\nN/SeIDJXOVcJ12ckPIIiWxHGImMoc5QhGo1ia9lW3Bi9gcHpQaHusqmrSXI/1//reiE66vf6UZ5f\nLlkv3158/9NNzSUYIYQQQACHcZjSbAlCBNV4EgSx9FmuxaM+SIVdzSxwxqos+KrAPEB7AVxBPLLX\nAGACTJC5Yt/z5fLLxIXfNcQbENWA1Wq6AaxEvLGQH0ygtsZe84ZCcmrBBCdvkpRM+InPtwHARB3Q\n6gdcO4Bd24CjtuRRT7X6WTVB+pCSrriQi0O7xY7Dew6j8e1GtPa1Cts1rG/AxMwEWvtaYYYZZlNi\nGqxcPF0bvYbh8DBcVheK7EWYmJkQur+uyFuBwel4W+byvHJE5iJCNJCLx+rSajgsDnQNdUGJUnsp\n7s3cU1wnF6Al9hLcfP4m3A43Vr+2Gn0P+gxdKwB43P04yp3luDBwAdPz03BZXfjCyi/g/sz9BDFq\nN9sxM89qTqsKqnDhqxdwqOuQpOmQ+Jq7HW7J/eDpySPhEaEWdGp2SthX3pTI4/DgifInBAFKzYQI\ngsgmVONJLEuoLkGFQICJtDqVWsGlygIUjy7YMyVOC+X1ntVgQu+MlYmmnthy/qu7CMAFxL0++a2V\n+1zuki2Xw1NVh8FqM/2Ii04g3ugIAKKIRz3rwbrt1iPuucnnfBqsxlN8XLXU1wR/ziDgeQqY2AG0\n2aBZKqVWP7tQtikGUnoX/HeUaG5CGqBK3Z9WuqXYQ7LZ1yzUAAb3BVGRXyEZO7gvCKvJinnMSwTd\nttJtEo9JnrI7HB6G3WzHxOwE+if7BdFZbCvGFyu/KJnH0PQQ8iysoNlldQnj90/249bELdVLoSQ6\nnWYnVuStSPD5HJ0ZxSP/8gieb3seY+Ex1TGTcSV0BW39bZienwbAajQvDl1EvjU/YdsnVzwJgF0/\n7lfasr8FRfYiYbn4mgPx+5H3hzwMh4cxEh4RrmFbfxt6xnqEfQ/vOSxs77K6MBwelliliG10SHQS\n9F6KWAxIeBLEciHWLAStrUyELheUikeXqsgW1ym6AKwD6xDLRV8AAH//yxNFxsBqOfl75howESj3\nueT1mWoCTCz8roHVl4q347Wj1QCaY+uOAzgBlvJ7AnHPzbOi/eXCT3yOm8FSgX1gtaCSebuBJz4T\nn5PaZwpagq8JrNlRo8p6AyQVZWo1ppkYO11Ecwv+PLm40PJuVBMnbocbN751Q7KO120CgNPixIFV\nB+D3+tHxlQ6Jx6RYzH5h5RcSjllkL0LrrVbJspqyGvgqfXCYHZianRKWD04PYnpuWtdlMcMME0yY\nnJ/E4PSgY
ofbmegMfnHzF4IIlrPKuUry2mlOLJiWNzAanB5Ed6g7YS4wAWsK1sBhZv6cB88ehPMn\nTvym7zewmWz4yd6foKmrSfKc8PuxpWQLgNg1rPiC8P27X3tXck/49rwpkfgDCLVmQlT7SRDEQkGp\ntgSxXKirY6KztnaZdvgRsVTTb3laqAssQjkFgGfjNYAJKJ6OagXrQMtTSL8X2/dxJNZwqiGuq3wV\nTMCqWb1oWcHohZ8jx4N4aq88FVfPMX1IXsOptd4ASdNU00zpzWp9nYG58XRLj8ODje6NKLIp+3Dq\nrRXtHe/F7td348JXL8Bb6FUcw2axocBagGJ7MXrGevDe4HuCj6fVZMUOzw5J6mx5Xjne/9r72HVi\nl6L9iBVWzEK7u61RtKxaHGYHyvLK0D/Zn3ScbaXbUGgtTPDs5GOE59knTfLzqHRWYkPRBsXnRGx9\nAkDTBsWIVQrVfhIEkQn0pNpaF2oyBEFkmWBQsVnIsmSpercEATwGZj/SBqAitpxH/Bpjr0sAtAP4\nPuKirFe0XwDKDYHkt51HwgAmOpO9n3RrrBeT7LhBsEjnQOy83LE5K0U19RxTy8pGr9WNDsSRuYQ0\n1SDSEuZJx04XA3ML7gsicD6A/gf9Qg1i4HxAEBvcn/S3X7qK33lGE9bL8RZ6cevP46mvYsE6FhkT\njtGwvgG9470JdY+z0Vl8OPwhAMBismAuOoeh6SEc6joksWyR7JNh0WmGOSFdWIlCWyEGJhOFsJx1\nheswM5cYXXVZXZLorfw8qsuqcXXkKgDW4faVna8I65q6mjA4OYjGtxvRebcTDyIPcOKPJ3Dx6xex\nrSyxoxePbuohlWdTdxMrgiAIEZRqSyw5qC5BBbebRf6Wu+gEMu7dsmDPlBtMICH2/7uQpqnytNWb\nYN1hxTWNSgJLK/1TTZSla0GS7LhuADcQPy+tFGAttGo4y2NfGXjsk9bAqdWYKqD0PKVaXxc4dw6+\n119H3VtvIRRW6eRqYG5ckHDrDrnY4P6k0Tujiuu1ONl7Ukjl/eT+J5IxuMApc5RJ9olEI6gqqMLT\nK5+WbM8tW7LNPOZ1bTccHtbc1mayodnXjOC+IErtpZJ1E7MTmEPceoVbrQBAHvIwMz+DVQUstXcs\nMoZDXYeE9e+df0+4rmORMcxhDpFoBDtP7ExIlRW/Pnj2oGYabSrPplbKNpFZAgjABx/qUIdQhnyj\n6L0UsRhQxJMgiKUHF9nZJADgJFj95Q7oT2/VQh6dEp9GsgigUlSLC0sPgH4wISmOQKpFwsSRUB49\n1WITmCCeRbzxUQ2Uo4zy80jnVmlFRZUiwRyDVjy6okRaUeZ0xlagOxRCxwCLsgXOn0fL/v2Gx1CC\nRz7lqZjcn/Q/fViNE8+uxU/2HTEklMVRytryWhTYCnB4z2E0dTVhLDKGivwKeF1ejAyNCNtZYMFE\nZAKRaAT13no0+5rhdrhRWVAJj8ODueicYCHisrowMTuRcFwxWimz2cICCy5+/aLQxddsSvxs3wIL\niu3FsJqsmJqbwswsi4xOYxptt9sSGjhxHBaHsPzKyBVEohGYYEKXvwvf7fyukCq74ecbJNer3FGO\nofAQAPXIdSrPZlYj+EQC3ehGR+yXdgABtCyUbxRBZBiKeBJLDp/Pt9hTIBaTdKN1Cig+U91g6aKj\niIuaTGAgOqW5H48GbgSrFZVHINWOlUp66gBYg6AoIAR9/gRpUx899ybT9y/ZuWSj4ZaOJkOZ/B3l\ntLLPh2s9Hhzes8fw/mqNY9QazewLBrG+oQHfevMsfll33HAK5Y5yFqWsKavBa198DS37W9DU1YSW\nnhZ03u3EwNQArt5j6aRWkxXFtmLMYQ6hmZDg28mP2Tvei+HwMEZnRuEwO7C6YDXyLdJusfJmP2ZI\nbV1K7aUosZdItrEqfOZugSWhSVDCNiYLDqw6gJ3lOxPWmWBCz/M92Fa2TdL
FVz7mPOZxb+YeBsPx\nJknm2FuxWk8t3vW/KzQT8p/yC/dt085N8Dg8cDvcOPPcGeRZ8nD5G5exrWxbQidbLjprPbV4vOxx\n4ftMCkS9UVJqXJQZhG7VqMXhdGsKYtB7KWIxoOZCBEEsLXzIWDOZpIib5Ij9LnMRo41vUmkkVI54\nkyBA2TfUB+17o2cbI8jORVJ79qMI3K+3Zbbh1gL7hobCYQTOn8fhPXvgdjgM778QjWPE1/zVPa/i\nUNch5Fvy0Tvey2o9Z8aERjsl9hI8VvQYuoZZM6GK/ApJA6EyRxk+V/45BPcFE7xDxdE7Tt3qOpzu\nOy00K+LbiCOjJpgQRfx9yFv/01v4Hx/9D7TdbjN8rmL/Uo4JJkEEBs4FcOzmMYzOjMICiyS11hz7\nx2s7rbDCbDLj7efexj9e+0dJ9HnlT1cK16XeW49QOKR6H3kjodHwKNput6G6tBprC9fiiO8Iuz86\nmwxlA2pclBlCCCGAAA7jMNw5+4eIeNghH09iWUJ1CQ85GWwmw1F8poJg9h9+5LboBIx7WaYSdb0I\n5v95AOyaKPmGiu9NPvT5eRpBKVoqOxdJ7dlfujJaCwxA17XO5O8ot8OBlv37UxKdANAzznwei23F\nkmY1mSJwLoCWnhbhmh/qOoSW/S3oHe8VlnGvyRJ7CS594xJK81jtI4/wrchjBrEuqwsj4RG09rVi\n+6+2Y2xmDHazXdiWR+84BZYCXB65LLx22Vxoe65NYicCQCI6ASbEju4/mlBrqoXL6sJoeBSv7nkV\nDesbsKN0hzD+9y99HwB7/njEUZ5qO495mEzx92SzmMVMdAY/vPrDhOizOGX5VN8pXOy8CIBFkuWR\nSx69Prr/KBrWN+DsV87i+DPHBcsbpcj2QkEpuZnBDTda0JJR0UnvpYjFgGo8CYJYWqTZXVQ3bjDv\nyoXEaP1gKvWGKdYowgvgtui1UtRUfG/8UK4jTef+6ahNlbzR3XcEqMvwQ5Ks5pRf2ykAp5CR55N3\nmbU6ndgXDMJhUEB7C7zoe9CH+5H7gijUQq1jqdLy7lA37kfuA2DpqqPTowiFQ4LYLLIV4dSXT+H7\nl74vRN3k9aUf/9nH2P7L7bgXvgcAqC6tRoGtQOiAazfbcX30OiwmC2wmG56qeApXR67i3sw9PJh8\nIMx7IjKBr576KsJzYdybvqd6fo8WPYrvvfM9zEf1NRUqsBQgYolgYmYCbbfbsPZf16Iiv0LoUFtT\nVoPLw5fhPuLG5OwkAPb89T3ow8DUgBBxLbIVYWvp1oTOvkopvjvKdwgR2em5afAGuLcf3E7YlpNq\nHXE2UaslJgji4YRSbQmCIFIlVRGnhg/G0lCNbp/qPqmQjZRUHWMa8S/MOD6kdW2VhN3rPh8GYp61\n6xsasN9gUy3u21nrqcWWki1C+msyCwy19Eil5Xx8cUOfhvUNEruWdYXrsKZgTdLjisf2e/0Iz4XR\n2teq2EyoqqAKW0u2orWvFcW2YkH4AqxT7Ew00cpEjlLabqrUe+vR3t8umcfeir2YnpuW+JMC7Nyi\niOKdu+9gaHoIDrMDDosD4bkwqsuqUeooRXBfEACw+RebMTA9AJvJJqQSA5SyShBEbkKptgRBENlE\nR6MZQxhNQ00lbTULqcoJBACMgfmUHkPmItM60lwXNbUwzWurZFHBu8x6amuxJwXPWnETGHH6azIL\nDLX0SKXlfHzfSp9kndiupdJZqXlc8dhHfEeEcXetYCmzVhNL0HJanNj9yG6hQ+6+yn1Cs6CtJVuF\ncZJRYi9JSNtNlc+6P4tmXzNsZptkecdAh+BPajOxdTazDXce3MHM3Aze/9r7aFjfAIfFgbHIGMLz\nYXQNdQnXyO1w48af3UDD+gZs92yXzJ1SVgmCWKqQ8CSWHFSXQGSalJ+pTIs4o7WaRrfX2idTHWe7\nwbrsDgA4pLGtEZRqU7PQ5ThlYte2/W/
aUxLbSsKOd5n98unThtNsAakQ11tvp9axNLgvCJfVhe5Q\nNzb8fAN6x3vj9YUHjkr2EY+h5Bkq9yWUH1M+7gdf/wBVBVW4/q3ruDN5R+iQe37gvNCsZ33RetSu\nYEa5ltg/JbaVbcOP9/5YELNizAbfFl0PXcf6f12Pje6NcJildbjcn3R7GROOkfkIuoa7JLWwvIZV\n3NmWXyN+DW4/uA18zMR3+1fa0/5QhTrNEgC9lyIWB0q1JZYc7e3t1Aac0I8OL8eUn6lUusPmMj4k\npIp+5Qeb8Mf5AeTBhjf/8iIeWeHVHmchO7/6sDCpwwZI9XnKdppwJsZ3H3ELKaVVBVW49ee3Ujqu\nDz50nOsAQkCFtQI39t2QzClZbas4fdhtd6Otvw21nlqc/vJpAMBjP39MkkZbZCvCWGQMFpMFc1HW\nZdbj8OD+zH1JCmuRtQhOm1PSZVeMFVZB5Crh9/rxzt13MDg9CIDVqj6YfYCbYzcl3W1L7CW4+fxN\nNHU14cQfT2A4PIzPrfgc7jy4g9UFq1FkL5KkJO/+9W50nusENmYmzZY6zRIAvZciMo+eVFsSngRB\nLG98PublCLAOpwZr5B4qFATjZ/+bG9ceYULjC3er0PF/aQuNjAvyZLW0C2xv8rBT/s/lGA4Pw2lx\n4vq3rsNb6FVtRpSM1edWo6+nj3nDIlEA8drWn/57YOyzHqza+oQwtljIAol2IVyY1pTVYI1rDfIt\n+Wi52YL5mAGt0+LE5BxrAmSCCSX2EhTYCrDGtQbXRq8hNJMYBXRanHhixRPouNORYM8CANtKt6Hj\nK+z3zOrXVmNqdgpuhxszczMYnx0XtjPBhO2l27HCuQJjkTFJoyFx3an4eoiFtpZvph4yPR5BEARA\nwpMgCAKoqwNaWzPr5bhcURCMtf+tHB88MoxHB53oDFzXF/HMND6oRzWXWNQ53S61mSQVwdg73ovd\nr+/Gha9egLeQPQvyCJrb7lYdlx/zyr0rgsDjEUDxdm/V1aGvtRX/8DcuXK+cEMbWE52TR1jF8xMj\nblwkbo6khAUWFNmL8OQjT6JrsAsj4RGYYRbEbL23HivyV6A71I3Ou53CWEoilSP2MK0pq0GZo0wS\nvXU73AicC+D6vevoGevBu197V7jm6bCoDbgIgli2UHMhYllCdQnLiIWozwsGNb0cl/UzZeQaK9RQ\nvvmXF/GFu1UZFZ2Ga8yS1dKm4kmaZZI9T6Hubgx0dKCvtRXnA5noSJU6Ss2MtPAWenHrz29JBJC8\ndjTZuCd7T6JjoEMiOi9941KCAOK1rVXbWXMh7qEpfl7UniN5gymlhkNOixMWE6sBLbAWCEKx2FYM\nv9ePYluxZPs5zGF0ZhRX710V6k0dlnhN5/DUMF7vfR0dAx3CWE6LE+e+ck6o4xRTXVqNd/3vot5b\nD7/XjzPPnUmokwXYPeoc7MTAlQEc6ooXTBv5GZJvu9jenkRusKz/7hE5C/l4EgSxeIh9GbdfBNb8\nH0lrMVPC7V6c9FodtaULgg7vy2SprI+s8OpLrzUypZgwAViapGYUa6G8WxeAdLvUZhK9zYa0kHs1\nJhs3PBcWvq90VuJawzVFAeRwu9Hyv7rxYLQfNpMNE7PMQ1P8vIifo+2/2o6p2SmE58LY4dmByoJK\nwTrm1T2v4onjT2BomqWxVpdWo8BagM5BluZaYC3Ag9kHggj2Fnpx4M0Dgo+mmP4H/dj4i42oLqvG\nnQd3hOWdg50SP06H2SGkIu+r3IfWvlbJOKMzozh49mBCVLhlf4skEm2zsI64RbYi9E/0o+6tOgT3\nBQ39DOnZNpXoN0EQhFEo1ZYgiMVDXJ/neA7ofJMtXw61mLlSW6qnBtKHBW3Q8zDXmIVDIZwPBLDn\n8GHNNFuxGPhfTpZj7kYvrE4nfvkfy9Ezpe3HqTXmq3texaGuQxlPueSpnPmW/ATfUC7oaspqcOa5\nM0m
PK0+RlT8v4ufIYXFI6iXlvqKH9xzGS+0vIYoomn3N2Hp0K/om+1BkK8L5r57H9y99XzLfV/e8\nisd+/pguT1CA2arcenBLaLzk9/px/JnjwvXgnpwAS6t1WpyC8F3nWoc1rrjPqf+UXzjvem897BY7\n+if6he0b1jdgYmZC8WdISUDq+XmjhkMEQaQL1XgSBJHbiOvzGpdZLebq1UBfH1BUBFy9CnhFaarJ\nmuVkGn6N8wH0qhwz0w16NM7vYasxSzWaJBYDT/WW44X/ziJ2/8/feXC1ZBiAMZEQOBdAS0+LII6y\nLTCUxIyRey9vEtTsa5bsIx6r8e1GIapYYCnAg7kHAJTrR4FYp9i7cSHXsr8lYb6v7HwFW45uQWQu\nIul+y9lctBmjkVFYTVZ4XV78/v7vMRIeURTVoXAIL7a/CBNMOOI7Isy31lMLh9mhKSpX/2y1IJSv\nfvMqiu3FitfRyDUXP5eRaARtt9seyg+DCILIDFTjSSxLqC5hGSGuz9NRi5ktsvJMcaE5NgYckplZ\n8vTXVjCRlk34Ne5NcsxU/ECToXF+y73GTP48adVSqtXriVNWv/u7xwGwFN2KzdXCciMpst2hbkF0\nlthLDO2bivejUsptsnsvPwb39txauhWhcAiNbzeq1nIG9wXh9/pR761HsZ3VZybzveTeouLaUfl8\nvYVePOF5QlF0AsDGko248xd38GjRo+gc7MRIeARVBVWKkVy3w40Tz5zA8WeOY9eJXegc6ITdbMdP\n9v4ERXapz6mSj6r7U/b/WGQMh7oOqV5HI9dc/FwWWAsUvVuJ5Qu9lyIWAxKeBEHkBrwWc6lHOjlF\n7M0kamsBeS1fsmY52WIhG/QsxvnlMFq1lGrCVCxA6v/5KNY3NODLp0/jF88kNqExMg+1hj7JSKUR\nkZKAMnIMLph6x3s1j+12uHH8meM48cwJrCtaBwCYjc7i+5e+rzo3j8MjqR1Vmq9SYyKA1Yke8R2R\nbFPrqcVH3/xI81wHJgcwNjuGmfkZfPnfvpxwXCWhqLce18g1F4/Z7GvW/DAolQ8fCIIgxFCqLUEs\ndXKliQ0hJRRi9+bw4cR7YtQCJBP3eCFtR5aYxUm2EOlmbQAAIABJREFU0UovTaXmNZX03XRSnNOp\nyw0ggG50wwknggjCrfJQqB3D6LH1bq9nu1A4hJfaX8KD2Qf46N5H2Fq6FU6rU5L2K76uTV1NmlYy\n79x9B5FoROKFqoXeYxjB6PNAdaAEQSSDajwJ4mEgV5rY5DpLWaDTPRbIJR/MTJGKIFxoEaCnTlBN\nBPngQ0ese1UDGtCi0r1K7RjJmhUZGUc+32w0V0p2X8Tr8ix5+P23fp+SL+diCcCHuSkYQRDaUI0n\nsSyhugQZMXsGxZROIk53NxNvra1MhIrI+WdqOd9jg16uRn0wzwUCeN3nw1t1dQiHFiY90OjzlErN\na6asUPSip05QLQ3WGcu9rkUtDifJvVY7hpGU22TjyOd7qOtQwnbidNKDZw9mpK5Vad2df3/HkOgU\nP1MLfe85RlOnidwm5//uEcsS8vEkiKVOMKie0vmwI45y2pgfnm7xZrTzbDYjqnrvsWwOgSZ37gd5\n9fiMijDqg8mFKgCcDwSwfwGjxdn0RpR7Zy4kSj6TyURQEEEEEMBhHFZNs00WyebHuzZ6TfNYWuit\ntwWAckc5hsKsk7Auv1kkvy+ZumeLde+5oCcIgkgVSrUlCCKz5FJKqzhF1e9n4lOvQPfBmLdlLqTD\nyubgG2xZ9ClpYtDKxYgPJgC8VVeHvtZWeGpr8eXTpxc0NTeXa+LSEcXi8+I+k+mKoNd9PuEDgvUN\nDZIPCMTHqyqo0tXARw0j9bZuuxtt/Zm3GMnmBxIEQRCLhZ5UW4p4EgSRWXhKK8BE6GKqHXGK6pEj\nxkSw0c6suZAOK5uDs1FlSgEAJwGEAewAcBRAExbOW1RMEIYaETncb
kNRy33BoCGhyslELelipUTq\nQRzZ0xvN48i7oaoJJyMCK1kkW3w8w42NzgVwsvckwnNh7PDswNEDR5OeqziaCCArkcV0rj1BEMRS\nhiKexJKjvb0dPp9vsadBqFFXx+ooa2sXxZNTQrLOsiIUnymjnVl1HiuryOagOiUf4tFcgEV0B2Es\nwrvMSRaB04I/T+l0kVUjU9GydBrF6D0vIxHfZJHsdK6jeA565pEJ+D3qGe+Bt8CLInsRyveVo9fR\nCyeciLwVQVtfGzwODza6N6LIVqR5L+nvHpFp6JkiMg1FPAmCWHhyqeaUe4OmtC+Mia90jpUpZHNQ\nnZLYmrAaTFzHoqPkvckwWkuqRDZq4lKNlsktTdKpE9R7XkoRX7VIcrJIdjrXUezDWV1avSCRZ/E9\n6nvQBwAoP1+Oof2sXrR+Xz0azjeg/0E/Ou92AqDIJ0EQDwcU8SQIgnjYCAF4CUAUQDOYyF6i3pvZ\nslcxWkuaCfScS6qRSr2WJplEKVKZTiQ51Tm81P4SoogmTQtOF3EkOhKNoO12G2xmGyLzERTbilH9\nzWp0FHagFrU4jdNww032JARBLCvIx5MgiMyRS02DlhpGO+QSulloIZNN9JxLqmmndahDK1olwidb\nJEsHXsxmT3pINZVZqeHSn8b+hK7hLgCAf70ftv02SWffbKRiEwRBLBbk40ksS8h7apFI4oO5JAkE\nWBfYujq0v/FGWvtDyx+SW4a0gonQ5YxBX850yURKbDLEvo56vRyN/o7ix/jbfdcwmZ/8XJJ5VCbz\nLA0iiAY0ZF10Asm9PfcFg1jf0JCTohPQ50uqhLzhUsv+FpTmlQrLjuw5gha0SK69Ef9W+rtHZBp6\npojFgIQnQRD6yIWurZlELKR/+MP09tcS4kY75C4gRvSzLhZYZGdbyKQqRFI5xgePDOP4X1WlfC7c\ns7SvtRXnZc+kG+4E4ZMtknXz5bWciyk6AwjABx/qUIeQ7NORVDsRB/cF0bC+QZIyq7SMIAjiYYaE\nJ7HkoC5si0QwyMwgF7tTbaYQCWnfiRNp7a8pxINgnWJ1+FQuNBkPZC+wyM62kElFiBj9HSU+xq+b\nPkr5XLId/dVLrguubnSjAx1oRSsCsk9HUp17U1cTBicH0fh2oxAZNxLR1IL+7hGZhp4pYjGgGk+C\nIB5O0rU/yaB9SiYa5KRagptx9xuNJkXZagaUjHSOuRB1eJk6hpGGSJmyZVmKZKPe1Yh1DEEQxHKE\najyJZQnVJRC6SZZH6nazL78f7Tt3Gs8z5V4lGRBOqimSBvJgU41cZjyQzW1oVMZKlg6aLdI5ZipR\nK6O/ozIVGTMS/V2IFOJcJRv1rqmm6OqF/u4RmYaeKWIxIOFJEMTyYNM5wH0ZKH8f6L3PlmmpMb7+\nvfcWtWGSaoqkATWZagluBvWzLvSmg6bS1CfdYz5MZFso5TJ6612NPIO5nl5MEASRC1CqLUEQywP3\nZeB+Nfu+6h3g1ue180i11i+QhYxqiqSBPNgMZv5mFb3poJlMXVwMT85ch6w8tKH0WYIgCP2QjydB\nEA8P5e8Dw08AzmvA9SrAW6ytxrTW+3ws4giwfFS3e2G9TJeKmswCdW/VobWvFbWeWooiEYsCPYME\nQRD6oRpPYllCdQmEIhcfY5HOr/7fwMF6Fi0EkueRxvJM2y9fVl7P81c9HqC/Hzh2bGG9TBc6DzaH\nWMqpi/Q7anmQS88gPVNEpqFnilgMUhaeJpOpwWQyXTOZTHMmk2l7JidFEMRDRKaMJL3FLL32zg1t\ncaj3mLzzzsaNQGcnMDrKli8XL9McJp2GO+cCAbzu8+GtujqEM2JOSjyMZNIOhSAIgkgj1dZkMm0C\nMA/gRwD+92g0+qHKdpRqSxBEHHndpN8fT2etqABu3EgvwqenLlKeQtuiUbvFx6ypAdasAZqbtee4\nQPWhRBxum3Lv6lXMxD4kWN/Qg
P1a93eRWAxrmUUnAKAbzO81iJzztSUIgiBSQ0+qrTXVwaPR6O/5\nQQiCWMIstEDinVr5sXk6KwAMDLBl6QiFYFC7LtJoC1i1MZNdO/l55qj4WYqoCTZum8LJ9S624vme\nDwQUBfKy89vsBsBvUQDMeocgCIJ4KKAaT2LJQXUJKqSaspqqAWSy4x88qD4XuegLBlmkU7wsHfTU\nRQaDwLp1gMMBNDai/Y03Uhsz2bVL1d+E0ETNl5PbppRWV8Pr9+PLp08vShRR7+8oPTYvy85vk3/O\nVAuAfix0Q3/3iExDzxSxGCSNeJpMptMAKhRW/Z/RaPSk3oO8+OKLWLt2LQDA7XajuroaPp8PQPzB\np9f0Wu/ry5cv59R8cuZ1dzfaY9ETXyzCpmv/qSn4AKC2Fu0vvAC0t6d2/JMn0T4wwF6XlQEjI2gH\nAL8fvth27e3twHe+A5/LBRw+LDT18X3pS0BrK9rn54ELF+B77rnsX681a4TrhclJ4LnnjI83NcVe\nx8Rle3s78MMfwjcxAdhsaH/kEWB6Gr7GRiAYjJ9vLjwvS/g1F2wDjz2GtS+8AI71O9/B+OQkDp44\nAYfbveDz+4fnnsNEXx8sDgeimzbhnStXYHE48B9PnVKcj575Tl2fAkqZ3+YL8y+gPdWfz1x5/R3A\n5/IBh4H2yzkwH3pNrx/S15fp7xG9TvP15cuXEYoFFz799FPoIW07FZPJdBZU40kQi48Bz0cJmbLs\nKC2NN99ZsQIYHIzPpakpeTqvz2es5jITpHq9xChdO/G5eDzA8DD7fqHO6yEgV305X/f5hNRZR3k5\nwkNDANKrMyW/TYIgCGIpsJB2KlToSRCLDe/AalREpWrZEQgAK1cywXngALBtG1teXQ289550Llrp\nvIuRlprq9RLDr11TU/xa/O53bF1tLbsW/HtKt9WNVldah9uN/S0tOSU6AWnqbNnjjwvfp1NnSp1V\nCYIgiOVCOl1tvwbgHwF4ANwHcCkajT6rsB1FPImM0i5KNSMWGHEznbExZjHC8fsBm005cqoVXcxU\n1DVF0nqmeOOg+/fjy6qqgI8+iq9fpPNaqogjh7nclVYOj8TOv/AC9u7enZNRWWJpQn/3iExDzxSR\nabLd1fY4gOOp7k8QxBJg0ybg5k0gGgWeegqYnY2LzQpR+XdNDXDkiLq40uo0yyOHegkEgJMngXAY\n2LEDOHpUeVx511mtlF+dh5YM0d0tFZ01NcCZM/Gxl4hoyiUSmu4EkLYFx0JYl/BIbHt7u/D9YqN1\n3g+lpQtBEASxKKRd46l5AIp4EsTSxe2WiiqbDYhEWArppk3Ab34DWK0stdbrTdxfrNLKy4He3tRF\nX7Joq1r9pLx2dHBQu5ZUw14moRx1IhbNdbuBz38eeO21hYtuZkCQsXGk53yuqWlRxUhCDacPcQuO\nBqRkwZHrUdRkAvBcIIDekycxFw7Ds2MHDhw9qvueaJ13rl8XgiAIYmmQ1YgnQRDLlE2bmJ+mzQaY\nRWXgBQXAgwfs+7VrgTt3gHv32Otdu4AbN9TtRuRs3qy8vRrydFZ5tFVWQye8ib92DfsAOHiNZWMj\n20Ct5lJ+HAX/zcRyVI1objbJlCeizHM0NDio6S+pRTqRNIfbDbvbjVN+P9vfFoQD7rQsOPRYlywm\nSp6e/Breu3oVM7HGXf1tbYbuidZ5q61fdv6hBEEQxKKTqeZCBLFg8JbORJYYGGDCa3iY+VxWVgKr\nV7PIJhBPq+UKjO+TrGmQ0jGMeIaK01lLSoB33wXq61ldqTitNYbg8zg8jPMOB3DsGNtGpaGQ8EzJ\nj6PwRj1hiFSbM2WCTHkiytR0JkSamtdmSvu7AizSeRopR3X3BYNY39CwIN6een5HyRsoKV1zfg24\n6ASYR6mRe6J13mrrl51/6BKH/u4RmYaeKWIxoIgnQTwsJEshFa/jAtPpZALP65Xml65ZExdxmzc
z\nEcnDf/JjBIPAI48AMzNsX7MZmJ833uWVC6OSEvb1+OMsInvxoqLgE97EA9gTDgOHDsXFoVKk6Ic/\nBP7rfwWuXYsf59IlxbGNlqNK0EjjNUwQLNJ5GMqCTG8qrqwGd18wmHZjnHTFq2T/I4dTTyOOsRg1\nl8mivvIIp9I159egrKYGzpUrYbbZ4GtuNhw9TnbeauudVnbsWk8tDu/JvQgxQRAEsQSJRqNZ/WKH\nIIglzssvR6N790ajzz4bjY6OLvz+mWDv3miUtQmKRhsapOsqKuLrDhyIRquqotFPP42vf/ZZtq62\nVjr/0VE21ugoO8fi4sRjfPppNFpZGY3W1bHv+fZKbNzIxvB4pMfnx3nhhWjUYokfo6pKcZjp0dHo\n6YqK6LTSnJWOI742VVXZu0fJ7kFWjheN/zZegMOJmR4djZ5uaIhOp3gt090/F/j13r3RHwHRHwHR\n07L7/eazz0Z/BER/WVureo4LfQ06Xn45+uu9e6NvPvtsdODup9GG0w3R0emle/0JgiCIhSOm+ZLq\nQmouRBB6SOgoYzByku7+mSCZpUlpKcDT+errgRMnpPvqsTsRn2NJCeuGqxWZkUcA166Np7pWVQG3\nbqkfw2IBenpYRFYpksjnnP9ToNchjfqJmyZVVQFbt8avzZYt6k2Q9EQsk22jZSuTaeoAtIKl4hpN\nU00zOqsW7XuYuqi+VVeHvtZWeGprE1JZExoo5QDUaIggCIJIFT3NhajGk1hyLEpdQmJHmYXd3wiB\nABNodXVMfHFU6hsBMEsSgHWrLS5O3F9cx6g2fk9P/PstW/TN6+RJJiRbW4GXXmLpswC7XhcuJO4j\nPkZBQfx73hyntTVeO8rn3OtgDXhawVJPgYTjtH/nO/Fr09ubOFay48hJtk2ye5ANgki9NlLPuSZB\nrcYz3drPdJDXVWYL/jtKrX7yXCCAU34/ZiYmMjrPdM9PnN5syc9fkGtF6IPq8YhMQ88UsRhQjSdB\n6EHLhzLb+2uhZjUi7sra1MTsRBobEyNYR4/GooP5wK9/HY8GirvP8mNcvRqPjm7YADzxBBvP6wX6\n+tjyzk7gsceY0ObHOnmS1YMCwIsvsqhqOByfwzvvAG+/DXz5y8Du3axT7vAw8w7l5yI+xtgY2+7W\nreTCXqkBz8WLwO7dOLd7N0IHD+L61BSePHWKiYOkY+n4ACHZNmkViKaAG6l3uk3zwxK1Gs/F7C6r\n1Dk2E8ijuBy1+kmteYjX/3zDBpQ/8QTyy8sx3tsriRTLj5vu+YnrTE/5/Vm5VgRBEMTDC6XaEsRy\nQJyCWlERb/gjjqzJ033d7sRUSvE2HL6t2GZETkMD8NvfxkWh1RoXjGVlTNDydQBw4ABLq5WP6fEw\nISv36eSpu1u3xsfJywN+/3smRg8eZJG5xx9nIlosqkNgkc5LTwBDn8SbEnm9yqmFydKKldbJU1L5\nssWwV8kketKrk6CWSprNFFOtNN5kqa/pYDRFVWsefL3V5cJsLCrq8HgQHh6WHEN+3JmJiYydX7au\nFUEQBLE8oVRbglguqKW3csTRqXffVU7nFG+Tn89EnzyVkm/DO9vyaNfJk3GBaLFIj81tR7ze+DIu\nOgFgZEQqOgHWPVZsXQKwjrfDw2w+4pRaiwVob2fnIj7GF78Yf93bCwwNAW1tiWmhPOo39EncJmb3\nbnaaPPrmcmHP6Ci7tsnsUZTWyVNSNexVFirdMxm65pCmTQyP9skFi9ryTKCVxptfXg6zw4GRK1cQ\nXLcObxw4IJx/OvdFLYrLx3xt9Wqc2L1bGFuvxckju3YJ43qqqxOOwY/r8Hgw0d+P+UgEXr8/I0JR\nj/1MLjzLBEEQxNKBhCex5Hgo6xK06u3EtYNer7JgEG/T2yv1q8zPB1auZFHLFSuADz4A1q1jPp6N\njUw8cubm4t8XF8dtR3p79Z1Lfj5Lq+Uit6aGRUXn59lru52
dAxe/c3PA/v1MdOfns2W1tcBrr8XH\nFAvm06dZRFX+Rnh6Ov691wtwAVBeDtfEBBxKolUPBlNSF7PGMZfmkA200njHe3sxHw4jGokgEgqh\nv61NOP+k1yQAwAfWrElBX8lFGv8dxcd80NeHwc5OYWwuvruamhSFG1+//+hRYVzx91wI8uMWb9yI\nwc5O9Le1wWKzZUTU6/mAYLk+R7nIQ/l3j8gq9EwRiwEJT4JYCmiJGz3RKfE2fDy7HZicBP7lX1h6\nbijE6kD/6q+YX2dnJxO78nR5sxlwudjy2lomOsXRSCXsdpYGvHIlS4k9c4bNpayMiU++jcMBdHXF\no6ZmM4tmtrYykVtRARw7Jj3XYJCl6c7OsnNQEpGxiBEAdl5cANTWwp7s2mphsGHQYtY4pjqHpRLZ\n0orS8fPm2AoLsfOVVyTrFK9JNxIbVIlQE2l8TFtxseLYWsJNPK7SMRxuN+xuN0LXrwNgfp/yuWfz\n3uXCs0wQBEEsHajGkyAyRZr2E0nHtNlYF9fmZmlt4cmTrEHPjh3x2kaleciXfe97wC9+wYSaOILJ\nsdlYNHN4mAm6SESaFnvlCvCFL0iXlZTEmw6p0dDAmgpFItLlK1YATz7Jjieu7RRjscTnqmRJw61K\nACYyz55VtjKRr1erZczG/URu2GgYncNSsNlIVt/J11lsNpjtdtz97W8xE3tW8yoq8Gc3bgCA+jVJ\n0ZaGX+edr7yCrkOHEsbORB2l+N546+vxjMgK6VwggJ6WFkRiP6da986o1U0uPMsEQRBEbqCnxpOE\nJ0Fkimx4dSYbU94IaN06FqUUd53l+8jHOX8+3mE2GZWVbFwuBk0m4PJlYNs21txH3JVWiVWrWAQ1\nEmFRzTNngPJyaQ0op6EBmJhg4pA3JyoqYo2GLBb2/eiougdmKMQsWaJRJtB37WLnyJsJFRdL12u9\nUc4F79UcIZcbzXCxdO/qVUFMygWWWhMejqaY5g2qDsO4LU0SMiHckt0b8XnDYkHl00/jwNGjqsda\nCh8wEARBELkJNRciliU5W5eQDa9OPdYeABN1lZVMKHHR6fEA/f0s0sd9K/k4WoKR87nPMcHHx/v8\n54H//J+ZyBOnrirhcrHj8OjmypVM7D31FHv9mc+wSKd4XsEgE7o7drCU2vPnmVCdm2PnVVWlntLq\ndgPHj7OIqtvNRKe4mZB8PSA0bWrfuTOxJnQhvVdzHD2NZhYLnq7KRadS2qfcnzIyNgaT3a66fQK8\nQZXo1M8FAvjpypVoLi3Fm6ImRYD+31GZaLSU7N5IUovn5iQ1rUrkYursUknzzjY5+3ePWLLQM0Us\nBiQ8CSJTGKz1S3vMYBCorwf8fhZJ5AKxpoYt37gxXqPpcknH2bFD/ZguV3wcHnHMz2ciko+3eTNQ\nWJh87hMT0qZEnBMn2FwuXAA+/pgJzT/9CVi/nonRkRFW4zkwwIRvzEICRUVsH73Xlottp5PtpwRv\n2vTee4k1odm4n6mi1dU4y8d0AFnrRJsuXCyVVlerdnQVi7Px3l7c7exEdGYGBVVVKYvpUHc3pgYG\nMDM6itttbfjl9u2CQJqJWaAsBGLxKhdp+4JBOMrLhW3tJSVJBWUufsBADYwIgiCWD5RqSxALQZbq\nBYVxe3pYWuuVK6xxT2kpizS2tbGI3ZYtrAGQ08kiiP/2b6xhj/xnc/VqlhobDrOmPkC826wSNhtb\nz2svS0uBe/fY99XVzHtzbIy9NplYumttLYvO8vnIPTs54ppOjpGU195eFum8cIE1PlK6B7zuUy19\nN1dYjLTfJZJqLE9XPRcIoPfkScyFw/Ds2JGQWpqptGE+DsCa+licTgzGnuNkaao8NXispweFXi9s\nRUXYFwyiq6nJUH2lEkqpsnye9pISfOPSJRRqNQHLMXI5zZsgCIKIQzWeBJErZPpNPBdR4npOJXh9\n43e/Gz++xxOPIsrhtZV
GsNuBmRkm2jZuZOI3P599TUzEhaeY8nImfAGWUiuvNzWZWPMicQ1raSmL\ntBYVGRPvSteK3wO1xkK5xmII5KUiymVI6hqRKAL11lUqNdoRL9vz6qt453vfA0wm+I4cwduNjboE\nknx+fI6Tg4OK9ZVGGv6IRVrJli0Y7+2FxWaDtaAAvubmJSnatO6X0YZIBEEQRHYg4UksS9rb2+Hz\n+RZ7GsaimJl+Ey9vLKQUHeRUVbH/+/qYaKupie9rVGh+5jMsPVa8j9vN0m7v31cWmXK2bWPCt7+f\nRUDPnQP+5m+AN99kUVqLBfjwQ9Yo6cUX2TKbTdrx1oh4l18rhXvQ/txz8E1MZD4inSkWQyAvsihP\nVVCII5Gl1dX4ytmzhsRIsmZFyZrvcIFkyc/HO1euoKayUlGwRiMR3G5rg62oCJGxMUGoqglXpWOq\nXRuxSDvl9z8UjYIeloZIOfN3j1g20DNFZBo9wtO6UJMhiGUHrw8E2Bv0ZG94gkFjb+LVRC1ffu0a\ne22N/Qi7XEwoOJ1MqPGGPvn5LNV00yb2emyMRSi9XuDWLePRzU8+SdwnFFKuOzSZElN5AVbTWVjI\nhOf9+8AzzwA3brDvt2wBtm5lDYzKy+Pn1NwMNDay/fU0+xFfP17rWV0NrF0LHDmSeA/6+liklu+b\n7TevRlOvuQfrQqJxzGxHmnhtH8BsTvQKin3BINpj3YvVonzJ5i4+LgDYioo0vT7F6b2IRnEvFELf\nlSv45fbtcK1ZIxGx3vp6rG9okFisdDU1ITI2hvyKChw4dkwiVkdjP+viY6pdG17vmWyuelhKUcRc\nbIhEEARBqBCNRrP6xQ5BEMuQZ5+NRoFotLY2Gh0dTVz/8svR6N69bDul9cnYu5eNDUSjDQ3xsUpK\n4svlX3Z7/PuKimi0sjIa/fRTNp7JFF9nNkejFov6OJn4qqqKRgsLE5d7PNHoU09Fow6HdHlDQ+J5\nl5dL14+Oxv/Xus7icerrlfczci8zjfz+LkF+vXdv9EdA9EdA9HQWzuHNZ5+N/giI/rK2Njpt8J50\nvPxy9F8qKqJHSkqiJ/fvT9g/2dz5cX9kNidsMz06Gj3d0JB0PP71E5cr+k/FxZJlaueiNB/xsp9V\nVUn203Nt1Oaqh2zf20ySznkSBEEQmSOm+ZLqQop4EkSqaEUx5RFRt1t/lKunh/1vsbBmP/39yg14\nOG43iwTyZkI8lXTTJlY/KY48JmsWlAm4X+eGDcD4OFu2cyezU/ntbxPPw2pl5+nzxSO5tbVs/vx8\n+DUWR72Uajd5tFJshaLHs9NoRDpdloFVS7YjTfuCwZRrMXnHWQCChYg4Ypps7vuCQfx8wwaEY3XQ\nJosF06OjCIdCQkRRfswx/vPKMZsxK+psW1ZTA9eaNaoRWKX5iJd9+fRpSfOhPa++KkRL1a6NOPpp\nlGSR3VQjofJ9M9FMCUjvPAmCIIgFRkuZpvsFingSGebs2bOLOwG9kUweReNRPnG0ct06NkZVFVsn\nH+upp5SjmTU1iVFEkyka3bEjGt2/n0X3xOPYbOlFLs1m9XVWq/Jyj4fN4dNPpVHYhgb1iK04ullV\nxfZXi3ByxFFDebRydJRdY6Vrq0DCM5VOtFoPWue2BMiFSFPHyy9Looo8OidELYHo0erqhDlqzV3Y\n32JRjPyJI4L/XFER/dXOncLr/89uj/5vJpPw+qeVlZrXSGk+8mXZikJ2vPxy9Nd790bffPZZ4Vh6\nIrtKc1AaS23fpRRVzQUW/e8eseygZ4rINNAR8SQfT4IwCo9ktrYmej+K4T6Q3E+TR+W4nUhHB6st\n5N6Y4rG4JydnZoY1CTpzJl7XyYlGgQ8+YNHBq1eZr+fq1cxKhNd6pkqy6KhafejwMIt2fvvbrDMt\nEI/sKfmHFhay2k6+3Ucfsagj//L7lf0redSwupptI24Y1NTEbF2Urq0WPGqq5x6nCo/eL
nLtnNz3\n0Qhi/8jFItTdjcj9+wCkHpX7gkF4/X546+sTmgudCwRwyu9P6rXJ/SxXPf00gMTI37gowjk9MICJ\n3l5hDmU1NZIMg+INGzTPw+F2w+5245TfL9wL+fXNVoRZySdT7d5qzSGZ56Z8X6rNJAiCeAjRUqbp\nfoEinsRyQ289II+aiaOGxcUsMrl/P3vNay1raqLRF16IR9k+/ZRFL/Py4tvt3cu2KS6W7iv+2rEj\nvQhnJr7E8yovj0b9/vh1euGFaLSsLDFa6vcrRwArKuLb1NdL1yWLGoqjoSUlxiKL6ey7hFCLFi4l\neGTySElJdIzXM2sgjrQ1ezyK0TmOOPInjuZCKrKxAAAgAElEQVSJI5z82Hw7cbRVHBXVinpqRQCz\nFWE2UkurN1KsNJZ831yImBMEQRCZAzoinmSnQhBG0WszIbfxEOP3s26z3E+zvp6NK/f6fOQRVuPJ\nsdniUUwlKxTuqcntVZLZrKSLy8W65g4Nsbns3s3sUTo6pNFJsfWJ0jXZto1FLXt7E+tfS0vjkWK/\nHzh+XN/cuH1NSQlw6RLr4quXdPZdQohtKOwlJXj+5s2Uo5eZ6IJqdIxzgQBGr1/HWE8P/O++i0KN\n+yTuEhseHobV5RLqMPMrKvCtGzeSHlN8vfIrKjA1MAB7SQmqnnkGk3fuwOp0Ir+8HPd7ejD0/vuI\nzsxI9tey+hB7cCbzAc0Ecj9SrXrRhRqLIAiCWLrosVOhVFtiydHe3r64E9CbJslTQS0W6fKaGmbp\n8cQT7DVvgCNuOJOfz0TavXvSfbnoNJmUU13n59k6LjazJTpNJpY2+/77TFgODbH02lAIMIt+rRQU\nMOHIhai8CQvA7FV6e5VTW/Pz2f+FhcDf/33ivoEAu07yVFye5nzzpi7hKDxTgQCznKmoWNaiE4in\nPtpLSvCNS5fSEgrJUiy14Om+N48dSxgjWSpwqLsbdzs7MTUwgK5Dh3TPMTw8jIKqKjyya5ewbmpg\nQHPe/HpZXS64N26Et74ez9+8ick7d4R5/+Ff/xWDnZ34/cwMnJWVMDscAKSWLGrw9F7eSCjVFGgl\n5NdRfL9+vmEDpvmHOykgHqvr0KFFT79eriz63z1i2UHPFLEYkPAkiGzBxc+HH7JIJGfNGiZa+Xpe\nmyh+zYWYWh2lWhbB7Kz6OjW4f6URolE2v8ceA155Jd6xt6ODiWWnkwnuBw9Y7WkgEBd1YqxW4B/+\nQb3LKxfO4+PA976XOA+1etvYhwPnjL6B7+5mdaEDA4AOMbOU4ULn+Zs3NaOFWqRTr8eFC/e5FI/R\ne/KkIGraX3oprWOKt//mRx9h/9GjyK+oUBxDSfDuCwbh8HgwOzGBOx0dGHjnHbzd2Agz94kFEI19\nMFT82GNouHYN5bW1AIDI2JimOBbXVeoR8mqiXGm5fDx+LficeeffVKBaTYIgCEIvJDyJJYfP51vs\nKeiDR0a3bQP27WPLeHRTvJ5HB8Sv+RvDmhqWbqqF1crScI1gsQD/7t8BzzxjbD8xMzMsxTYQYFYp\nAItObt0aF40lJSxy2dKSKDxnZ5nAk4tw8fgck0L2hoYtid5InPBMLQObE72k2hxITZTxaJ3R8bhw\nKa2uhtfvl4wxFw7HN5R9oKJ1TPk85ds73G5868YNxTHUGu5Y8/KEbcJDQ+hrbYXN5YJJ9LPnrKzE\nf+rqgsPtxnis6ZCtqAiwWJJ+CMLn+7PVqzES+zCotLpaVcypPdtKy+XicF8wKIhureMoXUsx6dx7\nQj9L5u8esWSgZ4pYDEh4EsRCoCasxIjTRouLgfJyoKwM2L6drd+4Mb5tYaF03y99KS6a9GC1srTX\nO3dYdM8oXAQ6nUzw/tM/xUXi+DiL2ALxOsneXiDWfVQ4PpDo0Sm/NrwLLk9PlqNxXQ1HY/Tcp4cc\nI11QAe3OuVy4fOXsWTxz/LhkDE/s/pdWV8NeXCwZR
0s4y+fZ1dSEycFBvN3YKMzDSPfWc4EAwvIP\nTiwWRCYmUPH5zwNgfp0N164J4/FIcmRsDLfb2pJ+CMLnO9nXh0hsfoVr10rm9otNm3DE7cY/l5eD\nfwwjf7aV5q4mutU6/2pdSzG50N2YIAiCWBpQcyFiydHe3r60P6kLBFhKp7yRzsqVcRFYVgaMjLDv\n6+uZTUplJYscfvIJS2HljYkA1njnvfeA/n59c9i5k0VSOzqAyUlj83e7gS9+EXjjDeDJJ5mwFL8h\nt1qZcB4bAz7/eeDECaCxkaXDihsiVVXFrVPU0NvISYVwKITzgYBms5OMPFNq93UByUSTHy2MNsER\nN+XRarAjR3z/Tvn9muOIz38+lkLK56lnf6Xj8vMTn4ccl9eLyIMHsNjtcK1bh99HIti5aRN6T57E\nzOgoSqurke/x4LZoPvLrxq+rragIkbExxe2OuN2CfYyzshIVTz2FPYcPo6upKasNfhay8RGhzJL/\nu0fkHPRMEZlGT3Mha7KVBEGkiZIY4XWJAItmrlnD1k9Px/fjzT6sVuBv/xb47nfj+4g72wIsZfbU\nKUCclqhFV1d8fD3w7rglJezr+PF4nac8xXd2Ni6aOzqAF19k5x4IsPNqa2ORTj1RRR4JTREejVkQ\nxPeVe4EuMDwyBQDnA4G0z11JyO4LBnWJeY44AmfJz8frPp9uYSy+f3qi1+Lz9/r9WN/QIMxTvr/8\n3MTibV8wmHDtrCoZBVaXCzP372MmFqWc7O/HEIBP3ntP2KZw7Vr4jhwRrpv8WOLruvOVVwTh2NXU\nhN6TJzEXDqN8xw6YYo3KLE4n6t95R4iois/7V088IdSWZgqj95wgCIIglKCIJ0Gkgt7oltg+pKGB\nbXfsGBNgSnYoAEujjUYBbnBvtwNFRdIIZyawWlkHWpntAwAgL48dl0cyy8pYLWdzM7B2rTRt9sAB\ndo5K4wCsM+zatSy1d9Uqlnb77rvGO8bmQEQxKdyGRa+ozgKZjkylE63kGI1aJhvnl9u3w1lZCXtR\nkaJwVTp/LjDvf/IJ5sNhWBwOFK5bh9Hr14WGRo7yckRnZ4XXJquV+Y2Zzfj6xYso27YN4VAIv9i8\nGdOxrAT3Zz+LqTt3EOYfsqhhsaDy6adRUFmJ8d5eWJ1ODH/wAaZjNkkurxeutWsFOxa+zb5gUHK9\nAGB1XR3uXb2Kr164IGkIxc9bbBGT6v0iCIIgiFTQE/Ek4UkQRuHRLC6+xD6VcrgY8XhY1HB4WJ/F\nicnExCf/P1uYzcyCRUxBAUuhjUSknpvl5Uz4bdgQF8EWC/PztFji1i9btjB7laGh+DqxUAWSXzM1\n5CI+195Up5kWrIaR9NlwKIRfxcSZTUWcGUGPkFWbn9Jy8XglW7ZIRJaeeWoJYaUU2Z84nZibmlId\nUyzWABYRHf7wQ+HnwpKXhw1/8RcIdXfDbLPBYrfDbLPB19yMtxsb0dfaitLqajy4dStBhJosFkRj\nP++O8nKEh4bYcptN6IDrKCsT9hNv4ygvR2RsDPOxTIbSbdvwlY4OxevEz3t6dFSSXkzRSYIgCGKh\nIB9PYlmyKN5TgQCrwSwtlYpOi0XqUynfh3tCPvoocPductHpcsW/56JzxYr4cZKxebOx8+FjykWn\n2Ry3QCkokK4bGmLndPEiqze129n53L/PRGdFBas17exkArW8nEVt+bUqLmb/p9oxNosdZzPyTOn1\ndzWIWmMXpaY9DrcbBWvW4G5np2YnX62mP4C+jqVGuquKxxvv7ZWs1zMftXRbvu/bjY0J6aBzski8\nragIAJjHpsWC2ViKO++qW7Jli+TnwrNjB+5dv46Bjg70t7XBVlCAZ06cENJjC9etg62gQNJ1+Q9O\nJ1bX1WHl008L8y17/HHh+0dizYhKq6vhqalJ2MbqciE8NCSITgAoXLdOiOAq3ff9LS0oiHmH3v/D\nH3C6oUFYr+fay
klln6XCUjw38lwkMg09U8RiQMKTIPTQ3c0a/4yOSkXn3Fzcp1JpH+4JKar3SsBu\nZ+mqzz0nXR6NxlNxuWC125VF6I0bxs7HYmHpu0C8ztPlkgrRU6ek+xQVMcHn9QK3bycK0127WO2n\n282+HA62vLCQRX6vXEmvY+xD2nFWTWypCT69nXz1WM3o6Viqdryxnh4ATOjtfOWVhPHk++mZj5oQ\n1uq6CgDmvDysrqvDN69exfqGBiY85+aA2VlY8vKErrrcAoVzt7MTY598IpkrFy7Htm7F1MgI7nZ2\nIjw8DJPdjrwVK+D7yU9QsGoVZqemkFdRgQPHjuHA0aMoXLcOD27dwr0rV5C3YgXcmzYhIttmfUMD\nVuzaJZmDvaQEvuZmnAsE8HFzs6q36XhvL+bDYURCIfS3taFl82aEQyHdtkJiUtlnqbCcz40gCCKX\nIeFJLDkWpQubuLHI1q2s02wsmpEQgeO2KNeuxZclS5edmWEi7s4d6fKaGvYlJhLRl6qrhLgJ0Nxc\nvIGRy8UaBonSDYVtOEVFTDz6/ez/UChudQIwr1K53QmvQRsfZ+fn9TLBuHkzixwfOBBPT+U2Msmi\nD1mKKAK57WemJrbUBJ9eX0XDVjMa8yvZsgUtmzejubQUbxw4gIJVqwAwK5GuQ4c0z0vPfMTCVRy1\nssSebaV9v/7BByioqsKf/f73ePbNN1Ho9WJ/SwssdjsAlg5b+vjjgs2KUhOhsscfl8yVC5cHfX2Y\nFXV0js7MYHpwEJaf/xyh7m4MdnZiemAAXYcOCdHoqbt3MRMKYXpwELfffluyDbd8MQFwxLIdzHY7\nih97DG83NmL0+nUhRZcdUPp7RT73qYEBnA8EUrrXmXo+cpGleG65/DuKWJrQM0UsBlTjSRB6CIWA\nl15ib/Sam5n4UavpE9cickwmZi3yySdArKmIhKoqZoUijjiaTCwyqdSAKBWS1Ys6HKwrrrzhkdnM\nROLFiyyiye1e/H4mNF98kY175EiiIFRqtiO/NuXl7HhcBOdi7WaOotcqJp39jdSXyu1G8isqMDUw\noLvekM/Hkp+vWvspns/M2BgGOzsBAN76eljs9qT7yml7/nl8+qtfwZKfL1iU8C645wMB/OnUKUFU\neuvrkb9iBULd3Rjv6cHM+LiwjxJevx/DFy/iQV8fYLFg5e7d+NKJE0JNKMDSaedmZhCdmYGtuBjf\nvHIFZw8elHTlvd3WJqk/zVuxQmhK5P7sZ1F//rzkHMOhENpfegl333kH04ODcHg8KN64Edb8fNhc\nLviOHNH9rKT7fOUyy/ncCIIgFgtqLkQsS3Lee4oLLpcrMYro97N1cusTLvz0UlwMrF4N/O536c/X\nbGbNhPr6WK3m+HjiNuvWAX/8Y/y11cpE5NGj6hFIJWHOrw3AoqAPHsS3V+sGuwDdbHP+mVokjHS1\n5Y2DAFa7+MyJEyn5SSY7pnhdXkUFpmXCVu98zwUC6GlpkYhHuUB+48AB9Le1wVJQAEdxMaYGBxHV\n8SFQ6bZtKPrBDzD5d38nCGM+nz2HD6P9xRcxcOFCQiOi9Q0NmJmYkDRzCq5dK5nj6ro6mO12IBqF\nr7k5QZT3njyJ8L17sOTnwxzr3jscs06iLrdLG/odRWQaeqaITEM+ngRhlEyIHLlnJaekhKWsyhv6\nFBayqKER4enzAR98YHxuQLyTLY+Azs+zWtSyMmXRWVLCmgmJhefsLDu3DRuAJ55QvlZKHpzBYDxy\nzJsYVVczuxWlqCmQE/6YDytGUhL3BYOs5lAkivQKHXEk05wkbVY8nwPHjqHr0CFY8vNxyu9nkcjY\nBz1WlwufvvEGjhQXw2y34+sXL+LSD34giZZyQWcrLkbl008L0UA+F4vNBntpKWbu3cOk+AOSGLai\nIkTGxmCy2WArKGAdb/PyMDkwgIvPP49NsVReACirqREE+DMnTkhEOgDAYkF4dBR
f+PGPcfLpp2F2\nOPB2YyPMIp9dW3Exwvfvw15UhPzycpzy+yWR3d6TJzEVy0iYjzVUMpnNqteSIAiCIBYaingShBgt\nyw49wpRvY7MBV6+y1NqSEuDSJVbfqGTtoGRrokZNDXDmDGtGJIqoaKJlzbJiBZurON3W7QYuXwa+\n/e14pJIjjlimkiKr134kB/wxH1YynY6rtq0kkrliBR558smEiB4AnD14EH9qbUXZ44/jwNGjCVFO\nNQqqqjA9MiLYqpgdDsyHwzBZrfj6Bx+gbNs2YdufrlwpCDie2spFJsAEoMlqRelnPwtHSQmmh4Zw\nN/ZzaLJaJVFRS34+rE4nympqhPny69qyeTOmBgYklivrGxowOTgonI/JaoXJbMbKvXsRmZwUIqgO\njwfhmKURj2Q2l5YKPqQAizq7N23C7bY2eKqrsV90fIIgCILINGSnQhBG0bLs4NG31lblTrYAcPIk\n26atjY2zbh3ztvz2t1kjoXT53e9YZ13elZajZbnCRSfvNiumpoZ13m1oAP7wB9Y8ye9nUU6vl4ls\nbu1iszEhnZfHXqdqb6K3WZBWN1u9zYkIw+jpamukQ6hWJ14ArDmP3a54zL7f/AbhoSH0t7Wh/cUX\nAQDjse65ptjzb5P/XAB40Ncn8fLkNiXR2Vlc/Ou/lmwb5n60iDcVWl1XB0dZGfJWrEDxpk2YGRnB\nQEcHbre14e4777CNzWaJ6LQVFqJ02zaER0bQ39YmOV+H241v3biB9Q0NEsuVPYcPS65FdHYW8zMz\ncLjdsMfOy1NbC091tWQfACiPNfsy2Wywud3I93iYt+jwMG7Ljp8NlqJFCUEQBLGwkPAklhwZ8Z5S\nEyvl5eyLv+mVb6fHS1KcMtvWBty6xSKTra0sssnhvp1ud/JIpJxIBHjsMfZ/XR378npZ859kIq6m\nhglKufCsq2Odeg8eZDWpxcXAiRNxaxQ+x48/ZgLwc59jacQjI6wpUrajkFoCVc+HARqQn1nqGEnH\nTdaJ15KfD4BFFGGx4KcrV6K5tBQ/W7UKJ3bvxlt1dYLnJgDBN7Mg1j05OjeHgqoqwS7FKvbFTcLA\nhQv42erV+PXu3Xht9WohTRUAzDYb9re0YPLOHYRHRjA9OIiJmN2Kp7YWc+Fw/GdXlLHwMYDI+Dju\nXbkiLLv1m9/gzQMHEA6F8ItNm/AvK1bgj8ePY25qCt76eqG+dF8wiLyKivg1KyhAeHQUe159Veis\nuz9muyKuSXVWVsJRXg6r04lIKITbbW2CpY3V5UJ4dFRTEKYjHsmiJLvQ7ygi09AzRSwGJDyJhxM1\nsdLbCwwNxb055dvp8ZLkNiMFBSzCyaMgJSWsO2xVFbBzZ7zx0NSUMeFpNrNx29rYMVatYqK4s5P9\nz43sucjlgvPMGSYoRbVnOH8eePNNdt5a4o0LQB5Rqq0FPvoo+6mvWhFNPR8GEFlDr31Lsm0dbjfK\ntm8HAETu38fttjZMDQxgZnQUk/39GOzsRF9rq2CBUlZTA3tREV73+TD4298K40zevYtjjz+O6dFR\nWJQi+3LMZoRHRjDZ14e7nZ2sC62IW7/5DX62apUkqln86KOCUNT6uXV/5jPC9/y82l98EZMDA4hG\nIojOzuJuZ6ckwutwu7H6S1+CKXausw8e4HZbG7oOHYLd7cYpvx9vNzYK6c9cLPaePInw0JBQu2p1\nueDeuBGOsjLMTkzoinqmIx6XokUJQRAEsbBQjSfxcKJWNyhf3thovL6Q1y52djKLFIClwX74IfO7\nlB/nD38wliJaUsIijnxOmzfHbU4A4K232PHffBP4/vcTayh7e4Hdu4ELF4Af/ICJ62vXgOFhfZ1l\nX30VOHRIuzbTKGr1s1p1t3prRYkFwUjNpxjecMdTWwuH243b4sZcgGA5wjvl8hpJNRxlZZidnITZ\nbsfc1JQkkmkpLMScqJGWvDZTC0teHsp27ED
o+nVWV2mxKPrrmmw2qe8mgILVqzF5545wPJPVCmtB\nAeYjEZisVljsdhQ9+iiGYt1oAcBeUoLnb97EKb8/oWuvvMbV5nLBZLMJ9Z6Ttgo4IwMoqanFV88k\n/3BAfA/0fJAghixKCIIgHm7IToUg1FASK4EAcP060NMDvPsuE2Xi17GUPmFbuUjiy3p62La8FpPj\n9bLurVy8Pf00a85z7x6Liqq8eVWkspKJRbebpc6Ka0erqlh6rx7Eoq6qSj2CqSX+MoHaMai5kCp6\nRF6qQjDVfY1YsIgRCxcAaH/pJfS+8YaQMbC6rg7PvvmmsL28mY4SjrKyuG2JqGkW9xi1FBRgTqFj\nrRgtUWp1OmEpKEB4aEjzHIF4Y6PkB403AjPZbPBs3w5HaSnmIxH0t7UJwrCrqQk3jx1LuA7cambE\nVYsfThzDN3AIE/WHETyhz0uVxCNBEARhFGouRCxLMlKXoFQ32N3NopQDAyyiJ38tRilVly/r62P7\nyQ3mx8fj+xw6BKxZw7rI8je1zzyj3PhHic99Lj73WG2cgOjNuSbiNNVkabPZSmcVp9HGbDQSjqEn\nvTlNlmqti57UyHTSJ1PZVy3lUqt+UNzIyOF245njx7Eq5jFXVlODL772mmR7TyylvWjzZpjz82Er\nKYGJP0MxTOKGW7GfM09tLfzvvov1DQ2oePJJzfOp2L0bXr9f+VwLC1GydWuC6PwYTLACLOXVHEub\nNVmtmJdFQBWJRuGsrIS1oADRSARDXV1CqvH6hgaUbNmCU36/ougsq6nB12Ln9+6u07gHLy7VtuD/\nbU7ebfh1nw9vNzYK9jTUJCi3WKq/o4jchZ4pYjEgH09i+WLUk1Murhobpa/FY167xl57PCyddvXq\neM2mEtXVbFve6VY8Pl//2mvA+vXafp5mM0u1fewxJlwnJ6Xr//qvWS2nEvJrwj1HtdJU9W5nBO7J\nyQV6fT0TmPJjKPmBLkNSiS7qqatLp/YulX33BYOKUTMuYgHgfCCgGAnl12CspweFXi+s+fnw1tcr\nWqscOHoUv9q+HfmlpZjo6UGEC7BYtNBRVobCdesQDoUQnZlBWU0NXGvWwNfcjK6mJtw5fx6zU1OK\n6bBiBi5cQP6KFYp2RLPj47h39ariftHZWZjtdsyKfi8ki5yu9Plw97e/xXw4LEQ0g2vXSra529UF\na34+ImNjgr0LwDrorti1C9aCAsGP1O5242C/HzsrnPifjwXhFnmUyp8x8b0RW7Wo3SeCIAiCSAWK\neBJLDl8sCqIJtzVpbQVi1gtJkUfWlCJtPKo5PMxSUzduZNHNvj7lOk2Xi0Xztm1jTYQqKoBjx+Lj\n+/1McJ09y5bxxkQAi2TyRkDV1ay2E2DdMzs6WG3o/fusu60YU5IsB3mkVq+lid7tdHIuEMDrLS14\n6/59hAF2bs3NGT2GEczB4KJbQaQSXdTT2MdI8x+1fXmETc/14aJHvr0eEcuvAW/2c7utTdVaxeF2\no2DNGtzt7JTUbyIaRUFVFdybNmGoqwvRmRlYnU5YnU7MxbYLdXdjamAAkfv3EY1EYLLZhAilnOjs\nLCb7+1UbCc2Ju+Da7XCUl2MjWKSTd9a1FRezDVQsjyxOJ8xWK/7s44+Fe9XV1CQRl6b8fMzEGiEJ\ny2PjRcbHhSixWEwOd3bAM9CKq4cCkuurZmejZtVCLD66/+4RhE7omSIWAxKexPJFHDlMJsY4bjf7\n8vuZWOTL+Gu5ncpHH8U7vPL/a2rYtqWl7PXEBOs829ubmLbrdjPLkhUr4sf48Y/Z93Y7E6ozM6ye\n8+xZZpciPh/xG+Gysvjxi4rUu8DmSAfYUHc3Bu7fRx+A8zYbcOnSotZu5oIVRCrRRT0+m3q20dp3\nvLfX0PVRup77gkEUrlsHi8OBtxsbFQUsvwbci9Ph8aC/owPNpaV4I2ZFwjkXCMQ72ooEXWl1Nb75\n0UfCGJ7
aWpTV1OBurDPuzyorMXL5srC9yWoVOsymhKgue35mBiaTSegkO3PvHqxOJ9ybNiG/ogJ5\n/OdUPsTkJG63teGd//AfsL+lBV1NTehpaZH8jDsKCyXXxl5SgpW7d7Pr5nJhWmaXovQ8cXsVW1ER\ndr7yirCt+MMJJasWgiAIgsgEJDyJJYfuugRe+1hYCPz93+vbRx4R1LJT4a+vXmX/nznDaiy5wCsu\nBl55Jf7a5WJpsuI33eJjHDrExGhBQXw9r+cMBuMNjsSi0+Vix+XHF1ujbN8uFaELUC+pB+FNcUkJ\n9nzyibRxUwYw6kd4fWqKzWcRozzpRCazjVFRrLS9OEKpJmD5NeBenCazGdODg5gZHUW/zA4k1N0d\nj3TOzcFst2N1XR2+cvas4IfpWrcOZocDoY8/Fvabm5oSLEdgMglzNYJadBQApgcHcZU3NAIwGw5j\nqKsLUwMDmB4cTD5G7Oc61N0dnyOYsCzZvBkurxfuzZuRX1GBb1y6hC+dOAFHeTlmJyYSro/S81QY\n+zmLjI2hS1S3Lq+vTfWDCiJ7UD0ekWnomSIWAxKexPJl3Tr2//h4YnMgNd5/n/1vtQL/5b9II4T/\nf3v3HhzVeeZ5/PdKfdENqYUkLMsYGceY4AQb2fgaKGvWJo4xDp148SSe3eCdyqomrtp1qiZ4s5PL\nTtXEtalJpWaSmirXpioLGSfEBmKIMSYuZK7GNg4bcBJDjA22bAxCCCSEuLRuZ/84fY5Ot7p1aZ1W\nq8X3U0WZVp8+5+3Tr4Ueve/zPMXF9mqkN5fzqafsPMtFi+xcz8ceswM8J5A6d86+9tq1Uk2N/drm\nZmnOnNSrqM4P9c6W24YGafVq+++RiF0VN1l3t902xdmm6g1yz57NbGttlrk/FB87prDPQac09hXM\nW7/3vZwHfZP5B/7kIGakwD5dED1SAOvcg2n19bp/3bqEQjzBioqE1yQHjAM9PTr9hz8knKts1iy1\n7d3r5iwOYVmD21a9uyIKhv+ncUyro2kqVYeSPufK+fPVuGaNJM/Kb0WFVFCgvu5undy1S93Hj7tB\n7Kb4DoiahQsl2avDF06ccD+T5Pm0u6lJHYcOSbILELGNFgAw0WingsllrAWBhpNJG46KCsn5QdRp\nL+IU1YlGB9t91NZKhw8nfs2xYoUdDCZf2xlPWdlg8BoMSvfcYz+/Zs3gGJPbvXiLGrW326+74w57\n+27y++vstAsPeSttLlwo3XSTvRrqx72d5MbTjxAj86Nlymg+k5eWLNGJ5mbJGLdCbe3nPqfPx4tn\nPTd3rmKeVUTJbj9SXFOjstmz1b5//8itS5IU19YqMneuTib/f+2nggLNuPtuheO5nwWhkFsUSJJ2\nrFypo88/r8LiYvUOs2I/bfZsldTVqevoUQ309bkBdn00qgc2bkw49tmrr3b7nia3pgEAYLxop4L8\nk6pNSaa820qfeip93qOXU8ynpER67bXEFULvCktrq11YyGnf4OR4OquWqba0Ol/z5mr29trvNxRK\nXcnVCTrXrUssatTWJr30Uupts5GIPffB4sIAACAASURBVA7JXjFdvtw+xrsFN/neetuaTIEWCpls\nWx3r9tx8Nt736qzIhaur1e1ZZRvJWFd1S+vqFK6pkQoKZPX1yerr08ldu7SnqUnhSERfefddlc2a\nlbBt1ert1cUTJ9S2d2/KoNMEg27Rn1Rm3HmnHdiOJi98rJyV1IEBte3dq/Y//EHn3ntPJ3bs0HNz\n5uh8S4sk6XxLiwZisZRBp/NeqxcuVO+FCzq1d68utbaqx3PsqddfV6yzM+Fz7otvJ5fktncZyZX0\n/wQAIPtY8cTkMopVyp07d469Gltj4+DK5IoV6dtztLTY22Zfe21o3mFnp10IyFtFNhCwCwlt22Zv\nd03VbiR5FVeS5s2zg1fJDg63b098nfc1XV32yqZkV389diz9sc5KZvKKq
TT8vR3t/Zmidu7cqa5/\n/MeMVvHy0XArlqNp6+KsXHbHA7zk84ylNUy6Y3c3NenounUJuY6SvSW1uqFB51taFCgpUW9Xl045\n/384Cgrs6s/Ofx2FhTIFBWnbpwQjEVV+5jPqbmnRxZMn026TTSdQXq6+ri69K2muJBMKSZalUHm5\nZtx5p/p7euwVXA8TCLhbdwMlJaq+/XZ1vPPO0O3BhYUyxmjGnXeqqLpajWvW6NfXX+/28fS2QZHs\n1dDLZ8+6969oxgxdbmtTVUODlm3fPqrgP9OVbfgvo3/3gGEwp+A3VjyRf7JV/Ga01Vzr66WPPx4a\ndDY12dtq45UlXX199uqjN8cyWaoWJocP2yuR0ejQoDP5NfFKlKqsTF39NdUqcXIuZ1OTHcB627lk\ncn+msPH0u8w3w73X0eTHOiuXIU/lWO95xpJjm+5Yb4GdYHm5CouLFaqsVMlVV+nc0aPua5xKrdMX\nLNC1S5cqVFXlBptOFdnpN99sf72/P2XQaYJBFc2YoYJgUG179+ri8eNu0BmKRNxKsl4F4fCQr/Wd\nP5/w2OrpkdXbq9iZMwqWlmrJ+vUKJ1W2teLXKSwpUaC0VK27dtkBZHzFtaCkxF7l7O+X1denU3v3\nui1mquO54NMXLNCX9+9XcW2tJPvzKKmrc+9fqLJSX3rrLV2/YsWog04p9TxhFRQAkCkCT0wuoyh+\nk9Fv6JyA9qabEtujjJYT3J09a2+vdbbYSnaPzeEClVRBXSRir552dAwWJHI0NdlVciV7NbW+3g4Y\nDxxIXf11NEHjkSND27l4TZJqt7nS2Ng4qavK+m249zqWADzTIkKjud75eEBpAgF9cc8e1dxxh3o6\nOvRJc7O7yjp9wQJF33xT169YoYd37NCDW7YoGK9mHayo0EPNzfZzu3YpEP+6kyvqLSBk9fbqclub\nYt686Pi1p99yi6obGoaMO5hqu258d8/cFO+z4bvfVTgSUc0ddwx5TWFRkR49dEgD3qJF8XMNXLyY\nUMzIWxhoSbz1ycM7dmhafb0ePXzY/Ty649t2TSCgh3fudAs2jWVup/p8J0ProSsRK1PwG3MKuUDg\niSuDE9AOl+c4HG+l2N5e+09dnb1quWPH8MFauqAuXT7rkSN2QCrZqx779qUNGHc3NenFri69XFur\nWKqVzOTxpwtOJ0m121yazFVl/Zbuve5ualJvV5eKa2u1ZMOGEe9FuvOk69mZarUsXfBaGv8li9XX\npwM/+EHKticXT5xQqKJCoUhEr0SjennpUhVfc40kqffcOR34wQ/c8V08ccI+X3+/CouKFHT6YsYD\nSG/epyksVKiyUlZfn1p37VIoEhmywhnztEwZjd899JB7fwuKitxczekLFug/nTypA08/LSctxRlL\nMF58SIWFUiCggnBYJhRy72ny/Q9HIgpFIlo3b54uOO83fv9SGWn1MtXneyXtDAAA+Ct9MzJgkso4\nL8G7ktjQMLYtpWvX2q/v6LDboYylUq4T1HnH4VSolYYGg94gMRIZvF6K8XYeOaLW+OrPnlWr0udg\nOeNPlYMKcl3iOo8ccfMl9w03n1JIztUsnTXLzQ/c09Sk+9etc1fLvF9zgptkqbbxPj9vni47udGy\ne2b+e02NpMEWJ0We7aaLf/Yzd1zeXM/+y5fVf/myJNnVZSMRxeKrqZIdnDqBZvXChWpcs0a/W7Zs\naC5pGk6Op1fJNdeo49ChIeeYdt11Ckci9tbiePAXKC7WNffdp3t+8hO9sHChm7s50Nen9n37Eu6f\n974X19Tow9/+NiEvNlRZOSRAdF5z9o9/dHNEnfOl4r3G4mee0b5Vq9zKxGPJ50Xm+B4FvzGnkAsE\nnrhyeFcSZ80aWwDmBI+pivZIY2sD46x0SnaF2uQA1hskOudOEzAG4tsRqysqtPhHPxp5/JOFn21z\n4JvxrGYlB5WpzjXS+Xc3Nall82b1x
2Kquvlm1Uejaly9WvueekqdR46o6rOfVcGtt+r0/v263Nbm\nVrt1FRaq4lOf0lV33qlQRYVeiUbV9sYbGujpGfY9hyIRdZ84oYJQSAM9PQpXV2tafb2MpPIbbtAr\n0ag633039QmSCxilcXrfPhV6tvta/f12UBvv0+td0b18+rQ+2rJFH//udxrwFDgKlJWpr7tbgbIy\nxTo6FOvsTLjv4ZqahKAzWFGhRw4cGBIMel8jjfx5e49P/oVEql8mAACQClVtceXIpK/naHmrwobD\ndkB1223S+vVDr+PjOGKLFmnP3r1aLCk8nmq0Ex0IXuFVdCcrb59NJ9gb7UpWcu9USQk9O3c3Nanj\n0CF1HT2q6JtvalpSvnKqKrZOJdXk6qrtBw+q6/333TzI5ODv+hUrdLGtLSG4SiUYiWggFlO/p9VI\n6cyZKquvd1cmvVVnTTA4WJyooEChigrJGPWcPatQZaWqbr45ff/PggIFy8rUG+8TXDpzpv7jn/7k\n3tdYZ6fWzZvn9tpMJVRZqd7ubncM4epq9Z4/b7eNKSxUqLxcPR0dCkUiuuqee/QffvWrlJ+b81lV\nNTSobNYsNa5ZM+znm/zZeufGQG+vTjQ30zMXAK5wVLXFlWe4fpRr10qzZ9uBYXJBn/FyVisCASkW\nG9ySmyqP1MdCPuHyct0vKTzaarTp7o+f/VNHgyq6WTHeiqPenL6xFpFJztVMzg90tvFeam3VvhT5\nyt4qtlJiEZ3kvqFdx44NBp2SwtOnu3+fvmCBCouLddbZVl+Q+p+5wqIiTf/MZxKCTkn64muvuf00\nvSuqocpK1d177+CBAwPq6ehQz9mzCpSUKHLTTTLBYPoemQMDbtAZLC/XF197LSFIC0cievTwYYWr\nq1O+vKCkRD0dHW7QGSgrU6y9fbBXaX+/ejo6VFJXp69+8IEe3LLFDfjT5dUu275dD2zaNGKwmPzZ\neudGsKzsiinKBQAYHwJP5J2dO3emf3K4ACoSsbfY7t3rT4DlDeKeecYOJr2VLisqsl/IZ6xBbLr7\nM9GB4CSrojvsnMojflYcHeu225GKM410Puf5YEWFrl26NKHtR9f778sEAurp7LQr2nq2n1bcdJO+\nvH+/yurrFaqqUlF1tbqOHh3sb+kJSh2FJSV69C9/Sdkm5fUnnxxcjY2vooYqK/XIgQO6f/16t2WJ\niVe2DpaXq3L+fLXt3asTzc0a6O1Vmk25dpEgSb1dXUOC791NTVo3b5560vzCIOQUQSotlQoK1BcP\nmANJ1XVrbr894TNINSfGUkhrd1OTXolG1dPd7X7N+1k2rl59xRTlyqWp8j0KkwdzCrlA4ImpJVUA\n5Q0QnTYofgRYmzcPBnFPPmkHkwsX2s8VFtptVrJtrEFsugBzogNBquhmhZ8VR/1uLzPS+ZznH/vw\nQ3e1znGprU1WX5+7+ljozYc8dUp7vvENlcycqZ4zZ3SiuVltb70labC/pVNwqHL+fJXU1enRQ4c0\nrb7eLoI0c2biQIxxA9LC0lIVzZihRw4c0LT6endV8voVK1R9662S7CDSWSFNDgK9XwtFIiqOF0IK\nVlRIhYXuSuSOlSt1dN06XWptdd9jcW2tTHy11gQCWvKb3yhcXa2+CxfsgDgefAfLyhSeMcN9v41r\n1iRef5xzIlXgeiW1HgIA+IccT0wNTo5iMCiVlkpr1gwGNd58wuXLpVDIn+qu06cPFiuKRqWNG+3t\nq3PmSPEqlJMufzFdcSRMCd4czYkOCLJZ3fQXNTWKtbersKREdY2NGujp0SfNzW6xHUl26yHLSsj3\nrI9G9cDGjdqxcqU+2rpVVbfcoiXr1yeMzZs/agIBfeX997X/+9/Xe88+627nrV++XA9s2pTwPjve\neUex9nZ7a6wxQ3qASlLRjBn60ltvuVVgty5b5vYg9eaOhmtqEl5f1dCgZdu361f19erz5IRWzp+v\n4
1u3uq8tLClR/Re/qAsff5wyd3Z3U5POxvNqvxR/bqyfU3J+J4EmACAVcjxx5XC2kDY324Gl94cj\n7yrfmjX+rbTddpv934YGKV6ZUpGIdPvtg9cb6wrDcDmqfmClcUrLZS9Sv7b5pspJ/PL+/SqdOVOP\nHjqkB7ds0f3r16ts9myZ+NbVwtLSwZzPeNBZvXChQuXlerGxUe/98peKnT6tE83N+vWcOQn5r95q\nslZfn15/8kl7BdPzC9OBeF6lUwCpddcuxdrbVRAKyerrSxl0StJVd9+tA08/rYttbXr1scd05sCB\nhGtJ9kpo1S23SBq6zbgwni9aWFKiqxYtUk9Xl4pqa7Vsxw73flw8edLNnX3h9tsT7lvnkSNq27tX\nlz15tePN3QUAIFMEnsg7KfMShstRzNY20vXr7fNu3z60HUqm15voIj+QRK7LcEZbsCiTLZ3ec+9Y\nuVIvNjbq2IYNQwKjafX1+puPP3ZX7F6JRtXT2ekWIwqWlrrnrJw/X/XRqB7atk3nW1rs1UxPxdue\n9nY9W1en3y5apJeXLtXiZ56xV0vjBnp7E4JRSTr7xz+6Y3MLIBUWaqCnJyEnM1hRoaIZM/SupOC0\nabrnJz9JCPRStXW56p57VFpXZ/cNNUb9nmO8AffFkyfdIPKdn/7UvR/OWANlZYqdPq3jW7dq3bx5\ninV2ZtTSJlkuf5mBQXyPgt+YU8gF+nhiavD2vkz+ASlbPSzTnXc816PaKyaZ0fZpvG/t2jFv803o\nQVldrZizRV3pA6NUPSiXbNig1598UjJGjatXu9d3giynb6Zj4NIlt13KvlWrFKqocAPIglBIjatX\n6xc1NVJ8VfLC8eO6cPy4+3pv65SqhgZdbm9X78WLqm5oUO/581Jbm3rPn9e+VasSAr3zH3yg2Jkz\ng9uCJX28dav794FYTCeam/X83Ln663ffdQNuSeqK9+wNlpfrTk/PXue+X+7o0InmZknSpdZW7Wlq\nSvmZZPI5AQDgB3I8gVxK7p/pfM3vHMyJ7tOJKSObOX7ec4cjEX3S3Dykt2RyTuKG+fN14fhxBadN\nU+3ixWl7VUqDOa8N3/2utj74oPp6etTjCW5DlZX66rFjal6xwr32su3bte+pp3Rs/fohFWZDkYiu\nvvdet4CPE8C9Eo26wXBxba0utbYqXF0tU1CggZ4eFYRC+lK84NGLixbpC1u26KX77ksItJM5PUwd\nv120yA2Wk59z3qvTB9Tbb7Nl82b1x2Kqvu22IfmtAAD4ZTQ5ngSeQC55Cx9lsxDRRF0HEy6bRX2k\n7BYs8p5bUsrreIv/XL9ihbpPnHAL9JTNnq2yWbNG/d5jnZ16ft48XW5ttStPO/82FRTomr/6K3dL\nqfeajlAkokcOHkwo3iPZ9//Yhg3q6eiQCQQUKClRYVGRps2erdP79rnHeYNF72tchYVupVonAPa+\nn9H8AiD5s0p+H6kCVgAA/EBxIUxJ485LyHYBn7EYaWutX2NlC++w8jnXxc/enamMJ8dvpPxQ77nT\nXSc5JzEUb3VSvXChSurqRvXedzc16dmrr9avr79elXPnKlxVZQd5AwP2n74+ffLqq0OuWdXQoGuX\nLlV9NKqvfvCBDjz99JD303nkiBtAWn196u3q0tttbeqOt1iR7DYn3m3D3tdI0jVLluirR4+qfvly\n1UejQ4JOyd4iO232bBWGw3r1sccU6+wccn+T76E3V7WwtFSxjo5h83QxeeXz9yhMTswp5AKBJ648\nk6mAz0iFiPwa60T36byCjLb4Trb42bvTT94KsOMJipOrqnofe4PQ4d5755EjutTaqp6ODp3ctUsF\nTj9fr4GBIX0ql23frge3bNEDGzcqHIkkBPnP3XijXl661D2X0/tTkspvuEHRN99UWX29QlVVKqqu\ndu/Ji42Nat2zJ+HS4UhE0+rr9cCmTe61vMe/vHSpJKl01iyd2rv
XvZ8j/dLhvrVrVR+NKlxVpf4L\nF/RJc3NWfjkBAMBosNUWV56lS+1AbuHCkQOxXOdGjmWsyInkraATvZUxl707h+O9L04uZfL4xrtN\neLTv3dmmKtmrmJ/fuFH7Vq3S+Y8+GtwOW1CgqxcvVll9vY6tX6/+S5dkAgFd9bnP6YFNmxSORNzz\nePuHmkBA4enT9dC2bdr//e8nFDjy3oPi2lpZlqXLp04ljM0Eg/paW1vK8SfPrZ7ubvf6M+66S5J0\norl5xPxbenECALKNHE8glc7O0RfwyXVu5FjGipzgh/rUnPsSqqzUIwcODMmNlMYftI82cI11dmrn\n448PqXob6+zUr2+4QT1nzrjHhquq7MqzHsW1tXr08GFJGlJB1nvMzM9/XudbWhQoKVFxTY1aNm9O\n2FJrgkFZ8Z6gjof37NHVixalHHfy3JKk52680e0bWh+NqjAYHDHwnqy/nAAATB3keGJKGndegtPu\nZDQ/gOU6N3IsY0XGxjOnkreCwlZcU6NwTY2qb7tNoYqKlMckbxMe67bl0ea3hiMRFc+YobY339Sz\nV12l1ZGIXlqyRJI04447Eo41hYVDXn+ptVXPz5snSbp/3TotWb9exbW1Q475aOtWte7apVe3btVH\nW7cmBJ3B8nIVTZ8ev8jgv8vv/PSnacedPLfCkYhqFi6UZN+zxtWrR5V/Sy/O/Ec+HvzGnEIuZBx4\nGmN+ZIw5bIx52xjzgjEm9U8WQD4jNxIj4If61M63tCh2+rRODJNXmBxYjbVQ0ljyW508z4GeHvWe\nO+eO6761a1VQVGSfb9o0PbRtm0qvvVYmGJQCg62uL8d7Y0r2Z/7o4cOqX75cRTNmuGOouuUWSVLF\njTeq0MkjLbD/me3t6tKltjb7a/FdQN5xpwq6U80tftEBAMhXGW+1NcYskfSqZVkDxpgfSpJlWd9O\ncRxbbQHgCpPJFuSxvmbHypX66OWXVb1ggUrq6txtrqm23XrzPCUpNH26KufNU7C8XKd//3u3p2Z9\nNKpYR4e7BbggHNZALJZ2TOlawmxdtsxt+5JKSV2dVrzzjnu+Z6++WpdaWyXZ231r7rgjK+1xAADI\nhtFstQ0M9+RwLMva5nm4T9IjmZ4LADC13Ld27ajyCr15moufeUb7Vq1yXzNSDuf5lhbF2tv1SXOz\nwjU1bu7jnqYm3b9u3ZBzv/7Nb2qgp0cFwaAut7frlBMYera+DvT0JKyklt9wg33+NO/BWZV0OH93\nKu4Wlpaq/8KFhNdMX7BAVTffrFeiUfe99cdi7vOxM2fcVV/6bgIApgq/cjz/VtLLPp0LGNao8hIm\nU69OTHrkuvhvtFuQvdtr961alfCakbbenj96VJIUrKjQ9JtukpS4fbVl82b39a9/85t6YONGuz3K\npk1u+5NwdXVC4FkQDCZsZ7148qQb3I62FcnOnTvdc9TefXfCc6UzZ+rhHTt0vqUl4b1V33abJCkw\nTIuYXLfuSWeyjmsq4XsU/MacQi4Mu+JpjNkmqTbFU/9gWdbm+DHfkdRjWdbadOd5/PHHdd1110mS\nIpGIFixYoMbGRkmDE5/HPB7t44MHD458fLz/5U5JikbVGP/6ZBg/jyffY8dkGc+V9PjQpUuaLjvQ\nGvja17Rz5073+UOXLum0pM/Fg7Dk138Qiajj+HHNPXdOoUhE5++9V9d961tu4PpOd7d6Jc2VdHL3\nbv3wzjt16/e+p88vW6b71q7Vv0WjajtzRjPi22yPlpbquq9/3Q2aveMLlJXp90ePauCll1T04ovq\nPHJEhy5dcs/nfX+SHXgHnnhCA93dKvrzn3W5tVWtN96ou378Y/u5khK9KzsfdGU8wPy3aFTz/u7v\nFHrhBS3+2c/0xsGDCe93z1tv6ezbb2uu7FXdwBNP5Pzzk6Su+C8I3pV0OBrV3/P9lsc8nvSPDyZ9\nf8n1eHicf48PHjyozvgvGz/
88EONxrjaqRhjHpf0XyXdZ1nW5TTHkOOJiUf/y7HJdb9SXLGGa/Ux\nUhuQkXJCX1qyRCeamxO2u16/YoVC8UJGgZISDfT26kRzs0KVlZr5wAO6ePJkwtbeWGennpszx80B\nLZs9WxeOH3fbotQvX64HNm3S85/+tC62tqogGNSX9+9PaB+T6n1k0uJksrbumazjAgBMnKz28TTG\nfEHSjyXda1lW+zDHEXhi4tH/cmwaG3PbrxTwGEt/zuGCN+f5WEeHPmluVqCsTDPuukv9ly65+Z3e\nXpivRKMp+4p6A6uCcDihaFB9NKoHNm7U6khEvefOSbK30/7Nxx/7ek9G835zZbKOCwAwcbIdeL4n\nKSTpbPxLb1iW9USK4wg84audnq148MkVvkLMnJpcXmxsTBkAjiRdwBrr7NRzN97oFh8qrq3VpdbW\nISt06VbuvIHVq4895lbHnX7zzXp41y6FIxH9oqZGsfZ2FZaU6Oqf/1xLv/KVMY9zrMeM5/zIL3yP\ngt+YU/DbaALPgkxPblnWHMuy6i3Laoj/GRJ0AsgT9CvFJDKW/pxe6YoRhSMR1Sxc6J4z+uabKXth\npuuR6S2UdN/atapfvlz10agbdO5uatK0T31KBeGwom+8oZLaVKURRh7nWI8Zz/kBAJho48rxHNUF\nWPEEAIxBpls3h8s1zOZ20LGu0CaPc99TTw1ZoRxP3iQ5lwCAiZbVFU8AALJhtK1YkqVbsRzPOVNJ\nbh8y1hXa5HGmWqEc7r2MpLimRuHqagJOAMCkQuCJvOOUdAb8wpyaGvwMLoeTHCgmB4kjzafkcaYK\nXMfzXs63tIy59ygmN75HwW/MKeQCgScAAGOQHCiON+Adz+rmaMYHAMBkQI4nAABjMNnbh0z28QEA\npp6stlMZwyAIPAEAAABgiqK4EKYk8hLgN+YU/MR8gt+YU/Abcwq5QOAJAAAAAMgqttoCAAAAADLG\nVlsAAAAAQM4ReCLvkJcAvzGn4CfmE/zGnILfmFPIBQJPAAAAAEBWkeMJAAAAAMgYOZ4AAAAAgJwj\n8ETeIS8BfmNOwU/MJ/iNOQW/MaeQCwSeAAAAAICsIscTAAAAAJAxcjwBAAAAADlH4Im8Q14C/Mac\ngp+YT/Abcwp+Y04hFwg8AQAAAABZRY4nAAAAACBj5HgCAAAAAHKOwBN5h7wE+I05BT8xn+A35hT8\nxpxCLhB4AgAAAACyihxPAAAAAEDGyPEEAAAAAOQcgSfyDnkJ8BtzCn5iPsFvzCn4jTmFXCDwBAAA\nAABkFTmeAAAAAICMkeMJAAAAAMg5Ak/kHfIS4DfmFPzEfILfmFPwG3MKuUDgCQAAAADIKnI8AQAA\nAAAZI8cTAAAAAJBzBJ7IO+QlwG/MKfiJ+QS/MafgN+YUcoHAEwAAAACQVeR4AgAAAAAyRo4nAAAA\nACDnCDyRd8hLgN+YU/AT8wl+Y07Bb8wp5AKBJwAAAAAgq8jxBAAAAABkjBxPAAAAAEDOEXgi75CX\nAL8xp+An5hP8xpyC35hTyAUCTwAAAABAVpHjCQAAAADIGDmeAAAAAICcI/BE3iEvAX5jTsFPzCf4\njTkFvzGnkAsEngAAAACArCLHEwAAAACQMXI8AQAAAAA5R+CJvENeAvzGnIKfmE/wG3MKfmNOIRcI\nPAEAAAAAWUWOJwAAAAAgY+R4AgAAAAByjsATeYe8BPiNOQU/MZ/gN+YU/MacQi4QeAIAAAAAsooc\nTwAAAABAxsjxBAAAAADkHIEn8g55CfAbcwp+Yj7Bb8wp+I05hVwg8AQAAAAAZBU5ngAAAACAjJHj\nCQAAAADIOQJP5B3yEuA35hT8xHyC35hT8BtzCrlA4AkAAAAAyCpyPAEAAAAAGSPHEwAAAACQcwSe\nyDvkJcBvzCn4ifkEvzGn4DfmFHKBwBMAAAAAkFXkeAIAAAAAMkaOJwAAAAAg5wg8kXfIS4Dfm
FPw\nE/MJfmNOwW/MKeQCgScAAAAAIKvI8QQAAAAAZIwcTwAAAABAzmUceBpj/skY87Yx5qAx5lVjzLV+\nDgxIh7wE+I05BT8xn+A35hT8xpxCLoxnxfOfLcu6xbKsBZI2SfpfPo0JGNbBgwdzPQRMMcwp+In5\nBL8xp+A35hRyIePA07Ks856HZZLaxz8cYGSdnZ25HgKmGOYU/MR8gt+YU/Abcwq5EBjPi40xT0v6\nz5IuSrrLlxEBAAAAAKaUYVc8jTHbjDF/SvHnYUmyLOs7lmXNkrRG0r9MwHgBffjhh7keAqYY5hT8\nxHyC35hT8BtzCrngSzsVY8wsSS9blvXZFM/RSwUAAAAAprCR2qlkvNXWGDPHsqz34g+XSzqQyQAA\nAAAAAFNbxiuexpgNkuZK6pd0VNI3LMtq83FsAAAAAIApwJettgAAAAAApDOePp6jZoz5J2PM28aY\ng8aYV40x107EdTE1GWN+ZIw5HJ9TLxhjKnI9JuQ3Y8wKY8w7xph+Y8ytuR4P8pcx5gvGmL8YY94z\nxvyPXI8H+c0Y83+NMaeMMX/K9VgwNRhjrjXG7Ij/m/dnY8x/z/WYkL+MMUXGmH3xGO+QMeZ/D3v8\nRKx4GmOmOX0/jTH/TdItlmV9PesXxpRkjFki6VXLsgaMMT+UJMuyvp3jYSGPGWM+LWlA0v+R9PeW\nZf0hx0NCHjLGFEp6V9L9kj6R9HtJX7Us63BOB4a8ZYxZLKlb0r9bljU/1+NB/jPG1EqqtSzroDGm\nTNL/kxTl+xQyZYwpsSzrojEm47bo4wAAAphJREFUIOk1Sd+yLOu1VMdOyIqnE3TGlUlqn4jrYmqy\nLGubZVkD8Yf7JM3M5XiQ/yzL+otlWUdyPQ7kvTskvW9Z1oeWZfVKek528T0gI5Zl7ZHUketxYOqw\nLKvVsqyD8b93SzosqS63o0I+syzrYvyvIUmFks6mO3ZCAk9JMsY8bYz5SNJKST+cqOtiyvtbSS/n\nehAAIOkaSR97Hh+Pfw0AJh1jzHWSGmT/Eh/IiDGmwBhzUNIpSTssyzqU7tiM26mkuOg2SbUpnvoH\ny7I2W5b1HUnfMcZ8W9K/SPovfl0bU89I8yl+zHck9ViWtXZCB4e8NJo5BYwT1foA5IX4NtsNkp6M\nr3wCGYnvQlwQr7nyijGm0bKsnamO9S3wtCxrySgPXStWqDCCkeaTMeZxSUsl3TchA0LeG8P3KCBT\nn0jyFs+7VvaqJwBMGsaYoKTfSPqlZVmbcj0eTA2WZZ0zxmyRtFDSzlTHTFRV2zmeh8slHZiI62Jq\nMsZ8QdIqScsty7qc6/FgyjG5HgDy1n5Jc4wx1xljQpL+WtKLOR4TALiMMUbSzyUdsizrX3M9HuQ3\nY0y1MSYS/3uxpCUaJs6bqKq2GyTNldQv6aikb1iW1Zb1C2NKMsa8JzuB2UlefsOyrCdyOCTkOWPM\nlyT9VFK1pHOSDliW9WBuR4V8ZIx5UNK/yi6w8HPLsoYtLQ8Mxxjza0n3SqqS1Cbp+5Zlrc7tqJDP\njDGLJO2W9EcNpgf8T8uyfpe7USFfGWPmS/qF7MXMAknPWpb1o7THT0TgCQAAAAC4ck1YVVsAAAAA\nwJWJwBMAAAAAkFUEngAAAACArCLwBAAAAABkFYEnAAAAACCrCDwBAAAAAFlF4AkAAAAAyCoCTwAA\nAABAVv1/lzHCzGUnjVoAAAAASUVORK5CYII=\n", + "text": [ + "" + ] + } + ], + "prompt_number": 5 + } + ], + "metadata": {} + } + ] +} \ No newline at end of file diff --git a/examples/triplet/mnist_triplet.prototxt b/examples/triplet/mnist_triplet.prototxt new file mode 100644 index 
00000000000..0e903f85909 --- /dev/null +++ b/examples/triplet/mnist_triplet.prototxt @@ -0,0 +1,113 @@ +name: "mnist_siamese" +input: "data" +input_dim: 10000 +input_dim: 1 +input_dim: 28 +input_dim: 28 +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 500 + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "ip1" + top: "ip2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 10 + } +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip2" + top: "feat" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 2 + } +} diff --git a/examples/triplet/mnist_triplet_solver.prototxt b/examples/triplet/mnist_triplet_solver.prototxt new file mode 100644 index 00000000000..39222b89cf0 --- /dev/null +++ b/examples/triplet/mnist_triplet_solver.prototxt @@ -0,0 +1,25 @@ +# The train/test net protocol buffer definition +net: "examples/triplet/mnist_triplet_train_test.prototxt" +# test_iter specifies how many forward passes the test should carry out. 
+# In the case of MNIST, we have test batch size 100 and 100 test iterations, +# covering the full 10,000 testing images. +test_iter: 100 +# Carry out testing every 500 training iterations. +test_interval: 500 +# The base learning rate, momentum and the weight decay of the network. +base_lr: 0.01 +momentum: 0.9 +weight_decay: 0.0000 +# The learning rate policy +lr_policy: "inv" +gamma: 0.0001 +power: 0.75 +# Display every 100 iterations +display: 100 +# The maximum number of iterations +max_iter: 50000 +# snapshot intermediate results +snapshot: 5000 +snapshot_prefix: "examples/triplet/mnist_triplet" +# solver mode: CPU or GPU +solver_mode: CPU diff --git a/examples/triplet/mnist_triplet_train_test.prototxt b/examples/triplet/mnist_triplet_train_test.prototxt new file mode 100644 index 00000000000..3cea2376c11 --- /dev/null +++ b/examples/triplet/mnist_triplet_train_test.prototxt @@ -0,0 +1,498 @@ +name: "mnist_triplet_train_test" +layer { + name: "triplet_data" + type: "Data" + top: "triplet_data" + top: "sim" + include { + phase: TRAIN + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/triplet/mnist_triplet_train_leveldb" + batch_size: 64 + } +} +layer { + name: "triplet_data" + type: "Data" + top: "triplet_data" + top: "sim" + include { + phase: TEST + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/triplet/mnist_triplet_test_leveldb" + batch_size: 100 + } +} +layer { + name: "slice_triplet" + type: "Slice" + bottom: "triplet_data" + top: "data" + top: "data_true" + top: "data_false" + slice_param { + slice_dim: 1 + slice_point: 1 + slice_point: 2 + } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1" + 
type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "ip1" + top: "ip2" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip2" + top: "feat" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "conv1_true" + type: "Convolution" + bottom: "data_true" + top: "conv1_true" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1_true" + type: "Pooling" + bottom: "conv1_true" + top: "pool1_true" + pooling_param { + pool: MAX + kernel_size: 2 + 
stride: 2 + } +} +layer { + name: "conv2_true" + type: "Convolution" + bottom: "pool1_true" + top: "conv2_true" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2_true" + type: "Pooling" + bottom: "conv2_true" + top: "pool2_true" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1_true" + type: "InnerProduct" + bottom: "pool2_true" + top: "ip1_true" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1_true" + type: "ReLU" + bottom: "ip1_true" + top: "ip1_true" +} +layer { + name: "ip2_true" + type: "InnerProduct" + bottom: "ip1_true" + top: "ip2_true" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat_true" + type: "InnerProduct" + bottom: "ip2_true" + top: "feat_true" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "conv1_false" + type: "Convolution" + bottom: "data_false" + top: "conv1_false" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1_false" + type: "Pooling" + bottom: "conv1_false" + top: "pool1_false" + pooling_param { + pool: MAX + kernel_size: 2 + 
stride: 2 + } +} +layer { + name: "conv2_false" + type: "Convolution" + bottom: "pool1_false" + top: "conv2_false" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2_false" + type: "Pooling" + bottom: "conv2_false" + top: "pool2_false" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1_false" + type: "InnerProduct" + bottom: "pool2_false" + top: "ip1_false" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1_false" + type: "ReLU" + bottom: "ip1_false" + top: "ip1_false" +} +layer { + name: "ip2_false" + type: "InnerProduct" + bottom: "ip1_false" + top: "ip2_false" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat_false" + type: "InnerProduct" + bottom: "ip2_false" + top: "feat_false" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "loss" + type: "ContrastiveLoss" + bottom: "feat" + bottom: "feat_false" + bottom: "sim" + top: "loss" + contrastive_loss_param { + margin: 1 + } +} diff --git a/examples/triplet/readme.md b/examples/triplet/readme.md new file mode 100644 index 00000000000..ce98ec10819 --- /dev/null +++ b/examples/triplet/readme.md @@ -0,0 +1,179 @@ +--- +title: Siamese Network Tutorial +description: Train and test a siamese network on MNIST data. 
+category: example +include_in_docs: true +layout: default +priority: 100 +--- + +# Siamese Network Training with Caffe +This example shows how you can use weight sharing and a contrastive loss +function to learn a model using a siamese network in Caffe. + +We will assume that you have caffe successfully compiled. If not, please refer +to the [Installation page](../../installation.html). This example builds on the +[MNIST tutorial](mnist.html) so it would be a good idea to read that before +continuing. + +*The guide specifies all paths and assumes all commands are executed from the +root caffe directory* + +## Prepare Datasets + +You will first need to download and convert the data from the MNIST +website. To do this, simply run the following commands: + + ./data/mnist/get_mnist.sh + ./examples/siamese/create_mnist_siamese.sh + +After running the script there should be two datasets, +`./examples/siamese/mnist_siamese_train_leveldb`, and +`./examples/siamese/mnist_siamese_test_leveldb`. + +## The Model +First, we will define the model that we want to train using the siamese network. +We will use the convolutional net defined in +`./examples/siamese/mnist_siamese.prototxt`. This model is almost +exactly the same as the [LeNet model](mnist.html), the only difference is that +we have replaced the top layers that produced probabilities over the 10 digit +classes with a linear "feature" layer that produces a 2 dimensional vector. + + layers { + name: "feat" + type: INNER_PRODUCT + bottom: "ip2" + top: "feat" + blobs_lr: 1 + blobs_lr: 2 + inner_product_param { + num_output: 2 + } + } + +## Define the Siamese Network + +In this section we will define the siamese network used for training. The +resulting network is defined in +`./examples/siamese/mnist_siamese_train_test.prototxt`. + +### Reading in the Pair Data + +We start with a data layer that reads from the LevelDB database we created +earlier. 
Each entry in this database contains the image data for a pair of +images (`pair_data`) and a binary label saying if they belong to the same class +or different classes (`sim`). + + layers { + name: "pair_data" + type: DATA + top: "pair_data" + top: "sim" + data_param { + source: "examples/siamese/mnist-siamese-train-leveldb" + scale: 0.00390625 + batch_size: 64 + } + include: { phase: TRAIN } + } + +In order to pack a pair of images into the same blob in the database we pack one +image per channel. We want to be able to work with these two images separately, +so we add a slice layer after the data layer. This takes the `pair_data` and +slices it along the channel dimension so that we have a single image in `data` +and its paired image in `data_p.` + + layers { + name: "slice_pair" + type: SLICE + bottom: "pair_data" + top: "data" + top: "data_p" + slice_param { + slice_dim: 1 + slice_point: 1 + } + } + +### Building the First Side of the Siamese Net + +Now we can specify the first side of the siamese net. This side operates on +`data` and produces `feat`. Starting from the net in +`./examples/siamese/mnist_siamese.prototxt` we add default weight fillers. Then +we name the parameters of the convolutional and inner product layers. Naming the +parameters allows Caffe to share the parameters between layers on both sides of +the siamese net. In the definition this looks like: + + ... + param: "conv1_w" + param: "conv1_b" + ... + param: "conv2_w" + param: "conv2_b" + ... + param: "ip1_w" + param: "ip1_b" + ... + param: "ip2_w" + param: "ip2_b" + ... + +### Building the Second Side of the Siamese Net + +Now we need to create the second path that operates on `data_p` and produces +`feat_p`. This path is exactly the same as the first. So we can just copy and +paste it. Then we change the name of each layer, input, and output by appending +`_p` to differentiate the "paired" layers from the originals. 
+ +### Adding the Contrastive Loss Function + +To train the network we will optimize a contrastive loss function proposed in: +Raia Hadsell, Sumit Chopra, and Yann LeCun "Dimensionality Reduction by Learning +an Invariant Mapping". This loss function encourages matching pairs to be close +together in feature space while pushing non-matching pairs apart. This cost +function is implemented with the `CONTRASTIVE_LOSS` layer: + + layers { + name: "loss" + type: CONTRASTIVE_LOSS + contrastive_loss_param { + margin: 1.0 + } + bottom: "feat" + bottom: "feat_p" + bottom: "sim" + top: "loss" + } + +## Define the Solver + +Nothing special needs to be done to the solver besides pointing it at the +correct model file. The solver is defined in +`./examples/siamese/mnist_siamese_solver.prototxt`. + +## Training and Testing the Model + +Training the model is simple after you have written the network definition +protobuf and solver protobuf files. Simply run +`./examples/siamese/train_mnist_siamese.sh`: + + ./examples/siamese/train_mnist_siamese.sh + +# Plotting the results + +First, we can draw the model and siamese networks by running the following +commands that draw the DAGs defined in the .prototxt files: + + ./python/draw_net.py \ + ./examples/siamese/mnist_siamese.prototxt \ + ./examples/siamese/mnist_siamese.png + + ./python/draw_net.py \ + ./examples/siamese/mnist_siamese_train_test.prototxt \ + ./examples/siamese/mnist_siamese_train_test.png + +Second, we can load the learned model and plot the features using the iPython +notebook: + + ipython notebook ./examples/siamese/mnist_siamese.ipynb + diff --git a/examples/triplet/train_mnist_triplet.sh b/examples/triplet/train_mnist_triplet.sh new file mode 100755 index 00000000000..e005970824a --- /dev/null +++ b/examples/triplet/train_mnist_triplet.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env sh + +TOOLS=./build/tools + +$TOOLS/caffe train --solver=examples/triplet/mnist_triplet_solver.prototxt From 
3d7cf658af4f9fd4cd3cf2f96e777db2696e17de Mon Sep 17 00:00:00 2001 From: Wangyida Date: Mon, 8 Jun 2015 07:56:03 +0800 Subject: [PATCH 02/82] New triplet loss layer added(beta1 version-no test source files) --- include/caffe/data_layers.hpp | 3 + include/caffe/data_transformer.hpp | 36 -- include/caffe/filler.hpp | 71 +-- include/caffe/layer.hpp | 1 - include/caffe/loss_layers.hpp | 69 +++ include/caffe/net.hpp | 3 - include/caffe/neuron_layers.hpp | 70 +-- include/caffe/python_layer.hpp | 13 +- include/caffe/solver.hpp | 15 +- include/caffe/test/test_caffe_main.hpp | 28 +- .../caffe/test/test_gradient_check_util.hpp | 7 +- include/caffe/util/math_functions.hpp | 6 - include/caffe/util/mkl_alternate.hpp | 1 - include/caffe/vision_layers.hpp | 66 --- src/caffe/blob.cpp | 1 - src/caffe/data_transformer.cpp | 116 +--- src/caffe/layers/base_data_layer.cpp | 11 +- src/caffe/layers/base_data_layer.cu | 6 +- src/caffe/layers/concat_layer.cu | 44 +- src/caffe/layers/contrastive_loss_layer.cpp | 25 +- src/caffe/layers/contrastive_loss_layer.cu | 34 +- src/caffe/layers/conv_layer.cpp | 7 + src/caffe/layers/conv_layer.cu | 7 + src/caffe/layers/cudnn_conv_layer.cu | 2 + src/caffe/layers/data_layer.cpp | 90 ++- src/caffe/layers/deconv_layer.cpp | 7 + src/caffe/layers/deconv_layer.cu | 7 + src/caffe/layers/flatten_layer.cpp | 16 +- src/caffe/layers/image_data_layer.cpp | 42 +- src/caffe/layers/inner_product_layer.cpp | 4 +- src/caffe/layers/inner_product_layer.cu | 4 +- src/caffe/layers/lrn_layer.cu | 102 ++-- src/caffe/layers/mvn_layer.cpp | 23 +- src/caffe/layers/mvn_layer.cu | 23 +- src/caffe/layers/pooling_layer.cu | 218 ++++--- src/caffe/layers/prelu_layer.cpp | 4 +- src/caffe/layers/prelu_layer.cu | 16 +- .../sigmoid_cross_entropy_loss_layer.cpp | 2 +- .../sigmoid_cross_entropy_loss_layer.cu | 22 +- src/caffe/layers/slice_layer.cu | 47 +- src/caffe/layers/triplet_loss_layer.cpp | 124 ++++ src/caffe/net.cpp | 46 +- src/caffe/proto/caffe.proto | 206 ++----- 
src/caffe/solver.cpp | 554 ++++++++++-------- src/caffe/test/test_accuracy_layer.cpp | 5 +- src/caffe/test/test_argmax_layer.cpp | 3 +- .../test/test_contrastive_loss_layer.cpp | 58 +- src/caffe/test/test_convolution_layer.cpp | 9 +- .../test/test_data/generate_sample_data.py | 12 +- src/caffe/test/test_dummy_data_layer.cpp | 5 +- src/caffe/test/test_filler.cpp | 98 ---- src/caffe/test/test_flatten_layer.cpp | 46 +- src/caffe/test/test_gradient_based_solver.cpp | 82 +-- src/caffe/test/test_im2col_kernel.cu | 4 +- src/caffe/test/test_math_functions.cpp | 51 +- .../test_multinomial_logistic_loss_layer.cpp | 3 +- src/caffe/test/test_net.cpp | 145 ----- src/caffe/test/test_neuron_layer.cpp | 139 +---- src/caffe/test/test_pooling_layer.cpp | 13 +- src/caffe/test/test_softmax_layer.cpp | 4 +- src/caffe/test/test_stochastic_pooling.cpp | 35 +- src/caffe/test/test_triplet_loss_layer.cpp | 107 ++++ src/caffe/util/math_functions.cpp | 10 - src/caffe/util/math_functions.cu | 21 - 64 files changed, 1186 insertions(+), 1863 deletions(-) create mode 100644 src/caffe/layers/triplet_loss_layer.cpp create mode 100644 src/caffe/test/test_triplet_loss_layer.cpp diff --git a/include/caffe/data_layers.hpp b/include/caffe/data_layers.hpp index 3958cb7ecb0..2bb9d948169 100644 --- a/include/caffe/data_layers.hpp +++ b/include/caffe/data_layers.hpp @@ -14,6 +14,7 @@ #include "caffe/filler.hpp" #include "caffe/internal_thread.hpp" #include "caffe/layer.hpp" +#include "caffe/net.hpp" #include "caffe/proto/caffe.pb.h" #include "caffe/util/db.hpp" @@ -28,6 +29,7 @@ template class BaseDataLayer : public Layer { public: explicit BaseDataLayer(const LayerParameter& param); + virtual ~BaseDataLayer() {} // LayerSetUp: implements common data layer setup functionality, and calls // DataLayerSetUp to do special data layer setup for individual layer types. // This method may not be overridden except by the BasePrefetchingDataLayer. 
@@ -56,6 +58,7 @@ class BasePrefetchingDataLayer : public: explicit BasePrefetchingDataLayer(const LayerParameter& param) : BaseDataLayer(param) {} + virtual ~BasePrefetchingDataLayer() {} // LayerSetUp: implements common data layer setup functionality, and calls // DataLayerSetUp to do special data layer setup for individual layer types. // This method may not be overridden. diff --git a/include/caffe/data_transformer.hpp b/include/caffe/data_transformer.hpp index 0ad68c80216..880356601a4 100644 --- a/include/caffe/data_transformer.hpp +++ b/include/caffe/data_transformer.hpp @@ -62,7 +62,6 @@ class DataTransformer { */ void Transform(const vector & mat_vector, Blob* transformed_blob); - /** * @brief Applies the transformation defined in the data layer's * transform_param block to a cv::Mat @@ -88,41 +87,6 @@ class DataTransformer { */ void Transform(Blob* input_blob, Blob* transformed_blob); - /** - * @brief Infers the shape of transformed_blob will have when - * the transformation is applied to the data. - * - * @param datum - * Datum containing the data to be transformed. - */ - vector InferBlobShape(const Datum& datum); - /** - * @brief Infers the shape of transformed_blob will have when - * the transformation is applied to the data. - * It uses the first element to infer the shape of the blob. - * - * @param datum_vector - * A vector of Datum containing the data to be transformed. - */ - vector InferBlobShape(const vector & datum_vector); - /** - * @brief Infers the shape of transformed_blob will have when - * the transformation is applied to the data. - * It uses the first element to infer the shape of the blob. - * - * @param mat_vector - * A vector of Mat containing the data to be transformed. - */ - vector InferBlobShape(const vector & mat_vector); - /** - * @brief Infers the shape of transformed_blob will have when - * the transformation is applied to the data. - * - * @param cv_img - * cv::Mat containing the data to be transformed. 
- */ - vector InferBlobShape(const cv::Mat& cv_img); - protected: /** * @brief Generates a random integer from Uniform({0, 1, ..., n-1}). diff --git a/include/caffe/filler.hpp b/include/caffe/filler.hpp index ff3542e1f99..bb18e8e1e28 100644 --- a/include/caffe/filler.hpp +++ b/include/caffe/filler.hpp @@ -126,18 +126,17 @@ class PositiveUnitballFiller : public Filler { }; /** - * @brief Fills a Blob with values @f$ x \sim U(-a, +a) @f$ where @f$ a @f$ is - * set inversely proportional to number of incoming nodes, outgoing - * nodes, or their average. + * @brief Fills a Blob with values @f$ x \sim U(-a, +a) @f$ where @f$ a @f$ + * is set inversely proportional to the number of incoming nodes. * * A Filler based on the paper [Bengio and Glorot 2010]: Understanding - * the difficulty of training deep feedforward neuralnetworks. + * the difficulty of training deep feedforward neuralnetworks, but does not + * use the fan_out value. * - * It fills the incoming matrix by randomly sampling uniform data from [-scale, - * scale] where scale = sqrt(3 / n) where n is the fan_in, fan_out, or their - * average, depending on the variance_norm option. You should make sure the - * input blob has shape (num, a, b, c) where a * b * c = fan_in and num * b * c - * = fan_out. Note that this is currently not the case for inner product layers. + * It fills the incoming matrix by randomly sampling uniform data from + * [-scale, scale] where scale = sqrt(3 / fan_in) where fan_in is the number + * of input nodes. You should make sure the input blob has shape (num, a, b, c) + * where a * b * c = fan_in. * * TODO(dox): make notation in above comment consistent with rest & use LaTeX. 
*/ @@ -149,16 +148,7 @@ class XavierFiller : public Filler { virtual void Fill(Blob* blob) { CHECK(blob->count()); int fan_in = blob->count() / blob->num(); - int fan_out = blob->count() / blob->channels(); - Dtype n = fan_in; // default to fan_in - if (this->filler_param_.variance_norm() == - FillerParameter_VarianceNorm_AVERAGE) { - n = (fan_in + fan_out) / Dtype(2); - } else if (this->filler_param_.variance_norm() == - FillerParameter_VarianceNorm_FAN_OUT) { - n = fan_out; - } - Dtype scale = sqrt(Dtype(3) / n); + Dtype scale = sqrt(Dtype(3) / fan_in); caffe_rng_uniform(blob->count(), -scale, scale, blob->mutable_cpu_data()); CHECK_EQ(this->filler_param_.sparse(), -1) @@ -166,47 +156,6 @@ class XavierFiller : public Filler { } }; -/** - * @brief Fills a Blob with values @f$ x \sim N(0, \sigma^2) @f$ where - * @f$ \sigma^2 @f$ is set inversely proportional to number of incoming - * nodes, outgoing nodes, or their average. - * - * A Filler based on the paper [He, Zhang, Ren and Sun 2015]: Specifically - * accounts for ReLU nonlinearities. - * - * Aside: for another perspective on the scaling factor, see the derivation of - * [Saxe, McClelland, and Ganguli 2013 (v3)]. - * - * It fills the incoming matrix by randomly sampling Gaussian data with std = - * sqrt(2 / n) where n is the fan_in, fan_out, or their average, depending on - * the variance_norm option. You should make sure the input blob has shape (num, - * a, b, c) where a * b * c = fan_in and num * b * c = fan_out. Note that this - * is currently not the case for inner product layers. 
- */ -template -class MSRAFiller : public Filler { - public: - explicit MSRAFiller(const FillerParameter& param) - : Filler(param) {} - virtual void Fill(Blob* blob) { - CHECK(blob->count()); - int fan_in = blob->count() / blob->num(); - int fan_out = blob->count() / blob->channels(); - Dtype n = fan_in; // default to fan_in - if (this->filler_param_.variance_norm() == - FillerParameter_VarianceNorm_AVERAGE) { - n = (fan_in + fan_out) / Dtype(2); - } else if (this->filler_param_.variance_norm() == - FillerParameter_VarianceNorm_FAN_OUT) { - n = fan_out; - } - Dtype std = sqrt(Dtype(2) / n); - caffe_rng_gaussian(blob->count(), Dtype(0), std, - blob->mutable_cpu_data()); - CHECK_EQ(this->filler_param_.sparse(), -1) - << "Sparsity not supported by this Filler."; - } -}; /** * @brief Get a specific filler from the specification given in FillerParameter. @@ -227,8 +176,6 @@ Filler* GetFiller(const FillerParameter& param) { return new UniformFiller(param); } else if (type == "xavier") { return new XavierFiller(param); - } else if (type == "msra") { - return new MSRAFiller(param); } else { CHECK(false) << "Unknown filler name: " << param.type(); } diff --git a/include/caffe/layer.hpp b/include/caffe/layer.hpp index 8f924a75755..2d13ef97c05 100644 --- a/include/caffe/layer.hpp +++ b/include/caffe/layer.hpp @@ -406,7 +406,6 @@ template inline Dtype Layer::Forward(const vector*>& bottom, const vector*>& top) { Dtype loss = 0; - Reshape(bottom, top); switch (Caffe::mode()) { case Caffe::CPU: Forward_cpu(bottom, top); diff --git a/include/caffe/loss_layers.hpp b/include/caffe/loss_layers.hpp index 86c34241168..2f9c1f567a1 100644 --- a/include/caffe/loss_layers.hpp +++ b/include/caffe/loss_layers.hpp @@ -210,6 +210,73 @@ class ContrastiveLossLayer : public LossLayer { Blob summer_vec_; // tmp storage for gpu forward pass }; +template +class TripletLossLayer : public LossLayer { + public: + explicit TripletLossLayer(const LayerParameter& param) + : LossLayer(param), diff_() {} + 
virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline int ExactNumBottomBlobs() const { return 4; } + virtual inline const char* type() const { return "TripletLoss"; } + /** + * Unlike most loss layers, in the TripletLossLayer we can backpropagate + * to the first three inputs. + */ + virtual inline bool AllowForceBackward(const int bottom_index) const { + return bottom_index != 3; + } + + protected: + /// @copydoc TripletLossLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the Triplet error gradient w.r.t. the inputs. + * + * Computes the gradients with respect to the two input vectors (bottom[0] and + * bottom[1]), but not the similarity label (bottom[2]). + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) + * @param propagate_down see Layer::Backward. 
+ * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times 1 \times 1) @f$ + * the features @f$a@f$; Backward fills their diff with + * gradients if propagate_down[0] + * -# @f$ (N \times C \times 1 \times 1) @f$ + * the features @f$b@f$; Backward fills their diff with gradients if + * propagate_down[1] + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + Blob diff_; // cached for backward pass + Blob diff_pos; + Blob diff_neg; + Blob dist_sq_; // cached for backward pass + Blob dist_sq_pos; + Blob dist_sq_neg; + Blob diff_sq_; // tmp storage for gpu forward pass + Blob diff_sq_pos; + Blob diff_sq_neg; + Blob summer_vec_; // tmp storage for gpu forward pass +}; + /** * @brief Computes the Euclidean (L2) loss @f$ * E = \frac{1}{2N} \sum\limits_{n=1}^N \left| \left| \hat{y}_n - y_n @@ -605,6 +672,8 @@ class SigmoidCrossEntropyLossLayer : public LossLayer { /// @copydoc SigmoidCrossEntropyLossLayer virtual void Forward_cpu(const vector*>& bottom, const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); /** * @brief Computes the sigmoid cross-entropy loss error gradient w.r.t. 
the diff --git a/include/caffe/net.hpp b/include/caffe/net.hpp index 5665df1edf2..075afebc9b0 100644 --- a/include/caffe/net.hpp +++ b/include/caffe/net.hpp @@ -137,9 +137,6 @@ class Net { inline const vector& blob_loss_weights() const { return blob_loss_weights_; } - inline const vector& layer_need_backward() const { - return layer_need_backward_; - } /// @brief returns the parameters inline const vector > >& params() const { return params_; diff --git a/include/caffe/neuron_layers.hpp b/include/caffe/neuron_layers.hpp index c2e0774aaa2..323215134c7 100644 --- a/include/caffe/neuron_layers.hpp +++ b/include/caffe/neuron_layers.hpp @@ -8,6 +8,7 @@ #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/layer.hpp" +#include "caffe/net.hpp" #include "caffe/proto/caffe.pb.h" #define HDF5_DATA_DATASET_NAME "data" @@ -267,72 +268,6 @@ class ExpLayer : public NeuronLayer { Dtype inner_scale_, outer_scale_; }; -/** - * @brief Computes @f$ y = log_{\gamma}(\alpha x + \beta) @f$, - * as specified by the scale @f$ \alpha @f$, shift @f$ \beta @f$, - * and base @f$ \gamma @f$. 
- */ -template -class LogLayer : public NeuronLayer { - public: - /** - * @param param provides LogParameter log_param, - * with LogLayer options: - * - scale (\b optional, default 1) the scale @f$ \alpha @f$ - * - shift (\b optional, default 0) the shift @f$ \beta @f$ - * - base (\b optional, default -1 for a value of @f$ e \approx 2.718 @f$) - * the base @f$ \gamma @f$ - */ - explicit LogLayer(const LayerParameter& param) - : NeuronLayer(param) {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "Log"; } - - protected: - /** - * @param bottom input Blob vector (length 1) - * -# @f$ (N \times C \times H \times W) @f$ - * the inputs @f$ x @f$ - * @param top output Blob vector (length 1) - * -# @f$ (N \times C \times H \times W) @f$ - * the computed outputs @f$ - * y = log_{\gamma}(\alpha x + \beta) - * @f$ - */ - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - - /** - * @brief Computes the error gradient w.r.t. the exp inputs. - * - * @param top output Blob vector (length 1), providing the error gradient with - * respect to the outputs - * -# @f$ (N \times C \times H \times W) @f$ - * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ - * with respect to computed outputs @f$ y @f$ - * @param propagate_down see Layer::Backward. 
- * @param bottom input Blob vector (length 1) - * -# @f$ (N \times C \times H \times W) @f$ - * the inputs @f$ x @f$; Backward fills their diff with - * gradients @f$ - * \frac{\partial E}{\partial x} = - * \frac{\partial E}{\partial y} y \alpha \log_e(gamma) - * @f$ if propagate_down[0] - */ - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - - Dtype base_scale_; - Dtype input_scale_, input_shift_; - Dtype backward_num_scale_; -}; - /** * @brief Computes @f$ y = (\alpha x + \beta) ^ \gamma @f$, * as specified by the scale @f$ \alpha @f$, shift @f$ \beta @f$, @@ -799,8 +734,7 @@ class PReLULayer : public NeuronLayer { const vector& propagate_down, const vector*>& bottom); bool channel_shared_; - Blob multiplier_; // dot multiplier for backward computation of params - Blob backward_buff_; // temporary buffer for backward computation + Blob multiplier_; // dot multipler for backward computation of params Blob bottom_memory_; // memory for in-place computation }; diff --git a/include/caffe/python_layer.hpp b/include/caffe/python_layer.hpp index 19cf18c9742..816ef453720 100644 --- a/include/caffe/python_layer.hpp +++ b/include/caffe/python_layer.hpp @@ -14,12 +14,12 @@ template class PythonLayer : public Layer { public: PythonLayer(PyObject* self, const LayerParameter& param) - : Layer(param), self_(bp::handle<>(bp::borrowed(self))) { } + : Layer(param), self_(self) { } virtual void LayerSetUp(const vector*>& bottom, const vector*>& top) { try { - self_.attr("setup")(bottom, top); + bp::call_method(self_, "setup", bottom, top); } catch (bp::error_already_set) { PyErr_Print(); throw; @@ -29,7 +29,7 @@ class PythonLayer : public Layer { virtual void Reshape(const vector*>& bottom, const vector*>& top) { try { - self_.attr("reshape")(bottom, top); + bp::call_method(self_, "reshape", bottom, top); } catch 
(bp::error_already_set) { PyErr_Print(); throw; @@ -42,7 +42,7 @@ class PythonLayer : public Layer { virtual void Forward_cpu(const vector*>& bottom, const vector*>& top) { try { - self_.attr("forward")(bottom, top); + bp::call_method(self_, "forward", bottom, top); } catch (bp::error_already_set) { PyErr_Print(); throw; @@ -51,7 +51,8 @@ class PythonLayer : public Layer { virtual void Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { try { - self_.attr("backward")(top, propagate_down, bottom); + bp::call_method(self_, "backward", top, propagate_down, + bottom); } catch (bp::error_already_set) { PyErr_Print(); throw; @@ -59,7 +60,7 @@ class PythonLayer : public Layer { } private: - bp::object self_; + PyObject* self_; }; } // namespace caffe diff --git a/include/caffe/solver.hpp b/include/caffe/solver.hpp index c2ced487d6f..4dcdc3dc20b 100644 --- a/include/caffe/solver.hpp +++ b/include/caffe/solver.hpp @@ -11,7 +11,7 @@ namespace caffe { /** * @brief An interface for classes that perform optimization on Net%s. * - * Requires implementation of ApplyUpdate to compute a parameter update + * Requires implementation of ComputeUpdateValue to compute a parameter update * given the current state of the Net parameters. */ template @@ -39,8 +39,8 @@ class Solver { int iter() { return iter_; } protected: - // Make and apply the update value for the current iteration. - virtual void ApplyUpdate() = 0; + // Get the update value for the current iteration. + virtual void ComputeUpdateValue() = 0; // The Solver::Snapshot function implements the basic snapshotting utility // that stores the learned net. 
You should implement the SnapshotSolverState() // function that produces a SolverState protocol buffer that needs to be @@ -80,10 +80,7 @@ class SGDSolver : public Solver { protected: void PreSolve(); Dtype GetLearningRate(); - virtual void ApplyUpdate(); - virtual void Normalize(int param_id); - virtual void Regularize(int param_id); - virtual void ComputeUpdateValue(int param_id, Dtype rate); + virtual void ComputeUpdateValue(); virtual void ClipGradients(); virtual void SnapshotSolverState(SolverState * state); virtual void RestoreSolverState(const SolverState& state); @@ -105,7 +102,7 @@ class NesterovSolver : public SGDSolver { : SGDSolver(param_file) {} protected: - virtual void ComputeUpdateValue(int param_id, Dtype rate); + virtual void ComputeUpdateValue(); DISABLE_COPY_AND_ASSIGN(NesterovSolver); }; @@ -119,7 +116,7 @@ class AdaGradSolver : public SGDSolver { : SGDSolver(param_file) { constructor_sanity_check(); } protected: - virtual void ComputeUpdateValue(int param_id, Dtype rate); + virtual void ComputeUpdateValue(); void constructor_sanity_check() { CHECK_EQ(0, this->param_.momentum()) << "Momentum cannot be used with AdaGrad."; diff --git a/include/caffe/test/test_caffe_main.hpp b/include/caffe/test/test_caffe_main.hpp index fc156091476..bd5f31e063f 100644 --- a/include/caffe/test/test_caffe_main.hpp +++ b/include/caffe/test/test_caffe_main.hpp @@ -40,36 +40,34 @@ class MultiDeviceTest : public ::testing::Test { typedef ::testing::Types TestDtypes; -template -struct CPUDevice { - typedef TypeParam Dtype; +struct FloatCPU { + typedef float Dtype; static const Caffe::Brew device = Caffe::CPU; }; -template -class CPUDeviceTest : public MultiDeviceTest > { +struct DoubleCPU { + typedef double Dtype; + static const Caffe::Brew device = Caffe::CPU; }; #ifdef CPU_ONLY -typedef ::testing::Types, - CPUDevice > TestDtypesAndDevices; +typedef ::testing::Types TestDtypesAndDevices; #else -template -struct GPUDevice { - typedef TypeParam Dtype; +struct FloatGPU 
{ + typedef float Dtype; static const Caffe::Brew device = Caffe::GPU; }; -template -class GPUDeviceTest : public MultiDeviceTest > { +struct DoubleGPU { + typedef double Dtype; + static const Caffe::Brew device = Caffe::GPU; }; -typedef ::testing::Types, CPUDevice, - GPUDevice, GPUDevice > - TestDtypesAndDevices; +typedef ::testing::Types + TestDtypesAndDevices; #endif diff --git a/include/caffe/test/test_gradient_check_util.hpp b/include/caffe/test/test_gradient_check_util.hpp index cc5dcbad0ee..22937711b58 100644 --- a/include/caffe/test/test_gradient_check_util.hpp +++ b/include/caffe/test/test_gradient_check_util.hpp @@ -80,14 +80,11 @@ void GradientChecker::CheckGradientSingle(Layer* layer, CHECK_EQ(top_count, bottom[blob_id]->count()); } } - // First, figure out what blobs we need to check against, and zero init - // parameter blobs. + // First, figure out what blobs we need to check against. vector*> blobs_to_check; vector propagate_down(bottom.size(), check_bottom < 0); for (int i = 0; i < layer->blobs().size(); ++i) { - Blob* blob = layer->blobs()[i].get(); - caffe_set(blob->count(), static_cast(0), blob->mutable_cpu_diff()); - blobs_to_check.push_back(blob); + blobs_to_check.push_back(layer->blobs()[i].get()); } if (check_bottom < 0) { for (int i = 0; i < bottom.size(); ++i) { diff --git a/include/caffe/util/math_functions.hpp b/include/caffe/util/math_functions.hpp index 2cacd8e72cd..f43036fcebc 100644 --- a/include/caffe/util/math_functions.hpp +++ b/include/caffe/util/math_functions.hpp @@ -88,9 +88,6 @@ void caffe_rng_bernoulli(const int n, const Dtype p, unsigned int* r); template void caffe_exp(const int n, const Dtype* a, Dtype* y); -template -void caffe_log(const int n, const Dtype* a, Dtype* y); - template void caffe_abs(const int n, const Dtype* a, Dtype* y); @@ -206,9 +203,6 @@ void caffe_gpu_abs(const int n, const Dtype* a, Dtype* y); template void caffe_gpu_exp(const int n, const Dtype* a, Dtype* y); -template -void caffe_gpu_log(const int 
n, const Dtype* a, Dtype* y); - template void caffe_gpu_powx(const int n, const Dtype* a, const Dtype b, Dtype* y); diff --git a/include/caffe/util/mkl_alternate.hpp b/include/caffe/util/mkl_alternate.hpp index 3355b6658a3..32fdbf79932 100644 --- a/include/caffe/util/mkl_alternate.hpp +++ b/include/caffe/util/mkl_alternate.hpp @@ -33,7 +33,6 @@ extern "C" { DEFINE_VSL_UNARY_FUNC(Sqr, y[i] = a[i] * a[i]); DEFINE_VSL_UNARY_FUNC(Exp, y[i] = exp(a[i])); -DEFINE_VSL_UNARY_FUNC(Ln, y[i] = log(a[i])); DEFINE_VSL_UNARY_FUNC(Abs, y[i] = fabs(a[i])); // A simple way to define the vsl unary functions with singular parameter b. diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index a6bd86a93f5..cd0ab8babb0 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -453,72 +453,6 @@ class CuDNNPoolingLayer : public PoolingLayer { }; #endif -/** - * @brief Does spatial pyramid pooling on the input image - * by taking the max, average, etc. within regions - * so that the result vector of different sized - * images are of the same size. - */ -template -class SPPLayer : public Layer { - public: - explicit SPPLayer(const LayerParameter& param) - : Layer(param) {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "SPP"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int MinTopBlobs() const { return 1; } - // MAX POOL layers can output an extra top blob for the mask; - // others can only output the pooled inputs. - virtual inline int MaxTopBlobs() const { - return (this->layer_param_.pooling_param().pool() == - PoolingParameter_PoolMethod_MAX) ? 
2 : 1; - } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - // calculates the kernel and stride dimensions for the pooling layer, - // returns a correctly configured LayerParameter for a PoolingLayer - virtual LayerParameter GetPoolingParam(const int pyramid_level, - const int bottom_h, const int bottom_w, const SPPParameter spp_param); - - int pyramid_height_; - int bottom_h_, bottom_w_; - int channels_; - int kernel_h_, kernel_w_; - int pad_h_, pad_w_; - - /// the internal Split layer that feeds the pooling layers - shared_ptr > split_layer_; - /// top vector holder used in call to the underlying SplitLayer::Forward - vector*> split_top_vec_; - /// bottom vector holder used in call to the underlying PoolingLayer::Forward - vector*>*> pooling_bottom_vecs_; - /// the internal Pooling layers of different kernel sizes - vector > > pooling_layers_; - /// top vector holders used in call to the underlying PoolingLayer::Forward - vector*>*> pooling_top_vecs_; - /// pooling_outputs stores the outputs of the PoolingLayers - vector*> pooling_outputs_; - /// the internal Flatten layers that the Pooling layers feed into - vector*> flatten_layers_; - /// top vector holders used in call to the underlying FlattenLayer::Forward - vector*>*> flatten_top_vecs_; - /// flatten_outputs stores the outputs of the FlattenLayers - vector*> flatten_outputs_; - /// bottom vector holder used in call to the underlying ConcatLayer::Forward - vector*> concat_bottom_vec_; - /// the internal Concat layers that the Flatten layers feed into - shared_ptr > concat_layer_; -}; - } // namespace caffe #endif // CAFFE_VISION_LAYERS_HPP_ diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp index 94fdcc35fb6..6d2b3f502d9 100644 --- a/src/caffe/blob.cpp +++ b/src/caffe/blob.cpp @@ -26,7 +26,6 @@ void Blob::Reshape(const vector& shape) { 
shape_.resize(shape.size()); for (int i = 0; i < shape.size(); ++i) { CHECK_GE(shape[i], 0); - CHECK_LE(shape[i], INT_MAX / count_) << "blob size exceeds INT_MAX"; count_ *= shape[i]; shape_[i] = shape[i]; } diff --git a/src/caffe/data_transformer.cpp b/src/caffe/data_transformer.cpp index 6f75bdb3852..b0b98e478c1 100644 --- a/src/caffe/data_transformer.cpp +++ b/src/caffe/data_transformer.cpp @@ -125,31 +125,10 @@ void DataTransformer::Transform(const Datum& datum, template void DataTransformer::Transform(const Datum& datum, Blob* transformed_blob) { - // If datum is encoded, decoded and transform the cv::image. - if (datum.encoded()) { - CHECK(!param_.force_color() && !param_.force_gray()) - << "cannot set both force_color and force_gray"; - cv::Mat cv_img; - if (param_.force_color() || param_.force_gray()) { - // If force_color then decode in color otherwise decode in gray. - cv_img = DecodeDatumToCVMat(datum, param_.force_color()); - } else { - cv_img = DecodeDatumToCVMatNative(datum); - } - // Transform the cv::image into blob. - return Transform(cv_img, transformed_blob); - } else { - if (param_.force_color() || param_.force_gray()) { - LOG(ERROR) << "force_color and force_gray only for encoded datum"; - } - } - - const int crop_size = param_.crop_size(); const int datum_channels = datum.channels(); const int datum_height = datum.height(); const int datum_width = datum.width(); - // Check dimensions. 
const int channels = transformed_blob->channels(); const int height = transformed_blob->height(); const int width = transformed_blob->width(); @@ -160,6 +139,8 @@ void DataTransformer::Transform(const Datum& datum, CHECK_LE(width, datum_width); CHECK_GE(num, 1); + const int crop_size = param_.crop_size(); + if (crop_size) { CHECK_EQ(crop_size, height); CHECK_EQ(crop_size, width); @@ -215,12 +196,10 @@ void DataTransformer::Transform(const vector & mat_vector, template void DataTransformer::Transform(const cv::Mat& cv_img, Blob* transformed_blob) { - const int crop_size = param_.crop_size(); const int img_channels = cv_img.channels(); const int img_height = cv_img.rows; const int img_width = cv_img.cols; - // Check dimensions. const int channels = transformed_blob->channels(); const int height = transformed_blob->height(); const int width = transformed_blob->width(); @@ -233,6 +212,7 @@ void DataTransformer::Transform(const cv::Mat& cv_img, CHECK(cv_img.depth() == CV_8U) << "Image data type must be unsigned byte"; + const int crop_size = param_.crop_size(); const Dtype scale = param_.scale(); const bool do_mirror = param_.mirror() && Rand(2); const bool has_mean_file = param_.has_mean_file(); @@ -317,23 +297,11 @@ void DataTransformer::Transform(const cv::Mat& cv_img, template void DataTransformer::Transform(Blob* input_blob, Blob* transformed_blob) { - const int crop_size = param_.crop_size(); const int input_num = input_blob->num(); const int input_channels = input_blob->channels(); const int input_height = input_blob->height(); const int input_width = input_blob->width(); - if (transformed_blob->count() == 0) { - // Initialize transformed_blob with the right shape. 
- if (crop_size) { - transformed_blob->Reshape(input_num, input_channels, - crop_size, crop_size); - } else { - transformed_blob->Reshape(input_num, input_channels, - input_height, input_width); - } - } - const int num = transformed_blob->num(); const int channels = transformed_blob->channels(); const int height = transformed_blob->height(); @@ -345,7 +313,7 @@ void DataTransformer::Transform(Blob* input_blob, CHECK_GE(input_height, height); CHECK_GE(input_width, width); - + const int crop_size = param_.crop_size(); const Dtype scale = param_.scale(); const bool do_mirror = param_.mirror() && Rand(2); const bool has_mean_file = param_.has_mean_file(); @@ -427,82 +395,6 @@ void DataTransformer::Transform(Blob* input_blob, } } -template -vector DataTransformer::InferBlobShape(const Datum& datum) { - if (datum.encoded()) { - CHECK(!param_.force_color() && !param_.force_gray()) - << "cannot set both force_color and force_gray"; - cv::Mat cv_img; - if (param_.force_color() || param_.force_gray()) { - // If force_color then decode in color otherwise decode in gray. - cv_img = DecodeDatumToCVMat(datum, param_.force_color()); - } else { - cv_img = DecodeDatumToCVMatNative(datum); - } - // InferBlobShape using the cv::image. - return InferBlobShape(cv_img); - } - - const int crop_size = param_.crop_size(); - const int datum_channels = datum.channels(); - const int datum_height = datum.height(); - const int datum_width = datum.width(); - // Check dimensions. - CHECK_GT(datum_channels, 0); - CHECK_GE(datum_height, crop_size); - CHECK_GE(datum_width, crop_size); - // Build BlobShape. - vector shape(4); - shape[0] = 1; - shape[1] = datum_channels; - shape[2] = (crop_size)? crop_size: datum_height; - shape[3] = (crop_size)? 
crop_size: datum_width; - return shape; -} - -template -vector DataTransformer::InferBlobShape( - const vector & datum_vector) { - const int num = datum_vector.size(); - CHECK_GT(num, 0) << "There is no datum to in the vector"; - // Use first datum in the vector to InferBlobShape. - vector shape = InferBlobShape(datum_vector[0]); - // Adjust num to the size of the vector. - shape[0] = num; - return shape; -} - -template -vector DataTransformer::InferBlobShape(const cv::Mat& cv_img) { - const int crop_size = param_.crop_size(); - const int img_channels = cv_img.channels(); - const int img_height = cv_img.rows; - const int img_width = cv_img.cols; - // Check dimensions. - CHECK_GT(img_channels, 0); - CHECK_GE(img_height, crop_size); - CHECK_GE(img_width, crop_size); - // Build BlobShape. - vector shape(4); - shape[0] = 1; - shape[1] = img_channels; - shape[2] = (crop_size)? crop_size: img_height; - shape[3] = (crop_size)? crop_size: img_width; - return shape; -} - -template -vector DataTransformer::InferBlobShape( - const vector & mat_vector) { - const int num = mat_vector.size(); - CHECK_GT(num, 0) << "There is no cv_img to in the vector"; - // Use first cv_img in the vector to InferBlobShape. - vector shape = InferBlobShape(mat_vector[0]); - // Adjust num to the size of the vector. 
- shape[0] = num; - return shape; -} - template void DataTransformer::InitRand() { const bool needs_rand = param_.mirror() || diff --git a/src/caffe/layers/base_data_layer.cpp b/src/caffe/layers/base_data_layer.cpp index 26a1118282f..352200915d7 100644 --- a/src/caffe/layers/base_data_layer.cpp +++ b/src/caffe/layers/base_data_layer.cpp @@ -2,6 +2,7 @@ #include #include "caffe/data_layers.hpp" +#include "caffe/net.hpp" #include "caffe/util/io.hpp" namespace caffe { @@ -20,11 +21,11 @@ void BaseDataLayer::LayerSetUp(const vector*>& bottom, } else { output_labels_ = true; } + // The subclasses should setup the size of bottom and top + DataLayerSetUp(bottom, top); data_transformer_.reset( new DataTransformer(transform_param_, this->phase_)); data_transformer_->InitRand(); - // The subclasses should setup the size of bottom and top - DataLayerSetUp(bottom, top); } template @@ -62,15 +63,13 @@ void BasePrefetchingDataLayer::Forward_cpu( JoinPrefetchThread(); DLOG(INFO) << "Thread joined"; // Reshape to loaded data. - top[0]->ReshapeLike(prefetch_data_); + top[0]->Reshape(this->prefetch_data_.num(), this->prefetch_data_.channels(), + this->prefetch_data_.height(), this->prefetch_data_.width()); // Copy the data caffe_copy(prefetch_data_.count(), prefetch_data_.cpu_data(), top[0]->mutable_cpu_data()); DLOG(INFO) << "Prefetch copied"; if (this->output_labels_) { - // Reshape to loaded labels. - top[1]->ReshapeLike(prefetch_label_); - // Copy the labels. caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(), top[1]->mutable_cpu_data()); } diff --git a/src/caffe/layers/base_data_layer.cu b/src/caffe/layers/base_data_layer.cu index 9335a5bc9a9..775f6c47f7e 100644 --- a/src/caffe/layers/base_data_layer.cu +++ b/src/caffe/layers/base_data_layer.cu @@ -10,14 +10,12 @@ void BasePrefetchingDataLayer::Forward_gpu( // First, join the thread JoinPrefetchThread(); // Reshape to loaded data. 
- top[0]->ReshapeLike(this->prefetch_data_); + top[0]->Reshape(this->prefetch_data_.num(), this->prefetch_data_.channels(), + this->prefetch_data_.height(), this->prefetch_data_.width()); // Copy the data caffe_copy(prefetch_data_.count(), prefetch_data_.cpu_data(), top[0]->mutable_gpu_data()); if (this->output_labels_) { - // Reshape to loaded labels. - top[1]->ReshapeLike(prefetch_label_); - // Copy the labels. caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(), top[1]->mutable_gpu_data()); } diff --git a/src/caffe/layers/concat_layer.cu b/src/caffe/layers/concat_layer.cu index 8f2e85d8f52..dbadb5aeb30 100644 --- a/src/caffe/layers/concat_layer.cu +++ b/src/caffe/layers/concat_layer.cu @@ -6,41 +6,21 @@ namespace caffe { -template -__global__ void Concat(const int nthreads, const Dtype* in_data, - const bool forward, const int num_concats, const int concat_size, - const int top_concat_axis, const int bottom_concat_axis, - const int offset_concat_axis, Dtype* out_data) { - CUDA_KERNEL_LOOP(index, nthreads) { - const int total_concat_size = concat_size * bottom_concat_axis; - const int concat_num = index / total_concat_size; - const int concat_index = index % total_concat_size; - const int top_index = concat_index + - (concat_num * top_concat_axis + offset_concat_axis) * concat_size; - if (forward) { - out_data[top_index] = in_data[index]; - } else { - out_data[index] = in_data[top_index]; - } - } -} - template void ConcatLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { Dtype* top_data = top[0]->mutable_gpu_data(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); - const bool kForward = true; for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); const int bottom_concat_axis = bottom[i]->shape(concat_axis_); - const int bottom_concat_size = bottom_concat_axis * concat_input_size_; - const int nthreads = bottom_concat_size * num_concats_; - Concat // 
NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - nthreads, bottom_data, kForward, num_concats_, concat_input_size_, - top_concat_axis, bottom_concat_axis, offset_concat_axis, top_data); + for (int n = 0; n < num_concats_; ++n) { + caffe_copy(bottom_concat_axis * concat_input_size_, + bottom_data + n * bottom_concat_axis * concat_input_size_, + top_data + (n * top_concat_axis + offset_concat_axis) + * concat_input_size_); + } offset_concat_axis += bottom_concat_axis; } } @@ -51,17 +31,15 @@ void ConcatLayer::Backward_gpu(const vector*>& top, const Dtype* top_diff = top[0]->gpu_diff(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); - const bool kForward = false; for (int i = 0; i < bottom.size(); ++i) { if (!propagate_down[i]) { continue; } Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); const int bottom_concat_axis = bottom[i]->shape(concat_axis_); - const int bottom_concat_size = bottom_concat_axis * concat_input_size_; - const int nthreads = bottom_concat_size * num_concats_; - Concat // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - nthreads, top_diff, kForward, num_concats_, concat_input_size_, - top_concat_axis, bottom_concat_axis, offset_concat_axis, bottom_diff); + for (int n = 0; n < num_concats_; ++n) { + caffe_copy(bottom_concat_axis * concat_input_size_, top_diff + + (n * top_concat_axis + offset_concat_axis) * concat_input_size_, + bottom_diff + n * bottom_concat_axis * concat_input_size_); + } offset_concat_axis += bottom_concat_axis; } } diff --git a/src/caffe/layers/contrastive_loss_layer.cpp b/src/caffe/layers/contrastive_loss_layer.cpp index 25e167819d3..0692c11c257 100644 --- a/src/caffe/layers/contrastive_loss_layer.cpp +++ b/src/caffe/layers/contrastive_loss_layer.cpp @@ -41,8 +41,6 @@ void ContrastiveLossLayer::Forward_cpu( diff_.mutable_cpu_data()); // a_i-b_i const int channels = bottom[0]->channels(); Dtype margin = this->layer_param_.contrastive_loss_param().margin(); - bool legacy_version = - 
this->layer_param_.contrastive_loss_param().legacy_version(); Dtype loss(0.0); for (int i = 0; i < bottom[0]->num(); ++i) { dist_sq_.mutable_cpu_data()[i] = caffe_cpu_dot(channels, @@ -50,12 +48,7 @@ void ContrastiveLossLayer::Forward_cpu( if (static_cast(bottom[2]->cpu_data()[i])) { // similar pairs loss += dist_sq_.cpu_data()[i]; } else { // dissimilar pairs - if (legacy_version) { - loss += std::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0)); - } else { - Dtype dist = std::max(margin - sqrt(dist_sq_.cpu_data()[i]), 0.0); - loss += dist*dist; - } + loss += std::max(margin-dist_sq_.cpu_data()[i], Dtype(0.0)); } } loss = loss / static_cast(bottom[0]->num()) / Dtype(2); @@ -66,8 +59,6 @@ template void ContrastiveLossLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { Dtype margin = this->layer_param_.contrastive_loss_param().margin(); - bool legacy_version = - this->layer_param_.contrastive_loss_param().legacy_version(); for (int i = 0; i < 2; ++i) { if (propagate_down[i]) { const Dtype sign = (i == 0) ? 
1 : -1; @@ -85,20 +76,10 @@ void ContrastiveLossLayer::Backward_cpu(const vector*>& top, Dtype(0.0), bout + (j*channels)); } else { // dissimilar pairs - Dtype mdist(0.0); - Dtype beta(0.0); - if (legacy_version) { - mdist = margin - dist_sq_.cpu_data()[j]; - beta = -alpha; - } else { - Dtype dist = sqrt(dist_sq_.cpu_data()[j]); - mdist = margin - dist; - beta = -alpha * mdist / (dist + Dtype(1e-4)); - } - if (mdist > Dtype(0.0)) { + if ((margin-dist_sq_.cpu_data()[j]) > Dtype(0.0)) { caffe_cpu_axpby( channels, - beta, + -alpha, diff_.cpu_data() + (j*channels), Dtype(0.0), bout + (j*channels)); diff --git a/src/caffe/layers/contrastive_loss_layer.cu b/src/caffe/layers/contrastive_loss_layer.cu index 931239316ac..78a55995a0a 100644 --- a/src/caffe/layers/contrastive_loss_layer.cu +++ b/src/caffe/layers/contrastive_loss_layer.cu @@ -32,20 +32,12 @@ void ContrastiveLossLayer::Forward_gpu( Dtype(0.0), dist_sq_.mutable_gpu_data()); // \Sum (a_i-b_i)^2 Dtype margin = this->layer_param_.contrastive_loss_param().margin(); - bool legacy_version = - this->layer_param_.contrastive_loss_param().legacy_version(); Dtype loss(0.0); for (int i = 0; i < bottom[0]->num(); ++i) { if (static_cast(bottom[2]->cpu_data()[i])) { // similar pairs loss += dist_sq_.cpu_data()[i]; } else { // dissimilar pairs - if (legacy_version) { - loss += std::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0)); - } else { - Dtype dist = std::max(margin - sqrt(dist_sq_.cpu_data()[i]), - Dtype(0.0)); - loss += dist*dist; - } + loss += std::max(margin-dist_sq_.cpu_data()[i], Dtype(0.0)); } } loss = loss / static_cast(bottom[0]->num()) / Dtype(2); @@ -53,8 +45,8 @@ void ContrastiveLossLayer::Forward_gpu( } template -__global__ void CLLBackward(const int count, const int channels, - const Dtype margin, const bool legacy_version, const Dtype alpha, +__global__ void CLLForward(const int count, const int channels, + const Dtype margin, const Dtype alpha, const Dtype* y, const Dtype* diff, const Dtype* dist_sq, 
Dtype *bottom_diff) { CUDA_KERNEL_LOOP(i, count) { @@ -62,18 +54,8 @@ __global__ void CLLBackward(const int count, const int channels, if (static_cast(y[n])) { // similar pairs bottom_diff[i] = alpha * diff[i]; } else { // dissimilar pairs - Dtype mdist(0.0); - Dtype beta(0.0); - if (legacy_version) { - mdist = (margin - dist_sq[n]); - beta = -alpha; - } else { - Dtype dist = sqrt(dist_sq[n]); - mdist = (margin - dist); - beta = -alpha * mdist / (dist + Dtype(1e-4)) * diff[i]; - } - if (mdist > 0.0) { - bottom_diff[i] = beta; + if ((margin-dist_sq[n]) > 0.0) { + bottom_diff[i] = -alpha * diff[i]; } else { bottom_diff[i] = 0; } @@ -89,14 +71,12 @@ void ContrastiveLossLayer::Backward_gpu(const vector*>& top, const int count = bottom[0]->count(); const int channels = bottom[0]->channels(); Dtype margin = this->layer_param_.contrastive_loss_param().margin(); - const bool legacy_version = - this->layer_param_.contrastive_loss_param().legacy_version(); const Dtype sign = (i == 0) ? 1 : -1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / static_cast(bottom[0]->num()); // NOLINT_NEXT_LINE(whitespace/operators) - CLLBackward<<>>( - count, channels, margin, legacy_version, alpha, + CLLForward<<>>( + count, channels, margin, alpha, bottom[2]->gpu_data(), // pair similarity 0 or 1 diff_.gpu_data(), // the cached eltwise difference between a and b dist_sq_.gpu_data(), // the cached square distance between a and b diff --git a/src/caffe/layers/conv_layer.cpp b/src/caffe/layers/conv_layer.cpp index 928ef5ee468..c0c9f6f3371 100644 --- a/src/caffe/layers/conv_layer.cpp +++ b/src/caffe/layers/conv_layer.cpp @@ -39,6 +39,13 @@ void ConvolutionLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->cpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_cpu_diff(); + if (this->param_propagate_down_[0]) { + caffe_set(this->blobs_[0]->count(), Dtype(0), weight_diff); + } + if (this->bias_term_ && 
this->param_propagate_down_[1]) { + caffe_set(this->blobs_[1]->count(), Dtype(0), + this->blobs_[1]->mutable_cpu_diff()); + } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->cpu_diff(); const Dtype* bottom_data = bottom[i]->cpu_data(); diff --git a/src/caffe/layers/conv_layer.cu b/src/caffe/layers/conv_layer.cu index b8a98ff7cc9..3902fdf3930 100644 --- a/src/caffe/layers/conv_layer.cu +++ b/src/caffe/layers/conv_layer.cu @@ -31,6 +31,13 @@ void ConvolutionLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); + if (this->param_propagate_down_[0]) { + caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); + } + if (this->bias_term_ && this->param_propagate_down_[1]) { + caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), + this->blobs_[1]->mutable_gpu_diff()); + } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); // Bias gradient, if necessary. 
diff --git a/src/caffe/layers/cudnn_conv_layer.cu b/src/caffe/layers/cudnn_conv_layer.cu index b4e802e13d1..4a1a4c4f4f2 100644 --- a/src/caffe/layers/cudnn_conv_layer.cu +++ b/src/caffe/layers/cudnn_conv_layer.cu @@ -101,10 +101,12 @@ void CuDNNConvolutionLayer::Backward_gpu(const vector*>& top, if (this->param_propagate_down_[0]) { weight = this->blobs_[0]->gpu_data(); weight_diff = this->blobs_[0]->mutable_gpu_diff(); + caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); } Dtype* bias_diff = NULL; if (this->bias_term_ && this->param_propagate_down_[1]) { bias_diff = this->blobs_[1]->mutable_gpu_diff(); + caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), bias_diff); } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); diff --git a/src/caffe/layers/data_layer.cpp b/src/caffe/layers/data_layer.cpp index 161a75e0c8c..0f2d66776a9 100644 --- a/src/caffe/layers/data_layer.cpp +++ b/src/caffe/layers/data_layer.cpp @@ -38,17 +38,32 @@ void DataLayer::DataLayerSetUp(const vector*>& bottom, cursor_->Next(); } } - // Read a data point, to initialize the prefetch and top blobs. + // Read a data point, and use it to initialize the top blob. Datum datum; datum.ParseFromString(cursor_->value()); - // Use data_transformer to infer the expected blob shape from datum. - vector top_shape = this->data_transformer_->InferBlobShape(datum); - this->transformed_data_.Reshape(top_shape); - // Reshape top[0] and prefetch_data according to the batch_size. 
- top_shape[0] = this->layer_param_.data_param().batch_size(); - this->prefetch_data_.Reshape(top_shape); - top[0]->ReshapeLike(this->prefetch_data_); + bool force_color = this->layer_param_.data_param().force_encoded_color(); + if ((force_color && DecodeDatum(&datum, true)) || + DecodeDatumNative(&datum)) { + LOG(INFO) << "Decoding Datum"; + } + // image + int crop_size = this->layer_param_.transform_param().crop_size(); + if (crop_size > 0) { + top[0]->Reshape(this->layer_param_.data_param().batch_size(), + datum.channels(), crop_size, crop_size); + this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(), + datum.channels(), crop_size, crop_size); + this->transformed_data_.Reshape(1, datum.channels(), crop_size, crop_size); + } else { + top[0]->Reshape( + this->layer_param_.data_param().batch_size(), datum.channels(), + datum.height(), datum.width()); + this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(), + datum.channels(), datum.height(), datum.width()); + this->transformed_data_.Reshape(1, datum.channels(), + datum.height(), datum.width()); + } LOG(INFO) << "output data size: " << top[0]->num() << "," << top[0]->channels() << "," << top[0]->height() << "," << top[0]->width(); @@ -71,17 +86,25 @@ void DataLayer::InternalThreadEntry() { CHECK(this->prefetch_data_.count()); CHECK(this->transformed_data_.count()); - // Reshape according to the first datum of each batch - // on single input batches allows for inputs of varying dimension. + // Reshape on single input batches for inputs of varying dimension. const int batch_size = this->layer_param_.data_param().batch_size(); - Datum datum; - datum.ParseFromString(cursor_->value()); - // Use data_transformer to infer the expected blob shape from datum. - vector top_shape = this->data_transformer_->InferBlobShape(datum); - this->transformed_data_.Reshape(top_shape); - // Reshape prefetch_data according to the batch_size. 
- top_shape[0] = batch_size; - this->prefetch_data_.Reshape(top_shape); + const int crop_size = this->layer_param_.transform_param().crop_size(); + bool force_color = this->layer_param_.data_param().force_encoded_color(); + if (batch_size == 1 && crop_size == 0) { + Datum datum; + datum.ParseFromString(cursor_->value()); + if (datum.encoded()) { + if (force_color) { + DecodeDatum(&datum, true); + } else { + DecodeDatumNative(&datum); + } + } + this->prefetch_data_.Reshape(1, datum.channels(), + datum.height(), datum.width()); + this->transformed_data_.Reshape(1, datum.channels(), + datum.height(), datum.width()); + } Dtype* top_data = this->prefetch_data_.mutable_cpu_data(); Dtype* top_label = NULL; // suppress warnings about uninitialized variables @@ -89,31 +112,48 @@ void DataLayer::InternalThreadEntry() { if (this->output_labels_) { top_label = this->prefetch_label_.mutable_cpu_data(); } - timer.Start(); for (int item_id = 0; item_id < batch_size; ++item_id) { - // get a datum + timer.Start(); + // get a blob Datum datum; datum.ParseFromString(cursor_->value()); + + cv::Mat cv_img; + if (datum.encoded()) { + if (force_color) { + cv_img = DecodeDatumToCVMat(datum, true); + } else { + cv_img = DecodeDatumToCVMatNative(datum); + } + if (cv_img.channels() != this->transformed_data_.channels()) { + LOG(WARNING) << "Your dataset contains encoded images with mixed " + << "channel sizes. Consider adding a 'force_color' flag to the " + << "model definition, or rebuild your dataset using " + << "convert_imageset."; + } + } read_time += timer.MicroSeconds(); timer.Start(); + // Apply data transformations (mirror, scale, crop...) int offset = this->prefetch_data_.offset(item_id); this->transformed_data_.set_cpu_data(top_data + offset); - this->data_transformer_->Transform(datum, &(this->transformed_data_)); - // Copy label. 
+ if (datum.encoded()) { + this->data_transformer_->Transform(cv_img, &(this->transformed_data_)); + } else { + this->data_transformer_->Transform(datum, &(this->transformed_data_)); + } if (this->output_labels_) { top_label[item_id] = datum.label(); } trans_time += timer.MicroSeconds(); - timer.Start(); - // go to the next item. + // go to the next iter cursor_->Next(); if (!cursor_->valid()) { DLOG(INFO) << "Restarting data prefetching from start."; cursor_->SeekToFirst(); } } - timer.Stop(); batch_timer.Stop(); DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms."; DLOG(INFO) << " Read time: " << read_time / 1000 << " ms."; diff --git a/src/caffe/layers/deconv_layer.cpp b/src/caffe/layers/deconv_layer.cpp index a4612963b6b..e6d65ab526b 100644 --- a/src/caffe/layers/deconv_layer.cpp +++ b/src/caffe/layers/deconv_layer.cpp @@ -39,6 +39,13 @@ void DeconvolutionLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->cpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_cpu_diff(); + if (this->param_propagate_down_[0]) { + caffe_set(this->blobs_[0]->count(), Dtype(0), weight_diff); + } + if (this->bias_term_ && this->param_propagate_down_[1]) { + caffe_set(this->blobs_[1]->count(), Dtype(0), + this->blobs_[1]->mutable_cpu_diff()); + } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->cpu_diff(); const Dtype* bottom_data = bottom[i]->cpu_data(); diff --git a/src/caffe/layers/deconv_layer.cu b/src/caffe/layers/deconv_layer.cu index 39bc4de8c66..9198dd64c72 100644 --- a/src/caffe/layers/deconv_layer.cu +++ b/src/caffe/layers/deconv_layer.cu @@ -31,6 +31,13 @@ void DeconvolutionLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); + if (this->param_propagate_down_[0]) { + 
caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); + } + if (this->bias_term_ && this->param_propagate_down_[1]) { + caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), + this->blobs_[1]->mutable_gpu_diff()); + } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); const Dtype* bottom_data = bottom[i]->gpu_data(); diff --git a/src/caffe/layers/flatten_layer.cpp b/src/caffe/layers/flatten_layer.cpp index f7e5c9c2172..745f271ea45 100644 --- a/src/caffe/layers/flatten_layer.cpp +++ b/src/caffe/layers/flatten_layer.cpp @@ -9,19 +9,9 @@ namespace caffe { template void FlattenLayer::Reshape(const vector*>& bottom, const vector*>& top) { - const int start_axis = bottom[0]->CanonicalAxisIndex( - this->layer_param_.flatten_param().axis()); - const int end_axis = bottom[0]->CanonicalAxisIndex( - this->layer_param_.flatten_param().end_axis()); - vector top_shape; - for (int i = 0; i < start_axis; ++i) { - top_shape.push_back(bottom[0]->shape(i)); - } - const int flattened_dim = bottom[0]->count(start_axis, end_axis + 1); - top_shape.push_back(flattened_dim); - for (int i = end_axis + 1; i < bottom[0]->num_axes(); ++i) { - top_shape.push_back(bottom[0]->shape(i)); - } + vector top_shape(2); + top_shape[0] = bottom[0]->num(); + top_shape[1] = bottom[0]->count() / bottom[0]->num(); top[0]->Reshape(top_shape); CHECK_EQ(top[0]->count(), bottom[0]->count()); } diff --git a/src/caffe/layers/image_data_layer.cpp b/src/caffe/layers/image_data_layer.cpp index 18c035cba9d..38ebbd5ec14 100644 --- a/src/caffe/layers/image_data_layer.cpp +++ b/src/caffe/layers/image_data_layer.cpp @@ -62,15 +62,21 @@ void ImageDataLayer::DataLayerSetUp(const vector*>& bottom, // Read an image, and use it to initialize the top blob. cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, new_height, new_width, is_color); - // Use data_transformer to infer the expected blob shape from a cv_image. 
- vector top_shape = this->data_transformer_->InferBlobShape(cv_img); - this->transformed_data_.Reshape(top_shape); - // Reshape prefetch_data and top[0] according to the batch_size. + const int channels = cv_img.channels(); + const int height = cv_img.rows; + const int width = cv_img.cols; + // image + const int crop_size = this->layer_param_.transform_param().crop_size(); const int batch_size = this->layer_param_.image_data_param().batch_size(); - top_shape[0] = batch_size; - this->prefetch_data_.Reshape(top_shape); - top[0]->ReshapeLike(this->prefetch_data_); - + if (crop_size > 0) { + top[0]->Reshape(batch_size, channels, crop_size, crop_size); + this->prefetch_data_.Reshape(batch_size, channels, crop_size, crop_size); + this->transformed_data_.Reshape(1, channels, crop_size, crop_size); + } else { + top[0]->Reshape(batch_size, channels, height, width); + this->prefetch_data_.Reshape(batch_size, channels, height, width); + this->transformed_data_.Reshape(1, channels, height, width); + } LOG(INFO) << "output data size: " << top[0]->num() << "," << top[0]->channels() << "," << top[0]->height() << "," << top[0]->width(); @@ -101,19 +107,19 @@ void ImageDataLayer::InternalThreadEntry() { const int batch_size = image_data_param.batch_size(); const int new_height = image_data_param.new_height(); const int new_width = image_data_param.new_width(); + const int crop_size = this->layer_param_.transform_param().crop_size(); const bool is_color = image_data_param.is_color(); string root_folder = image_data_param.root_folder(); - // Reshape according to the first image of each batch - // on single input batches allows for inputs of varying dimension. - cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, - new_height, new_width, is_color); - // Use data_transformer to infer the expected blob shape from a cv_img. 
- vector top_shape = this->data_transformer_->InferBlobShape(cv_img); - this->transformed_data_.Reshape(top_shape); - // Reshape prefetch_data according to the batch_size. - top_shape[0] = batch_size; - this->prefetch_data_.Reshape(top_shape); + // Reshape on single input batches for inputs of varying dimension. + if (batch_size == 1 && crop_size == 0 && new_height == 0 && new_width == 0) { + cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, + 0, 0, is_color); + this->prefetch_data_.Reshape(1, cv_img.channels(), + cv_img.rows, cv_img.cols); + this->transformed_data_.Reshape(1, cv_img.channels(), + cv_img.rows, cv_img.cols); + } Dtype* prefetch_data = this->prefetch_data_.mutable_cpu_data(); Dtype* prefetch_label = this->prefetch_label_.mutable_cpu_data(); diff --git a/src/caffe/layers/inner_product_layer.cpp b/src/caffe/layers/inner_product_layer.cpp index 83c3235eb71..89e0c8fbad7 100644 --- a/src/caffe/layers/inner_product_layer.cpp +++ b/src/caffe/layers/inner_product_layer.cpp @@ -101,13 +101,13 @@ void InnerProductLayer::Backward_cpu(const vector*>& top, const Dtype* bottom_data = bottom[0]->cpu_data(); // Gradient with respect to weight caffe_cpu_gemm(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., - top_diff, bottom_data, (Dtype)1., this->blobs_[0]->mutable_cpu_diff()); + top_diff, bottom_data, (Dtype)0., this->blobs_[0]->mutable_cpu_diff()); } if (bias_term_ && this->param_propagate_down_[1]) { const Dtype* top_diff = top[0]->cpu_diff(); // Gradient with respect to bias caffe_cpu_gemv(CblasTrans, M_, N_, (Dtype)1., top_diff, - bias_multiplier_.cpu_data(), (Dtype)1., + bias_multiplier_.cpu_data(), (Dtype)0., this->blobs_[1]->mutable_cpu_diff()); } if (propagate_down[0]) { diff --git a/src/caffe/layers/inner_product_layer.cu b/src/caffe/layers/inner_product_layer.cu index dd90cac12a8..a9e1784a205 100644 --- a/src/caffe/layers/inner_product_layer.cu +++ b/src/caffe/layers/inner_product_layer.cu @@ -33,13 +33,13 @@ void 
InnerProductLayer::Backward_gpu(const vector*>& top, const Dtype* bottom_data = bottom[0]->gpu_data(); // Gradient with respect to weight caffe_gpu_gemm(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., - top_diff, bottom_data, (Dtype)1., this->blobs_[0]->mutable_gpu_diff()); + top_diff, bottom_data, (Dtype)0., this->blobs_[0]->mutable_gpu_diff()); } if (bias_term_ && this->param_propagate_down_[1]) { const Dtype* top_diff = top[0]->gpu_diff(); // Gradient with respect to bias caffe_gpu_gemv(CblasTrans, M_, N_, (Dtype)1., top_diff, - bias_multiplier_.gpu_data(), (Dtype)1., + bias_multiplier_.gpu_data(), (Dtype)0., this->blobs_[1]->mutable_gpu_diff()); } if (propagate_down[0]) { diff --git a/src/caffe/layers/lrn_layer.cu b/src/caffe/layers/lrn_layer.cu index 001b3c34ac1..24aa6a30130 100644 --- a/src/caffe/layers/lrn_layer.cu +++ b/src/caffe/layers/lrn_layer.cu @@ -7,46 +7,44 @@ namespace caffe { template -__global__ void LRNFillScale(const int nthreads, const Dtype* const in, +__global__ void LRNFillScale(const int nthreads, const Dtype* in, const int num, const int channels, const int height, const int width, const int size, const Dtype alpha_over_size, - const Dtype k, Dtype* const scale) { + const Dtype k, Dtype* scale) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset - const int w = index % width; - const int h = (index / width) % height; - const int n = index / width / height; - const int offset = (n * channels * height + h) * width + w; - const int step = height * width; - const Dtype* const in_off = in + offset; - Dtype* const scale_off = scale + offset; + int w = index % width; + int h = (index / width) % height; + int n = index / width / height; + int offset = (n * channels * height + h) * width + w; + int step = height * width; + in += offset; + scale += offset; int head = 0; - const int pre_pad = (size - 1) / 2; - const int post_pad = size - pre_pad - 1; + int pre_pad = (size - 1) / 2; + int post_pad = size - pre_pad - 1; Dtype 
accum_scale = 0; // fill the scale at [n, :, h, w] // accumulate values while (head < post_pad && head < channels) { - accum_scale += in_off[head * step] * in_off[head * step]; + accum_scale += in[head * step] * in[head * step]; ++head; } // both add and subtract while (head < channels) { - accum_scale += in_off[head * step] * in_off[head * step]; + accum_scale += in[head * step] * in[head * step]; if (head - size >= 0) { - accum_scale -= in_off[(head - size) * step] - * in_off[(head - size) * step]; + accum_scale -= in[(head - size) * step] * in[(head - size) * step]; } - scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; + scale[(head - post_pad) * step] = k + accum_scale * alpha_over_size; ++head; } // subtract only while (head < channels + post_pad) { if (head - size >= 0) { - accum_scale -= in_off[(head - size) * step] - * in_off[(head - size) * step]; + accum_scale -= in[(head - size) * step] * in[(head - size) * step]; } - scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; + scale[(head - post_pad) * step] = k + accum_scale * alpha_over_size; ++head; } } @@ -70,8 +68,8 @@ void LRNLayer::Forward_gpu(const vector*>& bottom, // TODO: check if it would be faster to just put it into the previous kernel. 
template -__global__ void LRNComputeOutput(const int nthreads, const Dtype* const in, - const Dtype* const scale, const Dtype negative_beta, Dtype* const out) { +__global__ void LRNComputeOutput(const int nthreads, const Dtype* in, + const Dtype* scale, const Dtype negative_beta, Dtype* out) { CUDA_KERNEL_LOOP(index, nthreads) { out[index] = in[index] * pow(scale[index], negative_beta); } @@ -120,58 +118,56 @@ void LRNLayer::Backward_gpu(const vector*>& top, } template -__global__ void LRNComputeDiff(const int nthreads, - const Dtype* const bottom_data, const Dtype* const top_data, - const Dtype* const scale, const Dtype* const top_diff, +__global__ void LRNComputeDiff(const int nthreads, const Dtype* bottom_data, + const Dtype* top_data, const Dtype* scale, const Dtype* top_diff, const int num, const int channels, const int height, const int width, const int size, const Dtype negative_beta, - const Dtype cache_ratio, Dtype* const bottom_diff) { + const Dtype cache_ratio, + Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset - const int w = index % width; - const int h = (index / width) % height; - const int n = index / width / height; - const int offset = (n * channels * height + h) * width + w; - const int step = height * width; - const Dtype* const bottom_off = bottom_data + offset; - const Dtype* const top_off = top_data + offset; - const Dtype* const scale_off = scale + offset; - const Dtype* const top_diff_off = top_diff + offset; - Dtype* const bottom_diff_off = bottom_diff + offset; + int w = index % width; + int h = (index / width) % height; + int n = index / width / height; + int offset = (n * channels * height + h) * width + w; + int step = height * width; + bottom_data += offset; + top_data += offset; + scale += offset; + top_diff += offset; + bottom_diff += offset; int head = 0; - const int pre_pad = size - (size + 1) / 2; - const int post_pad = size - pre_pad - 1; + int pre_pad = size - (size + 1) / 2; + int post_pad 
= size - pre_pad - 1; Dtype accum_ratio = 0; // accumulate values while (head < post_pad && head < channels) { - accum_ratio += top_diff_off[head * step] * top_off[head * step] / - scale_off[head * step]; + accum_ratio += top_diff[head * step] * top_data[head * step] / + scale[head * step]; ++head; } // both add and subtract while (head < channels) { - accum_ratio += top_diff_off[head * step] * top_off[head * step] / - scale_off[head * step]; + accum_ratio += top_diff[head * step] * top_data[head * step] / + scale[head * step]; if (head - size >= 0) { - accum_ratio -= top_diff_off[(head - size) * step] * - top_off[(head - size) * step] / scale_off[(head - size) * step]; + accum_ratio -= top_diff[(head - size) * step] * + top_data[(head - size) * step] / scale[(head - size) * step]; } - bottom_diff_off[(head - post_pad) * step] = - top_diff_off[(head - post_pad) * step] - * pow(scale_off[(head - post_pad) * step], negative_beta) - - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; + bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] + * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * + bottom_data[(head - post_pad) * step] * accum_ratio; ++head; } // subtract only while (head < channels + post_pad) { if (head - size >= 0) { - accum_ratio -= top_diff_off[(head - size) * step] * - top_off[(head - size) * step] / scale_off[(head - size) * step]; + accum_ratio -= top_diff[(head - size) * step] * + top_data[(head - size) * step] / scale[(head - size) * step]; } - bottom_diff_off[(head - post_pad) * step] = - top_diff_off[(head - post_pad) * step] - * pow(scale_off[(head - post_pad) * step], negative_beta) - - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; + bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] + * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * + bottom_data[(head - post_pad) * step] * accum_ratio; ++head; } } diff --git 
a/src/caffe/layers/mvn_layer.cpp b/src/caffe/layers/mvn_layer.cpp index 3e79bddcdde..b74d7b4f300 100644 --- a/src/caffe/layers/mvn_layer.cpp +++ b/src/caffe/layers/mvn_layer.cpp @@ -22,7 +22,6 @@ void MVNLayer::Reshape(const vector*>& bottom, bottom[0]->height(), bottom[0]->width()); Dtype* multiplier_data = sum_multiplier_.mutable_cpu_data(); caffe_set(sum_multiplier_.count(), Dtype(1), multiplier_data); - eps_ = this->layer_param_.mvn_param().eps(); } template @@ -37,6 +36,7 @@ void MVNLayer::Forward_cpu(const vector*>& bottom, num = bottom[0]->num() * bottom[0]->channels(); int dim = bottom[0]->count() / num; + Dtype eps = 1e-10; if (this->layer_param_.mvn_param().normalize_variance()) { // put the squares of bottom into temp_ @@ -66,7 +66,7 @@ void MVNLayer::Forward_cpu(const vector*>& bottom, caffe_powx(variance_.count(), variance_.cpu_data(), Dtype(0.5), variance_.mutable_cpu_data()); - caffe_add_scalar(variance_.count(), eps_, variance_.mutable_cpu_data()); + caffe_add_scalar(variance_.count(), eps, variance_.mutable_cpu_data()); caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., variance_.cpu_data(), sum_multiplier_.cpu_data(), 0., @@ -102,6 +102,7 @@ void MVNLayer::Backward_cpu(const vector*>& top, num = bottom[0]->num() * bottom[0]->channels(); int dim = bottom[0]->count() / num; + Dtype eps = 1e-10; if (this->layer_param_.mvn_param().normalize_variance()) { caffe_mul(temp_.count(), top_data, top_diff, bottom_diff); @@ -124,6 +125,24 @@ void MVNLayer::Backward_cpu(const vector*>& top, // put the squares of bottom into temp_ caffe_powx(temp_.count(), bottom_data, Dtype(2), temp_.mutable_cpu_data()); + + // computes variance using var(X) = E(X^2) - (EX)^2 + caffe_cpu_gemv(CblasNoTrans, num, dim, 1. / dim, bottom_data, + sum_multiplier_.cpu_data(), 0., mean_.mutable_cpu_data()); // EX + caffe_cpu_gemv(CblasNoTrans, num, dim, 1. 
/ dim, temp_.cpu_data(), + sum_multiplier_.cpu_data(), 0., + variance_.mutable_cpu_data()); // E(X^2) + caffe_powx(mean_.count(), mean_.cpu_data(), Dtype(2), + temp_.mutable_cpu_data()); // (EX)^2 + caffe_sub(mean_.count(), variance_.cpu_data(), temp_.cpu_data(), + variance_.mutable_cpu_data()); // variance + + // normalize variance + caffe_powx(variance_.count(), variance_.cpu_data(), Dtype(0.5), + variance_.mutable_cpu_data()); + + caffe_add_scalar(variance_.count(), eps, variance_.mutable_cpu_data()); + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., variance_.cpu_data(), sum_multiplier_.cpu_data(), 0., temp_.mutable_cpu_data()); diff --git a/src/caffe/layers/mvn_layer.cu b/src/caffe/layers/mvn_layer.cu index 3888a0c7106..0667f50380f 100644 --- a/src/caffe/layers/mvn_layer.cu +++ b/src/caffe/layers/mvn_layer.cu @@ -36,6 +36,8 @@ void MVNLayer::Forward_gpu(const vector*>& bottom, caffe_gpu_sub(mean_.count(), variance_.gpu_data(), temp_.gpu_data(), variance_.mutable_gpu_data()); // variance + Dtype eps = 1e-10; + // do mean and variance normalization // subtract mean caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, -1., @@ -48,7 +50,7 @@ void MVNLayer::Forward_gpu(const vector*>& bottom, caffe_gpu_powx(variance_.count(), variance_.gpu_data(), Dtype(0.5), variance_.mutable_gpu_data()); - caffe_gpu_add_scalar(variance_.count(), eps_, variance_.mutable_gpu_data()); + caffe_gpu_add_scalar(variance_.count(), eps, variance_.mutable_gpu_data()); caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., variance_.gpu_data(), sum_multiplier_.gpu_data(), 0., @@ -85,6 +87,8 @@ void MVNLayer::Backward_gpu(const vector*>& top, int dim = bottom[0]->count() / num; + Dtype eps = 1e-10; + if (this->layer_param_.mvn_param().normalize_variance()) { caffe_gpu_mul(temp_.count(), top_data, top_diff, bottom_diff); caffe_gpu_gemv(CblasNoTrans, num, dim, 1., bottom_diff, @@ -107,6 +111,23 @@ void MVNLayer::Backward_gpu(const vector*>& top, 
caffe_gpu_powx(temp_.count(), bottom_data, Dtype(2), temp_.mutable_gpu_data()); + // computes variance using var(X) = E(X^2) - (EX)^2 + caffe_gpu_gemv(CblasNoTrans, num, dim, 1. / dim, bottom_data, + sum_multiplier_.gpu_data(), 0., mean_.mutable_gpu_data()); // EX + caffe_gpu_gemv(CblasNoTrans, num, dim, 1. / dim, temp_.gpu_data(), + sum_multiplier_.gpu_data(), 0., + variance_.mutable_gpu_data()); // E(X^2) + caffe_gpu_powx(mean_.count(), mean_.gpu_data(), Dtype(2), + temp_.mutable_gpu_data()); // (EX)^2 + caffe_gpu_sub(mean_.count(), variance_.gpu_data(), temp_.gpu_data(), + variance_.mutable_gpu_data()); // variance + + // normalize variance + caffe_gpu_powx(variance_.count(), variance_.gpu_data(), Dtype(0.5), + variance_.mutable_gpu_data()); + + caffe_gpu_add_scalar(variance_.count(), eps, variance_.mutable_gpu_data()); + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., variance_.gpu_data(), sum_multiplier_.gpu_data(), 0., temp_.mutable_gpu_data()); diff --git a/src/caffe/layers/pooling_layer.cu b/src/caffe/layers/pooling_layer.cu index ca4b13f7c41..d1d48501af3 100644 --- a/src/caffe/layers/pooling_layer.cu +++ b/src/caffe/layers/pooling_layer.cu @@ -9,32 +9,31 @@ namespace caffe { template -__global__ void MaxPoolForward(const int nthreads, - const Dtype* const bottom_data, const int num, const int channels, - const int height, const int width, const int pooled_height, - const int pooled_width, const int kernel_h, const int kernel_w, - const int stride_h, const int stride_w, const int pad_h, const int pad_w, - Dtype* const top_data, int* mask, Dtype* top_mask) { +__global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data, + const int num, const int channels, const int height, + const int width, const int pooled_height, const int pooled_width, + const int kernel_h, const int kernel_w, const int stride_h, + const int stride_w, const int pad_h, const int pad_w, Dtype* top_data, + int* mask, Dtype* top_mask) { CUDA_KERNEL_LOOP(index, 
nthreads) { - const int pw = index % pooled_width; - const int ph = (index / pooled_width) % pooled_height; - const int c = (index / pooled_width / pooled_height) % channels; - const int n = index / pooled_width / pooled_height / channels; + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; - const int hend = min(hstart + kernel_h, height); - const int wend = min(wstart + kernel_w, width); + int hend = min(hstart + kernel_h, height); + int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); Dtype maxval = -FLT_MAX; int maxidx = -1; - const Dtype* const bottom_slice = - bottom_data + (n * channels + c) * height * width; + bottom_data += (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - if (bottom_slice[h * width + w] > maxval) { + if (bottom_data[h * width + w] > maxval) { maxidx = h * width + w; - maxval = bottom_slice[maxidx]; + maxval = bottom_data[maxidx]; } } } @@ -48,32 +47,30 @@ __global__ void MaxPoolForward(const int nthreads, } template -__global__ void AvePoolForward(const int nthreads, - const Dtype* const bottom_data, const int num, const int channels, - const int height, const int width, const int pooled_height, - const int pooled_width, const int kernel_h, const int kernel_w, - const int stride_h, const int stride_w, const int pad_h, const int pad_w, - Dtype* const top_data) { +__global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data, + const int num, const int channels, const int height, + const int width, const int pooled_height, const int pooled_width, + const int kernel_h, const int kernel_w, const int stride_h, + const int stride_w, const int pad_h, const int pad_w, Dtype* top_data) { CUDA_KERNEL_LOOP(index, 
nthreads) { - const int pw = index % pooled_width; - const int ph = (index / pooled_width) % pooled_height; - const int c = (index / pooled_width / pooled_height) % channels; - const int n = index / pooled_width / pooled_height / channels; + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); - const int pool_size = (hend - hstart) * (wend - wstart); + int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); Dtype aveval = 0; - const Dtype* const bottom_slice = - bottom_data + (n * channels + c) * height * width; + bottom_data += (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - aveval += bottom_slice[h * width + w]; + aveval += bottom_data[h * width + w]; } } top_data[index] = aveval / pool_size; @@ -82,38 +79,37 @@ __global__ void AvePoolForward(const int nthreads, template __global__ void StoPoolForwardTrain(const int nthreads, - const Dtype* const bottom_data, + const Dtype* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, - const int stride_w, Dtype* const rand_idx, Dtype* const top_data) { + const int stride_w, Dtype* rand_idx, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { - const int pw = index % pooled_width; - const int ph = (index / pooled_width) % pooled_height; - const int c = (index / pooled_width / pooled_height) % channels; - const int n = index / pooled_width / pooled_height / channels; - const int hstart = ph * stride_h; - 
const int hend = min(hstart + kernel_h, height); - const int wstart = pw * stride_w; - const int wend = min(wstart + kernel_w, width); + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h; + int hend = min(hstart + kernel_h, height); + int wstart = pw * stride_w; + int wend = min(wstart + kernel_w, width); Dtype cumsum = 0.; - const Dtype* const bottom_slice = - bottom_data + (n * channels + c) * height * width; + bottom_data += (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - cumsum += bottom_slice[h * width + w]; + cumsum += bottom_data[h * width + w]; } } - const float thres = rand_idx[index] * cumsum; + float thres = rand_idx[index] * cumsum; // Second pass: get value, and set index. cumsum = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - cumsum += bottom_slice[h * width + w]; + cumsum += bottom_data[h * width + w]; if (cumsum >= thres) { rand_idx[index] = ((n * channels + c) * height + h) * width + w; - top_data[index] = bottom_slice[h * width + w]; + top_data[index] = bottom_data[h * width + w]; return; } } @@ -124,30 +120,29 @@ __global__ void StoPoolForwardTrain(const int nthreads, template __global__ void StoPoolForwardTest(const int nthreads, - const Dtype* const bottom_data, + const Dtype* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, - const int stride_w, Dtype* const top_data) { + const int stride_w, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { - const int pw = index % pooled_width; - const int ph = (index / pooled_width) % pooled_height; - const int c = (index / pooled_width / pooled_height) % 
channels; - const int n = index / pooled_width / pooled_height / channels; - const int hstart = ph * stride_h; - const int hend = min(hstart + kernel_h, height); - const int wstart = pw * stride_w; - const int wend = min(wstart + kernel_w, width); + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h; + int hend = min(hstart + kernel_h, height); + int wstart = pw * stride_w; + int wend = min(wstart + kernel_w, width); // We set cumsum to be 0 to avoid divide-by-zero problems Dtype cumsum = FLT_MIN; Dtype cumvalues = 0.; - const Dtype* const bottom_slice = - bottom_data + (n * channels + c) * height * width; + bottom_data += (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - cumsum += bottom_slice[h * width + w]; - cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w]; + cumsum += bottom_data[h * width + w]; + cumvalues += bottom_data[h * width + w] * bottom_data[h * width + w]; } } top_data[index] = cumvalues / cumsum; @@ -215,43 +210,43 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, template -__global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff, - const int* const mask, const Dtype* const top_mask, const int num, - const int channels, const int height, const int width, - const int pooled_height, const int pooled_width, const int kernel_h, - const int kernel_w, const int stride_h, const int stride_w, const int pad_h, - const int pad_w, Dtype* const bottom_diff) { +__global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, + const int* mask, const Dtype* top_mask, const int num, const int channels, + const int height, const int width, const int pooled_height, + const int pooled_width, const int kernel_h, const int kernel_w, + const int 
stride_h, const int stride_w, const int pad_h, const int pad_w, + Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset - const int w = index % width; - const int h = (index / width) % height; - const int c = (index / width / height) % channels; - const int n = index / width / height / channels; - const int phstart = - (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; - const int phend = min((h + pad_h) / stride_h + 1, pooled_height); - const int pwstart = - (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; - const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); + int w = index % width; + int h = (index / width) % height; + int c = (index / width / height) % channels; + int n = index / width / height / channels; + int phstart = + (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; + int phend = min((h + pad_h) / stride_h + 1, pooled_height); + int pwstart = + (w + pad_w < kernel_w) ? 
0 : (w + pad_w - kernel_w) / stride_w + 1; + int pwend = min((w + pad_w) / stride_w + 1, pooled_width); Dtype gradient = 0; - const int offset = (n * channels + c) * pooled_height * pooled_width; - const Dtype* const top_diff_slice = top_diff + offset; + int offset = (n * channels + c) * pooled_height * pooled_width; + top_diff += offset; if (mask) { - const int* const mask_slice = mask + offset; + mask += offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { - if (mask_slice[ph * pooled_width + pw] == h * width + w) { - gradient += top_diff_slice[ph * pooled_width + pw]; + if (mask[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff[ph * pooled_width + pw]; } } } } else { - const Dtype* const top_mask_slice = top_mask + offset; + top_mask += offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { - if (top_mask_slice[ph * pooled_width + pw] == h * width + w) { - gradient += top_diff_slice[ph * pooled_width + pw]; + if (top_mask[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff[ph * pooled_width + pw]; } } } @@ -261,26 +256,25 @@ __global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff, } template -__global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff, +__global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, - Dtype* const bottom_diff) { + Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset - const int w = index % width + pad_w; - const int h = (index / width) % height + pad_h; - const int c = (index / width / height) % channels; - const int n = index / width / height / channels; - const int phstart 
= (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; - const int phend = min(h / stride_h + 1, pooled_height); - const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; - const int pwend = min(w / stride_w + 1, pooled_width); + int w = index % width + pad_w; + int h = (index / width) % height + pad_h; + int c = (index / width / height) % channels; + int n = index / width / height / channels; + int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + int phend = min(h / stride_h + 1, pooled_height); + int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; + int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; - const Dtype* const top_diff_slice = - top_diff + (n * channels + c) * pooled_height * pooled_width; + top_diff += (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size @@ -289,7 +283,7 @@ __global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff, int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); - gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; + gradient += top_diff[ph * pooled_width + pw] / pool_size; } } bottom_diff[index] = gradient; @@ -299,31 +293,29 @@ __global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff, template __global__ void StoPoolBackward(const int nthreads, - const Dtype* const rand_idx, const Dtype* const top_diff, + const Dtype* rand_idx, const Dtype* top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, - const int stride_w, Dtype* const bottom_diff) { + const int stride_w, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local 
offset - const int w = index % width; - const int h = (index / width) % height; - const int c = (index / width / height) % channels; - const int n = index / width / height / channels; - const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; - const int phend = min(h / stride_h + 1, pooled_height); - const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; - const int pwend = min(w / stride_w + 1, pooled_width); + int w = index % width; + int h = (index / width) % height; + int c = (index / width / height) % channels; + int n = index / width / height / channels; + int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + int phend = min(h / stride_h + 1, pooled_height); + int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; + int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; - const Dtype* const rand_idx_slice = - rand_idx + (n * channels + c) * pooled_height * pooled_width; - const Dtype* const top_diff_slice = - top_diff + (n * channels + c) * pooled_height * pooled_width; + rand_idx += (n * channels + c) * pooled_height * pooled_width; + top_diff += (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { - gradient += top_diff_slice[ph * pooled_width + pw] * - (index == static_cast(rand_idx_slice[ph * pooled_width + pw])); + gradient += top_diff[ph * pooled_width + pw] * + (index == static_cast(rand_idx[ph * pooled_width + pw])); } } bottom_diff[index] = gradient; diff --git a/src/caffe/layers/prelu_layer.cpp b/src/caffe/layers/prelu_layer.cpp index 81831755512..7119a274dd3 100644 --- a/src/caffe/layers/prelu_layer.cpp +++ b/src/caffe/layers/prelu_layer.cpp @@ -45,8 +45,7 @@ void PReLULayer::LayerSetUp(const vector*>& bottom, // Propagate gradients to the parameters (as directed by backward pass). 
this->param_propagate_down_.resize(this->blobs_.size(), true); - multiplier_.Reshape(vector(1, bottom[0]->count(1))); - backward_buff_.Reshape(vector(1, bottom[0]->count(1))); + multiplier_.Reshape(vector(1, bottom[0]->count() / bottom[0]->num())); caffe_set(multiplier_.count(), Dtype(1), multiplier_.mutable_cpu_data()); } @@ -113,6 +112,7 @@ void PReLULayer::Backward_cpu(const vector*>& top, // keep top_diff unchanged. if (this->param_propagate_down_[0]) { Dtype* slope_diff = this->blobs_[0]->mutable_cpu_diff(); + caffe_set(this->blobs_[0]->count(), Dtype(0), slope_diff); for (int i = 0; i < count; ++i) { int c = (i / dim) % channels / div_factor; slope_diff[c] += top_diff[i] * bottom_data[i] * (bottom_data[i] <= 0); diff --git a/src/caffe/layers/prelu_layer.cu b/src/caffe/layers/prelu_layer.cu index e1f20048f60..fd0eda5d191 100644 --- a/src/caffe/layers/prelu_layer.cu +++ b/src/caffe/layers/prelu_layer.cu @@ -75,36 +75,38 @@ void PReLULayer::Backward_gpu(const vector*>& top, bottom_data = bottom_memory_.gpu_data(); } - // Propagate to param + // Propagate to param // Since to write bottom diff will affect top diff // are identical (in-place computaion), we first compute param backward to // keep top_diff unchanged. 
if (this->param_propagate_down_[0]) { Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff(); + // slope_diff is set as 0, then accumulated over batches + caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), slope_diff); int cdim = channels * dim; Dtype dsum = 0.; for (int n = 0; n < bottom[0]->num(); ++n) { + Dtype* temp_buff = multiplier_.mutable_gpu_diff(); // compute element-wise diff // NOLINT_NEXT_LINE(whitespace/operators) - PReLUParamBackward<<<<>>( cdim, top_diff + top[0]->offset(n), - bottom_data + bottom[0]->offset(n), - backward_buff_.mutable_gpu_diff()); + bottom_data + bottom[0]->offset(n), multiplier_.mutable_gpu_diff()); CUDA_POST_KERNEL_CHECK; if (channel_shared_) { Dtype d; - caffe_gpu_dot(channels * dim, backward_buff_.gpu_diff(), + caffe_gpu_dot(channels * dim, multiplier_.gpu_diff(), multiplier_.gpu_data(), &d); dsum += d; } else { caffe_gpu_gemv(CblasNoTrans, channels, dim, 1., - backward_buff_.gpu_diff(), multiplier_.gpu_data(), 1., + multiplier_.gpu_diff(), multiplier_.gpu_data(), 1., slope_diff); } } if (channel_shared_) { - caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum), slope_diff); + caffe_gpu_set(this->blobs_[0]->count(), Dtype(dsum), slope_diff); } } // Propagate to bottom diff --git a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp index cc236fe1e8e..077d949981c 100644 --- a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp +++ b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp @@ -71,7 +71,7 @@ void SigmoidCrossEntropyLossLayer::Backward_cpu( } #ifdef CPU_ONLY -STUB_GPU_BACKWARD(SigmoidCrossEntropyLossLayer, Backward); +STUB_GPU(SigmoidCrossEntropyLossLayer); #endif INSTANTIATE_CLASS(SigmoidCrossEntropyLossLayer); diff --git a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu index 547fa80c72f..08f7f492297 100644 --- a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu +++ 
b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu @@ -8,6 +8,26 @@ namespace caffe { +template +void SigmoidCrossEntropyLossLayer::Forward_gpu( + const vector*>& bottom, const vector*>& top) { + // The forward pass computes the sigmoid outputs. + sigmoid_bottom_vec_[0] = bottom[0]; + sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_); + // Compute the loss (negative log likelihood) + const int count = bottom[0]->count(); + const int num = bottom[0]->num(); + // Stable version of loss computation from input data + const Dtype* input_data = bottom[0]->cpu_data(); + const Dtype* target = bottom[1]->cpu_data(); + Dtype loss = 0; + for (int i = 0; i < count; ++i) { + loss -= input_data[i] * (target[i] - (input_data[i] >= 0)) - + log(1 + exp(input_data[i] - 2 * input_data[i] * (input_data[i] >= 0))); + } + top[0]->mutable_cpu_data()[0] = loss / num; +} + template void SigmoidCrossEntropyLossLayer::Backward_gpu( const vector*>& top, const vector& propagate_down, @@ -31,7 +51,7 @@ void SigmoidCrossEntropyLossLayer::Backward_gpu( } } -INSTANTIATE_LAYER_GPU_BACKWARD(SigmoidCrossEntropyLossLayer); +INSTANTIATE_LAYER_GPU_FUNCS(SigmoidCrossEntropyLossLayer); } // namespace caffe diff --git a/src/caffe/layers/slice_layer.cu b/src/caffe/layers/slice_layer.cu index 796841d3f52..e6e65677bd8 100644 --- a/src/caffe/layers/slice_layer.cu +++ b/src/caffe/layers/slice_layer.cu @@ -6,41 +6,22 @@ namespace caffe { -template -__global__ void Slice(const int nthreads, const Dtype* in_data, - const bool forward, const int num_slices, const int slice_size, - const int bottom_slice_axis, const int top_slice_axis, - const int offset_slice_axis, Dtype* out_data) { - CUDA_KERNEL_LOOP(index, nthreads) { - const int total_slice_size = slice_size * top_slice_axis; - const int slice_num = index / total_slice_size; - const int slice_index = index % total_slice_size; - const int bottom_index = slice_index + - (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size; - if 
(forward) { - out_data[index] = in_data[bottom_index]; - } else { - out_data[bottom_index] = in_data[index]; - } - } -} - template void SliceLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { int offset_slice_axis = 0; const Dtype* bottom_data = bottom[0]->gpu_data(); const int bottom_slice_axis = bottom[0]->shape(slice_axis_); - const bool kForward = true; for (int i = 0; i < top.size(); ++i) { Dtype* top_data = top[i]->mutable_gpu_data(); const int top_slice_axis = top[i]->shape(slice_axis_); - const int top_slice_size = top_slice_axis * slice_size_; - const int nthreads = top_slice_size * num_slices_; - Slice // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - nthreads, bottom_data, kForward, num_slices_, slice_size_, - bottom_slice_axis, top_slice_axis, offset_slice_axis, top_data); + for (int n = 0; n < num_slices_; ++n) { + const int top_offset = n * top_slice_axis * slice_size_; + const int bottom_offset = + (n * bottom_slice_axis + offset_slice_axis) * slice_size_; + caffe_copy(top_slice_axis * slice_size_, + bottom_data + bottom_offset, top_data + top_offset); + } offset_slice_axis += top_slice_axis; } } @@ -52,16 +33,16 @@ void SliceLayer::Backward_gpu(const vector*>& top, int offset_slice_axis = 0; Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int bottom_slice_axis = bottom[0]->shape(slice_axis_); - const bool kForward = false; for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); const int top_slice_axis = top[i]->shape(slice_axis_); - const int top_slice_size = top_slice_axis * slice_size_; - const int nthreads = top_slice_size * num_slices_; - Slice // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - nthreads, top_diff, kForward, num_slices_, slice_size_, - bottom_slice_axis, top_slice_axis, offset_slice_axis, bottom_diff); + for (int n = 0; n < num_slices_; ++n) { + const int top_offset = n * top_slice_axis * slice_size_; + const int bottom_offset = + (n * bottom_slice_axis + offset_slice_axis) 
* slice_size_; + caffe_copy(top_slice_axis * slice_size_, + top_diff + top_offset, bottom_diff + bottom_offset); + } offset_slice_axis += top_slice_axis; } } diff --git a/src/caffe/layers/triplet_loss_layer.cpp b/src/caffe/layers/triplet_loss_layer.cpp new file mode 100644 index 00000000000..fc8b9fe036f --- /dev/null +++ b/src/caffe/layers/triplet_loss_layer.cpp @@ -0,0 +1,124 @@ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/loss_layers.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +void TripletLossLayer::LayerSetUp( + const vector*>& bottom, const vector*>& top) { + LossLayer::LayerSetUp(bottom, top); + CHECK_EQ(bottom[0]->channels(), bottom[1]->channels()); + CHECK_EQ(bottom[0]->channels(), bottom[2]->channels()); + CHECK_EQ(bottom[0]->height(), 1); + CHECK_EQ(bottom[0]->width(), 1); + CHECK_EQ(bottom[1]->height(), 1); + CHECK_EQ(bottom[1]->width(), 1); + CHECK_EQ(bottom[2]->height(), 1); + CHECK_EQ(bottom[2]->width(), 1); + CHECK_EQ(bottom[3]->channels(), 1); + CHECK_EQ(bottom[3]->height(), 1); + CHECK_EQ(bottom[3]->width(), 1); + diff_pos.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); + diff_neg.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); + diff_sq_pos.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); + diff_sq_neg.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); + dist_sq_.Reshape(bottom[0]->num(), 1, 1, 1); + dist_sq_pos.Reshape(bottom[0]->num(), 1, 1, 1); + dist_sq_neg.Reshape(bottom[0]->num(), 1, 1, 1); + // vector of ones used to sum along channels + summer_vec_.Reshape(bottom[0]->channels(), 1, 1, 1); + for (int i = 0; i < bottom[0]->channels(); ++i) + summer_vec_.mutable_cpu_data()[i] = Dtype(1); +} + +template +void TripletLossLayer::Forward_cpu( + const vector*>& bottom, + const vector*>& top) { + int count = bottom[0]->count(); + caffe_sub( + count, + bottom[0]->cpu_data(), // a + bottom[1]->cpu_data(), // b + 
diff_pos.mutable_cpu_data()); // a_i-b_i + caffe_sub( + count, + bottom[0]->cpu_data(), // a + bottom[2]->cpu_data(), // c + diff_neg.mutable_cpu_data()); // a_i-c_i + const int channels = bottom[0]->channels(); + Dtype margin = this->layer_param_.triplet_loss_param().margin(); + Dtype loss(0.0); + + //Loss component calculated from ab + for (int i = 0; i < bottom[0]->num(); ++i) { + dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(channels, + diff_pos.cpu_data() + (i*channels), diff_pos.cpu_data() + (i*channels)); + //ab is a similar pair + dist_sq_ += dist_sq_pos.cpu_data()[i]; + + + //Loss component calculated from ac + dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(channels, + diff_neg.cpu_data() + (i*channels), diff_neg.cpu_data() + (i*channels)); + //ac is a dissimilar pair + dist_sq_ -= dist_sq_neg.cpu_data()[i]; + loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); + + } + loss = loss / static_cast(bottom[0]->num()) / Dtype(2); + top[0]->mutable_cpu_data()[0] = loss; +} + +template +void TripletLossLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + Dtype margin = this->layer_param_.triplet_loss_param().margin(); + for (int i = 0; i < 3; ++i) { + if (propagate_down[i]) { + const Dtype sign = (i == 0) ? 
1 : -1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(bottom[i]->num()); + int num = bottom[i]->num(); + int channels = bottom[i]->channels(); + for (int j = 0; j < num; ++j) { + Dtype* bout = bottom[i]->mutable_cpu_diff(); + if ((margin-dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // similar pairs + caffe_cpu_axpby( + channels, + alpha, + diff_pos.cpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + + // dissimilar pairs + + caffe_cpu_axpby( + channels, + -alpha, + diff_neg.cpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + + } else { + caffe_set(channels, Dtype(0), bout + (j*channels)); + } + } + } + } +} + +#ifdef CPU_ONLY +STUB_GPU(TripletLossLayer); +#endif + +INSTANTIATE_CLASS(TripletLossLayer); +REGISTER_LAYER_CLASS(TripletLoss); + +} // namespace caffe diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp index a18ee63818e..888eec1d501 100644 --- a/src/caffe/net.cpp +++ b/src/caffe/net.cpp @@ -79,17 +79,10 @@ void Net::Init(const NetParameter& in_param) { } // Setup layer. const LayerParameter& layer_param = param.layer(layer_id); - if (layer_param.propagate_down_size() > 0) { - CHECK_EQ(layer_param.propagate_down_size(), - layer_param.bottom_size()) - << "propagate_down param must be specified " - << "either 0 or bottom_size times "; - } layers_.push_back(LayerRegistry::CreateLayer(layer_param)); layer_names_.push_back(layer_param.name()); LOG(INFO) << "Creating Layer " << layer_param.name(); bool need_backward = false; - // Figure out this layer's input and output for (int bottom_id = 0; bottom_id < layer_param.bottom_size(); ++bottom_id) { @@ -158,33 +151,15 @@ void Net::Init(const NetParameter& in_param) { // Go through the net backwards to determine which blobs contribute to the // loss. We can skip backward computation for blobs that don't contribute // to the loss. 
- // Also checks if all bottom blobs don't need backward computation (possible - // because the skip_propagate_down param) and so we can skip bacward - // computation for the entire layer set blobs_under_loss; - set blobs_skip_backp; for (int layer_id = layers_.size() - 1; layer_id >= 0; --layer_id) { bool layer_contributes_loss = false; - bool layer_skip_propagate_down = true; for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) { const string& blob_name = blob_names_[top_id_vecs_[layer_id][top_id]]; if (layers_[layer_id]->loss(top_id) || (blobs_under_loss.find(blob_name) != blobs_under_loss.end())) { layer_contributes_loss = true; - } - if (blobs_skip_backp.find(blob_name) == blobs_skip_backp.end()) { - layer_skip_propagate_down = false; - } - if (layer_contributes_loss && !layer_skip_propagate_down) break; - } - // If this layer can skip backward computation, also all his bottom blobs - // don't need backpropagation - if (layer_need_backward_[layer_id] && layer_skip_propagate_down) { - layer_need_backward_[layer_id] = false; - for (int bottom_id = 0; bottom_id < bottom_vecs_[layer_id].size(); - ++bottom_id) { - bottom_need_backward_[layer_id][bottom_id] = false; } } if (!layer_contributes_loss) { layer_need_backward_[layer_id] = false; } @@ -203,11 +178,6 @@ void Net::Init(const NetParameter& in_param) { } else { bottom_need_backward_[layer_id][bottom_id] = false; } - if (!bottom_need_backward_[layer_id][bottom_id]) { - const string& blob_name = - blob_names_[bottom_id_vecs_[layer_id][bottom_id]]; - blobs_skip_backp.insert(blob_name); - } } } // Handle force_backward if needed. @@ -397,9 +367,9 @@ void Net::AppendTop(const NetParameter& param, const int layer_id, // Helper for Net::Init: add a new bottom blob to the net. 
template -int Net::AppendBottom(const NetParameter& param, const int layer_id, - const int bottom_id, set* available_blobs, - map* blob_name_to_idx) { +int Net::AppendBottom(const NetParameter& param, + const int layer_id, const int bottom_id, + set* available_blobs, map* blob_name_to_idx) { const LayerParameter& layer_param = param.layer(layer_id); const string& blob_name = layer_param.bottom(bottom_id); if (available_blobs->find(blob_name) == available_blobs->end()) { @@ -411,12 +381,7 @@ int Net::AppendBottom(const NetParameter& param, const int layer_id, bottom_vecs_[layer_id].push_back(blobs_[blob_id].get()); bottom_id_vecs_[layer_id].push_back(blob_id); available_blobs->erase(blob_name); - bool propagate_down = true; - // Check if the backpropagation on bottom_id should be skipped - if (layer_param.propagate_down_size() > 0) - propagate_down = layer_param.propagate_down(bottom_id); - const bool need_backward = blob_need_backward_[blob_id] && - propagate_down; + const bool need_backward = blob_need_backward_[blob_id]; bottom_need_backward_[layer_id].push_back(need_backward); return blob_id; } @@ -445,7 +410,7 @@ void Net::AppendParam(const NetParameter& param, const int layer_id, // (i.e., not given a param_name) or explicitly given a name that we // haven't already seen. 
param_owners_.push_back(-1); - if (param_name.size()) { + if (param_size) { param_names_index_[param_name] = net_param_id; } } else { @@ -505,6 +470,7 @@ Dtype Net::ForwardFromTo(int start, int end) { } for (int i = start; i <= end; ++i) { // LOG(ERROR) << "Forwarding " << layer_names_[i]; + layers_[i]->Reshape(bottom_vecs_[i], top_vecs_[i]); Dtype layer_loss = layers_[i]->Forward(bottom_vecs_[i], top_vecs_[i]); loss += layer_loss; if (debug_info_) { ForwardDebugInfo(i); } diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index 81a8c69d88e..edf7ae81d58 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -49,14 +49,6 @@ message FillerParameter { // The expected number of non-zero output weights for a given input in // Gaussian filler -- the default -1 means don't perform sparsification. optional int32 sparse = 7 [default = -1]; - // Normalize the filler variance by fan_in, fan_out, or their average. - // Applies to 'xavier' and 'msra' fillers. - enum VarianceNorm { - FAN_IN = 0; - FAN_OUT = 1; - AVERAGE = 2; - } - optional VarianceNorm variance_norm = 8 [default = FAN_IN]; } message NetParameter { @@ -96,7 +88,7 @@ message NetParameter { // NOTE // Update the next available ID when you add a new SolverParameter field. // -// SolverParameter next available ID: 37 (last added: iter_size) +// SolverParameter next available ID: 36 (last added: clip_gradients) message SolverParameter { ////////////////////////////////////////////////////////////////////////////// // Specifying the train and test networks @@ -149,8 +141,6 @@ message SolverParameter { // Display the loss averaged over the last average_loss iterations optional int32 average_loss = 33 [default = 1]; optional int32 max_iter = 7; // the maximum number of iterations - // accumulate gradients over `iter_size` x `batch_size` instances - optional int32 iter_size = 36 [default = 1]; optional string lr_policy = 8; // The learning rate decay policy. 
optional float gamma = 9; // The parameter to compute the learning rate. optional float power = 10; // The parameter to compute the learning rate. @@ -269,7 +259,7 @@ message ParamSpec { // NOTE // Update the next available ID when you add a new LayerParameter field. // -// LayerParameter next available layer-specific ID: 137 (last added: reduction_param) +// LayerParameter next available layer-specific ID: 132 (last added: prelu_param) message LayerParameter { optional string name = 1; // the layer name optional string type = 2; // the layer type @@ -290,10 +280,6 @@ message LayerParameter { // The blobs containing the numeric parameters of the layer. repeated BlobProto blobs = 7; - - // Specifies on which bottoms the backpropagation should be skipped. - // The size must be either 0 or equal to the number of bottoms. - repeated bool propagate_down = 11; // Rules controlling whether and when a layer is included in the network, // based on the current NetState. You may specify a non-zero number of rules @@ -326,14 +312,12 @@ message LayerParameter { optional DummyDataParameter dummy_data_param = 109; optional EltwiseParameter eltwise_param = 110; optional ExpParameter exp_param = 111; - optional FlattenParameter flatten_param = 135; optional HDF5DataParameter hdf5_data_param = 112; optional HDF5OutputParameter hdf5_output_param = 113; optional HingeLossParameter hinge_loss_param = 114; optional ImageDataParameter image_data_param = 115; optional InfogainLossParameter infogain_loss_param = 116; optional InnerProductParameter inner_product_param = 117; - optional LogParameter log_param = 134; optional LRNParameter lrn_param = 118; optional MemoryDataParameter memory_data_param = 119; optional MVNParameter mvn_param = 120; @@ -341,16 +325,14 @@ message LayerParameter { optional PowerParameter power_param = 122; optional PReLUParameter prelu_param = 131; optional PythonParameter python_param = 130; - optional ReductionParameter reduction_param = 136; optional 
ReLUParameter relu_param = 123; - optional ReshapeParameter reshape_param = 133; optional SigmoidParameter sigmoid_param = 124; optional SoftmaxParameter softmax_param = 125; - optional SPPParameter spp_param = 132; optional SliceParameter slice_param = 126; optional TanHParameter tanh_param = 127; optional ThresholdParameter threshold_param = 128; optional WindowDataParameter window_data_param = 129; + optional TripletLossParameter triplet_loss_param = 132; } // Message that stores parameters used to apply transformation @@ -370,10 +352,6 @@ message TransformationParameter { // or can be repeated the same number of times as channels // (would subtract them from the corresponding channel) repeated float mean_value = 5; - // Force the decoded image to have 3 color channels. - optional bool force_color = 6 [default = false]; - // Force the decoded image to have 1 color channels. - optional bool force_gray = 7 [default = false]; } // Message that stores parameters shared by loss layers @@ -385,9 +363,7 @@ message LossParameter { optional bool normalize = 2 [default = true]; } -// Messages that store parameters used by individual layer types follow, in -// alphabetical order. - +// Message that stores parameters used by AccuracyLayer message AccuracyParameter { // When computing accuracy, count as correct by comparing the true label to // the top k scoring classes. By default, only compare to the top scoring @@ -405,12 +381,14 @@ message AccuracyParameter { optional int32 ignore_label = 3; } +// Message that stores parameters used by ArgMaxLayer message ArgMaxParameter { // If true produce pairs (argmax, maxval) optional bool out_max_val = 1 [default = false]; optional uint32 top_k = 2 [default = 1]; } +// Message that stores parameters used by ConcatLayer message ConcatParameter { // The axis along which to concatenate -- may be negative to index from the // end (e.g., -1 for the last axis). 
Other axes must have the @@ -422,18 +400,16 @@ message ConcatParameter { optional uint32 concat_dim = 1 [default = 1]; } +// Message that stores parameters used by ContrastiveLossLayer message ContrastiveLossParameter { - // margin for dissimilar pair + //margin for dissimilar pair optional float margin = 1 [default = 1.0]; - // The first implementation of this cost did not exactly match the cost of - // Hadsell et al 2006 -- using (margin - d^2) instead of (margin - d)^2. - // legacy_version = false (the default) uses (margin - d)^2 as proposed in the - // Hadsell paper. New models should probably use this version. - // legacy_version = true uses (margin - d^2). This is kept to support / - // reproduce existing models and results - optional bool legacy_version = 2 [default = false]; } - +message TripletLossParameter { + //margin for negative triplet + optional float margin = 1 [default = 1.0]; +} +// Message that stores parameters used by ConvolutionLayer message ConvolutionParameter { optional uint32 num_output = 1; // The number of outputs for the layer optional bool bias_term = 2 [default = true]; // whether to have bias terms @@ -459,6 +435,7 @@ message ConvolutionParameter { optional Engine engine = 15 [default = DEFAULT]; } +// Message that stores parameters used by DataLayer message DataParameter { enum DB { LEVELDB = 0; @@ -489,10 +466,12 @@ message DataParameter { optional bool force_encoded_color = 9 [default = false]; } +// Message that stores parameters used by DropoutLayer message DropoutParameter { optional float dropout_ratio = 1 [default = 0.5]; // dropout ratio } +// Message that stores parameters used by DummyDataLayer. // DummyDataLayer fills any number of arbitrarily shaped blobs with random // (or constant) data generated by "Fillers" (see "message FillerParameter"). 
message DummyDataParameter { @@ -512,6 +491,7 @@ message DummyDataParameter { repeated uint32 width = 5; } +// Message that stores parameters used by EltwiseLayer message EltwiseParameter { enum EltwiseOp { PROD = 0; @@ -526,6 +506,7 @@ message EltwiseParameter { optional bool stable_prod_grad = 3 [default = true]; } +// Message that stores parameters used by ExpLayer message ExpParameter { // ExpLayer computes outputs y = base ^ (shift + scale * x), for base > 0. // Or if base is set to the default (-1), base is set to e, @@ -535,18 +516,6 @@ message ExpParameter { optional float shift = 3 [default = 0.0]; } -/// Message that stores parameters used by FlattenLayer -message FlattenParameter { - // The first axis to flatten: all preceding axes are retained in the output. - // May be negative to index from the end (e.g., -1 for the last axis). - optional int32 axis = 1 [default = 1]; - - // The last axis to flatten: all following axes are retained in the output. - // May be negative to index from the end (e.g., the default -1 for the last - // axis). - optional int32 end_axis = 2 [default = -1]; -} - // Message that stores parameters used by HDF5DataLayer message HDF5DataParameter { // Specify the data source. @@ -562,6 +531,7 @@ message HDF5DataParameter { optional bool shuffle = 3 [default = false]; } +// Message that stores parameters used by HDF5OutputLayer message HDF5OutputParameter { optional string file_name = 1; } @@ -575,6 +545,7 @@ message HingeLossParameter { optional Norm norm = 1 [default = L1]; } +// Message that stores parameters used by ImageDataLayer message ImageDataParameter { // Specify the data source. optional string source = 1; @@ -606,11 +577,13 @@ message ImageDataParameter { optional string root_folder = 12 [default = ""]; } +// Message that stores parameters InfogainLossLayer message InfogainLossParameter { // Specify the infogain matrix source. 
optional string source = 1; } +// Message that stores parameters used by InnerProductLayer message InnerProductParameter { optional uint32 num_output = 1; // The number of outputs for the layer optional bool bias_term = 2 [default = true]; // whether to have bias terms @@ -623,16 +596,6 @@ message InnerProductParameter { optional int32 axis = 5 [default = 1]; } -// Message that stores parameters used by LogLayer -message LogParameter { - // LogLayer computes outputs y = log_base(shift + scale * x), for base > 0. - // Or if base is set to the default (-1), base is set to e, - // so y = ln(shift + scale * x) = log_e(shift + scale * x) - optional float base = 1 [default = -1.0]; - optional float scale = 2 [default = 1.0]; - optional float shift = 3 [default = 0.0]; -} - // Message that stores parameters used by LRNLayer message LRNParameter { optional uint32 local_size = 1 [default = 5]; @@ -646,6 +609,7 @@ message LRNParameter { optional float k = 5 [default = 1.]; } +// Message that stores parameters used by MemoryDataLayer message MemoryDataParameter { optional uint32 batch_size = 1; optional uint32 channels = 2; @@ -653,17 +617,16 @@ message MemoryDataParameter { optional uint32 width = 4; } +// Message that stores parameters used by MVNLayer message MVNParameter { // This parameter can be set to false to normalize mean only optional bool normalize_variance = 1 [default = true]; // This parameter can be set to true to perform DNN-like MVN optional bool across_channels = 2 [default = false]; - - // Epsilon for not dividing by zero while normalizing variance - optional float eps = 3 [default = 1e-9]; } +// Message that stores parameters used by PoolingLayer message PoolingParameter { enum PoolMethod { MAX = 0; @@ -693,6 +656,7 @@ message PoolingParameter { optional bool global_pooling = 12 [default = false]; } +// Message that stores parameters used by PowerLayer message PowerParameter { // PowerLayer computes outputs y = (shift + scale * x) ^ power. 
optional float power = 1 [default = 1.0]; @@ -700,40 +664,12 @@ message PowerParameter { optional float shift = 3 [default = 0.0]; } +// Message that stores parameters used by PythonLayer message PythonParameter { optional string module = 1; optional string layer = 2; } -// Message that stores parameters used by ReductionLayer -message ReductionParameter { - enum ReductionOp { - SUM = 1; - ASUM = 2; - SUMSQ = 3; - MEAN = 4; - } - - optional ReductionOp operation = 1 [default = SUM]; // reduction operation - - // The first axis to reduce to a scalar -- may be negative to index from the - // end (e.g., -1 for the last axis). - // (Currently, only reduction along ALL "tail" axes is supported; reduction - // of axis M through N, where N < num_axes - 1, is unsupported.) - // Suppose we have an n-axis bottom Blob with shape: - // (d0, d1, d2, ..., d(m-1), dm, d(m+1), ..., d(n-1)). - // If axis == m, the output Blob will have shape - // (d0, d1, d2, ..., d(m-1)), - // and the ReductionOp operation is performed (d0 * d1 * d2 * ... * d(m-1)) - // times, each including (dm * d(m+1) * ... * d(n-1)) individual data. - // If axis == 0 (the default), the output Blob always has the empty shape - // (count 1), performing reduction across the entire input -- - // often useful for creating new loss functions. - optional int32 axis = 2 [default = 0]; - - optional float coeff = 3 [default = 1.0]; // coefficient for output -} - // Message that stores parameters used by ReLULayer message ReLUParameter { // Allow non-zero slope for negative inputs to speed up optimization @@ -750,70 +686,7 @@ message ReLUParameter { optional Engine engine = 2 [default = DEFAULT]; } -message ReshapeParameter { - // Specify the output dimensions. If some of the dimensions are set to 0, - // the corresponding dimension from the bottom layer is used (unchanged). 
- // Exactly one dimension may be set to -1, in which case its value is - // inferred from the count of the bottom blob and the remaining dimensions. - // For example, suppose we want to reshape a 2D blob "input" with shape 2 x 8: - // - // layer { - // type: "Reshape" bottom: "input" top: "output" - // reshape_param { ... } - // } - // - // If "input" is 2D with shape 2 x 8, then the following reshape_param - // specifications are all equivalent, producing a 3D blob "output" with shape - // 2 x 2 x 4: - // - // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } - // reshape_param { shape { dim: 0 dim: 2 dim: 4 } } - // reshape_param { shape { dim: 0 dim: 2 dim: -1 } } - // reshape_param { shape { dim: -1 dim: 0 dim: 2 } } - // - optional BlobShape shape = 1; - - // axis and num_axes control the portion of the bottom blob's shape that are - // replaced by (included in) the reshape. By default (axis == 0 and - // num_axes == -1), the entire bottom blob shape is included in the reshape, - // and hence the shape field must specify the entire output shape. - // - // axis may be non-zero to retain some portion of the beginning of the input - // shape (and may be negative to index from the end; e.g., -1 to begin the - // reshape after the last axis, including nothing in the reshape, - // -2 to include only the last axis, etc.). - // - // For example, suppose "input" is a 2D blob with shape 2 x 8. - // Then the following ReshapeLayer specifications are all equivalent, - // producing a blob "output" with shape 2 x 2 x 4: - // - // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } - // reshape_param { shape { dim: 2 dim: 4 } axis: 1 } - // reshape_param { shape { dim: 2 dim: 4 } axis: -3 } - // - // num_axes specifies the extent of the reshape. - // If num_axes >= 0 (and axis >= 0), the reshape will be performed only on - // input axes in the range [axis, axis+num_axes]. - // num_axes may also be -1, the default, to include all remaining axes - // (starting from axis). 
- // - // For example, suppose "input" is a 2D blob with shape 2 x 8. - // Then the following ReshapeLayer specifications are equivalent, - // producing a blob "output" with shape 1 x 2 x 8. - // - // reshape_param { shape { dim: 1 dim: 2 dim: 8 } } - // reshape_param { shape { dim: 1 dim: 2 } num_axes: 1 } - // reshape_param { shape { dim: 1 } num_axes: 0 } - // - // On the other hand, these would produce output blob shape 2 x 1 x 8: - // - // reshape_param { shape { dim: 2 dim: 1 dim: 8 } } - // reshape_param { shape { dim: 1 } axis: 1 num_axes: 0 } - // - optional int32 axis = 2 [default = 0]; - optional int32 num_axes = 3 [default = -1]; -} - +// Message that stores parameters used by SigmoidLayer message SigmoidParameter { enum Engine { DEFAULT = 0; @@ -823,6 +696,7 @@ message SigmoidParameter { optional Engine engine = 1 [default = DEFAULT]; } +// Message that stores parameters used by SliceLayer message SliceParameter { // The axis along which to slice -- may be negative to index from the end // (e.g., -1 for the last axis). @@ -849,6 +723,7 @@ message SoftmaxParameter { optional int32 axis = 2 [default = 1]; } +// Message that stores parameters used by TanHLayer message TanHParameter { enum Engine { DEFAULT = 0; @@ -858,10 +733,12 @@ message TanHParameter { optional Engine engine = 1 [default = DEFAULT]; } +// Message that stores parameters used by ThresholdLayer message ThresholdParameter { optional float threshold = 1 [default = 0]; // Strictly positive values } +// Message that stores parameters used by WindowDataLayer message WindowDataParameter { // Specify the data source. 
optional string source = 1; @@ -895,22 +772,6 @@ message WindowDataParameter { optional string root_folder = 13 [default = ""]; } -message SPPParameter { - enum PoolMethod { - MAX = 0; - AVE = 1; - STOCHASTIC = 2; - } - optional uint32 pyramid_height = 1; - optional PoolMethod pool = 2 [default = MAX]; // The pooling method - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 6 [default = DEFAULT]; -} - // DEPRECATED: use LayerParameter. message V1LayerParameter { repeated string bottom = 2; @@ -957,6 +818,7 @@ message V1LayerParameter { SPLIT = 22; SLICE = 33; TANH = 23; + TRIPLET_LOSS = 40; WINDOW_DATA = 24; THRESHOLD = 31; } @@ -1002,6 +864,7 @@ message V1LayerParameter { optional TransformationParameter transform_param = 36; optional LossParameter loss_param = 42; optional V0LayerParameter layer = 1; + optional TripletLossParameter triplet_loss_param = 43; } // DEPRECATED: V0LayerParameter is the old way of specifying layer parameters @@ -1098,6 +961,7 @@ message V0LayerParameter { optional HDF5OutputParameter hdf5_output_param = 1001; } +// Message that stores parameters used by PReLULayer message PReLUParameter { // Parametric ReLU described in K. He et al, Delving Deep into Rectifiers: // Surpassing Human-Level Performance on ImageNet Classification, 2015. 
diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp index aabe0edec80..096980dd7af 100644 --- a/src/caffe/solver.cpp +++ b/src/caffe/solver.cpp @@ -167,26 +167,7 @@ void Solver::Step(int iters) { vector losses; Dtype smoothed_loss = 0; - while (iter_ < stop_iter) { - // zero-init the params - for (int i = 0; i < net_->params().size(); ++i) { - shared_ptr > blob = net_->params()[i]; - switch (Caffe::mode()) { - case Caffe::CPU: - caffe_set(blob->count(), static_cast(0), - blob->mutable_cpu_diff()); - break; - case Caffe::GPU: -#ifndef CPU_ONLY - caffe_gpu_set(blob->count(), static_cast(0), - blob->mutable_gpu_diff()); -#else - NO_GPU; -#endif - break; - } - } - + for (; iter_ < stop_iter; ++iter_) { if (param_.test_interval() && iter_ % param_.test_interval() == 0 && (iter_ > 0 || param_.test_initialization())) { TestAll(); @@ -194,13 +175,7 @@ void Solver::Step(int iters) { const bool display = param_.display() && iter_ % param_.display() == 0; net_->set_debug_info(display && param_.debug_info()); - // accumulate the loss and gradient - Dtype loss = 0; - for (int i = 0; i < param_.iter_size(); ++i) { - loss += net_->ForwardBackward(bottom_vec); - } - loss /= param_.iter_size(); - // average the loss across iterations for smoothed reporting + Dtype loss = net_->ForwardBackward(bottom_vec); if (losses.size() < average_loss) { losses.push_back(loss); int size = losses.size(); @@ -232,14 +207,11 @@ void Solver::Step(int iters) { } } } - ApplyUpdate(); - - // Increment the internal iter_ counter -- its value should always indicate - // the number of times the weights have been updated. - ++iter_; + ComputeUpdateValue(); + net_->Update(); // Save a snapshot if needed. 
- if (param_.snapshot() && iter_ % param_.snapshot() == 0) { + if (param_.snapshot() && (iter_ + 1) % param_.snapshot() == 0) { Snapshot(); } } @@ -355,14 +327,15 @@ void Solver::Snapshot() { string model_filename, snapshot_filename; const int kBufferSize = 20; char iter_str_buffer[kBufferSize]; - snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_); + // Add one to iter_ to get the number of iterations that have completed. + snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_ + 1); filename += iter_str_buffer; model_filename = filename + ".caffemodel"; LOG(INFO) << "Snapshotting to " << model_filename; WriteProtoToBinaryFile(net_param, model_filename.c_str()); SolverState state; SnapshotSolverState(&state); - state.set_iter(iter_); + state.set_iter(iter_ + 1); state.set_learned_net(model_filename); state.set_current_step(current_step_); snapshot_filename = filename + ".solverstate"; @@ -480,138 +453,95 @@ void SGDSolver::ClipGradients() { } template -void SGDSolver::ApplyUpdate() { +void SGDSolver::ComputeUpdateValue() { + const vector > >& net_params = this->net_->params(); + const vector& net_params_lr = this->net_->params_lr(); + const vector& net_params_weight_decay = + this->net_->params_weight_decay(); + // get the learning rate Dtype rate = GetLearningRate(); if (this->param_.display() && this->iter_ % this->param_.display() == 0) { LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; } ClipGradients(); - for (int param_id = 0; param_id < this->net_->params().size(); ++param_id) { - Normalize(param_id); - Regularize(param_id); - ComputeUpdateValue(param_id, rate); - } - this->net_->Update(); -} - -template -void SGDSolver::Normalize(int param_id) { - if (this->param_.iter_size() == 1) { return; } - // Scale gradient to counterbalance accumulation. - const vector > >& net_params = this->net_->params(); - const Dtype accum_normalization = Dtype(1.) 
/ this->param_.iter_size(); - switch (Caffe::mode()) { - case Caffe::CPU: { - caffe_scal(net_params[param_id]->count(), accum_normalization, - net_params[param_id]->mutable_cpu_diff()); - break; - } - case Caffe::GPU: { -#ifndef CPU_ONLY - caffe_gpu_scal(net_params[param_id]->count(), accum_normalization, - net_params[param_id]->mutable_gpu_diff()); -#else - NO_GPU; -#endif - break; - } - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} - -template -void SGDSolver::Regularize(int param_id) { - const vector > >& net_params = this->net_->params(); - const vector& net_params_weight_decay = - this->net_->params_weight_decay(); + Dtype momentum = this->param_.momentum(); Dtype weight_decay = this->param_.weight_decay(); string regularization_type = this->param_.regularization_type(); - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; switch (Caffe::mode()) { - case Caffe::CPU: { - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else if (regularization_type == "L1") { - caffe_cpu_sign(net_params[param_id]->count(), - net_params[param_id]->cpu_data(), - temp_[param_id]->mutable_cpu_data()); - caffe_axpy(net_params[param_id]->count(), - local_decay, - temp_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; + case Caffe::CPU: + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + // Compute the value to history, and then copy them to the blob's diff. 
+ Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else if (regularization_type == "L1") { + caffe_cpu_sign(net_params[param_id]->count(), + net_params[param_id]->cpu_data(), + temp_[param_id]->mutable_cpu_data()); + caffe_axpy(net_params[param_id]->count(), + local_decay, + temp_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } } + + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), momentum, + history_[param_id]->mutable_cpu_data()); + // copy + caffe_copy(net_params[param_id]->count(), + history_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); } break; - } - case Caffe::GPU: { + case Caffe::GPU: #ifndef CPU_ONLY - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else if (regularization_type == "L1") { - caffe_gpu_sign(net_params[param_id]->count(), - net_params[param_id]->gpu_data(), - temp_[param_id]->mutable_gpu_data()); - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - temp_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + // Compute the value to history, and then copy them to the blob's diff. 
+ Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else if (regularization_type == "L1") { + caffe_gpu_sign(net_params[param_id]->count(), + net_params[param_id]->gpu_data(), + temp_[param_id]->mutable_gpu_data()); + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + temp_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } } - } -#else - NO_GPU; -#endif - break; - } - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} -template -void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { - const vector > >& net_params = this->net_->params(); - const vector& net_params_lr = this->net_->params_lr(); - Dtype momentum = this->param_.momentum(); - Dtype local_rate = rate * net_params_lr[param_id]; - // Compute the update to history, then copy it to the parameter diff. 
- switch (Caffe::mode()) { - case Caffe::CPU: { - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->cpu_diff(), momentum, - history_[param_id]->mutable_cpu_data()); - caffe_copy(net_params[param_id]->count(), - history_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - break; - } - case Caffe::GPU: { -#ifndef CPU_ONLY - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->gpu_diff(), momentum, - history_[param_id]->mutable_gpu_data()); - caffe_copy(net_params[param_id]->count(), - history_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), momentum, + history_[param_id]->mutable_gpu_data()); + // copy + caffe_copy(net_params[param_id]->count(), + history_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } #else NO_GPU; #endif break; - } default: LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); } @@ -638,138 +568,252 @@ void SGDSolver::RestoreSolverState(const SolverState& state) { } template -void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { +void NesterovSolver::ComputeUpdateValue() { const vector > >& net_params = this->net_->params(); const vector& net_params_lr = this->net_->params_lr(); + const vector& net_params_weight_decay = + this->net_->params_weight_decay(); + // get the learning rate + Dtype rate = this->GetLearningRate(); + if (this->param_.display() && this->iter_ % this->param_.display() == 0) { + LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; + } + SGDSolver::ClipGradients(); Dtype momentum = this->param_.momentum(); - Dtype local_rate = rate * net_params_lr[param_id]; + Dtype weight_decay = this->param_.weight_decay(); + string regularization_type = this->param_.regularization_type(); switch (Caffe::mode()) { - case Caffe::CPU: { - // save history momentum for stepping back - 
caffe_copy(net_params[param_id]->count(), - this->history_[param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); - - // update history - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->cpu_diff(), momentum, - this->history_[param_id]->mutable_cpu_data()); - - // compute update: step back then over step - caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, - this->history_[param_id]->cpu_data(), -momentum, - this->update_[param_id]->mutable_cpu_data()); - - // copy - caffe_copy(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); + case Caffe::CPU: + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + // save history momentum for stepping back + caffe_copy(net_params[param_id]->count(), + this->history_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else if (regularization_type == "L1") { + caffe_cpu_sign(net_params[param_id]->count(), + net_params[param_id]->cpu_data(), + this->temp_[param_id]->mutable_cpu_data()); + caffe_axpy(net_params[param_id]->count(), + local_decay, + this->temp_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } + } + + // update history + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), momentum, + this->history_[param_id]->mutable_cpu_data()); + + // compute udpate: step back then over step + caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, + 
this->history_[param_id]->cpu_data(), -momentum, + this->update_[param_id]->mutable_cpu_data()); + + // copy + caffe_copy(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } break; - } - case Caffe::GPU: { + case Caffe::GPU: #ifndef CPU_ONLY - // save history momentum for stepping back - caffe_copy(net_params[param_id]->count(), - this->history_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - // update history - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->gpu_diff(), momentum, - this->history_[param_id]->mutable_gpu_data()); - - // compute update: step back then over step - caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, - this->history_[param_id]->gpu_data(), -momentum, - this->update_[param_id]->mutable_gpu_data()); - - // copy - caffe_copy(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + // save history momentum for stepping back + caffe_copy(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else if (regularization_type == "L1") { + caffe_gpu_sign(net_params[param_id]->count(), + net_params[param_id]->gpu_data(), + this->temp_[param_id]->mutable_gpu_data()); + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + this->temp_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " 
<< regularization_type; + } + } + + // update history + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), momentum, + this->history_[param_id]->mutable_gpu_data()); + + // compute udpate: step back then over step + caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, + this->history_[param_id]->gpu_data(), -momentum, + this->update_[param_id]->mutable_gpu_data()); + + // copy + caffe_copy(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } #else NO_GPU; #endif break; - } default: LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); } } template -void AdaGradSolver::ComputeUpdateValue(int param_id, Dtype rate) { +void AdaGradSolver::ComputeUpdateValue() { const vector > >& net_params = this->net_->params(); const vector& net_params_lr = this->net_->params_lr(); + const vector& net_params_weight_decay = + this->net_->params_weight_decay(); + // get the learning rate + Dtype rate = this->GetLearningRate(); Dtype delta = this->param_.delta(); - Dtype local_rate = rate * net_params_lr[param_id]; + if (this->param_.display() && this->iter_ % this->param_.display() == 0) { + LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; + } + SGDSolver::ClipGradients(); + Dtype weight_decay = this->param_.weight_decay(); + string regularization_type = this->param_.regularization_type(); switch (Caffe::mode()) { - case Caffe::CPU: { - // compute square of gradient in update - caffe_powx(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), Dtype(2), - this->update_[param_id]->mutable_cpu_data()); - - // update history - caffe_add(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), - this->history_[param_id]->cpu_data(), - this->history_[param_id]->mutable_cpu_data()); - - // prepare update - caffe_powx(net_params[param_id]->count(), - this->history_[param_id]->cpu_data(), Dtype(0.5), - 
this->update_[param_id]->mutable_cpu_data()); - - caffe_add_scalar(net_params[param_id]->count(), - delta, this->update_[param_id]->mutable_cpu_data()); - - caffe_div(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), - this->update_[param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); - - // scale and copy - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->cpu_data(), Dtype(0), - net_params[param_id]->mutable_cpu_diff()); + case Caffe::CPU: + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else if (regularization_type == "L1") { + caffe_cpu_sign(net_params[param_id]->count(), + net_params[param_id]->cpu_data(), + this->temp_[param_id]->mutable_cpu_data()); + caffe_axpy(net_params[param_id]->count(), + local_decay, + this->temp_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } + } + + // compute square of gradient in update + caffe_powx(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), Dtype(2), + this->update_[param_id]->mutable_cpu_data()); + + // update history + caffe_add(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), + this->history_[param_id]->cpu_data(), + this->history_[param_id]->mutable_cpu_data()); + + // prepare update + caffe_powx(net_params[param_id]->count(), + this->history_[param_id]->cpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_cpu_data()); + + caffe_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_cpu_data()); + + 
caffe_div(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), + this->update_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + // scale and copy + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->cpu_data(), Dtype(0), + net_params[param_id]->mutable_cpu_diff()); + } break; - } - case Caffe::GPU: { + case Caffe::GPU: #ifndef CPU_ONLY - // compute square of gradient in update - caffe_gpu_powx(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), Dtype(2), - this->update_[param_id]->mutable_gpu_data()); - - // update history - caffe_gpu_add(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), - this->history_[param_id]->gpu_data(), - this->history_[param_id]->mutable_gpu_data()); - - // prepare update - caffe_gpu_powx(net_params[param_id]->count(), - this->history_[param_id]->gpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_add_scalar(net_params[param_id]->count(), - delta, this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_div(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), - this->update_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - // scale and copy - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->gpu_data(), Dtype(0), - net_params[param_id]->mutable_gpu_diff()); + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else if (regularization_type == "L1") { + caffe_gpu_sign(net_params[param_id]->count(), + net_params[param_id]->gpu_data(), + 
this->temp_[param_id]->mutable_gpu_data()); + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + this->temp_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } + } + + // compute square of gradient in update + caffe_gpu_powx(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), Dtype(2), + this->update_[param_id]->mutable_gpu_data()); + + // update history + caffe_gpu_add(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + this->history_[param_id]->gpu_data(), + this->history_[param_id]->mutable_gpu_data()); + + // prepare update + caffe_gpu_powx(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_div(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), + this->update_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + // scale and copy + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->gpu_data(), Dtype(0), + net_params[param_id]->mutable_gpu_diff()); + } #else NO_GPU; #endif break; - } default: LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); } diff --git a/src/caffe/test/test_accuracy_layer.cpp b/src/caffe/test/test_accuracy_layer.cpp index c14b67cc0e9..6cbf51df45e 100644 --- a/src/caffe/test/test_accuracy_layer.cpp +++ b/src/caffe/test/test_accuracy_layer.cpp @@ -16,7 +16,7 @@ namespace caffe { template -class AccuracyLayerTest : public CPUDeviceTest { +class AccuracyLayerTest : public ::testing::Test { protected: AccuracyLayerTest() : blob_bottom_data_(new Blob()), @@ -92,6 +92,7 @@ TYPED_TEST(AccuracyLayerTest, TestSetupTopK) { TYPED_TEST(AccuracyLayerTest, TestForwardCPU) { LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); 
AccuracyLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); @@ -117,6 +118,7 @@ TYPED_TEST(AccuracyLayerTest, TestForwardCPU) { } TYPED_TEST(AccuracyLayerTest, TestForwardWithSpatialAxes) { + Caffe::set_mode(Caffe::CPU); this->blob_bottom_data_->Reshape(2, 10, 4, 5); vector label_shape(3); label_shape[0] = 2; label_shape[1] = 4; label_shape[2] = 5; @@ -160,6 +162,7 @@ TYPED_TEST(AccuracyLayerTest, TestForwardWithSpatialAxes) { } TYPED_TEST(AccuracyLayerTest, TestForwardIgnoreLabel) { + Caffe::set_mode(Caffe::CPU); LayerParameter layer_param; const TypeParam kIgnoreLabelValue = -1; layer_param.mutable_accuracy_param()->set_ignore_label(kIgnoreLabelValue); diff --git a/src/caffe/test/test_argmax_layer.cpp b/src/caffe/test/test_argmax_layer.cpp index 895c3d372ff..3487d42f21e 100644 --- a/src/caffe/test/test_argmax_layer.cpp +++ b/src/caffe/test/test_argmax_layer.cpp @@ -13,12 +13,13 @@ namespace caffe { template -class ArgMaxLayerTest : public CPUDeviceTest { +class ArgMaxLayerTest : public ::testing::Test { protected: ArgMaxLayerTest() : blob_bottom_(new Blob(10, 20, 1, 1)), blob_top_(new Blob()), top_k_(5) { + Caffe::set_mode(Caffe::CPU); Caffe::set_random_seed(1701); // fill the values FillerParameter filler_param; diff --git a/src/caffe/test/test_contrastive_loss_layer.cpp b/src/caffe/test/test_contrastive_loss_layer.cpp index 1e9447cbc51..d269fbc26f2 100644 --- a/src/caffe/test/test_contrastive_loss_layer.cpp +++ b/src/caffe/test/test_contrastive_loss_layer.cpp @@ -22,15 +22,15 @@ class ContrastiveLossLayerTest : public MultiDeviceTest { protected: ContrastiveLossLayerTest() - : blob_bottom_data_i_(new Blob(512, 2, 1, 1)), - blob_bottom_data_j_(new Blob(512, 2, 1, 1)), - blob_bottom_y_(new Blob(512, 1, 1, 1)), + : blob_bottom_data_i_(new Blob(128, 10, 1, 1)), + blob_bottom_data_j_(new Blob(128, 10, 1, 1)), + blob_bottom_y_(new Blob(128, 1, 1, 1)), blob_top_loss_(new 
Blob()) { // fill the values FillerParameter filler_param; - filler_param.set_min(-1.0); - filler_param.set_max(1.0); // distances~=1.0 to test both sides of margin - UniformFiller filler(filler_param); + filler_param.set_mean(0.0); + filler_param.set_std(0.3); // distances~=1.0 to test both sides of margin + GaussianFiller filler(filler_param); filler.Fill(this->blob_bottom_data_i_); blob_bottom_vec_.push_back(blob_bottom_data_i_); filler.Fill(this->blob_bottom_data_j_); @@ -79,8 +79,7 @@ TYPED_TEST(ContrastiveLossLayerTest, TestForward) { if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs loss += dist_sq; } else { - Dtype dist = std::max(margin - sqrt(dist_sq), 0.0); - loss += dist*dist; + loss += std::max(margin-dist_sq, Dtype(0)); } } loss /= static_cast(num) * Dtype(2); @@ -100,47 +99,4 @@ TYPED_TEST(ContrastiveLossLayerTest, TestGradient) { this->blob_top_vec_, 1); } -TYPED_TEST(ContrastiveLossLayerTest, TestForwardLegacy) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - layer_param.mutable_contrastive_loss_param()->set_legacy_version(true); - ContrastiveLossLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - // manually compute to compare - const Dtype margin = layer_param.contrastive_loss_param().margin(); - const int num = this->blob_bottom_data_i_->num(); - const int channels = this->blob_bottom_data_i_->channels(); - Dtype loss(0); - for (int i = 0; i < num; ++i) { - Dtype dist_sq(0); - for (int j = 0; j < channels; ++j) { - Dtype diff = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - - this->blob_bottom_data_j_->cpu_data()[i*channels+j]; - dist_sq += diff*diff; - } - if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs - loss += dist_sq; - } else { - loss += std::max(margin - dist_sq, Dtype(0.0)); - } - } - loss /= static_cast(num) * Dtype(2); - EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); 
-} - -TYPED_TEST(ContrastiveLossLayerTest, TestGradientLegacy) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - layer_param.mutable_contrastive_loss_param()->set_legacy_version(true); - ContrastiveLossLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - GradientChecker checker(1e-2, 1e-2, 1701); - // check the gradient for the first two bottom layers - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_, 0); - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_, 1); -} - } // namespace caffe diff --git a/src/caffe/test/test_convolution_layer.cpp b/src/caffe/test/test_convolution_layer.cpp index 67d41fff844..c1fe3b58c58 100644 --- a/src/caffe/test/test_convolution_layer.cpp +++ b/src/caffe/test/test_convolution_layer.cpp @@ -424,7 +424,7 @@ TYPED_TEST(ConvolutionLayerTest, TestGradientGroup) { #ifdef USE_CUDNN template -class CuDNNConvolutionLayerTest : public GPUDeviceTest { +class CuDNNConvolutionLayerTest : public ::testing::Test { protected: CuDNNConvolutionLayerTest() : blob_bottom_(new Blob(2, 3, 6, 4)), @@ -467,6 +467,7 @@ class CuDNNConvolutionLayerTest : public GPUDeviceTest { TYPED_TEST_CASE(CuDNNConvolutionLayerTest, TestDtypes); TYPED_TEST(CuDNNConvolutionLayerTest, TestSetupCuDNN) { + Caffe::set_mode(Caffe::GPU); this->blob_bottom_vec_.push_back(this->blob_bottom_2_); this->blob_top_vec_.push_back(this->blob_top_2_); LayerParameter layer_param; @@ -504,6 +505,7 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSetupCuDNN) { } TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionCuDNN) { + Caffe::set_mode(Caffe::GPU); this->blob_bottom_vec_.push_back(this->blob_bottom_2_); this->blob_top_vec_.push_back(this->blob_top_2_); LayerParameter layer_param; @@ -539,6 +541,7 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionCuDNN) { } TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionGroupCuDNN) { + 
Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); @@ -569,7 +572,7 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSobelConvolutionCuDNN) { // Test separable convolution by computing the Sobel operator // as a single filter then comparing the result // as the convolution of two rectangular filters. - + Caffe::set_mode(Caffe::GPU); // Fill bottoms with identical Gaussian noise. shared_ptr > filler; FillerParameter filler_param; @@ -662,6 +665,7 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSobelConvolutionCuDNN) { } TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); @@ -679,6 +683,7 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientCuDNN) { } TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientGroupCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); diff --git a/src/caffe/test/test_data/generate_sample_data.py b/src/caffe/test/test_data/generate_sample_data.py index ab5572685cb..e5dbc3406d8 100644 --- a/src/caffe/test/test_data/generate_sample_data.py +++ b/src/caffe/test/test_data/generate_sample_data.py @@ -5,8 +5,6 @@ import numpy as np import h5py -script_dir = os.path.dirname(os.path.abspath(__file__)) - num_cols = 8 num_rows = 10 height = 6 @@ -29,12 +27,12 @@ print data print label -with h5py.File(script_dir + '/sample_data.h5', 'w') as f: +with h5py.File(os.path.dirname(__file__) + '/sample_data.h5', 'w') as f: f['data'] = data f['label'] = label f['label2'] = label2 -with h5py.File(script_dir + '/sample_data_2_gzip.h5', 'w') as f: +with h5py.File(os.path.dirname(__file__) + '/sample_data_2_gzip.h5', 'w') as f: f.create_dataset( 'data', data=data + total_size, compression='gzip', compression_opts=1 @@ -48,6 +46,6 @@ 
compression='gzip', compression_opts=1 ) -with open(script_dir + '/sample_data_list.txt', 'w') as f: - f.write(script_dir + '/sample_data.h5\n') - f.write(script_dir + '/sample_data_2_gzip.h5\n') +with open(os.path.dirname(__file__) + '/sample_data_list.txt', 'w') as f: + f.write(os.path.dirname(__file__) + '/sample_data.h5\n') + f.write(os.path.dirname(__file__) + '/sample_data_2_gzip.h5\n') diff --git a/src/caffe/test/test_dummy_data_layer.cpp b/src/caffe/test/test_dummy_data_layer.cpp index c9ed38db3a5..99548352746 100644 --- a/src/caffe/test/test_dummy_data_layer.cpp +++ b/src/caffe/test/test_dummy_data_layer.cpp @@ -13,7 +13,7 @@ namespace caffe { template -class DummyDataLayerTest : public CPUDeviceTest { +class DummyDataLayerTest : public ::testing::Test { protected: DummyDataLayerTest() : blob_top_a_(new Blob()), @@ -44,6 +44,7 @@ class DummyDataLayerTest : public CPUDeviceTest { TYPED_TEST_CASE(DummyDataLayerTest, TestDtypes); TYPED_TEST(DummyDataLayerTest, TestOneTopConstant) { + Caffe::set_mode(Caffe::CPU); LayerParameter param; DummyDataParameter* dummy_data_param = param.mutable_dummy_data_param(); dummy_data_param->add_num(5); @@ -73,6 +74,7 @@ TYPED_TEST(DummyDataLayerTest, TestOneTopConstant) { } TYPED_TEST(DummyDataLayerTest, TestTwoTopConstant) { + Caffe::set_mode(Caffe::CPU); LayerParameter param; DummyDataParameter* dummy_data_param = param.mutable_dummy_data_param(); dummy_data_param->add_num(5); @@ -111,6 +113,7 @@ TYPED_TEST(DummyDataLayerTest, TestTwoTopConstant) { } TYPED_TEST(DummyDataLayerTest, TestThreeTopConstantGaussianConstant) { + Caffe::set_mode(Caffe::CPU); LayerParameter param; DummyDataParameter* dummy_data_param = param.mutable_dummy_data_param(); dummy_data_param->add_num(5); diff --git a/src/caffe/test/test_filler.cpp b/src/caffe/test/test_filler.cpp index 728b8dc5f0d..e04b0fd22af 100644 --- a/src/caffe/test/test_filler.cpp +++ b/src/caffe/test/test_filler.cpp @@ -142,102 +142,4 @@ TYPED_TEST(GaussianFillerTest, TestFill) { 
EXPECT_LE(var, target_var * 5.); } -template -class XavierFillerTest : public ::testing::Test { - protected: - XavierFillerTest() - : blob_(new Blob(1000, 2, 4, 5)), - filler_param_() { - } - virtual void test_params(FillerParameter_VarianceNorm variance_norm, - Dtype n) { - this->filler_param_.set_variance_norm(variance_norm); - this->filler_.reset(new XavierFiller(this->filler_param_)); - this->filler_->Fill(blob_); - EXPECT_TRUE(this->blob_); - const int count = this->blob_->count(); - const Dtype* data = this->blob_->cpu_data(); - Dtype mean = 0.; - Dtype ex2 = 0.; - for (int i = 0; i < count; ++i) { - mean += data[i]; - ex2 += data[i] * data[i]; - } - mean /= count; - ex2 /= count; - Dtype std = sqrt(ex2 - mean*mean); - Dtype target_std = sqrt(2.0 / n); - EXPECT_NEAR(mean, 0.0, 0.1); - EXPECT_NEAR(std, target_std, 0.1); - } - virtual ~XavierFillerTest() { delete blob_; } - Blob* const blob_; - FillerParameter filler_param_; - shared_ptr > filler_; -}; - -TYPED_TEST_CASE(XavierFillerTest, TestDtypes); - -TYPED_TEST(XavierFillerTest, TestFillFanIn) { - TypeParam n = 2*4*5; - this->test_params(FillerParameter_VarianceNorm_FAN_IN, n); -} -TYPED_TEST(XavierFillerTest, TestFillFanOut) { - TypeParam n = 1000*4*5; - this->test_params(FillerParameter_VarianceNorm_FAN_OUT, n); -} -TYPED_TEST(XavierFillerTest, TestFillAverage) { - TypeParam n = (2*4*5 + 1000*4*5) / 2.0; - this->test_params(FillerParameter_VarianceNorm_AVERAGE, n); -} - -template -class MSRAFillerTest : public ::testing::Test { - protected: - MSRAFillerTest() - : blob_(new Blob(1000, 2, 4, 5)), - filler_param_() { - } - virtual void test_params(FillerParameter_VarianceNorm variance_norm, - Dtype n) { - this->filler_param_.set_variance_norm(variance_norm); - this->filler_.reset(new MSRAFiller(this->filler_param_)); - this->filler_->Fill(blob_); - EXPECT_TRUE(this->blob_); - const int count = this->blob_->count(); - const Dtype* data = this->blob_->cpu_data(); - Dtype mean = 0.; - Dtype ex2 = 0.; - for (int 
i = 0; i < count; ++i) { - mean += data[i]; - ex2 += data[i] * data[i]; - } - mean /= count; - ex2 /= count; - Dtype std = sqrt(ex2 - mean*mean); - Dtype target_std = sqrt(2.0 / n); - EXPECT_NEAR(mean, 0.0, 0.1); - EXPECT_NEAR(std, target_std, 0.1); - } - virtual ~MSRAFillerTest() { delete blob_; } - Blob* const blob_; - FillerParameter filler_param_; - shared_ptr > filler_; -}; - -TYPED_TEST_CASE(MSRAFillerTest, TestDtypes); - -TYPED_TEST(MSRAFillerTest, TestFillFanIn) { - TypeParam n = 2*4*5; - this->test_params(FillerParameter_VarianceNorm_FAN_IN, n); -} -TYPED_TEST(MSRAFillerTest, TestFillFanOut) { - TypeParam n = 1000*4*5; - this->test_params(FillerParameter_VarianceNorm_FAN_OUT, n); -} -TYPED_TEST(MSRAFillerTest, TestFillAverage) { - TypeParam n = (2*4*5 + 1000*4*5) / 2.0; - this->test_params(FillerParameter_VarianceNorm_AVERAGE, n); -} - } // namespace caffe diff --git a/src/caffe/test/test_flatten_layer.cpp b/src/caffe/test/test_flatten_layer.cpp index 7b6757cba32..3042d293cf7 100644 --- a/src/caffe/test/test_flatten_layer.cpp +++ b/src/caffe/test/test_flatten_layer.cpp @@ -42,48 +42,13 @@ TYPED_TEST(FlattenLayerTest, TestSetup) { LayerParameter layer_param; FlattenLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - ASSERT_EQ(this->blob_top_->num_axes(), 2); - EXPECT_EQ(this->blob_top_->shape(0), 2); - EXPECT_EQ(this->blob_top_->shape(1), 3 * 6 * 5); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 3 * 6 * 5); + EXPECT_EQ(this->blob_top_->height(), 1); + EXPECT_EQ(this->blob_top_->width(), 1); } -TYPED_TEST(FlattenLayerTest, TestSetupWithAxis) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - layer_param.mutable_flatten_param()->set_axis(2); - FlattenLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - ASSERT_EQ(this->blob_top_->num_axes(), 3); - EXPECT_EQ(this->blob_top_->shape(0), 2); - EXPECT_EQ(this->blob_top_->shape(1), 3); - 
EXPECT_EQ(this->blob_top_->shape(2), 6 * 5); -} - -TYPED_TEST(FlattenLayerTest, TestSetupWithEndAxis) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - layer_param.mutable_flatten_param()->set_end_axis(-2); - FlattenLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - ASSERT_EQ(this->blob_top_->num_axes(), 3); - EXPECT_EQ(this->blob_top_->shape(0), 2); - EXPECT_EQ(this->blob_top_->shape(1), 3 * 6); - EXPECT_EQ(this->blob_top_->shape(2), 5); -} - -TYPED_TEST(FlattenLayerTest, TestSetupWithStartAndEndAxis) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - layer_param.mutable_flatten_param()->set_axis(0); - layer_param.mutable_flatten_param()->set_end_axis(-2); - FlattenLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - ASSERT_EQ(this->blob_top_->num_axes(), 2); - EXPECT_EQ(this->blob_top_->shape(0), 2 * 3 * 6); - EXPECT_EQ(this->blob_top_->shape(1), 5); -} - -TYPED_TEST(FlattenLayerTest, TestForward) { +TYPED_TEST(FlattenLayerTest, Test) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; FlattenLayer layer(layer_param); @@ -106,4 +71,5 @@ TYPED_TEST(FlattenLayerTest, TestGradient) { this->blob_top_vec_); } + } // namespace caffe diff --git a/src/caffe/test/test_gradient_based_solver.cpp b/src/caffe/test/test_gradient_based_solver.cpp index c9135d64e70..eb2569c04f2 100644 --- a/src/caffe/test/test_gradient_based_solver.cpp +++ b/src/caffe/test/test_gradient_based_solver.cpp @@ -23,7 +23,7 @@ class GradientBasedSolverTest : public MultiDeviceTest { protected: GradientBasedSolverTest() : - seed_(1701), num_(4), channels_(3), height_(10), width_(10) {} + seed_(1701), num_(5), channels_(3), height_(10), width_(10) {} shared_ptr > solver_; int seed_; @@ -56,21 +56,19 @@ class GradientBasedSolverTest : public MultiDeviceTest { } void RunLeastSquaresSolver(const Dtype learning_rate, - const Dtype weight_decay, const Dtype momentum, 
const int num_iters, - const int iter_size = 1) { + const Dtype weight_decay, const Dtype momentum, const int num_iters) { ostringstream proto; proto << "max_iter: " << num_iters << " " "base_lr: " << learning_rate << " " "lr_policy: 'fixed' " - "iter_size: " << iter_size << " " "net_param { " " name: 'TestNetwork' " " layer { " " name: 'data' " " type: 'DummyData' " " dummy_data_param { " - " num: " << num_ / iter_size << " " + " num: " << num_ << " " " channels: " << channels_ << " " " height: " << height_ << " " " width: " << width_ << " " @@ -78,10 +76,6 @@ class GradientBasedSolverTest : public MultiDeviceTest { " height: 1 " " width: 1 " " data_filler { " - " type: 'constant' " - " value: 1.0 " - " } " - " data_filler { " " type: 'gaussian' " " std: 1.0 " " } " @@ -276,45 +270,6 @@ class GradientBasedSolverTest : public MultiDeviceTest { } } - void CheckAccumulation(const Dtype kLearningRate, const Dtype kWeightDecay, - const Dtype kMomentum, const int kNumIters, const int kIterSize) { - const double kPrecision = 1e-2; - const double kMinPrecision = 1e-7; - // Solve without accumulation and save parameters. - this->RunLeastSquaresSolver(kLearningRate, kWeightDecay, kMomentum, - kNumIters); - // Save parameters for comparison. - Net& net = *this->solver_->net(); - const vector > >& param_blobs = - net.layer_by_name("innerprod")->blobs(); - vector > > noaccum_params(param_blobs.size()); - for (int i = 0; i < param_blobs.size(); ++i) { - noaccum_params[i].reset(new Blob()); - noaccum_params[i]->CopyFrom(*param_blobs[i], false, true); - } - // Solve by equivalent accumulation of gradients over divided batches. - this->RunLeastSquaresSolver(kLearningRate, kWeightDecay, kMomentum, - kNumIters, kIterSize); - Net& net_accum = *this->solver_->net(); - const vector > >& accum_params = - net_accum.layer_by_name("innerprod")->blobs(); - // Compare accumulated parameters against no accumulation standard. 
- const int D = this->channels_ * this->height_ * this->width_; - for (int i = 0; i < D; ++i) { - const Dtype expected_param = noaccum_params[0]->cpu_data()[i]; - const Dtype accum_param = accum_params[0]->cpu_data()[i]; - const Dtype error_margin = std::max(kMinPrecision, kPrecision * - std::min(fabs(expected_param), fabs(accum_param))); - EXPECT_NEAR(expected_param, accum_param, error_margin); - } - ASSERT_EQ(1, accum_params[1]->count()); - const Dtype expected_bias = noaccum_params[1]->cpu_data()[0]; - const Dtype accum_bias = accum_params[1]->cpu_data()[0]; - const Dtype error_margin = std::max(kMinPrecision, kPrecision * - std::min(fabs(expected_bias), fabs(accum_bias))); - EXPECT_NEAR(expected_bias, accum_bias, error_margin); - } - // Test that the correct update is computed for a regularized least squares // problem: // @@ -417,16 +372,6 @@ TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverything) { } } -TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - const int kIterSize = 2; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} template class AdaGradSolverTest : public GradientBasedSolverTest { @@ -471,16 +416,6 @@ TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateWithEverything) { } } -TYPED_TEST(AdaGradSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.0; - const int kNumIters = 4; - const int kIterSize = 2; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} template class NesterovSolverTest : public GradientBasedSolverTest { @@ -547,15 +482,4 @@ TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithEverything) 
{ } } -TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - const int kIterSize = 2; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} - } // namespace caffe diff --git a/src/caffe/test/test_im2col_kernel.cu b/src/caffe/test/test_im2col_kernel.cu index 0017ac23e69..ee684c00255 100644 --- a/src/caffe/test/test_im2col_kernel.cu +++ b/src/caffe/test/test_im2col_kernel.cu @@ -25,7 +25,7 @@ __global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; template -class Im2colKernelTest : public GPUDeviceTest { +class Im2colKernelTest : public ::testing::Test { protected: Im2colKernelTest() // big so launches > 1024 threads @@ -68,6 +68,8 @@ class Im2colKernelTest : public GPUDeviceTest { TYPED_TEST_CASE(Im2colKernelTest, TestDtypes); TYPED_TEST(Im2colKernelTest, TestGPU) { + Caffe::set_mode(Caffe::GPU); + // Reshape the blobs to correct size for im2col output this->blob_top_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, diff --git a/src/caffe/test/test_math_functions.cpp b/src/caffe/test/test_math_functions.cpp index a095b544e17..667f744bdd7 100644 --- a/src/caffe/test/test_math_functions.cpp +++ b/src/caffe/test/test_math_functions.cpp @@ -15,10 +15,8 @@ namespace caffe { -template -class MathFunctionsTest : public MultiDeviceTest { - typedef typename TypeParam::Dtype Dtype; - +template +class MathFunctionsTest : public ::testing::Test { protected: MathFunctionsTest() : blob_bottom_(new Blob()), @@ -66,19 +64,14 @@ class MathFunctionsTest : public MultiDeviceTest { Blob* const blob_top_; }; -template -class CPUMathFunctionsTest - : public MathFunctionsTest > { -}; - -TYPED_TEST_CASE(CPUMathFunctionsTest, TestDtypes); 
+TYPED_TEST_CASE(MathFunctionsTest, TestDtypes); -TYPED_TEST(CPUMathFunctionsTest, TestNothing) { +TYPED_TEST(MathFunctionsTest, TestNothing) { // The first test case of a test suite takes the longest time // due to the set up overhead. } -TYPED_TEST(CPUMathFunctionsTest, TestHammingDistance) { +TYPED_TEST(MathFunctionsTest, TestHammingDistanceCPU) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); const TypeParam* y = this->blob_top_->cpu_data(); @@ -86,7 +79,7 @@ TYPED_TEST(CPUMathFunctionsTest, TestHammingDistance) { caffe_cpu_hamming_distance(n, x, y)); } -TYPED_TEST(CPUMathFunctionsTest, TestAsum) { +TYPED_TEST(MathFunctionsTest, TestAsumCPU) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); TypeParam std_asum = 0; @@ -97,7 +90,7 @@ TYPED_TEST(CPUMathFunctionsTest, TestAsum) { EXPECT_LT((cpu_asum - std_asum) / std_asum, 1e-2); } -TYPED_TEST(CPUMathFunctionsTest, TestSign) { +TYPED_TEST(MathFunctionsTest, TestSignCPU) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); caffe_cpu_sign(n, x, this->blob_bottom_->mutable_cpu_diff()); @@ -107,7 +100,7 @@ TYPED_TEST(CPUMathFunctionsTest, TestSign) { } } -TYPED_TEST(CPUMathFunctionsTest, TestSgnbit) { +TYPED_TEST(MathFunctionsTest, TestSgnbitCPU) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); caffe_cpu_sgnbit(n, x, this->blob_bottom_->mutable_cpu_diff()); @@ -117,7 +110,7 @@ TYPED_TEST(CPUMathFunctionsTest, TestSgnbit) { } } -TYPED_TEST(CPUMathFunctionsTest, TestFabs) { +TYPED_TEST(MathFunctionsTest, TestFabsCPU) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); caffe_abs(n, x, this->blob_bottom_->mutable_cpu_diff()); @@ -127,7 +120,7 @@ TYPED_TEST(CPUMathFunctionsTest, TestFabs) { } } -TYPED_TEST(CPUMathFunctionsTest, TestScale) { +TYPED_TEST(MathFunctionsTest, TestScaleCPU) { int n = 
this->blob_bottom_->count(); TypeParam alpha = this->blob_bottom_->cpu_diff()[caffe_rng_rand() % this->blob_bottom_->count()]; @@ -140,10 +133,11 @@ TYPED_TEST(CPUMathFunctionsTest, TestScale) { } } -TYPED_TEST(CPUMathFunctionsTest, TestCopy) { +TYPED_TEST(MathFunctionsTest, TestCopyCPU) { const int n = this->blob_bottom_->count(); const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); TypeParam* top_data = this->blob_top_->mutable_cpu_data(); + Caffe::set_mode(Caffe::CPU); caffe_copy(n, bottom_data, top_data); for (int i = 0; i < n; ++i) { EXPECT_EQ(bottom_data[i], top_data[i]); @@ -152,14 +146,8 @@ TYPED_TEST(CPUMathFunctionsTest, TestCopy) { #ifndef CPU_ONLY -template -class GPUMathFunctionsTest : public MathFunctionsTest > { -}; - -TYPED_TEST_CASE(GPUMathFunctionsTest, TestDtypes); - // TODO: Fix caffe_gpu_hamming_distance and re-enable this test. -TYPED_TEST(GPUMathFunctionsTest, DISABLED_TestHammingDistance) { +TYPED_TEST(MathFunctionsTest, DISABLED_TestHammingDistanceGPU) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); const TypeParam* y = this->blob_top_->cpu_data(); @@ -170,7 +158,7 @@ TYPED_TEST(GPUMathFunctionsTest, DISABLED_TestHammingDistance) { EXPECT_EQ(reference_distance, computed_distance); } -TYPED_TEST(GPUMathFunctionsTest, TestAsum) { +TYPED_TEST(MathFunctionsTest, TestAsumGPU) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); TypeParam std_asum = 0; @@ -182,7 +170,7 @@ TYPED_TEST(GPUMathFunctionsTest, TestAsum) { EXPECT_LT((gpu_asum - std_asum) / std_asum, 1e-2); } -TYPED_TEST(GPUMathFunctionsTest, TestSign) { +TYPED_TEST(MathFunctionsTest, TestSignGPU) { int n = this->blob_bottom_->count(); caffe_gpu_sign(n, this->blob_bottom_->gpu_data(), this->blob_bottom_->mutable_gpu_diff()); @@ -193,7 +181,7 @@ TYPED_TEST(GPUMathFunctionsTest, TestSign) { } } -TYPED_TEST(GPUMathFunctionsTest, TestSgnbit) { +TYPED_TEST(MathFunctionsTest, TestSgnbitGPU) { int n = 
this->blob_bottom_->count(); caffe_gpu_sgnbit(n, this->blob_bottom_->gpu_data(), this->blob_bottom_->mutable_gpu_diff()); @@ -204,7 +192,7 @@ TYPED_TEST(GPUMathFunctionsTest, TestSgnbit) { } } -TYPED_TEST(GPUMathFunctionsTest, TestFabs) { +TYPED_TEST(MathFunctionsTest, TestFabsGPU) { int n = this->blob_bottom_->count(); caffe_gpu_abs(n, this->blob_bottom_->gpu_data(), this->blob_bottom_->mutable_gpu_diff()); @@ -215,7 +203,7 @@ TYPED_TEST(GPUMathFunctionsTest, TestFabs) { } } -TYPED_TEST(GPUMathFunctionsTest, TestScale) { +TYPED_TEST(MathFunctionsTest, TestScaleGPU) { int n = this->blob_bottom_->count(); TypeParam alpha = this->blob_bottom_->cpu_diff()[caffe_rng_rand() % this->blob_bottom_->count()]; @@ -228,10 +216,11 @@ TYPED_TEST(GPUMathFunctionsTest, TestScale) { } } -TYPED_TEST(GPUMathFunctionsTest, TestCopy) { +TYPED_TEST(MathFunctionsTest, TestCopyGPU) { const int n = this->blob_bottom_->count(); const TypeParam* bottom_data = this->blob_bottom_->gpu_data(); TypeParam* top_data = this->blob_top_->mutable_gpu_data(); + Caffe::set_mode(Caffe::GPU); caffe_copy(n, bottom_data, top_data); bottom_data = this->blob_bottom_->cpu_data(); top_data = this->blob_top_->mutable_cpu_data(); diff --git a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp index b2db984feb1..9038017e3e2 100644 --- a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp +++ b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp @@ -16,7 +16,7 @@ namespace caffe { template -class MultinomialLogisticLossLayerTest : public CPUDeviceTest { +class MultinomialLogisticLossLayerTest : public ::testing::Test { protected: MultinomialLogisticLossLayerTest() : blob_bottom_data_(new Blob(10, 5, 1, 1)), @@ -51,6 +51,7 @@ TYPED_TEST_CASE(MultinomialLogisticLossLayerTest, TestDtypes); TYPED_TEST(MultinomialLogisticLossLayerTest, TestGradientCPU) { LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); MultinomialLogisticLossLayer 
layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); GradientChecker checker(1e-2, 2*1e-2, 1701, 0, 0.05); diff --git a/src/caffe/test/test_net.cpp b/src/caffe/test/test_net.cpp index 782a96bc9b6..08106e79274 100644 --- a/src/caffe/test/test_net.cpp +++ b/src/caffe/test/test_net.cpp @@ -613,103 +613,6 @@ class NetTest : public MultiDeviceTest { InitNetFromProtoString(proto); } - virtual void InitSkipPropNet(bool test_skip_true) { - string proto = - "name: 'SkipPropTestNetwork' " - "layer { " - " name: 'data' " - " type: 'DummyData' " - " dummy_data_param { " - " shape { " - " dim: 5 " - " dim: 2 " - " dim: 3 " - " dim: 4 " - " } " - " data_filler { " - " type: 'gaussian' " - " std: 0.01 " - " } " - " shape { " - " dim: 5 " - " } " - " data_filler { " - " type: 'constant' " - " value: 0 " - " } " - " } " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'silence' " - " bottom: 'label' " - " type: 'Silence' " - "} " - "layer { " - " name: 'innerproduct' " - " type: 'InnerProduct' " - " inner_product_param { " - " num_output: 1 " - " weight_filler { " - " type: 'gaussian' " - " std: 0.01 " - " } " - " bias_filler { " - " type: 'constant' " - " value: 0 " - " } " - " } " - " param { " - " lr_mult: 1 " - " decay_mult: 1 " - " } " - " param { " - " lr_mult: 2 " - " decay_mult: 0 " - " } " - " bottom: 'data' " - " top: 'innerproduct' " - "} " - "layer { " - " name: 'ip_fake_labels' " - " type: 'InnerProduct' " - " inner_product_param { " - " num_output: 1 " - " weight_filler { " - " type: 'gaussian' " - " std: 0.01 " - " } " - " bias_filler { " - " type: 'constant' " - " value: 0 " - " } " - " } " - " bottom: 'data' " - " top: 'fake_labels' " - "} " - "layer { " - " name: 'argmax' " - " bottom: 'fake_labels' " - " top: 'label_argmax' " - " type: 'ArgMax' " - "} " - "layer { " - " name: 'loss' " - " bottom: 'innerproduct' " - " bottom: 'label_argmax' "; - if (test_skip_true) - proto += " propagate_down: [true, false] "; - else - 
proto += " propagate_down: [true, true] "; - proto += - " top: 'cross_entropy_loss' " - " type: 'SigmoidCrossEntropyLoss' " - " loss_weight: 0.1 " - "} "; - InitNetFromProtoString(proto); - } - int seed_; shared_ptr > net_; }; @@ -2321,52 +2224,4 @@ TYPED_TEST(NetTest, TestReshape) { } } -TYPED_TEST(NetTest, TestSkipPropagateDown) { - // check bottom_need_backward if propagate_down is true - this->InitSkipPropNet(false); - vector vec_layer_need_backward = this->net_->layer_need_backward(); - for (int layer_id = 0; layer_id < this->net_->layers().size(); ++layer_id) { - string layer_name = this->net_->layer_names()[layer_id]; - if (layer_name == "loss") { - // access to bottom_need_backward coresponding to label's blob - bool need_back = this->net_->bottom_need_backward()[layer_id][1]; - // if propagate_down is true, the loss layer will try to - // backpropagate on labels - EXPECT_TRUE(need_back) << "bottom_need_backward should be True"; - } - // layer_need_backward should be True except for data and silence layers - if (layer_name.find("data") != std::string::npos || - layer_name == "silence") { - EXPECT_FALSE(vec_layer_need_backward[layer_id]) - << "layer_need_backward for " << layer_name << " should be False"; - } else { - EXPECT_TRUE(vec_layer_need_backward[layer_id]) - << "layer_need_backward for " << layer_name << " should be True"; - } - } - // check bottom_need_backward if propagat_down is false - this->InitSkipPropNet(true); - vec_layer_need_backward.clear(); - vec_layer_need_backward = this->net_->layer_need_backward(); - for (int layer_id = 0; layer_id < this->net_->layers().size(); ++layer_id) { - string layer_name = this->net_->layer_names()[layer_id]; - if (layer_name == "loss") { - // access to bottom_need_backward coresponding to label's blob - bool need_back = this->net_->bottom_need_backward()[layer_id][1]; - // if propagate_down is false, the loss layer will not try to - // backpropagate on labels - EXPECT_FALSE(need_back) << "bottom_need_backward 
should be False"; - } - // layer_need_backward should be False except for innerproduct and - // loss layers - if (layer_name == "innerproduct" || layer_name == "loss") { - EXPECT_TRUE(vec_layer_need_backward[layer_id]) - << "layer_need_backward for " << layer_name << " should be True"; - } else { - EXPECT_FALSE(vec_layer_need_backward[layer_id]) - << "layer_need_backward for " << layer_name << " should be False"; - } - } -} - } // namespace caffe diff --git a/src/caffe/test/test_neuron_layer.cpp b/src/caffe/test/test_neuron_layer.cpp index c6e4d27b903..c9d52f247a6 100644 --- a/src/caffe/test/test_neuron_layer.cpp +++ b/src/caffe/test/test_neuron_layer.cpp @@ -117,49 +117,6 @@ class NeuronLayerTest : public MultiDeviceTest { + slope_data[c] * std::min(bottom_data[i], (Dtype)(0))); } } - - void LogBottomInit() { - FillerParameter filler_param; - GaussianFiller filler(filler_param); - filler.Fill(this->blob_bottom_); - Dtype* bottom_data = this->blob_bottom_->mutable_cpu_data(); - caffe_exp(this->blob_bottom_->count(), bottom_data, bottom_data); - } - - void TestLogForward(const float base, const float scale, const float shift) { - LogBottomInit(); - LayerParameter layer_param; - layer_param.mutable_log_param()->set_base(base); - layer_param.mutable_log_param()->set_scale(scale); - layer_param.mutable_log_param()->set_shift(shift); - LogLayer layer(layer_param); - layer.SetUp(blob_bottom_vec_, blob_top_vec_); - layer.Forward(blob_bottom_vec_, blob_top_vec_); - const Dtype kDelta = 2e-4; - const Dtype* bottom_data = blob_bottom_->cpu_data(); - const Dtype* top_data = blob_top_->cpu_data(); - for (int i = 0; i < blob_bottom_->count(); ++i) { - const Dtype bottom_val = bottom_data[i]; - const Dtype top_val = top_data[i]; - if (base == -1) { - EXPECT_NEAR(top_val, log(shift + scale * bottom_val), kDelta); - } else { - EXPECT_NEAR(top_val, log(shift + scale * bottom_val) / log(base), - kDelta); - } - } - } - - void TestLogGradient(const float base, const float scale, const 
float shift) { - LogBottomInit(); - LayerParameter layer_param; - layer_param.mutable_log_param()->set_base(base); - layer_param.mutable_log_param()->set_scale(scale); - layer_param.mutable_log_param()->set_shift(shift); - LogLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-2); - checker.CheckGradientEltwise(&layer, blob_bottom_vec_, blob_top_vec_); - } }; TYPED_TEST_CASE(NeuronLayerTest, TestDtypesAndDevices); @@ -382,88 +339,6 @@ TYPED_TEST(NeuronLayerTest, TestExpGradientBase2Shift1Scale3) { this->TestExpGradient(kBase, kScale, kShift); } -TYPED_TEST(NeuronLayerTest, TestLogLayer) { - typedef typename TypeParam::Dtype Dtype; - // Test default base of "-1" -- should actually set base := e. - const Dtype kBase = -1; - const Dtype kScale = 1; - const Dtype kShift = 0; - this->TestLogForward(kBase, kScale, kShift); -} - -TYPED_TEST(NeuronLayerTest, TestLogGradient) { - typedef typename TypeParam::Dtype Dtype; - // Test default base of "-1" -- should actually set base := e. - const Dtype kBase = -1; - const Dtype kScale = 1; - const Dtype kShift = 0; - this->TestLogGradient(kBase, kScale, kShift); -} - -TYPED_TEST(NeuronLayerTest, TestLogLayerBase2) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kBase = 2; - const Dtype kScale = 1; - const Dtype kShift = 0; - this->TestLogForward(kBase, kScale, kShift); -} - -TYPED_TEST(NeuronLayerTest, TestLogGradientBase2) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kBase = 2; - const Dtype kScale = 1; - const Dtype kShift = 0; - this->TestLogGradient(kBase, kScale, kShift); -} - -TYPED_TEST(NeuronLayerTest, TestLogLayerBase2Shift1) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kBase = 2; - const Dtype kScale = 1; - const Dtype kShift = 1; - this->TestLogForward(kBase, kScale, kShift); -} - -TYPED_TEST(NeuronLayerTest, TestLogGradientBase2Shift1) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kBase = 2; - const Dtype kScale = 1; - const Dtype kShift = 1; - 
this->TestLogGradient(kBase, kScale, kShift); -} - -TYPED_TEST(NeuronLayerTest, TestLogLayerBase2Scale3) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kBase = 2; - const Dtype kScale = 3; - const Dtype kShift = 0; - this->TestLogForward(kBase, kScale, kShift); -} - -TYPED_TEST(NeuronLayerTest, TestLogGradientBase2Scale3) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kBase = 2; - const Dtype kScale = 3; - const Dtype kShift = 0; - this->TestLogGradient(kBase, kScale, kShift); -} - -TYPED_TEST(NeuronLayerTest, TestLogLayerBase2Shift1Scale3) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kBase = 2; - const Dtype kScale = 3; - const Dtype kShift = 1; - this->TestLogForward(kBase, kScale, kShift); -} - -TYPED_TEST(NeuronLayerTest, TestLogGradientBase2Shift1Scale3) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kBase = 2; - const Dtype kScale = 3; - const Dtype kShift = 1; - this->TestLogGradient(kBase, kScale, kShift); -} - TYPED_TEST(NeuronLayerTest, TestDropoutHalf) { const float kDropoutRatio = 0.5; this->TestDropoutForward(kDropoutRatio); @@ -666,10 +541,14 @@ TYPED_TEST(NeuronLayerTest, TestPReLUInPlace) { caffe_copy(ip2.blobs()[0]->count(), ip.blobs()[0]->cpu_data(), ip2.blobs()[0]->mutable_cpu_data()); // Forward in-place + ip.Reshape(this->blob_bottom_vec_, this->blob_top_vec_); ip.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + prelu.Reshape(this->blob_top_vec_, this->blob_top_vec_); prelu.Forward(this->blob_top_vec_, this->blob_top_vec_); // Forward non-in-place + ip2.Reshape(blob_bottom_vec_2, blob_middle_vec_2); ip2.Forward(blob_bottom_vec_2, blob_middle_vec_2); + prelu2.Reshape(blob_middle_vec_2, blob_top_vec_2); prelu2.Forward(blob_middle_vec_2, blob_top_vec_2); // Check numbers for (int s = 0; s < blob_top_2->count(); ++s) { @@ -711,7 +590,7 @@ TYPED_TEST(NeuronLayerTest, TestPReLUInPlace) { #ifdef USE_CUDNN template -class CuDNNNeuronLayerTest : public GPUDeviceTest { +class CuDNNNeuronLayerTest : 
public ::testing::Test { protected: CuDNNNeuronLayerTest() : blob_bottom_(new Blob(2, 3, 4, 5)), @@ -734,6 +613,7 @@ class CuDNNNeuronLayerTest : public GPUDeviceTest { TYPED_TEST_CASE(CuDNNNeuronLayerTest, TestDtypes); TYPED_TEST(CuDNNNeuronLayerTest, TestReLUCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNReLULayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -748,6 +628,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNReLULayer layer(layer_param); GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); @@ -756,6 +637,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestReLUWithNegativeSlopeCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CHECK(google::protobuf::TextFormat::ParseFromString( "relu_param { negative_slope: 0.01 }", &layer_param)); @@ -775,6 +657,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUWithNegativeSlopeCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientWithNegativeSlopeCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CHECK(google::protobuf::TextFormat::ParseFromString( "relu_param { negative_slope: 0.01 }", &layer_param)); @@ -785,6 +668,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientWithNegativeSlopeCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNSigmoidLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -801,6 +685,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidGradientCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNSigmoidLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); @@ -809,6 +694,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, 
TestSigmoidGradientCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestTanHCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNTanHLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -831,6 +717,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestTanHCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestTanHGradientCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNTanHLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); diff --git a/src/caffe/test/test_pooling_layer.cpp b/src/caffe/test/test_pooling_layer.cpp index 69f2d5c1135..e9964e7f0b7 100644 --- a/src/caffe/test/test_pooling_layer.cpp +++ b/src/caffe/test/test_pooling_layer.cpp @@ -608,7 +608,7 @@ TYPED_TEST(PoolingLayerTest, TestGradientAvePadded) { #ifdef USE_CUDNN template -class CuDNNPoolingLayerTest : public GPUDeviceTest { +class CuDNNPoolingLayerTest : public ::testing::Test { protected: CuDNNPoolingLayerTest() : blob_bottom_(new Blob()), @@ -963,6 +963,7 @@ class CuDNNPoolingLayerTest : public GPUDeviceTest { TYPED_TEST_CASE(CuDNNPoolingLayerTest, TestDtypes); TYPED_TEST(CuDNNPoolingLayerTest, TestSetupCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -976,6 +977,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestSetupCuDNN) { } TYPED_TEST(CuDNNPoolingLayerTest, TestSetupPaddedCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -992,6 +994,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestSetupPaddedCuDNN) { /* TYPED_TEST(CuDNNPoolingLayerTest, PrintBackwardCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; layer_param.set_kernelsize(3); layer_param.set_stride(2); @@ -1017,6 +1020,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, PrintBackwardCuDNN) { */ TYPED_TEST(CuDNNPoolingLayerTest, 
TestForwardMaxCuDNN) { + Caffe::set_mode(Caffe::GPU); this->TestForwardSquare(); this->TestForwardRectHigh(); this->TestForwardRectWide(); @@ -1026,6 +1030,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxCuDNN) { // the corresponding backward test. /* TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxTopMaskCuDNN) { + Caffe::set_mode(Caffe::GPU); this->blob_top_vec_.push_back(this->blob_top_mask_); this->TestForwardSquare(); this->TestForwardRectHigh(); @@ -1034,6 +1039,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxTopMaskCuDNN) { */ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxCuDNN) { + Caffe::set_mode(Caffe::GPU); for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; @@ -1053,6 +1059,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxCuDNN) { } TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxPaddedCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -1098,6 +1105,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxPaddedCuDNN) { /* TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxTopMaskCuDNN) { + Caffe::set_mode(Caffe::GPU); for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; @@ -1118,6 +1126,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxTopMaskCuDNN) { */ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardAveCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -1143,6 +1152,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardAveCuDNN) { } TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAveCuDNN) { + Caffe::set_mode(Caffe::GPU); for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter 
layer_param; @@ -1160,6 +1170,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAveCuDNN) { } TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAvePaddedCuDNN) { + Caffe::set_mode(Caffe::GPU); for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; diff --git a/src/caffe/test/test_softmax_layer.cpp b/src/caffe/test/test_softmax_layer.cpp index 996da4b8f7c..f6674422e56 100644 --- a/src/caffe/test/test_softmax_layer.cpp +++ b/src/caffe/test/test_softmax_layer.cpp @@ -82,7 +82,7 @@ TYPED_TEST(SoftmaxLayerTest, TestGradient) { #ifdef USE_CUDNN template -class CuDNNSoftmaxLayerTest : public GPUDeviceTest { +class CuDNNSoftmaxLayerTest : public ::testing::Test { protected: CuDNNSoftmaxLayerTest() : blob_bottom_(new Blob(2, 10, 2, 3)), @@ -104,6 +104,7 @@ class CuDNNSoftmaxLayerTest : public GPUDeviceTest { TYPED_TEST_CASE(CuDNNSoftmaxLayerTest, TestDtypes); TYPED_TEST(CuDNNSoftmaxLayerTest, TestForwardCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNSoftmaxLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -137,6 +138,7 @@ TYPED_TEST(CuDNNSoftmaxLayerTest, TestForwardCuDNN) { } TYPED_TEST(CuDNNSoftmaxLayerTest, TestGradientCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNSoftmaxLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); diff --git a/src/caffe/test/test_stochastic_pooling.cpp b/src/caffe/test/test_stochastic_pooling.cpp index f84464c322c..12962c65d85 100644 --- a/src/caffe/test/test_stochastic_pooling.cpp +++ b/src/caffe/test/test_stochastic_pooling.cpp @@ -16,10 +16,8 @@ using std::min; namespace caffe { -template -class StochasticPoolingLayerTest : public MultiDeviceTest { - typedef typename TypeParam::Dtype Dtype; - +template +class StochasticPoolingLayerTest : public ::testing::Test { protected: StochasticPoolingLayerTest() : blob_bottom_(new Blob()), @@ -47,14 +45,9 @@ class 
StochasticPoolingLayerTest : public MultiDeviceTest { vector*> blob_top_vec_; }; -template -class CPUStochasticPoolingLayerTest - : public StochasticPoolingLayerTest > { -}; - -TYPED_TEST_CASE(CPUStochasticPoolingLayerTest, TestDtypes); +TYPED_TEST_CASE(StochasticPoolingLayerTest, TestDtypes); -TYPED_TEST(CPUStochasticPoolingLayerTest, TestSetup) { +TYPED_TEST(StochasticPoolingLayerTest, TestSetup) { LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -67,16 +60,8 @@ TYPED_TEST(CPUStochasticPoolingLayerTest, TestSetup) { EXPECT_EQ(this->blob_top_->width(), 2); } -#ifndef CPU_ONLY - -template -class GPUStochasticPoolingLayerTest - : public StochasticPoolingLayerTest > { -}; - -TYPED_TEST_CASE(GPUStochasticPoolingLayerTest, TestDtypes); - -TYPED_TEST(GPUStochasticPoolingLayerTest, TestStochastic) { +TYPED_TEST(StochasticPoolingLayerTest, TestStochasticGPU) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; layer_param.set_phase(TRAIN); PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); @@ -119,7 +104,8 @@ TYPED_TEST(GPUStochasticPoolingLayerTest, TestStochastic) { EXPECT_GE(total / this->blob_top_->count(), 0.55); } -TYPED_TEST(GPUStochasticPoolingLayerTest, TestStochasticTestPhase) { +TYPED_TEST(StochasticPoolingLayerTest, TestStochasticGPUTestPhase) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; layer_param.set_phase(TEST); PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); @@ -156,7 +142,8 @@ TYPED_TEST(GPUStochasticPoolingLayerTest, TestStochasticTestPhase) { } } -TYPED_TEST(GPUStochasticPoolingLayerTest, TestGradient) { +TYPED_TEST(StochasticPoolingLayerTest, TestGradientGPU) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; layer_param.set_phase(TRAIN); PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); @@ -171,6 +158,6 @@ TYPED_TEST(GPUStochasticPoolingLayerTest, TestGradient) { 
this->blob_top_vec_); } -#endif + } // namespace caffe diff --git a/src/caffe/test/test_triplet_loss_layer.cpp b/src/caffe/test/test_triplet_loss_layer.cpp new file mode 100644 index 00000000000..c8d9377fa23 --- /dev/null +++ b/src/caffe/test/test_triplet_loss_layer.cpp @@ -0,0 +1,107 @@ +#include +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class TripletLossLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + TripletLossLayerTest() + : blob_bottom_data_i_(new Blob(128, 10, 1, 1)), + blob_bottom_data_j_(new Blob(128, 10, 1, 1)), + blob_bottom_data_k_(new Blob(128, 10, 1, 1)), + blob_bottom_y_(new Blob(128, 1, 1, 1)), + blob_top_loss_(new Blob()) { + // fill the values + FillerParameter filler_param; + filler_param.set_mean(0.0); + filler_param.set_std(0.3); // distances~=1.0 to test both sides of margin + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_data_i_); + blob_bottom_vec_.push_back(blob_bottom_data_i_); + filler.Fill(this->blob_bottom_data_j_); + blob_bottom_vec_.push_back(blob_bottom_data_j_); + filler.Fill(this->blob_bottom_data_k_); + blob_bottom_vec_.push_back(blob_bottom_data_k_); + for (int i = 0; i < blob_bottom_y_->count(); ++i) { + blob_bottom_y_->mutable_cpu_data()[i] = caffe_rng_rand() % 2; // 0 or 1 + } + blob_bottom_vec_.push_back(blob_bottom_y_); + blob_top_vec_.push_back(blob_top_loss_); + } + virtual ~TripletLossLayerTest() { + delete blob_bottom_data_i_; + delete blob_bottom_data_j_; + delete blob_bottom_data_k_; + delete blob_bottom_y_; + delete blob_top_loss_; + } + + Blob* const blob_bottom_data_i_; + Blob* const blob_bottom_data_j_; + Blob* const blob_bottom_data_k_; + Blob* const blob_bottom_y_; + 
Blob* const blob_top_loss_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(TripletLossLayerTest, TestDtypesAndDevices); + +TYPED_TEST(TripletLossLayerTest, TestForward) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + TripletLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // manually compute to compare + const Dtype margin = layer_param.contrastive_loss_param().margin(); + const int num = this->blob_bottom_data_i_->num(); + const int channels = this->blob_bottom_data_i_->channels(); + Dtype loss(0); + for (int i = 0; i < num; ++i) { + Dtype dist_sq(0); + for (int j = 0; j < channels; ++j) { + Dtype diff = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - + this->blob_bottom_data_j_->cpu_data()[i*channels+j]; + dist_sq += diff*diff; + } + if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs + loss += dist_sq; + } else { + loss += std::max(margin-dist_sq, Dtype(0)); + } + } + loss /= static_cast(num) * Dtype(2); + EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); +} + +TYPED_TEST(TripletLossLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + TripletLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + GradientChecker checker(1e-2, 1e-2, 1701); + // check the gradient for the first two bottom layers + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 1); +} + +} // namespace caffe diff --git a/src/caffe/util/math_functions.cpp b/src/caffe/util/math_functions.cpp index 0aab6b17b85..13e17be582b 100644 --- a/src/caffe/util/math_functions.cpp +++ b/src/caffe/util/math_functions.cpp @@ -206,16 +206,6 @@ void caffe_exp(const int n, const double* a, double* y) { vdExp(n, a, 
y); } -template <> -void caffe_log(const int n, const float* a, float* y) { - vsLn(n, a, y); -} - -template <> -void caffe_log(const int n, const double* a, double* y) { - vdLn(n, a, y); -} - template <> void caffe_abs(const int n, const float* a, float* y) { vsAbs(n, a, y); diff --git a/src/caffe/util/math_functions.cu b/src/caffe/util/math_functions.cu index 2631a0740d6..43e65eb9a69 100644 --- a/src/caffe/util/math_functions.cu +++ b/src/caffe/util/math_functions.cu @@ -324,27 +324,6 @@ void caffe_gpu_exp(const int N, const double* a, double* y) { N, a, y); } -template -__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { - CUDA_KERNEL_LOOP(index, n) { - y[index] = log(a[index]); - } -} - -template <> -void caffe_gpu_log(const int N, const float* a, float* y) { - // NOLINT_NEXT_LINE(whitespace/operators) - log_kernel<<>>( - N, a, y); -} - -template <> -void caffe_gpu_log(const int N, const double* a, double* y) { - // NOLINT_NEXT_LINE(whitespace/operators) - log_kernel<<>>( - N, a, y); -} - template __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { From f23813f3c779e4ec35c0b0b75a4ed574d0c086cd Mon Sep 17 00:00:00 2001 From: Wangyida Date: Thu, 4 Jun 2015 22:31:06 +0800 Subject: [PATCH 03/82] triplet data generation and network update --- .../triplet/convert_mnist_triplet_data.cpp | 127 +++++ examples/triplet/create_mnist_triplet.sh | 21 + examples/triplet/mnist_siamese.ipynb | 154 ++++++ examples/triplet/mnist_triplet.prototxt | 113 ++++ .../triplet/mnist_triplet_solver.prototxt | 25 + .../triplet/mnist_triplet_train_test.prototxt | 498 ++++++++++++++++++ examples/triplet/readme.md | 179 +++++++ examples/triplet/train_mnist_triplet.sh | 5 + 8 files changed, 1122 insertions(+) create mode 100644 examples/triplet/convert_mnist_triplet_data.cpp create mode 100755 examples/triplet/create_mnist_triplet.sh create mode 100644 examples/triplet/mnist_siamese.ipynb create mode 100644 
examples/triplet/mnist_triplet.prototxt create mode 100644 examples/triplet/mnist_triplet_solver.prototxt create mode 100644 examples/triplet/mnist_triplet_train_test.prototxt create mode 100644 examples/triplet/readme.md create mode 100755 examples/triplet/train_mnist_triplet.sh diff --git a/examples/triplet/convert_mnist_triplet_data.cpp b/examples/triplet/convert_mnist_triplet_data.cpp new file mode 100644 index 00000000000..d1eed30cba6 --- /dev/null +++ b/examples/triplet/convert_mnist_triplet_data.cpp @@ -0,0 +1,127 @@ +// This script converts the MNIST dataset to the leveldb format used +// by caffe to train siamese network. +// Usage: +// convert_mnist_data input_image_file input_label_file output_db_file +// The MNIST dataset could be downloaded at +// http://yann.lecun.com/exdb/mnist/ +#include // NOLINT(readability/streams) +#include + +#include "glog/logging.h" +#include "google/protobuf/text_format.h" +#include "leveldb/db.h" +#include "stdint.h" + +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/math_functions.hpp" + +uint32_t swap_endian(uint32_t val) { + val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); + return (val << 16) | (val >> 16); +} + +void read_image(std::ifstream* image_file, std::ifstream* label_file, + uint32_t index, uint32_t rows, uint32_t cols, + char* pixels, char* label) { + image_file->seekg(index * rows * cols + 16); + image_file->read(pixels, rows * cols); + label_file->seekg(index + 8); + label_file->read(label, 1); +} + +void convert_dataset(const char* image_filename, const char* label_filename, + const char* db_filename) { + // Open files + std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); + std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); + CHECK(image_file) << "Unable to open file " << image_filename; + CHECK(label_file) << "Unable to open file " << label_filename; + // Read the magic and the meta data + uint32_t magic; + uint32_t num_items; + uint32_t 
num_labels; + uint32_t rows; + uint32_t cols; + + image_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + CHECK_EQ(magic, 2051) << "Incorrect image file magic."; + label_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + CHECK_EQ(magic, 2049) << "Incorrect label file magic."; + image_file.read(reinterpret_cast(&num_items), 4); + num_items = swap_endian(num_items); + label_file.read(reinterpret_cast(&num_labels), 4); + num_labels = swap_endian(num_labels); + CHECK_EQ(num_items, num_labels); + image_file.read(reinterpret_cast(&rows), 4); + rows = swap_endian(rows); + image_file.read(reinterpret_cast(&cols), 4); + cols = swap_endian(cols); + + // Open leveldb + + leveldb::DB* db; + leveldb::Options options; + options.create_if_missing = true; + options.error_if_exists = true; + leveldb::Status status = leveldb::DB::Open( + options, db_filename, &db); + CHECK(status.ok()) << "Failed to open leveldb " << db_filename + << ". Is it already existing?"; + + char label_i; + char label_j; + char label_k; + char* pixels = new char[3 * rows * cols]; + const int kMaxKeyLength = 10; + char key[kMaxKeyLength]; + std::string value; + + caffe::Datum datum; + datum.set_channels(3); // one channel for each image in the pair + datum.set_height(rows); + datum.set_width(cols); + LOG(INFO) << "A total of " << num_items << " items."; + LOG(INFO) << "Rows: " << rows << " Cols: " << cols; + for (int itemid = 0; itemid < num_items; ++itemid) { + int i = caffe::caffe_rng_rand() % num_items; // pick a random pair + int j = caffe::caffe_rng_rand() % num_items; + int k = caffe::caffe_rng_rand() % num_items; + read_image(&image_file, &label_file, i, rows, cols, + pixels, &label_i); + read_image(&image_file, &label_file, j, rows, cols, + pixels + (rows * cols), &label_j); + read_image(&image_file, &label_file, k, rows, cols, + pixels + (2 * rows * cols), &label_k); + datum.set_data(pixels, 3*rows*cols); + if (label_i == label_j && label_i == label_k) { + 
datum.set_label(1); + } else { + datum.set_label(0); + } + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", itemid); + db->Put(leveldb::WriteOptions(), std::string(key), value); + } + + delete db; + delete pixels; +} + +int main(int argc, char** argv) { + if (argc != 4) { + printf("This script converts the MNIST dataset to the leveldb format used\n" + "by caffe to train a triplet network.\n" + "Usage:\n" + " convert_mnist_data input_image_file input_label_file " + "output_db_file\n" + "The MNIST dataset could be downloaded at\n" + " http://yann.lecun.com/exdb/mnist/\n" + "You should gunzip them after downloading.\n"); + } else { + google::InitGoogleLogging(argv[0]); + convert_dataset(argv[1], argv[2], argv[3]); + } + return 0; +} diff --git a/examples/triplet/create_mnist_triplet.sh b/examples/triplet/create_mnist_triplet.sh new file mode 100755 index 00000000000..f404f2aa255 --- /dev/null +++ b/examples/triplet/create_mnist_triplet.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env sh +# This script converts the mnist data into leveldb format. + +EXAMPLES=./build/examples/triplet +DATA=./data/mnist + +echo "Creating leveldb..." + +rm -rf ./examples/triplet/mnist_siamese_train_leveldb +rm -rf ./examples/triplet/mnist_siamese_test_leveldb + +$EXAMPLES/convert_mnist_triplet_data.bin \ + $DATA/train-images-idx3-ubyte \ + $DATA/train-labels-idx1-ubyte \ + ./examples/triplet/mnist_triplet_train_leveldb +$EXAMPLES/convert_mnist_triplet_data.bin \ + $DATA/t10k-images-idx3-ubyte \ + $DATA/t10k-labels-idx1-ubyte \ + ./examples/triplet/mnist_triplet_test_leveldb + +echo "Done." 
diff --git a/examples/triplet/mnist_siamese.ipynb b/examples/triplet/mnist_siamese.ipynb new file mode 100644 index 00000000000..8e076663ca6 --- /dev/null +++ b/examples/triplet/mnist_siamese.ipynb @@ -0,0 +1,154 @@ +{ + "metadata": { + "description": "Extracting features and plotting the Siamese network embedding.", + "example_name": "Siamese network embedding", + "include_in_docs": true, + "priority": 6, + "signature": "sha256:845bb18929f96543ba2611eb5eca744fd98939cbef876df6bc319c29f616fc64" + }, + "nbformat": 3, + "nbformat_minor": 0, + "worksheets": [ + { + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Setup\n", + "\n", + "Import Caffe and the usual modules." + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "%matplotlib inline\n", + "\n", + "# Make sure that caffe is on the python path:\n", + "caffe_root = '../../' # this file is expected to be in {caffe_root}/examples/siamese\n", + "import sys\n", + "sys.path.insert(0, caffe_root + 'python')\n", + "\n", + "import caffe" + ], + "language": "python", + "metadata": {}, + "outputs": [], + "prompt_number": 1 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Load the trained net\n", + "\n", + "Load the model definition and weights and set to CPU mode TEST phase computation with input scaling." 
+ ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "MODEL_FILE = 'mnist_siamese.prototxt'\n", + "# decrease if you want to preview during training\n", + "PRETRAINED_FILE = 'mnist_siamese_iter_50000.caffemodel' \n", + "caffe.set_mode_cpu()\n", + "net = caffe.Net(MODEL_FILE, PRETRAINED_FILE, caffe.TEST)" + ], + "language": "python", + "metadata": {}, + "outputs": [], + "prompt_number": 2 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Load some MNIST test data" + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "TEST_DATA_FILE = '../../data/mnist/t10k-images-idx3-ubyte'\n", + "TEST_LABEL_FILE = '../../data/mnist/t10k-labels-idx1-ubyte'\n", + "n = 10000\n", + "\n", + "with open(TEST_DATA_FILE, 'rb') as f:\n", + " f.read(16) # skip the header\n", + " raw_data = np.fromstring(f.read(n * 28*28), dtype=np.uint8)\n", + "\n", + "with open(TEST_LABEL_FILE, 'rb') as f:\n", + " f.read(8) # skip the header\n", + " labels = np.fromstring(f.read(n), dtype=np.uint8)" + ], + "language": "python", + "metadata": {}, + "outputs": [], + "prompt_number": 3 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Generate the Siamese features" + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "# reshape and preprocess\n", + "caffe_in = raw_data.reshape(n, 1, 28, 28) * 0.00390625 # manually scale data instead of using `caffe.io.Transformer`\n", + "out = net.forward_all(data=caffe_in)" + ], + "language": "python", + "metadata": {}, + "outputs": [], + "prompt_number": 4 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Visualize the learned Siamese embedding" + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "feat = out['feat']\n", + "f = plt.figure(figsize=(16,9))\n", + "c = ['#ff0000', '#ffff00', '#00ff00', '#00ffff', '#0000ff', \n", + " '#ff00ff', '#990000', '#999900', '#009900', '#009999']\n", + "for i in range(10):\n", + " 
plt.plot(feat[labels==i,0].flatten(), feat[labels==i,1].flatten(), '.', c=c[i])\n", + "plt.legend(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'])\n", + "plt.grid()\n", + "plt.show()" + ], + "language": "python", + "metadata": {}, + "outputs": [ + { + "metadata": {}, + "output_type": "display_data", + "png": "iVBORw0KGgoAAAANSUhEUgAAA54AAAIXCAYAAAD0R4FDAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvXtwXOWZr/usvurWUktqGdmxaawEHEMuthGXITiIyMaJ\nwbEMFmCTDMkkoyqSyTnZMwdqpmYyzEyS2ruKue2ZqSTHO/vYGQbhCxdjwI637ViWMEEEMJhgB4MB\ngSRLsizJkiypuyX1+WP1Wlp971YvSd3y+1S5rF69Lt/6+lOrf/2+v/dVgsEggiAIgiAIgiAIgjBT\nWOZ6AIIgCIIgCIIgCML8RoSnIAiCIAiCIAiCMKOI8BQEQRAEQRAEQRBmFBGegiAIgiAIgiAIwowi\nwlMQBEEQBEEQBEGYUUR4CoIgCIIgCIIgCDNKRsJTUZQ8RVFaFUV5U1GUU4qi/HezBiYIgiAIgiAI\ngiDMD5RM+3gqilIQDAZHFEWxAS8B/08wGHzJlNEJgiAIgiAIgiAIOU/GqbbBYHAk9KMDsAJ9mZ5T\nEARBEARBEARBmD9kLDwVRbEoivIm0A0cDQaDpzIfliAIgiAIgiAIgjBfMCPiORkMBlcAi4EvK4pS\nk/GoBEEQBEEQBEEQhHmDzawTBYPBi4qivAhUA03adkVRMjORCoIgCIIgCIIgCFlNMBhUEj2fkfBU\nFMUDjAeDwQFFUfKBtcDfxxhEJpcRhDC+9a1vsWPHjrkehjCPkDUlmImsJ8FsZE0JZiNrSjAbRUmo\nOYHMI54LgV8pimJBTdt9PBgMHsnwnIIgCIIgCIIgCMI8IiPhGQwG3wZWmTQWQUiJq666aq6HIMwz\nZE0JZiLrSTAbWVOC2ciaEuaCjIsLCcJsU1NTM9dDEOYZsqYEM5H1JJiNrCnBbGRNCXOBCE9BEARB\nEARBEARhRjGtqq0gCIIgCIIgCIIQTSrFd3KF6RaOVWa64qyiKEGpaisIgiAIgiAIwuWKoijzotNH\nvPsIbU+oriXVVhAEQRAEQRAEQZhRRHgKOUdTU9NcD0GYZ8iaEsxE1pNgNrKmBLORNSXMBSI8BUEQ\nBEEQBEEQhBlFPJ6CIAiCIAiCIAgziHg8JeIpCIIgCIIgCIJwWdPX18emTZsoKiriqquu4sknnzT9\nGiI8hZxDfAmC2ciaEsxE1pNgNrKmBLORNSVE8v3vf5+8vDx6enp44okneOihhzh16pSp1xDhKQiC\nIAiCIAiCcJly6dIlnnnmGX784x9TUFDAl770JTZu3Mjjjz9u6nXE4ykIgiAIgiAIgjCDJPV4NjTA\nmTNQUACNjeB2p3eBDI4/ceIEt956K5cuXdK3/fM//zNNTU3s27cvpfsQj6cgCIIgCIIgCEK2c+YM\nHDsGBw6oInIWjx8eHqa4uDhsm8vlYmhoKP1xJECEp5BziC9BMBtZU4KZyHoSzEbWlGA2sqaykIIC\n9f/qati2bVaPLyoqYnBwMGzbxYsXcblc6Y8jASI8BUEQBEEQBEEQ5pLGRqivh0OH0k+zzfD4a665\nhvHxcd5//31921tvvcXnPve59MeRAPF4CoIgCIIgCIIgzCDZ3sdzy5YtKIrCL3/5S9544w3uuusu\nfvvb37J8+fKw/cTjKQiCIAiCIAiCIEyLn/3sZ4yOjrJgwQK+
8Y1v8Itf/CJKdGaKCE8h5xBfgmA2\nsqYEM5H1JJiNrCnBbGRNCZGUlpby7LPPMjw8zEcffcT9999v+jVEeAqCIAiCIAiCIAgzing8BUEQ\nBEEQBEEQZpBs93iming8BUEQBEEQBEEQhKxFhKeQc4gvQTAbWVOCmch6EsxG1pRgNrKmhLlAhKcg\nCIIgCIIgCIIwo4jHUxAEQRAEQRAEYQYRj6dEPAVBEARBEARBEIQZRoSnkHOIL0EwG1lTgpnIehLM\nRtaUYDaypoS5QISnIAiCIAiCIAiCMKOIx1MQBEEQBEEQBGEGyWaP53/8x3+wY8cOfv/737Nlyxa2\nb98ed99MPJ62zIcqCIIgCIIgCIIg5CKf+tSn+NGPfsTBgwcZHR2dsetIqq2Qc4gvQTAbWVOCmch6\nEsxG1pRgNrKmBCObNm1i48aNlJeXz+h1RHgKgiAIgiAIgiDMKQ1ADbAeGJiD45nxVGDxeAqCIAiC\nIAiCIMwgyT2eNcCx0M/1wO40r5Dp8fCjH/2I9vZ28XgKgiAIgiBoNDc3MDBwBputgNraRpxO91wP\nSRAEIQMKQv9XA9vm4PiZj3hKqq2Qc4gvQTAbWVOCmch6mh0GBs7Q1XWM9vYDtLQ0zPVwZhRZU4LZ\nyJrKRhpRI5WHgOl8kZbp8WrUciaRiKcgCIIgCDmHzaZ+u+/xVLN69fS+3RcEQcge3EwnPdaM4ycm\nJggEAoyPjzMxMYHP58Nms2G1WjMYTzTi8RQEQRAEIefw+QZoaWlg9eptkmYrCELWk819PP/u7/6O\nf/iHf4ja9rd/+7dR+2bi8RThKQiCIAiCIAiCMINks/BMh0yEp3g8hZxDfAmC2ciaEswkcj01Nzew\nb18N+/evx+ebXon7TMmGMQjTR96jBLORNSXMBSI8BUEQBGEGyYYiONkwBkEQBOHyRlJtBUEQBGEG\n2b9/Pe3tB/B4qrnzzkNz4kfMhjEIgiBczkiqrQhPQRAEQZhRsqEITjaMQRAE4XJGhKek2go5iPgS\nBLORNSWYSeR6cjrdrFmze04FnxljEJ/o3CHvUYLZyJoS5gLp4ykIgiAIOUZzcwMDA2ew2QqorW2c\nFVGr+UQBWloaWLMmk35zU8zFvQiCIAizj6TaCoIgCEKOsW9fjS4Cq6rqTROBiUjHJ5qOmJyLexEE\nQZhtJNVWUm0FQRAEIeew2QoA8HiqWb16W9hzM5USW1vbSFVVfUrFidKpopvoXgRBEIT5gwhPIecQ\nX4JgNrKmBDOZjfWUSATOVOuUdHyi6YjJdATt5Yq8RwlmI2tKmAtEeAqCIAhCDmCMZAJxRWA2RBDz\n8ytwOj0pCclsKL4kCIJwOeP3+/nOd77DVVddRXFxMStXruTXv/616dcRj6cgCIJw2ZMLBW5S9UJm\nQ+sU8W0KgiCEk80ez5GRER577DG+/e1vc+WVV/Liiy+yZcsW3n77bbxeb9i+mXg8paqtIAiCcNkz\nUxVbzSTVSKYWQYyFWQI72XmyIeqqkQtfKgiCIMwlBQUFPProo/rjO++8k6VLl/LGG29ECc9MkFRb\nIecQX4JgNrKmBDOF0kytJzO8kGb5P5OdJ5t8mzPleZ1N5D1KMBtZU9lHAw3UUMN61jNA+oXhMj3e\nSHd3N2fOnOG6667L6DyRSMRTEARBuOyprW2c8/TUZLS2PsLISA9HjmxNO3KnRf36+98BUhPYiSKF\nyYR6oqhrJuza9VlGRrqwWOzcffdruFzJv4nPpuirIAhCPM5whmOomTcNNLCb9N5DMz1eIxAI8MAD\nD/Ctb32La665ZlrniId4PAVBEAQhy4gl+tLxTUYef/BgnX5sYeFiNm9+O6lwTXS9ufKRbt/uJhC4\nCKj38cADnyQ9Jhs8r4IgCMk8nutZzwEOUE01hziEm/TerzI9HmBycpKtW7cyPDzMc889h9VqTfk+\nxOMpCIIgCFlMvKhiLM9p
rMhdqscbj001/TVRpHCmIprJsFjsAFitBXz96y/F3U+bl6GhsxQWenE4\nimdriIIgCNOikUYaaGAb26YlGjM9PhgM8p3vfIfz58+zf//+mKIzU8TjKeQc4ksQzEbWlADh7Up8\nvun7Y9JZT/H8h4ODZ0M/WRkd7cHnG4jpm4x3fKRonI7nMpt8mhp33/0ahYWLuffeUwnTbLV5uXSp\nnZ6e4znt7wR5jxLMR9ZU9uHGzW52T0s0mnH8Qw89xB/+8Af27duH0+mc1jmSIcJTEARBuGxIJC7n\nogiNJjDt9mJuuukxffvkpD/00wTnzh2jpaUhZr/LeFHJSNE4nV6Z2dhf0+Xy8sADnyT1dmrzYreX\nAOLvFARBSERbWxvbtm3jrbfeorKyEpfLhcvl4sknnzT1OuLxFARBEC4bEvkW9+9fT3v7gbTSUdNF\nSwEdHDxLMBhgdLQXmIgaz44dZfj9/YAqnrZu/SjmeDL1L2baaiRbW5Vo83LTTY/R2vqw+DsFQZhz\nsrmPZzpk4vEU4SkIgiBcNiQSl7NRhMYofI1YLA48nhtwOIqprW3k0KF6OjsPY7eXsGTJ1xgZOYfN\nVsDg4HuMjp5PqaprKqIwlhBvbm6gre15JiZ8eDzXs3btnrjzkU7BI0EQhMsZEZ6SaivkIOJLEMxG\n1tTlQyLfolmppYnW01QK6FSxG4fDTVnZyjAv4tq1e6iqqmfr1o84d65JTwEeHPyIQOAiPl8vu3Zd\nE9eP2tzcwNmzu5OmDsdK1R0YOMPoaBd+fz+dnYdpaWngf/0vB9u2KWzbZuHcuZcSHp8uZnlr5zPy\nHiWYjawpYS4Q4SkIgiBcNqQjLmdCEGnCd/Pmk3i9G/F669iy5UPy8soAVcBZrfns3r2c9vbDHDpU\nz8TEmOEM4/pPk5P+uKJyYOCM3nYELHz88UF+9asKhobawu7twoW3cDrLcTiifaMAZWUrWL16G8Fg\nILQlyPPP36afIxAYJD+/krVrn5q2YJ8Lb60gCIIw+0iqrSAIgpDVzJWPcDbTSI8efZCPP96Px7OC\nCxdOMjbWoz+nKHaD8FOxWBxMTvqx20vYvPktPeVWm6uenleYnPShKDYsljwmJoaBqd6XsVJ+tXv0\n+QZoavo2EKSmZgdOp5tt2yyA+rd8w4YWFi681bT5mQ1vrSAIwlwjqbbSx1MQBEHIcmL1tJwNUk0j\nzUQYa8f29Z3E7++no+MwimIP20cVnQqa8AO1yTdAIHCRZ56ppqLiBmprG8PmSj12XK+Qa7UWYLE4\n2L7dzfj4SNg1HI5SrNZ89u2riXkfGzY08/zzt7FhwzEWLrw1rflJRm1t44x7awVBEIS5RyKeQs7R\n1NRETU3NXA9DmEfImspuzI6IpSoUUy02ZIz8uVxL+eCDYlauXER+vprammpxH1BFnN1eyLlz0QWI\nNCyWPK644ibOnTuGzVbE+LgazayqqsfvH6a9/YC+r93uprj40/T3/55Nm15l374vG1JwVRTFxt13\nv87x4/+XPpaioqUUFV2ZcOyx5idbq9zmOvIeJZiNrKnZRyKe4vEUBEEQspxEBYESEc+jmaqnMFU/\nqDHyV1CwiL6+t2hvP8DHHx9IubhPWdkKvN46Skuvpb//dNR+ZWVf4FOfWktBwSKuuqqOyclx8vMr\nqai4PnSeIsbG+lm9+udhxwUCA1y48DqTkz5eeGGNIdJpwWp1AWpU9MUX12G1qpFWRbExPNyWdOxO\npxuHw83Bg3X6HItfUxAEQYiHRDwFQRCEeYkxmpifX8m9957G6XSbHkE1Rv6OHNmqn9vhcNPZeRib\nrYgrrriZNWvUtiS7dn2WkZEuLBY7d911hBMnfqJHDR9/fCGjo11h53c4SrHZ8pmcHGdyMkAgMGzw\nfNqwWCx6Oq3XW0db296oMRojo/HwejfS1fUyPt/5sGtv2fJBVERzaOgsExMBxsbC+5BqEd
d05lai\npIIgXA5IxFOEpyAIgjBP0QSmhrF4jlmeQqNoys+v4OLFs1y48DplZV/Ebndx4cIJfL4LAOTlVeJw\nuBgcfE8/vrBwMUuWfE0/R0/Pb/H7p19B1+vdSFvb88Ckvq2gYBFu97V0dh4O29fpLMfvv0gwOE5Z\n2Qo2bDiqC2dQ27zcc8+bnDjxU318vb2vhxU+0tAEKpD23EovUEEQLgdEeIrwFHIQ8SUIZiNran7i\n8w2we/dyRke7oiJwZkXZjKLJ6fTg8/Xy7ruwbJn6fH5+ZVgEU1FsBINTLVEWLryNiYlxenqOT/Mu\nw1Er4E6iRSGt1gIqK2/hy1/+Jbt3f5aJiTHsdheVlas5f/41XUTabIWhCrg2PJ5V2GyF9Pf/ntHR\n84yPXzKMObzIkXbNpUvvYWTk3LTmczaq2uZ6VFXeowSzkTU1+2S78PzGN77BkSNHuHTpEh6Ph+98\n5zv89V//ddR+4vEUBEEQhAicTjf33ns6pj/ULC+i0d/p8awIe87hKGXhwhq9Sm1Z2QocjpKwfc6d\nO0ZPz8sJrpDen2k1BVcVnYpiZ2JihI6Ow7S2Poz2OSEQGAJgcnKqRcv4+BiBwEV8vgt0dBzmllv+\nldHR8wQCF8OEcqTotNuLuf/+9xgZOTft+dQ8vKWl14b5Rc1EvKeCIAiJ+au/+is+/PBDBgcHOXDg\nAP/+7//Or3/9a1OvIcJTyDnkGzrBbGRNzV/iFQjKtBWIVrgoGAxQVOTFanUCasXZZcvUyOaGDU2M\njJzT/ZiXLn1CWdnnKChYRH5+ZehMViLF3BQKxpRZu70ktC05ijLVLc1ud3HTTY8xOTmmb+vtfQOP\nRy1MVFa2ImJ+guzbdysWiyqYrdYCnE4PAOXlK1myZD1LlqynqMhLWdnnaWl5SN83cj7jFXgyor1G\nQ0PJCxpNF7Nav8wV8h4lmI2sKSGS6667jry8PP2xzWZjwYIFpl5DhKcgCIJw2THdSrkaWgSto+Mw\ngcAluruP09FxGLu9EFArxf6f/1NHX99J/Rif7wLnzh2jouIGios/Hdo6keAqU4JUUewsWfI1Uv2z\nHQyO64I3EBjihRduD3ve41nF2rV7qKqqZ8OGo9x99+toolZRrIyP+5icDKAoDurqfsv9979HUdFS\nrNYC+vpO4vNdxO8fpLv7OO3tB7DbizKOLM+kOMz09RYEQZhpGoAaYD0wnZyPTI8H+N73vkdhYSHX\nXXcdf/M3f8OqVaumeabYiMdTyDnElyCYjawpIV2MvkSn001Hx2G9ku3Ro4f50peqsVqddHdHezeX\nLFnPhQsnGRlpT+laDoebxYu/yocfPm2oZps6dnsJZWWf08eiKHYqKm7k/PlXCAaDKIqVu+9+DYej\nhH37bqWg4FOcP9+qH+90VlBRUY3fPxjTi5rIm5mOfzPdok/NzQ20tT3PxIQPj+d61q7dM29FpbxH\nCWYja2r2SebxrAG0Ds71QLpl1jI9XiMYDHLs2DE2b97M/v37ufHGG8Oez8TjaUv0pCAIgiBc7kRW\nrh0aasNqteP1bqSmZgcwVckV4N1367jzzr0cObI15vk6OtQWK6mgKDbKy79IZ+dvpiU6QSEYnKSn\n51VA9Z0WF18TJiCDwUmefnolDz54gQce+IT9+9eHHe/znae9/QB5eZVEUlCwKGHRptraRp55ZhVW\nq5MjR7YmLOyjpdymysDAGb1wU2fnYVpaGnA43DldREgQhMuXgtD/1cB0cj4yPV5DURRqamqor6/n\nySefjBKemSART0EQBEEwoImnwcGzuFxe+vtP4ff3A1rVWFUAxmr9YRReq1f/nGefvZmxsa6oa4Rj\nBSawWBx6P04zyMtbwPj4GOPjg/p1Fiy4md7e15ic9EWPwppPefkqbLZ8rFYH3d2t+P1qKxiHo5R7\n7jnBM8/coPf5dDjcbNnyYZi4i9UaxbitqGgpRUVXRg
nDVKrORu5jbP2itYM5eLBOWrMIgpCVJIt4\nDqCmy24DpvOVWabHR/Ld736XyspKfvKTn4Rtl6q2giAIgmASmi9xZKSd7u7juugEdNEZz4do9DS2\ntj7Mffed1gvzxGPDhiZcrqVYrXkJ90uXsbEeg+i0UF6+gp6e4zFFJ8DExCg9Pcfp7DxMd/crKIr6\n+cFicVBScg0tLQ9RXv5FQBOib6ZUtMm4raBgUUzPZype0Mh9amsb8Xrr8Ho3smHDUZxOd84XERLm\nK2a474T5jhs1PXa6ojGT48+fP8/OnTu5dOkSExMTHDx4kD179rBx48ZpjiY2IjyFnKOpqWmuhyDM\nM2RNXd5EVl7VxIvdXhy1b1nZCrzeurh+RZutgHffnRI+TqebioobEl7/nXf+jcLCKwkEBhPulxmT\n9PefSnlvv78Pn68Xi8VJeflKzp9v1YsIuVxLKS29lpaWh6Iq1cYq4mPc5nCocxopDCMFo/aaPPHE\nEvbuvZX9+9dHVc51Ot2sW/cs69btjXmt+ZRmK+9Ruc4ZVPfdAVQROvfImhKMKIrCL37xCxYvXkx5\neTk/+tGPePzxx7nhhsR/v9JFPJ6CIAhCzpFKamaqaJE0QI+ktbQ0cNNNj7F793ImJkax211UVq7m\nK195Iupakem1p08/SGmpl4MH6/Rte/fezOhoFzZbEePjw/qximJncPBjhobOTnv8qRLej1NDTfMF\nsNkKGR+/hN3uMvT69DE01AaA0+lhdPQ8Pt8AQ0MfAvDMM6soLAxPnXU43Pq9a55YTVhqcxtZQChy\nu/E1uXRJLcLk9dZRVLQUiyW+XzRdn6ggzA5mue8EYWbweDyz8mVERh5PRVGWAP8JLECt+74tGAz+\nW8Q+4vEUBEEQTCWWl3C6JKq8unfvrXohHqfTQ0XFDdTWNtLa+oguNgOBQb1irOZh7Os7qafoVlXV\nY7Xm8/HHBwgEBuOmuloszrjPzSRWax6LF3+VW275V1pbH+ammx4LE8oVFdczOPghPt+AIXUXbLYi\nJiZ8evqx11vHunXPhr02TmeF7gnNy6vkvvtOh81dvC8NtNfEbi8hELiovzbi4RRyE7Pdd0Iukszj\nmSvMZVXbAPDfgsHgm4qiFAGvK4pyKBgMns7wvIIgCIIQFzO9fPn5FTidnpgCSEsNtdmK8Pl6dX/h\nyEiPLoDy8yv1sVgsTn07qGJ1eLiTgYFTYV7RWMy86JyKbhqpqLiJmprtYdHCe+89zeOPVzI+Psy5\nc8dwOsvDRCcQFrkF9MJIxtfG4XDT2XkYgLGxrqi5i1eJ1hh1bm19GKs1n927lzM21guoKc/i4RRy\nB819l000AM8DPuB6YA8iioWZJiOPZzAY7AoGg2+Gfh4GTgOLzBiYIMRDfAmC2ciayj3M9PINDbXh\n8/XS0XE4qrCNdp0rrrgZmBK6mrjS2qIoioOLF9/j4sU/8O67YLUWkpe3AL9/iJ6e44aquOr3vQ6H\nO2nRIQ2bzWV4pKAKyPQxFt8x0tV1TL9vzVt55MhWrNb8qasqqV/T+NqsXbtHb8MSOXfaY2PRoJ07\nr9HbuaxZsxuXy8uaNbsZGmpjdLRLTxd2ua6aVx7OZMh7lGA2TU2vAl1AP3CYbPGeCvMb0zyeiqJc\nBawEWhPvKQiCIAiZYaaXL1H0VLuOzzcQ5kHMz69AUWyMjw/rkb9AQI34KYqViYlLTExcirpWMDiO\nzVaA232d7pNMRFnZ9fT1nTCegVhRy1Tw+S7E3K4oNnp732T7dneowJGaQuVweEJjWMG6dXtpbX2Y\njo7f4PNdwGrNZ9Gi22lvP6Sn2p4//zt8voGo1+a++06HzV2kn9Mo4rWeoTt3XkNFRbUeATV6YMvK\nvkBNzfZpzYEgCBpOw88rEe+pMBuY0sczlGbbBPwkGAzujXgu+OCDD3LVVVcB4Ha7WbFiBTU1NcDU\nt3jyWB7LY3ksj+
XxXDz+oz9aQUtLA5OTf4zDURT1vMXSyMDAGVpb3yE/v5JVq5YQCAzS3Kz6Opct\nA4B331X//+IXFzA21kNX1zWMjHRSVTWsP2+x5HH11T4gqO8febz2+IMPigkEBuM+b9bjlSsXMTLS\nzbvvToQ9399/EzZbHn/2Z2rV2KamJkZGuujvf5ivf/0lXn/9Q158cS1XXz2un2/Rotv4i79omtb8\n/+53Z+ntfYNlyyzAZNj59u69lZYWdb7vuGMj69btzZr1I4/ny+O7gHZqahYBjTQ1vZll44v1+B+p\nqRkGCmhq+h4Q/f4V//ELwP+gpqYC2J4j95vbj2+//fZ54/E8evQob775JgMDamXzjz76iF/96ldJ\nPZ4ZC09FUezAC8CBYDD4rzGel+JCgiAIQk7S3NzA2bO7CQQuhm3Pz69kdLQLh6OU0tJr9eJCpaWf\nZ3z8UiiaacHjqaav74Tuf0wVq1Ut6mP0i84csb2fDkcpixevY2TkXNxCQGqU9GJofzdbtnw47RRY\nn2+AnTuv0YsRORylbNnyAU6nO2EBqHQwsxqyMF9oQG13chI17RSgnuzzZMaiBrVNC+TOmC9fpLhQ\nhh5PRe0u/b+BU7FEpyDMBNq3SIJgFrKmhHgMDJzRhZXmz/R4qqmre4Wqqnq2bPmAr371BbzeOgoL\nr2R0tIfXXvsALSW2t7cVu10VN1N9QRP+XQZgYmKYrq7mNEdr0ceYHrFTd/3+fj74YLfuv2xq+nbU\nPgsWqD3eHA4399zzZlwhF9krNRZq2q2W/qdQXFzFkSNb8fkGTPP0Gv2kkX7ebEbeo2YSrcemJjpz\nqeXJ9Nu0TG9NNaCK3fWolXoFIT0yEp7Al4BvALcrinIi9O+rJoxLEARBEOYEo0gaHHwPUEXnXXcd\n1cWPVvTG6XTjdLpZt+5ZJif9jI11R51vwYJqqqrq2bz5JFVV9TgcpWHPxxeL6XwzbmHLlg/Iz1+Q\nxjGxiBTFxsfh42lubiAQGAWsTE5O8Mwz1XrPz0hSFXyFhV79Wr29r+v7a77RTCOUZlZDFuYLmnhb\nAdQBh8id6q6NqJHO2RqzJtIPIMWIhOlgiscz4QUk1VYQBEGYYcxMoQzvQ1luKMqjsHDhl7njjr0x\nz//LXzqjUmq1VNzh4TYKC704HMV0d78clbprJP1+nqrodLm8YX1H06WsbAWjo12MjnZFPWe3u9i8\n+W1OnPgpbW3P4/P1MTk5QWS0tLBwMQ888EnU8ammyk7171T9rZmm1kYSWSRKEOa+x6aW6luAKiTN\nHoOZ51+PKjqryS2Bnh1Iqm3mEU9BEARBmBOMkcm+vlOmpVAao2Iez0rDM0HOnTsW9/zGViWKYiMv\nbwElJdfQ3X2cS5fa6ek5Tnv7AaxWZ8zjVSw4naWk2jJFUWxcdVUdR48+yP7967HZ8pMfFPGn32Yr\nZMmS9ZSXf0Hvk2l8DiAQGKK19WEGBs4wOtoVEtjhotNqLeDrX38p5hXVXqkVOByJP6hqKbVadDhS\ndKaSspsIsyKnwnxC67E5V2vCGEW8mvTTWJOlv5oZpZztCKswF7z33nvk5eXxzW9+0/Rzi/AUcg7x\nughmI2u1e9m8AAAgAElEQVQqNzGmbw4Oqu02pptCaRQ0q1f/XBc9a9bsQVEc+n6lpZ9n9eptMQVQ\nRUU1AO+/n0cwOM7YWA+9vW8AasQQ1JYhbvdyLJZ44nMyFHFM1jJFFbb33/8+Y2MX9Hm4cOFE2Hhj\noSjhf/rHxy/R0XGE999v1Ptkqvs59HtSW530Y7Xaw46120vYsKGFwsLF3HvvKVwuL7FQe6Wep7Mz\nvFdq5DxqwtCYymwkVz2amZJb71HiA0wP7QurIqCX9AWiUViuInruY/tAp7em5lqkC7PB97//fW68\n8UbUUj7mYlofT0EQBEGYTYyRybVrn6K19eGUUygjU3M1QQPQ2vpwWB/KpUvv5sMP
n8HhKOarX30e\np9Mdtn9LSwMOh5tAYJS8vEqCwT79WK3HpcXixOFw4PerIlEtCK+hkJ6fE0AVti+//H9H9MGM3avT\niCoup65ptRYwMTESYz8/Docbp9ODz9dLR8dhFMXOpz61FovFjsVip6ZmB06nmwce+CRhunM8b2Xk\nPCbrzTpdj6ZUs51NNCEEqoCajUqrM52uaiaRY20MbesHDqMKxHxUAXkW8ALFxL8vTVh6gAuA1h94\nOXDacP65SiUWcomdO3dSWlrKtddey/vvv2/6+SXiKeQcWk8kQTCLy3VNZZq2ONcYK52eOPFTRkZ6\n9Cqoye4tMnKWSNCMjJwjGPTj8/XS2vowEC2ABgbO0NNznLGxLq65ZjLqej5fLxbLlNjUBGnoERZL\n4ihlOMY/3Qr5+RVYLE7Gx0dTOtpmK+aee97EYsnH4SiLm57rcJRSU7ODioobwsbd3/8OX/vai+Tn\nL+DgwTp9jo1zunPnNWFzH68qbbpCcrrVbXM9Uppb71HTr7Q6fXKp6M3zTI3128AjQE/oOa24UVto\nn3bgOInvS0t/tQCDhu1doWNiRymj15REqueahgaoqYH162FgGi9BpscPDg7y6KOP8i//8i8z5kUV\n4SkIwmVBQ3MzNfv2sX7/fgZ86RRumb9k84fxVNtvaOmYkfeiPf7v7eXcuPP/jXrdjYLHas3H7x/E\nas1DUay6eI21ryaOIgWQcR+HowQARbGiJRaVl69k06ZXyM+vDJ3VmMJkxWo1ir/o9Can04PDUUpe\n3gKuuOKPQtvK6e5+mfffbwwVI0qWnhu6mtVJUdGVLFhwI35/X8woqcNRyj33nMDpdFNb2xj2XHn5\nCiC+eFcjr+fD1lWkt1J7fScnA3i9dSkLyel6NKWa7WwyFz7AuRC706EBVRBq+JkSzYcBO+qcafej\nfVlVAjwWca7PAg6gAlW49kc8n+5c5JJ4n5+cOQPHjsGBA6qInO3jf/SjH/Hd736XRYsWzUiaLYjw\nFHKQ3PK6CNnCmYEBjnV1caC9nYaWlrDnLtc1lc0fxtMVxUbRMzY25UXss32ak75S/XXXBE8wGMDr\n3ciddx5iaKiNnp7jTEyMcf58a9Q1a2sbcbmWYrU6dVEaKYCMQrSi4j8oLFxMeXk1oHomh4ba2Lfv\n1lC7kMjU2gm9yq0qQKMLC/l8vfj9/YyN9dDd/dvQtgHGxnrCfJnxcDrLDec6T1PTt8KKIRlRFDsV\nFdfrAtrpdLNw4W2A6nHNy/Owb18N/f3vAFPrR5uDBQtu1rdbrfkxv0DQXt/OzsNYrfYZT301qw/o\nXJFb71Fz4QPMlaI3ZyIe24ktmrX7WRV6fBG4nfCIZBcQQH2POUb4e8oiEs9FA01NK0jFCyrMHgWh\nl6C6GrZN4yXI5Pg333yTI0eO8MMf/hBAIp6CIAiZUGBTI0/VHg/bVq+e49FkB9n8YTxSFCeLgKq+\nvQrGx4fp7DyMzVZIVVU9i69QP7hpr7smeDo6DmO1OsKilXZ7cdg1NZxON+Pjo3R3q1Vpn3zy01Hj\nMArRgoJKHnjgE/LyykL3UoTf38elS+309rYS6ee0Wl36ddUiRPGFpM1WBGipvKlFODdsaGFiIhCx\nVdFf/8g+osFggI6O8CJAd9yxl6qqejyelXz00XN0dR3D5+ulsHCxvn60OVi7do++roaG2vQvEHbv\nXq7P2Wx/6SHVbLMZM1I8s7nojfH+jN7uzwE7mBKZF4Ey1C+mqlAjnGWhfatRxaQxImk81xeAL4V+\nXgGsQU3bjTWnDaHrvhU617LQPrki3ucvjY1QXw+HDoF7Gi9BJscfO3aMjz76iCuvvJKFCxfyT//0\nTzz99NNUV1enP5AESB9PQRBynobmZs4MDFBgs9FYW4vbGV0xdMDno6GlhW2rV8d8XsguIvstGntr\nVlXVxyxCE6tXZOTrHmsf7Vo33fRYVIEirShN
d/fxqMhiVVU9Doc7btEa7bw+X79emEf1dlpRRaON\ngoIrmJjw4/cPUFl5C11dL0f4P9V9S0quxe2+hkBAFdbxsNmKGB8fDtvmci1laOjDsG2LFq1h7do9\nUXOrEa9/ZuS+DkcZ99zzBi6XV5+roaGzFBZ6GR5uY3x8GL9/6oOv9tql0k9TCgLlItMp8lPDVDGi\nemanGNFsUsPU/S1AFZG/B5agFg2qQPV0vkT4l0mLgHdQo56LgNcAX+iYk6F9bkEVmk+EHmtFhOoM\n16xELTKkvRbG8WjMx3nPPrK5j+fo6ChDQ0OAGu38x3/8Rz766CN+8YtfUF5eHrZvJn08RXgKgpDz\n1Ozbx7Eu1TdTX1XF7jVr5nhEgtnEEoyRpCJmUtnHSKTQsttdBAJD+jgOHqxLKIibmxvo6zvF4OBZ\nCgs/xYULrwNgsThYuPDLnDt3nMnJ5EWBvN6NrFu3F59vgJ07r8bn68VudxMIDGH8sLpkyXo++WR/\nxNFWYkVHi4qupKhoKRaLnXPnjhEMBrBa81m06Ha+8pUnosS3zVbA5GQgSvhaLE6++c2usLmIRaLX\nLhapfNkQOb54AtW4T35+BUNDbSJoZ4Qa0heR61Ejb9XkVrQtmcjWnn8FVTBqeFCLAPlDj50Rz2tY\nUFusjBCdBVEJ3IEqWGNdX5tTDeNrEfncCuBojPELZpPNwjOSv//7v+fs2bP853/+Z9RzmQhPSbUV\nco7c8roIs0GmabSyprKfVNKCW1sfCatsG4t0Uy61lNCyshV4vXVs3vx23KJCWsqocT0Zq90ODJwK\nbbUyOemno+NwSqLTYrHT3/8O27e72bnzaiorvxxKK76EUVDa7S5uvfVnRBcnMorOqeeGhz/WfZYO\nRwlWawF2exHd3b/l8OF6fQ6Nflu7vYiqqvowz+jkpI9du5brVXvt9pLQ/2rqcnn5St1Pm2zejSnV\n2vmSpeOm4gc27vPxxweytqhWPHLnPWo6PsFcTfE0FuOpRE2LXctUaqtWvdYoKq2ovTr9hm2xfz/V\nlPpBYqfedwFPEr8YUGNoTBD9WjQCG2lqugnYiIhOIRaPPvpoTNGZKSI8BUHIeRpra6mvquLQnXdK\nGu08JRXBmG5Boli+0chtmuAtL/8CPl8/LS0PhUVLkwliTZg6nR6Dl3IixjYj4cWFNm16jdHR8wQC\nF/H5emlrexaf73xESi4EAkO0tj4c8oFGoyhqlDUWPl8vExOjjI2dx+/vD/N4GsV1Tc121qzZzd13\nv47FMvW7NjbWpYvSzZvfCv1/kqqqeu666zesW7c3JbEfKXIjizrFIhW/qHGf8vIvJt1fmC7TEZHZ\n7M+MJJZf04YqLrU+nNp7T6woZnSrpaltVuBOwr2bidB+/2NVvHUDrtDYPkT1jxqf2wv8D9TU30Re\n0Jo4zwnC9JBUW0EQBGFekEo6LkylXfb1ncTvV1sQaKmc8dI7jdudzgoqKqqTpmka02xdLi/nz7cC\n4HCoVWLHxnrp7j6u719evpKioisZH79ER8dhLBY7mza9Rnn5F/jVryrw+XrDzq+l/Uam/2qpuEYs\nlnzuu+80DkcJO3deg893Puz5SG+oxeLA47kBh6OY1at/zvPP305BwSIcjmL9vn2+AXbtWs7YWJd+\n7dbWRzLyZUa+hslSmSH9FGsgrXRr4XLls6iRxTHUdNQy4HWmem6WoqbJjjDVP9OKWn12D6oAj+/H\nVlkGdAJDMZ6zMyUujdiIjoIuCe3rA64PXf8qpgTnYuCTGOeqIX5qdKLnhOmQS6m2iRCPpyAIOUMq\nhYCE7CJXiryk6t+M9G0aRdMHHzyF399PeflKyso+r3sBNW+jUaAl8h1GXic/v5LR0S69P6bL5dVF\nliY4a2p2hBU7Mt7H0FAbu3YtY3LSp+9/yy3/kxdeuJ28vAUMDbWxadMruFxehobaeO65W1AUK4HA\nCH5/H1dc
cQvFxZ9maKiN/v538Pl6sVjs3Hnnb3jnnX9jbKxf927a7SWUlHw2VIFXjcwGgxNRIj3W\nnCfzZSZbS5HnS/XLhFjkyroVspEG4P8jtcrRsTzUTtRIZjD0L955FFRBG91LN1pg5qOmMmuCNNYx\nGvWoKbS9ofFVAx2AF7U4keYJTeSvzVXvbfYiwlNSbYUcJHe8LkIsEvXTnCuycU01NDdTs28f6/fv\nZ8AXK2Vr9kg3hXWuSNW/Genb1CvgDpzRxVVR0ZVhrUD6+k7i9W7Ue1Rq/UJjpX9q68mY3llX9wpV\nVfVs2fIBLpcXmErTjUxFjXUfJ078FIejBEWx43AU43CUcPTog/h8A5w/38rYWBetrQ8D4HJ5+cY3\nOnC5qvD7LwBBuruP09b2ot4GxWJxct9977Fw4a16CxSvdyNebx1bt36kt4IBi95DVLuXyFYzxrEm\nS3uNtZaM6c1A3P6o6QrHXFm3qZCN71HzD2Nq6SkSi04tHd5D7PRZH1M9NhOdJxjneIiOavpRxWYX\niUXnClRP52uokc5qoBVoB46jeULVNZUoNTpXvbdCNiPCUxCEWUX6aaZGNgn02e65ONNoYmbDhqOs\nW/dsTNFUU7NDfwwwNtaD1eoItSCZ6heaSNDU1jbici3l0qVPePrplfh8/WHPp1PoaGDgDGNjPQSD\nAc6dO8b77z9JV9exuIJQTfM9GXYOi2XKOzo56dOFqjaWdev2sm7ds7S2PkIgMIii2NE+FCuKjSVL\n1idNYfb7B8nLq2Tt2qcSel6N400kEDPpvznf1q0w0xiLBZ1Nsm9F6N9FIvvypk9/jG2xgkbJoq/F\nqKL5C6i+zYeAt5nqBVoS+t9YbCiRvzaXvLdCriCptoIgzCrSTzM11u/fz4H2dqo9njkvmpRuC5Jc\nITIVE6a8f62tj9DXd4re3t8xOekPS/VMJ/0zMq03WXpuvPRQ7ZqRlJWtwO/vZ3x8lMnJABUV11NQ\nsIiPPnqOQGCqoEh5+UruuONZdu1azuTkKIpix+NZhdNZFtVeJF5bFK2lS7wxx/LMRhJrLaXrzU01\ndXa+rlshVRK1O9GeO8tU+mkA1ZPpCe0T7pMOJ57/ci7ZiFo0qIZwb+Y21Pt9DHg49Fh+H+YCSbUV\n4SkIgpCViECfeeL5EZubGzh7drcu3AoLF7N589u6eElH0BgFY3n5Su666zcpC9W8vEruu++07vts\navo23d0vMzbWE+YLjRSKTqdHLy5kt5ewaNHt1NRsp7X1Ec6e3UUgMBh2TaezQi825HRWAMGo4kQA\nXm8d69Y9m3DMkH6/zul4c5MJeCPi9cxmkvXCzIQawgWY23CtQdS0UyN1qIKyM8ZzELuoT7bgQo1u\n/hR4CjWKWgTcjFpoSNZ8NiDCU1JthRxEvC5CLDLxRJq5pszyZrqdTnavWSOic4YwpqKWla0IS8Uc\nGDiji06HozRMdELy9E9tPWmpp07nApYsWZ9UdAIR6b1d7Nz5Gd37uG7ds9x337u4XEux2QqYmPBH\nHVNevhKPZ4Vh7G/p6cTqfamiU2vjYrMVhQlRn++87gEFtXKudt6amu0Jx2zs19na+khUq5p4pOvN\nTTd1dj54Pefv3z1jeqtZr43m1Xwn9FhLLTVe69XQcy7DPsWoFWtfi3FOK9kpOrXWK0Oo0cwzTKXu\nDhPe3iWc+bumhGxGhKcgCPOCbPFEZss4hMQYCwmNjHSGPacJHK0C7XQjZAMDZ+jpOY7P14PdXpjS\neWprG0PeShWf7wLt7QfYufMaXYAWFl5Jd/dxfXswGGDJkvV4vXXcdddvWLNmj17IaP/+dWzf7uZX\nv6pACX0P7XCUcvfdr+N0ehgfH2ZyMvwLEoejlPvuezfUi/NtvQBSvPFrntmyss/j8w1w5MhW+vtP\nZdxTNd510i00NJ+8nqnMU26hfWli9B1miiYwe1GL62jFcbRrFTGVJrsaNR
p6LfBc6LhYXximUt12\nJlmAGnE14gZuC/2szZ92j8UR2wUhO5BUW0EQ5gXZ4onMlnEIiYn0TBYVLaWo6EpstgJWr/45ra0P\nZ+wNnG4rkBdfXEtHx2G9P2dkCxe/f5j29gMptXbZvt2tR28VxU5eXjl1da+EtXMxoig27r//fb3y\nbjoYU2Gt1nwmJkax24vZvPlk0vNNN402FeaT13Mm52luGECNyJnpO4zXBkS7Vj9qJND4fA1TabnZ\nSDmq8OwOPbYCbwBXEj5/2j2KnzMbyfZU25qaGlpbW7GFikAuXryY06dPR+0nHk9BEC57ssUTmS3j\nEBLj8w2wa9dyxsa68HiqsVic9PSovi6zPtD7fAM888wqCgoWYbcXR/kLY3kPm5sb6O8/RW/vG5SU\nXMvISAelpcs4d+6YLmBBLYKk9d50Oj1YLFYmJvxUVFzPmjV7aG19hIGBM3R3v0wwGECtkhkMuz/j\nHIDqB928+a24IjFyvNo1tMdHjmzVhbaiWDl/vjXl+cykX+flhMyTRiJvaDIxG/l8A1O+yGxm6ndY\nZSmq8JwJf6wwE2S78Lz99tv55je/yZ/8yZ8k3E88nsJlhfgShFhk4onMZE1FejrFm5kbOJ1u7rvv\ntJ666XCoqWlmpGNq68npdIelxUamnMbyHqpi8TgTE6P09b3O2FgXfX2/Jz+/kuLiz3DwYB1Hjmxl\n9eptrF2rptS63csYHe3G7++no0Nt8aKdOxgMYLXmsXDhl6PuT5sDr3cjRUVeyso+R0vLQ3FTOCPH\nG/k4PBW2LK35zKRfpxGzUlGzqY8uTK0ps+YpOzH20Uz22kV6Q43HQuI2IMY2IQ2hn7NddFoJF502\n1F6eifyxiedTPksJsZhpYRyZMC4IgnDZ8Y9vvcXfDQ5SYLPRWFsbJRobmps5MzAQ8/nn29roGh0F\n4NtNTTy7bt2sjj1byYVKolpRG1A/0E8nHVO7z8HBs7hcXuz2Ymy27+nPJ/IXDg6qvQLt9mJuuumx\niN6bFmASq7UQn09tFt/RcUSvPtvS0oDD4ebcuRbGxqYq0JaXr2T16m0cObJVv64xShp5f1r/TmMK\nZ0tLQ8wIZeS97Nnz+bDxZzKfxmMj5zadNaSJ4UT3kQqaVxugoaWF3WvWTOs8ZhNrnsxhJqvLpoom\nJrXxJLrPSG9oXZJjY7VPaQSeR+3Fma0UovbfXAK0GrYXMSUmS4nt40xnPoWsINNfQxN+jf/qr/6K\nv/zLv2TZsmX89Kc/5bbbbkt+UBpIqq0gCFlJPLGXSATGe17bdnZwEK/LRbHdHnZszb59+ofM+qoq\ndq9ZE3aewUCA492qt6YyP5/T996rH1u2Ywf9frW66JVFRfgnJvBNTHC9x8OetWsv28inUcg4nRVU\nVFRnrQBNF6Mg+tfea/loLIgDP9/lf1PAaFhqqeYvtFrzw3plOp1unnvuVrq7p9J7R0Z6ovpn5uUt\nYGysB4+nGofDTWfnYTyeakpLr43q1VlQsIj6+nf09iuJhF+kqDOmyRqjacb9Ir2vkeM3QxAZr+f3\nD6ad/qylojqdHkpKluFwRKc4p8L892pHfkI1Crd65kakxPNmQvR4tW1aumyyY3cQ3XfTgxo1zObP\nqG7gj4C3UNu8aCxArcBbCpxAFdORaHPiAZYxJbZz/z04V0maaltDZr+GGR7/6quvct111+FwOHjy\nySf5sz/7M958802qqqrC9pNUW0EQ5h3xqsMat6965pmodLhYx2nb2kdGON7dHXXOgpCRvtrjYdvq\n1VHnOTs41W6ia3Q07NjrPWqz8UKrlUG/n67RUfr9fg53dl7WVW216JjNVoTPd35GWllkklaZybHG\nFNOPfXbeYxnv8Hn+i29ERTa1CNXQUFtUWq3dHp7eq82Z3V6ib9+06VU9tVJLrb3zzkMMDbWFic7y\n8pW66DReN57oPHt2d4I02aljjPfa2v
pw2Dkjx28GxutpEeF0zq/dR0nJMnp6Yqc4p0JjbS31VVXz\nVHRCdKpq8uqyxt+Zo0cfnIHquo2on5YjhWOs8WrpsjeHfn4VVWjFEp27iRadCmrV27kUnZH3GOvz\n+gDqPY8ZtpWg3m898AGxRSdMzacVtS/pAeBb0x+uMPNkWuQ5w+NvvPFGCgsLsdvt/PEf/zFf+tKX\n2L9//zQGEh8RnkLOIb6E+UmkpyqWGIRwkbiooCBKZGrPF9ls9Pt8YefSKHU4ws75PZst6kOm8Tqv\n1NVRmZ8fczx71q7F43RyaWKCgVDkE2BFWVnYfmbMyVwxnXFoAmDBgpuBmWllkUl/xkyONaacXll5\nAwCryor5G+8wd955iN/+9s2Ex2jzECn2tMebN79FX1U9P7/zEPe5vFSHxJ5RTE61fHGn3CPUeO/G\nPqVaBDOWUE2UKjwTfkPj9TZteiXt82v3kalnd7a92sm+CDH/717kJ9REok8l7AuXj/fPQG/UR1Cj\neFuZSiON15NTows1VfYCcDLG2OOl0mZDlDPydS6JeKwJ0ULDz6XA14AHUft0JkIT537DtilxK5+l\nspDkv4Yze/wsIMJTEISsIDJSGS/iYNxebFf7HRrFYGNtLR6nk+HxcQ53dOjnyrdaAbApCk0bNoSd\ns8jhiPqQabyO1+Xi9L33xhyP2+nkhooKAFaWl7OksJBypxNPSKiaNSfLd+9OKPpmUqROpzepJgCM\nUTqz02wz6c+YybFGwbX7jjupr6riyIZN1K2Ln9IZS6RFij3tscvl5ddrdnPY6Y5bNkQ735YtH/K1\nr704rb6Wxj6l8YSPcdw/aD0Ztsa08ba2PmJa9Cs/vwKnswKHw43DURI3apuMXCvCk8kXIdMj8hOq\nseBObIy/Mx7PCv1n875QioxqGrdF9uTU0HreFgAvhY5bCJQBawmPFEZ+5E2YETgHXIp4HDRsv4B6\n/xtQ5ydRUaFIrg/9vxLYnvkwhZkj+a/hjB1/8eJFDh48yNjYGOPj4zzxxBO0tLTw1a9+dZqDiY14\nPAVByAqm46mK17ok1rlu3buX4z09wJSPM1WS+UoHfD5WPf00iwoKODUwoHs+66uqcDsc+rEV+fm0\nDQ3FPU+8OdGIHHeYD9Xvj7q/ZONOlWz1u2XSn9Hs3o6ZFlOKPH5TSHTGcqxlSqx7T6U/ZCwvdKrH\npsr861OZGpm1SZmdwkDGdQOxi1VlRiyfZiLvJkAbcCuq6Pwp6qduY4RT80LagMnQv2zAguq97Elx\n/xLgI8K9uA7gBuJ7N7V1YUctRrQ9xj7CbJLN7VR6e3tZv349f/jDH7BarSxfvpwf//jH1NbWRu2b\nicdTqtoKgjCrxBNDjbW1afW/NJ4nkljnKnY4gOhU2VTGF6/CpXHfRQUFuvAzXqfu4EH9WKfFgm9S\n/eCTSgXcxtpalu/eTdfoaMxxG8dVmZcXdX9mVeZM97WZLTKp8JnKsemIyUyrqUYe/98cbm4bOMNy\nWwH5tY2Q5MN9OmONde+pRIDjpb9nEj2OxMxz5RLTraqsMjvVSyPXjXlfChgFUh3hAqmR+D05teM+\njyrMzhAuOhXgfOjncZPGahZfBl5JY/8bUe9fS5EuBa5B9W5C7NfduC48qCnMUlxIiI3H4+HVV1+d\n8etIqq2Qc4gvIbeJl7aZyFMVK4001nm0/bYeORIlkhIVCzGuqcjzNjQ3c7KvD4Ayh4NjnZ2U7djB\n2hde4FR/f1QBohVlZdR5vfp1jB/W8w0iOZXvPB9pbeXTxcVU5ufzVIwKuWE+1E2b4vpUPU4nncPD\n007DNb422eI7nQ1STX80tkEpL1/J5OQfJ9w3VlpqpOAaHThDadcxulJMvcw0VTOV1NR4v0NmprXm\nWoqsWSQqBgXJ/u5lWpFkrtEE0mFU8Wmcg8jcQWNvylPELpCkESQ7vJyxOA7Ee/+0od5fsWGbdm9a\niv
QHqOnEMPW6R/bt1I4pQk1VDk/Nlc9SwlwgEU9BEGaMWNHDWFGTRC1QItuZLN+1i9P33ZewEi3A\noscfZ1VFhd465ZHWVnpGRth65EjCtNMwsXbpEofb2/XUWUVR6BlTPUOHOzv1gkMepxOvywWKwt51\n69SfQxijhfWHDnG4s5MVZWXsqKlJOn/GHqE/fPnlqAhpZCQyMhJrt1rZ6PXSOzqqR2Mz7Uk4nSiq\nWSm/s02q0beBgTP4/WoD+qKiK3E4ihLuGysyGhnxSjfyl2mkMJUIsHGNpXusmeMQIkkUFcwFEgln\nYxpxBfAcU1HNyhjHLUctOFQNvBbjWjayI/oZWWW3HNXHaWyPshZVjK9AbQcDU0Icol/3yMi39nx/\n6Dy5+sWEMJ8Qj6cgCDNGLE9YLF9mrP2M2yrz83UBBqrQW+HxUGizsaOmRj9PpCdSo76qip6RkZj+\ntEi08XVeuqSLXVAFrtvh4HCn2kttRVkZe9et4/bnn+fC2BiD4+Nxz20UgpFjbmhu5vm2NrX3Z0UF\newxRX2OP0I1eL3sTpOYm8nsO+/2meTSn4/eM5w1Mh0w9lNMhVR9oOv68VPdN14Nqhmc1kzmei9fH\nbF/t5RRhnXuMgvLnwMPEFs41TImpCqZSZzWBZjzus8A51IJCdwH7yA6RCWqCoZUpwWlFFYKtwBdQ\nxxo5BwOk94WC5octQm0zsyd0XLrnEWaKbPZ4poP08RQEISuJFZWMlVIba7+odiYhD2ORzUavz8fh\njg4cVmtUOq22n1bxVotcvtPfrx+vtVmJhTY+7fhyp5NyhwO3w8Evb7uNOq+XjV4vRzds4KcnTtDn\n8ySAV+QAACAASURBVOmiM7JNi4YWJTSOWUtZfeqDD6Z6f3Z0cPXOnXoaq9YjNJUIaay+o9p8mtmT\ncDrniucNTIfZr/qZPP1RI5300FT3TfXaxv0dDjcHD9ZNu7rsXLWnmS7Ga+7ceU3a9z0XY85+ItM1\nZwpjBduHiV+K0xgN/WLoZ2NU0HhcFzCI2j7kWbJHdK5FjWYaP48XAi6migVF3gukX6K0EdXLOYwa\n4dTWdKalUgXBPCTVVsg5mpqaqEkhTVGYe2IVpdEic2eHhvAWFlLscFDicFDhdOIOFQAyHptvtfLg\n0aN8rqyMm+12vU1KpIjRzvu58nJustn4n7fcwsOtrWGRSwvox3/xqadY6nJRYLPxPZuNu+64I+bY\nO4eHOd7Tw+HOTh5ubQ1Ldz0zMMDFwFTK1BNf+UpMMZYsLVijMCSqNX/pnrVrUy7qY7zGU2vX8nBr\nK9tWr+aR1ta4RZimQ7x0y0SYUZwom4vORKaHJnqPmslU0kwLHM1Vexoj6UQhtWvabEX4fOd1AZnq\nfWfzmorErL97yed3dgoVpe5LNaaTamPKR43o+VBbhWiRPWNrlQDR6azTwYIqwl+I87xWNTeSItQ2\nKM2oVXcJjVvrqTmIKg4row+dNm7UKrdaFeDEa1o+SwlzgaTaCjmHvFnmNsa0Sw2P00lvKAIZmYoZ\nmaa5bfXqmCImXjqnMTX0/YsXGQgJxXKnkwuha942NETTX/wFn921i66REcYmJii22xkPBrEoChd8\nPpwWC5PBIEHgS5WV7L3jDrYeORKW2msFbq2sjPIyxkov1sa1oqyMRYWFOCyWMFGdbnQyXmsZM9Jc\ns4FEqaTZljI5V+9RqabxxpuveHOcyvymkuqbynnSaaeiXXNsrJ/OzsNptyIxu6XOTGLWmko+v8na\nl5hFvPTPVFrD1DAljkEttrObqdYqt4Yem9U6xYNanCcSK3AWeDBiPEa0scGUZ9MFDMXZJ5J0W+Wk\nnlYrn6VmH0m1FeEpCMIsowmuErudi4GA6p10OuMKLm3/IpuNm6+4gkUFBTF7YUbut2fNGh5pbeVU\nXx9nBwd5ZdMmvtvczOGODlaWl9M9MkLn6CjFdjsnN2/G63Lh3r49
LIKpoQBWRWHc8F5W5/VS7HDw\nn++9p2+zMPVRR+vhqfs3PR72rF3LzXv30jUygs1i4aYFC8KipPHEoxnznaqYzcVCQJdr78dIjELq\nB0533I+r6c5XuvvHE5jJztPc3MAHHzyF399PeflK7rrrN7Pmb71cSP7lhFl+wOn2Fq1hSsTFE2Sa\nOAa1sutypnpZPkJ0L89MsKPeQ+T5lNC1J1E9mu8DHRH7lKJWn53ybDawijMsoIATNOLHnVTg15B8\nPoRcQYSneDwFQZhlNI/gW5s3617BPWvWxPUNNtbW4nE69Wjgk++/H7MdS0V+PjZF0fdraGnhzMAA\nx3t66Bob4+HWVv06v7nrLpYWq6XqBwMBlu3aRdmOHYyMx/YEBSFMdAI0nzvHzrNnw7ZpolNLqT0z\nMDDl3+zspKGlha6RES4GAlwI+VS3Hjmi+01j+V8zbV+SriczXrubbCaXUiZnEqMv1Oigi3Qvpjtf\nQ0PqOrfbS7jppseS7h/PO5nsupHVgdPxt6bjh72cSe4xTtS+JB3PZ6IVqBHr3Mkq3NagptCuBzai\nOsaOh67zbcJ7edpRo4vTQUvbDRAtOhcBt6D6NvtR7zNWRHQQ+AxqJBbAzRmu5BitHMBPA4tJHlU2\no1XObPl2BSE5IjyFnEN6T+U2mrjyuly6yErUw9PtdHJDRYX+OBASgJq404TZ821tujjUivyE9dC0\nWqk7eJDhUJXYtiE11ckK+E6fpt/vJxAM4rRYuL68POl99Pn9+CfDU7lcdjvrlyzh2tJS6g4e1Asa\ngVogaNvq1dgt6tuuBfBPTnKgvZ2rd+5kyX/9F7c+91yUwMxUCCaa21iYUQhotsm23o+pvEfN9EfB\nRB9X052vwkIvAIHARVpbH066fzyBmey6xuNqanYkvc7lhFl/96JFerKVmIqAjEUqginWubU+lbEE\nmbHf5+9Q/ZLGL+N+DbwV+tkOrCLc95kMLVCzErgt9LM1Yp+VwDuE99gsI7yQkXbNCVRxugxtbgtC\n46jGwza8wFYSvwMkmg8jiV7H2K+hfJYS5gIRnoIgZDUNzc0MBgI4QoJtZXk5VxYW4rRY2HrkCPva\n2jjW1aW3HSl1ODhxzz24nU4q8vPxhITt2YsXdQG36umnGQztPxFxva8tWcKCUH/OVLEp6geWodA4\n24aGONbVRa/Px6KCAr0Krtvp5LW772ZxYSGrFy7Uj+31+WgfGeF4d/eUEH3iCW7du1cXr5p4ziT6\nmQpmVsCdLcyKeM1mXGC6H+dTJdHH1XTny+FQP2SnGiGNJzCTXTfbvkC4PEi2EqcbcYu3Ao2/ZZpA\nM547UQVWbSxFqG1VDgDG96gxpn5zi1FblfQBi1EjlLEwCkstq+VK1KimhymB+TnUCrS/CY2tMfR4\nI2qqr/aXpIQp0arhQ5vbRhqpp55DLMOtR2qvJv67TqoVaRO9jmZETYXLhZ07d7J8+XKKior4zGc+\nw0svvWTq+cXjKQhCVhHpMVy+e7few3NRQQHv1NdTd/CgXjDHrih6FNSCWjRoPBjkeo+H0YkJvaJt\nZV4eXWNjVHs8OC0Wvc+l2+HQxd/K8nJ+c9ddACzftYuusTF9XEU2G8MxUnEVpj6uACxwOvmCx8Ph\njg48TifLSkoodjioyM8P86YCNLS0cKi9nQG/Xz++0Grl0kS4HF5cWMjbmzeH3XesQkHZVmQn16hh\n9txUs1XCxQw0D6XVms/QUFvY+pI1l+skW4lm94CsYeq3rA5VfCY7dwPwPDCKKjSXh85RDTyFWuG2\nK3SuAKqYLEZNg60GrkUtAvQuagQyiCrGalArzx4L7T/I1DxobU5AFa5vJxijNodam5cS4AHgCKro\njDW3xp6bw6FtmbzrJHodpY9ntpDtHs9Dhw7xp3/6p+zevZsbb7yRc+fOEQwGWbRoUdh+4vEUBGHe\nEJla6jOIsPFQaqvWp7LYbufG
BQv05yeB8z6f7qk09rOsWbRI7ek5MsKr59Um5FbgKwsXsqykhDyr\nFYfFwuefeoq7Dhzgc+XlrF+yhCWh6Gq8N8vIt94en49jnZ2sX7IEi6JwvKeHA+3tvBCKzGr3paXA\nVhvSiAHyQqmuJaE+otUeD29v3ozb6UyaBit9CTNjNuMCqSbQZQNapHJoqC1qfcmay3WSrcRMekAm\n83BuT/HcZ1CF5UXU1iWnUYXhIdT+l6dD97AqtP8EqujUPJRtqD7QXuBroe2ngBeBvaFjTxI+D8Zx\nJhKdMDWHH4TG4w6du4v4c6sdc7PhOpm86yR6HaWPp5Aajz76KI8++ig33ngjAAsXLowSnZkiwlPI\nOcSXML+JFFfXG4RZz9gYDS0teF1qwYjBQID3Ll5kQV4eoApRjRVlZbxSV6enjZ4bGaHX56NzZESP\nkE4A+z7+mOMtLYxNTNB6/jztly6pfTs7Oii026lyufBNTjIYp/BQLALBIEc6OsI8oFq01ON00jk8\nrKfL7omIWg4FAtR5vWHFl7SUV2Ma7COtrVFpt5pHbo/zh/zDpftZv38/Dx49OuPpufMFs8RgKu9R\nufhRMJZ383Iq7NTc3MC+fTXs378eny+zZOx0zzVzf/fMWomxRKYx/XMVU4WBNpLeb1mkP7MHNbr5\nCLAQqEIVlu8a9lnJlGDUisAVAz8DPkEViDB1/17C5yGdd4N4c5hobrXn9qRxnemMIT7yWSr7aG5o\nYF9NDfvXr8c3kP57TCbHT0xM8Prrr9PT08PVV1/NkiVL+MEPfsCYIfPLDER4CoKQVUR6DPesWUNl\nyHOpidG24WF9/56xMW654grqq6o4uXkzdV6v7qnUChjdvHcvL4VSVItCwhbUiGdktVoNh8XCsc5O\nPVU35j5K/IwS3+QkYwax6p+cxGGxMD45qUdBtchnucFL6Z+cxG618tMTJ+gZGYlb9dYYGb5m507W\n79/P9at3UFVVj8+9mpbuXg60t3Pg449zrkrtXHGyuYFv7KvhZROERTLMFDGxMd+xGsuDORu+zJmf\nq9QwRnd37rw6o/HMv0hxLI+hMWqopbQeRjUopLNWGlHFqpbdokUH/ws1qtgPdDK1ziuZ8mLClMgc\nBD6L2ucz2e/FbH01lItfQQkzxcCZM3QdO0b7gQO0NKT/vpDJ8d3d3QQCAZ5++mleeukl3nzzTU6c\nOMFPfvKTtMeRCBGeQs4hDY/nB/HahERWYXU7nZy+994wMeotLNT3L3U42F5Tg9vh4Oa9ezl27hyX\nDL04G5qbef/iRb30Q5HdrotTlxYhXbYsanzjk5P0jI3pwjRSYlqBP6qsxB5HfHqcTr2Crba/f3KS\ngdDYtCq3AK/ffTfO0L42ReHQJ5+w++xZXTBeHRKWxnnSIsNFNhvnfT4OtLfzg9aTrFmzmyK7GgGu\n9nj4YqhC70xUqc201Uu2YZYYSOU9amDgDI91LeGH7ctZvftnMzB/5pcvilUcaDZamWSLSNOiuzZb\nET5fb0bjSTdSnP1/92IlqmtRw2tRi/xopOtxc6OmxL5LeHQwuueyyirChZyxAu0YpFTUZ/6T/Wvq\n8sNWEHpfqK5m9bb0M0gyOT4/9AX/D37wA6644grKy8v58z//c/bv35/2OBIhwlMQhDkhnTYhRjHa\n0NzMqVAKiRX4QkhYRfbM/OLTT1Ozbx9PffCBLjqtwCt1dTy7bh17162jwJCaG8lkxOMg6OLQqihM\nAMfOnWPt4sUsLiykZcMGFhcWcrfXi8fp5ILPx1Ao4mlTFL0qr8ZVLpcurr0uF13f/CaeUGGkgUCA\niwbx3BsSlvcePqxvq8jPp8Lp1M9rFJbGqPGetWtnrEptLvb8TMRspo3abAV0s4D3WMbvRj0zMH/Z\nVckyk6il8XWxWvPnLPqpRXevuOJmfTzTXSfzr4JvrNRULZrXxlS7kRJgxzSvERkd1ASlhfB+nY
4Y\nY6s0XB/Uoj69qAJ0OdHiM52MAemTKZhDbWMjVfX13HnoEE53+u8LmRxfWlrK4sWL075mukhVWyHn\naGpqkm/q5gHr9+/nQHs71R5PWqKoZt8+vbKrxtL/n723D2/ivNNGb1lf/rZsy8QhBgU3hKYfCU7c\n0ha81tZOKSbUboKSJu1F0rO1djdtt/tuN+w53bNnu233fa/T9Lq63Z7Tbjh9NyRN/YKTNIEU3BQT\n/FGSOk1DIF+NuyTQGjDGIGHjD9mY3/lj5hk9Gs1IM9JIlsxzc+nCmo9nnueZkTT33L/f/SstRXhu\nTimpAsS7zTptNmxZuRI9IyOYW1iAx+3G9SUl+N2FC8A772iqnjxYmZaHhoYQmp1F7+nTcX0PDgyg\n+/jxGNIIALVFRTg/O6vkljptNtx7ww0xLrcetxsrfvYzjExNAZBupdTk111QgNkvfSluHpjrrVli\nuXv3+zE9PYqCAifuvPMVlJX5lHVqd+FjQ1+Ncy9N9RzmKph7a1PTjrTIgJHvqEgkjKbuH+G3M15l\n/rTmOHXklpPl3r1+jI5KLqH19QG0thp37+TPy/PPd6TcjlWw6joxg/z+3VO7vvoSb24YJyGFzf4a\nwJcghfE2IDbMFpA+B29ByvV8HsB3IIXn9nLbqB1l/TDucW1m29xBfl9T+Ylcd7X953/+Z/T09GDf\nvn1wOBz4zGc+g09+8pP4l3/5l5jt0nG1dSRaKSAgIGA1GKFx2u1o9/mw0+83RViKOdfXi/Pzkro4\nOxtn/sN/JfJlWGZkl9zzkQguGAxvdNhs6ONyRsORCIKDg9jR1KSosMPhMI5duBBHOovtdvymowM3\n7NqlLFtWVIQ9J04o2960ezfevuce+EpKFOKpJp0A8PJnPxs3D+mQvunpUczPXwQA7N27AZ///J+U\ndUzNBKSyL1+YHlZu+AcHg2ht7UZXS0vMPKihJq+5TkxZ2Gi2jvWru78eM38spBSIznHqkNShXDkH\n6ajJ/HnJBTMjq66TIIIYxjCKUYwudMGTAw8IMoMuZOYhiA+SURAgmfSojxGEFHJ+DBLRBCTS2Q3p\nwcxNkHJEtaICzEQM5FZ0gYBAqvinf/onjI+P48Ybb0RhYSHuuece/OM//qOlxxCKp4CAQFbBK3Va\ntSjVUN84f+3FF9Hzxz/iA5WVqHS7cW52VjEAsgMokOt68ornNYWFIADnZmeVZWpFlEeVy4Wpy5cR\n4Vxp230+PLtxY1x/tg8NYec77yhqJoPDZsNlIqnkS02NUlP05qoqjE5PY0zlFBeor8dLZ88qxPOD\nHg9WV1Tg6zffjNv378dQR4cSVgwgjvxqzZWaZKjX79lVh0hkHHZ7Me6++60YxVOtZr548LMYGemB\n19toODzQ7LnOZaRD4NjtbzGkW3C9mdu/v830HCdDrpwDq1TCxVAbMwU//OiXlbIAAujOE6Usf+BH\nVIkE4mtcakUFsE+rE0AJpLDgZNdZutEFRr8hjG4nkKvIdcXTKITiKSAgkJPQullPVotSDbXyNjY9\njXORCPpHRxGor1dKqNhtNiwQYYEIdSUluMjlWJ7VsAP3ut04F4koyimP8NxcnOI4cOYM2vbvx8Tc\nHA6PjSn9+cXJk3GkE4i65U7Mz6P39GksKyyEr7QUZQ4H3lIprWwu7ujpUYjnDRUVeGbjRgDAzF/8\nRcz2/Lzy749duKCEGwcHB+NIhnou/+edr2Dv3g04cM0j+Omhoyh2vKmcJ7Wa2dLSZfqG3+y5zmWo\n584MgWM2P4B066hHL1KZ42TIlXNglUqYTVU60yiWlbJGNGLHoiplmSY0VrQf28bAwHYDYelMiVwL\n4HpIdUP57VjOKA/+0xow2FetdszA6DeE0e0EBHIXwlxIIO8gak/lD7TMZ9TlUpJBfeN8fGICgFSz\n8+F165T2/vzaa5XtXt+6NaaGpvqLrqG6Gr+9804E6utxdO
tWOP7wB2WdQ8elNjQ3h56REYV0Mlfa\nuYUFze3VGJudxclLl3B4bEwhpaUOB9pWrFDmotzlUsZQ4XLpOsby83pTdzfeunAB/aOjCumskOdG\nDfXclZX58PnP/wknpi/HnSe1u3Aq7qVmz3UuwwyBU39HGQ3Ey4RD7FI6B0sNXehCAAEcwIGkYbaZ\n/d1L7ICcfjkbKxyWY9sw5nTMDI8OAXgGxkjkYoTNGj2mke2MGx2JeymBxYAgngICAhmD1s26mtAk\nA3Nv9cikzFcmuRdOzM/joaEhpb0nb78dq0pL4S4owH0HD+JDVVVKG7x6aYNEwD6xZw/6T5/Gjbt3\n4zJHUi8TaeZXqnHswgXUPP44ktHOKpfaYVEKCQaAS5cvo/fUKVyUCWNXSwtWlZXBbbfjuZMnFTJ4\n689/HkNCi7lapKMzMwqhLLFLLV+U50YN9dwx6JEqM+VStLY1e65zGekQODOl6K1GqueAkY3v7m/D\n7ZFw3vh15krNTyPwwINudOdAbmdiQpN+ORsryFxsG8ZyfVOpkbkYn1ajxzSynfVllAQErITI8RQQ\nEMgYtPIQjUIrfDRQX49LsvKoZarD57PVFhVhdGYG5U4nJubnUepw4JLKgCgTYLmdDO0+H4bOnsWo\nHO5rB1DucsU48BYVFOCjy5bh+MQEJubnMcGF/jZUV6PY4VDyWGvcbtzi9eLY+fMYm51Fo9eLp26/\nXXLbjUTQe+oUGr1eXJybw9jMDJwFBXjlzjvhKyvTdaHVO09m8gNzJZdQwBrwLrSv1Afw/7V254Vf\nZzruuVcvEucopp97bIXDcmwbkcj9GBzsQVPTLXC7n0yj3aUG5iCszmcVyAWIHE+heAoICGQQ6She\nLJyUEbRGrxdFdjsm5uZQW1iIp26/Pa5dXrn7TUcHAvX1OLZ1KwL19fjYsmUx25Y6MpPi/ufLl6NI\nVh6dNhtGp6bwoepqtK1YAXdBARaAGNIJQKoJOjqKkenpGNIJSPMwJIf3ljgcOCeTy49fc42i8G7Y\nswcDZ87gt2NjWFZYiBvKy/HuxAQuzs9jPBLBhr17AeirdnrnyUx4KdvW63bj9NSUIZVUIHfBFKWQ\ntxFPNO3IG7/OXHC9XSykrvYmVgbN1xxVh3smat9oaGhsG273SbS2noPb3Quh7PFYzPgKAYHkEIqn\nQN5B1J66OsDUuYbqalyIRFBXUoK3QiGFtBXZ7bjV60W506kY4oQjEdz6859jeXExyp1O1BQVKbUy\nf9zUhBt37cKc/H10+3XXoffUKcnZ1kAdTyP4QEUFGpctw7PvvRdX3qXa7cbFubkYNRSQyKmWOREg\nhelqGR1Vu914v8cTMx88vG43xmXSZwdw/N57lTBbI1BK3hQUoNTpxKMGSt4w1fT01JSizl6tyudS\n+I5i7rE3N+3AV9weU1rVwEDQwnqk5pCK620+lDUxck3ljtrrR+ZrYAplL10she+pfINQPIWrrYCA\nQI6Cd1XteP55JYyTgZUnAYA1u3fjnXvuwfahIVyYncV7k5MApLDUczIB+8jPfx5D4EocDlQ4nQir\nFMZUUVtUhMMdHVj+xBNKrVAe5zn1rwBS3qmroADuggLM64QAT12+rJtvysYOQAknBqTQ3OrCQvSe\nOgWnzaaE2ZoB7+AaqK/H9qGhpKVEmGratn8/gFiV1EgpksUkKwLx4N1jzdIXa+uRmkMqrrfDGFbK\nmgQRzNuyJrmj9maqBibvbPtjAA/B+tqgySBKmggIpAMRaiuQdxBP6K4O8OGfLIyz2u1WnpbZuW3H\nZmcRHBzEcydPKqVRKpxO3CLXvSyVQ1SZ2thQXY1ylytKOtNUO20A3r77bmwfGtIknWowMrlw5YpS\n8oX1k2FtVRWucE8UPXLZGK/bjQV5+YcrK9Hu8+HY1q1o9/nQ4fPhhTvuwJOtrQjU12Ns27aY2p9G\noQ6x1XIn1oNWOK+R/d
M3MEkPVprSsO+ofDK6sRKJCJBxz03rEEQQfvjRhjaENY6aO2VN9GHkd898\nSGymYCbc08y2vHHOhwD8CsBqACfT6axJLB3zHnEvJbAYEMRTQEAg58HIjMNmA6NpFTIRA4AyhwMP\nr1uHCEf67HJZlA6fT8nvLLHbsaywEM986lM4KauiPHgyW66TA1pss6FtxYqYZZUuF+7o6cFT775r\neEwFQIwj7u3XXYc3AgF0+Hxo9/lwaMsWlHB9KHO54HW7cZkIYTm8tnj6GIILP8C3XvkNwpGIMv7t\nQ0MYm57GfQcPKnmWZhxq1eTRTK6nVr6okf0XW62xjPgGg4DfD7S1ITz+VrTNbwWzy7YWEYkI0GLc\ntjNFswc9CGoc1UxZk1xGJsrxpAYzbrJmtuXV0QIAFwGMA9iA7D3SyGa5lcV4TCMgkFmIHE+BvIPI\nS8gv8GGWfM6lXshlonb+8/e/V8ia02ZDicOhqJZetxsEKaSVd7AN1NdjR1MTan/6U0TksikdPh9e\nOXcOI9PTUmOqHM+bystxXWkpek+fjutHAYCm2lq8eu4cJg3W8DSClSUlmLtyBZGFBdxWU4PlxcXY\ne/IkwnNzuLmqCmUOh1JDFAAKMYfr8Qd4cBG/wcch6a5A24oVmJqfj3OY5V1na9xuNNbUxJyDROGw\n6bgTG90/ldw8K5G+c6cMvx99/f3wA9j/r7UY8Y7Ce74Rm799AO4Zj7k0tiWIxcjMa0MbetCDRjTm\nLbm8un739MJZeWfb1QDGMTBgRzjcCIdjGC0tIUhfL5n8kFnh0GsUfqSW/2oMV9c1lRvI5RzP0tJS\n2Lg65jMzM3jwwQfx7//+73HbihxPAQGBnMVzJ09idGYGAFDtcuG8rNYFBwcV4xkjOYDD4XCMQjhP\nhGmZXJY6HIqZTl1JCd5fUYHe06cVh9X7Dh7EHFers+/MmYR9/v3EBMpdLk3jnytAXL6pHZJ6yXI3\nU8HU5ctKHmjvqVOocbsVZfOtCxdQLtcDXVtVhT9NTeF8BPg9PogyzICRTgB47fx53CLXMOUVRqY6\nsrBjFvbKzgGf18kvB6IqZqowsn8quXlWoqWlyxriWywrIo2NaPnSUxg89hCa9u2QSGe+WMNaAD3q\n0IXs3bazPjixB+3oxE78W16SzqsPTBcHpLPIvhc83N+vANiAcPg6jI5KNYkHB4HW1kx/yPg+ZBrZ\nVFcFrnZcunRJ+Xtqagq1tbW4++67LT+OUDwFBAQsg5pAbh8ailEplxUWKrUn+RxAIzUgmcstj2q3\nGxtqa/Hbc+dwenoa5U4njm3digqXC7c+/TTOz87GucuqcXNVFd4OhXSdZY2gyuXCBQ13WQZXQUEM\n8dXcxmZTHHdtALR6U1dSgte3bsV9Bw+iZ2QEN7ou4rrq1Th0RlJCCwsK8Pt77kGFy6UojMwYyGm3\no8ThwNT8PHpPn445B+/fvRv/dfEiFgB8yOPBYHt7QmXTyIOCqxbhsBRuu2MH4JFJTjZFEg0shnGT\nH/FaTbZtWbT6IKCP3DH4Mq6LRyMVGrB580q43TsTbp8vGAgGER5+C47i42jp+g3cHt9id0nAAuSy\n4snjsccew7e//W3813/9l+Z6oXgKCAjkBNSq2dj0tEI6PS4XXv7sZ/HQ0FBcyKVWDqCa3DCXW75c\nx/lIBIdHRxXToIn5edy4ezeG77kHK0tL8Z78BM9hs8WVMWGoKynBssJCzbBaI3DYbLipshKHz55F\nid2OKY3wW5fNBp6Waimpc9x7rZ6udk3hmyVP4sWDP0OV629Q43ZjZfVN+ElzMx789a/x2vnzeLG9\nXXGw1VIyA/X12On3x4W9jnLn6cLcXFIiqT7PHpdLEFEGjwfoVlGcbIokGlgMl1ktrUZPx7IKauJU\nLBMnoRcZw2/Dz6FM/lwfHPwi2lqfWaSeGNfFLYtUyDGEh4cx2n8YADAYfAit6u8UgSWJ
dB/+WPXw\n6LHHHsO2bdtS2jcZhLmQQN6hr69vsbsgoAM1gWTvK10uvHbXXfCVlcUZzwCxZjbbh4bg37sXMEr3\nVgAAIABJREFUT737ruKEeuOuXbjv4EHsaGrCLz79aRTZJRsgO4DxSEQJSQWAuStXsGHv3phjr7/m\nGmV9md0ec2xXQQEK/vCHpGMrQNRZlsdlIrw6Pg4boEk6AeCSarndFv9AkPWKN00qtNlwXXExql0u\nFNFFjI0dxv8YqcYz7x3HuUgEvadP46GhIezbtAmnvvCFmLIpzEzozVAIQPScaJn/zMr9KwDQs2lT\n0rlIx/X2akCufUcthnGTlldppgMH1QZR6j4EB4Lw7/WjbX8bwnnmMpyNa2rCIYX6v+cFnmhaTFXG\nuOHQ4hoqZc78xyGH7HsbG9G0IzOf2Vz7nhJI3+TOCpO8kydPYmBgAPfff39K+yeDIJ4CAgKWQe2G\nyt6/e++9CWtJ8mSIkZiQTCbVOYketxu3yiVCGJ1rqK6GUyZzxXY7fv2ZzyjHri4sxJHxcYk4ulyw\nq4hnKBLBb8+di+vT8uJiLCssVN5fAXRrfi5cuaKpUlbJeZnqZc6C+K/eBUjq69GtW9G2YgWK7Hbc\n4vVi+vJlnJ+bw7H55XgCX8AFx/swTRI5rXS5dF1i2TyORyKoKymJIfVqZ9u1ck7oFQDfOXJEsz0g\nSmbnr1xBh8+XkuutQPZRVFQDt9ub1ZtzLepgpnBGKlATbHUfhsPD6B/tR89ID4KLULJnMZCslAyP\n11puwyv1wMDmtfiRe2fGerR0nFqfQ9Sj+QFLW27p6kJ9IIDNBw7A7Vk6Sq5AYqT7kNCKh4w//elP\n0dTUBJ8vM+HdIsdTQEBgUcBCaY9PTsJXUoKTly7BV1aGd8JhjEciWFtVhevLynBJzklkxPHVu+7C\nXw8OomdkBCV2O0qcTrz82c8CADbs3YsN11yDM9PTStjn9V1dSm1PPahDcddWVeHQli0AgJt278bo\n7KyyzobYUigFAApU+zttNthtNly+cgV8hul1xcWYnJuLyTtlDrylDgc+ds01eFIm4HzeKwDcVl2J\n/7P0GXzl3Cacmp6Bw2bD7+68U7dOJ8uJ5XM59XJptbbVgt7+6breZgq5k7O2uNi716+E2tbXBxbV\nxCmTSOaM3La/DT0jPWj0NuLA5gPwXAXXgx9+9MsBzgEE0J0gwDmMMIIIYgd2ZNCEyY/sZ95mKru4\nCkBI/rsDwGKFJgvkC5LleKbr7m6FO/yNN96Ib3zjG3jggQd0t0knx1MQTwEBgZQQDALDw5KJZ1dX\n1Ecl6X4y4Tx24YKiaqrBTHQ8bjfCkQhqHn9cIXZs3epduxQnW54EqcnRk6ramg3V1Tg1NYUxjkzW\nFBbiHPfeV1qK60tLcXxyEtcVFWFofFxZV2a3JyyjoudsawdQ6nQqJNhhs+FTdXX40YYNuGX347h4\nRVIx7/TV4emNbQoZbKiuxsrSUuz0++Fxu7Fhzx4lx1XPiAnQJoN6BJPflpkRaeVrGiWouQKjhGup\nE1TLSsXkOcKRMIKDQexo2nFVkE4gF0vJpFtQJxUS6UdmyO7tAHoBNAB4wWBfBK5m5Lq50IsvvohP\nfepTOHv2LEpKSnS3E+ZCAlcVRO2p3MDwMNAv/5YHg/F+Knrgy6sAUk7jxfl5lDudmJifR6nDgfd7\nPPjaiy/iVyMjiCwsKMVCWBitx+3GR2pqFBLEh3eysE+v243TnD04Q3VhIZ751Kfw0WeewdjsLNZW\nVeHs0aPAihUAJHLI18EcmZqK2Z+RTlZChY2hwGZDaG5Ot5zKAqCQzgqnE0e3blXCj9/nGMOrc9fB\nh/cwef4U/Hsv4w8XL6La7Ua1262QTgAol3NA2bjVynG5y6UQRjUpZQZNjIxqudMmKqui3j/XYTTs\nyGrzHfYdlcj9NxnZtZIMLyUDlnS0K4/bg+48VXuN
/e7Fz04XurKgYppBugV1UrGoylR28ZNYVLvq\nNCHupQTUePzxx3HXXXclJJ3pQuR4CggIpASuXCHMeB9EOLXQXVCAgc98BoH6ehzbuhVetxuXLl9G\n76lT6PnjHzE6M4PQ3BzmiVBot+Otu+/Gvx45ouQZ+kpL4S4owH0HDyo5i10tLVhVWoq5hQUcHhuL\nO37vqVN4aGgI79xzDwL19Ti0ZQvGOCJ8aX4eFxOURgGAQrsdf37ddQAkc6L3V1QoNUWdNptiFGTX\n2f/Ply+PyXn9B+9ruA2v4NtVAzi2UI/+0VGcnpnBedlAqObxx1H4k5/gY888g3kitHP5lYwojkxN\n4fDYWEKDH4/bDY/LhY7nn0fb/v1468KFOFOgRPmaamMilvOpzhnNFbS0dKG+PpBU5cuU+U4i06Vk\nJhBWmEQwLK4Bi7VgtKMH0i1/MqgzCpdShmE84mfHAw+60Z0C6czUTBk3DtJGKiQyU9nF6Y5FQCC3\n8B//8R947LHHMnoMoXgK5B3EE7rcQFdXfLlCPfDKz83V1eg/cwYAELlyBd85ckRR1XgV0+NyKSVO\nGqqr8cIdd8Qpcl63WyGXK372M9htNjgLCvC+8vJoKRVIamOFy4Xw3BwavV4U2e3oeP55hWTZ1qwB\n5LARp82GIocD8/PzUt1LjTqgQx0dWFlaiuDgIPpPn44JxeXLpGgF5DZ6vXhUdQ0/X/IVzLtfxRNF\ndyBy6ULcPpeJcJkIQ7IJUm1RkbKOjYEpx8kMfvj5q5XNk7xuNwZOn0bVzp24uboa7T5fjMpqpC2m\njuZSjU9GuJLBakWQfUclIvHJyO5iONFaiUxl1ZmlHWp9bAyZLemSKah/97QVcSuVvUwXv0n1CklF\nMV3kekY5CnEvJbAYEIqngMASwGIoT6xcoZHcTl758bhcCnFS35DzrrhP3n472n0+dPh8CukEYm/m\nXbJDrdNmw/Tly7g4P4/xSASvnT8PQFIjFyDVxQzPzaG2qAgHNm/GycnJGCXKKxMwO4Cbq6owkcSM\naMsvf4mburvROzKCC6r5ZuVQtNTO5cXFmrmRxydncCxSiV+dGoVLdry9uaoK7T4fHBqlV0ZnZhQF\njc3Z0a1bYxyF9a6J4xMTAKSQ3ec3b0agvh5rKipwdnYWobk59J85A5fdbogwahGrfCytkilFUO3y\nzCOZGmtUreWRS+VCzCqTRmFWu1JTsePy+woAD1vYr+xBUiLD4ac0FHErlb3MFb+RnHZ3og39CJt2\nhBUqo4BAPkMQT4G8g6g9FY9cv9nnCcpOvx9v33235g05H8rpcbvx7MaNeGbjxpht+Jv5VXK46jyR\nkltpB/DyZz+LQH09PlJTE1PmpMBmiyn/wfJAJ994A4CkUL4qk1YboKl2AsDpqSklDJiZHhXb7VhW\nWKiEDm+49lrpmNx+H6mp0SxpwvpT6nCg4Gwlqv/kw7KfbsHOdRtjapDyGJueRjgSwfahIYxNT+Ov\nVbmXz508qVwTD/T1KUT0kkyqJ+bn0fqLX+DS3ByKHNHgl4bqasMlUbSIlSitEv2O0qqZypCM7KZC\nhnOpXEimaItZ2qGmYj4AXxgI4i/2+vGz/W2I5Ek9z+jvnkTpHQ7JTTVWEbeSlGWu+M0whtGPee6h\nREIvEoEMQdxLCSwGBPEUEEiAYBDw+4G2NiCcw/cnuX6zryYoiW7Ik4HflxntMNgAvHrXXfh/3nwT\nY9PTeIc7aYUFBXixvT2mP2sqKnB4bCyGYJLqfy3wdJQplNMLCxibncV3jhzBsfPnldqh7Eu21OHA\nDz7xCc2HBF0tLUp+62jFGZw/a0fvXjeCQeDZjRtjQmsZ+kdHERwc1H3owCuxg2fO4Kl330X/6KhS\ni5Svj1rqdGqqy8mgdR4TqXwCEjIVoVAsh+c2ehuxY5HDczNds9Mo1FSsHMCy8DDWjPbDa0H+rFUY\nGAhi714/9uuQ
4e/he3I9zjcRBtDSshb19R0ZdCnOnLJYLD+WkB5KfBjAo5YfwzyWdvavgECuQJRT\nERBIAL8/6twaCBh3bs02crWOYqYRjkRwU3c3RmdmUOly4chdd8FXVhZTUqW2qAgFNhtebG+PMfQB\nouVB1lZV4Y1QKKYWp91mw4LGdxdzs61wOrG+tha/PnNGqcvptNlw7w034Gd/+INmfqdDru8ZuXIF\ndgAbamvx7MaN2D40hKfefRehuTmUhasx+c074P3LIaxpCqO80IF3Ll7Eu5OTMW1VOJ04cd99uO/g\nQc0SJ5WPPqqQTB7q+qj5UhplKUGvHmq6SKVciFamXabyM5P3JYhhDKMYxehCV0ZcWMMAfrS/Dd4c\nKy+TrPRPbD3OOnTjdeRruKlUL/SL2AGCBzuRG+PwI/v1RQWuNuR6ORWjEHU8BQQyhLY2oKdHcm49\ncMB4rUqB5EhmQmPUpMZMvcpE+wZ6e9F76hQKIJHOPRs34rO/+hUiV2ILpNx+3XXwuN3K8Woeewzj\nkQhsAG71evHuxIRufVItOGSCy74l3QUFKL1Qg4WaEMLzc8o2PCkuANB07bV49lOfkuZK46HD7b/4\nBXpPn0a5w4GJy5fj6oHqPawIDgzg5ZO/hX1hAh77AubLb0Wps9BSo6BUa8DmMsyYKuVSPVQ/4m+3\ntZZlpy88uQqgO0NHNlNkPVskPFmt1dyrx7nUkG590Vhk4yGKQP5BEE9BPAXyENmsPRUOG3duFTCH\nZKrPtT/9qVLvs8PnwzMbNxpum5GqIrsdJycnY8gATxBqiopwcnISM2+9he4vfxk37NqlEDxXQQH+\n7Npr4SoowMFTpxC5cgVlTide5+pvAsDJyUls2LsX1xUXK66zDOu83hjHW0AijXq1PrXQ6PXivclJ\nnJdDMvn6oYnUMjYHD69bh4eGhgyr4fx5KcUELqE86bHMIt1IArWj51eHji26ky4/b82Tk+j7+td1\nt82lCAWt221rb8GNIYggnsJTCCGEBjTgBbyQlZv1ZMTSj+yQ8GRk+Bd9v8Dj/sdzqB7nUgMrtmNN\nTc5sPURJB6KOZ/YhiKfI8RQQSAgzzq0C2tDLZ0uWl8rX+zT7Nc1yD9XutUCsEVPPH/+I/tFRvHzu\nHB4aGoKdc5Cdu3IFvadOocTpRGNNDQBgcn4eDw0NxRxr4/79mJybw6sywWyorka1ywUAGBofh1Nu\n0wbA43Jpkk69L2Lmgvu7O+9Esd2OMrtdIZ0Omw0Pr1uXdA58ZWWm8mkVoyNM4gqkcVS5XDg9NZVW\nTiJ/HTgrpDbM1oBlUNe4zAVzLf56/vsPfzjhtunkOFsNrVxMftn2LDlmD2MYIUiGOSuxMmvkKpn7\nrtUmSXqZhMnMpEpRmmI9TgFjsDanNZrH2ogdFrsCCwjkM9Imnjab7T9tNttZm832uhUdEhBIBvGE\nLreQzChFjxQkM6G5zesFIOUkVrhcMccwas6iRW75ZbdUV0t/r1+PHU1NWCu/Z/C4XNjR1KSYGGmR\n5NHpaVycn8c8EWwAqt1uNMh997rduLm6Gu6CArx21134+LJlAKIlVz7k8WB5cTE6fL64osoN1dV4\nMxCAx+2Gr6wMH6mpwSRHxi8TxZFgK9DV0oI7fXXwuRYwDanMzNTlyzh89ix6RkbwRZNOiOxcMXOj\nnpERlP7lIAKB1MPX1TUu2Tm9wXEeW2d/uChOpfz1fIccAp0LSGaZonW7zS/LFqnnb9R3YmfGjhN/\nXMjHjRJLfs5+DGtNklItM5Pq755UusSPNrQhLExzsoYudCGAQE6HRYt7KYHFgBWK56MAPm1BOwIC\nAnmIZDemespmMtXnydtvR6C+Hoe2bIlRLmsefxyPvvOO8n71rl26BFSL3Kprha4qLYW7oAAffuop\n/F5lXfyJa66R8jiLilDjdsMjK5k8nLKrbQEkZbb39GmUOp0I1NejwGbD78bHEb
lyBf/8yitKO2u9\nXrT7fBhsb8epL3wB5yMRxSm3wunUdJdl88jqelrlYKwm8R63G09vbMPKZR9SjsOXW0mmPqvbY9cH\ny3ttqK7Goy1NSiRBKg6v6hqXXS0tWO8+iS9f/jbCp/cuilNpLqmYPNKtp5kJx2yteqOJbtQz6Teq\nVnyDkEg3m7NGAJcsOA4bw5vye+urY2pDKl3Sjx70IGhpRVUjiD1zi0WCkzkGZwIeeIRCLZB3GBkZ\nwZYtW1BdXY1rr70WX/3qV7GwoGWVmDrSJp5ENAjI8TECAlmAqD2VW0h2Y5pqeQ3+Rp4dowCS0sfy\nMO0AxuWSIFpKnBYZUNcKXVlaisODgxiZmsJFzgV2bVUVfvbJTwKQ8jjPRSLoPX06jly/cuedqCsp\nwTK55Em504lCux1j09MxJU2Ia6f/zBm47Pa42peVLhc2rViBUCSC+w4ejCFibB7/63Ofs7RcCV/v\nc83u3cox+fPWyKnPO5M8JecfRGzZ/d/htseuX1laGtNvvQcXiQipOizR43bjGzVHUIwZVV3DxUEu\nfUelGyqayuc3GVHUqjea6EY9XfKcCEzd3S73+SkAF+V1dgDjFh2XjWEcQB3iFdRkc5bqNbW4IZ+x\nZy4bJFiL3KpD8wUk5NL3lEBu4G/+5m/g9Xpx5swZvPbaa+jv78ePfvQjS48hcjwFBATSQrIbUyuU\nIHaMSlUbBVxOJq/E6ZEWreWM9LHw17VVVejw+XBoy5Y4YqhFrn1lZfjT5z+P95VLJjwT8/PoPXUK\n/aOjCkEusdsxdfmyoo6q22Hje/fee3FmelohYjdxRNBszqZ6rHpzwufSjs3OKuSPHW/70BBmLl9G\nbWEhnt24Melx2Vz58B7umn0Yf+3YhdrCQmXcjLiy/rwZCmnOidkQT7UKKiAh3XqaqXx+k+ZNquqN\napEutmwFgKPysrXInErI+syeolcCqJH/rgDwcJrt8w8AtAqhZIpcL27IZ+xjj2yQYC1yqw7NF+Ah\n6pcKRPHmm2/innvugcvlwjXXXINPf/rTePPNN5PvaALqtKKM4IEHHsD1118PAPB4PFi7dq0SW86e\nuIj34r2Z9wy50p9cfX/HD36AkUuXsLyhAV0tLXjtpZcycjzmdmpm/+DAAF4eHITbbsfz/+2/weN2\nJ9ze43Jh2cmTOH/xIrBmDRq9Xhz/7W+l2pcf/CB+8IlPKNsPT0xIDqPvvIOOt99WHEZfHhzE0QsX\ngDVrEBwcxIMOBxbeeQc1N98Me0EB6J13UHD+PB79u7+L6U9XSwuCg4O4+Prr8H/ve3Hz2VVQgLdC\nIeCdd/C+8nKsamxE76lTKHv3XUxdvoypG29E76lTWB8Oo9lux7P33x833u7WVvT19WHmrbeAqioA\nwOjRo+g4d07pv5n5HQ6H0S9bxwZdLoxNT8e8Z8dbdeYMQnJu6w1nzmCb/F3N2nv58GEclc2V7t+x\nA9+87TZ0FRRgOBzGzFtv4Z9uvVXJaezr68ODDgcmC0/hrtkfYGJ0BYqvvw9v33M7goOD2HblCl57\n6aW4/tXdeisObN4cc30WOxzAO+/gxooK7Lj//qTjdbs9cDgexEsvvZYznz+t998DcMnvRzGAB/v6\nUJqF43dnuP0uvx/DAGb6+vBPAIrl9Tf29WGbtEPs9i1dCA4Gse3KNrz20msY9vsl/8++PnQA6JPb\n62ff9/L+JX19+IKB+VP35w4D4ymWj38DgA/6/dgJoKmvD6MALvr9eEg+Xqrz1QWgo68Pfw/Ak+D4\nNwLYobHe7/encf67TffXmvcPApiG3/8sAA8e7HsQ05jGs/5n4YEnI8efwQzgl8jttr5t6EMfWlq6\nMDgYxJUr23L++yGb76VlL8PvPyr/3QHgmznTv6X6PhGCA0EMh4dR7ChGV0uX4XrMVu2/ceNGdHV1\nobm5GRcuXEBPTw++853vaG7b19eH1157DW
E5RenEiROGjmFJORWbzXY9gOeIKM7KT5RTERBYPGSq\nUL0VMNs3fvu6khK8vnUr7vjlL3H47Nm4NvTqJGot59tl0OsPv+2q0lKsLC1FscOBifl5pR9Omw2f\nuOYaVLrdODczg8NjYwCkMNp37703qXIUjkRw0+7dGJ2djet/OrUi7zt4UHNOwpEIHujrgw3Ao35/\nXJusHa97Cmsq9qPc5cTE/Jdw+Oy47lwZqZOYrJalVsmR4MAAnjt5EpGFBdxWU4MnczCnMhn8yE55\njiCsqz/Jt1UD4KSqXT+iY6oF8BsADwEoUm27XadPiUq6MNgB/DmAGQCH5WXq+WP9PIaocml0jrWK\naWSzrIy1xTz0YOVVkZsIy7mkouyMUSxG8aSrF8nKqfj3+tE/KpfhqQ+gu9XcL0S6+1+4cAGtra14\n/fXXsbCwgAceeAD/+Z//GbedKKcicFXByFMjAQks7NE75cXp/7sJbW1SbdJcgFnTEn7717duhcft\n1nWbVYf/srDO+StX0OHzxRAdpqwlcq5lOD45CUAKy11WVKSEgh6fmFC2mSdC/+gonHY7jly4oCzf\nu3Ejtg8NJTXS8bjdePueezTDl82En6rnQC8k2uN249mNG/GMThgt229NxX4cHutFz0gPjk/8Xneu\n3r97N67pegb3ntqM0Tl7XHt6/dOaB3WI53A4jNGZGYTm5tB76tSilU5JhkTfUVaX59CDlaGbfFv/\nS6PdYm7bUUiksxsS6eS3fQJh5f1qXFEC+7qgXdKllmt3AUAvgOPyey+A04gNEFSHy5bDeIislruv\nVr9SgRFTnWTFPKz53ctktmxuQJj6GId0TVl1lQtYAXUaQjb3JyJs3LgRgUAA09PTGB8fx4ULF/AP\n//APpvuRCGkTT5vN9r8AvAjgRpvN9iebzfbF9LslICBgBRTSsH8zDve60dMDBDN0vxEMShFxK74x\ngA0/T+5S2tXSojjKqo109LZP5FDLq2I3dXejd2QEgQMHEI5EFAOd3tOnQUAMmelqaUHz8uVoW7Ei\nzrlWnRfpKykBAFycn8fL584BkBTO64qL4SqIfp2W2O0IRSKwc08E733hhRgjnwcS3Ejq5dWZIese\neSw3dXejaudOBA4cUNRDM06yrC/lLkbMG/Gbji/pkkZWXmY8EsGGvXtNjzERijl33YbqastcVrOJ\nbN3mpUtwg5CUzDYATm45s98qhUTwwogliV5I1KYKUi4j34c57pZjHAW4Sd5fi3RtB/A+1bFdkAio\nTT72YWgTYHaUCfnYibLX+HGqt7GqsuPiOsvyyNxjD2scaxOdDYHMwNr6pQLpoaulC4H6AA5sPmA6\nTDbd/cfHx/G73/0OX/nKV+B0OlFVVYUHHngA+/fvN92PRLAk1DbhAUSorYDAoqOtDejpARobU6+d\nmAx+P9DfD+Dv9gJrjIXQZiIUWB06G6ivR+/IiFLOo8PnwzMbNxrqi3rZpbk5qQ6lw4FLly/HtbG8\nuBiRhQWcl8mcDZLpUbHdjrfuvhsNTz+dtB+AfkitVvip2bnwuFzoPn5ccfA1Ou/hSBjBwSB2NO1I\n+INW89hjGI9ElDH7ysqStm0UycKC9TAwEEQ4PAyHoxgtLV15bT5kNFgy1dBNrXDVlQDOQCKdHkjl\nRdjVz0JZ2fFOIxoKC0gOrsxMx4tXcR63xhyvBhINqgHwKwARALchNqQWkEinG8Ckqr+VAKoBnIMU\njgsALM7ADomo8v1Uw4/Mhz63oQ096EEjGhe5rmPmAnr98KNfnskAAuhOaSb9yE4guoDA4iBZqO1i\ngohQV1eHr33ta/j617+OyclJfPGLX0RJSQmeeOKJmG1FqK2AgEBCdHUBgUB6pDOZSnb8EwPA3+2F\nfaW2S6kWMlEjkFfF1lZVYUdTE26TzXEaqqvxKGeskKwv6mVMYf3YNdco+5VxIbpvBgL4aE2Nso4g\nfcm+1N
EBX1mZoX4A+iG1TMXseP75mPOgd2605mI4HFZIZ6XLZXjePW4Pulu7kz5FZeVlrCadUh8S\nhwXrYSmVUzAaLJmqjqEOV22E5CzLlE47oqSzElHdjB3vJNfWhxDr4Po7vA/FGIND9qAuhUQYewDs\nhxSmG0JsSG0DgHa5DTXp9AA4IrdxERLhnOL6toEbA+snr6ndD4lgA8Ycc1PV46xwlrWmFmXm1C1r\nHGuzFYguICCghs1mw89//nM899xz8Hq9WL16NdxuN77//e9behxBPAXyDiLH0zw8HqC7Oz2lM1l+\noa8xDKwZxUJRBHUlJYbq/tUUFcWFt6aLrpYWdPh8aOdKojzZ2opAfT1euOMOzT7d8YMfYGJ+HrVF\nRXjq9tt1Q3m3f9WNse+0Ajta0bbchw6fD5tXrIBXrgnK9mHlQwDgCoDvHDkCAEn7wZCIkGudB71z\nozUXfM3QI3fdZbk5DysvYzXpTAfZLqeQye+oTN+aHx8YAPbuRdn+/VgRicAN4B3umA3y35WQSJ+6\nFuUE9/4GROtjtgGoQAXKsQyXIT0QvyRv9yFIxI+hDMAnIKmg1QB2Ikp8AWAZAB8kFbQBwLS8vBjA\ny5C0sncBPItoWDNfp5PPV2UE+3qNsfghke4Ncv/fQmoZklbkHQ4Ovqz78MSaMNf0YE3ZFpFvmE2I\neykBNdatW4fBwUGEQiGcO3cOu3btQg33MN0KZKWcioCAQP4jmTpZXhhdb7TY/MnJSZyLRNB7+jSC\ng4OWhNp63O64EFaWT8iDD2f90+Qk3pBdaR8aGlK2Ve83PCyHE8ONVbcUYWVjGMcuXIgxu+lubcXb\n99wT40zL5otXLFkY7fahIQyHwzg+MQFfWRnKnU78uKkJDw0NaYbUJlJmSx0OhCIRhCMR6Vgac8FK\nw+iF65pxzs0XsHIKiVx28wVdsDZYUh266wuHMTI6ikkAhYODOCxf/3UAPgBJiWTOtT5VO92IEs9K\nAI8C6EA0ePJWSOqkGhcADEIiquchKZuD8ra98n6MpJZCIpf3c+0yPA/gZkQDNIMAxgDcB+B38t88\nGJllobor5DGVy+Ngob4j8v8sjzUR6TcSCp2Kt6zdLn0OtR6esBxSqe1gimGuZnoZv46R6/TAFFkB\nAYGlCpHjKSAgYAjJ8gvN5h8CyUtqWA1Gqo5PTmIiEsGEnKdZW1SE0ZmZpP3gc2Xd/8deHB6P5k/y\n+wYHBvBWKITjExP4jRxmy6DOGx2bnjZczgXQnudwJILVu3ZhXA6zTSdfVi/vNhiUiHfnNT18AAAg\nAElEQVRxsRS6nYk8YYHsw4/YrLpL3GfSs3kzet1updDCTZDCYQGJUD4D7ZxQJ4A/QCJxrFhDKaLk\nkUcxJCXxXwE8BmBOXs7yM9cCKEFsvqcbUiQBr4Kytj4CiRz75HZZn1i+NSAppXcPBLEsPIw5RzEe\na+nCpOqBRK081gpIYbyNkNTSh5CY9PuRPEvRyDZqJCpRlJkc0kS9TLRucRBEEMMYRjGK0YUu4Wor\nkJPI5RxPMxA5ngICArow42Cqub/sVnvfZ93Y0ajvQpqKS6le2ZNU+5oMLCR1ZGpKIZ2VLhd+09GR\nsLSHUo7lr/aj/d4IDhyIKrxrq6riSrQMh8M4fPYsRmdm8NDQUExbasWSvTdSzgXQnmeP242PyOEw\n6ebL6inbTO3NpDPyUkC++XKqQ3f5z+STbjcCkJROnnQCURKnzgmtBHAXJEWyDcCPIRFFLdIJSGZC\nHwOwG1HSCURNgV5HfGhWBPGks0DuYz8khfIw16dSxN7stAKoCw9jzWg/PjzSg8/JoavMnKgBkqIb\nAHAU0eBPH5JnSBoJhU4lXNrt9qC1tRtDQ9vjcj2tCXM108vcy8XMHedgAQGBRBDEUyDvIPISzMFM\n7UfN/TNIONQkKt2+JgMjVRUyybMDcNvtuOMHP8CluTnd/Vi/ekdH4Prf
BuHxRG/QD23ZgmdUNTr/\nINf1rHA68fC6dTFtsf0+UFmJjuefxzwRfKWluMnjicsxNQOj5WkSkfvgwIBmrisgKZ2ApPbuyI17\nzZzEMID+vr6crJSoJsUsJ7MWkprnAbDd7cZYayvuk889MwziSacTwDhiS600QHK//QCkkFeWC7kG\nElHUw4Lc9kSC9YMAEj5Ch6SAHtFYboNEehmRXQvgZwA+Luf9vudtxM+bdsDBbbMSUZJphGzyMJKl\nmEomI/vd0zLKykztykS9zJ1cTJbf+ibeBKBtbpQLObC5CHEvJbAYEMRTQGCJI13n2GwSjuOTkm+l\nFmEzAz1yxUjf0a1b4XW7pZvemRm8EQolJLtac5iINEcWpFvYi/PzcYon2+/k5KREZk+dwtT8PIbO\nndNUSOPGJivQbW1AmLuH8rjdWFlaisNjYwnHwvdz9a5dMXOkp9QGg8DEBFBbCzz1lAiz5aG+1qys\nn2n1LbLaEXcYkjI4CimEVGsbIKpvARLN8CBaQ9MFYBWAU4iWUuHDW62IW7iCqMKqhwpVP/n+AlF3\n3EOQjIZ+2NKFN+oD+H83H8B5t0dx6m2U2/IjtXNgxDc2HW/Z7BllJepl7tR+ZErnOMZRhzpN1Tdf\n1VBBmAWWIgTxFMg7+BOUoRCIhzqc1fT+cimWD/z3AXQMZC4MFgB8JSUAtAmbGTz32yi5+uLB2HIk\n3a2t8JWVKaGpFU4nsGZNQmJuZA55ctpQXa38rdcmv/1arzfp9gyJFGi+zaKnmzQJKm9ENB6JxJDU\nRGG2hw8Do6PAQw9BgINape8CEPD7U9aCjJZLSQVqUqx+H0S0vEgVgAH5/2lI5kLV8rbjsvMt9u/H\n85EILkAy7lFXtk01k6k0hX0uIlpKhUcIQCEkYjwA4IOQwnp73R78sLVbye30QCKmByApvJk6B6mC\n/e61tHShvj6AzZsP5L1RFo9USRZfxuV1vK6p+lpT6iX9vppFpgmzuJcSWAwIcyEBgTxHtkxf9Exn\nrIQRsyEjrqtV/7wfoetGgPe8aD+5Gc926ZshPbxuna6DrBnwpj8Akhotmd2egTc4Utdl5dvs2OiW\nHXilBwfd3bHbhCIR9J46FTPXauMiNtdvHnVg/HgRSq+fxMdudeDJjUvD7dYKWG2Qxcx4mKGPlR9n\nFl7LzHHY+yJIZIs3CHIglkjy5jzYu1d6CgEA9fWAxd8FyxDvQJsMMf0zCVYahrn0ZvIcLDYGBoII\nh4fhcBSjpaUrZ8irH37FmTeAgGGH3DDCCCKIHdihG2psZJts9NUsMmMaJbCYEOZCgngK5CH6+vrE\nkzoOfj80CYbVyIYDrRFnXCME+Pb2CHqvGcTaN5twaJ87KRnPp2sqHJYeNuzYkfghQyKCCsTPtRah\n5+caBCXRrsPniyvTcrVC65pN53pSk0OjYKGzxyGZ9MwDuA3AckikMlHpDj/iS5PwKIAU7qpg/35g\nZATweoHNmwELvwtskGp4HtZZH9eXNFEHycCInxc1ITdT9sQKaBUyseo7au9eP0ZHpbNdXx9Aa+vi\nO9IC2SFZVjnfZosQahFmK9178+l3b6lAEE8RaisgkPfIVg5muiG7RmDEGddIzuqTj7kRCLcaIp35\nBo9HeriQbFwsRFqLdALGjJ3YXAOIcXex+mcz027GmUQqbs4J20Nq2XMsRHcEkloYglQDczeiYaMP\naOzHh9d+GJIDrRpxRK+lRVI6LSadgHRt6ZFOzb4YAH+jUw5JUQWksU4AWA2JYDKwc7BYIbeZDLfO\nXo6oOWTGmTcWRkJXjYTRZqOvgLZplJnwW5EjKpCLEIqngECew6gClktIJzw4lXqhAsmhpWiHIxHc\n1N2N0ZkZlDmdmJyfx9qqKhzassXSuc9GGPdSBVPH3oTkNFuOqENsFaTcR+bWympv8vAjqnZ2ILY0\nihFUAbhgttMyqgGcT3HfVOCEVOrl
T5CU4SkAk/K6Onk5j8UKuc3kcRPVAzUKI6pbLtbVNKJUZiuM\nNlWYUVtzfSxXI3Jd8Xz77bfx5S9/Ga+++ipqamrw8MMPo6OjI247EWorICCQV8hUeLCR/M+rCWbm\nQ4/QW50Lq4VMhnFrhS0m3SdPrqMgJGXuovy+DsCvAfwtJOVwHFH10APgPcSPX01yApCUUjuihFUP\ntZAUSLP5mJmGOj+VwQ7JuIjNlwtSWHIxgLcQzfFk14wTQAmAnchunmeq4dbZghFCs5ikR4/0Gsn1\nzPW8SjP5qrk+lqsRuUw8L1++jA984AN48MEH8bWvfQ19fX3YsmULjhw5gtWrV8dsK4inwFWFxcxL\nyJcb0lSQ7tjM7J8s/zBVpKKcBYPAyy/3YflyvyXmTGbmIdG26ZyP4MAAnjt5EudmZhTykMtKohbp\nteqz5kdU0QsAhm5/01Vgs/Ud5Ud0bJUA3kUsUWGkUm2ew4PPZ/wVogpkCRKXErHJLyvzLdOBHUAZ\nJDJ5AMBnEBs+q0YFgF8AuBcSWefnxg/9a4aRmuP4B/hwO8rhQBekEi1mH3CYQS7l4xkhNItJetIh\nvVYbES0G2DXqhBMlKMFO7NQcSy5dU1cLcpl4vvHGG/j4xz+OyclJZdnGjRuxbt06fOtb34rZVuR4\nCghkCVp5cEsF/Nhu/f6gZikOo/snm5tk+YepIpWapcPDwNGj2uVJUsFzJ08q83DL008nzF1MNGda\n64zmQg6HwxjlSGely5VSDddsQStP0qrPWip1NdOtfZstsLExYqn+KHVBIk7vAvhXaNem5PMZRyGZ\nEs0jef1KQmZIZ8I7FhXs3N8FkPo8BuD/AnB9kvYvAvh3AJsAfAxSyPB1ADYA+I28TTmAh1X7sxy7\nERThMBwxNVHVeZmsJusKud1M1GZdDBjJccxkHuTAQBB79/qxf38bIpH4GU2nfIpWXmW+gV2jveiF\nCy7dsXwP3xM5oDmGdP0OrPZLuHLlCt5444202+HhSL6JgEBuIZNP6JLlHubLDalR8ON1/lV0bO4n\nm5RQ2GDQWCismblhBjnpgil7kYUF3Ob14ifNzYbCQXk1zVnRAsBvmTlTZCEaoDg1P68oZ8HBwTjl\nLNGcaa1jZEyvPfW+AOBxuXDkrrvyTp3XmxuzSmgXzIctdrW0pJVHbPV3lDpcmKlrTki1J3dCe2yM\nVAJRYgQAtwJYKbdXA4l0vmlpj1PH5weCWBYexpyjGP+zpQszCfIQ+VBgngTbECXlgERK/wySey1T\ndB2QSOXHIBFuQMptPc3tNyGvfxvR+WWkphxOTCD6QOM+eT3/gIOf8xH5fxYebRaZ+N1LJQwdiJKz\ndLdJFeHwsOLMOzgYjHPm7UJXTqmWeqG/mcqDNUq8L/kvKcpwEEGRA5oDMPobn4n916xZg2XLluHh\nhx/G3/7t3+LQoUMYGBjAJz/5SVN9SAaheAoIcBgelnIP9dQvI86uwSBMqYVmt7cS/HhLdkXHVu6U\nxmaUjAWDwMT3W1B7qh5Pbcic660aTNkLzc2h9/RpPDQ0ZMhhlFfTSv9y0FL19baaGgBAQ3U1Gqqr\nAeiT8a6WFqwqK4Pbbsd9Bw/GPKFUX2vBIHDsdxIZa6hMTO67WlrQ7vOhw+fDe/feC19ZWfoDyzL0\nPmtmldBUXGKtdqpNB0FIxJJ3pmWEphdSaKne2Jji1gbgD/Iy5urK2ntc/nscUrhtJcypjkk7zzpg\n8LttWXgYa0b78eGRHnxhsNPwodgcVAM4B0m1vQ4SOW+CZKr0UW77ywAeAsBrAuxxTTm3bBSxzrJM\nyTuGDyGAqPkPU5d5MyBGfp1cu2oFdTFh1j03V1xSkznz5ppqqedEa8ah1gyMqs3pKMMCmUG64kY6\n+zudTjz77LPYt28frr32Wnz/+9/H3Xffjbq6OtP9SAgiyuhLOoSAgHU4dOhQxtretIkIIGpsJAqF\n
UmujuVlqAyAKBKzf3gqsWUNUUUHkdMaOt7NT6k9rK1FHh/E5WIwxEBFt2reP8MgjhEceobVPPkmh\n2VlT+zU+/TSFZmctvaZCs7MUOHCAQrOzMX/roXnPHmUMgQMH9LdrJkLRLKHzALXfa2yci4nO/n5q\n3rOHNu3bZ/i8GIH63OUirLyemin2R7WDiDbJfzcSEfuIdsrbbuKW8fs6ub9dlPzHu8DANklfzdHv\nBQSM7fOVfZvokUdA//vTjVQ0GzJ8LJc8xnKd9W3yvNSq5q6Vm5/b5PVHNbZLBSEiChDROq4fqX49\nGr2mtK4DPWhdR4nQTM0E+V8g5ZGkhk7qpFqqpUqqpE2zzbTvQAfNzsb3upM6qZmaaRNtolDKZ858\n3xIdcxNtIhCokRpj1ustzxaeO/QcBSiwKMe+WpGMExm5Z8jk/mp8/OMfpx07dsQt1xuHvDwxL0y2\nQbovQTwFzIARn02b9ElPJolnKCQRp1RJJ5F58mp2eyNzlAwVFdEbwsLCaDupEkgrCHsqCM3OUscv\nf0ntv/xl0i9angidmJiI+XLe/G//lhGSpHX8uieeoPXPPKMcyyiRWqw5ThXJCHWqxNTqH9ZMwMrv\nKEYOQEQfJokgMELDXwbN3HYBjX3NEMsGIjpBREXcstVEVJVgH82XfM2ikQghY/sUzYao80DAFOks\n0VhWqnrvlOfjBDd3nUS0jOIJK1uvnmMz6O/vpD17mmnfvk30GXks6ZBYo9dUMxknuUbG2N/ZSXua\nm2nfpk30mVBr1oiSmszxpDcR8V0McpzsmCEKaRI8veXZQibvpQS0keuc6NixYzQzM0NTU1P08MMP\nU319Pc3NzcVtlw7xFK62AjmFTJXZyCbM1tU0u70Vc1RTA4yPS7mdb70F+GRLx1TdZtOtJZpOXU+j\nSORUmo06kvwxGAL19djR1GQonzDf6rUmK5EiancaQxjAFyGZ+eyEflitVu3HMICbIIWLNgA4Bcl8\npxGSMc+QTltVANZBqs/JtukA8AqiuYqGO5+huiCrAcwCmIY0N6yWaDGkkik3IZpfyWMVovmtE4iW\nm2EwUzszUY7k3r1+JQ+xrj6Ana3dWSmPYnUN0L1+P0blH5y6QAd2djtjciczlaeodqa9hEvoQQ8A\noAENeAEv5IybrihbImAUuexqCwDbt2/HT37yE8zPz+PP/uzP8MMf/hD19fVx26XjaivMhQRyCsVy\nUoxVRi9WwwhBSmaco9WGGfJoxRy98gqwYQPw619HSScg9UeL3AwEgwgPD8NRXIyWri645ZV682GW\nSLJcU7ZvJh44mDXyydTxK5xOXJyfV47F8gmTwSpDJjNIp6RJMoOepWbUlQqMmLt4IOUnJgMzUSqC\nRBKZcdD75PXPQCohwnggb4YzD+Ao19YFSOSlVn7vALBvIIgr4WHAUQy0dAEq058KROtjxnQ+Q9fs\nJIA1iCeX0/LrEwC8kHJXSwFcgjRWN7dPLbffhwHUQyL3Rkuj6Bk2dSE2D7GlaQfazA8xJfOfVMy0\nEsEh/+B4GxvRsuNRtKlaZXmKUn9TM6jRIq9a+YcP4AHYYMOjeDSG3PH7/xg/xkN4KGvGQgMDQXwp\nPIENjlp8qeUpeFSfi1SJeaL9MkX2BQS++93v4rvf/W5mD5JMEk33hRyXlQVyC0ZCXQ3nuqQZkqre\nv7MzNkQ11VxGo+Gsev1PNkdWhOKqsae5mR4B6BGADnCd5seyalX0uOvXm5unbISRbnvhBfLu3Emt\nv/hFXJjmc88/n/HwTRYiqg7zzWUYzT9NBfkQMpsqMhEWaRR8m16N9jtJCqG1ycvXUzTPkX9VkhSW\nWsOW7WkmPALpdSCQ2RsHAy87Nwb1y8uNq4Niw2v5vMYT8vp2Sh62rEanPEcgKTR5vWqf2dkQHTgQ\n0MxD5NtoJv18TL4fzYsUFjkbCtGBQIBmdb6YrchT1ApVNROGyu
9fS7VZDV3ds6eZHnkE9MgjoP9x\nYFVcrmeyMFy9/NBE+1kVTixCbbOPpcKJ9MYBA6G2QvEUyClYqeqkq6Kp9x8bAy7Kj/QrK1NXG5Mp\nlkwtPHYMCIXi+59sjsyM26iixT/1buI6zY/F7Y4et7Y28RjV0FNarcTJyUmMRyLoPXUqzma81OVC\nd4YLafPKZr6ElWZSlTSq9OYjvnf0KL45MZH0c5WsxqhRxYvfjjmoNsrb96rafwLADLfvYQBc0IOC\nmyGpmI2Q1E/ICh68jYCGk2i2saCxzAWgFZLyykJonQBehhRiex+AH0Nys2WKoJaabKT26zCk8iuA\npHTOqfZxuz1xZT602mCKabTMSvRsFmMPACcaAfx9wpYyB7fHg9YEPyJWlC7RUjfNlGMpVs4YMIpR\n3IpbsRIrs6II8sr2k03uOPVXzzmWqZYv4SXMyVfPA3gAz+LZmDFpOc7mixutUGYFNJGMmab7whJh\n9wL5h3RVNPX+7H1lJdGJE6n3S0ux5FVKXi1Mpf9mxm1U0dJ76s2PhT/uiRPpmzRZjXxwQ801LGVV\nMpMw+rlKZu7STMYUUX67Dq5NdfudFP8jXUX6TrBs33YiqpwNSUqnCdMfvZfX5PZOInJTcqfdFfJc\nuIkI/Z2SSrtvU0yfjehDRkx31I6wqZgR8W1sI6Z+vkQhqiAiUIjuT8vgKF9gVN3UUgc7qZPW03py\nkUtRXtfTeksUQSPglW0t9VdvbGqzJBCogzqU9Wy/bbQtbsyLbUpkFEaU2cVwIV5MLBVOpDcOCFdb\ngaWIRKGk/Lp0yY+aIOqFuFoR2sqHrNbWSv83NBC1t5tv04wzr5VkzApH4EziaiNRmQi5FjAGs58r\nPYdfo+Uu1NutIaIKkgge/4ysmWJ/oGsonnR6KEoO11M0DJQd40OUfqmVExRbYiTRS8uxFiSF2fLr\nKik23JUPDXbIocGJ5tFMGRKi9F1v1W00c30P0C6d3prtZXaRaRKhRWT4ZXVURyHSJoCZ6iPf3gk6\noUsI1cdlfSyjMgKBGqhBcz9+fF7y5hVBMxKGfbWR06XCiQTxFFhSSHbDfMsth3TzB9X5k9m4+bai\nhuViqYWMjG37q1nNedKbv6VGapZirsti1Va1ApmqAZotJMoZ1qy3qaOQbiOJALZSYpqhJkGSXia9\n3BRV09RKo149z+UUS+K88rIquS/JSKNe7iXkNowS1zqK5p8yglxBUk1Ovn9OksgsI8d2IknpfARU\n8XQjHZ0NJSWJzVx7i/FxiT48mKcQ3U/q3krfUc20uL1MjEyVMmHEw0veOCJjRmXMRB+NtqfejvUx\nEVnlx1dKpaYJWjLCls7vnhEyaESZtYqc5guWCicSxFNgSSHZDfNHP3pIN5RUHWaq15aVxKmuTmq/\noiL1ENxU1EIrSaHePJldnq/IBvHMNplKJ9Q8o301INpk0tQoG0h0PTVTPHXQU0i1ttWCekrVBJN/\nz0hhMRHVkvYPdztFiZC6HiYS7Gflq5QkMrmN/oZq6AVqpm3UQRHlkmH9YyZIJI9dMUOaDdHyAwEK\nceY+iS49I+pyJvXGZAqqdE0Z1cAlZFspStVoKFk/tVRNBrNhp6yPXvLSelqf9twYHXOqc8PG10rJ\na6iqCRr/3k1uqqRKaqVWZX/+e8rstVJLtUrbfIiwWVhFTvMFS4UTCeIpsGTQ2SnlULJQU60b5lBI\nclBdvz654yu7+fZ6Y7dXh7amQz7NOrjyY02H/FpJCvVIitnlVoLNT12d9rnOZWidW6vJVDJyyH8W\nzBLJjBK/ZkrKpqwKAefHrafqZxta1EEvDNwozeCJYDtJRMxNRI90Er3STHRwE1FFSFILB0lSEk+Q\nKjRVfn2AYnMW1Y635RR1ia3Q2N/Kl5eIKuklAlUoN9OM/LUSkY+IlpFEPpkqzBNmtVLczLXNu/yy\n9tqTzLPW/gzZCYI1F+Cbba
Uo1dxDvX4mUjrT7WO6eaCsb63USh3Uodsvre06qZNqqTaOCKr30cvr\n1COJalLN5o1XS/XGq3UOEpHRSqpUtm+ndtPzZwb5ktNqBJDKDi+Jl974SBBPgXwCT5raE3yXGSVX\n7OZbTQ4ZcbJCtUuVhKWrGlpJCvUUV7PL04GarPHzk+ghQS6G/WqdW6vNjcyQQ7NEku/rthdesEb9\nZHfmTH5LwKasysflx13zjQM5odKboQ5Gt2VlPUCkaA8hInqjObpiVyCeMG2i+B/t5Rp94NcvI+lU\nGlU97RQlgmZuHopj3u9SSAc3pDhll82VVhkZfrz8pdess60WEj0I4NtZRbmRiWm1UpQpBVWvn4mU\nTjN91iJ56c5NqiG26mVa+ydrW289I2jLaJmyfjktV9RSEGgtrY0ZbyJyn6gfrM0SKtEkzwJXJwTx\nFMg7GCFNhw4dMk2u1NuHQlETn3RVu1RJGOtTaSlRa6t1JkK5bvKjBzVZY/NTXp74IYEVYb9Wh9pq\nXZ9Wmht19vdT5aOPEh55hNY++WTSNs2SXr6vlqmfzRT9ZaijrNyR8+Nu/cxsxlV6hmznDC8naVqd\nJOVfKoRHZkpHGiXFk6mVRBJ5XE8SkXRSlOydoHj1jpFHkJRf2UzxP/YOjWXLKaqOnlCts9EsNdM2\naqMILdPYl4XMNtA8tdP9tI1m455b8GosPzY9gqhF5M0Er4ZIIpW86ZJWO+qanlYglWvKaqUoEwoq\nc6WtpVo6EWOFZX2NUL7f6c5Nsr5pETrmUMuW8USQJ/VaYbXJ1vNQq5HbaBtVUzUto2Uxc3zo0KGE\n5D7RGEMUihnHKoqvYZoqlpKZ0NUGQTwF8g4x4YE6StahQ4dMkyut7a3Mq0wFoZAUAsyTJr79bdsy\nq+Rl+lhac5Vo/rQeDgQCUt5soocEVoT9Wk0UMkn+OzuJKr4VJYMdv/xl8v6kQXotU2rNpafpwkzY\nMD/ubD6QySTx1Arp1AqZDRApTGtjKJ4INXPbekgKzT2qsS7AvS8hiey1EsWUKymYDSnmP4ykrqX4\n0xxLTs8RaB+10/0UIkmpdXLr2yiWJPJ9Ys8tQnK/2XIWJJOuqpzoxpfvRy23H9+O3qXe2d9JzXua\nadO+TTG5p0aQCwZomVBQK+Qwai0yawVxZn0G6TvHpoJkfdMidPyy5bSc2qldU11cSSuphmpilER+\nfTu1Jzw2I6aM1Oo9MDh06FBScmnE+MjqEjZLyUzoaoMgngJ5DSuULKthdZ/UxkR8+2pSajX4Yzkc\n1h9La64SzV8iYpDquqWI5mYifEUig5XfzXxNUsuUWjNsIAHy3XgoHXRSbF6lYk4kv2fr1IRHTYQ6\nKRqey5ckYURKnSd5gmKdaJ1EMeVKquRyJSBJqewg7dMcDc+9rGzfQRFlPVMwtUirHpnTCjNOF4lu\nfNXhyTz5ZNC71Gu5Oes4kCM/aiaQSQW1kipTbncNraEKqiA3uWkdrYvLjWyn9pg8zGwoalqETs/Y\nqJM6FZWygRpiSFwN1VAd1ZGHPIbIs5aCbIRcbqNtVERFZCc7VVN1nPqsFbLMXw9WPpRYSmZCVxsE\n8RTIa2TDwMYsrO6TXu5pY6MUfssfy+pcRj7Ul/WhslK77TVrJHLs9Rp37tWaq1w8p/mGTZuIUDRL\nldsP0Imz+VdqJF1YnSubVZhwoNEsu0LRH9dKbjkLAV1HEhFSf0TVRIjPz1TnSbL9a7hlAYoNtwWR\nUq6k8ulGWj4bilmnNu5hY1Arsw00HzMNfD/V++qRueX0JoGICmiKmmnO8G2qun3+fSttTXCjHp/f\napRCVspzhqcbqd2k4rkUwQhGJVXS5+hzKZNBXjXlCZtWW2qVdRWtSmj0YwTJzIDYNowQrqN1MQ82\n1GqmVgkVfr3WMdl7PszWTFixOiS5juoSrs+EOp2JtgSyC0E8BfIaekqWkZAjK0iaVhtWq2t6
4aXq\nv4ni1dB0CShzB/Z4KEZ15cHmgFdE6+o0m9Ns34rwZiuRKHx7sZAwdFSDfSz2HC42rMyVzRQObT6k\nTTCbyTBb0dpUq4SI2aY7KT5nU4tIsWN5SSKMcbU5Z0PkPBCgdbMh8nDLeUKs7lOd/HeZfNxEl7DR\n8Syj/QSKxGxrhN+r2+ffd1Ak4Y0vTz55FTnZMVtnQ4QDAVo7a/6WOhdCba0AT5j4GpbphFeyXMMC\nKogjbImMeyqpMkZdNHJsLZKpV1qE35Y/Dtte7T7LHnSoS6gwoszWa4Uoq4lhIzXSalpNFVRBXvIq\nCibfp+cOPaf0lQ9JLqIi3XxbPoTX7DwZhcjxzF8I4imwqMiE22hnJ9EttxxK2GZnp0Si1CGd6v4k\n6x/LKwSIOkzGcBkdu1ESwZeZKSmJH1uq4Mms1hj59QBRcXHqtUpzAXqhvmkV0k6z5mXC0NFmMi+r\n5BhSmZ9s1zxNB1p9PXTLIe3zZiLPVbPsCulHKxttupmiXfNQVE1UEyl2LK380ZGHiMoAACAASURB\nVOUk5VOq19nl9tnx1X0yYrpjwvyYiIgq6Sg3ngUKkf7HJlbVjG3fbAqy+lzoHTPRPmawqA/HEhAB\nsyRBTTD1XFXNtHuCTlAd1dFROhpD2LQUa15lPUEnEuaA8n1gxkBaiqLazIft5yKXsryQChUSyfrJ\nk1E3uWPIHq+Qsu218j0d5IgZRwM1KLmjPDllCmYM8T4UDW8OUYjaqI2W0/I40snWd1AHraSVhuqf\npvMgQeR45i8E8RRYVGQiR9NIm+rcRUaU1PtqtcUTRp68Jirtkmo/9aBZA5Jrb9kySilcVasupjqc\nVw2myN58M9Hy5eZIZy6WOclEqG+6OYcJQ0ctMuRZTKQyP2b3yU4NRW1o9tWMraoOkm1qNBRVDS3V\nlDncaoXpMpWSKZ5a7rFriaiaoj/8Xnk/deivVq4pPwaiWALnovhanGq00pxCOvWOw8Aru2rzonRI\nYaJj5iPU5yURETBLEtT5e1omPKm0yyNRqCaf09hMUn3NNmrTrMXJ96GGapS/Wf9ZG1VURUwJ3Ebb\nNEN/WY4mPx6e9IKiYb8ucpGNbMpyJzljyGAxFcfsx8ZaSqVUTuVKrquTnAQCFVOxEsrMO9GCQD7y\npfXggEj74YNenqaRBwp8qLEo1ZJfEMRTwDDSJQla+6dyk5+sH0ba1KvRqS5fokW6tAje2rXax0rk\nCsv306xjrBZp5dv73OeIamrMl2BRq5eMUCdSXNMJ68zEg4d0kYkw1XRzDhOGjqZ7N5wDSGV+zO7T\nTNEfHe1LLQE1TZO1avY1C+etmZKNWRtaXUvUlpbi2UHxZJU3JFJvz9pMpBJqGRsZGZ/WeEJEVEpn\nqZyOkZdephMUJqJYIyKTzxKTIh8/qnqXfjPFzn0isxezRjBqUqi3v5F2UwnJTJQLqQbfB94p1kc+\nWk/rY9oopuK4ZexfBVVoqrAhCilht2pnWPU/plh2UqcSUsz+HZX9qBnR5P8xJZUnjDypraZq5W+9\nkijJSrlokVE98q+neKvzY3mCLFTP/IEgngKGkS5J0NrfTBgpI2Zqsx01QiGi5uZDScNXtcpvhEJE\nbne0/ba2+NItzEm2sVFS99T95/vKiClAVF0d22+WP7l+fTRE1ujcataA5OaSn+tVq4yTWtYuU3L1\nyLtVSmU+GQmlE8aWDzmHhpEB6TCV+TG7T3K1qZl0aUyCVUawbXaWag4coNbZWeXY/PVk1ZSq27FS\nYePb2qY6DlM8+Vc7xU8bI16tqm35nE+94yZqJ9XxVdARpd06OkxEiV1zGbKlnps9jjWhtrFHbSbt\nS199bRlREFNVpfT2N2uIo2cmxKBFOJMRW74PvFKqVjS1SCMjdw5y0FE6SttoG3nJG6fgrabV5CAH\nVVN1TK6o+l8zNccpxOyfi1xxKij710ZtRBRLolkb7zv0
vhgiqVcShe9XG7XFnRczDx8SKd78MYWz\nbX5CEE8Bw0iXJKSzP0+kEtVrZND7AeYJkxZpJIolgcuWRUknH1ZbV6d/bC3VUC/8Vb0tv05PLd22\nTSKrtbXaYa18rmdDQzxRT0Qa+bqYeg8E9PJj9eY5lfzVXAzBXSrGHWmjmdIiYYuF5GpTApqWJoNr\npvgp468nrfWpQN1OojEnIzVsfR1JqmUrSWQypHEcXvF8pJPot81Ec5uItoa0yeoJig1p1TJCYghR\nfG4pwzaSnHWThdrqwUsvE4iomN5QFE8jqmQzZecjYPY41nxHxR7VgojwjCKZoqnl/qqnjqkJG58L\naaYP6vzKNmqjEIWojuoIBCqjMmqjthjn2lW0Ks4MiLVrJ7uynFcs+fxQfj92HPZPrX6q/7Gc0/W0\nngqpkNbROmqlVmqndnru0HMxhFhLzeykTnKQI649fk7MPHwwqngLZ9v8hCCeAoaRbghiOvvzpJWR\nIrPhqUTGVFsWXquX66lXTkTdV74EidNJtG5dPFlk27rdRHa7pIqy9bxxEa+WJqvdyfe1vT2e8GvN\ngVZupxFizeZCTRQzoY4L5AgskNE6+zupeU8zbdq3iUI5UzIiwa10mnfZyaYs1SlNR+FspsSkhl+v\n3k59HPa+gYhe53aMBKLTpj5eiIgc3LI6jfEw6E1/sjEkwwkKUx0dVkinUWjNs7UqqNTaJpkYZzcn\nNHZ0uUIw9ZAsz1Pt/qquj8mDN99hobJGQnTVfVDnZTrIQV7y0m10WwyBZCGsaiXRTnZqpVbNsFpG\nQhuoQXH8VZNBfj8b2RQFlyew7F8Jlegei82nlprJclfVbrwVVJFQpUwFIQrRKlpF62k91VGd4fMi\nkJsQxFPAFLKlRKmJUGur5Kj6/7P39tFtnfed55cEQIgvIgG+GaYp03QiK87YLhmxcRLGBVpT9ZB2\nQ9QTbhRvDtOzO+DO+GS3ezqxN+2cnHZ3JzOd05w5090507VmWuXNTCNbtWVFVhwqAWlVSezaieg0\nTc02Cd3IDi1LASVLFqm33/7x4Ln3dx889w24AEHpfnFwSAD3Pm/3Eryf+3vjffqBE9VNtrvbHrCm\npwUoSoshj8eMREQ7dq61vMRJX5/YJxol2rlTP1a5bXu7+bksRcItrxxg5batraIPdR5yrHKOY2MC\nQCWoc1dhO8ur05rK9pNJfVKmwUFz7F5iX8uN0w21QarkSrR4dZ7+XFrUKnwMNDV37d9ZcFsyr0u6\ng4g6SCTmWSZ/Fk5VXmG4XbOd2o/ltabhHJmxk8PFt3NkgmezzXwqnYNfGMxRjlL0DCVpkcYc6n3q\n1pmPfZD+xndcoVWitQJ10BQdq/Hldb2jplVO7pa6six2rqJEVgsaB6cttMUWdnKUM8Cui7polEap\nj/oMCyCPlYxTvATu+qiPClSgVmot+ayXeg04VD/roi4jk67MbCuTC6ngK/tZpMWShETy92ZqJgnJ\ncj0lXPJ9pFsuXx8JprzWqpxrO7VrM+C6ycmKHBTQhtoYheAZypdqZYnSgZBal9IJTqTLkQQcDnES\nZnXzUN1IJyfFe6OjJoyqlkL+Pi83wvuQ2wwNEW3fLl5HoybEShjkpUhUy6vbU42b5fGl2ax1TVVX\nYTXZkpNVV2e55seCz7urSw+XbueRrg+nRE1uCuKGSehqWyrfJU3SRASi8U+NEx4DjewfKbF4Vlom\npT6tqaUq53zqIPMfZz85g5cbdLnhxTQJwE2TSBTk2bKnaTjNxj2peW+i+F6l5UqsylGaxXB6+Xcl\nLm7zYp+FHHX7OI94wqMOepFQdJss7+K4PBt4kN9R1aiTWI02ndwtdfGdTmVUuHTwJsFMxmCqtTJ5\nEh75kG6ujdRIR+loSYxmL/XSNE1r3WFvpBstLqx8DLo+4xSnJmqidmov2UeCqlyTIRqiPuoz4JBb\nY2Xm4DSlCXlrO5PF
v2AO/Ha1Vvm4kpT0lH3WLrGT7E/OLcxmu3kVgmcoX1KtadWyfKpJbrjbqkyW\no7OCSt1/f74EODmkcndYbjXk0NTQIPrnYOlmKZQxoXwO0aj5+cSEFWwleO3eTdTUZLWmTk+XwmUk\nYl0Xaf3UwTefu87lVo13la693JLpVRwUda7GKlw6jcWLi29PjzO4Ou1b7g2TEDxL5VbSpAQii9fT\nhQ8VaOrQlPaCvtLSM+kD6U1hTS3nfOomAUQNB9KUPjROy2sFW/BKk/lPtpv8u4Dy/acc3ucgqiYd\nktJhlO69YG1taRqnQ8U+/sGjFXicUNyn9cCvOZ5HulI1PcQvbv6ygqQn/lciRzn6lfyvBAZ1Xlwl\n/VqUy3W/LBdYdfGdWcqWuIryWEVuIdVlgOXj5/ORYMXhaIRGLEAnrZLSkikfavkS+eD9N1ADdVAH\nbaEtFkiVbekAVffopE5KUYp2027LPvL3IRoy1qOf+i3geQfdYXymAr9aa5UDorpuXs8RuYbcqrtI\ni2E2202uEDxDeZIEA+m26ZZZtlKpSW54WRPed0+PsN7dcIMAJlk+RAXCoSErpHIrI39K+JKApz77\n+pwthdzKJ8eeSJifxWJWILzrLrJk2AWIBgZKrbT8GY8TLS7qkwBxF2UJpXfcYXUB1kFzLCZeT06W\ntuX35oLsx67+p7Qg83hXL2DIYdWttqjTvqHrbnByK2lSApEerqcrLT0zfsjemrrZtUxETQyse+am\ntBf93LW1lfQA6SY7m5v6fpq1H7PpS3fYvaCVDmy8w8540VX1m1TwGMNZoAJN0icpS+s05nIepal0\nrmas6yWapE/W1BrjDRQFHHiJk/OSMTRNzueWCozlZiH1A6y8z920m3qox6ih6VbeQ3Ufla9VeBqm\nYct8JHgu0iIN0iDdTXcbkKmrwzlKo0ZioBEasWSb3UpbCSRcX50y2Eq42027tS68bg872OU1O3ny\nI7kOHdRB3dRNy7RsWWvuwtxCLcYa8DWV6+YkHmcrEzvZxdCGVs/NqRA8Q3mSCga1uJC3y0Db3+8M\nh3x8w8MmTKkgpSsdooIuf27daoISB/GJiVKrKAcoO5fZjg4TlDlk8kRCdk/pdjw9LQBOQjd3r5XP\naNRaz1ONd9WNWXfM/Wp6WvTR12e9MaC7aeHlfOLg7DdRVTVqc4Yimv72v6KeL/wBjR38iPbivByI\nrLT0TGGtQFNzemvqtSAJ1m37RwhrBeIX/RLKeC3KXtIDpJq11mtCH/m+tG52s77k064vvwCZZm3K\nOaYWcoQDacKhcZp0PMaV2U/dzqPqW2z9yRsomhfwbiBX6sJaesRM9+K/ozH6qOF+qoMR2ZbqFuvF\nmsnnprNU2s2Rw5WsoamDYNmmCmO91EtZytIyLVOWsrSNtlEXddEYjRlWOL59IzVa3FwlfPI6nFto\ni+Xz7bS95Jg0UAMdpaOONTtBoAQlSkq/RClqicm0e/BtZNKhDuqwwCa3uLZRm8XS2kiNlmRFco7d\n1G1Zg17qpQmaoCxlPQGi7hxRz+0CFaiHelzPYT83Wpz2DxMZBasQPEN5kgoGlV7I+3Wt5ODDwYW7\nmwIi4c7YGNFXv5ovGZ/anlPpkELB6iKrjkNtSyYS4k/pNlsoEDU22kMkB92hIevvW7aY20nQ5i6s\n3OVUQqZTP+rYec1SmUjJ7pjbaccOAdHd3VYXXdXqLJ929VPrHQyr4mpbq4KAVZKbW+umqF/q9RgE\nfKzKPZ8kEI0VoZODT5pKAXCZ3DPCqnDnRXz/ePHnMJklV/hy8XIrU5r97frVwV2SnXPZDXSl3kjI\n1Gmapqkj3+FoAaosTi5N6hET7sXfJh7PqloNdTDsBKc62SX90W3PIcWp/iQvEaJmgVVBTq4Rt0Dq\n4jl1D2nhbKZmCyzJ9VHrfcpHH/UZc0lSsiRuUyYDUh8TNKGN8XR6SIuwtt186fbqGs
mkQj3Uo3VP\n5sfJDuacIM8LjOrPWO83Wtz2D116g1MInqE8iYNBEIla/LpW6oCoq0s802lhdeSWwnQ6b2yfywnY\nkVCmZlq1m48EwK1bS8fB4xjHxkSpFCfLJM9qC5ggOjJC9O53C3huahIutHKtp6etUN3dLdyFJeTy\nDLfqUwXdri4zjlXOq61NzHvbNvH52Jg1aY9TLU8utb6pepz4c3jYe7v1pqqAZ5rKu+rfQPG/l7ED\nG+fWWmkSIkNp8nYMvG7nUZWeTzrwkaA2RNaEQDrJbTuoFO68iEOhDm7TZC6Xrg6nl/Q5qnV1nIjS\nRYvv8DXoSl2J0pQ2IMEN4JZpuYw4Of0RUwHALulMyVjJhC83gLCOwhk4OKTw39X9dGNQXWr5Y4AG\ntO9voS22FsYRGimJ53QCOG5BnKAJCxyrbrcS8Pg+7dRO0zTtyeIpH13UZbS1lbZaYlJBKAHPOMUt\na5egBO2m3bYArbrX2sGcX3dqNZOvDlzlMZdj8+viXa5reChnheB5naoSeKzUBZPI2ZrG3Vh1yYMk\nmHHL5+CgADcJXdLaqI4XEJDqZT6yn927hWWRu6uqMaLSisctjq2t5u+qW/CuXWLMo6PWz+Jxsw8e\n98nHp1p8nZ6plFhDvk82ax27zuXW6diq544uIy+RtSxNY6NYQ79Ji+pVgUFPADUx7RTYGBXxv5fJ\nj2+cW2ulSYgMeT0GVTxWfuRkePVjhZPb6qDRi9z6cgPTAhENkrCG2rn7SqXJvGCYvMZdqcuV34tk\n/xfV+iOuWqOcsszq+raDU/tRuLevQkgHdVAjNRourHZjkBZS3cMteY+M2ZSPPurTutHaPSZowoCv\nO+nOEjjWWVjjFLdAZoYyWiufXZIk1TUYhBLA1Y1TxEJP0gANUC/1auuDykeEIkZMqLruXiyYO2iH\nJa6UyD0+d4qmLHC6SIu+zjE/51oo/wrB8zpVJfDoN75TB7mFggleKvx6HZtdCQ91X9XyFo1a3ULt\n5qMrxdLTY45XxprGYsKimUoJi2U2K+JKeQIcaTWV26sJhXTjdsvIq85Hvnay0Kpt8EQ9/Kkrp6Jb\nD+mq3N9fCpU6V9tqluCpVY1ZogChp4r+euWO0WuN1f3dRJecaKHKqjQJkSGHY2CB9zfX6sK3Mk3m\nP896NpJ7ObXTVHpBoJtTnTB/XcvvRfJGXlTbZUStVkZeDkZbaIt2DPI9p0y2TnCluuumKKUtkWIH\ngKq1NUYxo80RGjFKnzg9uHuu20Pn+ttIjTRKozRBE7SNtlGCEhSnuJHwCCQAWMZMqvGlTo9+6rdd\nd50FU4pbUmUbOkhV3+MAnaUshaofheB5naqS5EB+4/HsQFJ9X016Yzc2nUVUhbFbbskbbqNjY9ZY\nRvlsahJWOZ5hlccrqu6zlpIun12g9v/zAOFThwjNayVgytu99VZzv8ZGAae5nD45UiRC9OCDYtzS\ngtvWJqy0HNp57Ke0Yk5Oip/Ly6VQrx4zNVEPh+CODr1lUgVJt5I68pg4lXwJUkFY4p3EXSMDg54q\nynGMDmYzt3WUSaOOJ8gXAQVtga1F/GhgNxg08utqKw9ZNxE9liN6KU10kR+/HST8ZruJyqjXbtuf\n032FSsNeJVC2kzNYVvH+zDUlr+dUPSRN8Rvnqe7j1aJaoILFKigtnnaSVs8EJShWfIBEmREeCykB\nUwU3O/AqcWH18YhQRBu32U3dBlRL2L2b7naF50ZqpJ2009aKa4nVzcOSEMnro4EajLE1U7MFKFUr\nppObrXQJb6EWow27mwb8PQ7Fk0b1YH/nUajqKATP61S1TOYiLYPt7VagUeGXX/D299uPTXdhXChY\nYzwTibzFCiohTn3ybLRqoh4VVmXG2JERotH95gUpZuZKwJRbILn7bSoloFOXBddprKmUdT343HTW\nSb5GsZjVTVin6Wmxfr299u6w8ngNDYmSLzy+VN
ZWVa3adsmbpIK0UlY70zK/qNsMSXMcx5gmW2h0\nW0d5bh2S+3s0Q1UT4qqlat5g8AueaTIP2Xf4ix4SBNfO3uvXNlF2f3Yo4GUbJ1Xq7hvKKq/nlO5C\nv9YX417jPMsBVBVCOPQN0mDJPHn2U1kGhGd3lRlxJQQ5xYKqjxjFaJEWPVsivTyaqIkiFHF0//UT\n58kfUYoSj4m9LX9bSVkVdXu1LzU77gRZ45tUK6bOgimPSZrS1Ed9JZZQN8kbCLwuqe7c8xLfHIJq\nsArBM1SJOAzwZDPlXsyrsZh2yWu8goPcTrW4qVZPCXdDQ86gJy2R6ns33mju19oqxj0wIPqMf1pc\nkOIP9hsWTwmYXV1m7c7hYaLOTvG7jIHUuaByi6f8XE0cxK1Pcq5NTcIyK9fAa6kU9XjzBEHcasuP\nPb9ZwefQ1GQdqx9rY5BWys2QGddOtXQTJiJH30XtOjLT1keLrtmZIaJ1tww2vMtNYCVWFeQNhkou\nXnhdziEqWjpBRG1k/idtKv5sIcPiWYlF0ot767M5onya6K/HiVY34d/d9SrdhX6tM3h6jfO0SwIk\nS5o4/U3JvzkJjLrstmofur4SlLB8Zgd1W2mr4b56F91l1KEkElmHvbjx3k63G5ZT7iLcRm10E92k\nrdnpFGPpNOZO6ix5f5EWiciE92ma1pZsAYFaqVW7bk41W3OUM/aXllCdBdPufPT6PerkSu43vrnW\nfxvXukLwvMYUdMZZDjBBJBLS1XCU4+Yur06ySy40OmpNMCQ/y2ZNEFSf0WhpLCWHQPW9hobi781r\nhNycxc1WPrnltbdXuNbyGEhdtlf5HB01gXx5WV96ZMcOMwsuz5Y7NWU9dk6lUqRLcTxutcjyOXML\nsHrs5RySSatLMre+ejkXg7JS1hzcAlY5AF7RnP36LqbJ+MZezxLNDfqP79wMVuJqyrx4eYy66W99\nwWCazH+YWSLz+I2RSYeLJCydy/r9/H59ezlFLlXSQagNU7nlKao9Bp34uHbTbouVz62WIweGOMVp\nmZapn/oNWEtT2gJJEgxjFKOdtNN3vOdO2kljNEZt1Ebt1G6bEMfp0U/9xto8SA9aPnMaj5OFM0KR\nkqyzcYqXWDKTlDSATgKeXRxnX/Eh2weZGWydYjb5OqiWULvjbgekTtZrJzD1G98cZrcNViF4XmMK\nwoKkSzwjy4b4gQopbkFRy5DoMs+qQGpnfbUDWgGfeQMsl5f1CXTk0y7Jj/rkrrR2z85OPeRGIgJI\nl5fF2FU3XgNoYU1gpFqfcjnrfrIdCW7crXlx0Yz7VI8Rt3Cq41ePPYdCp/jaZNK+jqfduejXSml3\n3lU7vpOoSuVUiioHwGsxZ0Oq+StNIXD4lLx4aaOXxNLl856Xztb66EKH1UrKIy2pL1Y7608AtVOD\nLpWbW8hR+kCaxg+NVy2zbrnW8Uq+o+o1g6ddDc8kJS11Op0sWxxOucVTwouEJLs4TJ1lz+sjTnEL\n3PJEPeqjkRrpFrrFiH90cnH1+miiJtt27GC1mZrNmNK8eE+1qk7SpGUtpTtyP/Vb4lHVGwJeIY5b\nXPnfAt/fzXodlHWyXv82NqtC8LzGFIQFSU08wy1fEorsLJde2tZZ0uzGzS+u1f14vUtptRwelsCU\nN7aVgCQBk1tDYzEzGQ+3/KnPeNw6ltZWe0up01PWueT7NjSYbXO4jcfFdmNjRNu3C1jkgAoQ3XST\nsEr39YljwqGXx4WqwKZLtgSIJEb82KtQaBdfq8tQXI2YSzvYqnZ8J1F1wbMcN+FazNmQCjjXcppR\nN1Ipk2TkxcsYXSQQ0W35vOfdp0nkDBrz16Utl1YKY2kSh7+jQHSsmsGZsqMKbnAE0IS1vQNpwmMg\nPAaamqvOXZdyL56r+R1ViYKKkZPQIWFqjMYoS9mSNmV/YzRm1NFU64xKoORutNK9VloH26iNeqlX\na62MUMSSTM
jJ4ihrcd5MN9PddHdJ6RW7h86t1u8jRjHP/ekerfnWklIuHdRhWctGatTGmyYpabFE\npihFHdRBvdTrOWZT/Vtwqs3Kz5GNtE7KucqbIyGwWhWC5zWmasS5cSul1apoXvT6sYCqF8xObrY6\n66uM7ezvFz85xE1OijZ5TOfNN5tWuslJqyvs6GhpzCJA1NxcCqLqvioE8ufWraVxlg0NZkZbbnHs\n7RXuqlu2mLGSvAao3bOx0d6FmMOZ2t/UlNU9VkKo6o7r5dhJ2QFpENZML/1v5vjOcrWhc/brqltN\nFSlq4fdzdGB/mg4dGqe1MixREsYW0+RMKm6fu6icpauwS9/tuYFpze47BNCR1ya8wvj4oXHCY6CR\n/SNVs3jWw8VzkAoqmYuEDrckQ7y/ARowwG+apmmURqmXektAkceaLtOyxY13kiZLMtruol1G7GiE\nInSUjlIzNTtCHG/TS6mVXbSrpOSJ7M8NNu0+S1DCyFIr20lS0tbau4t2WWC9gzos2WVV4JTQnqSk\nBS5VeFePm90xd/pb8JLddiPkNtfrXSF4hnKVvMBV3VV55lmvbn86yFT3zeXE5zJpjcy0qovt5E8O\nI3KsQ0PW7WWtTZ45trvbhMTOTgGt6bR1XFu3ijHoINzumc2WWhYnJ52TC3mBWgmd/LVdPOrOnVYw\nlzGYHOCcss7anQvqtkFY33TngQqi1yNghnJRmohAdOD30vTYY6DHHgPNlWGJKjZDh9xIxQPJ6CCm\n2ol+gmwvTc5gWrP7DkpH5bi5eh1rmrzBfWGtQFNzU1WDTqL6uHgmKt9SqR4nGVfZTu2eLF1uoCph\nJE7xklhK/rnqjilBUX1Id1g+bxnbKOMWVbfdSZo0YkaXaZlylNOWPOHxjzImsp3ataDHYbid2mmU\nRn0nE2qmZlqkRUsdS/mIUpSWaZluoBuM9/qoT5tAiEPsIi1SlrJGsiR+XkhraDM10wRNWBJF8WzB\nPMZ0iIZKXGjtjjn/W9gs2WX5uSLPn1CmQvAM5VncXXVkxBpzqVoj7axWOriQYDQ0pLc+qjAr4xgl\nnG3daq1zSUR08GC+JK6Uw58OIKUFlYMjt3BKd9JUyjpGaaWMxcz95fqoVtOJCefkQl6fst22Nqul\n1OnJYzCle+wNN5juvF7Knehg0E9iKCc5ldepegxjUU61Ju+/P1+7BEZBB6ZdyypS1KF/O06PPQba\nv3+kLIunhLFMgWjdiVQ8kEyaSiFGfc+PW2TQoOfWXr16UlfTzbVe5+xHQbvaluvyqx4nr2VQpCSo\ncusaV4EKNEiDFqthP/VbXGylCy6HUB4Tyl1sG6mRuqhLmwhI1oAsUMFw2+2kThqlUQsAuSUPmqRJ\nC/DJtviji7pogiboZrpZC7FOjwZqoDjFLVlpB2jAso1M5sMhM0tZ57HnRdvSZTRHOQtETtCEAd9S\nTomJ+qhPC5perPzViN+shgpUoEma1LqBhwrBM5RP2ZXU6OwU4MFdOHWw4AQX2ayAGLWOZTQqXEol\nHOksnmpf/B/w9LR1295eot27rRldOzsFhMnX0u1UQm4kYoW7m282614++KAA7rEx03o4Pa1P4HPz\nzdbsu8mktV272Evd0642qe7Z11cKS2pSJd3xUuFPB4O8nWw2mHNLPVeCKOvjRbpakxK229rytQPh\nNJWSy/WqHFHu0wuU/twBGj9QekNAUtTamwWam5sqCzpZM74uE+wscDqIHj3k7QAAIABJREFUUd+r\n13g8ovrypOby4+bq995Nvc7Zj4I+p8p1+VWPk992dKCqWrs4hEQoQsu0bHlPlvUoUIHaqM2oNxml\nKKUpTYu0WBL72ERNNE7jtkmLnFx9dXU6pWVStsNBbIImaJImSwC0h3psrY8gZzdaPm8islg9pbWT\niCwW06N0VDt2Dp58rmqCJ102WTWBkwRVuQ47aIcxhjvoDuM4qVZ+bjHldVT9
nI+bxUp6PSkEz1Bl\nS2c11JX/4HKCC25RtXtyWJTupmqGXFU6CypPVARYLZuAsIoS6SFXzaLL40mlFVdXN7S93bqfdFWW\n1uLhYQGuXuAzEjH3c4sLvfNO/dpwF9xEQr+Nenx0LrUcgCfss6P7lt1NjmpCn67WpHr+1CSZz7Vg\nfglKaaL075XeEKgHqZYdCaJjh8Zpcq1Aa4x+VgtWsAmN2v7lx801TaX3bsKLUH+yc3N0q5+pHiev\n7pK6ups6i+IgDRpwFqUoLdKixT1WhUK1lIgENOn6CrK6uU7SpKOrswS1buo2Mrn2UE8JFMYoRgM0\nYFhH5RyGadhYwz7qc6yLyduyi/lUH73UWwK6EmrHabwkvvVBerAkVvNd9C7L6wQlLC65shyNnAfv\nSwJvK7VSL/XSIi1a1pMfjz7qsz3/dJZYWW7GqzaLlfR6UgieoRzllPBFjf30Gy8o2+AZUe3KfKiA\nq3uqQCLHLuM3pWtuR4cVJoaHhUVQvr7rLnP80uKpWg7V9zmQ2MVwdnaaUBmJiO102WNVS2ZLS2lb\nN91kurcuLpbG4N5xh4BAmWxJJ7l9ImG6yMr4Wul+qx5PXYwlT3DELZ5uyYL8fK4r7eKkcmtc6mpN\nStgeGtKXpqmKrgXzi07l0NY40finijcE9u33Vwe0ynSnWnZKXEHTVEo/RTl8VKvhbwqpoOI5CRCV\n3rtxvwgNV9xOfO14rKTfi3m7Y6C6cKqAYRe3mRWVbUsghceT2sV28mytchsJk17qQKqWPd2Dx2hO\n0IS2NIx8SCBspEZby6Yue+xtdFvJ9iKD9pjxuo3aXMfKH1nKGvsnKFFiUZYPtV+ZpImveYpSlpsV\nMlFTC7U4xvzKY65aTP3oWkvUdS0oBM9QjvJiaao04Qvvo7dXD209Pc5JeWS9Tql8Pm/ZXiYq4hbN\nyUmigQERI8nb2rLFBGHuOsz7UPeRGWbHxqwQK2HXzhoZi5mlUCQkqZlqJyas69LWZh3X4CAZWXud\nYFOFMbdyKV6ti9xia9eWHCMHQbdzS3XD9nOOBWkhlet08GC+soY2mcqFd0elyZ22VBWICh9fo6lD\nc/6gs9z+fEi17JS4gjpYrt+fz9NjOaKX0kQXbTinysPfFFJBJU3e1kR378b9ItRr6/Upe1fbyoHa\nLlYyKBDgbqEJSpS067WMBgcsCbbLtEx91Ee7aJelvAqfx27aTXGKW9xQdVDNb4RIiynfp5VaDRhT\nY0kbqIESlKCx4oNDle7RT/1a0OSPCEVojMYsfycyoQ2PNZT9eQHQERoxMgAn82Z2WhUE+WOYhi3J\nh1RrKwfRbbSNGqiBOqjDsdyIPOYyYZGbpd2pjRA660cheIZyVC1qBaoZVgcG9Flao1EBoNJNVk3c\nE4uZLrf33583XEnV7Vpbze2cYFYHwm1tzlZZoNRtV93faV9dW6OjYrzcNZaXs+HuuzrAk+JuzNKV\nWEq1DutA0k5eMt3yMcpasBLQ29v1SYn8nnuVWEid2pL7u8VPVQXUypRTkiSvqop7cz+Jb/12Io9l\n3CpTNV2WlWv53EKORp8apdQXU7R8tjg5B8v1wXye/jZNjpxT9vADNNw5NVUL11UVMio5pO4XoZvb\nx93+OypNlQB1jnI0SqOUohQt03JFF/N2+3JQSVLSk8VRF3/pBsV8X/67as2TtSpV8e04FHLLX5ay\nRrvc6sgf0vq5TMvaDLQRilCi+ODv30V3GRDHXXNl1lme0Ib/fUqwlmP+Z/TPjBjXrbSVQKA76U4D\nHo155k0An6Zp6qEeSlHKaOcOuoPaqM2SXVhdyxEaMSC9gzrobrrb8rkb4OvcrYN2mw3d8GunEDxD\nOapa5Sv4RbrqzukGg5OTArDuvlufYEdNgmP3bG52r4M5MWGN19QBMf/8rrtM6NHFecZiRNu2lcKw\n7iktofK1Gv8qY0TtAC+Vsh43vladnfbW
x74+/y6lbqVP5Bj5vDlI68BGdcN2Go9aq3RyMjgrvFfo\n0u1TVRh1IAJdkiS/qspNp1Fyvv5V51QpQFXgsszhfXptrXQYaSICUe4TOUr/cZqSe5P+M666cE7Z\nwy+OrUzO8NxU0BeCumRNKmRU1wv9WvVxrwyoaxEnJwGNw5TXWo9c8nxRrWNu+6oJdiZoQruPLhFP\nkpKOCYl0YCmz5eYoZ8l2207tJdbCFmqhJCWpl3ot4M8trmlKl8xL5xrLQVW1KHJglWNoozbDKqlr\nL0tZC3T3UZ+xRsM0TDfTzTRKoxYrKV+PIRpyBXw1gZO0yAYJimEsaO0UgmcoV1Vy8Wy3r5P1TS03\nwoFJJhLigMVrXnZ0WEHHCRL5Mx4XsZLc4ifb0sVY2j1lzVEny6bdGPizrc1aN3RkRGTilfvyhEo6\nwNNBkw6ypQVX164fOYEaL7fC4VBak2UJHlnOxo87rq5/Wau0EpUDXbp9qmI1lEqTLRHokiT5VVVu\nOvktIKm+rqE4vPfMzZUOoziX9B+mDeD0mnHV0DQR9RDRGAXLOgEa7pyaCjp+qprlUjazKrfGVAbU\ntYiTU2FKV0rFCQ6cst6q2VgHabBkPXm5FAlDuv4KVLBYOhuowYDBQRrUxocWqGCJJ+XWVBXmeqnX\nYiUdpmHbcjRqjKm6JnbZanlyI905pQNMXvJElnqR5wNfD2n1tLMkt1EbpSltZPX1k2DKzkIdBCgG\ndY6HllN3heAZylWVXDzb7cutXTIhjYTUsTErnMm4Re7CKsGVgyJAdPSoaOs3fzNvAGlbG9GuXaIN\nHhspP+eJfiQ8yJqXuZyZPdfrs6fHCsPlPmVdTHnxr8v4K7Pocuux3E6FSDUL7siINe6Vj1l3nNXE\nQxxInECNnwMSNmUG36kp5/I4XgFQPW6VSgddbq62un2q6qruQAS6JEl1IbfrX+mK20HCFXcDPR/H\n/2MR3v94P33kzbXSYRTnMn5AxHUOPzlMk9+Y9Ayd+Xy+emAdoOHOqamg46f8lEvxKruSN26f1ZO8\nXmRXq0RPLePknGp+6uBAVzNSVxfSLjkR70Odp6wnyhMVEZnJiiIUMepmEjkfJ9l/kpKWtnRw2Eu9\nNEET2lqk3HrL4yZlXCfXNE1b2o1SlCZowhXcLLGcebNfuT67aTf1UI+tJbSbug3wkm0N0ZAFvu3O\nY96WUwbboG+GBHWOh5ZTd4XgGcpVlVw82+2rS0ijA5TOTtMKpsueq0JLf79oK5nMW96XcMsBZedO\n676yFidPzuPkshuJlJY+0Vk6t261utfq3HXtnhzK1f10WXQl+HAglxAnwYjDrNyupcVshx8rDrXq\nWnM4dbKOqTG8dnDGgdgui66dBb1aLuFc5VzUyXGtTVNgMXdm47T5vAJzRJQioiQR9ZFwvR0nyn2z\n6Nb62UNUaF4zQczrHKuQjLQwtkZTuTkqNK/RlRTRJws28LVWoMHHB2n0qVFfAJPP5zd7SGHg8lMu\nxaucrKibxcLq9SK7nmvDepXdXNU4UyldPKEav0lkBQuv62kHqMu0TP3Ub4zDzkrHrV+qO6uUTACk\n1vPk/cnYSgl63FU1RSlLXCcXX5sYxSzrxqF6N+22WOm4C246X+rCy/uXVkv5Hk9eJMcst/Gy7l6P\nTb0mDQqz6LorBM9Qrqrkot5uX/n+9LQ+IYwOLHt7S2MPuWtpczPRrbfau7K2t4u+BgZEuxzOeNZZ\nHhtp57KrezY2Wi2IgJkJ160+KX9yy6N0Q+Zw2d5uhWM5RlnjNBo1LcpuNwuWlwWsLy/rjxXvl89h\naKi03XKhUAfEdqqq62o1lSbzG28zjTsoSTBMkva/QPrfspjU3Jx/EEuT4/qWZdmSUGjXLoPd9P4y\nAaZGNw9yCzlKfSlFyb1JGjs4VtfWvaDlZEWthoWVKyi3u6AusjeDG6DdXDnsyBIqRMxiuADCAVD7\noXYa
W9NnSpXz5zDkJAlnOrdfLg54TdSktQS6Wb84vKnQorbDrbsyVlQnbmXdTbspRSkjHpUn+OG1\nQb1Y6Xj/smaodDWWyZB02YW9nMf1CpRetdnHXwuF4HkdqxqJT3Rt2vWjJoTRlcxQwU9CIXfL3bVL\nJMRZXnbPOCthUP4uE+nwGpgc+NRapX7cbrkLL3ct5gApLbtyTCMjJlwNDQkwT6XMz3nNTSk5RhV6\nm5rKi9fkUq2V2awJvepx1UGhn/PB73g2OnOsL13vlq00lX7zbyVjTcY/W3Rr/cx+KrxrrXSN3Cya\n6voq25dl2SqQsM7aHTc2p/HPVRdgvMgJrvn8a2LdCzKrboXusE5WVDcLayV95xZy1HGgg3AIhLX6\ncLvbrG6AOcpZ4gg5bBkxhgfM8xtz+vnp5u8E405uv1x2pVzsXGT7qd82FlRN8qOzpMo42K20tcRa\nyeWUtZdDrt/yOGqmXrk2vA9etiaEsFBcIXhex+Kg4FSGo9w2JXzYWan4+3YJYXSxjWrWWt6macXM\na2GQu8LyupyFgtVS2tVlXYvpafdMtPLz4WFrQqQtW0wglv0nkyJZkEy6s7hoQje3BqsgzRMxqQDH\n3X6bm/Xr41fcWukGml6T61RitayFS62dKnJj24xusQ4yMr7+x0NUGFtzBwwJhkNEtI2IukiAyaTY\nr/CRolvrhzTQSaS3aHK4WSaiQTJcd9XsuY6WLe7+y5P85IrtpEhf+kXOqY2oMF6gqUP+XEQt51MA\noOYE13L+eAw09MRQ9eE4TaXHq9ymNtAdtpK++b7JuWRNLr5131EcrCqpv1lLOSUK0pU56aZuAfiP\ngbAfNLSmz5Sqy4qqxobabe/FSqeurwqSdkl7dHNWt+fxjhyI3ayVunjQLuqiu+luT+Vx8vl8ydjs\nrLN8vexci0OFCsHzOpZdGQ5dVlIvUJrLmZY9HrtpZ6Xi8Za7d9v3weFzZEQAmewnEhFWQGnZW14W\nVsydO/M0OSmArq9PWEWzWSuQybnK+cnkRRxOJZDrLJa6pyxxYrd9U5MA3HTaWiNUTbCki6lU3VtV\ngJP1TQETouWaB2HddgNNr8l1amG1rIY1//7787Wt01lSJ7Ly2pxBjSe9X3GNdQOMaSLqJgF2HApT\nJECrQFZwVNdXZzFOs3bUDLiKpdLRssX34/NQ21dVIJGRVreNB5C0QIJbXzopfTjBdWGtQNlvZH0l\nP6pIQWbVrUbCIY8up5X0LfdN7k/S8tpyTRIZ6cCTw8skTXqGgY10y1Utk3aJeaSWaZn61vpo19wu\nmlyzd6F1sgB2U3eJFVIHZE7r4uZmaUna4wGA5fZ8bNM0bWw7TMMW2JVtcYsqh9Q+6qMsZT1bconE\nOaUeDx5vyy2uEjaDLnUS6tpSCJ7XsXidRGkpdMtK6rWkBbfM2dVjtLPs2dV0lFDD+1EhkqgUOvjr\nrVutcKa2199vjTXVZVyVTw56gABgOUfdGNXEQ9yCqovllJAcjQpwVt2UJdxKIFVhV0Kwn2PoJC+g\n6XTcnN7zIj8wGcR8a9Gmc4dkgZEganP6FV/zSwwYxz9nZnwtNK+5A0ba3NeAQj+gp7MYq3DDXy9r\nti+ZXLHPbtZvJ5nwO6a0r5MdYDnNxU87TlL68J2YpwoJmQwFaOGvSsIhjy6n5SaOkvvycfuxngYF\nfQu5HH0unaRPjYM+VNBbAe3k1y21XDnVyrSzHPppy06yj67iQ8YmSpfZDuowSoNwleOuLMfVR33U\nRV2UprSREEgFYF35EA6KquWSx6vqLKrywbPe+k2Ao27P++HjaaZmGqVRRytyqFAheF7n4hfTjY2i\n3Ih6Ye+3pIUfeFXjPLnLLb/o1SUh4jGN3Bqo9sVfSxfYSERYQ/m4ZfkRnuSmv9+Ev2TSatFdXrbC\nI3evVcu/qJ8DJuzzsfM15GCbNXMplMxxYMBqsQUEYPNYULdj6AXq/A
BjNSyOfsCvGlbVmseXKjBi\nV5uzGmstxdf8ZQmMbUSF8TWaOjRHhTfXvAGGCoW62Ek3+FJBSYUbJ9jRQVYzmf+FeologIja2XsD\nZFpp7eZn16dfkCy2Y2T39WLVrtSqmCbPcFxNRt0I+bnwDsrV14/1tBy40elAOk2PAfQYQIemsu47\n8PFq1iiocXHp2iw3QYuf8emgTs5X1qmULq5cXhMO2Y2LAxt3fx6mYduER9zyKQG5kRpL5qrW2eQP\nNS7Wz/qq2/NzQ433tINoVZsh0VWo6igEz+tcdllbufVwdFRY33RQyuW1pIadu2gsZoUl/hmHsMlJ\n0c/u3QK2GhoEaHV3i/cEHOaNUizcmru4aGZx5ePm7XOo0Vk8uSVRth2JmCDc2ioAVk1Y1Nlp/t7R\noc/iyteQWzClRVRCBp8THyOPU/Va7kRda/XGQDlQE3R7RP7ArxqxoAcP5stus6x5K1BjV5uzmpZY\nvuary2R1LZVusl7E56LGTkqqGSOirEObHBQnfE6EW1nl9VeEvddHVhBLUkmcqC95sPhp3SL9WLUr\ntSr6ANc0lb8U9Si7JC66i+CgXH2dLLcq2JdbkkE9pw6Nj9NjAO0fGaE1n19cOjipRqmIctq0O17l\ntCX34eAnrXgt1FIClxxUB2nQm8u24mLLkxDp3J91MZU6SAYJ92PVQrqbdlOMYsY2uhqfXqX7nuLn\nBo/3lMDrBNFSXm4ShHB6baom4AngnwP4ewD/AOD/0Hxek8mGKhWPn5SWR7vkMJVc3NqBgLywjUbJ\nyACrfjYyYnV/dRqbaVXMWyyAHBZ1cotD5TUmZabZZFJAX1+fgHJ1LBMT1qy1sg6nhE439fUR4RML\n1PjoAUrvP0TT/2rNYh2Wc3JbJy/ycmPAz3EPuj2i6sCkH1WSXKhWcOhpbXyYr0rW3K3EiBellf3V\n13aKsu36HLbTzY+XcZGGn67i6xYSACznllReczDjbVdYm1V3PtlZtasiH+B6rSdldroIroarb2n/\n1j+Bci1+6jm1VijQ3NSUb+hUxcuQ2NWM5NtxUHCDh3Lmane8ymlLdxPibrqb4hSnRVos2Z7DrddY\nSdmHjIF0S/LES8d0UqexdqpF0y7mla9PH/VVBG1e/u+p8yvHfVenaljY60HXO1BXHTwBRAD8I4Bb\nAMQAHAdwu7JNjaYbSienOoryolYHparKseo4WRv5Ra9T4hqd+6pfCLODGt3aqMDLE+2o4KnOT+c2\na6fRUSL8nmkB6fmDOQtgy3jS5WUzhnZsrLTWqU7qsXK7MeAXZCttb8cOcc51d3uD9JqpTJ/Darrp\n+gbyNOlBz8vcCmRaD9vI2Q3VTk6xmbwtOZ5+EtZHCZ7NpM8yK/fpoNL5yXjN4WIfO0iUc2kgoqNs\nblNkAuUYGVl3DaVZ29z6202B+KHaWbX9qBoXNZUaV+tdG130vd7B3isA6LarBjx4OV7l/h24jZeD\narnnjRsg8xhJPhavgFeOO3Ct5eUmwUb/XVZL1ypQe1UtwPODAL7BXn8GwGeUbWoy2VD+5QSlqoK2\njnkdm3Q1veMO6ziDiEnUvc8hU8ZSTk9by5lw91jd9p7X5VPCAtL1+f3UceOacROAW1jVONaentK4\n2HITRvEEUEHEEXo9Jqplt26UJj20uWijrbUWVZoQp0DeXW51MMspxqlkCR+PfG5h2+na5vskbfok\nssKpen7xNtR14GsnYbbNYXsnVSlwkl/UDC4MBp5JNcjsrG5tldNXOcBRroUxCOUWcjR6IE2pQ+O0\nvEE1YN1kV4/Si6trNeDBy/Eq9+Lez3irdd5Ii+hW2lrW2vnJWstVb5a4jfy7rKauVaD2qlqA50cB\n/Df2+hMA/l9lm5pMNlT1ZFdKxYt0F+V+QFC3v3QP8dqOHYjp3i8UrPGa3d2lGWUjEWuNUO72K8HQ\nixV28uNrlD00R723rFksqSqs8w
RJvB87uNTBvpPF2glUq5HcRlquW1ocQL3GGU/y+Xz9mya8yM58\n5WZ55Ovs1eU2zbbpodJjxT8fJGs9TQl2EhKdQHmw+FpmqG0iors1/UnJ7aSbrZd1ILKunfxdzX7r\n8bzM/0q+PGB1kcUV8MCokRhn8PFgIDTIuppubZXT12axJsiL/OSBZGDrWVGtYQepAODH1XWj4KFa\n1shaqBzXVa5y5647rtU6p65n1cM5tpGqBXj+ixA8rz05gRsvpVKu/ICgTvLL0ms7bjGeaj1MCUZq\niRTVBVeulQRTvr1d+Red1ERDKmzL19y9WP4us/W6lTThayLrl8oxlZOxuBItLwtLp1N913Ktj+WC\ncj6fv7Z9DnmtTTvAky6lu4koVnyvtXQfucYvSsCzswr2F99rJwGK/L9HlES22UVyB2WeCKi/uJ98\nLV3bORAuFrfT3dTwe4zV7dM2c1WUf3++KjcxLK6ALDHO6FOjtoDjx7IYZF1Nt7bK6WuzWBOMi/xD\nCGw97//P91e9VijR5ljjal3clxPHWkvxGpt+3Wx1xzUEz1BBywt4RlGZXgewjb3eBuCEutHv/M7v\n4JZbbgEAJBIJDA0NIZPJAADm5+cBIHxdR69ffBFYXBSvs9l5XLgAABmMjAD/8l/OY37euv3nPw+c\nO5dBSwvw8MPzaGsTn8/MAC++OI94HHjuuQwSCbE9b296WrQ3O5vBK68AwDze9S5gzx7n8c7MwNj+\n3e+2bq+2DwBtbRns2QMcP262Nzsr5vfpTwOJRAZLS8DCgvi8vz+D97wHOHJEtL+6msGpU6X9vfji\nPAoF0d/amv7zxUXx+cyMWB91PoODQKGQwdCQWN/jx4F9+6zz3bcvg9VVc7wf/nAG27cDp07N48gR\n4PbbM/jxj835qfu3tIjXt902j6YmYGHBPL6f/rR+fQDgwgXxemQkg+ZmYGio9Hjqjo/b65//PINM\nxlzvmZkM9u1j2xfHO3/bPDANZOCtfS/r7fj64Xng+Mb8/dn9vQBAZjYDLAHzP5oHUkBmWwaYBeaP\nzwOfBzLnMkBLcfz/n/K6Dci8lgFOAfNH5oEskJkv9l88vpm24ueH54EOIHOp+Pn5eeAIkLktA4wA\n81fmcf+3gP9wJYMLAA5F59HaUDw+I8D89DwwX5zfADB/Yh44C2S+X2wPxf4uZ4CTwPz/Ng/8EZB5\nVJlfKgNkgfn/eR74v1n7fzgPfJydD9+ZB4aAzD9lgEKx/XeAzM9t1vv4PPAwkEl4PD7q9nK9RjLA\nHof9n8sAM8X1CPB8Oj5/HA/jYSQyCczeO4vsf8ni07d8Gv/18n8FANy2chumb5mG1Pz8PF489iIW\nexYBANn/ksUf7fwj2/Yfjj6Md95+B09/8mkk4omKxsvHl4gnfH+uHd/8w3gH7+DpzNNIIIEH/vQB\nnDh3An3DfZi9dxbHv3u8ovUN6nVLpgUA8K7ou5B6O4Wvf/LrFa/nucFzWFhYAADMNM1g39i+qoz/\nYTyMv8/8PeKI4775+/BZfBYPZB6oyno9MP8ATuAE+jJ9mMUsjs97P377YD//2cwslrCEC/MXfI3/\nxfkXsYhFIAPMYAYPzz+MF/EiFjPFv5/5LP4I9n8/1X7Nx/cIHsHD8w973n8Ws8jOZ/FpfBqJjPh7\nk9tsxPHbLK8/j8/jXOYcWtCCh+cfRhva6mp8G/36+PHjWF1dBQAsLy/Dk9zI1OkJIArgJxDJhZoQ\nJheqa3m1BqkWsELBTHDjx1XT7n03i5xM0OPVPVS1wpYbc8fnPT0t5ptKCQtdoSD6UZP76NxgeXkU\nLy7K09PCdVa1XHodr594TjcLp9N+QVs/ZR3V9naNy22Z1sea1+MMUI7rm6bSb+ApzWd2mWSl9bGD\nrJZAnUup6gbbyNrrptJxgIjiZO/Wyi2iTez3rWwfp/mp54Ic3xBZraHlWBj9unRXySpeqWe5U3bW
\nIK2YGyl1jXILOer4i47AXFmDVDUscrU8jrVyafbaj1+rY5AxoPVkAa6nsRBtHtf3SnQ9zDFIodqu\ntqIPjAN4FSK77e9rPq/JZEO5y2/SGa+lMry6sjpJt61dn/l83ti+u1sAYn8/0Q03CNDzC3C6eftd\nK7eSME4uyuUCHS+X4we01ONb7g2JSsVrlNrN26/rbLk3HerB5chxfSXE6WIinTLJyiyucj8OdFwc\nqKRbboqs9TBBlrInV9X/BmoiomkSQNpAJmguklnqhO/jND+nscr9hsi5Tqid0uS8LnZyIcWS88ll\ne2MYCwvUfeAAjR86FFjJlWqUDAkyCZFXpUm5v8JiRKN7orR8Vr3zUXtVc10OPnew6qVfpGoFOF77\n8XvxH2QM6DRNUzd10xiNbQjsceguNy7UTpX+36s3EK6Groc5BqmagKdrByF41o0qAQenfe0son4g\nwKmkitpnPp8vyXprF4NZrvyulZ/xV9qXW79+VckNiUrkZd7ViDHVqR7A03F9JWwtU6nFTbXC8ddp\nsn4jDyv75sia9Ee3j58nLz2ia2eSSpMXDZKZ/dYu5tNOfK7lmA3LTSiVJkdgLTmfXLY3jMMHzBJL\nU3NzPgZUW7klBqoGgJXcXylaAIO2eFYy9iCTM6kK4jvKq+WwVglSvPbj9+I/yPFvtMWrmv1Xek5d\nD4l0roc5BqkQPENZVAk4uO0rLW/cVdar7KxaXsar1iJ1c2v1YkHL5axutuXK63pvdDmOoC2ZXq2U\nfo6vtGwHmV3XrxznVY30v5UqRwIo+TdykpivIlktoSBhjZTutPIz1erZSPpv+1b2+6Cmb5CwSr6b\nvW4hyv1POUr/XprGPzVOhY9r1q7Ex5L0gJlm7Xq9PlOh3av8AqvL9nIYY4dEiaWR/fsDs3hWQxL6\n2v68jca+PlYCaNUAsJL7K2sFSn0pZet+Wi5AljN22Vf3F7rr2q2gjvvyAAAgAElEQVR5oyHKTm5A\nvJEX/xtt8apV/0ElUaqnZEyhaq8QPEP5VrnXz2pmVrUdv+U8/MLL7t2irElvrwmLuja8WND8lBfR\n9VGPDGKnHTtEjGVTE9HiYjBtBmml1Fm21ay8VRWDnPudXIMtaYPnaloKRjdWV8ulGguqPmVWWB7/\nKZ8xm33k+2omWv7cWfpe+vfYhf4fTJWuGR+nXQwrkb+SMZXKL7B63L6wtkZTc3PeoXODvmwKawUD\nsvAYqPsL3RbAq1U8opMbMQfI1FzK80Wwl7GrF9e8r/6v9FdtzpVakmsNUV7HW69ATLTxFq9a9R/U\nMajnYxmq+grBM5RvlQsNMsZxaEgfI+k3RtRpe517iG573Xt2JVT4dZuf8iJe+3XSRoIqtxT393vb\nx6l+aipFFI1az4UgxI+Jl9hQv7J1OUqT8W224BRPaxngJT0YBakdJCyS3STKn6TJamGcIhPEtpIV\nDGMkypvE2fYgUfYEJJIB9RHRDcU202zbDjKhspUsMZ8EEvGcaSqFVYfn+P9avND/zAgVmgti7BwW\n1VqadoBpB3dpqv7xUFQz1+1a+aJrxK2eqoXQa1ypDkyCctM1XHH3g7Dm7SI4t5Cj0adHKfWllGPM\nqHpxXQvQzufzWmus03qpgFxriPJqPd5oq6IfXUsWPf49FdQx2EzHMlTwCsEzlG+V63apuk2q7bjF\niPqJj9Rd1Om2172n9qW7bnNyAfUyLz9rmMtZ4a/G145GzdKWFu9uxXZu1Xwty3G5dpJbVt5K4d0W\nFMaJcp9YoPQfHqCx/Ydo8uNrNjGYfICkB6NKxWFTwqSEPf6tK/uVILZc/KnW0lSfSSJKlL6f+0SO\n0v+m6ArbXCiFTd1ThVqQ6aIrf24RPwvNBZrKTYm2ZQxqmu2XJfsYVjdxd2M1vrWKqhl4bmAaZwmX\nY18fc3S7dZIOTPSwkqPcQorSB5I0fshbH4W1AqXmUoQ194tg
CW/JvcmyQKkaCZxU5fN5LeA6wZ0K\nyLVOCuUVyDfaquhH1bLobQTQ8u+poI7BZjqWoYJXCJ6hfCuoeEO1Hb/tBrG9nxjCcpMIeenXq5tx\nMul9vkFZSZeXhaXTTywrd6vu7S0Fbrc420qlW/OqGX4KROk/9pnwxa8bpk7c4icz03Lgk7DZQtY4\nzRgJC+E02397cRs7F9lGTbvsaXGFzU25f+tr2qBeInpQmUNv8WcrCVDtJDPBUVDwnmb93czW5Fq5\nJtroAHEqdbv1E9OpAxM9rKQpfcBfIqHcQo5GD4xS6lCKltecv+A4vEnX4dGnRm0BTXdxXQuo0wGu\nE9ypgFzN5Edex7vZVS2LXuiiGupaUAieoTa1JFz191cvsUwtrtvsoIjX+ezo8Ad/G+hhZ7hV6yzF\nulqntZCvGwg+Y/7Giwlf2v79fhr7iI3FM2ilyfwWVWtnthDRUTLjMKUraqvDPk5POyAtPsc/pbjC\nSlj1Yvm0eyaLY9eNc5Lc4d3rMeS1RLk1N7yuC1TluprqwEQPK+M0fgjFPoY99SETD+ExUPYbzu4X\ncvxDTwxR9htZGn1q1Deg1RrqpJzgTgXka6Wm60aqWha90EU11LWgEDxDbapEN6pU100JOUG6sflZ\nn3LX0g6K+PyyWX/tO4FWtY95oVBe/VA3VTJuXzcQ0lQCIE7nVGFtjXr+YI7QvFZ90JdAJYFshEyw\nvItE7KV6g0JC2phmnwDA0+IKK9/XWTXVZ5fN+xI6mRvsFfb5+i4P65Rm7dkdjxyJOFW1/6BdoDVy\n+47aiDqY1ZQKP8HPr0CFtUmamst6bo+7zU5+Y1I7rtxCjlJfSlHHX3RQ7xd7jbhOP4DmlNE2yHUo\n9/+epQ7k2vI1Z4GsZ/lxn90IF9V6KCMW6tpSCJ6hNtQy5iY30JBw1d5uhZxKvyx5v06JatTxlbuW\ndlCkwqOf9p1AqxbHvFJLsVvG4VSqijdKNG6cJeeUYlHzHUrnN5Oq3J4nCOov7jtNRD0koHPUoU1u\nJSyQqItZ9W94h2cXETVr3lsujjdtvn+RbfNCn4f18uKKy9o3ni3kvIYBye07aqOsYxWDkMe7Q3x+\nasbbcsfhd5+xg2OGFdMuHlJ1sfWbHEltU81oG+RxLvf/XujCuXGq97UPwTNU0ArBM9RG5p5wlRsg\nSbhZXg7WHVYFHC8ZbFMp08U0qLV0S8hUrur5mEs5ZRyu+o0SLzGYaTK/xabKAG1lf1cQ5durQKV+\n5tRmjgTsRYioSbOf16edRdOre61drU+QAGIex9lGtFps9++aiVYlmNrNL03Copu1WUsp2b583knW\nMi/VOL883nDYKJdHJxCyAzvL+2PmnbrcZwdtQdAp463bOMoZu3UiRJQmKnykQFOH9PGQ3V/opt4v\n9lJ0T9Roc/hJby68qhxjLFl/o0/bx4zaTiUAi+lmceGsZXKdEst3FfrOUY6SlCQQaJiG63rtQ4UK\nSiF4hqqH3BO26u8nw6LpJ76xUnEwc4JaFYSy2equZbUSO9Wj7DIOB+XCW7G7caXJbdT90+QMPHL7\nISoFKhWgkpo2e0hYSNupgm9r0ma1tTy3FPuy+zzio68kWeB4rYFoldeS1a2Z2zpyFUjEi06QGTda\nrYzDUh7Ht1FJV5wgyQ7sLLGSf9hr/IGm99vHQaoZb9X+dONwgyxP9TUXcpT+v1gGZuUYFNYKNPj4\nILX/ebvF0tn35b6yj4VjjKWmPz+WzyAsppsly2gtrYMllu8q9M3bnKTJQNoMFareFYLnJtBmjsGs\nVOXWY6zUPcQrmFUrlnHTKsCT1e4YBAXNft2NS84pL1ZRJ+uWun8ReH7aTXS/LlGWU38FMl1Wo0S0\nWOy7Eoum3dMtdjPoPtX++LHSQaJXcLQ7Nl6OayUqji9/Wz64Pvy6bTtIgpAuY6sd2FliJQ9OGH+g\nfPvpb09rodEOynTvu0GW
E+DpyqGk/lOKCm+WQi1PHITHQJ17Ox2tkeVaHXVjSu5N+or/5Gt88LmD\nnvv2q3qoTVlLy6x6rlej71pbmss5hqGrbaigFYLnJlA9x2BWW+W6hNbyy3IzWA+rpRLO3ICTNeiE\nTka7yj/pss6pNOmBSaci8By+gSgPokMgyjuV98iRcElNkojt5HU7p5S+y4VI3fZbfe7j9PRi/eSu\nu3eQOyR6BUe+Pj1UuxIqxfHlD+aDazNN3s8zL83ZAJ4d2OliJdXty3HhVVWO+7EO7nQxm3x8qS8K\nC27HX3TQxLMTNPq0CaKpL6U8W4LtxiLnqcaPJvcmjeRFXtvla1zN/3v1EItYS8useq5Xo+9aW5rL\nOYYheIYKWl7As0FsVz01NDRQtfvYzJqYAA4fBkZGgLk5IJHY6BHVTqurwMwMsGePOe+ZGWBpCWhp\nAWZn62M96nFMQclpbpkMsLAgfp+aAvadq/3JWjKGfd72051blnaRwQJEw1OYwj54bJhrAsBhACMA\n5gB4WI5XOoG7CuL3q11A4+niB1PF/ZcAtAA4C+CYTSMdAKIATtt8Xom6fLTbDOCC5v1WAE0ACprP\nOgH80qa9LIA8gHMAGgH8VnEsLQBm4Wl9Dclj01ZsDxBrXMZh3nCVcZ7pNPP8DJZWl/Cjwo9wav0U\nRrpHMHf/HBJx5wZX11cxc3QGe+7ZY9lWttcSbcEluoQjrx+xtDmDGSxhCa888woKK+JkmLp1Comm\nhLHf7L2zRpt2/fD+Dr52EL9c/yVaIi24ePUi1q6s4SquGtsMdw3j9fOv4+TaSWMsj77wKJ786ZMo\nXCxgqHMIT9/3NB554RGjn4lnJ3D4xGGjjalbp7BvzDxR5Odu65V5JoOFFfGd0hPvAYFwav0U2qJt\naIm24MXffhEDWwd8t+tX/Ljw9XXSBCZwGIcxghHMYQ6Jck+yUBum8BiGqgc1NDSAiBoctwnBc2Pl\ndoF8valc0Kim6nFMQclpbiU3RVD7kzWQGzMzMIGuCDCB/JNeLba9B95gYAa4+gTQuApcvhOI3gDg\nCARQvBfAAQBnitumAKwo+0cBXGavGwA4fbW2A0gC6AbwsofxyT6uuLQLCDD8VQAv2HweA3CpuN1V\nm224hgF8G2KsV4rv8flxaHwPxNqsARiCgFkVTOWxKcBc4wqgbUPl9zyzEQej/tZ+/PCjP6wIdnh7\nkwOTaIo0Yc89e/DoC49iaXUJr0RfQeHeAvAtACeAtmgbPnDDB3Dh0gUcOynuqkjI8wJLN375Rqxc\nUP8ohKINUdzUehP6W/rRHG1GW6wNezN78egLj2LfT/bhzCXxh5UdyOKp+56y7Lu6vorb992OlQsr\nWgjkQPzoC4/i4GsHsX5lHTt7duKJsSeMbbc9vg0nzp9ABBFcKZ7ETY1NuHj1omWuunaDgk7AelzU\nPu20ilXMYAZ7sCcElk2q8BiGqgd5Ac9orQYTSq9E4toCmUrV0iJ+jowIvtFpfn4emUymrsa0WeU0\nt9lZlTNrf7KWjsGjOGxy6+EMgH3ALGYt/6TLOqcS8GdBWxLQCQDRdwHYC+B9AOIADsKEziSA7wF4\nP4CTEBBHsEKnnbWR63xxv9d8jPGy+yYABEy+aPNZFAI6ATEXaUHdCuBtzfYpCOhMQIDqFQjo7IGY\nfweACIAMxPH8BcQxBUzwLR5XQ/LY6KBNcyMiaAX6HeXzPLODuJao+GMPysLG2/tC5gtGe0urSwb4\n4CjQ2dyJsw1nce7yORx5/QhSW1LGfnvu2VOyz7bHtyHSEEGsMYaXHnzJsBKuX1m39M8BrzXailRz\nygK0ibiwrEroTDYlsTezt2QeiXgCP/4ffoyZozNojjQj+1wWLdEW9DT34LW3X7Os49LqkgG/R14/\ngtSXU2iJtmBn907c1HwTTpw/YYxppHsEiaYEjrxxxDJXqUdfeBQn3zmJh771kCfLpO6c0h
1rflzU\nPu2UQKI8r49QdaNyjmGtr6VChQLEv/lQoepGs7PC8lZPbsf1OKag5DQ3eVNkQ+Y8MwNkMkg8NIF9\ne1b9j2EJwAKEi+JPiu+NQAAIzH/SNbkzPAMBTT9i49gL4FEIt9NjMN1SkwB+AGAAwKsQlr5mlAKh\nA3Q+jxk8gwyevTKB9bdXncfW5XkWpeJWUf6fhI/1fRAutJMAfojSW52tAO5gr18CsAXAcQDbi++d\ngbCayeO5pvTJjqtFM8W+zynv83NjRrOf2szzM8g8k8HEsxNYXXdZzzqQhLjDJw5j5qg5wdl7ZzF1\n61Rgbp127UnwaY22one9F9vPbsdlEidFsimJ7/3290r247DUiEacuXQGp9ZPYcfXdhhrvrN7JwCg\nPdaOba3bMNQ1ZPR55tIZHD993GhDAtdP3hZ//A1owK1tt+Khbz2kPYaJeAL7xvbhmye+aazdV//x\nq8bvt++7Havrq8Y4AWHBXb+6jsLFAo68cQSvnRN3eIY6h5AdyGLu/jk8sesJ2zW3O06A93NO10bQ\nxzlUqFChglToahuqItVr/ONGjKte12JTqlL/Zh4X9ySAR1Cxq6Krpczu8wxQDCcF+iEALKG83wRh\nERwG8ITSdg+AU96H+QwyWCk2fCumMOZ0F1x139VJ59LLXWgjxedFzb6TAJ4u/i6tkmcg5neO9Z0C\n8GNY582PYQKmy+zNAJ6CWK8tEJbXAZQqA3N9uauuz5jJclwXN1J+YwdlLGYLWjCL2YpvxqyuryL1\n5RTWrwoLZe+WXpxcO4lkUxL39d+HX7zzC8f4zu1/uR2n1s0TXq453yb7XNa0qgJoibTgu9nv4t/9\n4N/h+KnjOHnhJNaurCHWGMO5y9Y7D3ZxpjPPz2Dvq3sNSFY1desUmiPNOPTaIUQaI7g9eTsWfiHG\noIsddZN6nKSLcku0BWcvncWxN63uyDpJ996OWAcWP7poiSENFSpUqFordLUNVXUtLZl8MDNTP27D\nGzGuel2LcrThEO3Vv9luoLOwulgGcSykpQwode10+lwaSVTQke8nAdwG4TZ6RNP2SwA+DOAd2Cfm\nkeoComdbgEtAN0Zwj9YUyOTFtTai2Y7HbV4BZj4+g6XeJbRcbMHsn88icSEhrJl/yrbj7sRnlTZW\nYM5bAnwMwmIpvSPl8cxCgPDZ4vN3YcItl1zfNgiL8irE2qvnhovKcV3cSM3eO+srdnAJS0airRnM\n+HbXs3P3XL8owPPq1avIDmSxN7PXAowzR2e0APjSgy9hx9d2YP3qumXNpVUSsFoyCYR4JI6Opg7s\nG9uHxN6E4V4r4TfaEMVluoxGNOKZ5WfQ1NCEt68Iv+/eL/Ui3ZfGhcsXLNB5V+ddWHlnBSfXTqIt\n2obCWgFvXHkDpy8K3/Erp6+gd0svRnpG8PhvPI5HX3gUR39xFLd+9daS+M+SNcMMzt57FqmjKTx5\nz5OGG69cm+ZIMwCgI9aBP7n7T2zXfqB1ACfOn8CZS2fwyAuP1P1NkVChQoUKXW1DVaSNiH+cn593\n3WYjxnUtxYJKiD58WLBdzeXVv9luoBI2PQKzl3PKApC641v8/MdtwFRBJA4DIEBnClbonIGAphSE\na21n8X0OSVL3QcRGcuiM2YzxNHBvxyxu7ZzC/ZhDPAhXYg9wunTDEhZ2LODwnYcx84nicTgPYWmW\n4uNXEw51AXgDwhr5dxAAfwRinglYj2eLsq/dvdVZiGRF52ACPeD73CjHddHT+RSAnp+ZwTOZDJ6d\nmMB68YSTgOZlrDPPz+CVZ14BngWG1oewx+1GhUY6d0/pFgsApy6eQiwS08Yf6vb93A8+h46mDsQa\nYmiNtRrtvOdr70FibwI9X+zBDVtuAABQ0RRfuFjAh5/5MAAg1mj944g1xvDygy8j2hDFVVzF+tV1\nAzoBGBl5v3/q+wBE7Oium3Zh4bcW8OrHXkVPvEfEp7
5xBD85K4C3LdqG0xdP4+TaSbTGWi3xn4WL\nBRx5/Qhmjs7YuswuYQnH4sew0rSCkedGMPHsBGKRmLE2dyXvAgADKAH9OdXe1G7s0xxp3lQu4aE2\nXrX6ngoViisEz1AVqZ7iH4thgZiYAP7sz2o/rmqsBZ/Tag2vJTYcohMJzCT2IZNNOM+9lgPVAaTy\neb4H+OA54MkjjIN1oLMEEdu5AgFnKiTdBgFhq8VtzsCqXcWx3Fd8zeArfiqBsXP7vEGnCm3N7ruU\nKAa0bCkCxekR7PlK8Ti0ohSipSKa947BNibXolkAvcXfh2FaRFUlIDLvOrVlJxmXOwEkLngHuVpr\ndWkJKwsLOHH4MI6WcYdoaXVJlDo5Adxy9Jay3GxVmJx5fgYXrlxAU0OT5X1AQPzg1kHEI3E89K2H\nDEhUEw2dXDuJS3QJC79YMIB05Z0VI/bzbwt/CwCINIgTqSXSgr/+yF8DAF568CXEG+MARFbZ93W+\nD5954TPoaOqwnUNXvAvRBuEAdgVX8N03v4tbZm9B6sspIyvtUOcQvpcV8akS+BrRiJMXTmJ1fdWw\nwgJAW6wNf3L3n1jAevtfbjegsKV496RttQ2nVk7h8InDaI22Gjc4Ord0lqyLTvymyGtvv2YbMxoq\nVKhQ9aIwxjPUNaNrsezJRs2pHsr8eJp7PQyUqaT8y6PQx32qcYYfgACuyxDAdr64XQrCUsjjJ+8A\ncBSlcaKq+iGAVZdJlisGkTm2ubitHeQPF+dxDNa4zwZgdcsqZj4xgz3P7UHijoRwG5bZbFMAfhPA\n4zBLpXQW57gOsQZvFJ/txbn9Ozi7wrqVGOHuum0QcOrn9MhAHx9aZ3p2YgInDh9G98gI7p+bQ9zn\n30AQtSTVsiBOZVtmnp8pKW8Si8QsbsG8ruZQ5xDyv5XHoy88asRfNjc2Y3zbOI6uHMVtidvws7d/\nhu9MfscS3yjH9Ma5N4xMtxPbJnDk9SMGSBprsG0CL731Ek6unQQg3FuJCGcvn7Vs1xXvwvt73o/Z\ne2fxwDceMGIwARGHyfuS73135bs48c4JNKLRqDea2pLC9z72PTwSfwTHnj2GN068gY7uDizev4iB\n+IB2Tb1IdyzLqekZKlSoUOXKS4xnaPEMdc1ow610VdBGzWlDM9oW5WnuNRqoV8tzidVbzaAqLWmX\nIBLixAE8BJHBVrq08uviFQhw4vp7CBhahel2OgTTXRcQcaM/hD7hj6pLEMmLfg576ERxLj+E+K/B\n/3NQ0Sr43/Yh8U/F2M73K3P4GkzoBARM//PiPN4LM/PsWQjodHOFdXOXlevO3XX9yM2tuk507+ws\nbp2aKgs6Z56fwdmLZ5HaksKTu54sG0pU115uAVVrherKm6jW5J7mHnTFu9C7pRdP3/e04cYq4y9/\n/aZfx+n103hr/S0ce/MYzl48i1958lfQ80VR/gQwS5W8euZVAMI19uLVi/i1G3/NMvYHtj2A85fO\n45frph/4aGoUTZGmknmeXj+NwycO47a/vA2vrr5qvD/cNYw99+wxrKD8PQnDV5lv+craCn59/6/j\n5DMncf7qeWAAOHP/GTwSFy61M8/PIPtcFucuqumYnaVzCXfKnBsqVKhQG6EQPENtOtnFJdST229Q\nuhbn5FW1nLtbrIvXmNcSDlYB5iBMIHodpnspF0FkuZX7vU/5/HJx/9sB/BkEvOVhlhkBgGcgYKsd\nztK5vOqULG4rkwJdcdhuD4R1N1V8bwQi+yxXG4TFcw9EnVFpXIoCsMulwtxfHQEZqBwc3dyqXVSr\n2Kl4IoGxfft8QycgoOTYyWNYWVvBB576gOe4QKdSHyrMPvrCo5ZtJZQmm5L4wb/4QQnsvudr78Hj\n//A4Tq+L+EkZ3yj3645348z6GfyoIGoTjXSPYO3ymuGC+6EDHwIAfOUfvoKFlQWcWj+FKKJGDdFX\nTr2CRJPZ5wsnX8
DCyoIBta2RVly8ehHfeuBb2rk3ohFvrb+FU+un0NTQhIltE/j2A99GIp7A7L2z\nmByYRHYga7zXHms32o01xIw2fn7+51hYWcCZN84AJ4Gt2Io/KZ74drDodk7pYnvLSYy12coHhSpf\nYYxnqI1QCJ6hrhnVg5UuaF2Lc/Kqepp72ZbnWQCDMC2bvP6mtHB2wATEhuL7FyHgKV58fwJmXKPU\nCoB3AXgOouYl/zZPQ2TCLcBZV2CfnEeqCSLm1KF2qDH2H8BMBvRjmPAmYy3vhEgkJGNZ+wAssjYu\nw5qQCDCB80l4r79ZITj6TUBUkfwAdYDiNSlX1lYMyJHgse0r2/DhAx8uTYyjAaOZ52dw45dvxF/8\n/V8YMPvIC49Ytn3fX70PZy+dRXOkGdGGKIb3D2PX13dZ2l55ZwVXinc1Yo0xS2zo1K1T2NGxA8dO\nHsOp9VPob+3H3P1zlvM30hDBjV++EReumCfrFXaX5OS6KLMCCJfa9ybfC0CAIQCcv3IeR14/gt9/\n8feNGFUubrm8SBfx49UfI/tcFhPPTgAAnr7vaTx131MG/M3eO4vueDfOXzmPS3TJaMNSsuUC8PbX\n3sbvrv+u5bi4waIXQCwnMVZoJQ0VKlQ1FYJnqE2nTCaz0UMI5UMblSDJj9zOKd/WVwkTD0HAlbRs\nxjXbnoGAxH4A0hNwBCKm8hgEoLVCuON2KvtegbAWnoIZFwoIq+QxuGekbURpjU6md1reAW0hEbN5\nyaWt4zDrac5AWGSPQABgd/F5A8S8pC7AClvDELGmGZggJt1mJUR7sWJK+M2i5kAH+PyOUt2xayBp\nmWxqLE0AJMHjxDsncOzNYyUAwsGoOdJsAOfKBRMak01J7Llnj2XbvpY+HHvzGC5cuYC31t8S2V/f\nOILbv3a7AU4y2VAEEbz02y8ZcYrS9bQ5JrJfNaIRFy5fwL8++q/REhF9vLfjvbh49SJWLqxY5krK\nCX71qoDHM5fO4Kdv/xTRhijOXzlv2eb7p76P4e5hy3vSeik11DmEvpY+R0hLxBP41Z5fLXm/OdKM\nni095htrQMNRQdCz985isG0Q8UaRgEmujTynJHA++dMnXQHRT4Zjqc1WPihU+QqvpUJthELwDBUq\nVFW14aVZApCr9VW1WnGY4Flaf0Oz7wgElL0LZu3KOZhWUAlaCQB3F98zKjAXL6ob1oBDZ0yw9epC\nKw04sr1GGBaky42XEb0YRcPZhtI22TUzUBzrv4EJeEsQFtkCBHwegYDjIxButnblYG6GcL3lICYN\nc8MAJuHdirkBQFeWNiCeVLrZXrx60bAcqjGajcXLg6HOIQuA8My0B187aAFOAEg0JQw3Wm5xk+Cm\nAtzK2gre/dV3Y+LZCXzrgW+hv7UfP/n4T3BX111GMiIJWPNvzAMQVsPT66fxV8t/ZSQB2p7YjsK6\ns4k/2hDFFTLH+sb5N6zWx6Le1/0+dMbFXZ7hrmFMDkzilY++gp64eeL/ePXHeOHkC8Y2KqRJQLx0\n9RJ6twh3hc6mTsQaYnh/7/vxN7/9N8b7w93D2HuPSM+ciCdwc9vNOHayFPoB88ZA4aKYa9CAWI6V\nNFSoUKG8KgTPUJtOYVzC5tJmSPpU8TnFIWc7gB8V3x8B8D2Ybp+/YPu0wwQpCVs8GY7OXbQHAlJH\nUYTFIhTSL4G9OQFugD4GU01SJNUB4OViX6cB/BK4HL2M6NUomi4X3Q2TMGEzBtFPb/F9QFhdea1M\nXmtzqPiU67EXwhUYEBl6e9lnX0ApiMl1+DaAp+Hd/XUDEwT5Op8qdQv2INUtk1u1fqPvNwx30dX1\nVcM9VLqV3rL1FguAJOIJ3Nx6M469ecyAH0AA5cS2Cfzs4z8zkupIi9ujLzyKl0+9jFhDDHcm78S2\n1m2W8Z2+KBL33HfoPvzwoz/EwNaBkgy4ACyQ2BJpMeCwPdaOP/3QnxrWTztdpssG
JDei0QLMHTFR\nbiWCCL6z8h384NQPEG+M42dnf4bzl8+jo6kD8YjpsrB+dd0Yz9LqEm6ZvQVb/vsWfOCpD2Db49vw\ntX/8GhZWFnDkjSP44A0fxNStU7g9ebtRJmb7X27H7cnbRUzo/d92jc2U55T8TAJxJYCoc9ctx0oa\nanMqvJYKtREKwTNUqFBV1XWRIEle77ZBWPZOQbjOzkG4n15tJGsAACAASURBVMp4QbldEsI6+gKA\nWyGyxQJWSNLFGX6z2PYCGFyeA+4olpQ5aTO+LRAlWwABmlH2WSuAu4p9PQogC0QuC/MmgYQl96cQ\nFllAuNy+DNEXNzANsbHPinYwUGwfMC25CQBPQMDWUQgXYg5eKoiVG29ZA6ALRDWIJ1Xj9pzqP3L3\n0JHuEezNlBZK5fAzsW0C2YEsXnvoNRwaP2QBFu4WKmtzHjt5DG9eeFM7zpW1Fdy+T7je8gy4XM2R\nZqSaU/jIwEcMt9qzl84i93zOErfqpq54l+W17OsKrmD10ipW1lawfnUdq5dWceT1I+j5Uk+JGy8g\ngDXSEMGZS2ewfnUdL7z1Ak6cP2FYYiOI4Pyl89hzzx68du41Yz8JoAAMl2IJgX92z59Z6p3yGE55\n7L79wLeNrL/lKoznDBUqVK0V1vEMFSpUIJqZEW61LS0CNq8pyJR1IdV6nFKyrqR0LZX1OdXttgNY\nhrB2bocAT6l+CAB1WrdOWGEvfhW491PA4/9eLHgPBJhyNUMk+umAWftye3G7SHGsncV53Q7hIsvV\nCGFl/SVEzOhWiHqfjTBddbdAWHPVsWdgrYcpS8z8BAJKXyv+bId+XUMFIqd6nbrP3OpIrq6v4n37\n34e+lj60N7Xb1ojkNT25Yg0xI9mOTk0NTfhg6oMGmAFAAxoQa4zhgW0P4PT6abzyy1csFtcGNKCj\nqQOrF70F9EYQwVVcLYkB9aNIQwTff/D7uPfr9+LUuvmHF22I4jJdNn4C+lqfANC7pRevfuxVZJ/L\nGms1desUTr5z0ngdb4yjJdqCnd078cSuJyx1USup0xlEHddQoUKFkvJSxzMEz1ChQgWiTEbEcgLC\nwrlv34YOJ1hlYAUou7lJAJUxmaoSEMmEABGPuV78fQiiJIrbdd8uCLCVygJ4qvj7DIBXIJL8HAHw\nH4q/fwfA5wB8BSKZTxTC4roKAdJyPFPF/dwy4U5AWDG/DeGaG4EA18sAdkJYMxNs28MwQTwLcx1V\nOa1rHer5mRmsLi0h2tKCe2dnyyppUitpQbJ4p2i1I4aZ/6UNe+7d6ws8bvzyjYYFMDuQxVP3PVWy\njQSboc4hvP7O63hr7S0Mdw2jPdaOhZUFNKDBFvzijXG0xdpwev205X0Oc3aKNkSRfyCPe79+Ly6S\nl2K25Wvq1im8/NbL+OnbPy2ZT7IpicLFAhrRiGQ8iVhjTGs1HWwbxOrFVRQuFtAWbcMHej8ANABH\nXj+Ctmgbzl0+Z+lv35j4Q+Fgz9/3KrcbDKH0msEMlrCEFrRgFrNIhHfMQoUC4A08Q1fbUJtOYVxC\nfWozxHLayfWc8hov6OY2KZPqtAB4EQLEbv4B0PoA8JBN2l+euOi/w6yJuRXAf2bbLUFYUNcB/D8A\nDkHUCh0ofiYrTFwG8BaEy6yETjmvncXXt8NMVMRzwQxBWD9PQsRn9kFA51swrb3cY091d5Xr2K78\n9BKHuUFlR+y0urSElYUFnDh8GEeVrFn19h2ljdsrZv1KPHME+74Q04KHU8mO9Svrxu928CjdQvO/\nlcfSx5YsLqLd8W7LfhElI9b61XX8cv2Xlvca0egKnYCI5dz17C7c2XknGlzrBZWvtmgbCmsF/NO5\nfwJQug7SIiuTIZ1eMyFajqs10orCesHY9tzlczjyhqg5mh3I4gM3FH3kXxWJnpojzcYxiUXEF0q5\nCYbCeM7ytIQlLGABh3EYM3WducxZ9fY9Fer6
UAieoUKFCkTXdCxnUPGCL0G41P4dRFzlUwAungCO\nfRo4/Cngd/730n144qJHYMLh28XXEsh4QiP1GtQu9K0NAuTU2MvvAPgYTPiMQ9TzXAbwTHE8CxCu\nttxjskPpWwVxuY6vABh8Cbj9Y0DqeeDJM+7rapeldoOANFq809I9MoJ7NtudFsDTnSK1DieH0J3d\n4kRsjbTi/OXzFjCVwPrQtx4yrGmJeAKJpgSyz2Xx0LcewlC3yDiVaEpgYtsEIg1W8GyLtpWAXKOP\nS5a1K2t4+fTLIBCaI83oineVwK1Ue6wdu2/d7bntCCKINkQNSOSZcrk6Yh1GPdBGNOL9ve8HYJ3b\n+SvnsXpp1RiH1Mn1k4hFYnhi7AlkB7IYvWEU+d/K45snvmkck6bGJl8ZaL3U/gzlrpbiF+oIRrCn\n1pnLQoXa5ApdbUOFClXfulaDR2Xc6LGzwOXiBWf2ItDTZI2DfBUiHlO6q94J4ASEtfAVAJ+E6b7a\nBFELlLu7AgLI/kcA34ewiHJ3WuniqsaxOrnFcm0BsAbhcvt9CKB2mq/Rfsafb7bqtivnl4E3N+iA\ntb66iqMzM7hnz566crP1HPe3uir+tvbssfxN8f0v0SUcef0IRrpHEI/EcexNEZ+Yak7he9nvYeSv\nRozYxsG2QVy4cgHr/z97bx8U13nne377HZoGGmhkhJBakkvWSyIZJBzJsRS1IyleEyd0XshcM3cs\nu2rdU8luJffurrh3tu7O3Jqb3Joqp27NTO2uK9pkxEzingQpkWLZZhRhCSThGFu2XhxJMQ6KiRBC\nvIgWIKBpoPePp5/T55w+p885/QIN+n1UlOjz8pznvAD97d/Ldy6MKKJCraVaami9tx5XR64KdaKd\ndzsxFmENeRxmB3Y9sktS4+k0OzE5P8nWmxwIR+MRVy2SpeceWHUALftbJDWWRpBfG47NZMN2z3a8\nN/SeIDJXOVcJ12ckPIIiWxHGImMoc5QhGo1ia9lW3Bi9gcHpQaHusqmrSXI/1//reiE66vf6UZ5f\nLlkv3158/9NNzSUYIYQQQACHcZjSbAlCBNV4EgSx9FmuxaM+SIVdzSxwxqos+KrAPEB7AVxBPLLX\nAGACTJC5Yt/z5fLLxIXfNcQbENWA1Wq6AaxEvLGQH0ygtsZe84ZCcmrBBCdvkpRM+InPtwHARB3Q\n6gdcO4Bd24CjtuRRT7X6WTVB+pCSrriQi0O7xY7Dew6j8e1GtPa1Cts1rG/AxMwEWvtaYYYZZlNi\nGqxcPF0bvYbh8DBcVheK7EWYmJkQur+uyFuBwel4W+byvHJE5iJCNJCLx+rSajgsDnQNdUGJUnsp\n7s3cU1wnF6Al9hLcfP4m3A43Vr+2Gn0P+gxdKwB43P04yp3luDBwAdPz03BZXfjCyi/g/sz9BDFq\nN9sxM89qTqsKqnDhqxdwqOuQpOmQ+Jq7HW7J/eDpySPhEaEWdGp2SthX3pTI4/DgifInBAFKzYQI\ngsgmVONJLEuoLkGFQICJtDqVWsGlygIUjy7YMyVOC+X1ntVgQu+MlYmmnthy/qu7CMAFxL0++a2V\n+1zuki2Xw1NVh8FqM/2Ii04g3ugIAKKIRz3rwbrt1iPuucnnfBqsxlN8XLXU1wR/ziDgeQqY2AG0\n2aBZKqVWP7tQtikGUnoX/HeUaG5CGqBK3Z9WuqXYQ7LZ1yzUAAb3BVGRXyEZO7gvCKvJinnMSwTd\nttJtEo9JnrI7HB6G3WzHxOwE+if7BdFZbCvGFyu/KJnH0PQQ8iysoNlldQnj90/249bELdVLoSQ6\nnWYnVuStSPD5HJ0ZxSP/8gieb3seY+Ex1TGTcSV0BW39bZienwbAajQvDl1EvjU/YdsnVzwJgF0/\n7lfasr8FRfYiYbn4mgPx+5H3hzwMh4cxEh4RrmFbfxt6xnqEfQ/vOSxs77K6MBwelliliG10SHQS\n9F6KWAxI
eBLEciHWLAStrUyELheUikeXqsgW1ym6AKwD6xDLRV8AAH//yxNFxsBqOfl75howESj3\nueT1mWoCTCz8roHVl4q347Wj1QCaY+uOAzgBlvJ7AnHPzbOi/eXCT3yOm8FSgX1gtaCSebuBJz4T\nn5PaZwpagq8JrNlRo8p6AyQVZWo1ppkYO11Ecwv+PLm40PJuVBMnbocbN751Q7KO120CgNPixIFV\nB+D3+tHxlQ6Jx6RYzH5h5RcSjllkL0LrrVbJspqyGvgqfXCYHZianRKWD04PYnpuWtdlMcMME0yY\nnJ/E4PSgYofbmegMfnHzF4IIlrPKuUry2mlOLJiWNzAanB5Ed6g7YS4wAWsK1sBhZv6cB88ehPMn\nTvym7zewmWz4yd6foKmrSfKc8PuxpWQLgNg1rPiC8P27X3tXck/49rwpkfgDCLVmQlT7SRDEQkGp\ntgSxXKirY6KztnaZdvgRsVTTb3laqAssQjkFgGfjNYAJKJ6OagXrQMtTSL8X2/dxJNZwqiGuq3wV\nTMCqWb1oWcHohZ8jx4N4aq88FVfPMX1IXsOptd4ASdNU00zpzWp9nYG58XRLj8ODje6NKLIp+3Dq\nrRXtHe/F7td348JXL8Bb6FUcw2axocBagGJ7MXrGevDe4HuCj6fVZMUOzw5J6mx5Xjne/9r72HVi\nl6L9iBVWzEK7u61RtKxaHGYHyvLK0D/Zn3ScbaXbUGgtTPDs5GOE59knTfLzqHRWYkPRBsXnRGx9\nAkDTBsWIVQrVfhIEkQn0pNpaF2oyBEFkmWBQsVnIsmSpercEATwGZj/SBqAitpxH/Bpjr0sAtAP4\nPuKirFe0XwDKDYHkt51HwgAmOpO9n3RrrBeT7LhBsEjnQOy83LE5K0U19RxTy8pGr9WNDsSRuYQ0\n1SDSEuZJx04XA3ML7gsicD6A/gf9Qg1i4HxAEBvcn/S3X7qK33lGE9bL8RZ6cevP46mvYsE6FhkT\njtGwvgG9470JdY+z0Vl8OPwhAMBismAuOoeh6SEc6joksWyR7JNh0WmGOSFdWIlCWyEGJhOFsJx1\nheswM5cYXXVZXZLorfw8qsuqcXXkKgDW4faVna8I65q6mjA4OYjGtxvRebcTDyIPcOKPJ3Dx6xex\nrSyxoxePbuohlWdTdxMrgiAIEZRqSyw5qC5BBbebRf6Wu+gEMu7dsmDPlBtMICH2/7uQpqnytNWb\nYN1hxTWNSgJLK/1TTZSla0GS7LhuADcQPy+tFGAttGo4y2NfGXjsk9bAqdWYKqD0PKVaXxc4dw6+\n119H3VtvIRRW6eRqYG5ckHDrDrnY4P6k0Tujiuu1ONl7Ukjl/eT+J5IxuMApc5RJ9olEI6gqqMLT\nK5+WbM8tW7LNPOZ1bTccHtbc1mayodnXjOC+IErtpZJ1E7MTmEPceoVbrQBAHvIwMz+DVQUstXcs\nMoZDXYeE9e+df0+4rmORMcxhDpFoBDtP7ExIlRW/Pnj2oGYabSrPplbKNpFZAgjABx/qUIdQhnyj\n6L0UsRhQxJMgiKUHF9nZJADgJFj95Q7oT2/VQh6dEp9GsgigUlSLC0sPgH4wISmOQKpFwsSRUB49\n1WITmCCeRbzxUQ2Uo4zy80jnVmlFRZUiwRyDVjy6okRaUeZ0xlagOxRCxwCLsgXOn0fL/v2Gx1CC\nRz7lqZjcn/Q/fViNE8+uxU/2HTEklMVRytryWhTYCnB4z2E0dTVhLDKGivwKeF1ejAyNCNtZYMFE\nZAKRaAT13no0+5rhdrhRWVAJj8ODueicYCHisrowMTuRcFwxWimz2cICCy5+/aLQxddsSvxs3wIL\niu3FsJqsmJqbwswsi4xOYxptt9sSGjhxHBaHsPzKyBVEohGYYEKXvwvf7fyukCq74ecbJNer3FGO\nofAQAPXIdSrPZlYj+EQC3ehGR+yXdgABtCyUbxRBZBiKeBJLDp/Pt9hTIB
aTdKN1Cig+U91g6aKj\niIuaTGAgOqW5H48GbgSrFZVHINWOlUp66gBYg6AoIAR9/gRpUx899ybT9y/ZuWSj4ZaOJkOZ/B3l\ntLLPh2s9Hhzes8fw/mqNY9QazewLBrG+oQHfevMsfll33HAK5Y5yFqWsKavBa198DS37W9DU1YSW\nnhZ03u3EwNQArt5j6aRWkxXFtmLMYQ6hmZDg28mP2Tvei+HwMEZnRuEwO7C6YDXyLdJusfJmP2ZI\nbV1K7aUosZdItrEqfOZugSWhSVDCNiYLDqw6gJ3lOxPWmWBCz/M92Fa2TdLFVz7mPOZxb+YeBsPx\nJknm2FuxWk8t3vW/KzQT8p/yC/dt085N8Dg8cDvcOPPcGeRZ8nD5G5exrWxbQidbLjprPbV4vOxx\n4ftMCkS9UVJqXJQZhG7VqMXhdGsKYtB7KWIxoOZCBEEsLXzIWDOZpIib5Ij9LnMRo41vUmkkVI54\nkyBA2TfUB+17o2cbI8jORVJ79qMI3K+3Zbbh1gL7hobCYQTOn8fhPXvgdjgM778QjWPE1/zVPa/i\nUNch5Fvy0Tvey2o9Z8aERjsl9hI8VvQYuoZZM6GK/ApJA6EyRxk+V/45BPcFE7xDxdE7Tt3qOpzu\nOy00K+LbiCOjJpgQRfx9yFv/01v4Hx/9D7TdbjN8rmL/Uo4JJkEEBs4FcOzmMYzOjMICiyS11hz7\nx2s7rbDCbDLj7efexj9e+0dJ9HnlT1cK16XeW49QOKR6H3kjodHwKNput6G6tBprC9fiiO8Iuz86\nmwxlA2pclBlCCCGAAA7jMNw5+4eIeNghH09iWUJ1CQ85GWwmw1F8poJg9h9+5LboBIx7WaYSdb0I\n5v95AOyaKPmGiu9NPvT5eRpBKVoqOxdJ7dlfujJaCwxA17XO5O8ot8OBlv37UxKdANAzznwei23F\nkmY1mSJwLoCWnhbhmh/qOoSW/S3oHe8VlnGvyRJ7CS594xJK81jtI4/wrchjBrEuqwsj4RG09rVi\n+6+2Y2xmDHazXdiWR+84BZYCXB65LLx22Vxoe65NYicCQCI6ASbEju4/mlBrqoXL6sJoeBSv7nkV\nDesbsKN0hzD+9y99HwB7/njEUZ5qO495mEzx92SzmMVMdAY/vPrDhOizOGX5VN8pXOy8CIBFkuWR\nSx69Prr/KBrWN+DsV87i+DPHBcsbpcj2QkEpuZnBDTda0JJR0UnvpYjFgGo8CYJYWqTZXVQ3bjDv\nyoXEaP1gKvWGKdYowgvgtui1UtRUfG/8UK4jTef+6ahNlbzR3XcEqMvwQ5Ks5pRf2ykAp5CR55N3\nmbU6ndgXDMJhUEB7C7zoe9CH+5H7gijUQq1jqdLy7lA37kfuA2DpqqPTowiFQ4LYLLIV4dSXT+H7\nl74vRN3k9aUf/9nH2P7L7bgXvgcAqC6tRoGtQOiAazfbcX30OiwmC2wmG56qeApXR67i3sw9PJh8\nIMx7IjKBr576KsJzYdybvqd6fo8WPYrvvfM9zEf1NRUqsBQgYolgYmYCbbfbsPZf16Iiv0LoUFtT\nVoPLw5fhPuLG5OwkAPb89T3ow8DUgBBxLbIVYWvp1oTOvkopvjvKdwgR2em5afAGuLcf3E7YlpNq\nHXE2UaslJgji4YRSbQmCIFIlVRGnhg/G0lCNbp/qPqmQjZRUHWMa8S/MOD6kdW2VhN3rPh8GYp61\n6xsasN9gUy3u21nrqcWWki1C+msyCwy19Eil5Xx8cUOfhvUNEruWdYXrsKZgTdLjisf2e/0Iz4XR\n2teq2EyoqqAKW0u2orWvFcW2YkH4AqxT7Ew00cpEjlLabqrUe+vR3t8umcfeir2YnpuW+JMC7Nyi\niOKdu+9gaHoIDrMDDosD4bkwqsuqUeooRXBfEACw+RebMTA9AJvJJqQSA5SyShBEbkKptgRBENlE\nR6MZQxhNQ00lbTULqcoJBACMgfmUHk
PmItM60lwXNbUwzWurZFHBu8x6amuxJwXPWnETGHH6azIL\nDLX0SKXlfHzfSp9kndiupdJZqXlc8dhHfEeEcXetYCmzVhNL0HJanNj9yG6hQ+6+yn1Cs6CtJVuF\ncZJRYi9JSNtNlc+6P4tmXzNsZptkecdAh+BPajOxdTazDXce3MHM3Aze/9r7aFjfAIfFgbHIGMLz\nYXQNdQnXyO1w48af3UDD+gZs92yXzJ1SVgmCWKqQ8CSWHFSXQGSalJ+pTIs4o7WaRrfX2idTHWe7\nwbrsDgA4pLGtEZRqU7PQ5ThlYte2/W/aUxLbSsKOd5n98unThtNsAakQ11tvp9axNLgvCJfVhe5Q\nNzb8fAN6x3vj9YUHjkr2EY+h5Bkq9yWUH1M+7gdf/wBVBVW4/q3ruDN5R+iQe37gvNCsZ33RetSu\nYEa5ltg/JbaVbcOP9/5YELNizAbfFl0PXcf6f12Pje6NcJildbjcn3R7GROOkfkIuoa7JLWwvIZV\n3NmWXyN+DW4/uA18zMR3+1fa0/5QhTrNEgC9lyIWB0q1JZYc7e3t1Aac0I8OL8eUn6lUusPmMj4k\npIp+5Qeb8Mf5AeTBhjf/8iIeWeHVHmchO7/6sDCpwwZI9XnKdppwJsZ3H3ELKaVVBVW49ee3Ujqu\nDz50nOsAQkCFtQI39t2QzClZbas4fdhtd6Otvw21nlqc/vJpAMBjP39MkkZbZCvCWGQMFpMFc1HW\nZdbj8OD+zH1JCmuRtQhOm1PSZVeMFVZB5Crh9/rxzt13MDg9CIDVqj6YfYCbYzcl3W1L7CW4+fxN\nNHU14cQfT2A4PIzPrfgc7jy4g9UFq1FkL5KkJO/+9W50nusENmYmzZY6zRIAvZciMo+eVFsSngRB\nLG98PublCLAOpwZr5B4qFATjZ/+bG9ceYULjC3er0PF/aQuNjAvyZLW0C2xv8rBT/s/lGA4Pw2lx\n4vq3rsNb6FVtRpSM1edWo6+nj3nDIlEA8drWn/57YOyzHqza+oQwtljIAol2IVyY1pTVYI1rDfIt\n+Wi52YL5mAGt0+LE5BxrAmSCCSX2EhTYCrDGtQbXRq8hNJMYBXRanHhixRPouNORYM8CANtKt6Hj\nK+z3zOrXVmNqdgpuhxszczMYnx0XtjPBhO2l27HCuQJjkTFJoyFx3an4eoiFtpZvph4yPR5BEARA\nwpMgCAKoqwNaWzPr5bhcURCMtf+tHB88MoxHB53oDFzXF/HMND6oRzWXWNQ53S61mSQVwdg73ovd\nr+/Gha9egLeQPQvyCJrb7lYdlx/zyr0rgsDjEUDxdm/V1aGvtRX/8DcuXK+cEMbWE52TR1jF8xMj\nblwkbo6khAUWFNmL8OQjT6JrsAsj4RGYYRbEbL23HivyV6A71I3Ou53CWEoilSP2MK0pq0GZo0wS\nvXU73AicC+D6vevoGevBu197V7jm6bCoDbgIgli2UHMhYllCdQnLiIWozwsGNb0cl/UzZeQaK9RQ\nvvmXF/GFu1UZFZ2Ga8yS1dKm4kmaZZI9T6Hubgx0dKCvtRXnA5noSJU6Ss2MtPAWenHrz29JBJC8\ndjTZuCd7T6JjoEMiOi9941KCAOK1rVXbWXMh7qEpfl7UniN5gymlhkNOixMWE6sBLbAWCEKx2FYM\nv9ePYluxZPs5zGF0ZhRX710V6k0dlnhN5/DUMF7vfR0dAx3CWE6LE+e+ck6o4xRTXVqNd/3vot5b\nD7/XjzPPnUmokwXYPeoc7MTAlQEc6ooXTBv5GZJvu9jenkRusKz/7hE5C/l4EgSxeIh9GbdfBNb8\nH0lrMVPC7V6c9FodtaULgg7vy2SprI+s8OpLrzUypZgwAViapGYUa6G8WxeAdLvUZhK9zYa0kHs1\nJhs3PBcWvq90VuJawzVFAeRwu9Hyv7rxYLQfNpMNE7PMQ1P8vIifo+2/2o6p2SmE58LY4dmByoJK\nwT
rm1T2v4onjT2BomqWxVpdWo8BagM5BluZaYC3Ag9kHggj2Fnpx4M0Dgo+mmP4H/dj4i42oLqvG\nnQd3hOWdg50SP06H2SGkIu+r3IfWvlbJOKMzozh49mBCVLhlf4skEm2zsI64RbYi9E/0o+6tOgT3\nBQ39DOnZNpXoN0EQhFEo1ZYgiMVDXJ/neA7ofJMtXw61mLlSW6qnBtKHBW3Q8zDXmIVDIZwPBLDn\n8GHNNFuxGPhfTpZj7kYvrE4nfvkfy9Ezpe3HqTXmq3texaGuQxlPueSpnPmW/ATfUC7oaspqcOa5\nM0mPK0+RlT8v4ufIYXFI6iXlvqKH9xzGS+0vIYoomn3N2Hp0K/om+1BkK8L5r57H9y99XzLfV/e8\nisd+/pguT1CA2arcenBLaLzk9/px/JnjwvXgnpwAS6t1WpyC8F3nWoc1rrjPqf+UXzjvem897BY7\n+if6he0b1jdgYmZC8WdISUDq+XmjhkMEQaQL1XgSBJHbiOvzGpdZLebq1UBfH1BUBFy9CnhFaarJ\nmuVkGn6N8wH0qhwz0w16NM7vYasxSzWaJBYDT/WW44X/ziJ2/8/feXC1ZBiAMZEQOBdAS0+LII6y\nLTCUxIyRey9vEtTsa5bsIx6r8e1GIapYYCnAg7kHAJTrR4FYp9i7cSHXsr8lYb6v7HwFW45uQWQu\nIul+y9lctBmjkVFYTVZ4XV78/v7vMRIeURTVoXAIL7a/CBNMOOI7Isy31lMLh9mhKSpX/2y1IJSv\nfvMqiu3FitfRyDUXP5eRaARtt9seyg+DCILIDFTjSSxLqC5hGSGuz9NRi5ktsvJMcaE5NgYckplZ\n8vTXVjCRlk34Ne5NcsxU/ECToXF+y73GTP48adVSqtXriVNWv/u7xwGwFN2KzdXCciMpst2hbkF0\nlthLDO2bivejUsptsnsvPwb39txauhWhcAiNbzeq1nIG9wXh9/pR761HsZ3VZybzveTeouLaUfl8\nvYVePOF5QlF0AsDGko248xd38GjRo+gc7MRIeARVBVWKkVy3w40Tz5zA8WeOY9eJXegc6ITdbMdP\n9v4ERXapz6mSj6r7U/b/WGQMh7oOqV5HI9dc/FwWWAsUvVuJ5Qu9lyIWAxKeBEHkBrwWc6lHOjlF\n7M0kamsBeS1fsmY52WIhG/QsxvnlMFq1lGrCVCxA6v/5KNY3NODLp0/jF88kNqExMg+1hj7JSKUR\nkZKAMnIMLph6x3s1j+12uHH8meM48cwJrCtaBwCYjc7i+5e+rzo3j8MjqR1Vmq9SYyKA1Yke8R2R\nbFPrqcVH3/xI81wHJgcwNjuGmfkZfPnfvpxwXCWhqLce18g1F4/Z7GvW/DAolQ8fCIIgxFCqLUEs\ndXKliQ0hJRRi9+bw4cR7YtQCJBP3eCFtR5aYxUm2EOlmbQAAIABJREFU0UovTaXmNZX03XRSnNOp\nyw0ggG50wwknggjCrfJQqB3D6LH1bq9nu1A4hJfaX8KD2Qf46N5H2Fq6FU6rU5L2K76uTV1NmlYy\n79x9B5FoROKFqoXeYxjB6PNAdaAEQSSDajwJ4mEgV5rY5DpLWaDTPRbIJR/MTJGKIFxoEaCnTlBN\nBPngQ0ese1UDGtCi0r1K7RjJmhUZGUc+32w0V0p2X8Tr8ix5+P23fp+SL+diCcCHuSkYQRDaUI0n\nsSyhugQZMXsGxZROIk53NxNvra1MhIrI+WdqOd9jg16uRn0wzwUCeN3nw1t1dQiHFiY90OjzlErN\na6asUPSip05QLQ3WGcu9rkUtDifJvVY7hpGU22TjyOd7qOtQwnbidNKDZw9mpK5Vad2df3/HkOgU\nP1MLfe85RlOnidwm5//uEcsS8vEkiKVOMKie0vmwI45y2pgfnm7xZrTzbDYjqnrvsWwOgSZ37gd5\n9fiMijDqg8mFKgCcDwSwfwGjxdn0RpR7Zy4kSj6TyURQEEEEEMBh
HFZNs00WyebHuzZ6TfNYWuit\ntwWAckc5hsKsk7Auv1kkvy+ZumeLde+5oCcIgkgVSrUlCCKz5FJKqzhF1e9n4lOvQPfBmLdlLqTD\nyubgG2xZ9ClpYtDKxYgPJgC8VVeHvtZWeGpr8eXTpxc0NTeXa+LSEcXi8+I+k+mKoNd9PuEDgvUN\nDZIPCMTHqyqo0tXARw0j9bZuuxtt/Zm3GMnmBxIEQRCLhZ5UW4p4EgSRWXhKK8BE6GKqHXGK6pEj\nxkSw0c6suZAOK5uDs1FlSgEAJwGEAewAcBRAExbOW1RMEIYaETncbkNRy33BoCGhyslELelipUTq\nQRzZ0xvN48i7oaoJJyMCK1kkW3w8w42NzgVwsvckwnNh7PDswNEDR5OeqziaCCArkcV0rj1BEMRS\nhiKexJKjvb0dPp9vsadBqFFXx+ooa2sXxZNTQrLOsiIUnymjnVl1HiuryOagOiUf4tFcgEV0B2Es\nwrvMSRaB04I/T+l0kVUjU9GydBrF6D0vIxHfZJHsdK6jeA565pEJ+D3qGe+Bt8CLInsRyveVo9fR\nCyeciLwVQVtfGzwODza6N6LIVqR5L+nvHpFp6JkiMg1FPAmCWHhyqeaUe4OmtC+Mia90jpUpZHNQ\nnZLYmrAaTFzHoqPkvckwWkuqRDZq4lKNlsktTdKpE9R7XkoRX7VIcrJIdjrXUezDWV1avSCRZ/E9\n6nvQBwAoP1+Oof2sXrR+Xz0azjeg/0E/Ou92AqDIJ0EQDwcU8SQIgnjYCAF4CUAUQDOYyF6i3pvZ\nslcxWkuaCfScS6qRSr2WJplEKVKZTiQ51Tm81P4SoogmTQtOF3EkOhKNoO12G2xmGyLzERTbilH9\nzWp0FHagFrU4jdNww032JARBLCvIx5MgiMyRS02DlhpGO+QSulloIZNN9JxLqmmndahDK1olwidb\nJEsHXsxmT3pINZVZqeHSn8b+hK7hLgCAf70ftv02SWffbKRiEwRBLBbk40ksS8h7apFI4oO5JAkE\nWBfYujq0v/FGWvtDyx+SW4a0gonQ5YxBX850yURKbDLEvo56vRyN/o7ix/jbfdcwmZ/8XJJ5VCbz\nLA0iiAY0ZF10Asm9PfcFg1jf0JCTohPQ50uqhLzhUsv+FpTmlQrLjuw5gha0SK69Ef9W+rtHZBp6\npojFgIQnQRD6yIWurZlELKR/+MP09tcS4kY75C4gRvSzLhZYZGdbyKQqRFI5xgePDOP4X1WlfC7c\ns7SvtRXnZc+kG+4E4ZMtknXz5bWciyk6AwjABx/qUIeQ7NORVDsRB/cF0bC+QZIyq7SMIAjiYYaE\nJ7HkoC5si0QwyMwgF7tTbaYQCWnfiRNp7a8pxINgnWJ1+FQuNBkPZC+wyM62kElFiBj9HSU+xq+b\nPkr5XLId/dVLrguubnSjAx1oRSsCsk9HUp17U1cTBicH0fh2oxAZNxLR1IL+7hGZhp4pYjGgGk+C\nIB5O0rU/yaB9SiYa5KRagptx9xuNJkXZagaUjHSOuRB1eJk6hpGGSJmyZVmKZKPe1Yh1DEEQxHKE\najyJZQnVJRC6SZZH6nazL78f7Tt3Gs8z5V4lGRBOqimSBvJgU41cZjyQzW1oVMZKlg6aLdI5ZipR\nK6O/ozIVGTMS/V2IFOJcJRv1rqmm6OqF/u4RmYaeKWIxIOFJEMTyYNM5wH0ZKH8f6L3PlmmpMb7+\nvfcWtWGSaoqkATWZagluBvWzLvSmg6bS1CfdYz5MZFso5TJ6612NPIO5nl5MEASRC1CqLUEQywP3\nZeB+Nfu+6h3g1ue180i11i+QhYxqiqSBPNgMZv5mFb3poJlMXVwMT85ch6w8tKH0WYIgCP2QjydB\nEA8P5e8Dw08AzmvA9SrAW6ytxrTW+3ws4giwfFS3e2G9TJeKmswCdW/VobWvFbWeWooiEYsCPYME\nQRD6oRpPYllCdQmEIhcfY5HO
r/7fwMF6Fi0EkueRxvJM2y9fVl7P81c9HqC/Hzh2bGG9TBc6DzaH\nWMqpi/Q7anmQS88gPVNEpqFnilgMUhaeJpOpwWQyXTOZTHMmk2l7JidFEMRDRKaMJL3FLL32zg1t\ncaj3mLzzzsaNQGcnMDrKli8XL9McJp2GO+cCAbzu8+GtujqEM2JOSjyMZNIOhSAIgkgj1dZkMm0C\nMA/gRwD+92g0+qHKdpRqSxBEHHndpN8fT2etqABu3EgvwqenLlKeQtuiUbvFx6ypAdasAZqbtee4\nQPWhRBxum3Lv6lXMxD4kWN/QgP1a93eRWAxrmUUnAKAbzO81iJzztSUIgiBSQ0+qrTXVwaPR6O/5\nQQiCWMIstEDinVr5sXk6KwAMDLBl6QiFYFC7LtJoC1i1MZNdO/l55qj4WYqoCTZum8LJ9S624vme\nDwQUBfKy89vsBsBvUQDMeocgCIJ4KKAaT2LJQXUJKqSaspqqAWSy4x88qD4XuegLBlmkU7wsHfTU\nRQaDwLp1gMMBNDai/Y03Uhsz2bVL1d+E0ETNl5PbppRWV8Pr9+PLp08vShRR7+8oPTYvy85vk3/O\nVAuAfix0Q3/3iExDzxSxGCSNeJpMptMAKhRW/Z/RaPSk3oO8+OKLWLt2LQDA7XajuroaPp8PQPzB\np9f0Wu/ry5cv59R8cuZ1dzfaY9ETXyzCpmv/qSn4AKC2Fu0vvAC0t6d2/JMn0T4wwF6XlQEjI2gH\nAL8fvth27e3twHe+A5/LBRw+LDT18X3pS0BrK9rn54ELF+B77rnsX681a4TrhclJ4LnnjI83NcVe\nx8Rle3s78MMfwjcxAdhsaH/kEWB6Gr7GRiAYjJ9vLjwvS/g1F2wDjz2GtS+8AI71O9/B+OQkDp44\nAYfbveDz+4fnnsNEXx8sDgeimzbhnStXYHE48B9PnVKcj575Tl2fAkqZ3+YL8y+gPdWfz1x5/R3A\n5/IBh4H2yzkwH3pNrx/S15fp7xG9TvP15cuXEYoFFz799FPoIW07FZPJdBZU40kQi48Bz0cJmbLs\nKC2NN99ZsQIYHIzPpakpeTqvz2es5jITpHq9xChdO/G5eDzA8DD7fqHO6yEgV305X/f5hNRZR3k5\nwkNDANKrMyW/TYIgCGIpsJB2KlToSRCLDe/AalREpWrZEQgAK1cywXngALBtG1teXQ289550Llrp\nvIuRlprq9RLDr11TU/xa/O53bF1tLbsW/HtKt9WNVldah9uN/S0tOSU6AWnqbNnjjwvfp1NnSp1V\nCYIgiOVCOl1tvwbgHwF4ANwHcCkajT6rsB1FPImM0i5KNSMWGHEznbExZjHC8fsBm005cqoVXcxU\n1DVF0nqmeOOg+/fjy6qqgI8+iq9fpPNaqogjh7nclVYOj8TOv/AC9u7enZNRWWJpQn/3iExDzxSR\nabLd1fY4gOOp7k8QxBJg0ybg5k0gGgWeegqYnY2LzQpR+XdNDXDkiLq40uo0yyOHegkEgJMngXAY\n2LEDOHpUeVx511mtlF+dh5YM0d0tFZ01NcCZM/Gxl4hoyiUSmu4EkLYFx0JYl/BIbHt7u/D9YqN1\n3g+lpQtBEASxKKRd46l5AIp4EsTSxe2WiiqbDYhEWArppk3Ab34DWK0stdbrTdxfrNLKy4He3tRF\nX7Joq1r9pLx2dHBQu5ZUw14moRx1IhbNdbuBz38eeO21hYtuZkCQsXGk53yuqWlRxUhCDacPcQuO\nBqRkwZHrUdRkAvBcIIDekycxFw7Ds2MHDhw9qvueaJ13rl8XgiAIYmmQ1YgnQRDLlE2bmJ+mzQaY\nRWXgBQXAgwfs+7VrgTt3gHv32Otdu4AbN9TtRuRs3qy8vRrydFZ5tFVWQye8ib92DfsAOHiNZWMj\n20Ct5lJ+HAX/zcRyVI1objbJlCeizHM0NDio6S+pRTqRNIfbDbvbjVN+P9vfFoQD7rQsOPRYly
wm\nSp6e/Breu3oVM7HGXf1tbYbuidZ5q61fdv6hBEEQxKKTqeZCBLFg8JbORJYYGGDCa3iY+VxWVgKr\nV7PIJhBPq+UKjO+TrGmQ0jGMeIaK01lLSoB33wXq61ldqTitNYbg8zg8jPMOB3DsGNtGpaGQ8EzJ\nj6PwRj1hiFSbM2WCTHkiytR0JkSamtdmSvu7AizSeRopR3X3BYNY39CwIN6een5HyRsoKV1zfg24\n6ASYR6mRe6J13mrrl51/6BKH/u4RmYaeKWIxoIgnQTwsJEshFa/jAtPpZALP65Xml65ZExdxmzcz\nEcnDf/JjBIPAI48AMzNsX7MZmJ833uWVC6OSEvb1+OMsInvxoqLgE97EA9gTDgOHDsXFoVKk6Ic/\nBP7rfwWuXYsf59IlxbGNlqNK0EjjNUwQLNJ5GMqCTG8qrqwGd18wmHZjnHTFq2T/I4dTTyOOsRg1\nl8mivvIIp9I159egrKYGzpUrYbbZ4GtuNhw9TnbeauudVnbsWk8tDu/JvQgxQRAEsQSJRqNZ/WKH\nIIglzssvR6N790ajzz4bjY6OLvz+mWDv3miUtQmKRhsapOsqKuLrDhyIRquqotFPP42vf/ZZtq62\nVjr/0VE21ugoO8fi4sRjfPppNFpZGY3W1bHv+fZKbNzIxvB4pMfnx3nhhWjUYokfo6pKcZjp0dHo\n6YqK6LTSnJWOI742VVXZu0fJ7kFWjheN/zZegMOJmR4djZ5uaIhOp3gt090/F/j13r3RHwHRHwHR\n07L7/eazz0Z/BER/WVureo4LfQ06Xn45+uu9e6NvPvtsdODup9GG0w3R0emle/0JgiCIhSOm+ZLq\nQmouRBB6SOgoYzByku7+mSCZpUlpKcDT+errgRMnpPvqsTsRn2NJCeuGqxWZkUcA166Np7pWVQG3\nbqkfw2IBenpYRFYpksjnnP9ToNchjfqJmyZVVQFbt8avzZYt6k2Q9EQsk22jZSuTaeoAtIKl4hpN\nU00zOqsW7XuYuqi+VVeHvtZWeGprE1JZExoo5QDUaIggCIJIFT3NhajGk1hyLEpdQmJHmYXd3wiB\nABNodXVMfHFU6hsBMEsSgHWrLS5O3F9cx6g2fk9P/PstW/TN6+RJJiRbW4GXXmLpswC7XhcuJO4j\nPkZBQfx73hyntTVeO8rn3OtgDXhawVJPgYTjtH/nO/Fr09ubOFay48hJtk2ye5ANgki9NlLPuSZB\nrcYz3drPdJDXVWYL/jtKrX7yXCCAU34/ZiYmMjrPdM9PnN5syc9fkGtF6IPq8YhMQ88UsRhQjSdB\n6EHLhzLb+2uhZjUi7sra1MTsRBobEyNYR4/GooP5wK9/HY8GirvP8mNcvRqPjm7YADzxBBvP6wX6\n+tjyzk7gsceY0ObHOnmS1YMCwIsvsqhqOByfwzvvAG+/DXz5y8Du3axT7vAw8w7l5yI+xtgY2+7W\nreTCXqkBz8WLwO7dOLd7N0IHD+L61BSePHWKiYOkY+n4ACHZNmkViKaAG6l3uk3zwxK1Gs/F7C6r\n1Dk2E8ijuBy1+kmteYjX/3zDBpQ/8QTyy8sx3tsriRTLj5vu+YnrTE/5/Vm5VgRBEMTDC6XaEsRy\nQJyCWlERb/gjjqzJ033d7sRUSvE2HL6t2GZETkMD8NvfxkWh1RoXjGVlTNDydQBw4ABLq5WP6fEw\nISv36eSpu1u3xsfJywN+/3smRg8eZJG5xx9nIlosqkNgkc5LTwBDn8SbEnm9yqmFydKKldbJU1L5\nssWwV8kketKrk6CWSprNFFOtNN5kqa/pYDRFVWsefL3V5cJsLCrq8HgQHh6WHEN+3JmJiYydX7au\nFUEQBLE8oVRbglguqKW3csTRqXffVU7nFG+Tn89EnzyVkm/DO9vyaNfJk3GBaLFIj81tR7ze+DIu\nOgFgZEQqOgHWPVZsXQKwjrfDw2w+4pRaiwVob2fnIj7GF7
8Yf93bCwwNAW1tiWmhPOo39EncJmb3\nbnaaPPrmcmHP6Ci7tsnsUZTWyVNSNexVFirdMxm65pCmTQyP9skFi9ryTKCVxptfXg6zw4GRK1cQ\nXLcObxw4IJx/OvdFLYrLx3xt9Wqc2L1bGFuvxckju3YJ43qqqxOOwY/r8Hgw0d+P+UgEXr8/I0JR\nj/1MLjzLBEEQxNKBhCex5Hgo6xK06u3EtYNer7JgEG/T2yv1q8zPB1auZFHLFSuADz4A1q1jPp6N\njUw8cubm4t8XF8dtR3p79Z1Lfj5Lq+Uit6aGRUXn59lru52dAxe/c3PA/v1MdOfns2W1tcBrr8XH\nFAvm06dZRFX+Rnh6Ov691wtwAVBeDtfEBBxKolUPBlNSF7PGMZfmkA200njHe3sxHw4jGokgEgqh\nv61NOP+k1yQAwAfWrElBX8lFGv8dxcd80NeHwc5OYWwuvruamhSFG1+//+hRYVzx91wI8uMWb9yI\nwc5O9Le1wWKzZUTU6/mAYLk+R7nIQ/l3j8gq9EwRiwEJT4JYCmiJGz3RKfE2fDy7HZicBP7lX1h6\nbijE6kD/6q+YX2dnJxO78nR5sxlwudjy2lomOsXRSCXsdpYGvHIlS4k9c4bNpayMiU++jcMBdHXF\no6ZmM4tmtrYykVtRARw7Jj3XYJCl6c7OsnNQEpGxiBEAdl5cANTWwp7s2mphsGHQYtY4pjqHpRLZ\n0orS8fPm2AoLsfOVVyTrFK9JNxIbVIlQE2l8TFtxseLYWsJNPK7SMRxuN+xuN0LXrwNgfp/yuWfz\n3uXCs0wQBEEsHajGkyAyRZr2E0nHtNlYF9fmZmlt4cmTrEHPjh3x2kaleciXfe97wC9+wYSaOILJ\nsdlYNHN4mAm6SESaFnvlCvCFL0iXlZTEmw6p0dDAmgpFItLlK1YATz7Jjieu7RRjscTnqmRJw61K\nACYyz55VtjKRr1erZczG/URu2GgYncNSsNlIVt/J11lsNpjtdtz97W8xE3tW8yoq8Gc3bgCA+jVJ\n0ZaGX+edr7yCrkOHEsbORB2l+N546+vxjMgK6VwggJ6WFkRiP6da986o1U0uPMsEQRBEbqCnxpOE\nJ0Fkimx4dSYbU94IaN06FqUUd53l+8jHOX8+3mE2GZWVbFwuBk0m4PJlYNs21txH3JVWiVWrWAQ1\nEmFRzTNngPJyaQ0op6EBmJhg4pA3JyoqYo2GLBb2/eiougdmKMQsWaJRJtB37WLnyJsJFRdL12u9\nUc4F79UcIZcbzXCxdO/qVUFMygWWWhMejqaY5g2qDsO4LU0SMiHckt0b8XnDYkHl00/jwNGjqsda\nCh8wEARBELkJNRciliU5W5eQDa9OPdYeABN1lZVMKHHR6fEA/f0s0sd9K/k4WoKR87nPMcHHx/v8\n54H//J+ZyBOnrirhcrHj8OjmypVM7D31FHv9mc+wSKd4XsEgE7o7drCU2vPnmVCdm2PnVVWlntLq\ndgPHj7OIqtvNRKe4mZB8PSA0bWrfuTOxJnQhvVdzHD2NZhYLnq7KRadS2qfcnzIyNgaT3a66fQK8\nQZXo1M8FAvjpypVoLi3Fm6ImRYD+31GZaLSU7N5IUovn5iQ1rUrkYursUknzzjY5+3ePWLLQM0Us\nBiQ8CSJTGKz1S3vMYBCorwf8fhZJ5AKxpoYt37gxXqPpcknH2bFD/ZguV3wcHnHMz2ciko+3eTNQ\nWJh87hMT0qZEnBMn2FwuXAA+/pgJzT/9CVi/nonRkRFW4zkwwIRvzEICRUVsH73Xlottp5PtpwRv\n2vTee4k1odm4n6mi1dU4y8d0AFnrRJsuXCyVVlerdnQVi7Px3l7c7exEdGYGBVVVKYvpUHc3pgYG\nMDM6itttbfjl9u2CQJqJWaAsBGLxKhdp+4JBOMrLhW3tJSVJBWUufsBADYwIgiCWD5RqSxALQZbq\nBYVxe3pYWuuVK6xxT2
kpizS2tbGI3ZYtrAGQ08kiiP/2b6xhj/xnc/VqlhobDrOmPkC826wSNhtb\nz2svS0uBe/fY99XVzHtzbIy9NplYumttLYvO8vnIPTs54ppOjpGU195eFum8cIE1PlK6B7zuUy19\nN1dYjLTfJZJqLE9XPRcIoPfkScyFw/Ds2JGQWpqptGE+DsCa+licTgzGnuNkaao8NXispweFXi9s\nRUXYFwyiq6nJUH2lEkqpsnye9pISfOPSJRRqNQHLMXI5zZsgCIKIQzWeBJErZPpNPBdR4npOJXh9\n43e/Gz++xxOPIsrhtZVGsNuBmRkm2jZuZOI3P599TUzEhaeY8nImfAGWUiuvNzWZWPMicQ1raSmL\ntBYVGRPvSteK3wO1xkK5xmII5KUiymVI6hqRKAL11lUqNdoRL9vz6qt453vfA0wm+I4cwduNjboE\nknx+fI6Tg4OK9ZVGGv6IRVrJli0Y7+2FxWaDtaAAvubmJSnatO6X0YZIBEEQRHYg4UksS9rb2+Hz\n+RZ7GsaimJl+Ey9vLKQUHeRUVbH/+/qYaKupie9rVGh+5jMsPVa8j9vN0m7v31cWmXK2bWPCt7+f\nRUDPnQP+5m+AN99kUVqLBfjwQ9Yo6cUX2TKbTdrx1oh4l18rhXvQ/txz8E1MZD4inSkWQyAvsihP\nVVCII5Gl1dX4ytmzhsRIsmZFyZrvcIFkyc/HO1euoKayUlGwRiMR3G5rg62oCJGxMUGoqglXpWOq\nXRuxSDvl9z8UjYIeloZIOfN3j1g20DNFZBo9wtO6UJMhiGUHrw8E2Bv0ZG94gkFjb+LVRC1ffu0a\ne22N/Qi7XEwoOJ1MqPGGPvn5LNV00yb2emyMRSi9XuDWLePRzU8+SdwnFFKuOzSZElN5AVbTWVjI\nhOf9+8AzzwA3brDvt2wBtm5lDYzKy+Pn1NwMNDay/fU0+xFfP17rWV0NrF0LHDmSeA/6+liklu+b\n7TevRlOvuQfrQqJxzGxHmnhtH8BsTvQKin3BINpj3YvVonzJ5i4+LgDYioo0vT7F6b2IRnEvFELf\nlSv45fbtcK1ZIxGx3vp6rG9okFisdDU1ITI2hvyKChw4dkwiVkdjP+viY6pdG17vmWyuelhKUcRc\nbIhEEARBqBCNRrP6xQ5BEMuQZ5+NRoFotLY2Gh0dTVz/8svR6N69bDul9cnYu5eNDUSjDQ3xsUpK\n4svlX3Z7/PuKimi0sjIa/fRTNp7JFF9nNkejFov6OJn4qqqKRgsLE5d7PNHoU09Fow6HdHlDQ+J5\nl5dL14+Oxv/Xus7icerrlfczci8zjfz+LkF+vXdv9EdA9EdA9HQWzuHNZ5+N/giI/rK2Njpt8J50\nvPxy9F8qKqJHSkqiJ/fvT9g/2dz5cX9kNidsMz06Gj3d0JB0PP71E5cr+k/FxZJlaueiNB/xsp9V\nVUn203Nt1Oaqh2zf20ySznkSBEEQmSOm+ZLqQop4EkSqaEUx5RFRt1t/lKunh/1vsbBmP/39yg14\nOG43iwTyZkI8lXTTJlY/KY48JmsWlAm4X+eGDcD4OFu2cyezU/ntbxPPw2pl5+nzxSO5tbVs/vx8\n+DUWR72Uajd5tFJshaLHs9NoRDpdloFVS7YjTfuCwZRrMXnHWQCChYg4Ypps7vuCQfx8wwaEY3XQ\nJosF06OjCIdCQkRRfswx/vPKMZsxK+psW1ZTA9eaNaoRWKX5iJd9+fRpSfOhPa++KkRL1a6NOPpp\nlGSR3VQjofJ9M9FMCUjvPAmCIIgFRkuZpvsFingSGebs2bOLOwG9kUweReNRPnG0ct06NkZVFVsn\nH+upp5SjmTU1iVFEkyka3bEjGt2/n0X3xOPYbOlFLs1m9XVWq/Jyj4fN4dNPpVHYhgb1iK04ullV\nxfZXi3ByxFFDebRydJRdY6Vrq0DCM5VOtFoPWue2BMiFSFPHyy9Looo8OidELYHo0erq
hDlqzV3Y\n32JRjPyJI4L/XFER/dXOncLr/89uj/5vJpPw+qeVlZrXSGk+8mXZikJ2vPxy9Nd790bffPZZ4Vh6\nIrtKc1AaS23fpRRVzQUW/e8eseygZ4rINNAR8SQfT4IwCo9ktrYmej+K4T6Q3E+TR+W4nUhHB6st\n5N6Y4rG4JydnZoY1CTpzJl7XyYlGgQ8+YNHBq1eZr+fq1cxKhNd6pkqy6KhafejwMIt2fvvbrDMt\nEI/sKfmHFhay2k6+3Ucfsagj//L7lf0redSwupptI24Y1NTEbF2Urq0WPGqq5x6nCo/eLnLtnNz3\n0Qhi/8jFItTdjcj9+wCkHpX7gkF4/X546+sTmgudCwRwyu9P6rXJ/SxXPf00gMTI37gowjk9MICJ\n3l5hDmU1NZIMg+INGzTPw+F2w+5245TfL9wL+fXNVoRZySdT7d5qzSGZ56Z8X6rNJAiCeAjRUqbp\nfoEinsRyQ289II+aiaOGxcUsMrl/P3vNay1raqLRF16IR9k+/ZRFL/Py4tvt3cu2KS6W7iv+2rEj\nvQhnJr7E8yovj0b9/vh1euGFaLSsLDFa6vcrRwArKuLb1NdL1yWLGoqjoSUlxiKL6ey7hFCLFi4l\neGTySElJdIzXM2sgjrQ1ezyK0TmOOPInjuZCKrKxAAAgAElEQVSJI5z82Hw7cbRVHBXVinpqRQCz\nFWE2UkurN1KsNJZ831yImBMEQRCZAzoinmSnQhBG0WszIbfxEOP3s26z3E+zvp6NK/f6fOQRVuPJ\nsdniUUwlKxTuqcntVZLZrKSLy8W65g4Nsbns3s3sUTo6pNFJsfWJ0jXZto1FLXt7E+tfS0vjkWK/\nHzh+XN/cuH1NSQlw6RLr4quXdPZdQohtKOwlJXj+5s2Uo5eZ6IJqdIxzgQBGr1/HWE8P/O++i0KN\n+yTuEhseHobV5RLqMPMrKvCtGzeSHlN8vfIrKjA1MAB7SQmqnnkGk3fuwOp0Ir+8HPd7ejD0/vuI\nzsxI9tey+hB7cCbzAc0Ecj9SrXrRhRqLIAiCWLrosVOhVFtiydHe3r64E9CbJslTQS0W6fKaGmbp\n8cQT7DVvgCNuOJOfz0TavXvSfbnoNJmUU13n59k6LjazJTpNJpY2+/77TFgODbH02lAIMIt+rRQU\nMOHIhai8CQvA7FV6e5VTW/Pz2f+FhcDf/33ivoEAu07yVFye5nzzpi7hKDxTgQCznKmoWNaiE4in\nPtpLSvCNS5fSEgrJUiy14Om+N48dSxgjWSpwqLsbdzs7MTUwgK5Dh3TPMTw8jIKqKjyya5ewbmpg\nQHPe/HpZXS64N26Et74ez9+8ick7d4R5/+Ff/xWDnZ34/cwMnJWVMDscAKSWLGrw9F7eSCjVFGgl\n5NdRfL9+vmEDpvmHOykgHqvr0KFFT79eriz63z1i2UHPFLEYkPAkiGzBxc+HH7JIJGfNGiZa+Xpe\nmyh+zYWYWh2lWhbB7Kz6OjW4f6URolE2v8ceA155Jd6xt6ODiWWnkwnuBw9Y7WkgEBd1YqxW4B/+\nQb3LKxfO4+PA976XOA+1etvYhwPnjL6B7+5mdaEDA4AOMbOU4ULn+Zs3NaOFWqRTr8eFC/e5FI/R\ne/KkIGraX3oprWOKt//mRx9h/9GjyK+oUBxDSfDuCwbh8HgwOzGBOx0dGHjnHbzd2Agz94kFEI19\nMFT82GNouHYN5bW1AIDI2JimOBbXVeoR8mqiXGm5fDx+LficeeffVKBaTYIgCEIvJDyJJYfP51vs\nKeiDR0a3bQP27WPLeHRTvJ5HB8Sv+RvDmhqWbqqF1crScI1gsQD/7t8BzzxjbD8xMzMsxTYQYFYp\nAItObt0aF40lJSxy2dKSKDxnZ5nAk4tw8fgck0L2hoYtid5InPBMLQObE72k2hxITZTxaJ3R8bhw\nKa2uhtfvl4wxFw7HN5R9oKJ1TPk85ds73G5868YN
xTHUGu5Y8/KEbcJDQ+hrbYXN5YJJ9LPnrKzE\nf+rqgsPtxnis6ZCtqAiwWJJ+CMLn+7PVqzES+zCotLpaVcypPdtKy+XicF8wKIhureMoXUsx6dx7\nQj9L5u8esWSgZ4pYDEh4EsRCoCasxIjTRouLgfJyoKwM2L6drd+4Mb5tYaF03y99KS6a9GC1srTX\nO3dYdM8oXAQ6nUzw/tM/xUXi+DiL2ALxOsneXiDWfVQ4PpDo0Sm/NrwLLk9PlqNxXQ1HY/Tcp4cc\nI11QAe3OuVy4fOXsWTxz/LhkDE/s/pdWV8NeXCwZR0s4y+fZ1dSEycFBvN3YKMzDSPfWc4EAwvIP\nTiwWRCYmUPH5zwNgfp0N164J4/FIcmRsDLfb2pJ+CMLnO9nXh0hsfoVr10rm9otNm3DE7cY/l5eD\nfwwjf7aV5q4mutU6/2pdSzG50N2YIAiCWBpQcyFiydHe3r60P6kLBFhKp7yRzsqVcRFYVgaMjLDv\n6+uZTUplJYscfvIJS2HljYkA1njnvfeA/n59c9i5k0VSOzqAyUlj83e7gS9+EXjjDeDJJ5mwFL8h\nt1qZcB4bAz7/eeDECaCxkaXDihsiVVXFrVPU0NvISYVwKITzgYBms5OMPFNq93UByUSTHy2MNsER\nN+XRarAjR3z/Tvn9muOIz38+lkLK56lnf6Xj8vMTn4ccl9eLyIMHsNjtcK1bh99HIti5aRN6T57E\nzOgoSqurke/x4LZoPvLrxq+rragIkbExxe2OuN2CfYyzshIVTz2FPYcPo6upKasNfhay8RGhzJL/\nu0fkHPRMEZlGT3Mha7KVBEGkiZIY4XWJAItmrlnD1k9Px/fjzT6sVuBv/xb47nfj+4g72wIsZfbU\nKUCclqhFV1d8fD3w7rglJezr+PF4nac8xXd2Ni6aOzqAF19k5x4IsPNqa2ORTj1RRR4JTREejVkQ\nxPeVe4EuMDwyBQDnA4G0z11JyO4LBnWJeY44AmfJz8frPp9uYSy+f3qi1+Lz9/r9WN/QIMxTvr/8\n3MTibV8wmHDtrCoZBVaXCzP372MmFqWc7O/HEIBP3ntP2KZw7Vr4jhwRrpv8WOLruvOVVwTh2NXU\nhN6TJzEXDqN8xw6YYo3KLE4n6t95R4iois/7V088IdSWZgqj95wgCIIglKCIJ0Gkgt7oltg+pKGB\nbXfsGBNgSnYoAEujjUYBbnBvtwNFRdIIZyawWlkHWpntAwAgL48dl0cyy8pYLWdzM7B2rTRt9sAB\ndo5K4wCsM+zatSy1d9Uqlnb77rvGO8bmQEQxKdyGRa+ozgKZjkylE63kGI1aJhvnl9u3w1lZCXtR\nkaJwVTp/LjDvf/IJ5sNhWBwOFK5bh9Hr14WGRo7yckRnZ4XXJquV+Y2Zzfj6xYso27YN4VAIv9i8\nGdOxrAT3Zz+LqTt3EOYfsqhhsaDy6adRUFmJ8d5eWJ1ODH/wAaZjNkkurxeutWsFOxa+zb5gUHK9\nAGB1XR3uXb2Kr164IGkIxc9bbBGT6v0iCIIgiFTQE/Ek4UkQRuHRLC6+xD6VcrgY8XhY1HB4WJ/F\nicnExCf/P1uYzcyCRUxBAUuhjUSknpvl5Uz4bdgQF8EWC/PztFji1i9btjB7laGh+DqxUAWSXzM1\n5CI+195Up5kWrIaR9NlwKIRfxcSZTUWcGUGPkFWbn9Jy8XglW7ZIRJaeeWoJYaUU2Z84nZibmlId\nUyzWABYRHf7wQ+HnwpKXhw1/8RcIdXfDbLPBYrfDbLPB19yMtxsb0dfaitLqajy4dStBhJosFkRj\nP++O8nKEh4bYcptN6IDrKCsT9hNv4ygvR2RsDPOxTIbSbdvwlY4OxevEz3t6dFSSXkzRSYIgCGKh\nIB9PYlmyKN5TgQCrwSwtlYpOi0XqUynfh3tCPvoocPductHpcsW/56JzxYr4cZKxebOx8+FjykWn\n2Ry3QCkokK4b
GmLndPEiqze129n53L/PRGdFBas17exkArW8nEVt+bUqLmb/p9oxNosdZzPyTOn1\ndzWIWmMXpaY9DrcbBWvW4G5np2YnX62mP4C+jqVGuquKxxvv7ZWs1zMftXRbvu/bjY0J6aBzski8\nragIAJjHpsWC2ViKO++qW7Jli+TnwrNjB+5dv46Bjg70t7XBVlCAZ06cENJjC9etg62gQNJ1+Q9O\nJ1bX1WHl008L8y17/HHh+0dizYhKq6vhqalJ2MbqciE8NCSITgAoXLdOiOAq3ff9LS0oiHmH3v/D\nH3C6oUFYr+fayklln6XCUjw38lwkMg09U8RiQMKTIPTQ3c0a/4yOSkXn3Fzcp1JpH+4JKar3SsBu\nZ+mqzz0nXR6NxlNxuWC125VF6I0bxs7HYmHpu0C8ztPlkgrRU6ek+xQVMcHn9QK3bycK0127WO2n\n282+HA62vLCQRX6vXEmvY+xD2nFWTWypCT69nXz1WM3o6Viqdryxnh4ATOjtfOWVhPHk++mZj5oQ\n1uq6CgDmvDysrqvDN69exfqGBiY85+aA2VlY8vKErrrcAoVzt7MTY598IpkrFy7Htm7F1MgI7nZ2\nIjw8DJPdjrwVK+D7yU9QsGoVZqemkFdRgQPHjuHA0aMoXLcOD27dwr0rV5C3YgXcmzYhIttmfUMD\nVuzaJZmDvaQEvuZmnAsE8HFzs6q36XhvL+bDYURCIfS3taFl82aEQyHdtkJiUtlnqbCcz40gCCKX\nIeFJLDkWpQubuLHI1q2s02wsmpEQgeO2KNeuxZclS5edmWEi7s4d6fKaGvYlJhLRl6qrhLgJ0Nxc\nvIGRy8UaBonSDYVtOEVFTDz6/ez/UChudQIwr1K53QmvQRsfZ+fn9TLBuHkzixwfOBBPT+U2Msmi\nD1mKKAK57WemJrbUBJ9eX0XDVjMa8yvZsgUtmzejubQUbxw4gIJVqwAwK5GuQ4c0z0vPfMTCVRy1\nssSebaV9v/7BByioqsKf/f73ePbNN1Ho9WJ/SwssdjsAlg5b+vjjgs2KUhOhsscfl8yVC5cHfX2Y\nFXV0js7MYHpwEJaf/xyh7m4MdnZiemAAXYcOCdHoqbt3MRMKYXpwELfffluyDbd8MQFwxLIdzHY7\nih97DG83NmL0+nUhRZcdUPp7RT73qYEBnA8EUrrXmXo+cpGleG65/DuKWJrQM0UsBlTjSRB6CIWA\nl15ib/Sam5n4UavpE9cickwmZi3yySdArKmIhKoqZoUijjiaTCwyqdSAKBWS1Ys6HKwrrrzhkdnM\nROLFiyyiye1e/H4mNF98kY175EiiIFRqtiO/NuXl7HhcBOdi7WaOotcqJp39jdSXyu1G8isqMDUw\noLvekM/Hkp+vWvspns/M2BgGOzsBAN76eljs9qT7yml7/nl8+qtfwZKfL1iU8C645wMB/OnUKUFU\neuvrkb9iBULd3Rjv6cHM+LiwjxJevx/DFy/iQV8fYLFg5e7d+NKJE0JNKMDSaedmZhCdmYGtuBjf\nvHIFZw8elHTlvd3WJqk/zVuxQmhK5P7sZ1F//rzkHMOhENpfegl333kH04ODcHg8KN64Edb8fNhc\nLviOHNH9rKT7fOUyy/ncCIIgFgtqLkQsS3Lee4oLLpcrMYro97N1cusTLvz0UlwMrF4N/O536c/X\nbGbNhPr6WK3m+HjiNuvWAX/8Y/y11cpE5NGj6hFIJWHOrw3AoqAPHsS3V+sGuwDdbHP+mVokjHS1\n5Y2DAFa7+MyJEyn5SSY7pnhdXkUFpmXCVu98zwUC6GlpkYhHuUB+48AB9Le1wVJQAEdxMaYGBxHV\n8SFQ6bZtKPrBDzD5d38nCGM+nz2HD6P9xRcxcOFCQiOi9Q0NmJmYkDRzCq5dK5nj6ro6mO12IBqF\nr7k5QZT3njyJ8L17sOTnwxzr3jscs06iLrdLG/odRWQaeqaITEM+ngRhlEyIHL
lnJaekhKWsyhv6\nFBayqKER4enzAR98YHxuQLyTLY+Azs+zWtSyMmXRWVLCmgmJhefsLDu3DRuAJ55QvlZKHpzBYDxy\nzJsYVVczuxWlqCmQE/6YDytGUhL3BYOs5lAkivQKHXEk05wkbVY8nwPHjqHr0CFY8vNxyu9nkcjY\nBz1WlwufvvEGjhQXw2y34+sXL+LSD34giZZyQWcrLkbl008L0UA+F4vNBntpKWbu3cOk+AOSGLai\nIkTGxmCy2WArKGAdb/PyMDkwgIvPP49NsVReACirqREE+DMnTkhEOgDAYkF4dBRf+PGPcfLpp2F2\nOPB2YyPMIp9dW3Exwvfvw15UhPzycpzy+yWR3d6TJzEVy0iYjzVUMpnNqteSIAiCIBYaingShBgt\nyw49wpRvY7MBV6+y1NqSEuDSJVbfqGTtoGRrokZNDXDmDGtGJIqoaKJlzbJiBZurON3W7QYuXwa+\n/e14pJIjjlimkiKr134kB/wxH1YynY6rtq0kkrliBR558smEiB4AnD14EH9qbUXZ44/jwNGjCVFO\nNQqqqjA9MiLYqpgdDsyHwzBZrfj6Bx+gbNs2YdufrlwpCDie2spFJsAEoMlqRelnPwtHSQmmh4Zw\nN/ZzaLJaJVFRS34+rE4nympqhPny69qyeTOmBgYklivrGxowOTgonI/JaoXJbMbKvXsRmZwUIqgO\njwfhmKURj2Q2l5YKPqQAizq7N23C7bY2eKqrsV90fIIgCILINGSnQhBG0bLs4NG31lblTrYAcPIk\n26atjY2zbh3ztvz2t1kjoXT53e9YZ13elZajZbnCRSfvNiumpoZ13m1oAP7wB9Y8ye9nUU6vl4ls\nbu1iszEhnZfHXqdqb6K3WZBWN1u9zYkIw+jpamukQ6hWJ14ArDmP3a54zL7f/AbhoSH0t7Wh/cUX\nAQDjse65ptjzb5P/XAB40Ncn8fLkNiXR2Vlc/Ou/lmwb5n60iDcVWl1XB0dZGfJWrEDxpk2YGRnB\nQEcHbre14e4777CNzWaJ6LQVFqJ02zaER0bQ39YmOV+H241v3biB9Q0NEsuVPYcPS65FdHYW8zMz\ncLjdsMfOy1NbC091tWQfACiPNfsy2Wywud3I93iYt+jwMG7Ljp8NlqJFCUEQBLGwkPAklhwZ8Z5S\nEyvl5eyLv+mVb6fHS1KcMtvWBty6xSKTra0sssnhvp1ud/JIpJxIBHjsMfZ/XR378npZ859kIq6m\nhglKufCsq2Odeg8eZDWpxcXAiRNxaxQ+x48/ZgLwc59jacQjI6wpUrajkFoCVc+HARqQn1nqGEnH\nTdaJ15KfD4BFFGGx4KcrV6K5tBQ/W7UKJ3bvxlt1dYLnJgDBN7Mg1j05OjeHgqoqwS7FKvbFTcLA\nhQv42erV+PXu3Xht9WohTRUAzDYb9re0YPLOHYRHRjA9OIiJmN2Kp7YWc+Fw/GdXlLHwMYDI+Dju\nXbkiLLv1m9/gzQMHEA6F8ItNm/AvK1bgj8ePY25qCt76eqG+dF8wiLyKivg1KyhAeHQUe159Veis\nuz9muyKuSXVWVsJRXg6r04lIKITbbW2CpY3V5UJ4dFRTEKYjHsmiJLvQ7ygi09AzRSwGJDyJhxM1\nsdLbCwwNxb055dvp8ZLkNiMFBSzCyaMgJSWsO2xVFbBzZ7zx0NSUMeFpNrNx29rYMVatYqK4s5P9\nz43sucjlgvPMGSYoRbVnOH8eePNNdt5a4o0LQB5Rqq0FPvoo+6mvWhFNPR8GEFlDr31Lsm0dbjfK\ntm8HAETu38fttjZMDQxgZnQUk/39GOzsRF9rq2CBUlZTA3tREV73+TD4298K40zevYtjjz+O6dFR\nWJQi+3LMZoRHRjDZ14e7nZ2sC62IW7/5DX62apUkqln86KOCUNT6uXV/5jPC9/y82l98EZMDA4hG\nIojOzuJuZ6ckwutwu7H6S1+CKXausw8e4H
ZbG7oOHYLd7cYpvx9vNzYK6c9cLPaePInw0JBQu2p1\nueDeuBGOsjLMTkzoinqmIx6XokUJQRAEsbBQjSfxcKJWNyhf3thovL6Q1y52djKLFIClwX74IfO7\nlB/nD38wliJaUsIijnxOmzfHbU4A4K232PHffBP4/vcTayh7e4Hdu4ELF4Af/ICJ62vXgOFhfZ1l\nX30VOHRIuzbTKGr1s1p1t3prRYkFwUjNpxjecMdTWwuH243b4sZcgGA5wjvl8hpJNRxlZZidnITZ\nbsfc1JQkkmkpLMScqJGWvDZTC0teHsp27EDo+nVWV2mxKPrrmmw2qe8mgILVqzF5545wPJPVCmtB\nAeYjEZisVljsdhQ9+iiGYt1oAcBeUoLnb97EKb8/oWuvvMbV5nLBZLMJ9Z6Ttgo4IwMoqanFV88k\n/3BAfA/0fJAghixKCIIgHm7IToUg1FASK4EAcP060NMDvPsuE2Xi17GUPmFbuUjiy3p62La8FpPj\n9bLurVy8Pf00a85z7x6Liqq8eVWkspKJRbebpc6Ka0erqlh6rx7Eoq6qSj2CqSX+MoHaMai5kCp6\nRF6qQjDVfY1YsIgRCxcAaH/pJfS+8YaQMbC6rg7PvvmmsL28mY4SjrKyuG2JqGkW9xi1FBRgTqFj\nrRgtUWp1OmEpKEB4aEjzHIF4Y6PkB403AjPZbPBs3w5HaSnmIxH0t7UJwrCrqQk3jx1LuA7cambE\nVYsfThzDN3AIE/WHETyhz0uVxCNBEARhFGouRCxLMlKXoFQ32N3NopQDAyyiJ38tRilVly/r62P7\nyQ3mx8fj+xw6BKxZw7rI8je1zzyj3PhHic99Lj73WG2cgOjNuSbiNNVkabPZSmcVp9HGbDQSjqEn\nvTlNlmqti57UyHTSJ1PZVy3lUqt+UNzIyOF245njx7Eq5jFXVlODL772mmR7TyylvWjzZpjz82Er\nKYGJP0MxTOKGW7GfM09tLfzvvov1DQ2oePJJzfOp2L0bXr9f+VwLC1GydWuC6PwYTLACLOXVHEub\nNVmtmJdFQBWJRuGsrIS1oADRSARDXV1CqvH6hgaUbNmCU36/ougsq6nB12Ln9+6u07gHLy7VtuD/\nbU7ebfh1nw9vNzYK9jTUJCi3WKq/o4jchZ4pYjEgH09i+WLUk1Murhobpa/FY167xl57PCyddvXq\neM2mEtXVbFve6VY8Pl//2mvA+vXafp5mM0u1fewxJlwnJ6Xr//qvWS2nEvJrwj1HtdJU9W5nBO7J\nyQV6fT0TmPJjKPmBLkNSiS7qqatLp/YulX33BYOKUTMuYgHgfCCgGAnl12CspweFXi+s+fnw1tcr\nWqscOHoUv9q+HfmlpZjo6UGEC7BYtNBRVobCdesQDoUQnZlBWU0NXGvWwNfcjK6mJtw5fx6zU1OK\n6bBiBi5cQP6KFYp2RLPj47h39ariftHZWZjtdsyKfi8ki5yu9Plw97e/xXw4LEQ0g2vXSra529UF\na34+ImNjgr0LwDrorti1C9aCAsGP1O5242C/HzsrnPifjwXhFnmUyp8x8b0RW7Wo3SeCIAiCSAWK\neBJLDl8sCqIJtzVpbQVi1gtJkUfWlCJtPKo5PMxSUzduZNHNvj7lOk2Xi0Xztm1jTYQqKoBjx+Lj\n+/1McJ09y5bxxkQAi2TyRkDV1ay2E2DdMzs6WG3o/fusu60YU5IsB3mkVq+lid7tdHIuEMDrLS14\n6/59hAF2bs3NGT2GEczB4KJbQaQSXdTT2MdI8x+1fXmETc/14aJHvr0eEcuvAW/2c7utTdVaxeF2\no2DNGtzt7JTUbyIaRUFVFdybNmGoqwvRmRlYnU5YnU7MxbYLdXdjamAAkfv3EY1EYLLZhAilnOjs\nLCb7+1UbCc2Ju+Da7XCUl2MjWKSTd9a1FRezDVQsjyxOJ8xWK/7s44+Fe9XV1CQRl6b8fMzEGiEJ\ny2PjRc
bHhSixWEwOd3bAM9CKq4cCkuurZmejZtVCLD66/+4RhE7omSIWAxKexPJFHDlMJsY4bjf7\n8vuZWOTL+Gu5ncpHH8U7vPL/a2rYtqWl7PXEBOs829ubmLbrdjPLkhUr4sf48Y/Z93Y7E6ozM6ye\n8+xZZpciPh/xG+Gysvjxi4rUu8DmSAfYUHc3Bu7fRx+A8zYbcOnSotZu5oIVRCrRRT0+m3q20dp3\nvLfX0PVRup77gkEUrlsHi8OBtxsbFQUsvwbci9Ph8aC/owPNpaV4I2ZFwjkXCMQ72ooEXWl1Nb75\n0UfCGJ7aWpTV1OBurDPuzyorMXL5srC9yWoVOsymhKgue35mBiaTSegkO3PvHqxOJ9ybNiG/ogJ5\n/OdUPsTkJG63teGd//AfsL+lBV1NTehpaZH8jDsKCyXXxl5SgpW7d7Pr5nJhWmaXovQ8cXsVW1ER\ndr7yirCt+MMJJasWgiAIgsgEJDyJJYfuugRe+1hYCPz93+vbRx4R1LJT4a+vXmX/nznDaiy5wCsu\nBl55Jf7a5WJpsuI33eJjHDrExGhBQXw9r+cMBuMNjsSi0+Vix+XHF1ujbN8uFaELUC+pB+FNcUkJ\n9nzyibRxUwYw6kd4fWqKzWcRozzpRCazjVFRrLS9OEKpJmD5NeBenCazGdODg5gZHUW/zA4k1N0d\nj3TOzcFst2N1XR2+cvas4IfpWrcOZocDoY8/Fvabm5oSLEdgMglzNYJadBQApgcHcZU3NAIwGw5j\nqKsLUwMDmB4cTD5G7Oc61N0dnyOYsCzZvBkurxfuzZuRX1GBb1y6hC+dOAFHeTlmJyYSro/S81QY\n+zmLjI2hS1S3Lq+vTfWDCiJ7UD0ekWnomSIWAxKexPJl3Tr2//h4YnMgNd5/n/1vtQL/5b9II4T/\nf3v3HhzVeeZ5/PdKfdENqYUkLMsYGceY4AQb2fgaKGvWJo4xDp148SSe3eCdyqomrtp1qiZ4s5PL\nTtXEtalJpWaSmirXpioLGSfEBmKIMSYuZK7GNg4bcBJDjA22bAxCCCSEuLRuZ/84fY5Ot7p1aZ1W\nq8X3U0WZVp8+5+3Tr4Ueve/zPMXF9mqkN5fzqafsPMtFi+xcz8ceswM8J5A6d86+9tq1Uk2N/drm\nZmnOnNSrqM4P9c6W24YGafVq+++RiF0VN1l3t902xdmm6g1yz57NbGttlrk/FB87prDPQac09hXM\nW7/3vZwHfZP5B/7kIGakwD5dED1SAOvcg2n19bp/3bqEQjzBioqE1yQHjAM9PTr9hz8knKts1iy1\n7d3r5iwOYVmD21a9uyIKhv+ncUyro2kqVYeSPufK+fPVuGaNJM/Kb0WFVFCgvu5undy1S93Hj7tB\n7Kb4DoiahQsl2avDF06ccD+T5Pm0u6lJHYcOSbILELGNFgAw0WingsllrAWBhpNJG46KCsn5QdRp\nL+IU1YlGB9t91NZKhw8nfs2xYoUdDCZf2xlPWdlg8BoMSvfcYz+/Zs3gGJPbvXiLGrW326+74w57\n+27y++vstAsPeSttLlwo3XSTvRrqx72d5MbTjxAj86Nlymg+k5eWLNGJ5mbJGLdCbe3nPqfPx4tn\nPTd3rmKeVUTJbj9SXFOjstmz1b5//8itS5IU19YqMneuTib/f+2nggLNuPtuheO5nwWhkFsUSJJ2\nrFypo88/r8LiYvUOs2I/bfZsldTVqevoUQ309bkBdn00qgc2bkw49tmrr3b7nia3pgEAYLxop4L8\nk6pNSaa820qfeip93qOXU8ynpER67bXEFULvCktrq11YyGnf4OR4OquWqba0Ol/z5mr29trvNxRK\nXcnVCTrXrUssatTWJr30Uupts5GIPffB4sIAACAASURBVA7JXjFdvtw+xrsFN/neetuaTIEWCpls\nWx3r9tx8Nt736qzIhaur1e1ZZRvJWFd1S+vqFK6pkQoKZPX1yerr08ld
u7SnqUnhSERfefddlc2a\nlbBt1ert1cUTJ9S2d2/KoNMEg27Rn1Rm3HmnHdiOJi98rJyV1IEBte3dq/Y//EHn3ntPJ3bs0HNz\n5uh8S4sk6XxLiwZisZRBp/NeqxcuVO+FCzq1d68utbaqx3PsqddfV6yzM+Fz7otvJ5fktncZyZX0\n/wQAIPtY8cTkMopVyp07d469Gltj4+DK5IoV6dtztLTY22Zfe21o3mFnp10IyFtFNhCwCwlt22Zv\nd03VbiR5FVeS5s2zg1fJDg63b098nfc1XV32yqZkV389diz9sc5KZvKKqTT8vR3t/Zmidu7cqa5/\n/MeMVvHy0XArlqNp6+KsXHbHA7zk84ylNUy6Y3c3NenounUJuY6SvSW1uqFB51taFCgpUW9Xl045\n/384Cgrs6s/Ofx2FhTIFBWnbpwQjEVV+5jPqbmnRxZMn026TTSdQXq6+ri69K2muJBMKSZalUHm5\nZtx5p/p7euwVXA8TCLhbdwMlJaq+/XZ1vPPO0O3BhYUyxmjGnXeqqLpajWvW6NfXX+/28fS2QZHs\n1dDLZ8+6969oxgxdbmtTVUODlm3fPqrgP9OVbfgvo3/3gGEwp+A3VjyRf7JV/Ga01Vzr66WPPx4a\ndDY12dtq45UlXX199uqjN8cyWaoWJocP2yuR0ejQoDP5NfFKlKqsTF39NdUqcXIuZ1OTHcB627lk\ncn+msPH0u8w3w73X0eTHOiuXIU/lWO95xpJjm+5Yb4GdYHm5CouLFaqsVMlVV+nc0aPua5xKrdMX\nLNC1S5cqVFXlBptOFdnpN99sf72/P2XQaYJBFc2YoYJgUG179+ri8eNu0BmKRNxKsl4F4fCQr/Wd\nP5/w2OrpkdXbq9iZMwqWlmrJ+vUKJ1W2teLXKSwpUaC0VK27dtkBZHzFtaCkxF7l7O+X1denU3v3\nui1mquO54NMXLNCX9+9XcW2tJPvzKKmrc+9fqLJSX3rrLV2/YsWog04p9TxhFRQAkCkCT0wuoyh+\nk9Fv6JyA9qabEtujjJYT3J09a2+vdbbYSnaPzeEClVRBXSRir552dAwWJHI0NdlVciV7NbW+3g4Y\nDxxIXf11NEHjkSND27l4TZJqt7nS2Ng4qavK+m249zqWADzTIkKjud75eEBpAgF9cc8e1dxxh3o6\nOvRJc7O7yjp9wQJF33xT169YoYd37NCDW7YoGK9mHayo0EPNzfZzu3YpEP+6kyvqLSBk9fbqclub\nYt686Pi1p99yi6obGoaMO5hqu258d8/cFO+z4bvfVTgSUc0ddwx5TWFRkR49dEgD3qJF8XMNXLyY\nUMzIWxhoSbz1ycM7dmhafb0ePXzY/Ty649t2TSCgh3fudAs2jWVup/p8J0ProSsRK1PwG3MKuUDg\niSuDE9AOl+c4HG+l2N5e+09dnb1quWPH8MFauqAuXT7rkSN2QCrZqx779qUNGHc3NenFri69XFur\nWKqVzOTxpwtOJ0m121yazFVl/Zbuve5ualJvV5eKa2u1ZMOGEe9FuvOk69mZarUsXfBaGv8li9XX\npwM/+EHKticXT5xQqKJCoUhEr0SjennpUhVfc40kqffcOR34wQ/c8V08ccI+X3+/CouKFHT6YsYD\nSG/epyksVKiyUlZfn1p37VIoEhmywhnztEwZjd899JB7fwuKitxczekLFug/nTypA08/LSctxRlL\nMF58SIWFUiCggnBYJhRy72ny/Q9HIgpFIlo3b54uOO83fv9SGWn1MtXneyXtDAAA+Ct9MzJgkso4\nL8G7ktjQMLYtpWvX2q/v6LDboYylUq4T1HnH4VSolYYGg94gMRIZvF6K8XYeOaLW+OrPnlWr0udg\nOeNPlYMKcl3iOo8ccfMl9w03n1JIztUsnTXLzQ/c09Sk+9etc1fLvF9zgptkqbbxPj9vni47udGy\ne2b+e02NpMEWJ0We7aaLf/Yzd1ze
XM/+y5fVf/myJNnVZSMRxeKrqZIdnDqBZvXChWpcs0a/W7Zs\naC5pGk6Op1fJNdeo49ChIeeYdt11Ckci9tbiePAXKC7WNffdp3t+8hO9sHChm7s50Nen9n37Eu6f\n974X19Tow9/+NiEvNlRZOSRAdF5z9o9/dHNEnfOl4r3G4mee0b5Vq9zKxGPJ50Xm+B4FvzGnkAsE\nnrhyeFcSZ80aWwDmBI+pivZIY2sD46x0SnaF2uQA1hskOudOEzAG4tsRqysqtPhHPxp5/JOFn21z\n4JvxrGYlB5WpzjXS+Xc3Nall82b1x2Kquvlm1Uejaly9WvueekqdR46o6rOfVcGtt+r0/v263Nbm\nVrt1FRaq4lOf0lV33qlQRYVeiUbV9sYbGujpGfY9hyIRdZ84oYJQSAM9PQpXV2tafb2MpPIbbtAr\n0ag633039QmSCxilcXrfPhV6tvta/f12UBvv0+td0b18+rQ+2rJFH//udxrwFDgKlJWpr7tbgbIy\nxTo6FOvsTLjv4ZqahKAzWFGhRw4cGBIMel8jjfx5e49P/oVEql8mAACQClVtceXIpK/naHmrwobD\ndkB1223S+vVDr+PjOGKLFmnP3r1aLCk8nmq0Ex0IXuFVdCcrb59NJ9gb7UpWcu9USQk9O3c3Nanj\n0CF1HT2q6JtvalpSvnKqKrZOJdXk6qrtBw+q6/333TzI5ODv+hUrdLGtLSG4SiUYiWggFlO/p9VI\n6cyZKquvd1cmvVVnTTA4WJyooEChigrJGPWcPatQZaWqbr45ff/PggIFy8rUG+8TXDpzpv7jn/7k\n3tdYZ6fWzZvn9tpMJVRZqd7ubncM4epq9Z4/b7eNKSxUqLxcPR0dCkUiuuqee/QffvWrlJ+b81lV\nNTSobNYsNa5ZM+znm/zZeufGQG+vTjQ30zMXAK5wVLXFlWe4fpRr10qzZ9uBYXJBn/FyVisCASkW\nG9ySmyqP1MdCPuHyct0vKTzaarTp7o+f/VNHgyq6WTHeiqPenL6xFpFJztVMzg90tvFeam3VvhT5\nyt4qtlJiEZ3kvqFdx44NBp2SwtOnu3+fvmCBCouLddbZVl+Q+p+5wqIiTf/MZxKCTkn64muvuf00\nvSuqocpK1d177+CBAwPq6ehQz9mzCpSUKHLTTTLBYPoemQMDbtAZLC/XF197LSFIC0cievTwYYWr\nq1O+vKCkRD0dHW7QGSgrU6y9fbBXaX+/ejo6VFJXp69+8IEe3LLFDfjT5dUu275dD2zaNGKwmPzZ\neudGsKzsiinKBQAYHwJP5J2dO3emf3K4ACoSsbfY7t3rT4DlDeKeecYOJr2VLisqsl/IZ6xBbLr7\nM9GB4CSrojvsnMojflYcHeu225GKM410Puf5YEWFrl26NKHtR9f778sEAurp7LQr2nq2n1bcdJO+\nvH+/yurrFaqqUlF1tbqOHh3sb+kJSh2FJSV69C9/Sdkm5fUnnxxcjY2vooYqK/XIgQO6f/16t2WJ\niVe2DpaXq3L+fLXt3asTzc0a6O1Vmk25dpEgSb1dXUOC791NTVo3b5560vzCIOQUQSotlQoK1BcP\nmANJ1XVrbr894TNINSfGUkhrd1OTXolG1dPd7X7N+1k2rl59xRTlyqWp8j0KkwdzCrlA4ImpJVUA\n5Q0QnTYofgRYmzcPBnFPPmkHkwsX2s8VFtptVrJtrEFsugBzogNBquhmhZ8VR/1uLzPS+ZznH/vw\nQ3e1znGprU1WX5+7+ljozYc8dUp7vvENlcycqZ4zZ3SiuVltb70labC/pVNwqHL+fJXU1enRQ4c0\nrb7eLoI0c2biQIxxA9LC0lIVzZihRw4c0LT6endV8voVK1R9662S7CDSWSFNDgK9XwtFIiqOF0IK\nVlRIhYXuSuSOlSt1dN06XWptdd9jcW2tTHy11gQCWvKb3yhcXa2+CxfsgDgefAfLyhSeMcN9v41r\n
1iRef5xzIlXgeiW1HgIA+IccT0wNTo5iMCiVlkpr1gwGNd58wuXLpVDIn+qu06cPFiuKRqWNG+3t\nq3PmSPEqlJMufzFdcSRMCd4czYkOCLJZ3fQXNTWKtbersKREdY2NGujp0SfNzW6xHUl26yHLSsj3\nrI9G9cDGjdqxcqU+2rpVVbfcoiXr1yeMzZs/agIBfeX997X/+9/Xe88+627nrV++XA9s2pTwPjve\neUex9nZ7a6wxQ3qASlLRjBn60ltvuVVgty5b5vYg9eaOhmtqEl5f1dCgZdu361f19erz5IRWzp+v\n41u3uq8tLClR/Re/qAsff5wyd3Z3U5POxvNqvxR/bqyfU3J+J4EmACAVcjxx5XC2kDY324Gl94cj\n7yrfmjX+rbTddpv934YGKV6ZUpGIdPvtg9cb6wrDcDmqfmClcUrLZS9Sv7b5pspJ/PL+/SqdOVOP\nHjqkB7ds0f3r16ts9myZ+NbVwtLSwZzPeNBZvXChQuXlerGxUe/98peKnT6tE83N+vWcOQn5r95q\nslZfn15/8kl7BdPzC9OBeF6lUwCpddcuxdrbVRAKyerrSxl0StJVd9+tA08/rYttbXr1scd05sCB\nhGtJ9kpo1S23SBq6zbgwni9aWFKiqxYtUk9Xl4pqa7Vsxw73flw8edLNnX3h9tsT7lvnkSNq27tX\nlz15tePN3QUAIFMEnsg7KfMShstRzNY20vXr7fNu3z60HUqm15voIj+QRK7LcEZbsCiTLZ3ec+9Y\nuVIvNjbq2IYNQwKjafX1+puPP3ZX7F6JRtXT2ekWIwqWlrrnrJw/X/XRqB7atk3nW1rs1UxPxdue\n9nY9W1en3y5apJeXLtXiZ56xV0vjBnp7E4JRSTr7xz+6Y3MLIBUWaqCnJyEnM1hRoaIZM/SupOC0\nabrnJz9JCPRStXW56p57VFpXZ/cNNUb9nmO8AffFkyfdIPKdn/7UvR/OWANlZYqdPq3jW7dq3bx5\ninV2ZtTSJlkuf5mBQXyPgt+YU8gF+nhiavD2vkz+ASlbPSzTnXc816PaKyaZ0fZpvG/t2jFv803o\nQVldrZizRV3pA6NUPSiXbNig1598UjJGjatXu9d3giynb6Zj4NIlt13KvlWrFKqocAPIglBIjatX\n6xc1NVJ8VfLC8eO6cPy4+3pv65SqhgZdbm9X78WLqm5oUO/581Jbm3rPn9e+VasSAr3zH3yg2Jkz\ng9uCJX28dav794FYTCeam/X83Ln663ffdQNuSeqK9+wNlpfrTk/PXue+X+7o0InmZknSpdZW7Wlq\nSvmZZPI5AQDgB3I8gVxK7p/pfM3vHMyJ7tOJKSObOX7ec4cjEX3S3Dykt2RyTuKG+fN14fhxBadN\nU+3ixWl7VUqDOa8N3/2utj74oPp6etTjCW5DlZX66rFjal6xwr32su3bte+pp3Rs/fohFWZDkYiu\nvvdet4CPE8C9Eo26wXBxba0utbYqXF0tU1CggZ4eFYRC+lK84NGLixbpC1u26KX77ksItJM5PUwd\nv120yA2Wk59z3qvTB9Tbb7Nl82b1x2Kqvu22IfmtAAD4ZTQ5ngSeQC55Cx9lsxDRRF0HEy6bRX2k\n7BYs8p5bUsrreIv/XL9ihbpPnHAL9JTNnq2yWbNG/d5jnZ16ft48XW5ttStPO/82FRTomr/6K3dL\nqfeajlAkokcOHkwo3iPZ9//Yhg3q6eiQCQQUKClRYVGRps2erdP79rnHeYNF72tchYVupVonAPa+\nn9H8AiD5s0p+H6kCVgAA/EBxIUxJ485LyHYBn7EYaWutX2NlC++w8jnXxc/enamMJ8dvpPxQ77nT\nXSc5JzEUb3VSvXChSurqRvXedzc16dmrr9avr79elXPnKlxVZQd5AwP2n74+ffLqq0OuWdXQoGuX\nLlV9NKqvfvCBDjz99JD303nkiBtAWn196u3q0tttbeqOt1iR7D
Yn3m3D3tdI0jVLluirR4+qfvly\n1UejQ4JOyd4iO232bBWGw3r1sccU6+wccn+T76E3V7WwtFSxjo5h83QxeeXz9yhMTswp5AKBJ648\nk6mAz0iFiPwa60T36byCjLb4Trb42bvTT94KsOMJipOrqnofe4PQ4d5755EjutTaqp6ODp3ctUsF\nTj9fr4GBIX0ql23frge3bNEDGzcqHIkkBPnP3XijXl661D2X0/tTkspvuEHRN99UWX29QlVVKqqu\ndu/Ji42Nat2zJ+HS4UhE0+rr9cCmTe61vMe/vHSpJKl01iyd2rvXvZ8j/dLhvrVrVR+NKlxVpf4L\nF/RJc3NWfjkBAMBosNUWV56lS+1AbuHCkQOxXOdGjmWsyInkraATvZUxl707h+O9L04uZfL4xrtN\neLTv3dmmKtmrmJ/fuFH7Vq3S+Y8+GtwOW1CgqxcvVll9vY6tX6/+S5dkAgFd9bnP6YFNmxSORNzz\nePuHmkBA4enT9dC2bdr//e8nFDjy3oPi2lpZlqXLp04ljM0Eg/paW1vK8SfPrZ7ubvf6M+66S5J0\norl5xPxbenECALKNHE8glc7O0RfwyXVu5FjGipzgh/rUnPsSqqzUIwcODMmNlMYftI82cI11dmrn\n448PqXob6+zUr2+4QT1nzrjHhquq7MqzHsW1tXr08GFJGlJB1nvMzM9/XudbWhQoKVFxTY1aNm9O\n2FJrgkFZ8Z6gjof37NHVixalHHfy3JKk52680e0bWh+NqjAYHDHwnqy/nAAATB3keGJKGndegtPu\nZDQ/gOU6N3IsY0XGxjOnkreCwlZcU6NwTY2qb7tNoYqKlMckbxMe67bl0ea3hiMRFc+YobY339Sz\nV12l1ZGIXlqyRJI04447Eo41hYVDXn+ptVXPz5snSbp/3TotWb9exbW1Q475aOtWte7apVe3btVH\nW7cmBJ3B8nIVTZ8ev8jgv8vv/PSnacedPLfCkYhqFi6UZN+zxtWrR5V/Sy/O/Ec+HvzGnEIuZBx4\nGmN+ZIw5bIx52xjzgjEm9U8WQD4jNxIj4If61M63tCh2+rRODJNXmBxYjbVQ0ljyW508z4GeHvWe\nO+eO6761a1VQVGSfb9o0PbRtm0qvvVYmGJQCg62uL8d7Y0r2Z/7o4cOqX75cRTNmuGOouuUWSVLF\njTeq0MkjLbD/me3t6tKltjb7a/FdQN5xpwq6U80tftEBAMhXGW+1NcYskfSqZVkDxpgfSpJlWd9O\ncRxbbQHgCpPJFuSxvmbHypX66OWXVb1ggUrq6txtrqm23XrzPCUpNH26KufNU7C8XKd//3u3p2Z9\nNKpYR4e7BbggHNZALJZ2TOlawmxdtsxt+5JKSV2dVrzzjnu+Z6++WpdaWyXZ231r7rgjK+1xAADI\nhtFstQ0M9+RwLMva5nm4T9IjmZ4LADC13Ld27ajyCr15moufeUb7Vq1yXzNSDuf5lhbF2tv1SXOz\nwjU1bu7jnqYm3b9u3ZBzv/7Nb2qgp0cFwaAut7frlBMYera+DvT0JKyklt9wg33+NO/BWZV0OH93\nKu4Wlpaq/8KFhNdMX7BAVTffrFeiUfe99cdi7vOxM2fcVV/6bgIApgq/cjz/VtLLPp0LGNao8hIm\nU69OTHrkuvhvtFuQvdtr961alfCakbbenj96VJIUrKjQ9JtukpS4fbVl82b39a9/85t6YONGuz3K\npk1u+5NwdXVC4FkQDCZsZ7148qQb3I62FcnOnTvdc9TefXfCc6UzZ+rhHTt0vqUl4b1V33abJCkw\nTIuYXLfuSWeyjmsq4XsU/MacQi4Mu+JpjNkmqTbFU/9gWdbm+DHfkdRjWdbadOd5/PHHdd1110mS\nIpGIFixYoMbGRkmDE5/HPB7t44MHD458fLz/5U5JikbVGP/6ZBg/jyffY8dkGc+V9PjQpUuaLjvQ\nGvja17Rz5073+UOXLum0pM
/Fg7Dk138Qiajj+HHNPXdOoUhE5++9V9d961tu4PpOd7d6Jc2VdHL3\nbv3wzjt16/e+p88vW6b71q7Vv0WjajtzRjPi22yPlpbquq9/3Q2aveMLlJXp90ePauCll1T04ovq\nPHJEhy5dcs/nfX+SHXgHnnhCA93dKvrzn3W5tVWtN96ou378Y/u5khK9KzsfdGU8wPy3aFTz/u7v\nFHrhBS3+2c/0xsGDCe93z1tv6ezbb2uu7FXdwBNP5Pzzk6Su+C8I3pV0OBrV3/P9lsc8nvSPDyZ9\nf8n1eHicf48PHjyozvgvGz/88EONxrjaqRhjHpf0XyXdZ1nW5TTHkOOJiUf/y7HJdb9SXLGGa/Ux\nUhuQkXJCX1qyRCeamxO2u16/YoVC8UJGgZISDfT26kRzs0KVlZr5wAO6ePJkwtbeWGennpszx80B\nLZs9WxeOH3fbotQvX64HNm3S85/+tC62tqogGNSX9+9PaB+T6n1k0uJksrbumazjAgBMnKz28TTG\nfEHSjyXda1lW+zDHEXhi4tH/cmwaG3PbrxTwGEt/zuGCN+f5WEeHPmluVqCsTDPuukv9ly65+Z3e\nXpivRKMp+4p6A6uCcDihaFB9NKoHNm7U6khEvefOSbK30/7Nxx/7ek9G835zZbKOCwAwcbIdeL4n\nKSTpbPxLb1iW9USK4wg84audnq148MkVvkLMnJpcXmxsTBkAjiRdwBrr7NRzN97oFh8qrq3VpdbW\nISt06VbuvIHVq4895lbHnX7zzXp41y6FIxH9oqZGsfZ2FZaU6Oqf/1xLv/KVMY9zrMeM5/zIL3yP\ngt+YU/DbaALPgkxPblnWHMuy6i3Laoj/GRJ0AsgT9CvFJDKW/pxe6YoRhSMR1Sxc6J4z+uabKXth\npuuR6S2UdN/atapfvlz10agbdO5uatK0T31KBeGwom+8oZLaVKURRh7nWI8Zz/kBAJho48rxHNUF\nWPEEAIxBpls3h8s1zOZ20LGu0CaPc99TTw1ZoRxP3iQ5lwCAiZbVFU8AALJhtK1YkqVbsRzPOVNJ\nbh8y1hXa5HGmWqEc7r2MpLimRuHqagJOAMCkQuCJvOOUdAb8wpyaGvwMLoeTHCgmB4kjzafkcaYK\nXMfzXs63tIy59ygmN75HwW/MKeQCgScAAGOQHCiON+Adz+rmaMYHAMBkQI4nAABjMNnbh0z28QEA\npp6stlMZwyAIPAEAAABgiqK4EKYk8hLgN+YU/MR8gt+YU/Abcwq5QOAJAAAAAMgqttoCAAAAADLG\nVlsAAAAAQM4ReCLvkJcAvzGn4CfmE/zGnILfmFPIBQJPAAAAAEBWkeMJAAAAAMgYOZ4AAAAAgJwj\n8ETeIS8BfmNOwU/MJ/iNOQW/MaeQCwSeAAAAAICsIscTAAAAAJAxcjwBAAAAADlH4Im8Q14C/Mac\ngp+YT/Abcwp+Y04hFwg8AQAAAABZRY4nAAAAACBj5HgCAAAAAHKOwBN5h7wE+I05BT8xn+A35hT8\nxpxCLhB4AgAAAACyihxPAAAAAEDGyPEEAAAAAOQcgSfyDnkJ8BtzCn5iPsFvzCn4jTmFXCDwBAAA\nAABkFTmeAAAAAICMkeMJAAAAAMg5Ak/kHfIS4DfmFPzEfILfmFPwG3MKuUDgCQAAAADIKnI8AQAA\nAAAZI8cTAAAAAJBzBJ7IO+QlwG/MKfiJ+QS/MafgN+YUcoHAEwAAAACQVeR4AgAAAAAyRo4nAAAA\nACDnCDyRd8hLgN+YU/AT8wl+Y07Bb8wp5AKBJwAAAAAgq8jxBAAAAABkjBxPAAAAAEDOEXgi75CX\nAL8xp+An5hP8xpyC35hTyAUCTwAAAABAVpHjCQAAAADIGDmeAAAAAICcI/BE3iEvAX5jTsFPzCf4\njTkFvzGnkAsEngAAAACArCLHEwAAAACQMXI8AQAAAAA5R+CJvENeAvzGnIKfmE/wG3MKfmNO
IRcI\nPAEAAAAAWUWOJwAAAAAgY+R4AgAAAAByjsATeYe8BPiNOQU/MZ/gN+YU/MacQi4QeAIAAAAAsooc\nTwAAAABAxsjxBAAAAADkHIEn8g55CfAbcwp+Yj7Bb8wp+I05hVwg8AQAAAAAZBU5ngAAAACAjJHj\nCQAAAADIOQJP5B3yEuA35hT8xHyC35hT8BtzCrlA4AkAAAAAyCpyPAEAAAAAGSPHEwAAAACQcwSe\nyDvkJcBvzCn4ifkEvzGn4DfmFHKBwBMAAAAAkFXkeAIAAAAAMkaOJwAAAAAg5wg8kXfIS4DfmFPw\nE/MJfmNOwW/MKeQCgScAAAAAIKvI8QQAAAAAZIwcTwAAAABAzmUceBpj/skY87Yx5qAx5lVjzLV+\nDgxIh7wE+I05BT8xn+A35hT8xpxCLoxnxfOfLcu6xbKsBZI2SfpfPo0JGNbBgwdzPQRMMcwp+In5\nBL8xp+A35hRyIePA07Ks856HZZLaxz8cYGSdnZ25HgKmGOYU/MR8gt+YU/Abcwq5EBjPi40xT0v6\nz5IuSrrLlxEBAAAAAKaUYVc8jTHbjDF/SvHnYUmyLOs7lmXNkrRG0r9MwHgBffjhh7keAqYY5hT8\nxHyC35hT8BtzCrngSzsVY8wsSS9blvXZFM/RSwUAAAAAprCR2qlkvNXWGDPHsqz34g+XSzqQyQAA\nAAAAAFNbxiuexpgNkuZK6pd0VNI3LMtq83FsAAAAAIApwJettgAAAAAApDOePp6jZoz5J2PM28aY\ng8aYV40x107EdTE1GWN+ZIw5HJ9TLxhjKnI9JuQ3Y8wKY8w7xph+Y8ytuR4P8pcx5gvGmL8YY94z\nxvyPXI8H+c0Y83+NMaeMMX/K9VgwNRhjrjXG7Ij/m/dnY8x/z/WYkL+MMUXGmH3xGO+QMeZ/D3v8\nRKx4GmOmOX0/jTH/TdItlmV9PesXxpRkjFki6VXLsgaMMT+UJMuyvp3jYSGPGWM+LWlA0v+R9PeW\nZf0hx0NCHjLGFEp6V9L9kj6R9HtJX7Us63BOB4a8ZYxZLKlb0r9bljU/1+NB/jPG1EqqtSzroDGm\nTNL/kxTl+xQyZYwpsSzrojEm47bo4wAAAphJREFUIOk1Sd+yLOu1VMdOyIqnE3TGlUlqn4jrYmqy\nLGubZVkD8Yf7JM3M5XiQ/yzL+otlWUdyPQ7kvTskvW9Z1oeWZfVKek528T0gI5Zl7ZHUketxYOqw\nLKvVsqyD8b93SzosqS63o0I+syzrYvyvIUmFks6mO3ZCAk9JMsY8bYz5SNJKST+cqOtiyvtbSS/n\nehAAIOkaSR97Hh+Pfw0AJh1jzHWSGmT/Eh/IiDGmwBhzUNIpSTssyzqU7tiM26mkuOg2SbUpnvoH\ny7I2W5b1HUnfMcZ8W9K/SPovfl0bU89I8yl+zHck9ViWtXZCB4e8NJo5BYwT1foA5IX4NtsNkp6M\nr3wCGYnvQlwQr7nyijGm0bKsnamO9S3wtCxrySgPXStWqDCCkeaTMeZxSUsl3TchA0LeG8P3KCBT\nn0jyFs+7VvaqJwBMGsaYoKTfSPqlZVmbcj0eTA2WZZ0zxmyRtFDSzlTHTFRV2zmeh8slHZiI62Jq\nMsZ8QdIqScsty7qc6/FgyjG5HgDy1n5Jc4wx1xljQpL+WtKLOR4TALiMMUbSzyUdsizrX3M9HuQ3\nY0y1MSYS/3uxpCUaJs6bqKq2GyTNldQv6aikb1iW1Zb1C2NKMsa8JzuB2UlefsOyrCdyOCTkOWPM\nlyT9VFK1pHOSDliW9WBuR4V8ZIx5UNK/yi6w8HPLsoYtLQ8Mxxjza0n3SqqS1Cbp+5Zlrc7tqJDP\njDGLJO2W9EcNpgf8T8uyfpe7USFfGWPmS/qF7MXMAknPWpb1o7THT0TgCQAAAAC4ck1YVVsAAAAA\nwJWJwBMAAAAAkFUEngAAAACArCLwBAAAAABkFYEnAAAA
ACCrCDwBAAAAAFlF4AkAAAAAyCoCTwAA\nAABAVv1/lzHCzGUnjVoAAAAASUVORK5CYII=\n", + "text": [ + "" + ] + } + ], + "prompt_number": 5 + } + ], + "metadata": {} + } + ] +} \ No newline at end of file diff --git a/examples/triplet/mnist_triplet.prototxt b/examples/triplet/mnist_triplet.prototxt new file mode 100644 index 00000000000..0e903f85909 --- /dev/null +++ b/examples/triplet/mnist_triplet.prototxt @@ -0,0 +1,113 @@ +name: "mnist_siamese" +input: "data" +input_dim: 10000 +input_dim: 1 +input_dim: 28 +input_dim: 28 +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 500 + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "ip1" + top: "ip2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 10 + } +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip2" + top: "feat" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 2 + } +} diff --git a/examples/triplet/mnist_triplet_solver.prototxt b/examples/triplet/mnist_triplet_solver.prototxt new file mode 100644 index 00000000000..39222b89cf0 --- /dev/null 
+++ b/examples/triplet/mnist_triplet_solver.prototxt @@ -0,0 +1,25 @@ +# The train/test net protocol buffer definition +net: "examples/triplet/mnist_triplet_train_test.prototxt" +# test_iter specifies how many forward passes the test should carry out. +# In the case of MNIST, we have test batch size 100 and 100 test iterations, +# covering the full 10,000 testing images. +test_iter: 100 +# Carry out testing every 500 training iterations. +test_interval: 500 +# The base learning rate, momentum and the weight decay of the network. +base_lr: 0.01 +momentum: 0.9 +weight_decay: 0.0000 +# The learning rate policy +lr_policy: "inv" +gamma: 0.0001 +power: 0.75 +# Display every 100 iterations +display: 100 +# The maximum number of iterations +max_iter: 50000 +# snapshot intermediate results +snapshot: 5000 +snapshot_prefix: "examples/triplet/mnist_triplet" +# solver mode: CPU or GPU +solver_mode: CPU diff --git a/examples/triplet/mnist_triplet_train_test.prototxt b/examples/triplet/mnist_triplet_train_test.prototxt new file mode 100644 index 00000000000..3cea2376c11 --- /dev/null +++ b/examples/triplet/mnist_triplet_train_test.prototxt @@ -0,0 +1,498 @@ +name: "mnist_triplet_train_test" +layer { + name: "triplet_data" + type: "Data" + top: "triplet_data" + top: "sim" + include { + phase: TRAIN + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/triplet/mnist_triplet_train_leveldb" + batch_size: 64 + } +} +layer { + name: "triplet_data" + type: "Data" + top: "triplet_data" + top: "sim" + include { + phase: TEST + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/triplet/mnist_triplet_test_leveldb" + batch_size: 100 + } +} +layer { + name: "slice_triplet" + type: "Slice" + bottom: "triplet_data" + top: "data" + top: "data_true" + top: "data_false" + slice_param { + slice_dim: 1 + slice_point: 1 + slice_point: 2 + } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + name: 
"conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "ip1" + top: "ip2" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip2" + top: "feat" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "conv1_true" + type: "Convolution" + bottom: "data_true" + top: "conv1_true" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + 
kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1_true" + type: "Pooling" + bottom: "conv1_true" + top: "pool1_true" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2_true" + type: "Convolution" + bottom: "pool1_true" + top: "conv2_true" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2_true" + type: "Pooling" + bottom: "conv2_true" + top: "pool2_true" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1_true" + type: "InnerProduct" + bottom: "pool2_true" + top: "ip1_true" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1_true" + type: "ReLU" + bottom: "ip1_true" + top: "ip1_true" +} +layer { + name: "ip2_true" + type: "InnerProduct" + bottom: "ip1_true" + top: "ip2_true" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat_true" + type: "InnerProduct" + bottom: "ip2_true" + top: "feat_true" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "conv1_false" + type: "Convolution" + bottom: "data_false" + top: "conv1_false" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + 
kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1_false" + type: "Pooling" + bottom: "conv1_false" + top: "pool1_false" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2_false" + type: "Convolution" + bottom: "pool1_false" + top: "conv2_false" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2_false" + type: "Pooling" + bottom: "conv2_false" + top: "pool2_false" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1_false" + type: "InnerProduct" + bottom: "pool2_false" + top: "ip1_false" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1_false" + type: "ReLU" + bottom: "ip1_false" + top: "ip1_false" +} +layer { + name: "ip2_false" + type: "InnerProduct" + bottom: "ip1_false" + top: "ip2_false" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat_false" + type: "InnerProduct" + bottom: "ip2_false" + top: "feat_false" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "loss" + type: "ContrastiveLoss" + bottom: "feat" + bottom: "feat_false" + bottom: "sim" + top: "loss" + contrastive_loss_param { + margin: 1 + } +} diff --git a/examples/triplet/readme.md 
b/examples/triplet/readme.md new file mode 100644 index 00000000000..ce98ec10819 --- /dev/null +++ b/examples/triplet/readme.md @@ -0,0 +1,179 @@ +--- +title: Siamese Network Tutorial +description: Train and test a siamese network on MNIST data. +category: example +include_in_docs: true +layout: default +priority: 100 +--- + +# Siamese Network Training with Caffe +This example shows how you can use weight sharing and a contrastive loss +function to learn a model using a siamese network in Caffe. + +We will assume that you have caffe successfully compiled. If not, please refer +to the [Installation page](../../installation.html). This example builds on the +[MNIST tutorial](mnist.html) so it would be a good idea to read that before +continuing. + +*The guide specifies all paths and assumes all commands are executed from the +root caffe directory* + +## Prepare Datasets + +You will first need to download and convert the data from the MNIST +website. To do this, simply run the following commands: + + ./data/mnist/get_mnist.sh + ./examples/siamese/create_mnist_siamese.sh + +After running the script there should be two datasets, +`./examples/siamese/mnist_siamese_train_leveldb`, and +`./examples/siamese/mnist_siamese_test_leveldb`. + +## The Model +First, we will define the model that we want to train using the siamese network. +We will use the convolutional net defined in +`./examples/siamese/mnist_siamese.prototxt`. This model is almost +exactly the same as the [LeNet model](mnist.html), the only difference is that +we have replaced the top layers that produced probabilities over the 10 digit +classes with a linear "feature" layer that produces a 2 dimensional vector. + + layers { + name: "feat" + type: INNER_PRODUCT + bottom: "ip2" + top: "feat" + blobs_lr: 1 + blobs_lr: 2 + inner_product_param { + num_output: 2 + } + } + +## Define the Siamese Network + +In this section we will define the siamese network used for training. 
The +resulting network is defined in +`./examples/siamese/mnist_siamese_train_test.prototxt`. + +### Reading in the Pair Data + +We start with a data layer that reads from the LevelDB database we created +earlier. Each entry in this database contains the image data for a pair of +images (`pair_data`) and a binary label saying if they belong to the same class +or different classes (`sim`). + + layers { + name: "pair_data" + type: DATA + top: "pair_data" + top: "sim" + data_param { + source: "examples/siamese/mnist-siamese-train-leveldb" + scale: 0.00390625 + batch_size: 64 + } + include: { phase: TRAIN } + } + +In order to pack a pair of images into the same blob in the database we pack one +image per channel. We want to be able to work with these two images separately, +so we add a slice layer after the data layer. This takes the `pair_data` and +slices it along the channel dimension so that we have a single image in `data` +and its paired image in `data_p.` + + layers { + name: "slice_pair" + type: SLICE + bottom: "pair_data" + top: "data" + top: "data_p" + slice_param { + slice_dim: 1 + slice_point: 1 + } + } + +### Building the First Side of the Siamese Net + +Now we can specify the first side of the siamese net. This side operates on +`data` and produces `feat`. Starting from the net in +`./examples/siamese/mnist_siamese.prototxt` we add default weight fillers. Then +we name the parameters of the convolutional and inner product layers. Naming the +parameters allows Caffe to share the parameters between layers on both sides of +the siamese net. In the definition this looks like: + + ... + param: "conv1_w" + param: "conv1_b" + ... + param: "conv2_w" + param: "conv2_b" + ... + param: "ip1_w" + param: "ip1_b" + ... + param: "ip2_w" + param: "ip2_b" + ... + +### Building the Second Side of the Siamese Net + +Now we need to create the second path that operates on `data_p` and produces +`feat_p`. This path is exactly the same as the first. 
So we can just copy and +paste it. Then we change the name of each layer, input, and output by appending +`_p` to differentiate the "paired" layers from the originals. + +### Adding the Contrastive Loss Function + +To train the network we will optimize a contrastive loss function proposed in: +Raia Hadsell, Sumit Chopra, and Yann LeCun "Dimensionality Reduction by Learning +an Invariant Mapping". This loss function encourages matching pairs to be close +together in feature space while pushing non-matching pairs apart. This cost +function is implemented with the `CONTRASTIVE_LOSS` layer: + + layers { + name: "loss" + type: CONTRASTIVE_LOSS + contrastive_loss_param { + margin: 1.0 + } + bottom: "feat" + bottom: "feat_p" + bottom: "sim" + top: "loss" + } + +## Define the Solver + +Nothing special needs to be done to the solver besides pointing it at the +correct model file. The solver is defined in +`./examples/siamese/mnist_siamese_solver.prototxt`. + +## Training and Testing the Model + +Training the model is simple after you have written the network definition +protobuf and solver protobuf files. 
Simply run +`./examples/siamese/train_mnist_siamese.sh`: + + ./examples/siamese/train_mnist_siamese.sh + +# Plotting the results + +First, we can draw the model and siamese networks by running the following +commands that draw the DAGs defined in the .prototxt files: + + ./python/draw_net.py \ + ./examples/siamese/mnist_siamese.prototxt \ + ./examples/siamese/mnist_siamese.png + + ./python/draw_net.py \ + ./examples/siamese/mnist_siamese_train_test.prototxt \ + ./examples/siamese/mnist_siamese_train_test.png + +Second, we can load the learned model and plot the features using the iPython +notebook: + + ipython notebook ./examples/siamese/mnist_siamese.ipynb + diff --git a/examples/triplet/train_mnist_triplet.sh b/examples/triplet/train_mnist_triplet.sh new file mode 100755 index 00000000000..e005970824a --- /dev/null +++ b/examples/triplet/train_mnist_triplet.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env sh + +TOOLS=./build/tools + +$TOOLS/caffe train --solver=examples/triplet/mnist_triplet_solver.prototxt From 083f61b09800c7119d2117d560622c81a2a8ef52 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Mon, 8 Jun 2015 07:56:03 +0800 Subject: [PATCH 04/82] New triplet loss layer added(beta1 version-no test source files) --- include/caffe/data_layers.hpp | 3 + include/caffe/data_transformer.hpp | 36 -- include/caffe/filler.hpp | 71 +-- include/caffe/layer.hpp | 1 - include/caffe/loss_layers.hpp | 69 +++ include/caffe/net.hpp | 3 - include/caffe/neuron_layers.hpp | 70 +-- include/caffe/python_layer.hpp | 13 +- include/caffe/solver.hpp | 15 +- include/caffe/test/test_caffe_main.hpp | 28 +- .../caffe/test/test_gradient_check_util.hpp | 7 +- include/caffe/util/math_functions.hpp | 6 - include/caffe/util/mkl_alternate.hpp | 1 - include/caffe/vision_layers.hpp | 66 --- src/caffe/blob.cpp | 1 - src/caffe/data_transformer.cpp | 116 +--- src/caffe/layers/base_data_layer.cpp | 11 +- src/caffe/layers/base_data_layer.cu | 6 +- src/caffe/layers/concat_layer.cu | 44 +- 
src/caffe/layers/contrastive_loss_layer.cpp | 25 +- src/caffe/layers/contrastive_loss_layer.cu | 34 +- src/caffe/layers/conv_layer.cpp | 7 + src/caffe/layers/conv_layer.cu | 7 + src/caffe/layers/cudnn_conv_layer.cu | 2 + src/caffe/layers/data_layer.cpp | 90 ++- src/caffe/layers/deconv_layer.cpp | 7 + src/caffe/layers/deconv_layer.cu | 7 + src/caffe/layers/flatten_layer.cpp | 16 +- src/caffe/layers/image_data_layer.cpp | 42 +- src/caffe/layers/inner_product_layer.cpp | 4 +- src/caffe/layers/inner_product_layer.cu | 4 +- src/caffe/layers/lrn_layer.cu | 102 ++-- src/caffe/layers/mvn_layer.cpp | 23 +- src/caffe/layers/mvn_layer.cu | 23 +- src/caffe/layers/pooling_layer.cu | 218 ++++--- src/caffe/layers/prelu_layer.cpp | 4 +- src/caffe/layers/prelu_layer.cu | 16 +- .../sigmoid_cross_entropy_loss_layer.cpp | 2 +- .../sigmoid_cross_entropy_loss_layer.cu | 22 +- src/caffe/layers/slice_layer.cu | 47 +- src/caffe/layers/triplet_loss_layer.cpp | 124 ++++ src/caffe/net.cpp | 46 +- src/caffe/proto/caffe.proto | 206 ++----- src/caffe/solver.cpp | 554 ++++++++++-------- src/caffe/test/test_accuracy_layer.cpp | 5 +- src/caffe/test/test_argmax_layer.cpp | 3 +- .../test/test_contrastive_loss_layer.cpp | 58 +- src/caffe/test/test_convolution_layer.cpp | 9 +- .../test/test_data/generate_sample_data.py | 12 +- src/caffe/test/test_dummy_data_layer.cpp | 5 +- src/caffe/test/test_filler.cpp | 98 ---- src/caffe/test/test_flatten_layer.cpp | 46 +- src/caffe/test/test_gradient_based_solver.cpp | 82 +-- src/caffe/test/test_im2col_kernel.cu | 4 +- src/caffe/test/test_math_functions.cpp | 51 +- .../test_multinomial_logistic_loss_layer.cpp | 3 +- src/caffe/test/test_net.cpp | 145 ----- src/caffe/test/test_neuron_layer.cpp | 139 +---- src/caffe/test/test_pooling_layer.cpp | 13 +- src/caffe/test/test_softmax_layer.cpp | 4 +- src/caffe/test/test_stochastic_pooling.cpp | 35 +- src/caffe/test/test_triplet_loss_layer.cpp | 107 ++++ src/caffe/util/math_functions.cpp | 10 - 
src/caffe/util/math_functions.cu | 21 - 64 files changed, 1186 insertions(+), 1863 deletions(-) create mode 100644 src/caffe/layers/triplet_loss_layer.cpp create mode 100644 src/caffe/test/test_triplet_loss_layer.cpp diff --git a/include/caffe/data_layers.hpp b/include/caffe/data_layers.hpp index 3958cb7ecb0..2bb9d948169 100644 --- a/include/caffe/data_layers.hpp +++ b/include/caffe/data_layers.hpp @@ -14,6 +14,7 @@ #include "caffe/filler.hpp" #include "caffe/internal_thread.hpp" #include "caffe/layer.hpp" +#include "caffe/net.hpp" #include "caffe/proto/caffe.pb.h" #include "caffe/util/db.hpp" @@ -28,6 +29,7 @@ template class BaseDataLayer : public Layer { public: explicit BaseDataLayer(const LayerParameter& param); + virtual ~BaseDataLayer() {} // LayerSetUp: implements common data layer setup functionality, and calls // DataLayerSetUp to do special data layer setup for individual layer types. // This method may not be overridden except by the BasePrefetchingDataLayer. @@ -56,6 +58,7 @@ class BasePrefetchingDataLayer : public: explicit BasePrefetchingDataLayer(const LayerParameter& param) : BaseDataLayer(param) {} + virtual ~BasePrefetchingDataLayer() {} // LayerSetUp: implements common data layer setup functionality, and calls // DataLayerSetUp to do special data layer setup for individual layer types. // This method may not be overridden. 
diff --git a/include/caffe/data_transformer.hpp b/include/caffe/data_transformer.hpp index 0ad68c80216..880356601a4 100644 --- a/include/caffe/data_transformer.hpp +++ b/include/caffe/data_transformer.hpp @@ -62,7 +62,6 @@ class DataTransformer { */ void Transform(const vector & mat_vector, Blob* transformed_blob); - /** * @brief Applies the transformation defined in the data layer's * transform_param block to a cv::Mat @@ -88,41 +87,6 @@ class DataTransformer { */ void Transform(Blob* input_blob, Blob* transformed_blob); - /** - * @brief Infers the shape of transformed_blob will have when - * the transformation is applied to the data. - * - * @param datum - * Datum containing the data to be transformed. - */ - vector InferBlobShape(const Datum& datum); - /** - * @brief Infers the shape of transformed_blob will have when - * the transformation is applied to the data. - * It uses the first element to infer the shape of the blob. - * - * @param datum_vector - * A vector of Datum containing the data to be transformed. - */ - vector InferBlobShape(const vector & datum_vector); - /** - * @brief Infers the shape of transformed_blob will have when - * the transformation is applied to the data. - * It uses the first element to infer the shape of the blob. - * - * @param mat_vector - * A vector of Mat containing the data to be transformed. - */ - vector InferBlobShape(const vector & mat_vector); - /** - * @brief Infers the shape of transformed_blob will have when - * the transformation is applied to the data. - * - * @param cv_img - * cv::Mat containing the data to be transformed. - */ - vector InferBlobShape(const cv::Mat& cv_img); - protected: /** * @brief Generates a random integer from Uniform({0, 1, ..., n-1}). 
diff --git a/include/caffe/filler.hpp b/include/caffe/filler.hpp index ff3542e1f99..bb18e8e1e28 100644 --- a/include/caffe/filler.hpp +++ b/include/caffe/filler.hpp @@ -126,18 +126,17 @@ class PositiveUnitballFiller : public Filler { }; /** - * @brief Fills a Blob with values @f$ x \sim U(-a, +a) @f$ where @f$ a @f$ is - * set inversely proportional to number of incoming nodes, outgoing - * nodes, or their average. + * @brief Fills a Blob with values @f$ x \sim U(-a, +a) @f$ where @f$ a @f$ + * is set inversely proportional to the number of incoming nodes. * * A Filler based on the paper [Bengio and Glorot 2010]: Understanding - * the difficulty of training deep feedforward neuralnetworks. + * the difficulty of training deep feedforward neuralnetworks, but does not + * use the fan_out value. * - * It fills the incoming matrix by randomly sampling uniform data from [-scale, - * scale] where scale = sqrt(3 / n) where n is the fan_in, fan_out, or their - * average, depending on the variance_norm option. You should make sure the - * input blob has shape (num, a, b, c) where a * b * c = fan_in and num * b * c - * = fan_out. Note that this is currently not the case for inner product layers. + * It fills the incoming matrix by randomly sampling uniform data from + * [-scale, scale] where scale = sqrt(3 / fan_in) where fan_in is the number + * of input nodes. You should make sure the input blob has shape (num, a, b, c) + * where a * b * c = fan_in. * * TODO(dox): make notation in above comment consistent with rest & use LaTeX. 
*/ @@ -149,16 +148,7 @@ class XavierFiller : public Filler { virtual void Fill(Blob* blob) { CHECK(blob->count()); int fan_in = blob->count() / blob->num(); - int fan_out = blob->count() / blob->channels(); - Dtype n = fan_in; // default to fan_in - if (this->filler_param_.variance_norm() == - FillerParameter_VarianceNorm_AVERAGE) { - n = (fan_in + fan_out) / Dtype(2); - } else if (this->filler_param_.variance_norm() == - FillerParameter_VarianceNorm_FAN_OUT) { - n = fan_out; - } - Dtype scale = sqrt(Dtype(3) / n); + Dtype scale = sqrt(Dtype(3) / fan_in); caffe_rng_uniform(blob->count(), -scale, scale, blob->mutable_cpu_data()); CHECK_EQ(this->filler_param_.sparse(), -1) @@ -166,47 +156,6 @@ class XavierFiller : public Filler { } }; -/** - * @brief Fills a Blob with values @f$ x \sim N(0, \sigma^2) @f$ where - * @f$ \sigma^2 @f$ is set inversely proportional to number of incoming - * nodes, outgoing nodes, or their average. - * - * A Filler based on the paper [He, Zhang, Ren and Sun 2015]: Specifically - * accounts for ReLU nonlinearities. - * - * Aside: for another perspective on the scaling factor, see the derivation of - * [Saxe, McClelland, and Ganguli 2013 (v3)]. - * - * It fills the incoming matrix by randomly sampling Gaussian data with std = - * sqrt(2 / n) where n is the fan_in, fan_out, or their average, depending on - * the variance_norm option. You should make sure the input blob has shape (num, - * a, b, c) where a * b * c = fan_in and num * b * c = fan_out. Note that this - * is currently not the case for inner product layers. 
- */ -template -class MSRAFiller : public Filler { - public: - explicit MSRAFiller(const FillerParameter& param) - : Filler(param) {} - virtual void Fill(Blob* blob) { - CHECK(blob->count()); - int fan_in = blob->count() / blob->num(); - int fan_out = blob->count() / blob->channels(); - Dtype n = fan_in; // default to fan_in - if (this->filler_param_.variance_norm() == - FillerParameter_VarianceNorm_AVERAGE) { - n = (fan_in + fan_out) / Dtype(2); - } else if (this->filler_param_.variance_norm() == - FillerParameter_VarianceNorm_FAN_OUT) { - n = fan_out; - } - Dtype std = sqrt(Dtype(2) / n); - caffe_rng_gaussian(blob->count(), Dtype(0), std, - blob->mutable_cpu_data()); - CHECK_EQ(this->filler_param_.sparse(), -1) - << "Sparsity not supported by this Filler."; - } -}; /** * @brief Get a specific filler from the specification given in FillerParameter. @@ -227,8 +176,6 @@ Filler* GetFiller(const FillerParameter& param) { return new UniformFiller(param); } else if (type == "xavier") { return new XavierFiller(param); - } else if (type == "msra") { - return new MSRAFiller(param); } else { CHECK(false) << "Unknown filler name: " << param.type(); } diff --git a/include/caffe/layer.hpp b/include/caffe/layer.hpp index 8f924a75755..2d13ef97c05 100644 --- a/include/caffe/layer.hpp +++ b/include/caffe/layer.hpp @@ -406,7 +406,6 @@ template inline Dtype Layer::Forward(const vector*>& bottom, const vector*>& top) { Dtype loss = 0; - Reshape(bottom, top); switch (Caffe::mode()) { case Caffe::CPU: Forward_cpu(bottom, top); diff --git a/include/caffe/loss_layers.hpp b/include/caffe/loss_layers.hpp index 86c34241168..2f9c1f567a1 100644 --- a/include/caffe/loss_layers.hpp +++ b/include/caffe/loss_layers.hpp @@ -210,6 +210,73 @@ class ContrastiveLossLayer : public LossLayer { Blob summer_vec_; // tmp storage for gpu forward pass }; +template +class TripletLossLayer : public LossLayer { + public: + explicit TripletLossLayer(const LayerParameter& param) + : LossLayer(param), diff_() {} + 
virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline int ExactNumBottomBlobs() const { return 4; } + virtual inline const char* type() const { return "TripletLoss"; } + /** + * Unlike most loss layers, in the TripletLossLayer we can backpropagate + * to the first three inputs. + */ + virtual inline bool AllowForceBackward(const int bottom_index) const { + return bottom_index != 3; + } + + protected: + /// @copydoc TripletLossLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the Triplet error gradient w.r.t. the inputs. + * + * Computes the gradients with respect to the two input vectors (bottom[0] and + * bottom[1]), but not the similarity label (bottom[2]). + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) + * @param propagate_down see Layer::Backward. 
+ * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times 1 \times 1) @f$ + * the features @f$a@f$; Backward fills their diff with + * gradients if propagate_down[0] + * -# @f$ (N \times C \times 1 \times 1) @f$ + * the features @f$b@f$; Backward fills their diff with gradients if + * propagate_down[1] + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + Blob diff_; // cached for backward pass + Blob diff_pos; + Blob diff_neg; + Blob dist_sq_; // cached for backward pass + Blob dist_sq_pos; + Blob dist_sq_neg; + Blob diff_sq_; // tmp storage for gpu forward pass + Blob diff_sq_pos; + Blob diff_sq_neg; + Blob summer_vec_; // tmp storage for gpu forward pass +}; + /** * @brief Computes the Euclidean (L2) loss @f$ * E = \frac{1}{2N} \sum\limits_{n=1}^N \left| \left| \hat{y}_n - y_n @@ -605,6 +672,8 @@ class SigmoidCrossEntropyLossLayer : public LossLayer { /// @copydoc SigmoidCrossEntropyLossLayer virtual void Forward_cpu(const vector*>& bottom, const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); /** * @brief Computes the sigmoid cross-entropy loss error gradient w.r.t. 
the diff --git a/include/caffe/net.hpp b/include/caffe/net.hpp index 5665df1edf2..075afebc9b0 100644 --- a/include/caffe/net.hpp +++ b/include/caffe/net.hpp @@ -137,9 +137,6 @@ class Net { inline const vector& blob_loss_weights() const { return blob_loss_weights_; } - inline const vector& layer_need_backward() const { - return layer_need_backward_; - } /// @brief returns the parameters inline const vector > >& params() const { return params_; diff --git a/include/caffe/neuron_layers.hpp b/include/caffe/neuron_layers.hpp index c2e0774aaa2..323215134c7 100644 --- a/include/caffe/neuron_layers.hpp +++ b/include/caffe/neuron_layers.hpp @@ -8,6 +8,7 @@ #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/layer.hpp" +#include "caffe/net.hpp" #include "caffe/proto/caffe.pb.h" #define HDF5_DATA_DATASET_NAME "data" @@ -267,72 +268,6 @@ class ExpLayer : public NeuronLayer { Dtype inner_scale_, outer_scale_; }; -/** - * @brief Computes @f$ y = log_{\gamma}(\alpha x + \beta) @f$, - * as specified by the scale @f$ \alpha @f$, shift @f$ \beta @f$, - * and base @f$ \gamma @f$. 
- */ -template -class LogLayer : public NeuronLayer { - public: - /** - * @param param provides LogParameter log_param, - * with LogLayer options: - * - scale (\b optional, default 1) the scale @f$ \alpha @f$ - * - shift (\b optional, default 0) the shift @f$ \beta @f$ - * - base (\b optional, default -1 for a value of @f$ e \approx 2.718 @f$) - * the base @f$ \gamma @f$ - */ - explicit LogLayer(const LayerParameter& param) - : NeuronLayer(param) {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "Log"; } - - protected: - /** - * @param bottom input Blob vector (length 1) - * -# @f$ (N \times C \times H \times W) @f$ - * the inputs @f$ x @f$ - * @param top output Blob vector (length 1) - * -# @f$ (N \times C \times H \times W) @f$ - * the computed outputs @f$ - * y = log_{\gamma}(\alpha x + \beta) - * @f$ - */ - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - - /** - * @brief Computes the error gradient w.r.t. the exp inputs. - * - * @param top output Blob vector (length 1), providing the error gradient with - * respect to the outputs - * -# @f$ (N \times C \times H \times W) @f$ - * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ - * with respect to computed outputs @f$ y @f$ - * @param propagate_down see Layer::Backward. 
- * @param bottom input Blob vector (length 1) - * -# @f$ (N \times C \times H \times W) @f$ - * the inputs @f$ x @f$; Backward fills their diff with - * gradients @f$ - * \frac{\partial E}{\partial x} = - * \frac{\partial E}{\partial y} y \alpha \log_e(gamma) - * @f$ if propagate_down[0] - */ - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - - Dtype base_scale_; - Dtype input_scale_, input_shift_; - Dtype backward_num_scale_; -}; - /** * @brief Computes @f$ y = (\alpha x + \beta) ^ \gamma @f$, * as specified by the scale @f$ \alpha @f$, shift @f$ \beta @f$, @@ -799,8 +734,7 @@ class PReLULayer : public NeuronLayer { const vector& propagate_down, const vector*>& bottom); bool channel_shared_; - Blob multiplier_; // dot multiplier for backward computation of params - Blob backward_buff_; // temporary buffer for backward computation + Blob multiplier_; // dot multipler for backward computation of params Blob bottom_memory_; // memory for in-place computation }; diff --git a/include/caffe/python_layer.hpp b/include/caffe/python_layer.hpp index 19cf18c9742..816ef453720 100644 --- a/include/caffe/python_layer.hpp +++ b/include/caffe/python_layer.hpp @@ -14,12 +14,12 @@ template class PythonLayer : public Layer { public: PythonLayer(PyObject* self, const LayerParameter& param) - : Layer(param), self_(bp::handle<>(bp::borrowed(self))) { } + : Layer(param), self_(self) { } virtual void LayerSetUp(const vector*>& bottom, const vector*>& top) { try { - self_.attr("setup")(bottom, top); + bp::call_method(self_, "setup", bottom, top); } catch (bp::error_already_set) { PyErr_Print(); throw; @@ -29,7 +29,7 @@ class PythonLayer : public Layer { virtual void Reshape(const vector*>& bottom, const vector*>& top) { try { - self_.attr("reshape")(bottom, top); + bp::call_method(self_, "reshape", bottom, top); } catch 
(bp::error_already_set) { PyErr_Print(); throw; @@ -42,7 +42,7 @@ class PythonLayer : public Layer { virtual void Forward_cpu(const vector*>& bottom, const vector*>& top) { try { - self_.attr("forward")(bottom, top); + bp::call_method(self_, "forward", bottom, top); } catch (bp::error_already_set) { PyErr_Print(); throw; @@ -51,7 +51,8 @@ class PythonLayer : public Layer { virtual void Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { try { - self_.attr("backward")(top, propagate_down, bottom); + bp::call_method(self_, "backward", top, propagate_down, + bottom); } catch (bp::error_already_set) { PyErr_Print(); throw; @@ -59,7 +60,7 @@ class PythonLayer : public Layer { } private: - bp::object self_; + PyObject* self_; }; } // namespace caffe diff --git a/include/caffe/solver.hpp b/include/caffe/solver.hpp index c2ced487d6f..4dcdc3dc20b 100644 --- a/include/caffe/solver.hpp +++ b/include/caffe/solver.hpp @@ -11,7 +11,7 @@ namespace caffe { /** * @brief An interface for classes that perform optimization on Net%s. * - * Requires implementation of ApplyUpdate to compute a parameter update + * Requires implementation of ComputeUpdateValue to compute a parameter update * given the current state of the Net parameters. */ template @@ -39,8 +39,8 @@ class Solver { int iter() { return iter_; } protected: - // Make and apply the update value for the current iteration. - virtual void ApplyUpdate() = 0; + // Get the update value for the current iteration. + virtual void ComputeUpdateValue() = 0; // The Solver::Snapshot function implements the basic snapshotting utility // that stores the learned net. 
You should implement the SnapshotSolverState() // function that produces a SolverState protocol buffer that needs to be @@ -80,10 +80,7 @@ class SGDSolver : public Solver { protected: void PreSolve(); Dtype GetLearningRate(); - virtual void ApplyUpdate(); - virtual void Normalize(int param_id); - virtual void Regularize(int param_id); - virtual void ComputeUpdateValue(int param_id, Dtype rate); + virtual void ComputeUpdateValue(); virtual void ClipGradients(); virtual void SnapshotSolverState(SolverState * state); virtual void RestoreSolverState(const SolverState& state); @@ -105,7 +102,7 @@ class NesterovSolver : public SGDSolver { : SGDSolver(param_file) {} protected: - virtual void ComputeUpdateValue(int param_id, Dtype rate); + virtual void ComputeUpdateValue(); DISABLE_COPY_AND_ASSIGN(NesterovSolver); }; @@ -119,7 +116,7 @@ class AdaGradSolver : public SGDSolver { : SGDSolver(param_file) { constructor_sanity_check(); } protected: - virtual void ComputeUpdateValue(int param_id, Dtype rate); + virtual void ComputeUpdateValue(); void constructor_sanity_check() { CHECK_EQ(0, this->param_.momentum()) << "Momentum cannot be used with AdaGrad."; diff --git a/include/caffe/test/test_caffe_main.hpp b/include/caffe/test/test_caffe_main.hpp index fc156091476..bd5f31e063f 100644 --- a/include/caffe/test/test_caffe_main.hpp +++ b/include/caffe/test/test_caffe_main.hpp @@ -40,36 +40,34 @@ class MultiDeviceTest : public ::testing::Test { typedef ::testing::Types TestDtypes; -template -struct CPUDevice { - typedef TypeParam Dtype; +struct FloatCPU { + typedef float Dtype; static const Caffe::Brew device = Caffe::CPU; }; -template -class CPUDeviceTest : public MultiDeviceTest > { +struct DoubleCPU { + typedef double Dtype; + static const Caffe::Brew device = Caffe::CPU; }; #ifdef CPU_ONLY -typedef ::testing::Types, - CPUDevice > TestDtypesAndDevices; +typedef ::testing::Types TestDtypesAndDevices; #else -template -struct GPUDevice { - typedef TypeParam Dtype; +struct FloatGPU 
{ + typedef float Dtype; static const Caffe::Brew device = Caffe::GPU; }; -template -class GPUDeviceTest : public MultiDeviceTest > { +struct DoubleGPU { + typedef double Dtype; + static const Caffe::Brew device = Caffe::GPU; }; -typedef ::testing::Types, CPUDevice, - GPUDevice, GPUDevice > - TestDtypesAndDevices; +typedef ::testing::Types + TestDtypesAndDevices; #endif diff --git a/include/caffe/test/test_gradient_check_util.hpp b/include/caffe/test/test_gradient_check_util.hpp index cc5dcbad0ee..22937711b58 100644 --- a/include/caffe/test/test_gradient_check_util.hpp +++ b/include/caffe/test/test_gradient_check_util.hpp @@ -80,14 +80,11 @@ void GradientChecker::CheckGradientSingle(Layer* layer, CHECK_EQ(top_count, bottom[blob_id]->count()); } } - // First, figure out what blobs we need to check against, and zero init - // parameter blobs. + // First, figure out what blobs we need to check against. vector*> blobs_to_check; vector propagate_down(bottom.size(), check_bottom < 0); for (int i = 0; i < layer->blobs().size(); ++i) { - Blob* blob = layer->blobs()[i].get(); - caffe_set(blob->count(), static_cast(0), blob->mutable_cpu_diff()); - blobs_to_check.push_back(blob); + blobs_to_check.push_back(layer->blobs()[i].get()); } if (check_bottom < 0) { for (int i = 0; i < bottom.size(); ++i) { diff --git a/include/caffe/util/math_functions.hpp b/include/caffe/util/math_functions.hpp index 2cacd8e72cd..f43036fcebc 100644 --- a/include/caffe/util/math_functions.hpp +++ b/include/caffe/util/math_functions.hpp @@ -88,9 +88,6 @@ void caffe_rng_bernoulli(const int n, const Dtype p, unsigned int* r); template void caffe_exp(const int n, const Dtype* a, Dtype* y); -template -void caffe_log(const int n, const Dtype* a, Dtype* y); - template void caffe_abs(const int n, const Dtype* a, Dtype* y); @@ -206,9 +203,6 @@ void caffe_gpu_abs(const int n, const Dtype* a, Dtype* y); template void caffe_gpu_exp(const int n, const Dtype* a, Dtype* y); -template -void caffe_gpu_log(const int 
n, const Dtype* a, Dtype* y); - template void caffe_gpu_powx(const int n, const Dtype* a, const Dtype b, Dtype* y); diff --git a/include/caffe/util/mkl_alternate.hpp b/include/caffe/util/mkl_alternate.hpp index 3355b6658a3..32fdbf79932 100644 --- a/include/caffe/util/mkl_alternate.hpp +++ b/include/caffe/util/mkl_alternate.hpp @@ -33,7 +33,6 @@ extern "C" { DEFINE_VSL_UNARY_FUNC(Sqr, y[i] = a[i] * a[i]); DEFINE_VSL_UNARY_FUNC(Exp, y[i] = exp(a[i])); -DEFINE_VSL_UNARY_FUNC(Ln, y[i] = log(a[i])); DEFINE_VSL_UNARY_FUNC(Abs, y[i] = fabs(a[i])); // A simple way to define the vsl unary functions with singular parameter b. diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index a6bd86a93f5..cd0ab8babb0 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -453,72 +453,6 @@ class CuDNNPoolingLayer : public PoolingLayer { }; #endif -/** - * @brief Does spatial pyramid pooling on the input image - * by taking the max, average, etc. within regions - * so that the result vector of different sized - * images are of the same size. - */ -template -class SPPLayer : public Layer { - public: - explicit SPPLayer(const LayerParameter& param) - : Layer(param) {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "SPP"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int MinTopBlobs() const { return 1; } - // MAX POOL layers can output an extra top blob for the mask; - // others can only output the pooled inputs. - virtual inline int MaxTopBlobs() const { - return (this->layer_param_.pooling_param().pool() == - PoolingParameter_PoolMethod_MAX) ? 
2 : 1; - } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - // calculates the kernel and stride dimensions for the pooling layer, - // returns a correctly configured LayerParameter for a PoolingLayer - virtual LayerParameter GetPoolingParam(const int pyramid_level, - const int bottom_h, const int bottom_w, const SPPParameter spp_param); - - int pyramid_height_; - int bottom_h_, bottom_w_; - int channels_; - int kernel_h_, kernel_w_; - int pad_h_, pad_w_; - - /// the internal Split layer that feeds the pooling layers - shared_ptr > split_layer_; - /// top vector holder used in call to the underlying SplitLayer::Forward - vector*> split_top_vec_; - /// bottom vector holder used in call to the underlying PoolingLayer::Forward - vector*>*> pooling_bottom_vecs_; - /// the internal Pooling layers of different kernel sizes - vector > > pooling_layers_; - /// top vector holders used in call to the underlying PoolingLayer::Forward - vector*>*> pooling_top_vecs_; - /// pooling_outputs stores the outputs of the PoolingLayers - vector*> pooling_outputs_; - /// the internal Flatten layers that the Pooling layers feed into - vector*> flatten_layers_; - /// top vector holders used in call to the underlying FlattenLayer::Forward - vector*>*> flatten_top_vecs_; - /// flatten_outputs stores the outputs of the FlattenLayers - vector*> flatten_outputs_; - /// bottom vector holder used in call to the underlying ConcatLayer::Forward - vector*> concat_bottom_vec_; - /// the internal Concat layers that the Flatten layers feed into - shared_ptr > concat_layer_; -}; - } // namespace caffe #endif // CAFFE_VISION_LAYERS_HPP_ diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp index 94fdcc35fb6..6d2b3f502d9 100644 --- a/src/caffe/blob.cpp +++ b/src/caffe/blob.cpp @@ -26,7 +26,6 @@ void Blob::Reshape(const vector& shape) { 
shape_.resize(shape.size()); for (int i = 0; i < shape.size(); ++i) { CHECK_GE(shape[i], 0); - CHECK_LE(shape[i], INT_MAX / count_) << "blob size exceeds INT_MAX"; count_ *= shape[i]; shape_[i] = shape[i]; } diff --git a/src/caffe/data_transformer.cpp b/src/caffe/data_transformer.cpp index 6f75bdb3852..b0b98e478c1 100644 --- a/src/caffe/data_transformer.cpp +++ b/src/caffe/data_transformer.cpp @@ -125,31 +125,10 @@ void DataTransformer::Transform(const Datum& datum, template void DataTransformer::Transform(const Datum& datum, Blob* transformed_blob) { - // If datum is encoded, decoded and transform the cv::image. - if (datum.encoded()) { - CHECK(!param_.force_color() && !param_.force_gray()) - << "cannot set both force_color and force_gray"; - cv::Mat cv_img; - if (param_.force_color() || param_.force_gray()) { - // If force_color then decode in color otherwise decode in gray. - cv_img = DecodeDatumToCVMat(datum, param_.force_color()); - } else { - cv_img = DecodeDatumToCVMatNative(datum); - } - // Transform the cv::image into blob. - return Transform(cv_img, transformed_blob); - } else { - if (param_.force_color() || param_.force_gray()) { - LOG(ERROR) << "force_color and force_gray only for encoded datum"; - } - } - - const int crop_size = param_.crop_size(); const int datum_channels = datum.channels(); const int datum_height = datum.height(); const int datum_width = datum.width(); - // Check dimensions. 
const int channels = transformed_blob->channels(); const int height = transformed_blob->height(); const int width = transformed_blob->width(); @@ -160,6 +139,8 @@ void DataTransformer::Transform(const Datum& datum, CHECK_LE(width, datum_width); CHECK_GE(num, 1); + const int crop_size = param_.crop_size(); + if (crop_size) { CHECK_EQ(crop_size, height); CHECK_EQ(crop_size, width); @@ -215,12 +196,10 @@ void DataTransformer::Transform(const vector & mat_vector, template void DataTransformer::Transform(const cv::Mat& cv_img, Blob* transformed_blob) { - const int crop_size = param_.crop_size(); const int img_channels = cv_img.channels(); const int img_height = cv_img.rows; const int img_width = cv_img.cols; - // Check dimensions. const int channels = transformed_blob->channels(); const int height = transformed_blob->height(); const int width = transformed_blob->width(); @@ -233,6 +212,7 @@ void DataTransformer::Transform(const cv::Mat& cv_img, CHECK(cv_img.depth() == CV_8U) << "Image data type must be unsigned byte"; + const int crop_size = param_.crop_size(); const Dtype scale = param_.scale(); const bool do_mirror = param_.mirror() && Rand(2); const bool has_mean_file = param_.has_mean_file(); @@ -317,23 +297,11 @@ void DataTransformer::Transform(const cv::Mat& cv_img, template void DataTransformer::Transform(Blob* input_blob, Blob* transformed_blob) { - const int crop_size = param_.crop_size(); const int input_num = input_blob->num(); const int input_channels = input_blob->channels(); const int input_height = input_blob->height(); const int input_width = input_blob->width(); - if (transformed_blob->count() == 0) { - // Initialize transformed_blob with the right shape. 
- if (crop_size) { - transformed_blob->Reshape(input_num, input_channels, - crop_size, crop_size); - } else { - transformed_blob->Reshape(input_num, input_channels, - input_height, input_width); - } - } - const int num = transformed_blob->num(); const int channels = transformed_blob->channels(); const int height = transformed_blob->height(); @@ -345,7 +313,7 @@ void DataTransformer::Transform(Blob* input_blob, CHECK_GE(input_height, height); CHECK_GE(input_width, width); - + const int crop_size = param_.crop_size(); const Dtype scale = param_.scale(); const bool do_mirror = param_.mirror() && Rand(2); const bool has_mean_file = param_.has_mean_file(); @@ -427,82 +395,6 @@ void DataTransformer::Transform(Blob* input_blob, } } -template -vector DataTransformer::InferBlobShape(const Datum& datum) { - if (datum.encoded()) { - CHECK(!param_.force_color() && !param_.force_gray()) - << "cannot set both force_color and force_gray"; - cv::Mat cv_img; - if (param_.force_color() || param_.force_gray()) { - // If force_color then decode in color otherwise decode in gray. - cv_img = DecodeDatumToCVMat(datum, param_.force_color()); - } else { - cv_img = DecodeDatumToCVMatNative(datum); - } - // InferBlobShape using the cv::image. - return InferBlobShape(cv_img); - } - - const int crop_size = param_.crop_size(); - const int datum_channels = datum.channels(); - const int datum_height = datum.height(); - const int datum_width = datum.width(); - // Check dimensions. - CHECK_GT(datum_channels, 0); - CHECK_GE(datum_height, crop_size); - CHECK_GE(datum_width, crop_size); - // Build BlobShape. - vector shape(4); - shape[0] = 1; - shape[1] = datum_channels; - shape[2] = (crop_size)? crop_size: datum_height; - shape[3] = (crop_size)? 
crop_size: datum_width; - return shape; -} - -template -vector DataTransformer::InferBlobShape( - const vector & datum_vector) { - const int num = datum_vector.size(); - CHECK_GT(num, 0) << "There is no datum to in the vector"; - // Use first datum in the vector to InferBlobShape. - vector shape = InferBlobShape(datum_vector[0]); - // Adjust num to the size of the vector. - shape[0] = num; - return shape; -} - -template -vector DataTransformer::InferBlobShape(const cv::Mat& cv_img) { - const int crop_size = param_.crop_size(); - const int img_channels = cv_img.channels(); - const int img_height = cv_img.rows; - const int img_width = cv_img.cols; - // Check dimensions. - CHECK_GT(img_channels, 0); - CHECK_GE(img_height, crop_size); - CHECK_GE(img_width, crop_size); - // Build BlobShape. - vector shape(4); - shape[0] = 1; - shape[1] = img_channels; - shape[2] = (crop_size)? crop_size: img_height; - shape[3] = (crop_size)? crop_size: img_width; - return shape; -} - -template -vector DataTransformer::InferBlobShape( - const vector & mat_vector) { - const int num = mat_vector.size(); - CHECK_GT(num, 0) << "There is no cv_img to in the vector"; - // Use first cv_img in the vector to InferBlobShape. - vector shape = InferBlobShape(mat_vector[0]); - // Adjust num to the size of the vector. 
- shape[0] = num; - return shape; -} - template void DataTransformer::InitRand() { const bool needs_rand = param_.mirror() || diff --git a/src/caffe/layers/base_data_layer.cpp b/src/caffe/layers/base_data_layer.cpp index 26a1118282f..352200915d7 100644 --- a/src/caffe/layers/base_data_layer.cpp +++ b/src/caffe/layers/base_data_layer.cpp @@ -2,6 +2,7 @@ #include #include "caffe/data_layers.hpp" +#include "caffe/net.hpp" #include "caffe/util/io.hpp" namespace caffe { @@ -20,11 +21,11 @@ void BaseDataLayer::LayerSetUp(const vector*>& bottom, } else { output_labels_ = true; } + // The subclasses should setup the size of bottom and top + DataLayerSetUp(bottom, top); data_transformer_.reset( new DataTransformer(transform_param_, this->phase_)); data_transformer_->InitRand(); - // The subclasses should setup the size of bottom and top - DataLayerSetUp(bottom, top); } template @@ -62,15 +63,13 @@ void BasePrefetchingDataLayer::Forward_cpu( JoinPrefetchThread(); DLOG(INFO) << "Thread joined"; // Reshape to loaded data. - top[0]->ReshapeLike(prefetch_data_); + top[0]->Reshape(this->prefetch_data_.num(), this->prefetch_data_.channels(), + this->prefetch_data_.height(), this->prefetch_data_.width()); // Copy the data caffe_copy(prefetch_data_.count(), prefetch_data_.cpu_data(), top[0]->mutable_cpu_data()); DLOG(INFO) << "Prefetch copied"; if (this->output_labels_) { - // Reshape to loaded labels. - top[1]->ReshapeLike(prefetch_label_); - // Copy the labels. caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(), top[1]->mutable_cpu_data()); } diff --git a/src/caffe/layers/base_data_layer.cu b/src/caffe/layers/base_data_layer.cu index 9335a5bc9a9..775f6c47f7e 100644 --- a/src/caffe/layers/base_data_layer.cu +++ b/src/caffe/layers/base_data_layer.cu @@ -10,14 +10,12 @@ void BasePrefetchingDataLayer::Forward_gpu( // First, join the thread JoinPrefetchThread(); // Reshape to loaded data. 
- top[0]->ReshapeLike(this->prefetch_data_); + top[0]->Reshape(this->prefetch_data_.num(), this->prefetch_data_.channels(), + this->prefetch_data_.height(), this->prefetch_data_.width()); // Copy the data caffe_copy(prefetch_data_.count(), prefetch_data_.cpu_data(), top[0]->mutable_gpu_data()); if (this->output_labels_) { - // Reshape to loaded labels. - top[1]->ReshapeLike(prefetch_label_); - // Copy the labels. caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(), top[1]->mutable_gpu_data()); } diff --git a/src/caffe/layers/concat_layer.cu b/src/caffe/layers/concat_layer.cu index 8f2e85d8f52..dbadb5aeb30 100644 --- a/src/caffe/layers/concat_layer.cu +++ b/src/caffe/layers/concat_layer.cu @@ -6,41 +6,21 @@ namespace caffe { -template -__global__ void Concat(const int nthreads, const Dtype* in_data, - const bool forward, const int num_concats, const int concat_size, - const int top_concat_axis, const int bottom_concat_axis, - const int offset_concat_axis, Dtype* out_data) { - CUDA_KERNEL_LOOP(index, nthreads) { - const int total_concat_size = concat_size * bottom_concat_axis; - const int concat_num = index / total_concat_size; - const int concat_index = index % total_concat_size; - const int top_index = concat_index + - (concat_num * top_concat_axis + offset_concat_axis) * concat_size; - if (forward) { - out_data[top_index] = in_data[index]; - } else { - out_data[index] = in_data[top_index]; - } - } -} - template void ConcatLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { Dtype* top_data = top[0]->mutable_gpu_data(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); - const bool kForward = true; for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); const int bottom_concat_axis = bottom[i]->shape(concat_axis_); - const int bottom_concat_size = bottom_concat_axis * concat_input_size_; - const int nthreads = bottom_concat_size * num_concats_; - Concat // 
NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - nthreads, bottom_data, kForward, num_concats_, concat_input_size_, - top_concat_axis, bottom_concat_axis, offset_concat_axis, top_data); + for (int n = 0; n < num_concats_; ++n) { + caffe_copy(bottom_concat_axis * concat_input_size_, + bottom_data + n * bottom_concat_axis * concat_input_size_, + top_data + (n * top_concat_axis + offset_concat_axis) + * concat_input_size_); + } offset_concat_axis += bottom_concat_axis; } } @@ -51,17 +31,15 @@ void ConcatLayer::Backward_gpu(const vector*>& top, const Dtype* top_diff = top[0]->gpu_diff(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); - const bool kForward = false; for (int i = 0; i < bottom.size(); ++i) { if (!propagate_down[i]) { continue; } Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); const int bottom_concat_axis = bottom[i]->shape(concat_axis_); - const int bottom_concat_size = bottom_concat_axis * concat_input_size_; - const int nthreads = bottom_concat_size * num_concats_; - Concat // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - nthreads, top_diff, kForward, num_concats_, concat_input_size_, - top_concat_axis, bottom_concat_axis, offset_concat_axis, bottom_diff); + for (int n = 0; n < num_concats_; ++n) { + caffe_copy(bottom_concat_axis * concat_input_size_, top_diff + + (n * top_concat_axis + offset_concat_axis) * concat_input_size_, + bottom_diff + n * bottom_concat_axis * concat_input_size_); + } offset_concat_axis += bottom_concat_axis; } } diff --git a/src/caffe/layers/contrastive_loss_layer.cpp b/src/caffe/layers/contrastive_loss_layer.cpp index 25e167819d3..0692c11c257 100644 --- a/src/caffe/layers/contrastive_loss_layer.cpp +++ b/src/caffe/layers/contrastive_loss_layer.cpp @@ -41,8 +41,6 @@ void ContrastiveLossLayer::Forward_cpu( diff_.mutable_cpu_data()); // a_i-b_i const int channels = bottom[0]->channels(); Dtype margin = this->layer_param_.contrastive_loss_param().margin(); - bool legacy_version = - 
this->layer_param_.contrastive_loss_param().legacy_version(); Dtype loss(0.0); for (int i = 0; i < bottom[0]->num(); ++i) { dist_sq_.mutable_cpu_data()[i] = caffe_cpu_dot(channels, @@ -50,12 +48,7 @@ void ContrastiveLossLayer::Forward_cpu( if (static_cast(bottom[2]->cpu_data()[i])) { // similar pairs loss += dist_sq_.cpu_data()[i]; } else { // dissimilar pairs - if (legacy_version) { - loss += std::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0)); - } else { - Dtype dist = std::max(margin - sqrt(dist_sq_.cpu_data()[i]), 0.0); - loss += dist*dist; - } + loss += std::max(margin-dist_sq_.cpu_data()[i], Dtype(0.0)); } } loss = loss / static_cast(bottom[0]->num()) / Dtype(2); @@ -66,8 +59,6 @@ template void ContrastiveLossLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { Dtype margin = this->layer_param_.contrastive_loss_param().margin(); - bool legacy_version = - this->layer_param_.contrastive_loss_param().legacy_version(); for (int i = 0; i < 2; ++i) { if (propagate_down[i]) { const Dtype sign = (i == 0) ? 
1 : -1; @@ -85,20 +76,10 @@ void ContrastiveLossLayer::Backward_cpu(const vector*>& top, Dtype(0.0), bout + (j*channels)); } else { // dissimilar pairs - Dtype mdist(0.0); - Dtype beta(0.0); - if (legacy_version) { - mdist = margin - dist_sq_.cpu_data()[j]; - beta = -alpha; - } else { - Dtype dist = sqrt(dist_sq_.cpu_data()[j]); - mdist = margin - dist; - beta = -alpha * mdist / (dist + Dtype(1e-4)); - } - if (mdist > Dtype(0.0)) { + if ((margin-dist_sq_.cpu_data()[j]) > Dtype(0.0)) { caffe_cpu_axpby( channels, - beta, + -alpha, diff_.cpu_data() + (j*channels), Dtype(0.0), bout + (j*channels)); diff --git a/src/caffe/layers/contrastive_loss_layer.cu b/src/caffe/layers/contrastive_loss_layer.cu index 931239316ac..78a55995a0a 100644 --- a/src/caffe/layers/contrastive_loss_layer.cu +++ b/src/caffe/layers/contrastive_loss_layer.cu @@ -32,20 +32,12 @@ void ContrastiveLossLayer::Forward_gpu( Dtype(0.0), dist_sq_.mutable_gpu_data()); // \Sum (a_i-b_i)^2 Dtype margin = this->layer_param_.contrastive_loss_param().margin(); - bool legacy_version = - this->layer_param_.contrastive_loss_param().legacy_version(); Dtype loss(0.0); for (int i = 0; i < bottom[0]->num(); ++i) { if (static_cast(bottom[2]->cpu_data()[i])) { // similar pairs loss += dist_sq_.cpu_data()[i]; } else { // dissimilar pairs - if (legacy_version) { - loss += std::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0)); - } else { - Dtype dist = std::max(margin - sqrt(dist_sq_.cpu_data()[i]), - Dtype(0.0)); - loss += dist*dist; - } + loss += std::max(margin-dist_sq_.cpu_data()[i], Dtype(0.0)); } } loss = loss / static_cast(bottom[0]->num()) / Dtype(2); @@ -53,8 +45,8 @@ void ContrastiveLossLayer::Forward_gpu( } template -__global__ void CLLBackward(const int count, const int channels, - const Dtype margin, const bool legacy_version, const Dtype alpha, +__global__ void CLLForward(const int count, const int channels, + const Dtype margin, const Dtype alpha, const Dtype* y, const Dtype* diff, const Dtype* dist_sq, 
Dtype *bottom_diff) { CUDA_KERNEL_LOOP(i, count) { @@ -62,18 +54,8 @@ __global__ void CLLBackward(const int count, const int channels, if (static_cast(y[n])) { // similar pairs bottom_diff[i] = alpha * diff[i]; } else { // dissimilar pairs - Dtype mdist(0.0); - Dtype beta(0.0); - if (legacy_version) { - mdist = (margin - dist_sq[n]); - beta = -alpha; - } else { - Dtype dist = sqrt(dist_sq[n]); - mdist = (margin - dist); - beta = -alpha * mdist / (dist + Dtype(1e-4)) * diff[i]; - } - if (mdist > 0.0) { - bottom_diff[i] = beta; + if ((margin-dist_sq[n]) > 0.0) { + bottom_diff[i] = -alpha * diff[i]; } else { bottom_diff[i] = 0; } @@ -89,14 +71,12 @@ void ContrastiveLossLayer::Backward_gpu(const vector*>& top, const int count = bottom[0]->count(); const int channels = bottom[0]->channels(); Dtype margin = this->layer_param_.contrastive_loss_param().margin(); - const bool legacy_version = - this->layer_param_.contrastive_loss_param().legacy_version(); const Dtype sign = (i == 0) ? 1 : -1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / static_cast(bottom[0]->num()); // NOLINT_NEXT_LINE(whitespace/operators) - CLLBackward<<>>( - count, channels, margin, legacy_version, alpha, + CLLForward<<>>( + count, channels, margin, alpha, bottom[2]->gpu_data(), // pair similarity 0 or 1 diff_.gpu_data(), // the cached eltwise difference between a and b dist_sq_.gpu_data(), // the cached square distance between a and b diff --git a/src/caffe/layers/conv_layer.cpp b/src/caffe/layers/conv_layer.cpp index 928ef5ee468..c0c9f6f3371 100644 --- a/src/caffe/layers/conv_layer.cpp +++ b/src/caffe/layers/conv_layer.cpp @@ -39,6 +39,13 @@ void ConvolutionLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->cpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_cpu_diff(); + if (this->param_propagate_down_[0]) { + caffe_set(this->blobs_[0]->count(), Dtype(0), weight_diff); + } + if (this->bias_term_ && 
this->param_propagate_down_[1]) { + caffe_set(this->blobs_[1]->count(), Dtype(0), + this->blobs_[1]->mutable_cpu_diff()); + } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->cpu_diff(); const Dtype* bottom_data = bottom[i]->cpu_data(); diff --git a/src/caffe/layers/conv_layer.cu b/src/caffe/layers/conv_layer.cu index b8a98ff7cc9..3902fdf3930 100644 --- a/src/caffe/layers/conv_layer.cu +++ b/src/caffe/layers/conv_layer.cu @@ -31,6 +31,13 @@ void ConvolutionLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); + if (this->param_propagate_down_[0]) { + caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); + } + if (this->bias_term_ && this->param_propagate_down_[1]) { + caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), + this->blobs_[1]->mutable_gpu_diff()); + } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); // Bias gradient, if necessary. 
diff --git a/src/caffe/layers/cudnn_conv_layer.cu b/src/caffe/layers/cudnn_conv_layer.cu index b4e802e13d1..4a1a4c4f4f2 100644 --- a/src/caffe/layers/cudnn_conv_layer.cu +++ b/src/caffe/layers/cudnn_conv_layer.cu @@ -101,10 +101,12 @@ void CuDNNConvolutionLayer::Backward_gpu(const vector*>& top, if (this->param_propagate_down_[0]) { weight = this->blobs_[0]->gpu_data(); weight_diff = this->blobs_[0]->mutable_gpu_diff(); + caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); } Dtype* bias_diff = NULL; if (this->bias_term_ && this->param_propagate_down_[1]) { bias_diff = this->blobs_[1]->mutable_gpu_diff(); + caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), bias_diff); } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); diff --git a/src/caffe/layers/data_layer.cpp b/src/caffe/layers/data_layer.cpp index 161a75e0c8c..0f2d66776a9 100644 --- a/src/caffe/layers/data_layer.cpp +++ b/src/caffe/layers/data_layer.cpp @@ -38,17 +38,32 @@ void DataLayer::DataLayerSetUp(const vector*>& bottom, cursor_->Next(); } } - // Read a data point, to initialize the prefetch and top blobs. + // Read a data point, and use it to initialize the top blob. Datum datum; datum.ParseFromString(cursor_->value()); - // Use data_transformer to infer the expected blob shape from datum. - vector top_shape = this->data_transformer_->InferBlobShape(datum); - this->transformed_data_.Reshape(top_shape); - // Reshape top[0] and prefetch_data according to the batch_size. 
- top_shape[0] = this->layer_param_.data_param().batch_size(); - this->prefetch_data_.Reshape(top_shape); - top[0]->ReshapeLike(this->prefetch_data_); + bool force_color = this->layer_param_.data_param().force_encoded_color(); + if ((force_color && DecodeDatum(&datum, true)) || + DecodeDatumNative(&datum)) { + LOG(INFO) << "Decoding Datum"; + } + // image + int crop_size = this->layer_param_.transform_param().crop_size(); + if (crop_size > 0) { + top[0]->Reshape(this->layer_param_.data_param().batch_size(), + datum.channels(), crop_size, crop_size); + this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(), + datum.channels(), crop_size, crop_size); + this->transformed_data_.Reshape(1, datum.channels(), crop_size, crop_size); + } else { + top[0]->Reshape( + this->layer_param_.data_param().batch_size(), datum.channels(), + datum.height(), datum.width()); + this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(), + datum.channels(), datum.height(), datum.width()); + this->transformed_data_.Reshape(1, datum.channels(), + datum.height(), datum.width()); + } LOG(INFO) << "output data size: " << top[0]->num() << "," << top[0]->channels() << "," << top[0]->height() << "," << top[0]->width(); @@ -71,17 +86,25 @@ void DataLayer::InternalThreadEntry() { CHECK(this->prefetch_data_.count()); CHECK(this->transformed_data_.count()); - // Reshape according to the first datum of each batch - // on single input batches allows for inputs of varying dimension. + // Reshape on single input batches for inputs of varying dimension. const int batch_size = this->layer_param_.data_param().batch_size(); - Datum datum; - datum.ParseFromString(cursor_->value()); - // Use data_transformer to infer the expected blob shape from datum. - vector top_shape = this->data_transformer_->InferBlobShape(datum); - this->transformed_data_.Reshape(top_shape); - // Reshape prefetch_data according to the batch_size. 
- top_shape[0] = batch_size; - this->prefetch_data_.Reshape(top_shape); + const int crop_size = this->layer_param_.transform_param().crop_size(); + bool force_color = this->layer_param_.data_param().force_encoded_color(); + if (batch_size == 1 && crop_size == 0) { + Datum datum; + datum.ParseFromString(cursor_->value()); + if (datum.encoded()) { + if (force_color) { + DecodeDatum(&datum, true); + } else { + DecodeDatumNative(&datum); + } + } + this->prefetch_data_.Reshape(1, datum.channels(), + datum.height(), datum.width()); + this->transformed_data_.Reshape(1, datum.channels(), + datum.height(), datum.width()); + } Dtype* top_data = this->prefetch_data_.mutable_cpu_data(); Dtype* top_label = NULL; // suppress warnings about uninitialized variables @@ -89,31 +112,48 @@ void DataLayer::InternalThreadEntry() { if (this->output_labels_) { top_label = this->prefetch_label_.mutable_cpu_data(); } - timer.Start(); for (int item_id = 0; item_id < batch_size; ++item_id) { - // get a datum + timer.Start(); + // get a blob Datum datum; datum.ParseFromString(cursor_->value()); + + cv::Mat cv_img; + if (datum.encoded()) { + if (force_color) { + cv_img = DecodeDatumToCVMat(datum, true); + } else { + cv_img = DecodeDatumToCVMatNative(datum); + } + if (cv_img.channels() != this->transformed_data_.channels()) { + LOG(WARNING) << "Your dataset contains encoded images with mixed " + << "channel sizes. Consider adding a 'force_color' flag to the " + << "model definition, or rebuild your dataset using " + << "convert_imageset."; + } + } read_time += timer.MicroSeconds(); timer.Start(); + // Apply data transformations (mirror, scale, crop...) int offset = this->prefetch_data_.offset(item_id); this->transformed_data_.set_cpu_data(top_data + offset); - this->data_transformer_->Transform(datum, &(this->transformed_data_)); - // Copy label. 
+ if (datum.encoded()) { + this->data_transformer_->Transform(cv_img, &(this->transformed_data_)); + } else { + this->data_transformer_->Transform(datum, &(this->transformed_data_)); + } if (this->output_labels_) { top_label[item_id] = datum.label(); } trans_time += timer.MicroSeconds(); - timer.Start(); - // go to the next item. + // go to the next iter cursor_->Next(); if (!cursor_->valid()) { DLOG(INFO) << "Restarting data prefetching from start."; cursor_->SeekToFirst(); } } - timer.Stop(); batch_timer.Stop(); DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms."; DLOG(INFO) << " Read time: " << read_time / 1000 << " ms."; diff --git a/src/caffe/layers/deconv_layer.cpp b/src/caffe/layers/deconv_layer.cpp index a4612963b6b..e6d65ab526b 100644 --- a/src/caffe/layers/deconv_layer.cpp +++ b/src/caffe/layers/deconv_layer.cpp @@ -39,6 +39,13 @@ void DeconvolutionLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->cpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_cpu_diff(); + if (this->param_propagate_down_[0]) { + caffe_set(this->blobs_[0]->count(), Dtype(0), weight_diff); + } + if (this->bias_term_ && this->param_propagate_down_[1]) { + caffe_set(this->blobs_[1]->count(), Dtype(0), + this->blobs_[1]->mutable_cpu_diff()); + } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->cpu_diff(); const Dtype* bottom_data = bottom[i]->cpu_data(); diff --git a/src/caffe/layers/deconv_layer.cu b/src/caffe/layers/deconv_layer.cu index 39bc4de8c66..9198dd64c72 100644 --- a/src/caffe/layers/deconv_layer.cu +++ b/src/caffe/layers/deconv_layer.cu @@ -31,6 +31,13 @@ void DeconvolutionLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); + if (this->param_propagate_down_[0]) { + 
caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); + } + if (this->bias_term_ && this->param_propagate_down_[1]) { + caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), + this->blobs_[1]->mutable_gpu_diff()); + } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); const Dtype* bottom_data = bottom[i]->gpu_data(); diff --git a/src/caffe/layers/flatten_layer.cpp b/src/caffe/layers/flatten_layer.cpp index f7e5c9c2172..745f271ea45 100644 --- a/src/caffe/layers/flatten_layer.cpp +++ b/src/caffe/layers/flatten_layer.cpp @@ -9,19 +9,9 @@ namespace caffe { template void FlattenLayer::Reshape(const vector*>& bottom, const vector*>& top) { - const int start_axis = bottom[0]->CanonicalAxisIndex( - this->layer_param_.flatten_param().axis()); - const int end_axis = bottom[0]->CanonicalAxisIndex( - this->layer_param_.flatten_param().end_axis()); - vector top_shape; - for (int i = 0; i < start_axis; ++i) { - top_shape.push_back(bottom[0]->shape(i)); - } - const int flattened_dim = bottom[0]->count(start_axis, end_axis + 1); - top_shape.push_back(flattened_dim); - for (int i = end_axis + 1; i < bottom[0]->num_axes(); ++i) { - top_shape.push_back(bottom[0]->shape(i)); - } + vector top_shape(2); + top_shape[0] = bottom[0]->num(); + top_shape[1] = bottom[0]->count() / bottom[0]->num(); top[0]->Reshape(top_shape); CHECK_EQ(top[0]->count(), bottom[0]->count()); } diff --git a/src/caffe/layers/image_data_layer.cpp b/src/caffe/layers/image_data_layer.cpp index 18c035cba9d..38ebbd5ec14 100644 --- a/src/caffe/layers/image_data_layer.cpp +++ b/src/caffe/layers/image_data_layer.cpp @@ -62,15 +62,21 @@ void ImageDataLayer::DataLayerSetUp(const vector*>& bottom, // Read an image, and use it to initialize the top blob. cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, new_height, new_width, is_color); - // Use data_transformer to infer the expected blob shape from a cv_image. 
- vector top_shape = this->data_transformer_->InferBlobShape(cv_img); - this->transformed_data_.Reshape(top_shape); - // Reshape prefetch_data and top[0] according to the batch_size. + const int channels = cv_img.channels(); + const int height = cv_img.rows; + const int width = cv_img.cols; + // image + const int crop_size = this->layer_param_.transform_param().crop_size(); const int batch_size = this->layer_param_.image_data_param().batch_size(); - top_shape[0] = batch_size; - this->prefetch_data_.Reshape(top_shape); - top[0]->ReshapeLike(this->prefetch_data_); - + if (crop_size > 0) { + top[0]->Reshape(batch_size, channels, crop_size, crop_size); + this->prefetch_data_.Reshape(batch_size, channels, crop_size, crop_size); + this->transformed_data_.Reshape(1, channels, crop_size, crop_size); + } else { + top[0]->Reshape(batch_size, channels, height, width); + this->prefetch_data_.Reshape(batch_size, channels, height, width); + this->transformed_data_.Reshape(1, channels, height, width); + } LOG(INFO) << "output data size: " << top[0]->num() << "," << top[0]->channels() << "," << top[0]->height() << "," << top[0]->width(); @@ -101,19 +107,19 @@ void ImageDataLayer::InternalThreadEntry() { const int batch_size = image_data_param.batch_size(); const int new_height = image_data_param.new_height(); const int new_width = image_data_param.new_width(); + const int crop_size = this->layer_param_.transform_param().crop_size(); const bool is_color = image_data_param.is_color(); string root_folder = image_data_param.root_folder(); - // Reshape according to the first image of each batch - // on single input batches allows for inputs of varying dimension. - cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, - new_height, new_width, is_color); - // Use data_transformer to infer the expected blob shape from a cv_img. 
- vector top_shape = this->data_transformer_->InferBlobShape(cv_img); - this->transformed_data_.Reshape(top_shape); - // Reshape prefetch_data according to the batch_size. - top_shape[0] = batch_size; - this->prefetch_data_.Reshape(top_shape); + // Reshape on single input batches for inputs of varying dimension. + if (batch_size == 1 && crop_size == 0 && new_height == 0 && new_width == 0) { + cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, + 0, 0, is_color); + this->prefetch_data_.Reshape(1, cv_img.channels(), + cv_img.rows, cv_img.cols); + this->transformed_data_.Reshape(1, cv_img.channels(), + cv_img.rows, cv_img.cols); + } Dtype* prefetch_data = this->prefetch_data_.mutable_cpu_data(); Dtype* prefetch_label = this->prefetch_label_.mutable_cpu_data(); diff --git a/src/caffe/layers/inner_product_layer.cpp b/src/caffe/layers/inner_product_layer.cpp index 83c3235eb71..89e0c8fbad7 100644 --- a/src/caffe/layers/inner_product_layer.cpp +++ b/src/caffe/layers/inner_product_layer.cpp @@ -101,13 +101,13 @@ void InnerProductLayer::Backward_cpu(const vector*>& top, const Dtype* bottom_data = bottom[0]->cpu_data(); // Gradient with respect to weight caffe_cpu_gemm(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., - top_diff, bottom_data, (Dtype)1., this->blobs_[0]->mutable_cpu_diff()); + top_diff, bottom_data, (Dtype)0., this->blobs_[0]->mutable_cpu_diff()); } if (bias_term_ && this->param_propagate_down_[1]) { const Dtype* top_diff = top[0]->cpu_diff(); // Gradient with respect to bias caffe_cpu_gemv(CblasTrans, M_, N_, (Dtype)1., top_diff, - bias_multiplier_.cpu_data(), (Dtype)1., + bias_multiplier_.cpu_data(), (Dtype)0., this->blobs_[1]->mutable_cpu_diff()); } if (propagate_down[0]) { diff --git a/src/caffe/layers/inner_product_layer.cu b/src/caffe/layers/inner_product_layer.cu index dd90cac12a8..a9e1784a205 100644 --- a/src/caffe/layers/inner_product_layer.cu +++ b/src/caffe/layers/inner_product_layer.cu @@ -33,13 +33,13 @@ void 
InnerProductLayer::Backward_gpu(const vector*>& top, const Dtype* bottom_data = bottom[0]->gpu_data(); // Gradient with respect to weight caffe_gpu_gemm(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., - top_diff, bottom_data, (Dtype)1., this->blobs_[0]->mutable_gpu_diff()); + top_diff, bottom_data, (Dtype)0., this->blobs_[0]->mutable_gpu_diff()); } if (bias_term_ && this->param_propagate_down_[1]) { const Dtype* top_diff = top[0]->gpu_diff(); // Gradient with respect to bias caffe_gpu_gemv(CblasTrans, M_, N_, (Dtype)1., top_diff, - bias_multiplier_.gpu_data(), (Dtype)1., + bias_multiplier_.gpu_data(), (Dtype)0., this->blobs_[1]->mutable_gpu_diff()); } if (propagate_down[0]) { diff --git a/src/caffe/layers/lrn_layer.cu b/src/caffe/layers/lrn_layer.cu index 001b3c34ac1..24aa6a30130 100644 --- a/src/caffe/layers/lrn_layer.cu +++ b/src/caffe/layers/lrn_layer.cu @@ -7,46 +7,44 @@ namespace caffe { template -__global__ void LRNFillScale(const int nthreads, const Dtype* const in, +__global__ void LRNFillScale(const int nthreads, const Dtype* in, const int num, const int channels, const int height, const int width, const int size, const Dtype alpha_over_size, - const Dtype k, Dtype* const scale) { + const Dtype k, Dtype* scale) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset - const int w = index % width; - const int h = (index / width) % height; - const int n = index / width / height; - const int offset = (n * channels * height + h) * width + w; - const int step = height * width; - const Dtype* const in_off = in + offset; - Dtype* const scale_off = scale + offset; + int w = index % width; + int h = (index / width) % height; + int n = index / width / height; + int offset = (n * channels * height + h) * width + w; + int step = height * width; + in += offset; + scale += offset; int head = 0; - const int pre_pad = (size - 1) / 2; - const int post_pad = size - pre_pad - 1; + int pre_pad = (size - 1) / 2; + int post_pad = size - pre_pad - 1; Dtype 
accum_scale = 0; // fill the scale at [n, :, h, w] // accumulate values while (head < post_pad && head < channels) { - accum_scale += in_off[head * step] * in_off[head * step]; + accum_scale += in[head * step] * in[head * step]; ++head; } // both add and subtract while (head < channels) { - accum_scale += in_off[head * step] * in_off[head * step]; + accum_scale += in[head * step] * in[head * step]; if (head - size >= 0) { - accum_scale -= in_off[(head - size) * step] - * in_off[(head - size) * step]; + accum_scale -= in[(head - size) * step] * in[(head - size) * step]; } - scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; + scale[(head - post_pad) * step] = k + accum_scale * alpha_over_size; ++head; } // subtract only while (head < channels + post_pad) { if (head - size >= 0) { - accum_scale -= in_off[(head - size) * step] - * in_off[(head - size) * step]; + accum_scale -= in[(head - size) * step] * in[(head - size) * step]; } - scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; + scale[(head - post_pad) * step] = k + accum_scale * alpha_over_size; ++head; } } @@ -70,8 +68,8 @@ void LRNLayer::Forward_gpu(const vector*>& bottom, // TODO: check if it would be faster to just put it into the previous kernel. 
template -__global__ void LRNComputeOutput(const int nthreads, const Dtype* const in, - const Dtype* const scale, const Dtype negative_beta, Dtype* const out) { +__global__ void LRNComputeOutput(const int nthreads, const Dtype* in, + const Dtype* scale, const Dtype negative_beta, Dtype* out) { CUDA_KERNEL_LOOP(index, nthreads) { out[index] = in[index] * pow(scale[index], negative_beta); } @@ -120,58 +118,56 @@ void LRNLayer::Backward_gpu(const vector*>& top, } template -__global__ void LRNComputeDiff(const int nthreads, - const Dtype* const bottom_data, const Dtype* const top_data, - const Dtype* const scale, const Dtype* const top_diff, +__global__ void LRNComputeDiff(const int nthreads, const Dtype* bottom_data, + const Dtype* top_data, const Dtype* scale, const Dtype* top_diff, const int num, const int channels, const int height, const int width, const int size, const Dtype negative_beta, - const Dtype cache_ratio, Dtype* const bottom_diff) { + const Dtype cache_ratio, + Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset - const int w = index % width; - const int h = (index / width) % height; - const int n = index / width / height; - const int offset = (n * channels * height + h) * width + w; - const int step = height * width; - const Dtype* const bottom_off = bottom_data + offset; - const Dtype* const top_off = top_data + offset; - const Dtype* const scale_off = scale + offset; - const Dtype* const top_diff_off = top_diff + offset; - Dtype* const bottom_diff_off = bottom_diff + offset; + int w = index % width; + int h = (index / width) % height; + int n = index / width / height; + int offset = (n * channels * height + h) * width + w; + int step = height * width; + bottom_data += offset; + top_data += offset; + scale += offset; + top_diff += offset; + bottom_diff += offset; int head = 0; - const int pre_pad = size - (size + 1) / 2; - const int post_pad = size - pre_pad - 1; + int pre_pad = size - (size + 1) / 2; + int post_pad 
= size - pre_pad - 1; Dtype accum_ratio = 0; // accumulate values while (head < post_pad && head < channels) { - accum_ratio += top_diff_off[head * step] * top_off[head * step] / - scale_off[head * step]; + accum_ratio += top_diff[head * step] * top_data[head * step] / + scale[head * step]; ++head; } // both add and subtract while (head < channels) { - accum_ratio += top_diff_off[head * step] * top_off[head * step] / - scale_off[head * step]; + accum_ratio += top_diff[head * step] * top_data[head * step] / + scale[head * step]; if (head - size >= 0) { - accum_ratio -= top_diff_off[(head - size) * step] * - top_off[(head - size) * step] / scale_off[(head - size) * step]; + accum_ratio -= top_diff[(head - size) * step] * + top_data[(head - size) * step] / scale[(head - size) * step]; } - bottom_diff_off[(head - post_pad) * step] = - top_diff_off[(head - post_pad) * step] - * pow(scale_off[(head - post_pad) * step], negative_beta) - - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; + bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] + * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * + bottom_data[(head - post_pad) * step] * accum_ratio; ++head; } // subtract only while (head < channels + post_pad) { if (head - size >= 0) { - accum_ratio -= top_diff_off[(head - size) * step] * - top_off[(head - size) * step] / scale_off[(head - size) * step]; + accum_ratio -= top_diff[(head - size) * step] * + top_data[(head - size) * step] / scale[(head - size) * step]; } - bottom_diff_off[(head - post_pad) * step] = - top_diff_off[(head - post_pad) * step] - * pow(scale_off[(head - post_pad) * step], negative_beta) - - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; + bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] + * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * + bottom_data[(head - post_pad) * step] * accum_ratio; ++head; } } diff --git 
a/src/caffe/layers/mvn_layer.cpp b/src/caffe/layers/mvn_layer.cpp index 3e79bddcdde..b74d7b4f300 100644 --- a/src/caffe/layers/mvn_layer.cpp +++ b/src/caffe/layers/mvn_layer.cpp @@ -22,7 +22,6 @@ void MVNLayer::Reshape(const vector*>& bottom, bottom[0]->height(), bottom[0]->width()); Dtype* multiplier_data = sum_multiplier_.mutable_cpu_data(); caffe_set(sum_multiplier_.count(), Dtype(1), multiplier_data); - eps_ = this->layer_param_.mvn_param().eps(); } template @@ -37,6 +36,7 @@ void MVNLayer::Forward_cpu(const vector*>& bottom, num = bottom[0]->num() * bottom[0]->channels(); int dim = bottom[0]->count() / num; + Dtype eps = 1e-10; if (this->layer_param_.mvn_param().normalize_variance()) { // put the squares of bottom into temp_ @@ -66,7 +66,7 @@ void MVNLayer::Forward_cpu(const vector*>& bottom, caffe_powx(variance_.count(), variance_.cpu_data(), Dtype(0.5), variance_.mutable_cpu_data()); - caffe_add_scalar(variance_.count(), eps_, variance_.mutable_cpu_data()); + caffe_add_scalar(variance_.count(), eps, variance_.mutable_cpu_data()); caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., variance_.cpu_data(), sum_multiplier_.cpu_data(), 0., @@ -102,6 +102,7 @@ void MVNLayer::Backward_cpu(const vector*>& top, num = bottom[0]->num() * bottom[0]->channels(); int dim = bottom[0]->count() / num; + Dtype eps = 1e-10; if (this->layer_param_.mvn_param().normalize_variance()) { caffe_mul(temp_.count(), top_data, top_diff, bottom_diff); @@ -124,6 +125,24 @@ void MVNLayer::Backward_cpu(const vector*>& top, // put the squares of bottom into temp_ caffe_powx(temp_.count(), bottom_data, Dtype(2), temp_.mutable_cpu_data()); + + // computes variance using var(X) = E(X^2) - (EX)^2 + caffe_cpu_gemv(CblasNoTrans, num, dim, 1. / dim, bottom_data, + sum_multiplier_.cpu_data(), 0., mean_.mutable_cpu_data()); // EX + caffe_cpu_gemv(CblasNoTrans, num, dim, 1. 
/ dim, temp_.cpu_data(), + sum_multiplier_.cpu_data(), 0., + variance_.mutable_cpu_data()); // E(X^2) + caffe_powx(mean_.count(), mean_.cpu_data(), Dtype(2), + temp_.mutable_cpu_data()); // (EX)^2 + caffe_sub(mean_.count(), variance_.cpu_data(), temp_.cpu_data(), + variance_.mutable_cpu_data()); // variance + + // normalize variance + caffe_powx(variance_.count(), variance_.cpu_data(), Dtype(0.5), + variance_.mutable_cpu_data()); + + caffe_add_scalar(variance_.count(), eps, variance_.mutable_cpu_data()); + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., variance_.cpu_data(), sum_multiplier_.cpu_data(), 0., temp_.mutable_cpu_data()); diff --git a/src/caffe/layers/mvn_layer.cu b/src/caffe/layers/mvn_layer.cu index 3888a0c7106..0667f50380f 100644 --- a/src/caffe/layers/mvn_layer.cu +++ b/src/caffe/layers/mvn_layer.cu @@ -36,6 +36,8 @@ void MVNLayer::Forward_gpu(const vector*>& bottom, caffe_gpu_sub(mean_.count(), variance_.gpu_data(), temp_.gpu_data(), variance_.mutable_gpu_data()); // variance + Dtype eps = 1e-10; + // do mean and variance normalization // subtract mean caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, -1., @@ -48,7 +50,7 @@ void MVNLayer::Forward_gpu(const vector*>& bottom, caffe_gpu_powx(variance_.count(), variance_.gpu_data(), Dtype(0.5), variance_.mutable_gpu_data()); - caffe_gpu_add_scalar(variance_.count(), eps_, variance_.mutable_gpu_data()); + caffe_gpu_add_scalar(variance_.count(), eps, variance_.mutable_gpu_data()); caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., variance_.gpu_data(), sum_multiplier_.gpu_data(), 0., @@ -85,6 +87,8 @@ void MVNLayer::Backward_gpu(const vector*>& top, int dim = bottom[0]->count() / num; + Dtype eps = 1e-10; + if (this->layer_param_.mvn_param().normalize_variance()) { caffe_gpu_mul(temp_.count(), top_data, top_diff, bottom_diff); caffe_gpu_gemv(CblasNoTrans, num, dim, 1., bottom_diff, @@ -107,6 +111,23 @@ void MVNLayer::Backward_gpu(const vector*>& top, 
caffe_gpu_powx(temp_.count(), bottom_data, Dtype(2), temp_.mutable_gpu_data()); + // computes variance using var(X) = E(X^2) - (EX)^2 + caffe_gpu_gemv(CblasNoTrans, num, dim, 1. / dim, bottom_data, + sum_multiplier_.gpu_data(), 0., mean_.mutable_gpu_data()); // EX + caffe_gpu_gemv(CblasNoTrans, num, dim, 1. / dim, temp_.gpu_data(), + sum_multiplier_.gpu_data(), 0., + variance_.mutable_gpu_data()); // E(X^2) + caffe_gpu_powx(mean_.count(), mean_.gpu_data(), Dtype(2), + temp_.mutable_gpu_data()); // (EX)^2 + caffe_gpu_sub(mean_.count(), variance_.gpu_data(), temp_.gpu_data(), + variance_.mutable_gpu_data()); // variance + + // normalize variance + caffe_gpu_powx(variance_.count(), variance_.gpu_data(), Dtype(0.5), + variance_.mutable_gpu_data()); + + caffe_gpu_add_scalar(variance_.count(), eps, variance_.mutable_gpu_data()); + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., variance_.gpu_data(), sum_multiplier_.gpu_data(), 0., temp_.mutable_gpu_data()); diff --git a/src/caffe/layers/pooling_layer.cu b/src/caffe/layers/pooling_layer.cu index ca4b13f7c41..d1d48501af3 100644 --- a/src/caffe/layers/pooling_layer.cu +++ b/src/caffe/layers/pooling_layer.cu @@ -9,32 +9,31 @@ namespace caffe { template -__global__ void MaxPoolForward(const int nthreads, - const Dtype* const bottom_data, const int num, const int channels, - const int height, const int width, const int pooled_height, - const int pooled_width, const int kernel_h, const int kernel_w, - const int stride_h, const int stride_w, const int pad_h, const int pad_w, - Dtype* const top_data, int* mask, Dtype* top_mask) { +__global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data, + const int num, const int channels, const int height, + const int width, const int pooled_height, const int pooled_width, + const int kernel_h, const int kernel_w, const int stride_h, + const int stride_w, const int pad_h, const int pad_w, Dtype* top_data, + int* mask, Dtype* top_mask) { CUDA_KERNEL_LOOP(index, 
nthreads) { - const int pw = index % pooled_width; - const int ph = (index / pooled_width) % pooled_height; - const int c = (index / pooled_width / pooled_height) % channels; - const int n = index / pooled_width / pooled_height / channels; + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; - const int hend = min(hstart + kernel_h, height); - const int wend = min(wstart + kernel_w, width); + int hend = min(hstart + kernel_h, height); + int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); Dtype maxval = -FLT_MAX; int maxidx = -1; - const Dtype* const bottom_slice = - bottom_data + (n * channels + c) * height * width; + bottom_data += (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - if (bottom_slice[h * width + w] > maxval) { + if (bottom_data[h * width + w] > maxval) { maxidx = h * width + w; - maxval = bottom_slice[maxidx]; + maxval = bottom_data[maxidx]; } } } @@ -48,32 +47,30 @@ __global__ void MaxPoolForward(const int nthreads, } template -__global__ void AvePoolForward(const int nthreads, - const Dtype* const bottom_data, const int num, const int channels, - const int height, const int width, const int pooled_height, - const int pooled_width, const int kernel_h, const int kernel_w, - const int stride_h, const int stride_w, const int pad_h, const int pad_w, - Dtype* const top_data) { +__global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data, + const int num, const int channels, const int height, + const int width, const int pooled_height, const int pooled_width, + const int kernel_h, const int kernel_w, const int stride_h, + const int stride_w, const int pad_h, const int pad_w, Dtype* top_data) { CUDA_KERNEL_LOOP(index, 
nthreads) { - const int pw = index % pooled_width; - const int ph = (index / pooled_width) % pooled_height; - const int c = (index / pooled_width / pooled_height) % channels; - const int n = index / pooled_width / pooled_height / channels; + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); - const int pool_size = (hend - hstart) * (wend - wstart); + int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); Dtype aveval = 0; - const Dtype* const bottom_slice = - bottom_data + (n * channels + c) * height * width; + bottom_data += (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - aveval += bottom_slice[h * width + w]; + aveval += bottom_data[h * width + w]; } } top_data[index] = aveval / pool_size; @@ -82,38 +79,37 @@ __global__ void AvePoolForward(const int nthreads, template __global__ void StoPoolForwardTrain(const int nthreads, - const Dtype* const bottom_data, + const Dtype* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, - const int stride_w, Dtype* const rand_idx, Dtype* const top_data) { + const int stride_w, Dtype* rand_idx, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { - const int pw = index % pooled_width; - const int ph = (index / pooled_width) % pooled_height; - const int c = (index / pooled_width / pooled_height) % channels; - const int n = index / pooled_width / pooled_height / channels; - const int hstart = ph * stride_h; - 
const int hend = min(hstart + kernel_h, height); - const int wstart = pw * stride_w; - const int wend = min(wstart + kernel_w, width); + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h; + int hend = min(hstart + kernel_h, height); + int wstart = pw * stride_w; + int wend = min(wstart + kernel_w, width); Dtype cumsum = 0.; - const Dtype* const bottom_slice = - bottom_data + (n * channels + c) * height * width; + bottom_data += (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - cumsum += bottom_slice[h * width + w]; + cumsum += bottom_data[h * width + w]; } } - const float thres = rand_idx[index] * cumsum; + float thres = rand_idx[index] * cumsum; // Second pass: get value, and set index. cumsum = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - cumsum += bottom_slice[h * width + w]; + cumsum += bottom_data[h * width + w]; if (cumsum >= thres) { rand_idx[index] = ((n * channels + c) * height + h) * width + w; - top_data[index] = bottom_slice[h * width + w]; + top_data[index] = bottom_data[h * width + w]; return; } } @@ -124,30 +120,29 @@ __global__ void StoPoolForwardTrain(const int nthreads, template __global__ void StoPoolForwardTest(const int nthreads, - const Dtype* const bottom_data, + const Dtype* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, - const int stride_w, Dtype* const top_data) { + const int stride_w, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { - const int pw = index % pooled_width; - const int ph = (index / pooled_width) % pooled_height; - const int c = (index / pooled_width / pooled_height) % 
channels; - const int n = index / pooled_width / pooled_height / channels; - const int hstart = ph * stride_h; - const int hend = min(hstart + kernel_h, height); - const int wstart = pw * stride_w; - const int wend = min(wstart + kernel_w, width); + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h; + int hend = min(hstart + kernel_h, height); + int wstart = pw * stride_w; + int wend = min(wstart + kernel_w, width); // We set cumsum to be 0 to avoid divide-by-zero problems Dtype cumsum = FLT_MIN; Dtype cumvalues = 0.; - const Dtype* const bottom_slice = - bottom_data + (n * channels + c) * height * width; + bottom_data += (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - cumsum += bottom_slice[h * width + w]; - cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w]; + cumsum += bottom_data[h * width + w]; + cumvalues += bottom_data[h * width + w] * bottom_data[h * width + w]; } } top_data[index] = cumvalues / cumsum; @@ -215,43 +210,43 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, template -__global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff, - const int* const mask, const Dtype* const top_mask, const int num, - const int channels, const int height, const int width, - const int pooled_height, const int pooled_width, const int kernel_h, - const int kernel_w, const int stride_h, const int stride_w, const int pad_h, - const int pad_w, Dtype* const bottom_diff) { +__global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, + const int* mask, const Dtype* top_mask, const int num, const int channels, + const int height, const int width, const int pooled_height, + const int pooled_width, const int kernel_h, const int kernel_w, + const int 
stride_h, const int stride_w, const int pad_h, const int pad_w, + Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset - const int w = index % width; - const int h = (index / width) % height; - const int c = (index / width / height) % channels; - const int n = index / width / height / channels; - const int phstart = - (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; - const int phend = min((h + pad_h) / stride_h + 1, pooled_height); - const int pwstart = - (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; - const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); + int w = index % width; + int h = (index / width) % height; + int c = (index / width / height) % channels; + int n = index / width / height / channels; + int phstart = + (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; + int phend = min((h + pad_h) / stride_h + 1, pooled_height); + int pwstart = + (w + pad_w < kernel_w) ? 
0 : (w + pad_w - kernel_w) / stride_w + 1; + int pwend = min((w + pad_w) / stride_w + 1, pooled_width); Dtype gradient = 0; - const int offset = (n * channels + c) * pooled_height * pooled_width; - const Dtype* const top_diff_slice = top_diff + offset; + int offset = (n * channels + c) * pooled_height * pooled_width; + top_diff += offset; if (mask) { - const int* const mask_slice = mask + offset; + mask += offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { - if (mask_slice[ph * pooled_width + pw] == h * width + w) { - gradient += top_diff_slice[ph * pooled_width + pw]; + if (mask[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff[ph * pooled_width + pw]; } } } } else { - const Dtype* const top_mask_slice = top_mask + offset; + top_mask += offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { - if (top_mask_slice[ph * pooled_width + pw] == h * width + w) { - gradient += top_diff_slice[ph * pooled_width + pw]; + if (top_mask[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff[ph * pooled_width + pw]; } } } @@ -261,26 +256,25 @@ __global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff, } template -__global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff, +__global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, - Dtype* const bottom_diff) { + Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset - const int w = index % width + pad_w; - const int h = (index / width) % height + pad_h; - const int c = (index / width / height) % channels; - const int n = index / width / height / channels; - const int phstart 
= (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; - const int phend = min(h / stride_h + 1, pooled_height); - const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; - const int pwend = min(w / stride_w + 1, pooled_width); + int w = index % width + pad_w; + int h = (index / width) % height + pad_h; + int c = (index / width / height) % channels; + int n = index / width / height / channels; + int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + int phend = min(h / stride_h + 1, pooled_height); + int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; + int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; - const Dtype* const top_diff_slice = - top_diff + (n * channels + c) * pooled_height * pooled_width; + top_diff += (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size @@ -289,7 +283,7 @@ __global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff, int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); - gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; + gradient += top_diff[ph * pooled_width + pw] / pool_size; } } bottom_diff[index] = gradient; @@ -299,31 +293,29 @@ __global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff, template __global__ void StoPoolBackward(const int nthreads, - const Dtype* const rand_idx, const Dtype* const top_diff, + const Dtype* rand_idx, const Dtype* top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, - const int stride_w, Dtype* const bottom_diff) { + const int stride_w, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local 
offset - const int w = index % width; - const int h = (index / width) % height; - const int c = (index / width / height) % channels; - const int n = index / width / height / channels; - const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; - const int phend = min(h / stride_h + 1, pooled_height); - const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; - const int pwend = min(w / stride_w + 1, pooled_width); + int w = index % width; + int h = (index / width) % height; + int c = (index / width / height) % channels; + int n = index / width / height / channels; + int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + int phend = min(h / stride_h + 1, pooled_height); + int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; + int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; - const Dtype* const rand_idx_slice = - rand_idx + (n * channels + c) * pooled_height * pooled_width; - const Dtype* const top_diff_slice = - top_diff + (n * channels + c) * pooled_height * pooled_width; + rand_idx += (n * channels + c) * pooled_height * pooled_width; + top_diff += (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { - gradient += top_diff_slice[ph * pooled_width + pw] * - (index == static_cast(rand_idx_slice[ph * pooled_width + pw])); + gradient += top_diff[ph * pooled_width + pw] * + (index == static_cast(rand_idx[ph * pooled_width + pw])); } } bottom_diff[index] = gradient; diff --git a/src/caffe/layers/prelu_layer.cpp b/src/caffe/layers/prelu_layer.cpp index 81831755512..7119a274dd3 100644 --- a/src/caffe/layers/prelu_layer.cpp +++ b/src/caffe/layers/prelu_layer.cpp @@ -45,8 +45,7 @@ void PReLULayer::LayerSetUp(const vector*>& bottom, // Propagate gradients to the parameters (as directed by backward pass). 
this->param_propagate_down_.resize(this->blobs_.size(), true); - multiplier_.Reshape(vector(1, bottom[0]->count(1))); - backward_buff_.Reshape(vector(1, bottom[0]->count(1))); + multiplier_.Reshape(vector(1, bottom[0]->count() / bottom[0]->num())); caffe_set(multiplier_.count(), Dtype(1), multiplier_.mutable_cpu_data()); } @@ -113,6 +112,7 @@ void PReLULayer::Backward_cpu(const vector*>& top, // keep top_diff unchanged. if (this->param_propagate_down_[0]) { Dtype* slope_diff = this->blobs_[0]->mutable_cpu_diff(); + caffe_set(this->blobs_[0]->count(), Dtype(0), slope_diff); for (int i = 0; i < count; ++i) { int c = (i / dim) % channels / div_factor; slope_diff[c] += top_diff[i] * bottom_data[i] * (bottom_data[i] <= 0); diff --git a/src/caffe/layers/prelu_layer.cu b/src/caffe/layers/prelu_layer.cu index e1f20048f60..fd0eda5d191 100644 --- a/src/caffe/layers/prelu_layer.cu +++ b/src/caffe/layers/prelu_layer.cu @@ -75,36 +75,38 @@ void PReLULayer::Backward_gpu(const vector*>& top, bottom_data = bottom_memory_.gpu_data(); } - // Propagate to param + // Propagte to param // Since to write bottom diff will affect top diff if top and bottom blobs // are identical (in-place computaion), we first compute param backward to // keep top_diff unchanged. 
if (this->param_propagate_down_[0]) { Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff(); + // slope_diff is set as 0, then accumulated over batches + caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), slope_diff); int cdim = channels * dim; Dtype dsum = 0.; for (int n = 0; n < bottom[0]->num(); ++n) { + Dtype* temp_buff = multiplier_.mutable_gpu_diff(); // compute element-wise diff // NOLINT_NEXT_LINE(whitespace/operators) - PReLUParamBackward<<<<>>( cdim, top_diff + top[0]->offset(n), - bottom_data + bottom[0]->offset(n), - backward_buff_.mutable_gpu_diff()); + bottom_data + bottom[0]->offset(n), multiplier_.mutable_gpu_diff()); CUDA_POST_KERNEL_CHECK; if (channel_shared_) { Dtype d; - caffe_gpu_dot(channels * dim, backward_buff_.gpu_diff(), + caffe_gpu_dot(channels * dim, multiplier_.gpu_diff(), multiplier_.gpu_data(), &d); dsum += d; } else { caffe_gpu_gemv(CblasNoTrans, channels, dim, 1., - backward_buff_.gpu_diff(), multiplier_.gpu_data(), 1., + multiplier_.gpu_diff(), multiplier_.gpu_data(), 1., slope_diff); } } if (channel_shared_) { - caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum), slope_diff); + caffe_gpu_set(this->blobs_[0]->count(), Dtype(dsum), slope_diff); } } // Propagate to bottom diff --git a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp index cc236fe1e8e..077d949981c 100644 --- a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp +++ b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp @@ -71,7 +71,7 @@ void SigmoidCrossEntropyLossLayer::Backward_cpu( } #ifdef CPU_ONLY -STUB_GPU_BACKWARD(SigmoidCrossEntropyLossLayer, Backward); +STUB_GPU(SigmoidCrossEntropyLossLayer); #endif INSTANTIATE_CLASS(SigmoidCrossEntropyLossLayer); diff --git a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu index 547fa80c72f..08f7f492297 100644 --- a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu +++ 
b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu @@ -8,6 +8,26 @@ namespace caffe { +template +void SigmoidCrossEntropyLossLayer::Forward_gpu( + const vector*>& bottom, const vector*>& top) { + // The forward pass computes the sigmoid outputs. + sigmoid_bottom_vec_[0] = bottom[0]; + sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_); + // Compute the loss (negative log likelihood) + const int count = bottom[0]->count(); + const int num = bottom[0]->num(); + // Stable version of loss computation from input data + const Dtype* input_data = bottom[0]->cpu_data(); + const Dtype* target = bottom[1]->cpu_data(); + Dtype loss = 0; + for (int i = 0; i < count; ++i) { + loss -= input_data[i] * (target[i] - (input_data[i] >= 0)) - + log(1 + exp(input_data[i] - 2 * input_data[i] * (input_data[i] >= 0))); + } + top[0]->mutable_cpu_data()[0] = loss / num; +} + template void SigmoidCrossEntropyLossLayer::Backward_gpu( const vector*>& top, const vector& propagate_down, @@ -31,7 +51,7 @@ void SigmoidCrossEntropyLossLayer::Backward_gpu( } } -INSTANTIATE_LAYER_GPU_BACKWARD(SigmoidCrossEntropyLossLayer); +INSTANTIATE_LAYER_GPU_FUNCS(SigmoidCrossEntropyLossLayer); } // namespace caffe diff --git a/src/caffe/layers/slice_layer.cu b/src/caffe/layers/slice_layer.cu index 796841d3f52..e6e65677bd8 100644 --- a/src/caffe/layers/slice_layer.cu +++ b/src/caffe/layers/slice_layer.cu @@ -6,41 +6,22 @@ namespace caffe { -template -__global__ void Slice(const int nthreads, const Dtype* in_data, - const bool forward, const int num_slices, const int slice_size, - const int bottom_slice_axis, const int top_slice_axis, - const int offset_slice_axis, Dtype* out_data) { - CUDA_KERNEL_LOOP(index, nthreads) { - const int total_slice_size = slice_size * top_slice_axis; - const int slice_num = index / total_slice_size; - const int slice_index = index % total_slice_size; - const int bottom_index = slice_index + - (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size; - if 
(forward) { - out_data[index] = in_data[bottom_index]; - } else { - out_data[bottom_index] = in_data[index]; - } - } -} - template void SliceLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { int offset_slice_axis = 0; const Dtype* bottom_data = bottom[0]->gpu_data(); const int bottom_slice_axis = bottom[0]->shape(slice_axis_); - const bool kForward = true; for (int i = 0; i < top.size(); ++i) { Dtype* top_data = top[i]->mutable_gpu_data(); const int top_slice_axis = top[i]->shape(slice_axis_); - const int top_slice_size = top_slice_axis * slice_size_; - const int nthreads = top_slice_size * num_slices_; - Slice // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - nthreads, bottom_data, kForward, num_slices_, slice_size_, - bottom_slice_axis, top_slice_axis, offset_slice_axis, top_data); + for (int n = 0; n < num_slices_; ++n) { + const int top_offset = n * top_slice_axis * slice_size_; + const int bottom_offset = + (n * bottom_slice_axis + offset_slice_axis) * slice_size_; + caffe_copy(top_slice_axis * slice_size_, + bottom_data + bottom_offset, top_data + top_offset); + } offset_slice_axis += top_slice_axis; } } @@ -52,16 +33,16 @@ void SliceLayer::Backward_gpu(const vector*>& top, int offset_slice_axis = 0; Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int bottom_slice_axis = bottom[0]->shape(slice_axis_); - const bool kForward = false; for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); const int top_slice_axis = top[i]->shape(slice_axis_); - const int top_slice_size = top_slice_axis * slice_size_; - const int nthreads = top_slice_size * num_slices_; - Slice // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - nthreads, top_diff, kForward, num_slices_, slice_size_, - bottom_slice_axis, top_slice_axis, offset_slice_axis, bottom_diff); + for (int n = 0; n < num_slices_; ++n) { + const int top_offset = n * top_slice_axis * slice_size_; + const int bottom_offset = + (n * bottom_slice_axis + offset_slice_axis) 
* slice_size_; + caffe_copy(top_slice_axis * slice_size_, + top_diff + top_offset, bottom_diff + bottom_offset); + } offset_slice_axis += top_slice_axis; } } diff --git a/src/caffe/layers/triplet_loss_layer.cpp b/src/caffe/layers/triplet_loss_layer.cpp new file mode 100644 index 00000000000..fc8b9fe036f --- /dev/null +++ b/src/caffe/layers/triplet_loss_layer.cpp @@ -0,0 +1,124 @@ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/loss_layers.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +void TripletLossLayer::LayerSetUp( + const vector*>& bottom, const vector*>& top) { + LossLayer::LayerSetUp(bottom, top); + CHECK_EQ(bottom[0]->channels(), bottom[1]->channels()); + CHECK_EQ(bottom[0]->channels(), bottom[2]->channels()); + CHECK_EQ(bottom[0]->height(), 1); + CHECK_EQ(bottom[0]->width(), 1); + CHECK_EQ(bottom[1]->height(), 1); + CHECK_EQ(bottom[1]->width(), 1); + CHECK_EQ(bottom[2]->height(), 1); + CHECK_EQ(bottom[2]->width(), 1); + CHECK_EQ(bottom[3]->channels(), 1); + CHECK_EQ(bottom[3]->height(), 1); + CHECK_EQ(bottom[3]->width(), 1); + diff_pos.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); + diff_neg.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); + diff_sq_pos.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); + diff_sq_neg.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); + dist_sq_.Reshape(bottom[0]->num(), 1, 1, 1); + dist_sq_pos.Reshape(bottom[0]->num(), 1, 1, 1); + dist_sq_neg.Reshape(bottom[0]->num(), 1, 1, 1); + // vector of ones used to sum along channels + summer_vec_.Reshape(bottom[0]->channels(), 1, 1, 1); + for (int i = 0; i < bottom[0]->channels(); ++i) + summer_vec_.mutable_cpu_data()[i] = Dtype(1); +} + +template +void TripletLossLayer::Forward_cpu( + const vector*>& bottom, + const vector*>& top) { + int count = bottom[0]->count(); + caffe_sub( + count, + bottom[0]->cpu_data(), // a + bottom[1]->cpu_data(), // b + 
diff_pos.mutable_cpu_data()); // a_i-b_i + caffe_sub( + count, + bottom[0]->cpu_data(), // a + bottom[2]->cpu_data(), // c + diff_neg.mutable_cpu_data()); // a_i-c_i + const int channels = bottom[0]->channels(); + Dtype margin = this->layer_param_.triplet_loss_param().margin(); + Dtype loss(0.0); + + //Loss component calculated from ab + for (int i = 0; i < bottom[0]->num(); ++i) { + dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(channels, + diff_pos.cpu_data() + (i*channels), diff_pos.cpu_data() + (i*channels)); + //ab is a similar pair + dist_sq_ += dist_sq_pos.cpu_data()[i]; + + + //Loss component calculated from ac + dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(channels, + diff_neg.cpu_data() + (i*channels), diff_neg.cpu_data() + (i*channels)); + //ac is a dissimilar pair + dist_sq_ -= dist_sq_neg.cpu_data()[i]; + loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); + + } + loss = loss / static_cast(bottom[0]->num()) / Dtype(2); + top[0]->mutable_cpu_data()[0] = loss; +} + +template +void TripletLossLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + Dtype margin = this->layer_param_.triplet_loss_param().margin(); + for (int i = 0; i < 3; ++i) { + if (propagate_down[i]) { + const Dtype sign = (i == 0) ? 
1 : -1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(bottom[i]->num()); + int num = bottom[i]->num(); + int channels = bottom[i]->channels(); + for (int j = 0; j < num; ++j) { + Dtype* bout = bottom[i]->mutable_cpu_diff(); + if ((margin-dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // similar pairs + caffe_cpu_axpby( + channels, + alpha, + diff_pos.cpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + + // dissimilar pairs + + caffe_cpu_axpby( + channels, + -alpha, + diff_neg.cpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + + } else { + caffe_set(channels, Dtype(0), bout + (j*channels)); + } + } + } + } +} + +#ifdef CPU_ONLY +STUB_GPU(TripletLossLayer); +#endif + +INSTANTIATE_CLASS(TripletLossLayer); +REGISTER_LAYER_CLASS(TripletLoss); + +} // namespace caffe diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp index a18ee63818e..888eec1d501 100644 --- a/src/caffe/net.cpp +++ b/src/caffe/net.cpp @@ -79,17 +79,10 @@ void Net::Init(const NetParameter& in_param) { } // Setup layer. const LayerParameter& layer_param = param.layer(layer_id); - if (layer_param.propagate_down_size() > 0) { - CHECK_EQ(layer_param.propagate_down_size(), - layer_param.bottom_size()) - << "propagate_down param must be specified " - << "either 0 or bottom_size times "; - } layers_.push_back(LayerRegistry::CreateLayer(layer_param)); layer_names_.push_back(layer_param.name()); LOG(INFO) << "Creating Layer " << layer_param.name(); bool need_backward = false; - // Figure out this layer's input and output for (int bottom_id = 0; bottom_id < layer_param.bottom_size(); ++bottom_id) { @@ -158,33 +151,15 @@ void Net::Init(const NetParameter& in_param) { // Go through the net backwards to determine which blobs contribute to the // loss. We can skip backward computation for blobs that don't contribute // to the loss. 
- // Also checks if all bottom blobs don't need backward computation (possible - // because the skip_propagate_down param) and so we can skip bacward - // computation for the entire layer set blobs_under_loss; - set blobs_skip_backp; for (int layer_id = layers_.size() - 1; layer_id >= 0; --layer_id) { bool layer_contributes_loss = false; - bool layer_skip_propagate_down = true; for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) { const string& blob_name = blob_names_[top_id_vecs_[layer_id][top_id]]; if (layers_[layer_id]->loss(top_id) || (blobs_under_loss.find(blob_name) != blobs_under_loss.end())) { layer_contributes_loss = true; - } - if (blobs_skip_backp.find(blob_name) == blobs_skip_backp.end()) { - layer_skip_propagate_down = false; - } - if (layer_contributes_loss && !layer_skip_propagate_down) break; - } - // If this layer can skip backward computation, also all his bottom blobs - // don't need backpropagation - if (layer_need_backward_[layer_id] && layer_skip_propagate_down) { - layer_need_backward_[layer_id] = false; - for (int bottom_id = 0; bottom_id < bottom_vecs_[layer_id].size(); - ++bottom_id) { - bottom_need_backward_[layer_id][bottom_id] = false; } } if (!layer_contributes_loss) { layer_need_backward_[layer_id] = false; } @@ -203,11 +178,6 @@ void Net::Init(const NetParameter& in_param) { } else { bottom_need_backward_[layer_id][bottom_id] = false; } - if (!bottom_need_backward_[layer_id][bottom_id]) { - const string& blob_name = - blob_names_[bottom_id_vecs_[layer_id][bottom_id]]; - blobs_skip_backp.insert(blob_name); - } } } // Handle force_backward if needed. @@ -397,9 +367,9 @@ void Net::AppendTop(const NetParameter& param, const int layer_id, // Helper for Net::Init: add a new bottom blob to the net. 
template -int Net::AppendBottom(const NetParameter& param, const int layer_id, - const int bottom_id, set* available_blobs, - map* blob_name_to_idx) { +int Net::AppendBottom(const NetParameter& param, + const int layer_id, const int bottom_id, + set* available_blobs, map* blob_name_to_idx) { const LayerParameter& layer_param = param.layer(layer_id); const string& blob_name = layer_param.bottom(bottom_id); if (available_blobs->find(blob_name) == available_blobs->end()) { @@ -411,12 +381,7 @@ int Net::AppendBottom(const NetParameter& param, const int layer_id, bottom_vecs_[layer_id].push_back(blobs_[blob_id].get()); bottom_id_vecs_[layer_id].push_back(blob_id); available_blobs->erase(blob_name); - bool propagate_down = true; - // Check if the backpropagation on bottom_id should be skipped - if (layer_param.propagate_down_size() > 0) - propagate_down = layer_param.propagate_down(bottom_id); - const bool need_backward = blob_need_backward_[blob_id] && - propagate_down; + const bool need_backward = blob_need_backward_[blob_id]; bottom_need_backward_[layer_id].push_back(need_backward); return blob_id; } @@ -445,7 +410,7 @@ void Net::AppendParam(const NetParameter& param, const int layer_id, // (i.e., not given a param_name) or explicitly given a name that we // haven't already seen. 
param_owners_.push_back(-1); - if (param_name.size()) { + if (param_size) { param_names_index_[param_name] = net_param_id; } } else { @@ -505,6 +470,7 @@ Dtype Net::ForwardFromTo(int start, int end) { } for (int i = start; i <= end; ++i) { // LOG(ERROR) << "Forwarding " << layer_names_[i]; + layers_[i]->Reshape(bottom_vecs_[i], top_vecs_[i]); Dtype layer_loss = layers_[i]->Forward(bottom_vecs_[i], top_vecs_[i]); loss += layer_loss; if (debug_info_) { ForwardDebugInfo(i); } diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index 81a8c69d88e..edf7ae81d58 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -49,14 +49,6 @@ message FillerParameter { // The expected number of non-zero output weights for a given input in // Gaussian filler -- the default -1 means don't perform sparsification. optional int32 sparse = 7 [default = -1]; - // Normalize the filler variance by fan_in, fan_out, or their average. - // Applies to 'xavier' and 'msra' fillers. - enum VarianceNorm { - FAN_IN = 0; - FAN_OUT = 1; - AVERAGE = 2; - } - optional VarianceNorm variance_norm = 8 [default = FAN_IN]; } message NetParameter { @@ -96,7 +88,7 @@ message NetParameter { // NOTE // Update the next available ID when you add a new SolverParameter field. // -// SolverParameter next available ID: 37 (last added: iter_size) +// SolverParameter next available ID: 36 (last added: clip_gradients) message SolverParameter { ////////////////////////////////////////////////////////////////////////////// // Specifying the train and test networks @@ -149,8 +141,6 @@ message SolverParameter { // Display the loss averaged over the last average_loss iterations optional int32 average_loss = 33 [default = 1]; optional int32 max_iter = 7; // the maximum number of iterations - // accumulate gradients over `iter_size` x `batch_size` instances - optional int32 iter_size = 36 [default = 1]; optional string lr_policy = 8; // The learning rate decay policy. 
optional float gamma = 9; // The parameter to compute the learning rate. optional float power = 10; // The parameter to compute the learning rate. @@ -269,7 +259,7 @@ message ParamSpec { // NOTE // Update the next available ID when you add a new LayerParameter field. // -// LayerParameter next available layer-specific ID: 137 (last added: reduction_param) +// LayerParameter next available layer-specific ID: 132 (last added: prelu_param) message LayerParameter { optional string name = 1; // the layer name optional string type = 2; // the layer type @@ -290,10 +280,6 @@ message LayerParameter { // The blobs containing the numeric parameters of the layer. repeated BlobProto blobs = 7; - - // Specifies on which bottoms the backpropagation should be skipped. - // The size must be either 0 or equal to the number of bottoms. - repeated bool propagate_down = 11; // Rules controlling whether and when a layer is included in the network, // based on the current NetState. You may specify a non-zero number of rules @@ -326,14 +312,12 @@ message LayerParameter { optional DummyDataParameter dummy_data_param = 109; optional EltwiseParameter eltwise_param = 110; optional ExpParameter exp_param = 111; - optional FlattenParameter flatten_param = 135; optional HDF5DataParameter hdf5_data_param = 112; optional HDF5OutputParameter hdf5_output_param = 113; optional HingeLossParameter hinge_loss_param = 114; optional ImageDataParameter image_data_param = 115; optional InfogainLossParameter infogain_loss_param = 116; optional InnerProductParameter inner_product_param = 117; - optional LogParameter log_param = 134; optional LRNParameter lrn_param = 118; optional MemoryDataParameter memory_data_param = 119; optional MVNParameter mvn_param = 120; @@ -341,16 +325,14 @@ message LayerParameter { optional PowerParameter power_param = 122; optional PReLUParameter prelu_param = 131; optional PythonParameter python_param = 130; - optional ReductionParameter reduction_param = 136; optional 
ReLUParameter relu_param = 123; - optional ReshapeParameter reshape_param = 133; optional SigmoidParameter sigmoid_param = 124; optional SoftmaxParameter softmax_param = 125; - optional SPPParameter spp_param = 132; optional SliceParameter slice_param = 126; optional TanHParameter tanh_param = 127; optional ThresholdParameter threshold_param = 128; optional WindowDataParameter window_data_param = 129; + optional TripletLossParameter triplet_loss_param = 132; } // Message that stores parameters used to apply transformation @@ -370,10 +352,6 @@ message TransformationParameter { // or can be repeated the same number of times as channels // (would subtract them from the corresponding channel) repeated float mean_value = 5; - // Force the decoded image to have 3 color channels. - optional bool force_color = 6 [default = false]; - // Force the decoded image to have 1 color channels. - optional bool force_gray = 7 [default = false]; } // Message that stores parameters shared by loss layers @@ -385,9 +363,7 @@ message LossParameter { optional bool normalize = 2 [default = true]; } -// Messages that store parameters used by individual layer types follow, in -// alphabetical order. - +// Message that stores parameters used by AccuracyLayer message AccuracyParameter { // When computing accuracy, count as correct by comparing the true label to // the top k scoring classes. By default, only compare to the top scoring @@ -405,12 +381,14 @@ message AccuracyParameter { optional int32 ignore_label = 3; } +// Message that stores parameters used by ArgMaxLayer message ArgMaxParameter { // If true produce pairs (argmax, maxval) optional bool out_max_val = 1 [default = false]; optional uint32 top_k = 2 [default = 1]; } +// Message that stores parameters used by ConcatLayer message ConcatParameter { // The axis along which to concatenate -- may be negative to index from the // end (e.g., -1 for the last axis). 
Other axes must have the @@ -422,18 +400,16 @@ message ConcatParameter { optional uint32 concat_dim = 1 [default = 1]; } +// Message that stores parameters used by ContrastiveLossLayer message ContrastiveLossParameter { - // margin for dissimilar pair + //margin for dissimilar pair optional float margin = 1 [default = 1.0]; - // The first implementation of this cost did not exactly match the cost of - // Hadsell et al 2006 -- using (margin - d^2) instead of (margin - d)^2. - // legacy_version = false (the default) uses (margin - d)^2 as proposed in the - // Hadsell paper. New models should probably use this version. - // legacy_version = true uses (margin - d^2). This is kept to support / - // reproduce existing models and results - optional bool legacy_version = 2 [default = false]; } - +message TripletLossParameter { + //margin for negative triplet + optional float margin = 1 [default = 1.0]; +} +// Message that stores parameters used by ConvolutionLayer message ConvolutionParameter { optional uint32 num_output = 1; // The number of outputs for the layer optional bool bias_term = 2 [default = true]; // whether to have bias terms @@ -459,6 +435,7 @@ message ConvolutionParameter { optional Engine engine = 15 [default = DEFAULT]; } +// Message that stores parameters used by DataLayer message DataParameter { enum DB { LEVELDB = 0; @@ -489,10 +466,12 @@ message DataParameter { optional bool force_encoded_color = 9 [default = false]; } +// Message that stores parameters used by DropoutLayer message DropoutParameter { optional float dropout_ratio = 1 [default = 0.5]; // dropout ratio } +// Message that stores parameters used by DummyDataLayer. // DummyDataLayer fills any number of arbitrarily shaped blobs with random // (or constant) data generated by "Fillers" (see "message FillerParameter"). 
message DummyDataParameter { @@ -512,6 +491,7 @@ message DummyDataParameter { repeated uint32 width = 5; } +// Message that stores parameters used by EltwiseLayer message EltwiseParameter { enum EltwiseOp { PROD = 0; @@ -526,6 +506,7 @@ message EltwiseParameter { optional bool stable_prod_grad = 3 [default = true]; } +// Message that stores parameters used by ExpLayer message ExpParameter { // ExpLayer computes outputs y = base ^ (shift + scale * x), for base > 0. // Or if base is set to the default (-1), base is set to e, @@ -535,18 +516,6 @@ message ExpParameter { optional float shift = 3 [default = 0.0]; } -/// Message that stores parameters used by FlattenLayer -message FlattenParameter { - // The first axis to flatten: all preceding axes are retained in the output. - // May be negative to index from the end (e.g., -1 for the last axis). - optional int32 axis = 1 [default = 1]; - - // The last axis to flatten: all following axes are retained in the output. - // May be negative to index from the end (e.g., the default -1 for the last - // axis). - optional int32 end_axis = 2 [default = -1]; -} - // Message that stores parameters used by HDF5DataLayer message HDF5DataParameter { // Specify the data source. @@ -562,6 +531,7 @@ message HDF5DataParameter { optional bool shuffle = 3 [default = false]; } +// Message that stores parameters used by HDF5OutputLayer message HDF5OutputParameter { optional string file_name = 1; } @@ -575,6 +545,7 @@ message HingeLossParameter { optional Norm norm = 1 [default = L1]; } +// Message that stores parameters used by ImageDataLayer message ImageDataParameter { // Specify the data source. optional string source = 1; @@ -606,11 +577,13 @@ message ImageDataParameter { optional string root_folder = 12 [default = ""]; } +// Message that stores parameters InfogainLossLayer message InfogainLossParameter { // Specify the infogain matrix source. 
optional string source = 1; } +// Message that stores parameters used by InnerProductLayer message InnerProductParameter { optional uint32 num_output = 1; // The number of outputs for the layer optional bool bias_term = 2 [default = true]; // whether to have bias terms @@ -623,16 +596,6 @@ message InnerProductParameter { optional int32 axis = 5 [default = 1]; } -// Message that stores parameters used by LogLayer -message LogParameter { - // LogLayer computes outputs y = log_base(shift + scale * x), for base > 0. - // Or if base is set to the default (-1), base is set to e, - // so y = ln(shift + scale * x) = log_e(shift + scale * x) - optional float base = 1 [default = -1.0]; - optional float scale = 2 [default = 1.0]; - optional float shift = 3 [default = 0.0]; -} - // Message that stores parameters used by LRNLayer message LRNParameter { optional uint32 local_size = 1 [default = 5]; @@ -646,6 +609,7 @@ message LRNParameter { optional float k = 5 [default = 1.]; } +// Message that stores parameters used by MemoryDataLayer message MemoryDataParameter { optional uint32 batch_size = 1; optional uint32 channels = 2; @@ -653,17 +617,16 @@ message MemoryDataParameter { optional uint32 width = 4; } +// Message that stores parameters used by MVNLayer message MVNParameter { // This parameter can be set to false to normalize mean only optional bool normalize_variance = 1 [default = true]; // This parameter can be set to true to perform DNN-like MVN optional bool across_channels = 2 [default = false]; - - // Epsilon for not dividing by zero while normalizing variance - optional float eps = 3 [default = 1e-9]; } +// Message that stores parameters used by PoolingLayer message PoolingParameter { enum PoolMethod { MAX = 0; @@ -693,6 +656,7 @@ message PoolingParameter { optional bool global_pooling = 12 [default = false]; } +// Message that stores parameters used by PowerLayer message PowerParameter { // PowerLayer computes outputs y = (shift + scale * x) ^ power. 
optional float power = 1 [default = 1.0]; @@ -700,40 +664,12 @@ message PowerParameter { optional float shift = 3 [default = 0.0]; } +// Message that stores parameters used by PythonLayer message PythonParameter { optional string module = 1; optional string layer = 2; } -// Message that stores parameters used by ReductionLayer -message ReductionParameter { - enum ReductionOp { - SUM = 1; - ASUM = 2; - SUMSQ = 3; - MEAN = 4; - } - - optional ReductionOp operation = 1 [default = SUM]; // reduction operation - - // The first axis to reduce to a scalar -- may be negative to index from the - // end (e.g., -1 for the last axis). - // (Currently, only reduction along ALL "tail" axes is supported; reduction - // of axis M through N, where N < num_axes - 1, is unsupported.) - // Suppose we have an n-axis bottom Blob with shape: - // (d0, d1, d2, ..., d(m-1), dm, d(m+1), ..., d(n-1)). - // If axis == m, the output Blob will have shape - // (d0, d1, d2, ..., d(m-1)), - // and the ReductionOp operation is performed (d0 * d1 * d2 * ... * d(m-1)) - // times, each including (dm * d(m+1) * ... * d(n-1)) individual data. - // If axis == 0 (the default), the output Blob always has the empty shape - // (count 1), performing reduction across the entire input -- - // often useful for creating new loss functions. - optional int32 axis = 2 [default = 0]; - - optional float coeff = 3 [default = 1.0]; // coefficient for output -} - // Message that stores parameters used by ReLULayer message ReLUParameter { // Allow non-zero slope for negative inputs to speed up optimization @@ -750,70 +686,7 @@ message ReLUParameter { optional Engine engine = 2 [default = DEFAULT]; } -message ReshapeParameter { - // Specify the output dimensions. If some of the dimensions are set to 0, - // the corresponding dimension from the bottom layer is used (unchanged). 
- // Exactly one dimension may be set to -1, in which case its value is - // inferred from the count of the bottom blob and the remaining dimensions. - // For example, suppose we want to reshape a 2D blob "input" with shape 2 x 8: - // - // layer { - // type: "Reshape" bottom: "input" top: "output" - // reshape_param { ... } - // } - // - // If "input" is 2D with shape 2 x 8, then the following reshape_param - // specifications are all equivalent, producing a 3D blob "output" with shape - // 2 x 2 x 4: - // - // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } - // reshape_param { shape { dim: 0 dim: 2 dim: 4 } } - // reshape_param { shape { dim: 0 dim: 2 dim: -1 } } - // reshape_param { shape { dim: -1 dim: 0 dim: 2 } } - // - optional BlobShape shape = 1; - - // axis and num_axes control the portion of the bottom blob's shape that are - // replaced by (included in) the reshape. By default (axis == 0 and - // num_axes == -1), the entire bottom blob shape is included in the reshape, - // and hence the shape field must specify the entire output shape. - // - // axis may be non-zero to retain some portion of the beginning of the input - // shape (and may be negative to index from the end; e.g., -1 to begin the - // reshape after the last axis, including nothing in the reshape, - // -2 to include only the last axis, etc.). - // - // For example, suppose "input" is a 2D blob with shape 2 x 8. - // Then the following ReshapeLayer specifications are all equivalent, - // producing a blob "output" with shape 2 x 2 x 4: - // - // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } - // reshape_param { shape { dim: 2 dim: 4 } axis: 1 } - // reshape_param { shape { dim: 2 dim: 4 } axis: -3 } - // - // num_axes specifies the extent of the reshape. - // If num_axes >= 0 (and axis >= 0), the reshape will be performed only on - // input axes in the range [axis, axis+num_axes]. - // num_axes may also be -1, the default, to include all remaining axes - // (starting from axis). 
- // - // For example, suppose "input" is a 2D blob with shape 2 x 8. - // Then the following ReshapeLayer specifications are equivalent, - // producing a blob "output" with shape 1 x 2 x 8. - // - // reshape_param { shape { dim: 1 dim: 2 dim: 8 } } - // reshape_param { shape { dim: 1 dim: 2 } num_axes: 1 } - // reshape_param { shape { dim: 1 } num_axes: 0 } - // - // On the other hand, these would produce output blob shape 2 x 1 x 8: - // - // reshape_param { shape { dim: 2 dim: 1 dim: 8 } } - // reshape_param { shape { dim: 1 } axis: 1 num_axes: 0 } - // - optional int32 axis = 2 [default = 0]; - optional int32 num_axes = 3 [default = -1]; -} - +// Message that stores parameters used by SigmoidLayer message SigmoidParameter { enum Engine { DEFAULT = 0; @@ -823,6 +696,7 @@ message SigmoidParameter { optional Engine engine = 1 [default = DEFAULT]; } +// Message that stores parameters used by SliceLayer message SliceParameter { // The axis along which to slice -- may be negative to index from the end // (e.g., -1 for the last axis). @@ -849,6 +723,7 @@ message SoftmaxParameter { optional int32 axis = 2 [default = 1]; } +// Message that stores parameters used by TanHLayer message TanHParameter { enum Engine { DEFAULT = 0; @@ -858,10 +733,12 @@ message TanHParameter { optional Engine engine = 1 [default = DEFAULT]; } +// Message that stores parameters used by ThresholdLayer message ThresholdParameter { optional float threshold = 1 [default = 0]; // Strictly positive values } +// Message that stores parameters used by WindowDataLayer message WindowDataParameter { // Specify the data source. 
optional string source = 1; @@ -895,22 +772,6 @@ message WindowDataParameter { optional string root_folder = 13 [default = ""]; } -message SPPParameter { - enum PoolMethod { - MAX = 0; - AVE = 1; - STOCHASTIC = 2; - } - optional uint32 pyramid_height = 1; - optional PoolMethod pool = 2 [default = MAX]; // The pooling method - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 6 [default = DEFAULT]; -} - // DEPRECATED: use LayerParameter. message V1LayerParameter { repeated string bottom = 2; @@ -957,6 +818,7 @@ message V1LayerParameter { SPLIT = 22; SLICE = 33; TANH = 23; + TRIPLET_LOSS = 40; WINDOW_DATA = 24; THRESHOLD = 31; } @@ -1002,6 +864,7 @@ message V1LayerParameter { optional TransformationParameter transform_param = 36; optional LossParameter loss_param = 42; optional V0LayerParameter layer = 1; + optional TripletLossParameter triplet_loss_param = 43; } // DEPRECATED: V0LayerParameter is the old way of specifying layer parameters @@ -1098,6 +961,7 @@ message V0LayerParameter { optional HDF5OutputParameter hdf5_output_param = 1001; } +// Message that stores parameters used by PReLULayer message PReLUParameter { // Parametric ReLU described in K. He et al, Delving Deep into Rectifiers: // Surpassing Human-Level Performance on ImageNet Classification, 2015. 
diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp index aabe0edec80..096980dd7af 100644 --- a/src/caffe/solver.cpp +++ b/src/caffe/solver.cpp @@ -167,26 +167,7 @@ void Solver::Step(int iters) { vector losses; Dtype smoothed_loss = 0; - while (iter_ < stop_iter) { - // zero-init the params - for (int i = 0; i < net_->params().size(); ++i) { - shared_ptr > blob = net_->params()[i]; - switch (Caffe::mode()) { - case Caffe::CPU: - caffe_set(blob->count(), static_cast(0), - blob->mutable_cpu_diff()); - break; - case Caffe::GPU: -#ifndef CPU_ONLY - caffe_gpu_set(blob->count(), static_cast(0), - blob->mutable_gpu_diff()); -#else - NO_GPU; -#endif - break; - } - } - + for (; iter_ < stop_iter; ++iter_) { if (param_.test_interval() && iter_ % param_.test_interval() == 0 && (iter_ > 0 || param_.test_initialization())) { TestAll(); @@ -194,13 +175,7 @@ void Solver::Step(int iters) { const bool display = param_.display() && iter_ % param_.display() == 0; net_->set_debug_info(display && param_.debug_info()); - // accumulate the loss and gradient - Dtype loss = 0; - for (int i = 0; i < param_.iter_size(); ++i) { - loss += net_->ForwardBackward(bottom_vec); - } - loss /= param_.iter_size(); - // average the loss across iterations for smoothed reporting + Dtype loss = net_->ForwardBackward(bottom_vec); if (losses.size() < average_loss) { losses.push_back(loss); int size = losses.size(); @@ -232,14 +207,11 @@ void Solver::Step(int iters) { } } } - ApplyUpdate(); - - // Increment the internal iter_ counter -- its value should always indicate - // the number of times the weights have been updated. - ++iter_; + ComputeUpdateValue(); + net_->Update(); // Save a snapshot if needed. 
- if (param_.snapshot() && iter_ % param_.snapshot() == 0) { + if (param_.snapshot() && (iter_ + 1) % param_.snapshot() == 0) { Snapshot(); } } @@ -355,14 +327,15 @@ void Solver::Snapshot() { string model_filename, snapshot_filename; const int kBufferSize = 20; char iter_str_buffer[kBufferSize]; - snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_); + // Add one to iter_ to get the number of iterations that have completed. + snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_ + 1); filename += iter_str_buffer; model_filename = filename + ".caffemodel"; LOG(INFO) << "Snapshotting to " << model_filename; WriteProtoToBinaryFile(net_param, model_filename.c_str()); SolverState state; SnapshotSolverState(&state); - state.set_iter(iter_); + state.set_iter(iter_ + 1); state.set_learned_net(model_filename); state.set_current_step(current_step_); snapshot_filename = filename + ".solverstate"; @@ -480,138 +453,95 @@ void SGDSolver::ClipGradients() { } template -void SGDSolver::ApplyUpdate() { +void SGDSolver::ComputeUpdateValue() { + const vector > >& net_params = this->net_->params(); + const vector& net_params_lr = this->net_->params_lr(); + const vector& net_params_weight_decay = + this->net_->params_weight_decay(); + // get the learning rate Dtype rate = GetLearningRate(); if (this->param_.display() && this->iter_ % this->param_.display() == 0) { LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; } ClipGradients(); - for (int param_id = 0; param_id < this->net_->params().size(); ++param_id) { - Normalize(param_id); - Regularize(param_id); - ComputeUpdateValue(param_id, rate); - } - this->net_->Update(); -} - -template -void SGDSolver::Normalize(int param_id) { - if (this->param_.iter_size() == 1) { return; } - // Scale gradient to counterbalance accumulation. - const vector > >& net_params = this->net_->params(); - const Dtype accum_normalization = Dtype(1.) 
/ this->param_.iter_size(); - switch (Caffe::mode()) { - case Caffe::CPU: { - caffe_scal(net_params[param_id]->count(), accum_normalization, - net_params[param_id]->mutable_cpu_diff()); - break; - } - case Caffe::GPU: { -#ifndef CPU_ONLY - caffe_gpu_scal(net_params[param_id]->count(), accum_normalization, - net_params[param_id]->mutable_gpu_diff()); -#else - NO_GPU; -#endif - break; - } - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} - -template -void SGDSolver::Regularize(int param_id) { - const vector > >& net_params = this->net_->params(); - const vector& net_params_weight_decay = - this->net_->params_weight_decay(); + Dtype momentum = this->param_.momentum(); Dtype weight_decay = this->param_.weight_decay(); string regularization_type = this->param_.regularization_type(); - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; switch (Caffe::mode()) { - case Caffe::CPU: { - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else if (regularization_type == "L1") { - caffe_cpu_sign(net_params[param_id]->count(), - net_params[param_id]->cpu_data(), - temp_[param_id]->mutable_cpu_data()); - caffe_axpy(net_params[param_id]->count(), - local_decay, - temp_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; + case Caffe::CPU: + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + // Compute the value to history, and then copy them to the blob's diff. 
+ Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else if (regularization_type == "L1") { + caffe_cpu_sign(net_params[param_id]->count(), + net_params[param_id]->cpu_data(), + temp_[param_id]->mutable_cpu_data()); + caffe_axpy(net_params[param_id]->count(), + local_decay, + temp_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } } + + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), momentum, + history_[param_id]->mutable_cpu_data()); + // copy + caffe_copy(net_params[param_id]->count(), + history_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); } break; - } - case Caffe::GPU: { + case Caffe::GPU: #ifndef CPU_ONLY - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else if (regularization_type == "L1") { - caffe_gpu_sign(net_params[param_id]->count(), - net_params[param_id]->gpu_data(), - temp_[param_id]->mutable_gpu_data()); - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - temp_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + // Compute the value to history, and then copy them to the blob's diff. 
+ Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else if (regularization_type == "L1") { + caffe_gpu_sign(net_params[param_id]->count(), + net_params[param_id]->gpu_data(), + temp_[param_id]->mutable_gpu_data()); + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + temp_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } } - } -#else - NO_GPU; -#endif - break; - } - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} -template -void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { - const vector > >& net_params = this->net_->params(); - const vector& net_params_lr = this->net_->params_lr(); - Dtype momentum = this->param_.momentum(); - Dtype local_rate = rate * net_params_lr[param_id]; - // Compute the update to history, then copy it to the parameter diff. 
- switch (Caffe::mode()) { - case Caffe::CPU: { - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->cpu_diff(), momentum, - history_[param_id]->mutable_cpu_data()); - caffe_copy(net_params[param_id]->count(), - history_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - break; - } - case Caffe::GPU: { -#ifndef CPU_ONLY - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->gpu_diff(), momentum, - history_[param_id]->mutable_gpu_data()); - caffe_copy(net_params[param_id]->count(), - history_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), momentum, + history_[param_id]->mutable_gpu_data()); + // copy + caffe_copy(net_params[param_id]->count(), + history_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } #else NO_GPU; #endif break; - } default: LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); } @@ -638,138 +568,252 @@ void SGDSolver::RestoreSolverState(const SolverState& state) { } template -void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { +void NesterovSolver::ComputeUpdateValue() { const vector > >& net_params = this->net_->params(); const vector& net_params_lr = this->net_->params_lr(); + const vector& net_params_weight_decay = + this->net_->params_weight_decay(); + // get the learning rate + Dtype rate = this->GetLearningRate(); + if (this->param_.display() && this->iter_ % this->param_.display() == 0) { + LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; + } + SGDSolver::ClipGradients(); Dtype momentum = this->param_.momentum(); - Dtype local_rate = rate * net_params_lr[param_id]; + Dtype weight_decay = this->param_.weight_decay(); + string regularization_type = this->param_.regularization_type(); switch (Caffe::mode()) { - case Caffe::CPU: { - // save history momentum for stepping back - 
caffe_copy(net_params[param_id]->count(), - this->history_[param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); - - // update history - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->cpu_diff(), momentum, - this->history_[param_id]->mutable_cpu_data()); - - // compute update: step back then over step - caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, - this->history_[param_id]->cpu_data(), -momentum, - this->update_[param_id]->mutable_cpu_data()); - - // copy - caffe_copy(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); + case Caffe::CPU: + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + // save history momentum for stepping back + caffe_copy(net_params[param_id]->count(), + this->history_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else if (regularization_type == "L1") { + caffe_cpu_sign(net_params[param_id]->count(), + net_params[param_id]->cpu_data(), + this->temp_[param_id]->mutable_cpu_data()); + caffe_axpy(net_params[param_id]->count(), + local_decay, + this->temp_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } + } + + // update history + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), momentum, + this->history_[param_id]->mutable_cpu_data()); + + // compute update: step back then over step + caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, + 
this->history_[param_id]->cpu_data(), -momentum, + this->update_[param_id]->mutable_cpu_data()); + + // copy + caffe_copy(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } break; - } - case Caffe::GPU: { + case Caffe::GPU: #ifndef CPU_ONLY - // save history momentum for stepping back - caffe_copy(net_params[param_id]->count(), - this->history_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - // update history - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->gpu_diff(), momentum, - this->history_[param_id]->mutable_gpu_data()); - - // compute update: step back then over step - caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, - this->history_[param_id]->gpu_data(), -momentum, - this->update_[param_id]->mutable_gpu_data()); - - // copy - caffe_copy(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + // save history momentum for stepping back + caffe_copy(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else if (regularization_type == "L1") { + caffe_gpu_sign(net_params[param_id]->count(), + net_params[param_id]->gpu_data(), + this->temp_[param_id]->mutable_gpu_data()); + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + this->temp_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " 
<< regularization_type; + } + } + + // update history + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), momentum, + this->history_[param_id]->mutable_gpu_data()); + + // compute update: step back then over step + caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, + this->history_[param_id]->gpu_data(), -momentum, + this->update_[param_id]->mutable_gpu_data()); + + // copy + caffe_copy(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } #else NO_GPU; #endif break; - } default: LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); } } template -void AdaGradSolver::ComputeUpdateValue(int param_id, Dtype rate) { +void AdaGradSolver::ComputeUpdateValue() { const vector > >& net_params = this->net_->params(); const vector& net_params_lr = this->net_->params_lr(); + const vector& net_params_weight_decay = + this->net_->params_weight_decay(); + // get the learning rate + Dtype rate = this->GetLearningRate(); Dtype delta = this->param_.delta(); - Dtype local_rate = rate * net_params_lr[param_id]; + if (this->param_.display() && this->iter_ % this->param_.display() == 0) { + LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; + } + SGDSolver::ClipGradients(); + Dtype weight_decay = this->param_.weight_decay(); + string regularization_type = this->param_.regularization_type(); switch (Caffe::mode()) { - case Caffe::CPU: { - // compute square of gradient in update - caffe_powx(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), Dtype(2), - this->update_[param_id]->mutable_cpu_data()); - - // update history - caffe_add(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), - this->history_[param_id]->cpu_data(), - this->history_[param_id]->mutable_cpu_data()); - - // prepare update - caffe_powx(net_params[param_id]->count(), - this->history_[param_id]->cpu_data(), Dtype(0.5), - 
this->update_[param_id]->mutable_cpu_data()); - - caffe_add_scalar(net_params[param_id]->count(), - delta, this->update_[param_id]->mutable_cpu_data()); - - caffe_div(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), - this->update_[param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); - - // scale and copy - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->cpu_data(), Dtype(0), - net_params[param_id]->mutable_cpu_diff()); + case Caffe::CPU: + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else if (regularization_type == "L1") { + caffe_cpu_sign(net_params[param_id]->count(), + net_params[param_id]->cpu_data(), + this->temp_[param_id]->mutable_cpu_data()); + caffe_axpy(net_params[param_id]->count(), + local_decay, + this->temp_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } + } + + // compute square of gradient in update + caffe_powx(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), Dtype(2), + this->update_[param_id]->mutable_cpu_data()); + + // update history + caffe_add(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), + this->history_[param_id]->cpu_data(), + this->history_[param_id]->mutable_cpu_data()); + + // prepare update + caffe_powx(net_params[param_id]->count(), + this->history_[param_id]->cpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_cpu_data()); + + caffe_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_cpu_data()); + + 
caffe_div(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), + this->update_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + // scale and copy + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->cpu_data(), Dtype(0), + net_params[param_id]->mutable_cpu_diff()); + } break; - } - case Caffe::GPU: { + case Caffe::GPU: #ifndef CPU_ONLY - // compute square of gradient in update - caffe_gpu_powx(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), Dtype(2), - this->update_[param_id]->mutable_gpu_data()); - - // update history - caffe_gpu_add(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), - this->history_[param_id]->gpu_data(), - this->history_[param_id]->mutable_gpu_data()); - - // prepare update - caffe_gpu_powx(net_params[param_id]->count(), - this->history_[param_id]->gpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_add_scalar(net_params[param_id]->count(), - delta, this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_div(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), - this->update_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - // scale and copy - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->gpu_data(), Dtype(0), - net_params[param_id]->mutable_gpu_diff()); + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else if (regularization_type == "L1") { + caffe_gpu_sign(net_params[param_id]->count(), + net_params[param_id]->gpu_data(), + 
this->temp_[param_id]->mutable_gpu_data()); + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + this->temp_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } + } + + // compute square of gradient in update + caffe_gpu_powx(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), Dtype(2), + this->update_[param_id]->mutable_gpu_data()); + + // update history + caffe_gpu_add(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + this->history_[param_id]->gpu_data(), + this->history_[param_id]->mutable_gpu_data()); + + // prepare update + caffe_gpu_powx(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_div(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), + this->update_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + // scale and copy + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->gpu_data(), Dtype(0), + net_params[param_id]->mutable_gpu_diff()); + } #else NO_GPU; #endif break; - } default: LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); } diff --git a/src/caffe/test/test_accuracy_layer.cpp b/src/caffe/test/test_accuracy_layer.cpp index c14b67cc0e9..6cbf51df45e 100644 --- a/src/caffe/test/test_accuracy_layer.cpp +++ b/src/caffe/test/test_accuracy_layer.cpp @@ -16,7 +16,7 @@ namespace caffe { template -class AccuracyLayerTest : public CPUDeviceTest { +class AccuracyLayerTest : public ::testing::Test { protected: AccuracyLayerTest() : blob_bottom_data_(new Blob()), @@ -92,6 +92,7 @@ TYPED_TEST(AccuracyLayerTest, TestSetupTopK) { TYPED_TEST(AccuracyLayerTest, TestForwardCPU) { LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); 
AccuracyLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); @@ -117,6 +118,7 @@ TYPED_TEST(AccuracyLayerTest, TestForwardCPU) { } TYPED_TEST(AccuracyLayerTest, TestForwardWithSpatialAxes) { + Caffe::set_mode(Caffe::CPU); this->blob_bottom_data_->Reshape(2, 10, 4, 5); vector label_shape(3); label_shape[0] = 2; label_shape[1] = 4; label_shape[2] = 5; @@ -160,6 +162,7 @@ TYPED_TEST(AccuracyLayerTest, TestForwardWithSpatialAxes) { } TYPED_TEST(AccuracyLayerTest, TestForwardIgnoreLabel) { + Caffe::set_mode(Caffe::CPU); LayerParameter layer_param; const TypeParam kIgnoreLabelValue = -1; layer_param.mutable_accuracy_param()->set_ignore_label(kIgnoreLabelValue); diff --git a/src/caffe/test/test_argmax_layer.cpp b/src/caffe/test/test_argmax_layer.cpp index 895c3d372ff..3487d42f21e 100644 --- a/src/caffe/test/test_argmax_layer.cpp +++ b/src/caffe/test/test_argmax_layer.cpp @@ -13,12 +13,13 @@ namespace caffe { template -class ArgMaxLayerTest : public CPUDeviceTest { +class ArgMaxLayerTest : public ::testing::Test { protected: ArgMaxLayerTest() : blob_bottom_(new Blob(10, 20, 1, 1)), blob_top_(new Blob()), top_k_(5) { + Caffe::set_mode(Caffe::CPU); Caffe::set_random_seed(1701); // fill the values FillerParameter filler_param; diff --git a/src/caffe/test/test_contrastive_loss_layer.cpp b/src/caffe/test/test_contrastive_loss_layer.cpp index 1e9447cbc51..d269fbc26f2 100644 --- a/src/caffe/test/test_contrastive_loss_layer.cpp +++ b/src/caffe/test/test_contrastive_loss_layer.cpp @@ -22,15 +22,15 @@ class ContrastiveLossLayerTest : public MultiDeviceTest { protected: ContrastiveLossLayerTest() - : blob_bottom_data_i_(new Blob(512, 2, 1, 1)), - blob_bottom_data_j_(new Blob(512, 2, 1, 1)), - blob_bottom_y_(new Blob(512, 1, 1, 1)), + : blob_bottom_data_i_(new Blob(128, 10, 1, 1)), + blob_bottom_data_j_(new Blob(128, 10, 1, 1)), + blob_bottom_y_(new Blob(128, 1, 1, 1)), blob_top_loss_(new 
Blob()) { // fill the values FillerParameter filler_param; - filler_param.set_min(-1.0); - filler_param.set_max(1.0); // distances~=1.0 to test both sides of margin - UniformFiller filler(filler_param); + filler_param.set_mean(0.0); + filler_param.set_std(0.3); // distances~=1.0 to test both sides of margin + GaussianFiller filler(filler_param); filler.Fill(this->blob_bottom_data_i_); blob_bottom_vec_.push_back(blob_bottom_data_i_); filler.Fill(this->blob_bottom_data_j_); @@ -79,8 +79,7 @@ TYPED_TEST(ContrastiveLossLayerTest, TestForward) { if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs loss += dist_sq; } else { - Dtype dist = std::max(margin - sqrt(dist_sq), 0.0); - loss += dist*dist; + loss += std::max(margin-dist_sq, Dtype(0)); } } loss /= static_cast(num) * Dtype(2); @@ -100,47 +99,4 @@ TYPED_TEST(ContrastiveLossLayerTest, TestGradient) { this->blob_top_vec_, 1); } -TYPED_TEST(ContrastiveLossLayerTest, TestForwardLegacy) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - layer_param.mutable_contrastive_loss_param()->set_legacy_version(true); - ContrastiveLossLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - // manually compute to compare - const Dtype margin = layer_param.contrastive_loss_param().margin(); - const int num = this->blob_bottom_data_i_->num(); - const int channels = this->blob_bottom_data_i_->channels(); - Dtype loss(0); - for (int i = 0; i < num; ++i) { - Dtype dist_sq(0); - for (int j = 0; j < channels; ++j) { - Dtype diff = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - - this->blob_bottom_data_j_->cpu_data()[i*channels+j]; - dist_sq += diff*diff; - } - if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs - loss += dist_sq; - } else { - loss += std::max(margin - dist_sq, Dtype(0.0)); - } - } - loss /= static_cast(num) * Dtype(2); - EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); 
-} - -TYPED_TEST(ContrastiveLossLayerTest, TestGradientLegacy) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - layer_param.mutable_contrastive_loss_param()->set_legacy_version(true); - ContrastiveLossLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - GradientChecker checker(1e-2, 1e-2, 1701); - // check the gradient for the first two bottom layers - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_, 0); - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_, 1); -} - } // namespace caffe diff --git a/src/caffe/test/test_convolution_layer.cpp b/src/caffe/test/test_convolution_layer.cpp index 67d41fff844..c1fe3b58c58 100644 --- a/src/caffe/test/test_convolution_layer.cpp +++ b/src/caffe/test/test_convolution_layer.cpp @@ -424,7 +424,7 @@ TYPED_TEST(ConvolutionLayerTest, TestGradientGroup) { #ifdef USE_CUDNN template -class CuDNNConvolutionLayerTest : public GPUDeviceTest { +class CuDNNConvolutionLayerTest : public ::testing::Test { protected: CuDNNConvolutionLayerTest() : blob_bottom_(new Blob(2, 3, 6, 4)), @@ -467,6 +467,7 @@ class CuDNNConvolutionLayerTest : public GPUDeviceTest { TYPED_TEST_CASE(CuDNNConvolutionLayerTest, TestDtypes); TYPED_TEST(CuDNNConvolutionLayerTest, TestSetupCuDNN) { + Caffe::set_mode(Caffe::GPU); this->blob_bottom_vec_.push_back(this->blob_bottom_2_); this->blob_top_vec_.push_back(this->blob_top_2_); LayerParameter layer_param; @@ -504,6 +505,7 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSetupCuDNN) { } TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionCuDNN) { + Caffe::set_mode(Caffe::GPU); this->blob_bottom_vec_.push_back(this->blob_bottom_2_); this->blob_top_vec_.push_back(this->blob_top_2_); LayerParameter layer_param; @@ -539,6 +541,7 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionCuDNN) { } TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionGroupCuDNN) { + 
Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); @@ -569,7 +572,7 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSobelConvolutionCuDNN) { // Test separable convolution by computing the Sobel operator // as a single filter then comparing the result // as the convolution of two rectangular filters. - + Caffe::set_mode(Caffe::GPU); // Fill bottoms with identical Gaussian noise. shared_ptr > filler; FillerParameter filler_param; @@ -662,6 +665,7 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSobelConvolutionCuDNN) { } TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); @@ -679,6 +683,7 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientCuDNN) { } TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientGroupCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); diff --git a/src/caffe/test/test_data/generate_sample_data.py b/src/caffe/test/test_data/generate_sample_data.py index ab5572685cb..e5dbc3406d8 100644 --- a/src/caffe/test/test_data/generate_sample_data.py +++ b/src/caffe/test/test_data/generate_sample_data.py @@ -5,8 +5,6 @@ import numpy as np import h5py -script_dir = os.path.dirname(os.path.abspath(__file__)) - num_cols = 8 num_rows = 10 height = 6 @@ -29,12 +27,12 @@ print data print label -with h5py.File(script_dir + '/sample_data.h5', 'w') as f: +with h5py.File(os.path.dirname(__file__) + '/sample_data.h5', 'w') as f: f['data'] = data f['label'] = label f['label2'] = label2 -with h5py.File(script_dir + '/sample_data_2_gzip.h5', 'w') as f: +with h5py.File(os.path.dirname(__file__) + '/sample_data_2_gzip.h5', 'w') as f: f.create_dataset( 'data', data=data + total_size, compression='gzip', compression_opts=1 @@ -48,6 +46,6 @@ 
compression='gzip', compression_opts=1 ) -with open(script_dir + '/sample_data_list.txt', 'w') as f: - f.write(script_dir + '/sample_data.h5\n') - f.write(script_dir + '/sample_data_2_gzip.h5\n') +with open(os.path.dirname(__file__) + '/sample_data_list.txt', 'w') as f: + f.write(os.path.dirname(__file__) + '/sample_data.h5\n') + f.write(os.path.dirname(__file__) + '/sample_data_2_gzip.h5\n') diff --git a/src/caffe/test/test_dummy_data_layer.cpp b/src/caffe/test/test_dummy_data_layer.cpp index c9ed38db3a5..99548352746 100644 --- a/src/caffe/test/test_dummy_data_layer.cpp +++ b/src/caffe/test/test_dummy_data_layer.cpp @@ -13,7 +13,7 @@ namespace caffe { template -class DummyDataLayerTest : public CPUDeviceTest { +class DummyDataLayerTest : public ::testing::Test { protected: DummyDataLayerTest() : blob_top_a_(new Blob()), @@ -44,6 +44,7 @@ class DummyDataLayerTest : public CPUDeviceTest { TYPED_TEST_CASE(DummyDataLayerTest, TestDtypes); TYPED_TEST(DummyDataLayerTest, TestOneTopConstant) { + Caffe::set_mode(Caffe::CPU); LayerParameter param; DummyDataParameter* dummy_data_param = param.mutable_dummy_data_param(); dummy_data_param->add_num(5); @@ -73,6 +74,7 @@ TYPED_TEST(DummyDataLayerTest, TestOneTopConstant) { } TYPED_TEST(DummyDataLayerTest, TestTwoTopConstant) { + Caffe::set_mode(Caffe::CPU); LayerParameter param; DummyDataParameter* dummy_data_param = param.mutable_dummy_data_param(); dummy_data_param->add_num(5); @@ -111,6 +113,7 @@ TYPED_TEST(DummyDataLayerTest, TestTwoTopConstant) { } TYPED_TEST(DummyDataLayerTest, TestThreeTopConstantGaussianConstant) { + Caffe::set_mode(Caffe::CPU); LayerParameter param; DummyDataParameter* dummy_data_param = param.mutable_dummy_data_param(); dummy_data_param->add_num(5); diff --git a/src/caffe/test/test_filler.cpp b/src/caffe/test/test_filler.cpp index 728b8dc5f0d..e04b0fd22af 100644 --- a/src/caffe/test/test_filler.cpp +++ b/src/caffe/test/test_filler.cpp @@ -142,102 +142,4 @@ TYPED_TEST(GaussianFillerTest, TestFill) { 
EXPECT_LE(var, target_var * 5.); } -template -class XavierFillerTest : public ::testing::Test { - protected: - XavierFillerTest() - : blob_(new Blob(1000, 2, 4, 5)), - filler_param_() { - } - virtual void test_params(FillerParameter_VarianceNorm variance_norm, - Dtype n) { - this->filler_param_.set_variance_norm(variance_norm); - this->filler_.reset(new XavierFiller(this->filler_param_)); - this->filler_->Fill(blob_); - EXPECT_TRUE(this->blob_); - const int count = this->blob_->count(); - const Dtype* data = this->blob_->cpu_data(); - Dtype mean = 0.; - Dtype ex2 = 0.; - for (int i = 0; i < count; ++i) { - mean += data[i]; - ex2 += data[i] * data[i]; - } - mean /= count; - ex2 /= count; - Dtype std = sqrt(ex2 - mean*mean); - Dtype target_std = sqrt(2.0 / n); - EXPECT_NEAR(mean, 0.0, 0.1); - EXPECT_NEAR(std, target_std, 0.1); - } - virtual ~XavierFillerTest() { delete blob_; } - Blob* const blob_; - FillerParameter filler_param_; - shared_ptr > filler_; -}; - -TYPED_TEST_CASE(XavierFillerTest, TestDtypes); - -TYPED_TEST(XavierFillerTest, TestFillFanIn) { - TypeParam n = 2*4*5; - this->test_params(FillerParameter_VarianceNorm_FAN_IN, n); -} -TYPED_TEST(XavierFillerTest, TestFillFanOut) { - TypeParam n = 1000*4*5; - this->test_params(FillerParameter_VarianceNorm_FAN_OUT, n); -} -TYPED_TEST(XavierFillerTest, TestFillAverage) { - TypeParam n = (2*4*5 + 1000*4*5) / 2.0; - this->test_params(FillerParameter_VarianceNorm_AVERAGE, n); -} - -template -class MSRAFillerTest : public ::testing::Test { - protected: - MSRAFillerTest() - : blob_(new Blob(1000, 2, 4, 5)), - filler_param_() { - } - virtual void test_params(FillerParameter_VarianceNorm variance_norm, - Dtype n) { - this->filler_param_.set_variance_norm(variance_norm); - this->filler_.reset(new MSRAFiller(this->filler_param_)); - this->filler_->Fill(blob_); - EXPECT_TRUE(this->blob_); - const int count = this->blob_->count(); - const Dtype* data = this->blob_->cpu_data(); - Dtype mean = 0.; - Dtype ex2 = 0.; - for (int 
i = 0; i < count; ++i) { - mean += data[i]; - ex2 += data[i] * data[i]; - } - mean /= count; - ex2 /= count; - Dtype std = sqrt(ex2 - mean*mean); - Dtype target_std = sqrt(2.0 / n); - EXPECT_NEAR(mean, 0.0, 0.1); - EXPECT_NEAR(std, target_std, 0.1); - } - virtual ~MSRAFillerTest() { delete blob_; } - Blob* const blob_; - FillerParameter filler_param_; - shared_ptr > filler_; -}; - -TYPED_TEST_CASE(MSRAFillerTest, TestDtypes); - -TYPED_TEST(MSRAFillerTest, TestFillFanIn) { - TypeParam n = 2*4*5; - this->test_params(FillerParameter_VarianceNorm_FAN_IN, n); -} -TYPED_TEST(MSRAFillerTest, TestFillFanOut) { - TypeParam n = 1000*4*5; - this->test_params(FillerParameter_VarianceNorm_FAN_OUT, n); -} -TYPED_TEST(MSRAFillerTest, TestFillAverage) { - TypeParam n = (2*4*5 + 1000*4*5) / 2.0; - this->test_params(FillerParameter_VarianceNorm_AVERAGE, n); -} - } // namespace caffe diff --git a/src/caffe/test/test_flatten_layer.cpp b/src/caffe/test/test_flatten_layer.cpp index 7b6757cba32..3042d293cf7 100644 --- a/src/caffe/test/test_flatten_layer.cpp +++ b/src/caffe/test/test_flatten_layer.cpp @@ -42,48 +42,13 @@ TYPED_TEST(FlattenLayerTest, TestSetup) { LayerParameter layer_param; FlattenLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - ASSERT_EQ(this->blob_top_->num_axes(), 2); - EXPECT_EQ(this->blob_top_->shape(0), 2); - EXPECT_EQ(this->blob_top_->shape(1), 3 * 6 * 5); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 3 * 6 * 5); + EXPECT_EQ(this->blob_top_->height(), 1); + EXPECT_EQ(this->blob_top_->width(), 1); } -TYPED_TEST(FlattenLayerTest, TestSetupWithAxis) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - layer_param.mutable_flatten_param()->set_axis(2); - FlattenLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - ASSERT_EQ(this->blob_top_->num_axes(), 3); - EXPECT_EQ(this->blob_top_->shape(0), 2); - EXPECT_EQ(this->blob_top_->shape(1), 3); - 
EXPECT_EQ(this->blob_top_->shape(2), 6 * 5); -} - -TYPED_TEST(FlattenLayerTest, TestSetupWithEndAxis) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - layer_param.mutable_flatten_param()->set_end_axis(-2); - FlattenLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - ASSERT_EQ(this->blob_top_->num_axes(), 3); - EXPECT_EQ(this->blob_top_->shape(0), 2); - EXPECT_EQ(this->blob_top_->shape(1), 3 * 6); - EXPECT_EQ(this->blob_top_->shape(2), 5); -} - -TYPED_TEST(FlattenLayerTest, TestSetupWithStartAndEndAxis) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - layer_param.mutable_flatten_param()->set_axis(0); - layer_param.mutable_flatten_param()->set_end_axis(-2); - FlattenLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - ASSERT_EQ(this->blob_top_->num_axes(), 2); - EXPECT_EQ(this->blob_top_->shape(0), 2 * 3 * 6); - EXPECT_EQ(this->blob_top_->shape(1), 5); -} - -TYPED_TEST(FlattenLayerTest, TestForward) { +TYPED_TEST(FlattenLayerTest, Test) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; FlattenLayer layer(layer_param); @@ -106,4 +71,5 @@ TYPED_TEST(FlattenLayerTest, TestGradient) { this->blob_top_vec_); } + } // namespace caffe diff --git a/src/caffe/test/test_gradient_based_solver.cpp b/src/caffe/test/test_gradient_based_solver.cpp index c9135d64e70..eb2569c04f2 100644 --- a/src/caffe/test/test_gradient_based_solver.cpp +++ b/src/caffe/test/test_gradient_based_solver.cpp @@ -23,7 +23,7 @@ class GradientBasedSolverTest : public MultiDeviceTest { protected: GradientBasedSolverTest() : - seed_(1701), num_(4), channels_(3), height_(10), width_(10) {} + seed_(1701), num_(5), channels_(3), height_(10), width_(10) {} shared_ptr > solver_; int seed_; @@ -56,21 +56,19 @@ class GradientBasedSolverTest : public MultiDeviceTest { } void RunLeastSquaresSolver(const Dtype learning_rate, - const Dtype weight_decay, const Dtype momentum, 
const int num_iters, - const int iter_size = 1) { + const Dtype weight_decay, const Dtype momentum, const int num_iters) { ostringstream proto; proto << "max_iter: " << num_iters << " " "base_lr: " << learning_rate << " " "lr_policy: 'fixed' " - "iter_size: " << iter_size << " " "net_param { " " name: 'TestNetwork' " " layer { " " name: 'data' " " type: 'DummyData' " " dummy_data_param { " - " num: " << num_ / iter_size << " " + " num: " << num_ << " " " channels: " << channels_ << " " " height: " << height_ << " " " width: " << width_ << " " @@ -78,10 +76,6 @@ class GradientBasedSolverTest : public MultiDeviceTest { " height: 1 " " width: 1 " " data_filler { " - " type: 'constant' " - " value: 1.0 " - " } " - " data_filler { " " type: 'gaussian' " " std: 1.0 " " } " @@ -276,45 +270,6 @@ class GradientBasedSolverTest : public MultiDeviceTest { } } - void CheckAccumulation(const Dtype kLearningRate, const Dtype kWeightDecay, - const Dtype kMomentum, const int kNumIters, const int kIterSize) { - const double kPrecision = 1e-2; - const double kMinPrecision = 1e-7; - // Solve without accumulation and save parameters. - this->RunLeastSquaresSolver(kLearningRate, kWeightDecay, kMomentum, - kNumIters); - // Save parameters for comparison. - Net& net = *this->solver_->net(); - const vector > >& param_blobs = - net.layer_by_name("innerprod")->blobs(); - vector > > noaccum_params(param_blobs.size()); - for (int i = 0; i < param_blobs.size(); ++i) { - noaccum_params[i].reset(new Blob()); - noaccum_params[i]->CopyFrom(*param_blobs[i], false, true); - } - // Solve by equivalent accumulation of gradients over divided batches. - this->RunLeastSquaresSolver(kLearningRate, kWeightDecay, kMomentum, - kNumIters, kIterSize); - Net& net_accum = *this->solver_->net(); - const vector > >& accum_params = - net_accum.layer_by_name("innerprod")->blobs(); - // Compare accumulated parameters against no accumulation standard. 
- const int D = this->channels_ * this->height_ * this->width_; - for (int i = 0; i < D; ++i) { - const Dtype expected_param = noaccum_params[0]->cpu_data()[i]; - const Dtype accum_param = accum_params[0]->cpu_data()[i]; - const Dtype error_margin = std::max(kMinPrecision, kPrecision * - std::min(fabs(expected_param), fabs(accum_param))); - EXPECT_NEAR(expected_param, accum_param, error_margin); - } - ASSERT_EQ(1, accum_params[1]->count()); - const Dtype expected_bias = noaccum_params[1]->cpu_data()[0]; - const Dtype accum_bias = accum_params[1]->cpu_data()[0]; - const Dtype error_margin = std::max(kMinPrecision, kPrecision * - std::min(fabs(expected_bias), fabs(accum_bias))); - EXPECT_NEAR(expected_bias, accum_bias, error_margin); - } - // Test that the correct update is computed for a regularized least squares // problem: // @@ -417,16 +372,6 @@ TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverything) { } } -TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - const int kIterSize = 2; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} template class AdaGradSolverTest : public GradientBasedSolverTest { @@ -471,16 +416,6 @@ TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateWithEverything) { } } -TYPED_TEST(AdaGradSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.0; - const int kNumIters = 4; - const int kIterSize = 2; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} template class NesterovSolverTest : public GradientBasedSolverTest { @@ -547,15 +482,4 @@ TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithEverything) 
{ } } -TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - const int kIterSize = 2; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} - } // namespace caffe diff --git a/src/caffe/test/test_im2col_kernel.cu b/src/caffe/test/test_im2col_kernel.cu index 0017ac23e69..ee684c00255 100644 --- a/src/caffe/test/test_im2col_kernel.cu +++ b/src/caffe/test/test_im2col_kernel.cu @@ -25,7 +25,7 @@ __global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; template -class Im2colKernelTest : public GPUDeviceTest { +class Im2colKernelTest : public ::testing::Test { protected: Im2colKernelTest() // big so launches > 1024 threads @@ -68,6 +68,8 @@ class Im2colKernelTest : public GPUDeviceTest { TYPED_TEST_CASE(Im2colKernelTest, TestDtypes); TYPED_TEST(Im2colKernelTest, TestGPU) { + Caffe::set_mode(Caffe::GPU); + // Reshape the blobs to correct size for im2col output this->blob_top_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, diff --git a/src/caffe/test/test_math_functions.cpp b/src/caffe/test/test_math_functions.cpp index a095b544e17..667f744bdd7 100644 --- a/src/caffe/test/test_math_functions.cpp +++ b/src/caffe/test/test_math_functions.cpp @@ -15,10 +15,8 @@ namespace caffe { -template -class MathFunctionsTest : public MultiDeviceTest { - typedef typename TypeParam::Dtype Dtype; - +template +class MathFunctionsTest : public ::testing::Test { protected: MathFunctionsTest() : blob_bottom_(new Blob()), @@ -66,19 +64,14 @@ class MathFunctionsTest : public MultiDeviceTest { Blob* const blob_top_; }; -template -class CPUMathFunctionsTest - : public MathFunctionsTest > { -}; - -TYPED_TEST_CASE(CPUMathFunctionsTest, TestDtypes); 
+TYPED_TEST_CASE(MathFunctionsTest, TestDtypes); -TYPED_TEST(CPUMathFunctionsTest, TestNothing) { +TYPED_TEST(MathFunctionsTest, TestNothing) { // The first test case of a test suite takes the longest time // due to the set up overhead. } -TYPED_TEST(CPUMathFunctionsTest, TestHammingDistance) { +TYPED_TEST(MathFunctionsTest, TestHammingDistanceCPU) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); const TypeParam* y = this->blob_top_->cpu_data(); @@ -86,7 +79,7 @@ TYPED_TEST(CPUMathFunctionsTest, TestHammingDistance) { caffe_cpu_hamming_distance(n, x, y)); } -TYPED_TEST(CPUMathFunctionsTest, TestAsum) { +TYPED_TEST(MathFunctionsTest, TestAsumCPU) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); TypeParam std_asum = 0; @@ -97,7 +90,7 @@ TYPED_TEST(CPUMathFunctionsTest, TestAsum) { EXPECT_LT((cpu_asum - std_asum) / std_asum, 1e-2); } -TYPED_TEST(CPUMathFunctionsTest, TestSign) { +TYPED_TEST(MathFunctionsTest, TestSignCPU) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); caffe_cpu_sign(n, x, this->blob_bottom_->mutable_cpu_diff()); @@ -107,7 +100,7 @@ TYPED_TEST(CPUMathFunctionsTest, TestSign) { } } -TYPED_TEST(CPUMathFunctionsTest, TestSgnbit) { +TYPED_TEST(MathFunctionsTest, TestSgnbitCPU) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); caffe_cpu_sgnbit(n, x, this->blob_bottom_->mutable_cpu_diff()); @@ -117,7 +110,7 @@ TYPED_TEST(CPUMathFunctionsTest, TestSgnbit) { } } -TYPED_TEST(CPUMathFunctionsTest, TestFabs) { +TYPED_TEST(MathFunctionsTest, TestFabsCPU) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); caffe_abs(n, x, this->blob_bottom_->mutable_cpu_diff()); @@ -127,7 +120,7 @@ TYPED_TEST(CPUMathFunctionsTest, TestFabs) { } } -TYPED_TEST(CPUMathFunctionsTest, TestScale) { +TYPED_TEST(MathFunctionsTest, TestScaleCPU) { int n = 
this->blob_bottom_->count(); TypeParam alpha = this->blob_bottom_->cpu_diff()[caffe_rng_rand() % this->blob_bottom_->count()]; @@ -140,10 +133,11 @@ TYPED_TEST(CPUMathFunctionsTest, TestScale) { } } -TYPED_TEST(CPUMathFunctionsTest, TestCopy) { +TYPED_TEST(MathFunctionsTest, TestCopyCPU) { const int n = this->blob_bottom_->count(); const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); TypeParam* top_data = this->blob_top_->mutable_cpu_data(); + Caffe::set_mode(Caffe::CPU); caffe_copy(n, bottom_data, top_data); for (int i = 0; i < n; ++i) { EXPECT_EQ(bottom_data[i], top_data[i]); @@ -152,14 +146,8 @@ TYPED_TEST(CPUMathFunctionsTest, TestCopy) { #ifndef CPU_ONLY -template -class GPUMathFunctionsTest : public MathFunctionsTest > { -}; - -TYPED_TEST_CASE(GPUMathFunctionsTest, TestDtypes); - // TODO: Fix caffe_gpu_hamming_distance and re-enable this test. -TYPED_TEST(GPUMathFunctionsTest, DISABLED_TestHammingDistance) { +TYPED_TEST(MathFunctionsTest, DISABLED_TestHammingDistanceGPU) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); const TypeParam* y = this->blob_top_->cpu_data(); @@ -170,7 +158,7 @@ TYPED_TEST(GPUMathFunctionsTest, DISABLED_TestHammingDistance) { EXPECT_EQ(reference_distance, computed_distance); } -TYPED_TEST(GPUMathFunctionsTest, TestAsum) { +TYPED_TEST(MathFunctionsTest, TestAsumGPU) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); TypeParam std_asum = 0; @@ -182,7 +170,7 @@ TYPED_TEST(GPUMathFunctionsTest, TestAsum) { EXPECT_LT((gpu_asum - std_asum) / std_asum, 1e-2); } -TYPED_TEST(GPUMathFunctionsTest, TestSign) { +TYPED_TEST(MathFunctionsTest, TestSignGPU) { int n = this->blob_bottom_->count(); caffe_gpu_sign(n, this->blob_bottom_->gpu_data(), this->blob_bottom_->mutable_gpu_diff()); @@ -193,7 +181,7 @@ TYPED_TEST(GPUMathFunctionsTest, TestSign) { } } -TYPED_TEST(GPUMathFunctionsTest, TestSgnbit) { +TYPED_TEST(MathFunctionsTest, TestSgnbitGPU) { int n = 
this->blob_bottom_->count(); caffe_gpu_sgnbit(n, this->blob_bottom_->gpu_data(), this->blob_bottom_->mutable_gpu_diff()); @@ -204,7 +192,7 @@ TYPED_TEST(GPUMathFunctionsTest, TestSgnbit) { } } -TYPED_TEST(GPUMathFunctionsTest, TestFabs) { +TYPED_TEST(MathFunctionsTest, TestFabsGPU) { int n = this->blob_bottom_->count(); caffe_gpu_abs(n, this->blob_bottom_->gpu_data(), this->blob_bottom_->mutable_gpu_diff()); @@ -215,7 +203,7 @@ TYPED_TEST(GPUMathFunctionsTest, TestFabs) { } } -TYPED_TEST(GPUMathFunctionsTest, TestScale) { +TYPED_TEST(MathFunctionsTest, TestScaleGPU) { int n = this->blob_bottom_->count(); TypeParam alpha = this->blob_bottom_->cpu_diff()[caffe_rng_rand() % this->blob_bottom_->count()]; @@ -228,10 +216,11 @@ TYPED_TEST(GPUMathFunctionsTest, TestScale) { } } -TYPED_TEST(GPUMathFunctionsTest, TestCopy) { +TYPED_TEST(MathFunctionsTest, TestCopyGPU) { const int n = this->blob_bottom_->count(); const TypeParam* bottom_data = this->blob_bottom_->gpu_data(); TypeParam* top_data = this->blob_top_->mutable_gpu_data(); + Caffe::set_mode(Caffe::GPU); caffe_copy(n, bottom_data, top_data); bottom_data = this->blob_bottom_->cpu_data(); top_data = this->blob_top_->mutable_cpu_data(); diff --git a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp index b2db984feb1..9038017e3e2 100644 --- a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp +++ b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp @@ -16,7 +16,7 @@ namespace caffe { template -class MultinomialLogisticLossLayerTest : public CPUDeviceTest { +class MultinomialLogisticLossLayerTest : public ::testing::Test { protected: MultinomialLogisticLossLayerTest() : blob_bottom_data_(new Blob(10, 5, 1, 1)), @@ -51,6 +51,7 @@ TYPED_TEST_CASE(MultinomialLogisticLossLayerTest, TestDtypes); TYPED_TEST(MultinomialLogisticLossLayerTest, TestGradientCPU) { LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); MultinomialLogisticLossLayer 
layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); GradientChecker checker(1e-2, 2*1e-2, 1701, 0, 0.05); diff --git a/src/caffe/test/test_net.cpp b/src/caffe/test/test_net.cpp index 782a96bc9b6..08106e79274 100644 --- a/src/caffe/test/test_net.cpp +++ b/src/caffe/test/test_net.cpp @@ -613,103 +613,6 @@ class NetTest : public MultiDeviceTest { InitNetFromProtoString(proto); } - virtual void InitSkipPropNet(bool test_skip_true) { - string proto = - "name: 'SkipPropTestNetwork' " - "layer { " - " name: 'data' " - " type: 'DummyData' " - " dummy_data_param { " - " shape { " - " dim: 5 " - " dim: 2 " - " dim: 3 " - " dim: 4 " - " } " - " data_filler { " - " type: 'gaussian' " - " std: 0.01 " - " } " - " shape { " - " dim: 5 " - " } " - " data_filler { " - " type: 'constant' " - " value: 0 " - " } " - " } " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'silence' " - " bottom: 'label' " - " type: 'Silence' " - "} " - "layer { " - " name: 'innerproduct' " - " type: 'InnerProduct' " - " inner_product_param { " - " num_output: 1 " - " weight_filler { " - " type: 'gaussian' " - " std: 0.01 " - " } " - " bias_filler { " - " type: 'constant' " - " value: 0 " - " } " - " } " - " param { " - " lr_mult: 1 " - " decay_mult: 1 " - " } " - " param { " - " lr_mult: 2 " - " decay_mult: 0 " - " } " - " bottom: 'data' " - " top: 'innerproduct' " - "} " - "layer { " - " name: 'ip_fake_labels' " - " type: 'InnerProduct' " - " inner_product_param { " - " num_output: 1 " - " weight_filler { " - " type: 'gaussian' " - " std: 0.01 " - " } " - " bias_filler { " - " type: 'constant' " - " value: 0 " - " } " - " } " - " bottom: 'data' " - " top: 'fake_labels' " - "} " - "layer { " - " name: 'argmax' " - " bottom: 'fake_labels' " - " top: 'label_argmax' " - " type: 'ArgMax' " - "} " - "layer { " - " name: 'loss' " - " bottom: 'innerproduct' " - " bottom: 'label_argmax' "; - if (test_skip_true) - proto += " propagate_down: [true, false] "; - else - 
proto += " propagate_down: [true, true] "; - proto += - " top: 'cross_entropy_loss' " - " type: 'SigmoidCrossEntropyLoss' " - " loss_weight: 0.1 " - "} "; - InitNetFromProtoString(proto); - } - int seed_; shared_ptr > net_; }; @@ -2321,52 +2224,4 @@ TYPED_TEST(NetTest, TestReshape) { } } -TYPED_TEST(NetTest, TestSkipPropagateDown) { - // check bottom_need_backward if propagate_down is true - this->InitSkipPropNet(false); - vector vec_layer_need_backward = this->net_->layer_need_backward(); - for (int layer_id = 0; layer_id < this->net_->layers().size(); ++layer_id) { - string layer_name = this->net_->layer_names()[layer_id]; - if (layer_name == "loss") { - // access to bottom_need_backward coresponding to label's blob - bool need_back = this->net_->bottom_need_backward()[layer_id][1]; - // if propagate_down is true, the loss layer will try to - // backpropagate on labels - EXPECT_TRUE(need_back) << "bottom_need_backward should be True"; - } - // layer_need_backward should be True except for data and silence layers - if (layer_name.find("data") != std::string::npos || - layer_name == "silence") { - EXPECT_FALSE(vec_layer_need_backward[layer_id]) - << "layer_need_backward for " << layer_name << " should be False"; - } else { - EXPECT_TRUE(vec_layer_need_backward[layer_id]) - << "layer_need_backward for " << layer_name << " should be True"; - } - } - // check bottom_need_backward if propagat_down is false - this->InitSkipPropNet(true); - vec_layer_need_backward.clear(); - vec_layer_need_backward = this->net_->layer_need_backward(); - for (int layer_id = 0; layer_id < this->net_->layers().size(); ++layer_id) { - string layer_name = this->net_->layer_names()[layer_id]; - if (layer_name == "loss") { - // access to bottom_need_backward coresponding to label's blob - bool need_back = this->net_->bottom_need_backward()[layer_id][1]; - // if propagate_down is false, the loss layer will not try to - // backpropagate on labels - EXPECT_FALSE(need_back) << "bottom_need_backward 
should be False"; - } - // layer_need_backward should be False except for innerproduct and - // loss layers - if (layer_name == "innerproduct" || layer_name == "loss") { - EXPECT_TRUE(vec_layer_need_backward[layer_id]) - << "layer_need_backward for " << layer_name << " should be True"; - } else { - EXPECT_FALSE(vec_layer_need_backward[layer_id]) - << "layer_need_backward for " << layer_name << " should be False"; - } - } -} - } // namespace caffe diff --git a/src/caffe/test/test_neuron_layer.cpp b/src/caffe/test/test_neuron_layer.cpp index c6e4d27b903..c9d52f247a6 100644 --- a/src/caffe/test/test_neuron_layer.cpp +++ b/src/caffe/test/test_neuron_layer.cpp @@ -117,49 +117,6 @@ class NeuronLayerTest : public MultiDeviceTest { + slope_data[c] * std::min(bottom_data[i], (Dtype)(0))); } } - - void LogBottomInit() { - FillerParameter filler_param; - GaussianFiller filler(filler_param); - filler.Fill(this->blob_bottom_); - Dtype* bottom_data = this->blob_bottom_->mutable_cpu_data(); - caffe_exp(this->blob_bottom_->count(), bottom_data, bottom_data); - } - - void TestLogForward(const float base, const float scale, const float shift) { - LogBottomInit(); - LayerParameter layer_param; - layer_param.mutable_log_param()->set_base(base); - layer_param.mutable_log_param()->set_scale(scale); - layer_param.mutable_log_param()->set_shift(shift); - LogLayer layer(layer_param); - layer.SetUp(blob_bottom_vec_, blob_top_vec_); - layer.Forward(blob_bottom_vec_, blob_top_vec_); - const Dtype kDelta = 2e-4; - const Dtype* bottom_data = blob_bottom_->cpu_data(); - const Dtype* top_data = blob_top_->cpu_data(); - for (int i = 0; i < blob_bottom_->count(); ++i) { - const Dtype bottom_val = bottom_data[i]; - const Dtype top_val = top_data[i]; - if (base == -1) { - EXPECT_NEAR(top_val, log(shift + scale * bottom_val), kDelta); - } else { - EXPECT_NEAR(top_val, log(shift + scale * bottom_val) / log(base), - kDelta); - } - } - } - - void TestLogGradient(const float base, const float scale, const 
float shift) { - LogBottomInit(); - LayerParameter layer_param; - layer_param.mutable_log_param()->set_base(base); - layer_param.mutable_log_param()->set_scale(scale); - layer_param.mutable_log_param()->set_shift(shift); - LogLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-2); - checker.CheckGradientEltwise(&layer, blob_bottom_vec_, blob_top_vec_); - } }; TYPED_TEST_CASE(NeuronLayerTest, TestDtypesAndDevices); @@ -382,88 +339,6 @@ TYPED_TEST(NeuronLayerTest, TestExpGradientBase2Shift1Scale3) { this->TestExpGradient(kBase, kScale, kShift); } -TYPED_TEST(NeuronLayerTest, TestLogLayer) { - typedef typename TypeParam::Dtype Dtype; - // Test default base of "-1" -- should actually set base := e. - const Dtype kBase = -1; - const Dtype kScale = 1; - const Dtype kShift = 0; - this->TestLogForward(kBase, kScale, kShift); -} - -TYPED_TEST(NeuronLayerTest, TestLogGradient) { - typedef typename TypeParam::Dtype Dtype; - // Test default base of "-1" -- should actually set base := e. - const Dtype kBase = -1; - const Dtype kScale = 1; - const Dtype kShift = 0; - this->TestLogGradient(kBase, kScale, kShift); -} - -TYPED_TEST(NeuronLayerTest, TestLogLayerBase2) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kBase = 2; - const Dtype kScale = 1; - const Dtype kShift = 0; - this->TestLogForward(kBase, kScale, kShift); -} - -TYPED_TEST(NeuronLayerTest, TestLogGradientBase2) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kBase = 2; - const Dtype kScale = 1; - const Dtype kShift = 0; - this->TestLogGradient(kBase, kScale, kShift); -} - -TYPED_TEST(NeuronLayerTest, TestLogLayerBase2Shift1) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kBase = 2; - const Dtype kScale = 1; - const Dtype kShift = 1; - this->TestLogForward(kBase, kScale, kShift); -} - -TYPED_TEST(NeuronLayerTest, TestLogGradientBase2Shift1) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kBase = 2; - const Dtype kScale = 1; - const Dtype kShift = 1; - 
this->TestLogGradient(kBase, kScale, kShift); -} - -TYPED_TEST(NeuronLayerTest, TestLogLayerBase2Scale3) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kBase = 2; - const Dtype kScale = 3; - const Dtype kShift = 0; - this->TestLogForward(kBase, kScale, kShift); -} - -TYPED_TEST(NeuronLayerTest, TestLogGradientBase2Scale3) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kBase = 2; - const Dtype kScale = 3; - const Dtype kShift = 0; - this->TestLogGradient(kBase, kScale, kShift); -} - -TYPED_TEST(NeuronLayerTest, TestLogLayerBase2Shift1Scale3) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kBase = 2; - const Dtype kScale = 3; - const Dtype kShift = 1; - this->TestLogForward(kBase, kScale, kShift); -} - -TYPED_TEST(NeuronLayerTest, TestLogGradientBase2Shift1Scale3) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kBase = 2; - const Dtype kScale = 3; - const Dtype kShift = 1; - this->TestLogGradient(kBase, kScale, kShift); -} - TYPED_TEST(NeuronLayerTest, TestDropoutHalf) { const float kDropoutRatio = 0.5; this->TestDropoutForward(kDropoutRatio); @@ -666,10 +541,14 @@ TYPED_TEST(NeuronLayerTest, TestPReLUInPlace) { caffe_copy(ip2.blobs()[0]->count(), ip.blobs()[0]->cpu_data(), ip2.blobs()[0]->mutable_cpu_data()); // Forward in-place + ip.Reshape(this->blob_bottom_vec_, this->blob_top_vec_); ip.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + prelu.Reshape(this->blob_top_vec_, this->blob_top_vec_); prelu.Forward(this->blob_top_vec_, this->blob_top_vec_); // Forward non-in-place + ip2.Reshape(blob_bottom_vec_2, blob_middle_vec_2); ip2.Forward(blob_bottom_vec_2, blob_middle_vec_2); + prelu2.Reshape(blob_middle_vec_2, blob_top_vec_2); prelu2.Forward(blob_middle_vec_2, blob_top_vec_2); // Check numbers for (int s = 0; s < blob_top_2->count(); ++s) { @@ -711,7 +590,7 @@ TYPED_TEST(NeuronLayerTest, TestPReLUInPlace) { #ifdef USE_CUDNN template -class CuDNNNeuronLayerTest : public GPUDeviceTest { +class CuDNNNeuronLayerTest : 
public ::testing::Test { protected: CuDNNNeuronLayerTest() : blob_bottom_(new Blob(2, 3, 4, 5)), @@ -734,6 +613,7 @@ class CuDNNNeuronLayerTest : public GPUDeviceTest { TYPED_TEST_CASE(CuDNNNeuronLayerTest, TestDtypes); TYPED_TEST(CuDNNNeuronLayerTest, TestReLUCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNReLULayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -748,6 +628,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNReLULayer layer(layer_param); GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); @@ -756,6 +637,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestReLUWithNegativeSlopeCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CHECK(google::protobuf::TextFormat::ParseFromString( "relu_param { negative_slope: 0.01 }", &layer_param)); @@ -775,6 +657,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUWithNegativeSlopeCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientWithNegativeSlopeCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CHECK(google::protobuf::TextFormat::ParseFromString( "relu_param { negative_slope: 0.01 }", &layer_param)); @@ -785,6 +668,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientWithNegativeSlopeCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNSigmoidLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -801,6 +685,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidGradientCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNSigmoidLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); @@ -809,6 +694,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, 
TestSigmoidGradientCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestTanHCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNTanHLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -831,6 +717,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestTanHCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestTanHGradientCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNTanHLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); diff --git a/src/caffe/test/test_pooling_layer.cpp b/src/caffe/test/test_pooling_layer.cpp index 69f2d5c1135..e9964e7f0b7 100644 --- a/src/caffe/test/test_pooling_layer.cpp +++ b/src/caffe/test/test_pooling_layer.cpp @@ -608,7 +608,7 @@ TYPED_TEST(PoolingLayerTest, TestGradientAvePadded) { #ifdef USE_CUDNN template -class CuDNNPoolingLayerTest : public GPUDeviceTest { +class CuDNNPoolingLayerTest : public ::testing::Test { protected: CuDNNPoolingLayerTest() : blob_bottom_(new Blob()), @@ -963,6 +963,7 @@ class CuDNNPoolingLayerTest : public GPUDeviceTest { TYPED_TEST_CASE(CuDNNPoolingLayerTest, TestDtypes); TYPED_TEST(CuDNNPoolingLayerTest, TestSetupCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -976,6 +977,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestSetupCuDNN) { } TYPED_TEST(CuDNNPoolingLayerTest, TestSetupPaddedCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -992,6 +994,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestSetupPaddedCuDNN) { /* TYPED_TEST(CuDNNPoolingLayerTest, PrintBackwardCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; layer_param.set_kernelsize(3); layer_param.set_stride(2); @@ -1017,6 +1020,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, PrintBackwardCuDNN) { */ TYPED_TEST(CuDNNPoolingLayerTest, 
TestForwardMaxCuDNN) { + Caffe::set_mode(Caffe::GPU); this->TestForwardSquare(); this->TestForwardRectHigh(); this->TestForwardRectWide(); @@ -1026,6 +1030,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxCuDNN) { // the corresponding backward test. /* TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxTopMaskCuDNN) { + Caffe::set_mode(Caffe::GPU); this->blob_top_vec_.push_back(this->blob_top_mask_); this->TestForwardSquare(); this->TestForwardRectHigh(); @@ -1034,6 +1039,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxTopMaskCuDNN) { */ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxCuDNN) { + Caffe::set_mode(Caffe::GPU); for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; @@ -1053,6 +1059,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxCuDNN) { } TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxPaddedCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -1098,6 +1105,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxPaddedCuDNN) { /* TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxTopMaskCuDNN) { + Caffe::set_mode(Caffe::GPU); for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; @@ -1118,6 +1126,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxTopMaskCuDNN) { */ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardAveCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -1143,6 +1152,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardAveCuDNN) { } TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAveCuDNN) { + Caffe::set_mode(Caffe::GPU); for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter 
layer_param; @@ -1160,6 +1170,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAveCuDNN) { } TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAvePaddedCuDNN) { + Caffe::set_mode(Caffe::GPU); for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; diff --git a/src/caffe/test/test_softmax_layer.cpp b/src/caffe/test/test_softmax_layer.cpp index 996da4b8f7c..f6674422e56 100644 --- a/src/caffe/test/test_softmax_layer.cpp +++ b/src/caffe/test/test_softmax_layer.cpp @@ -82,7 +82,7 @@ TYPED_TEST(SoftmaxLayerTest, TestGradient) { #ifdef USE_CUDNN template -class CuDNNSoftmaxLayerTest : public GPUDeviceTest { +class CuDNNSoftmaxLayerTest : public ::testing::Test { protected: CuDNNSoftmaxLayerTest() : blob_bottom_(new Blob(2, 10, 2, 3)), @@ -104,6 +104,7 @@ class CuDNNSoftmaxLayerTest : public GPUDeviceTest { TYPED_TEST_CASE(CuDNNSoftmaxLayerTest, TestDtypes); TYPED_TEST(CuDNNSoftmaxLayerTest, TestForwardCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNSoftmaxLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -137,6 +138,7 @@ TYPED_TEST(CuDNNSoftmaxLayerTest, TestForwardCuDNN) { } TYPED_TEST(CuDNNSoftmaxLayerTest, TestGradientCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNSoftmaxLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); diff --git a/src/caffe/test/test_stochastic_pooling.cpp b/src/caffe/test/test_stochastic_pooling.cpp index f84464c322c..12962c65d85 100644 --- a/src/caffe/test/test_stochastic_pooling.cpp +++ b/src/caffe/test/test_stochastic_pooling.cpp @@ -16,10 +16,8 @@ using std::min; namespace caffe { -template -class StochasticPoolingLayerTest : public MultiDeviceTest { - typedef typename TypeParam::Dtype Dtype; - +template +class StochasticPoolingLayerTest : public ::testing::Test { protected: StochasticPoolingLayerTest() : blob_bottom_(new Blob()), @@ -47,14 +45,9 @@ class 
StochasticPoolingLayerTest : public MultiDeviceTest { vector*> blob_top_vec_; }; -template -class CPUStochasticPoolingLayerTest - : public StochasticPoolingLayerTest > { -}; - -TYPED_TEST_CASE(CPUStochasticPoolingLayerTest, TestDtypes); +TYPED_TEST_CASE(StochasticPoolingLayerTest, TestDtypes); -TYPED_TEST(CPUStochasticPoolingLayerTest, TestSetup) { +TYPED_TEST(StochasticPoolingLayerTest, TestSetup) { LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -67,16 +60,8 @@ TYPED_TEST(CPUStochasticPoolingLayerTest, TestSetup) { EXPECT_EQ(this->blob_top_->width(), 2); } -#ifndef CPU_ONLY - -template -class GPUStochasticPoolingLayerTest - : public StochasticPoolingLayerTest > { -}; - -TYPED_TEST_CASE(GPUStochasticPoolingLayerTest, TestDtypes); - -TYPED_TEST(GPUStochasticPoolingLayerTest, TestStochastic) { +TYPED_TEST(StochasticPoolingLayerTest, TestStochasticGPU) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; layer_param.set_phase(TRAIN); PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); @@ -119,7 +104,8 @@ TYPED_TEST(GPUStochasticPoolingLayerTest, TestStochastic) { EXPECT_GE(total / this->blob_top_->count(), 0.55); } -TYPED_TEST(GPUStochasticPoolingLayerTest, TestStochasticTestPhase) { +TYPED_TEST(StochasticPoolingLayerTest, TestStochasticGPUTestPhase) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; layer_param.set_phase(TEST); PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); @@ -156,7 +142,8 @@ TYPED_TEST(GPUStochasticPoolingLayerTest, TestStochasticTestPhase) { } } -TYPED_TEST(GPUStochasticPoolingLayerTest, TestGradient) { +TYPED_TEST(StochasticPoolingLayerTest, TestGradientGPU) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; layer_param.set_phase(TRAIN); PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); @@ -171,6 +158,6 @@ TYPED_TEST(GPUStochasticPoolingLayerTest, TestGradient) { 
this->blob_top_vec_); } -#endif + } // namespace caffe diff --git a/src/caffe/test/test_triplet_loss_layer.cpp b/src/caffe/test/test_triplet_loss_layer.cpp new file mode 100644 index 00000000000..c8d9377fa23 --- /dev/null +++ b/src/caffe/test/test_triplet_loss_layer.cpp @@ -0,0 +1,107 @@ +#include +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class TripletLossLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + TripletLossLayerTest() + : blob_bottom_data_i_(new Blob(128, 10, 1, 1)), + blob_bottom_data_j_(new Blob(128, 10, 1, 1)), + blob_bottom_data_k_(new Blob(128, 10, 1, 1)), + blob_bottom_y_(new Blob(128, 1, 1, 1)), + blob_top_loss_(new Blob()) { + // fill the values + FillerParameter filler_param; + filler_param.set_mean(0.0); + filler_param.set_std(0.3); // distances~=1.0 to test both sides of margin + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_data_i_); + blob_bottom_vec_.push_back(blob_bottom_data_i_); + filler.Fill(this->blob_bottom_data_j_); + blob_bottom_vec_.push_back(blob_bottom_data_j_); + filler.Fill(this->blob_bottom_data_k_); + blob_bottom_vec_.push_back(blob_bottom_data_k_); + for (int i = 0; i < blob_bottom_y_->count(); ++i) { + blob_bottom_y_->mutable_cpu_data()[i] = caffe_rng_rand() % 2; // 0 or 1 + } + blob_bottom_vec_.push_back(blob_bottom_y_); + blob_top_vec_.push_back(blob_top_loss_); + } + virtual ~TripletLossLayerTest() { + delete blob_bottom_data_i_; + delete blob_bottom_data_j_; + delete blob_bottom_data_k_; + delete blob_bottom_y_; + delete blob_top_loss_; + } + + Blob* const blob_bottom_data_i_; + Blob* const blob_bottom_data_j_; + Blob* const blob_bottom_data_k_; + Blob* const blob_bottom_y_; + 
Blob* const blob_top_loss_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(TripletLossLayerTest, TestDtypesAndDevices); + +TYPED_TEST(TripletLossLayerTest, TestForward) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + TripletLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // manually compute to compare + const Dtype margin = layer_param.contrastive_loss_param().margin(); + const int num = this->blob_bottom_data_i_->num(); + const int channels = this->blob_bottom_data_i_->channels(); + Dtype loss(0); + for (int i = 0; i < num; ++i) { + Dtype dist_sq(0); + for (int j = 0; j < channels; ++j) { + Dtype diff = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - + this->blob_bottom_data_j_->cpu_data()[i*channels+j]; + dist_sq += diff*diff; + } + if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs + loss += dist_sq; + } else { + loss += std::max(margin-dist_sq, Dtype(0)); + } + } + loss /= static_cast(num) * Dtype(2); + EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); +} + +TYPED_TEST(TripletLossLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + TripletLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + GradientChecker checker(1e-2, 1e-2, 1701); + // check the gradient for the first two bottom layers + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 1); +} + +} // namespace caffe diff --git a/src/caffe/util/math_functions.cpp b/src/caffe/util/math_functions.cpp index 0aab6b17b85..13e17be582b 100644 --- a/src/caffe/util/math_functions.cpp +++ b/src/caffe/util/math_functions.cpp @@ -206,16 +206,6 @@ void caffe_exp(const int n, const double* a, double* y) { vdExp(n, a, 
y); } -template <> -void caffe_log(const int n, const float* a, float* y) { - vsLn(n, a, y); -} - -template <> -void caffe_log(const int n, const double* a, double* y) { - vdLn(n, a, y); -} - template <> void caffe_abs(const int n, const float* a, float* y) { vsAbs(n, a, y); diff --git a/src/caffe/util/math_functions.cu b/src/caffe/util/math_functions.cu index 2631a0740d6..43e65eb9a69 100644 --- a/src/caffe/util/math_functions.cu +++ b/src/caffe/util/math_functions.cu @@ -324,27 +324,6 @@ void caffe_gpu_exp(const int N, const double* a, double* y) { N, a, y); } -template -__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { - CUDA_KERNEL_LOOP(index, n) { - y[index] = log(a[index]); - } -} - -template <> -void caffe_gpu_log(const int N, const float* a, float* y) { - // NOLINT_NEXT_LINE(whitespace/operators) - log_kernel<<>>( - N, a, y); -} - -template <> -void caffe_gpu_log(const int N, const double* a, double* y) { - // NOLINT_NEXT_LINE(whitespace/operators) - log_kernel<<>>( - N, a, y); -} - template __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { From 9b571fa2ac8cbea0e66c67769962fce5814c9121 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Mon, 15 Jun 2015 10:17:44 +0800 Subject: [PATCH 05/82] macro define in upgrade_proto --- src/caffe/layers/triplet_loss_layer.cpp | 11 ++++++----- src/caffe/util/upgrade_proto.cpp | 6 ++++++ 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/src/caffe/layers/triplet_loss_layer.cpp b/src/caffe/layers/triplet_loss_layer.cpp index fc8b9fe036f..de60a5f78d5 100644 --- a/src/caffe/layers/triplet_loss_layer.cpp +++ b/src/caffe/layers/triplet_loss_layer.cpp @@ -60,14 +60,15 @@ void TripletLossLayer::Forward_cpu( dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(channels, diff_pos.cpu_data() + (i*channels), diff_pos.cpu_data() + (i*channels)); //ab is a similar pair - dist_sq_ += dist_sq_pos.cpu_data()[i]; + dist_sq_.mutable_cpu_data()[i] += dist_sq_pos.cpu_data()[i]; //Loss 
component calculated from ac dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(channels, diff_neg.cpu_data() + (i*channels), diff_neg.cpu_data() + (i*channels)); //ac is a dissimilar pair - dist_sq_ -= dist_sq_neg.cpu_data()[i]; + dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg.cpu_data()[i]; + loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); } @@ -79,16 +80,16 @@ template void TripletLossLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { Dtype margin = this->layer_param_.triplet_loss_param().margin(); - for (int i = 0; i < 3; ++i) { + for (int i = 1; i < 3; ++i) {//there must be further check to ensure the gradient calc if (propagate_down[i]) { - const Dtype sign = (i == 0) ? 1 : -1; + const Dtype sign = (i == 2) ? 1 : -1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / static_cast(bottom[i]->num()); int num = bottom[i]->num(); int channels = bottom[i]->channels(); for (int j = 0; j < num; ++j) { Dtype* bout = bottom[i]->mutable_cpu_diff(); - if ((margin-dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { // similar pairs caffe_cpu_axpby( channels, diff --git a/src/caffe/util/upgrade_proto.cpp b/src/caffe/util/upgrade_proto.cpp index 38a06026adf..ade1650374c 100644 --- a/src/caffe/util/upgrade_proto.cpp +++ b/src/caffe/util/upgrade_proto.cpp @@ -816,6 +816,10 @@ bool UpgradeV1LayerParameter(const V1LayerParameter& v1_layer_param, layer_param->mutable_threshold_param()->CopyFrom( v1_layer_param.threshold_param()); } + if (v1_layer_param.has_triplet_loss_param()) { + layer_param->mutable_triplet_loss_param()->CopyFrom( + v1_layer_param.triplet_loss_param()); + } if (v1_layer_param.has_window_data_param()) { layer_param->mutable_window_data_param()->CopyFrom( v1_layer_param.window_data_param()); @@ -913,6 +917,8 @@ const char* UpgradeV1LayerType(const V1LayerParameter_LayerType type) { return "Slice"; case V1LayerParameter_LayerType_TANH: return "TanH"; + case 
V1LayerParameter_LayerType_TRIPLET_LOSS: + return "TripletLoss"; case V1LayerParameter_LayerType_WINDOW_DATA: return "WindowData"; case V1LayerParameter_LayerType_THRESHOLD: From 011aef02648b835e72df1a98f719dfcbdda3cf42 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Mon, 15 Jun 2015 12:22:59 +0800 Subject: [PATCH 06/82] restore --- .../triplet/convert_mnist_triplet_data.cpp | 16 +- .../triplet/mnist_triplet_solver.prototxt | 2 +- .../triplet/mnist_triplet_train_test.prototxt | 6 +- examples/triplet/readme.md | 76 +-- examples/triplet/train_mnist_triplet.sh | 2 +- include/caffe/data_layers.hpp | 3 - include/caffe/data_transformer.hpp | 36 ++ include/caffe/filler.hpp | 71 ++- include/caffe/layer.hpp | 1 + include/caffe/loss_layers.hpp | 2 - include/caffe/net.hpp | 3 + include/caffe/neuron_layers.hpp | 70 ++- include/caffe/python_layer.hpp | 13 +- include/caffe/solver.hpp | 15 +- include/caffe/test/test_caffe_main.hpp | 28 +- .../caffe/test/test_gradient_check_util.hpp | 7 +- include/caffe/util/math_functions.hpp | 6 + include/caffe/util/mkl_alternate.hpp | 1 + include/caffe/vision_layers.hpp | 66 +++ src/caffe/blob.cpp | 1 + src/caffe/data_transformer.cpp | 116 +++- src/caffe/layers/base_data_layer.cpp | 11 +- src/caffe/layers/base_data_layer.cu | 6 +- src/caffe/layers/concat_layer.cu | 44 +- src/caffe/layers/contrastive_loss_layer.cpp | 25 +- src/caffe/layers/contrastive_loss_layer.cu | 34 +- src/caffe/layers/conv_layer.cpp | 7 - src/caffe/layers/conv_layer.cu | 7 - src/caffe/layers/cudnn_conv_layer.cu | 2 - src/caffe/layers/data_layer.cpp | 90 +-- src/caffe/layers/deconv_layer.cpp | 7 - src/caffe/layers/deconv_layer.cu | 7 - src/caffe/layers/flatten_layer.cpp | 16 +- src/caffe/layers/image_data_layer.cpp | 42 +- src/caffe/layers/inner_product_layer.cpp | 4 +- src/caffe/layers/inner_product_layer.cu | 4 +- src/caffe/layers/lrn_layer.cu | 102 ++-- src/caffe/layers/mvn_layer.cpp | 23 +- src/caffe/layers/mvn_layer.cu | 23 +- src/caffe/layers/pooling_layer.cu | 218 
+++---- src/caffe/layers/prelu_layer.cpp | 4 +- src/caffe/layers/prelu_layer.cu | 16 +- .../sigmoid_cross_entropy_loss_layer.cpp | 2 +- .../sigmoid_cross_entropy_loss_layer.cu | 22 +- src/caffe/layers/slice_layer.cu | 47 +- src/caffe/net.cpp | 46 +- src/caffe/proto/caffe.proto | 203 ++++++- src/caffe/solver.cpp | 554 ++++++++---------- src/caffe/test/test_accuracy_layer.cpp | 5 +- src/caffe/test/test_argmax_layer.cpp | 3 +- .../test/test_contrastive_loss_layer.cpp | 58 +- src/caffe/test/test_convolution_layer.cpp | 9 +- .../test/test_data/generate_sample_data.py | 12 +- src/caffe/test/test_dummy_data_layer.cpp | 5 +- src/caffe/test/test_filler.cpp | 98 ++++ src/caffe/test/test_flatten_layer.cpp | 46 +- src/caffe/test/test_gradient_based_solver.cpp | 82 ++- src/caffe/test/test_im2col_kernel.cu | 4 +- src/caffe/test/test_math_functions.cpp | 51 +- .../test_multinomial_logistic_loss_layer.cpp | 3 +- src/caffe/test/test_net.cpp | 145 +++++ src/caffe/test/test_neuron_layer.cpp | 139 ++++- src/caffe/test/test_pooling_layer.cpp | 13 +- src/caffe/test/test_softmax_layer.cpp | 4 +- src/caffe/test/test_stochastic_pooling.cpp | 35 +- src/caffe/test/test_triplet_loss_layer.cpp | 107 ---- src/caffe/util/math_functions.cpp | 10 + src/caffe/util/math_functions.cu | 21 + 68 files changed, 1923 insertions(+), 1034 deletions(-) delete mode 100644 src/caffe/test/test_triplet_loss_layer.cpp diff --git a/examples/triplet/convert_mnist_triplet_data.cpp b/examples/triplet/convert_mnist_triplet_data.cpp index d1eed30cba6..e35e7f4f3bf 100644 --- a/examples/triplet/convert_mnist_triplet_data.cpp +++ b/examples/triplet/convert_mnist_triplet_data.cpp @@ -60,7 +60,6 @@ void convert_dataset(const char* image_filename, const char* label_filename, cols = swap_endian(cols); // Open leveldb - leveldb::DB* db; leveldb::Options options; options.create_if_missing = true; @@ -85,7 +84,7 @@ void convert_dataset(const char* image_filename, const char* label_filename, LOG(INFO) << "A total of " << 
num_items << " items."; LOG(INFO) << "Rows: " << rows << " Cols: " << cols; for (int itemid = 0; itemid < num_items; ++itemid) { - int i = caffe::caffe_rng_rand() % num_items; // pick a random pair + int i = caffe::caffe_rng_rand() % num_items; // pick triplet groups int j = caffe::caffe_rng_rand() % num_items; int k = caffe::caffe_rng_rand() % num_items; read_image(&image_file, &label_file, i, rows, cols, @@ -94,15 +93,18 @@ void convert_dataset(const char* image_filename, const char* label_filename, pixels + (rows * cols), &label_j); read_image(&image_file, &label_file, k, rows, cols, pixels + (2 * rows * cols), &label_k); + datum.set_data(pixels, 3*rows*cols); - if (label_i == label_j && label_i == label_k) { + if (label_i == label_j && label_i != label_k) { datum.set_label(1); + + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", itemid); + db->Put(leveldb::WriteOptions(), std::string(key), value); } else { + itemid--; datum.set_label(0); } - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", itemid); - db->Put(leveldb::WriteOptions(), std::string(key), value); } delete db; @@ -112,7 +114,7 @@ void convert_dataset(const char* image_filename, const char* label_filename, int main(int argc, char** argv) { if (argc != 4) { printf("This script converts the MNIST dataset to the leveldb format used\n" - "by caffe to train a triplet network.\n" + "by caffe to train a siamese network.\n" "Usage:\n" " convert_mnist_data input_image_file input_label_file " "output_db_file\n" diff --git a/examples/triplet/mnist_triplet_solver.prototxt b/examples/triplet/mnist_triplet_solver.prototxt index 39222b89cf0..edd8e1e0338 100644 --- a/examples/triplet/mnist_triplet_solver.prototxt +++ b/examples/triplet/mnist_triplet_solver.prototxt @@ -7,7 +7,7 @@ test_iter: 100 # Carry out testing every 500 training iterations. test_interval: 500 # The base learning rate, momentum and the weight decay of the network. 
-base_lr: 0.01 +base_lr: 0.001 momentum: 0.9 weight_decay: 0.0000 # The learning rate policy diff --git a/examples/triplet/mnist_triplet_train_test.prototxt b/examples/triplet/mnist_triplet_train_test.prototxt index 3cea2376c11..da25dec31de 100644 --- a/examples/triplet/mnist_triplet_train_test.prototxt +++ b/examples/triplet/mnist_triplet_train_test.prototxt @@ -487,12 +487,14 @@ layer { } layer { name: "loss" - type: "ContrastiveLoss" + type: "TripletLoss" bottom: "feat" + bottom: "feat_true" bottom: "feat_false" bottom: "sim" top: "loss" - contrastive_loss_param { + triplet_loss_param { margin: 1 } } + diff --git a/examples/triplet/readme.md b/examples/triplet/readme.md index ce98ec10819..524718ce2db 100644 --- a/examples/triplet/readme.md +++ b/examples/triplet/readme.md @@ -1,15 +1,15 @@ --- -title: Siamese Network Tutorial -description: Train and test a siamese network on MNIST data. +title: Triplet Network Tutorial +description: Train and test a triplet network on MNIST data. category: example include_in_docs: true layout: default priority: 100 --- -# Siamese Network Training with Caffe +# Triplet Network Training with Caffe This example shows how you can use weight sharing and a contrastive loss -function to learn a model using a siamese network in Caffe. +function to learn a model using a triplet network in Caffe. We will assume that you have caffe successfully compiled. If not, please refer to the [Installation page](../../installation.html). This example builds on the @@ -25,16 +25,16 @@ You will first need to download and convert the data from the MNIST website. To do this, simply run the following commands: ./data/mnist/get_mnist.sh - ./examples/siamese/create_mnist_siamese.sh + ./examples/triplet/create_mnist_triplet.sh After running the script there should be two datasets, -`./examples/siamese/mnist_siamese_train_leveldb`, and -`./examples/siamese/mnist_siamese_test_leveldb`. 
+`./examples/triplet/mnist_triplet_train_leveldb`, and +`./examples/triplet/mnist_triplet_test_leveldb`. ## The Model -First, we will define the model that we want to train using the siamese network. +First, we will define the model that we want to train using the triplet network. We will use the convolutional net defined in -`./examples/siamese/mnist_siamese.prototxt`. This model is almost +`./examples/triplet/mnist_triplet.prototxt`. This model is almost exactly the same as the [LeNet model](mnist.html), the only difference is that we have replaced the top layers that produced probabilities over the 10 digit classes with a linear "feature" layer that produces a 2 dimensional vector. @@ -51,11 +51,11 @@ classes with a linear "feature" layer that produces a 2 dimensional vector. } } -## Define the Siamese Network +## Define the triplet Network -In this section we will define the siamese network used for training. The +In this section we will define the triplet network used for training. The resulting network is defined in -`./examples/siamese/mnist_siamese_train_test.prototxt`. +`./examples/triplet/mnist_triplet_train_test.prototxt`. ### Reading in the Pair Data @@ -70,7 +70,7 @@ or different classes (`sim`). top: "pair_data" top: "sim" data_param { - source: "examples/siamese/mnist-siamese-train-leveldb" + source: "examples/triplet/mnist-triplet-train-leveldb" scale: 0.00390625 batch_size: 64 } @@ -95,14 +95,14 @@ and its paired image in `data_p.` } } -### Building the First Side of the Siamese Net +### Building the First Side of the triplet Net -Now we can specify the first side of the siamese net. This side operates on +Now we can specify the first side of the triplet net. This side operates on `data` and produces `feat`. Starting from the net in -`./examples/siamese/mnist_siamese.prototxt` we add default weight fillers. Then +`./examples/triplet/mnist_triplet.prototxt` we add default weight fillers. 
Then we name the parameters of the convolutional and inner product layers. Naming the parameters allows Caffe to share the parameters between layers on both sides of -the siamese net. In the definition this looks like: +the triplet net. In the definition this looks like: ... param: "conv1_w" @@ -118,12 +118,19 @@ the siamese net. In the definition this looks like: param: "ip2_b" ... -### Building the Second Side of the Siamese Net +### Building the Second Side of the triplet Net -Now we need to create the second path that operates on `data_p` and produces -`feat_p`. This path is exactly the same as the first. So we can just copy and +Now we need to create the second path that operates on `data_pos` and produces +`feat_pos`. This path is exactly the same as the first. So we can just copy and paste it. Then we change the name of each layer, input, and output by appending -`_p` to differentiate the "paired" layers from the originals. +`_pos` to differentiate the "paired" layers from the originals. + +### Building the Third Side of the triplet Net + +Now we need to create the second path that operates on `data_neg` and produces +`feat_neg`. This path is exactly the same as the first. So we can just copy and +paste it. Then we change the name of each layer, input, and output by appending +`_neg` to differentiate the "paired" layers from the originals. ### Adding the Contrastive Loss Function @@ -131,16 +138,17 @@ To train the network we will optimize a contrastive loss function proposed in: Raia Hadsell, Sumit Chopra, and Yann LeCun "Dimensionality Reduction by Learning an Invariant Mapping". This loss function encourages matching pairs to be close together in feature space while pushing non-matching pairs apart. 
This cost -function is implemented with the `CONTRASTIVE_LOSS` layer: +function is implemented with the `TRIPLET_LOSS` layer: layers { name: "loss" - type: CONTRASTIVE_LOSS - contrastive_loss_param { + type: TRIPLET_LOSS + triplet_loss_param { margin: 1.0 } bottom: "feat" - bottom: "feat_p" + bottom: "feat_pos" + bottom: "feat_neg" bottom: "sim" top: "loss" } @@ -149,31 +157,31 @@ function is implemented with the `CONTRASTIVE_LOSS` layer: Nothing special needs to be done to the solver besides pointing it at the correct model file. The solver is defined in -`./examples/siamese/mnist_siamese_solver.prototxt`. +`./examples/triplet/mnist_triplet_solver.prototxt`. ## Training and Testing the Model Training the model is simple after you have written the network definition protobuf and solver protobuf files. Simply run -`./examples/siamese/train_mnist_siamese.sh`: +`./examples/triplet/train_mnist_triplet.sh`: - ./examples/siamese/train_mnist_siamese.sh + ./examples/triplet/train_mnist_triplet.sh # Plotting the results -First, we can draw the model and siamese networks by running the following +First, we can draw the model and triplet networks by running the following commands that draw the DAGs defined in the .prototxt files: ./python/draw_net.py \ - ./examples/siamese/mnist_siamese.prototxt \ - ./examples/siamese/mnist_siamese.png + ./examples/triplet/mnist_triplet.prototxt \ + ./examples/triplet/mnist_triplet.png ./python/draw_net.py \ - ./examples/siamese/mnist_siamese_train_test.prototxt \ - ./examples/siamese/mnist_siamese_train_test.png + ./examples/triplet/mnist_triplet_train_test.prototxt \ + ./examples/triplet/mnist_triplet_train_test.png Second, we can load the learned model and plot the features using the iPython notebook: - ipython notebook ./examples/siamese/mnist_siamese.ipynb + ipython notebook ./examples/triplet/mnist_triplet.ipynb diff --git a/examples/triplet/train_mnist_triplet.sh b/examples/triplet/train_mnist_triplet.sh index e005970824a..683cda2963b 
100755 --- a/examples/triplet/train_mnist_triplet.sh +++ b/examples/triplet/train_mnist_triplet.sh @@ -1,5 +1,5 @@ #!/usr/bin/env sh -TOOLS=./build/tools +TOOLS=./release/tools $TOOLS/caffe train --solver=examples/triplet/mnist_triplet_solver.prototxt diff --git a/include/caffe/data_layers.hpp b/include/caffe/data_layers.hpp index 2bb9d948169..3958cb7ecb0 100644 --- a/include/caffe/data_layers.hpp +++ b/include/caffe/data_layers.hpp @@ -14,7 +14,6 @@ #include "caffe/filler.hpp" #include "caffe/internal_thread.hpp" #include "caffe/layer.hpp" -#include "caffe/net.hpp" #include "caffe/proto/caffe.pb.h" #include "caffe/util/db.hpp" @@ -29,7 +28,6 @@ template class BaseDataLayer : public Layer { public: explicit BaseDataLayer(const LayerParameter& param); - virtual ~BaseDataLayer() {} // LayerSetUp: implements common data layer setup functionality, and calls // DataLayerSetUp to do special data layer setup for individual layer types. // This method may not be overridden except by the BasePrefetchingDataLayer. @@ -58,7 +56,6 @@ class BasePrefetchingDataLayer : public: explicit BasePrefetchingDataLayer(const LayerParameter& param) : BaseDataLayer(param) {} - virtual ~BasePrefetchingDataLayer() {} // LayerSetUp: implements common data layer setup functionality, and calls // DataLayerSetUp to do special data layer setup for individual layer types. // This method may not be overridden. 
diff --git a/include/caffe/data_transformer.hpp b/include/caffe/data_transformer.hpp index 880356601a4..0ad68c80216 100644 --- a/include/caffe/data_transformer.hpp +++ b/include/caffe/data_transformer.hpp @@ -62,6 +62,7 @@ class DataTransformer { */ void Transform(const vector & mat_vector, Blob* transformed_blob); + /** * @brief Applies the transformation defined in the data layer's * transform_param block to a cv::Mat @@ -87,6 +88,41 @@ class DataTransformer { */ void Transform(Blob* input_blob, Blob* transformed_blob); + /** + * @brief Infers the shape of transformed_blob will have when + * the transformation is applied to the data. + * + * @param datum + * Datum containing the data to be transformed. + */ + vector InferBlobShape(const Datum& datum); + /** + * @brief Infers the shape of transformed_blob will have when + * the transformation is applied to the data. + * It uses the first element to infer the shape of the blob. + * + * @param datum_vector + * A vector of Datum containing the data to be transformed. + */ + vector InferBlobShape(const vector & datum_vector); + /** + * @brief Infers the shape of transformed_blob will have when + * the transformation is applied to the data. + * It uses the first element to infer the shape of the blob. + * + * @param mat_vector + * A vector of Mat containing the data to be transformed. + */ + vector InferBlobShape(const vector & mat_vector); + /** + * @brief Infers the shape of transformed_blob will have when + * the transformation is applied to the data. + * + * @param cv_img + * cv::Mat containing the data to be transformed. + */ + vector InferBlobShape(const cv::Mat& cv_img); + protected: /** * @brief Generates a random integer from Uniform({0, 1, ..., n-1}). 
diff --git a/include/caffe/filler.hpp b/include/caffe/filler.hpp index bb18e8e1e28..ff3542e1f99 100644 --- a/include/caffe/filler.hpp +++ b/include/caffe/filler.hpp @@ -126,17 +126,18 @@ class PositiveUnitballFiller : public Filler { }; /** - * @brief Fills a Blob with values @f$ x \sim U(-a, +a) @f$ where @f$ a @f$ - * is set inversely proportional to the number of incoming nodes. + * @brief Fills a Blob with values @f$ x \sim U(-a, +a) @f$ where @f$ a @f$ is + * set inversely proportional to number of incoming nodes, outgoing + * nodes, or their average. * * A Filler based on the paper [Bengio and Glorot 2010]: Understanding - * the difficulty of training deep feedforward neuralnetworks, but does not - * use the fan_out value. + * the difficulty of training deep feedforward neuralnetworks. * - * It fills the incoming matrix by randomly sampling uniform data from - * [-scale, scale] where scale = sqrt(3 / fan_in) where fan_in is the number - * of input nodes. You should make sure the input blob has shape (num, a, b, c) - * where a * b * c = fan_in. + * It fills the incoming matrix by randomly sampling uniform data from [-scale, + * scale] where scale = sqrt(3 / n) where n is the fan_in, fan_out, or their + * average, depending on the variance_norm option. You should make sure the + * input blob has shape (num, a, b, c) where a * b * c = fan_in and num * b * c + * = fan_out. Note that this is currently not the case for inner product layers. * * TODO(dox): make notation in above comment consistent with rest & use LaTeX. 
*/ @@ -148,7 +149,16 @@ class XavierFiller : public Filler { virtual void Fill(Blob* blob) { CHECK(blob->count()); int fan_in = blob->count() / blob->num(); - Dtype scale = sqrt(Dtype(3) / fan_in); + int fan_out = blob->count() / blob->channels(); + Dtype n = fan_in; // default to fan_in + if (this->filler_param_.variance_norm() == + FillerParameter_VarianceNorm_AVERAGE) { + n = (fan_in + fan_out) / Dtype(2); + } else if (this->filler_param_.variance_norm() == + FillerParameter_VarianceNorm_FAN_OUT) { + n = fan_out; + } + Dtype scale = sqrt(Dtype(3) / n); caffe_rng_uniform(blob->count(), -scale, scale, blob->mutable_cpu_data()); CHECK_EQ(this->filler_param_.sparse(), -1) @@ -156,6 +166,47 @@ class XavierFiller : public Filler { } }; +/** + * @brief Fills a Blob with values @f$ x \sim N(0, \sigma^2) @f$ where + * @f$ \sigma^2 @f$ is set inversely proportional to number of incoming + * nodes, outgoing nodes, or their average. + * + * A Filler based on the paper [He, Zhang, Ren and Sun 2015]: Specifically + * accounts for ReLU nonlinearities. + * + * Aside: for another perspective on the scaling factor, see the derivation of + * [Saxe, McClelland, and Ganguli 2013 (v3)]. + * + * It fills the incoming matrix by randomly sampling Gaussian data with std = + * sqrt(2 / n) where n is the fan_in, fan_out, or their average, depending on + * the variance_norm option. You should make sure the input blob has shape (num, + * a, b, c) where a * b * c = fan_in and num * b * c = fan_out. Note that this + * is currently not the case for inner product layers. 
+ */ +template +class MSRAFiller : public Filler { + public: + explicit MSRAFiller(const FillerParameter& param) + : Filler(param) {} + virtual void Fill(Blob* blob) { + CHECK(blob->count()); + int fan_in = blob->count() / blob->num(); + int fan_out = blob->count() / blob->channels(); + Dtype n = fan_in; // default to fan_in + if (this->filler_param_.variance_norm() == + FillerParameter_VarianceNorm_AVERAGE) { + n = (fan_in + fan_out) / Dtype(2); + } else if (this->filler_param_.variance_norm() == + FillerParameter_VarianceNorm_FAN_OUT) { + n = fan_out; + } + Dtype std = sqrt(Dtype(2) / n); + caffe_rng_gaussian(blob->count(), Dtype(0), std, + blob->mutable_cpu_data()); + CHECK_EQ(this->filler_param_.sparse(), -1) + << "Sparsity not supported by this Filler."; + } +}; /** * @brief Get a specific filler from the specification given in FillerParameter. @@ -176,6 +227,8 @@ Filler* GetFiller(const FillerParameter& param) { return new UniformFiller(param); } else if (type == "xavier") { return new XavierFiller(param); + } else if (type == "msra") { + return new MSRAFiller(param); } else { CHECK(false) << "Unknown filler name: " << param.type(); } diff --git a/include/caffe/layer.hpp b/include/caffe/layer.hpp index 2d13ef97c05..8f924a75755 100644 --- a/include/caffe/layer.hpp +++ b/include/caffe/layer.hpp @@ -406,6 +406,7 @@ template inline Dtype Layer::Forward(const vector*>& bottom, const vector*>& top) { Dtype loss = 0; + Reshape(bottom, top); switch (Caffe::mode()) { case Caffe::CPU: Forward_cpu(bottom, top); diff --git a/include/caffe/loss_layers.hpp b/include/caffe/loss_layers.hpp index 2f9c1f567a1..8993dd70910 100644 --- a/include/caffe/loss_layers.hpp +++ b/include/caffe/loss_layers.hpp @@ -672,8 +672,6 @@ class SigmoidCrossEntropyLossLayer : public LossLayer { /// @copydoc SigmoidCrossEntropyLossLayer virtual void Forward_cpu(const vector*>& bottom, const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); /** * @brief 
Computes the sigmoid cross-entropy loss error gradient w.r.t. the diff --git a/include/caffe/net.hpp b/include/caffe/net.hpp index 075afebc9b0..5665df1edf2 100644 --- a/include/caffe/net.hpp +++ b/include/caffe/net.hpp @@ -137,6 +137,9 @@ class Net { inline const vector& blob_loss_weights() const { return blob_loss_weights_; } + inline const vector& layer_need_backward() const { + return layer_need_backward_; + } /// @brief returns the parameters inline const vector > >& params() const { return params_; diff --git a/include/caffe/neuron_layers.hpp b/include/caffe/neuron_layers.hpp index 323215134c7..c2e0774aaa2 100644 --- a/include/caffe/neuron_layers.hpp +++ b/include/caffe/neuron_layers.hpp @@ -8,7 +8,6 @@ #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/layer.hpp" -#include "caffe/net.hpp" #include "caffe/proto/caffe.pb.h" #define HDF5_DATA_DATASET_NAME "data" @@ -268,6 +267,72 @@ class ExpLayer : public NeuronLayer { Dtype inner_scale_, outer_scale_; }; +/** + * @brief Computes @f$ y = log_{\gamma}(\alpha x + \beta) @f$, + * as specified by the scale @f$ \alpha @f$, shift @f$ \beta @f$, + * and base @f$ \gamma @f$. 
+ */ +template +class LogLayer : public NeuronLayer { + public: + /** + * @param param provides LogParameter log_param, + * with LogLayer options: + * - scale (\b optional, default 1) the scale @f$ \alpha @f$ + * - shift (\b optional, default 0) the shift @f$ \beta @f$ + * - base (\b optional, default -1 for a value of @f$ e \approx 2.718 @f$) + * the base @f$ \gamma @f$ + */ + explicit LogLayer(const LayerParameter& param) + : NeuronLayer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Log"; } + + protected: + /** + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$ + * @param top output Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the computed outputs @f$ + * y = log_{\gamma}(\alpha x + \beta) + * @f$ + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the error gradient w.r.t. the exp inputs. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (N \times C \times H \times W) @f$ + * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ + * with respect to computed outputs @f$ y @f$ + * @param propagate_down see Layer::Backward. 
+ * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$; Backward fills their diff with + * gradients @f$ + * \frac{\partial E}{\partial x} = + * \frac{\partial E}{\partial y} y \alpha \log_e(gamma) + * @f$ if propagate_down[0] + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + Dtype base_scale_; + Dtype input_scale_, input_shift_; + Dtype backward_num_scale_; +}; + /** * @brief Computes @f$ y = (\alpha x + \beta) ^ \gamma @f$, * as specified by the scale @f$ \alpha @f$, shift @f$ \beta @f$, @@ -734,7 +799,8 @@ class PReLULayer : public NeuronLayer { const vector& propagate_down, const vector*>& bottom); bool channel_shared_; - Blob multiplier_; // dot multipler for backward computation of params + Blob multiplier_; // dot multiplier for backward computation of params + Blob backward_buff_; // temporary buffer for backward computation Blob bottom_memory_; // memory for in-place computation }; diff --git a/include/caffe/python_layer.hpp b/include/caffe/python_layer.hpp index 816ef453720..19cf18c9742 100644 --- a/include/caffe/python_layer.hpp +++ b/include/caffe/python_layer.hpp @@ -14,12 +14,12 @@ template class PythonLayer : public Layer { public: PythonLayer(PyObject* self, const LayerParameter& param) - : Layer(param), self_(self) { } + : Layer(param), self_(bp::handle<>(bp::borrowed(self))) { } virtual void LayerSetUp(const vector*>& bottom, const vector*>& top) { try { - bp::call_method(self_, "setup", bottom, top); + self_.attr("setup")(bottom, top); } catch (bp::error_already_set) { PyErr_Print(); throw; @@ -29,7 +29,7 @@ class PythonLayer : public Layer { virtual void Reshape(const vector*>& bottom, const vector*>& top) { try { - bp::call_method(self_, "reshape", bottom, top); + self_.attr("reshape")(bottom, top); } catch 
(bp::error_already_set) { PyErr_Print(); throw; @@ -42,7 +42,7 @@ class PythonLayer : public Layer { virtual void Forward_cpu(const vector*>& bottom, const vector*>& top) { try { - bp::call_method(self_, "forward", bottom, top); + self_.attr("forward")(bottom, top); } catch (bp::error_already_set) { PyErr_Print(); throw; @@ -51,8 +51,7 @@ class PythonLayer : public Layer { virtual void Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { try { - bp::call_method(self_, "backward", top, propagate_down, - bottom); + self_.attr("backward")(top, propagate_down, bottom); } catch (bp::error_already_set) { PyErr_Print(); throw; @@ -60,7 +59,7 @@ class PythonLayer : public Layer { } private: - PyObject* self_; + bp::object self_; }; } // namespace caffe diff --git a/include/caffe/solver.hpp b/include/caffe/solver.hpp index 4dcdc3dc20b..c2ced487d6f 100644 --- a/include/caffe/solver.hpp +++ b/include/caffe/solver.hpp @@ -11,7 +11,7 @@ namespace caffe { /** * @brief An interface for classes that perform optimization on Net%s. * - * Requires implementation of ComputeUpdateValue to compute a parameter update + * Requires implementation of ApplyUpdate to compute a parameter update * given the current state of the Net parameters. */ template @@ -39,8 +39,8 @@ class Solver { int iter() { return iter_; } protected: - // Get the update value for the current iteration. - virtual void ComputeUpdateValue() = 0; + // Make and apply the update value for the current iteration. + virtual void ApplyUpdate() = 0; // The Solver::Snapshot function implements the basic snapshotting utility // that stores the learned net. 
You should implement the SnapshotSolverState() // function that produces a SolverState protocol buffer that needs to be @@ -80,7 +80,10 @@ class SGDSolver : public Solver { protected: void PreSolve(); Dtype GetLearningRate(); - virtual void ComputeUpdateValue(); + virtual void ApplyUpdate(); + virtual void Normalize(int param_id); + virtual void Regularize(int param_id); + virtual void ComputeUpdateValue(int param_id, Dtype rate); virtual void ClipGradients(); virtual void SnapshotSolverState(SolverState * state); virtual void RestoreSolverState(const SolverState& state); @@ -102,7 +105,7 @@ class NesterovSolver : public SGDSolver { : SGDSolver(param_file) {} protected: - virtual void ComputeUpdateValue(); + virtual void ComputeUpdateValue(int param_id, Dtype rate); DISABLE_COPY_AND_ASSIGN(NesterovSolver); }; @@ -116,7 +119,7 @@ class AdaGradSolver : public SGDSolver { : SGDSolver(param_file) { constructor_sanity_check(); } protected: - virtual void ComputeUpdateValue(); + virtual void ComputeUpdateValue(int param_id, Dtype rate); void constructor_sanity_check() { CHECK_EQ(0, this->param_.momentum()) << "Momentum cannot be used with AdaGrad."; diff --git a/include/caffe/test/test_caffe_main.hpp b/include/caffe/test/test_caffe_main.hpp index bd5f31e063f..fc156091476 100644 --- a/include/caffe/test/test_caffe_main.hpp +++ b/include/caffe/test/test_caffe_main.hpp @@ -40,34 +40,36 @@ class MultiDeviceTest : public ::testing::Test { typedef ::testing::Types TestDtypes; -struct FloatCPU { - typedef float Dtype; +template +struct CPUDevice { + typedef TypeParam Dtype; static const Caffe::Brew device = Caffe::CPU; }; -struct DoubleCPU { - typedef double Dtype; - static const Caffe::Brew device = Caffe::CPU; +template +class CPUDeviceTest : public MultiDeviceTest > { }; #ifdef CPU_ONLY -typedef ::testing::Types TestDtypesAndDevices; +typedef ::testing::Types, + CPUDevice > TestDtypesAndDevices; #else -struct FloatGPU { - typedef float Dtype; +template +struct GPUDevice { + 
typedef TypeParam Dtype; static const Caffe::Brew device = Caffe::GPU; }; -struct DoubleGPU { - typedef double Dtype; - static const Caffe::Brew device = Caffe::GPU; +template +class GPUDeviceTest : public MultiDeviceTest > { }; -typedef ::testing::Types - TestDtypesAndDevices; +typedef ::testing::Types, CPUDevice, + GPUDevice, GPUDevice > + TestDtypesAndDevices; #endif diff --git a/include/caffe/test/test_gradient_check_util.hpp b/include/caffe/test/test_gradient_check_util.hpp index 22937711b58..cc5dcbad0ee 100644 --- a/include/caffe/test/test_gradient_check_util.hpp +++ b/include/caffe/test/test_gradient_check_util.hpp @@ -80,11 +80,14 @@ void GradientChecker::CheckGradientSingle(Layer* layer, CHECK_EQ(top_count, bottom[blob_id]->count()); } } - // First, figure out what blobs we need to check against. + // First, figure out what blobs we need to check against, and zero init + // parameter blobs. vector*> blobs_to_check; vector propagate_down(bottom.size(), check_bottom < 0); for (int i = 0; i < layer->blobs().size(); ++i) { - blobs_to_check.push_back(layer->blobs()[i].get()); + Blob* blob = layer->blobs()[i].get(); + caffe_set(blob->count(), static_cast(0), blob->mutable_cpu_diff()); + blobs_to_check.push_back(blob); } if (check_bottom < 0) { for (int i = 0; i < bottom.size(); ++i) { diff --git a/include/caffe/util/math_functions.hpp b/include/caffe/util/math_functions.hpp index f43036fcebc..2cacd8e72cd 100644 --- a/include/caffe/util/math_functions.hpp +++ b/include/caffe/util/math_functions.hpp @@ -88,6 +88,9 @@ void caffe_rng_bernoulli(const int n, const Dtype p, unsigned int* r); template void caffe_exp(const int n, const Dtype* a, Dtype* y); +template +void caffe_log(const int n, const Dtype* a, Dtype* y); + template void caffe_abs(const int n, const Dtype* a, Dtype* y); @@ -203,6 +206,9 @@ void caffe_gpu_abs(const int n, const Dtype* a, Dtype* y); template void caffe_gpu_exp(const int n, const Dtype* a, Dtype* y); +template +void caffe_gpu_log(const int 
n, const Dtype* a, Dtype* y); + template void caffe_gpu_powx(const int n, const Dtype* a, const Dtype b, Dtype* y); diff --git a/include/caffe/util/mkl_alternate.hpp b/include/caffe/util/mkl_alternate.hpp index 32fdbf79932..3355b6658a3 100644 --- a/include/caffe/util/mkl_alternate.hpp +++ b/include/caffe/util/mkl_alternate.hpp @@ -33,6 +33,7 @@ extern "C" { DEFINE_VSL_UNARY_FUNC(Sqr, y[i] = a[i] * a[i]); DEFINE_VSL_UNARY_FUNC(Exp, y[i] = exp(a[i])); +DEFINE_VSL_UNARY_FUNC(Ln, y[i] = log(a[i])); DEFINE_VSL_UNARY_FUNC(Abs, y[i] = fabs(a[i])); // A simple way to define the vsl unary functions with singular parameter b. diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index cd0ab8babb0..a6bd86a93f5 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -453,6 +453,72 @@ class CuDNNPoolingLayer : public PoolingLayer { }; #endif +/** + * @brief Does spatial pyramid pooling on the input image + * by taking the max, average, etc. within regions + * so that the result vector of different sized + * images are of the same size. + */ +template +class SPPLayer : public Layer { + public: + explicit SPPLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "SPP"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int MinTopBlobs() const { return 1; } + // MAX POOL layers can output an extra top blob for the mask; + // others can only output the pooled inputs. + virtual inline int MaxTopBlobs() const { + return (this->layer_param_.pooling_param().pool() == + PoolingParameter_PoolMethod_MAX) ? 
2 : 1; + } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + // calculates the kernel and stride dimensions for the pooling layer, + // returns a correctly configured LayerParameter for a PoolingLayer + virtual LayerParameter GetPoolingParam(const int pyramid_level, + const int bottom_h, const int bottom_w, const SPPParameter spp_param); + + int pyramid_height_; + int bottom_h_, bottom_w_; + int channels_; + int kernel_h_, kernel_w_; + int pad_h_, pad_w_; + + /// the internal Split layer that feeds the pooling layers + shared_ptr > split_layer_; + /// top vector holder used in call to the underlying SplitLayer::Forward + vector*> split_top_vec_; + /// bottom vector holder used in call to the underlying PoolingLayer::Forward + vector*>*> pooling_bottom_vecs_; + /// the internal Pooling layers of different kernel sizes + vector > > pooling_layers_; + /// top vector holders used in call to the underlying PoolingLayer::Forward + vector*>*> pooling_top_vecs_; + /// pooling_outputs stores the outputs of the PoolingLayers + vector*> pooling_outputs_; + /// the internal Flatten layers that the Pooling layers feed into + vector*> flatten_layers_; + /// top vector holders used in call to the underlying FlattenLayer::Forward + vector*>*> flatten_top_vecs_; + /// flatten_outputs stores the outputs of the FlattenLayers + vector*> flatten_outputs_; + /// bottom vector holder used in call to the underlying ConcatLayer::Forward + vector*> concat_bottom_vec_; + /// the internal Concat layers that the Flatten layers feed into + shared_ptr > concat_layer_; +}; + } // namespace caffe #endif // CAFFE_VISION_LAYERS_HPP_ diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp index 6d2b3f502d9..94fdcc35fb6 100644 --- a/src/caffe/blob.cpp +++ b/src/caffe/blob.cpp @@ -26,6 +26,7 @@ void Blob::Reshape(const vector& shape) { 
shape_.resize(shape.size()); for (int i = 0; i < shape.size(); ++i) { CHECK_GE(shape[i], 0); + CHECK_LE(shape[i], INT_MAX / count_) << "blob size exceeds INT_MAX"; count_ *= shape[i]; shape_[i] = shape[i]; } diff --git a/src/caffe/data_transformer.cpp b/src/caffe/data_transformer.cpp index b0b98e478c1..6f75bdb3852 100644 --- a/src/caffe/data_transformer.cpp +++ b/src/caffe/data_transformer.cpp @@ -125,10 +125,31 @@ void DataTransformer::Transform(const Datum& datum, template void DataTransformer::Transform(const Datum& datum, Blob* transformed_blob) { + // If datum is encoded, decoded and transform the cv::image. + if (datum.encoded()) { + CHECK(!param_.force_color() && !param_.force_gray()) + << "cannot set both force_color and force_gray"; + cv::Mat cv_img; + if (param_.force_color() || param_.force_gray()) { + // If force_color then decode in color otherwise decode in gray. + cv_img = DecodeDatumToCVMat(datum, param_.force_color()); + } else { + cv_img = DecodeDatumToCVMatNative(datum); + } + // Transform the cv::image into blob. + return Transform(cv_img, transformed_blob); + } else { + if (param_.force_color() || param_.force_gray()) { + LOG(ERROR) << "force_color and force_gray only for encoded datum"; + } + } + + const int crop_size = param_.crop_size(); const int datum_channels = datum.channels(); const int datum_height = datum.height(); const int datum_width = datum.width(); + // Check dimensions. 
const int channels = transformed_blob->channels(); const int height = transformed_blob->height(); const int width = transformed_blob->width(); @@ -139,8 +160,6 @@ void DataTransformer::Transform(const Datum& datum, CHECK_LE(width, datum_width); CHECK_GE(num, 1); - const int crop_size = param_.crop_size(); - if (crop_size) { CHECK_EQ(crop_size, height); CHECK_EQ(crop_size, width); @@ -196,10 +215,12 @@ void DataTransformer::Transform(const vector & mat_vector, template void DataTransformer::Transform(const cv::Mat& cv_img, Blob* transformed_blob) { + const int crop_size = param_.crop_size(); const int img_channels = cv_img.channels(); const int img_height = cv_img.rows; const int img_width = cv_img.cols; + // Check dimensions. const int channels = transformed_blob->channels(); const int height = transformed_blob->height(); const int width = transformed_blob->width(); @@ -212,7 +233,6 @@ void DataTransformer::Transform(const cv::Mat& cv_img, CHECK(cv_img.depth() == CV_8U) << "Image data type must be unsigned byte"; - const int crop_size = param_.crop_size(); const Dtype scale = param_.scale(); const bool do_mirror = param_.mirror() && Rand(2); const bool has_mean_file = param_.has_mean_file(); @@ -297,11 +317,23 @@ void DataTransformer::Transform(const cv::Mat& cv_img, template void DataTransformer::Transform(Blob* input_blob, Blob* transformed_blob) { + const int crop_size = param_.crop_size(); const int input_num = input_blob->num(); const int input_channels = input_blob->channels(); const int input_height = input_blob->height(); const int input_width = input_blob->width(); + if (transformed_blob->count() == 0) { + // Initialize transformed_blob with the right shape. 
+ if (crop_size) { + transformed_blob->Reshape(input_num, input_channels, + crop_size, crop_size); + } else { + transformed_blob->Reshape(input_num, input_channels, + input_height, input_width); + } + } + const int num = transformed_blob->num(); const int channels = transformed_blob->channels(); const int height = transformed_blob->height(); @@ -313,7 +345,7 @@ void DataTransformer::Transform(Blob* input_blob, CHECK_GE(input_height, height); CHECK_GE(input_width, width); - const int crop_size = param_.crop_size(); + const Dtype scale = param_.scale(); const bool do_mirror = param_.mirror() && Rand(2); const bool has_mean_file = param_.has_mean_file(); @@ -395,6 +427,82 @@ void DataTransformer::Transform(Blob* input_blob, } } +template +vector DataTransformer::InferBlobShape(const Datum& datum) { + if (datum.encoded()) { + CHECK(!param_.force_color() && !param_.force_gray()) + << "cannot set both force_color and force_gray"; + cv::Mat cv_img; + if (param_.force_color() || param_.force_gray()) { + // If force_color then decode in color otherwise decode in gray. + cv_img = DecodeDatumToCVMat(datum, param_.force_color()); + } else { + cv_img = DecodeDatumToCVMatNative(datum); + } + // InferBlobShape using the cv::image. + return InferBlobShape(cv_img); + } + + const int crop_size = param_.crop_size(); + const int datum_channels = datum.channels(); + const int datum_height = datum.height(); + const int datum_width = datum.width(); + // Check dimensions. + CHECK_GT(datum_channels, 0); + CHECK_GE(datum_height, crop_size); + CHECK_GE(datum_width, crop_size); + // Build BlobShape. + vector shape(4); + shape[0] = 1; + shape[1] = datum_channels; + shape[2] = (crop_size)? crop_size: datum_height; + shape[3] = (crop_size)? 
crop_size: datum_width; + return shape; +} + +template +vector DataTransformer::InferBlobShape( + const vector & datum_vector) { + const int num = datum_vector.size(); + CHECK_GT(num, 0) << "There is no datum to in the vector"; + // Use first datum in the vector to InferBlobShape. + vector shape = InferBlobShape(datum_vector[0]); + // Adjust num to the size of the vector. + shape[0] = num; + return shape; +} + +template +vector DataTransformer::InferBlobShape(const cv::Mat& cv_img) { + const int crop_size = param_.crop_size(); + const int img_channels = cv_img.channels(); + const int img_height = cv_img.rows; + const int img_width = cv_img.cols; + // Check dimensions. + CHECK_GT(img_channels, 0); + CHECK_GE(img_height, crop_size); + CHECK_GE(img_width, crop_size); + // Build BlobShape. + vector shape(4); + shape[0] = 1; + shape[1] = img_channels; + shape[2] = (crop_size)? crop_size: img_height; + shape[3] = (crop_size)? crop_size: img_width; + return shape; +} + +template +vector DataTransformer::InferBlobShape( + const vector & mat_vector) { + const int num = mat_vector.size(); + CHECK_GT(num, 0) << "There is no cv_img to in the vector"; + // Use first cv_img in the vector to InferBlobShape. + vector shape = InferBlobShape(mat_vector[0]); + // Adjust num to the size of the vector. 
+ shape[0] = num; + return shape; +} + template void DataTransformer::InitRand() { const bool needs_rand = param_.mirror() || diff --git a/src/caffe/layers/base_data_layer.cpp b/src/caffe/layers/base_data_layer.cpp index 352200915d7..26a1118282f 100644 --- a/src/caffe/layers/base_data_layer.cpp +++ b/src/caffe/layers/base_data_layer.cpp @@ -2,7 +2,6 @@ #include #include "caffe/data_layers.hpp" -#include "caffe/net.hpp" #include "caffe/util/io.hpp" namespace caffe { @@ -21,11 +20,11 @@ void BaseDataLayer::LayerSetUp(const vector*>& bottom, } else { output_labels_ = true; } - // The subclasses should setup the size of bottom and top - DataLayerSetUp(bottom, top); data_transformer_.reset( new DataTransformer(transform_param_, this->phase_)); data_transformer_->InitRand(); + // The subclasses should setup the size of bottom and top + DataLayerSetUp(bottom, top); } template @@ -63,13 +62,15 @@ void BasePrefetchingDataLayer::Forward_cpu( JoinPrefetchThread(); DLOG(INFO) << "Thread joined"; // Reshape to loaded data. - top[0]->Reshape(this->prefetch_data_.num(), this->prefetch_data_.channels(), - this->prefetch_data_.height(), this->prefetch_data_.width()); + top[0]->ReshapeLike(prefetch_data_); // Copy the data caffe_copy(prefetch_data_.count(), prefetch_data_.cpu_data(), top[0]->mutable_cpu_data()); DLOG(INFO) << "Prefetch copied"; if (this->output_labels_) { + // Reshape to loaded labels. + top[1]->ReshapeLike(prefetch_label_); + // Copy the labels. caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(), top[1]->mutable_cpu_data()); } diff --git a/src/caffe/layers/base_data_layer.cu b/src/caffe/layers/base_data_layer.cu index 775f6c47f7e..9335a5bc9a9 100644 --- a/src/caffe/layers/base_data_layer.cu +++ b/src/caffe/layers/base_data_layer.cu @@ -10,12 +10,14 @@ void BasePrefetchingDataLayer::Forward_gpu( // First, join the thread JoinPrefetchThread(); // Reshape to loaded data. 
- top[0]->Reshape(this->prefetch_data_.num(), this->prefetch_data_.channels(), - this->prefetch_data_.height(), this->prefetch_data_.width()); + top[0]->ReshapeLike(this->prefetch_data_); // Copy the data caffe_copy(prefetch_data_.count(), prefetch_data_.cpu_data(), top[0]->mutable_gpu_data()); if (this->output_labels_) { + // Reshape to loaded labels. + top[1]->ReshapeLike(prefetch_label_); + // Copy the labels. caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(), top[1]->mutable_gpu_data()); } diff --git a/src/caffe/layers/concat_layer.cu b/src/caffe/layers/concat_layer.cu index dbadb5aeb30..8f2e85d8f52 100644 --- a/src/caffe/layers/concat_layer.cu +++ b/src/caffe/layers/concat_layer.cu @@ -6,21 +6,41 @@ namespace caffe { +template +__global__ void Concat(const int nthreads, const Dtype* in_data, + const bool forward, const int num_concats, const int concat_size, + const int top_concat_axis, const int bottom_concat_axis, + const int offset_concat_axis, Dtype* out_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int total_concat_size = concat_size * bottom_concat_axis; + const int concat_num = index / total_concat_size; + const int concat_index = index % total_concat_size; + const int top_index = concat_index + + (concat_num * top_concat_axis + offset_concat_axis) * concat_size; + if (forward) { + out_data[top_index] = in_data[index]; + } else { + out_data[index] = in_data[top_index]; + } + } +} + template void ConcatLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { Dtype* top_data = top[0]->mutable_gpu_data(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); + const bool kForward = true; for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); const int bottom_concat_axis = bottom[i]->shape(concat_axis_); - for (int n = 0; n < num_concats_; ++n) { - caffe_copy(bottom_concat_axis * concat_input_size_, - bottom_data + n * bottom_concat_axis * concat_input_size_, - 
top_data + (n * top_concat_axis + offset_concat_axis) - * concat_input_size_); - } + const int bottom_concat_size = bottom_concat_axis * concat_input_size_; + const int nthreads = bottom_concat_size * num_concats_; + Concat // NOLINT_NEXT_LINE(whitespace/operators) + <<>>( + nthreads, bottom_data, kForward, num_concats_, concat_input_size_, + top_concat_axis, bottom_concat_axis, offset_concat_axis, top_data); offset_concat_axis += bottom_concat_axis; } } @@ -31,15 +51,17 @@ void ConcatLayer::Backward_gpu(const vector*>& top, const Dtype* top_diff = top[0]->gpu_diff(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); + const bool kForward = false; for (int i = 0; i < bottom.size(); ++i) { if (!propagate_down[i]) { continue; } Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); const int bottom_concat_axis = bottom[i]->shape(concat_axis_); - for (int n = 0; n < num_concats_; ++n) { - caffe_copy(bottom_concat_axis * concat_input_size_, top_diff + - (n * top_concat_axis + offset_concat_axis) * concat_input_size_, - bottom_diff + n * bottom_concat_axis * concat_input_size_); - } + const int bottom_concat_size = bottom_concat_axis * concat_input_size_; + const int nthreads = bottom_concat_size * num_concats_; + Concat // NOLINT_NEXT_LINE(whitespace/operators) + <<>>( + nthreads, top_diff, kForward, num_concats_, concat_input_size_, + top_concat_axis, bottom_concat_axis, offset_concat_axis, bottom_diff); offset_concat_axis += bottom_concat_axis; } } diff --git a/src/caffe/layers/contrastive_loss_layer.cpp b/src/caffe/layers/contrastive_loss_layer.cpp index 0692c11c257..25e167819d3 100644 --- a/src/caffe/layers/contrastive_loss_layer.cpp +++ b/src/caffe/layers/contrastive_loss_layer.cpp @@ -41,6 +41,8 @@ void ContrastiveLossLayer::Forward_cpu( diff_.mutable_cpu_data()); // a_i-b_i const int channels = bottom[0]->channels(); Dtype margin = this->layer_param_.contrastive_loss_param().margin(); + bool legacy_version = + 
this->layer_param_.contrastive_loss_param().legacy_version(); Dtype loss(0.0); for (int i = 0; i < bottom[0]->num(); ++i) { dist_sq_.mutable_cpu_data()[i] = caffe_cpu_dot(channels, @@ -48,7 +50,12 @@ void ContrastiveLossLayer::Forward_cpu( if (static_cast(bottom[2]->cpu_data()[i])) { // similar pairs loss += dist_sq_.cpu_data()[i]; } else { // dissimilar pairs - loss += std::max(margin-dist_sq_.cpu_data()[i], Dtype(0.0)); + if (legacy_version) { + loss += std::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0)); + } else { + Dtype dist = std::max(margin - sqrt(dist_sq_.cpu_data()[i]), 0.0); + loss += dist*dist; + } } } loss = loss / static_cast(bottom[0]->num()) / Dtype(2); @@ -59,6 +66,8 @@ template void ContrastiveLossLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { Dtype margin = this->layer_param_.contrastive_loss_param().margin(); + bool legacy_version = + this->layer_param_.contrastive_loss_param().legacy_version(); for (int i = 0; i < 2; ++i) { if (propagate_down[i]) { const Dtype sign = (i == 0) ? 
1 : -1; @@ -76,10 +85,20 @@ void ContrastiveLossLayer::Backward_cpu(const vector*>& top, Dtype(0.0), bout + (j*channels)); } else { // dissimilar pairs - if ((margin-dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + Dtype mdist(0.0); + Dtype beta(0.0); + if (legacy_version) { + mdist = margin - dist_sq_.cpu_data()[j]; + beta = -alpha; + } else { + Dtype dist = sqrt(dist_sq_.cpu_data()[j]); + mdist = margin - dist; + beta = -alpha * mdist / (dist + Dtype(1e-4)); + } + if (mdist > Dtype(0.0)) { caffe_cpu_axpby( channels, - -alpha, + beta, diff_.cpu_data() + (j*channels), Dtype(0.0), bout + (j*channels)); diff --git a/src/caffe/layers/contrastive_loss_layer.cu b/src/caffe/layers/contrastive_loss_layer.cu index 78a55995a0a..931239316ac 100644 --- a/src/caffe/layers/contrastive_loss_layer.cu +++ b/src/caffe/layers/contrastive_loss_layer.cu @@ -32,12 +32,20 @@ void ContrastiveLossLayer::Forward_gpu( Dtype(0.0), dist_sq_.mutable_gpu_data()); // \Sum (a_i-b_i)^2 Dtype margin = this->layer_param_.contrastive_loss_param().margin(); + bool legacy_version = + this->layer_param_.contrastive_loss_param().legacy_version(); Dtype loss(0.0); for (int i = 0; i < bottom[0]->num(); ++i) { if (static_cast(bottom[2]->cpu_data()[i])) { // similar pairs loss += dist_sq_.cpu_data()[i]; } else { // dissimilar pairs - loss += std::max(margin-dist_sq_.cpu_data()[i], Dtype(0.0)); + if (legacy_version) { + loss += std::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0)); + } else { + Dtype dist = std::max(margin - sqrt(dist_sq_.cpu_data()[i]), + Dtype(0.0)); + loss += dist*dist; + } } } loss = loss / static_cast(bottom[0]->num()) / Dtype(2); @@ -45,8 +53,8 @@ void ContrastiveLossLayer::Forward_gpu( } template -__global__ void CLLForward(const int count, const int channels, - const Dtype margin, const Dtype alpha, +__global__ void CLLBackward(const int count, const int channels, + const Dtype margin, const bool legacy_version, const Dtype alpha, const Dtype* y, const Dtype* diff, const Dtype* dist_sq, 
Dtype *bottom_diff) { CUDA_KERNEL_LOOP(i, count) { @@ -54,8 +62,18 @@ __global__ void CLLForward(const int count, const int channels, if (static_cast(y[n])) { // similar pairs bottom_diff[i] = alpha * diff[i]; } else { // dissimilar pairs - if ((margin-dist_sq[n]) > 0.0) { - bottom_diff[i] = -alpha * diff[i]; + Dtype mdist(0.0); + Dtype beta(0.0); + if (legacy_version) { + mdist = (margin - dist_sq[n]); + beta = -alpha; + } else { + Dtype dist = sqrt(dist_sq[n]); + mdist = (margin - dist); + beta = -alpha * mdist / (dist + Dtype(1e-4)) * diff[i]; + } + if (mdist > 0.0) { + bottom_diff[i] = beta; } else { bottom_diff[i] = 0; } @@ -71,12 +89,14 @@ void ContrastiveLossLayer::Backward_gpu(const vector*>& top, const int count = bottom[0]->count(); const int channels = bottom[0]->channels(); Dtype margin = this->layer_param_.contrastive_loss_param().margin(); + const bool legacy_version = + this->layer_param_.contrastive_loss_param().legacy_version(); const Dtype sign = (i == 0) ? 1 : -1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / static_cast(bottom[0]->num()); // NOLINT_NEXT_LINE(whitespace/operators) - CLLForward<<>>( - count, channels, margin, alpha, + CLLBackward<<>>( + count, channels, margin, legacy_version, alpha, bottom[2]->gpu_data(), // pair similarity 0 or 1 diff_.gpu_data(), // the cached eltwise difference between a and b dist_sq_.gpu_data(), // the cached square distance between a and b diff --git a/src/caffe/layers/conv_layer.cpp b/src/caffe/layers/conv_layer.cpp index c0c9f6f3371..928ef5ee468 100644 --- a/src/caffe/layers/conv_layer.cpp +++ b/src/caffe/layers/conv_layer.cpp @@ -39,13 +39,6 @@ void ConvolutionLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->cpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_cpu_diff(); - if (this->param_propagate_down_[0]) { - caffe_set(this->blobs_[0]->count(), Dtype(0), weight_diff); - } - if (this->bias_term_ && 
this->param_propagate_down_[1]) { - caffe_set(this->blobs_[1]->count(), Dtype(0), - this->blobs_[1]->mutable_cpu_diff()); - } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->cpu_diff(); const Dtype* bottom_data = bottom[i]->cpu_data(); diff --git a/src/caffe/layers/conv_layer.cu b/src/caffe/layers/conv_layer.cu index 3902fdf3930..b8a98ff7cc9 100644 --- a/src/caffe/layers/conv_layer.cu +++ b/src/caffe/layers/conv_layer.cu @@ -31,13 +31,6 @@ void ConvolutionLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); - if (this->param_propagate_down_[0]) { - caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); - } - if (this->bias_term_ && this->param_propagate_down_[1]) { - caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), - this->blobs_[1]->mutable_gpu_diff()); - } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); // Bias gradient, if necessary. 
diff --git a/src/caffe/layers/cudnn_conv_layer.cu b/src/caffe/layers/cudnn_conv_layer.cu index 4a1a4c4f4f2..b4e802e13d1 100644 --- a/src/caffe/layers/cudnn_conv_layer.cu +++ b/src/caffe/layers/cudnn_conv_layer.cu @@ -101,12 +101,10 @@ void CuDNNConvolutionLayer::Backward_gpu(const vector*>& top, if (this->param_propagate_down_[0]) { weight = this->blobs_[0]->gpu_data(); weight_diff = this->blobs_[0]->mutable_gpu_diff(); - caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); } Dtype* bias_diff = NULL; if (this->bias_term_ && this->param_propagate_down_[1]) { bias_diff = this->blobs_[1]->mutable_gpu_diff(); - caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), bias_diff); } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); diff --git a/src/caffe/layers/data_layer.cpp b/src/caffe/layers/data_layer.cpp index 0f2d66776a9..161a75e0c8c 100644 --- a/src/caffe/layers/data_layer.cpp +++ b/src/caffe/layers/data_layer.cpp @@ -38,32 +38,17 @@ void DataLayer::DataLayerSetUp(const vector*>& bottom, cursor_->Next(); } } - // Read a data point, and use it to initialize the top blob. + // Read a data point, to initialize the prefetch and top blobs. Datum datum; datum.ParseFromString(cursor_->value()); + // Use data_transformer to infer the expected blob shape from datum. + vector top_shape = this->data_transformer_->InferBlobShape(datum); + this->transformed_data_.Reshape(top_shape); + // Reshape top[0] and prefetch_data according to the batch_size. 
+ top_shape[0] = this->layer_param_.data_param().batch_size(); + this->prefetch_data_.Reshape(top_shape); + top[0]->ReshapeLike(this->prefetch_data_); - bool force_color = this->layer_param_.data_param().force_encoded_color(); - if ((force_color && DecodeDatum(&datum, true)) || - DecodeDatumNative(&datum)) { - LOG(INFO) << "Decoding Datum"; - } - // image - int crop_size = this->layer_param_.transform_param().crop_size(); - if (crop_size > 0) { - top[0]->Reshape(this->layer_param_.data_param().batch_size(), - datum.channels(), crop_size, crop_size); - this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(), - datum.channels(), crop_size, crop_size); - this->transformed_data_.Reshape(1, datum.channels(), crop_size, crop_size); - } else { - top[0]->Reshape( - this->layer_param_.data_param().batch_size(), datum.channels(), - datum.height(), datum.width()); - this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(), - datum.channels(), datum.height(), datum.width()); - this->transformed_data_.Reshape(1, datum.channels(), - datum.height(), datum.width()); - } LOG(INFO) << "output data size: " << top[0]->num() << "," << top[0]->channels() << "," << top[0]->height() << "," << top[0]->width(); @@ -86,25 +71,17 @@ void DataLayer::InternalThreadEntry() { CHECK(this->prefetch_data_.count()); CHECK(this->transformed_data_.count()); - // Reshape on single input batches for inputs of varying dimension. + // Reshape according to the first datum of each batch + // on single input batches allows for inputs of varying dimension. 
const int batch_size = this->layer_param_.data_param().batch_size(); - const int crop_size = this->layer_param_.transform_param().crop_size(); - bool force_color = this->layer_param_.data_param().force_encoded_color(); - if (batch_size == 1 && crop_size == 0) { - Datum datum; - datum.ParseFromString(cursor_->value()); - if (datum.encoded()) { - if (force_color) { - DecodeDatum(&datum, true); - } else { - DecodeDatumNative(&datum); - } - } - this->prefetch_data_.Reshape(1, datum.channels(), - datum.height(), datum.width()); - this->transformed_data_.Reshape(1, datum.channels(), - datum.height(), datum.width()); - } + Datum datum; + datum.ParseFromString(cursor_->value()); + // Use data_transformer to infer the expected blob shape from datum. + vector top_shape = this->data_transformer_->InferBlobShape(datum); + this->transformed_data_.Reshape(top_shape); + // Reshape prefetch_data according to the batch_size. + top_shape[0] = batch_size; + this->prefetch_data_.Reshape(top_shape); Dtype* top_data = this->prefetch_data_.mutable_cpu_data(); Dtype* top_label = NULL; // suppress warnings about uninitialized variables @@ -112,48 +89,31 @@ void DataLayer::InternalThreadEntry() { if (this->output_labels_) { top_label = this->prefetch_label_.mutable_cpu_data(); } + timer.Start(); for (int item_id = 0; item_id < batch_size; ++item_id) { - timer.Start(); - // get a blob + // get a datum Datum datum; datum.ParseFromString(cursor_->value()); - - cv::Mat cv_img; - if (datum.encoded()) { - if (force_color) { - cv_img = DecodeDatumToCVMat(datum, true); - } else { - cv_img = DecodeDatumToCVMatNative(datum); - } - if (cv_img.channels() != this->transformed_data_.channels()) { - LOG(WARNING) << "Your dataset contains encoded images with mixed " - << "channel sizes. 
Consider adding a 'force_color' flag to the " - << "model definition, or rebuild your dataset using " - << "convert_imageset."; - } - } read_time += timer.MicroSeconds(); timer.Start(); - // Apply data transformations (mirror, scale, crop...) int offset = this->prefetch_data_.offset(item_id); this->transformed_data_.set_cpu_data(top_data + offset); - if (datum.encoded()) { - this->data_transformer_->Transform(cv_img, &(this->transformed_data_)); - } else { - this->data_transformer_->Transform(datum, &(this->transformed_data_)); - } + this->data_transformer_->Transform(datum, &(this->transformed_data_)); + // Copy label. if (this->output_labels_) { top_label[item_id] = datum.label(); } trans_time += timer.MicroSeconds(); - // go to the next iter + timer.Start(); + // go to the next item. cursor_->Next(); if (!cursor_->valid()) { DLOG(INFO) << "Restarting data prefetching from start."; cursor_->SeekToFirst(); } } + timer.Stop(); batch_timer.Stop(); DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms."; DLOG(INFO) << " Read time: " << read_time / 1000 << " ms."; diff --git a/src/caffe/layers/deconv_layer.cpp b/src/caffe/layers/deconv_layer.cpp index e6d65ab526b..a4612963b6b 100644 --- a/src/caffe/layers/deconv_layer.cpp +++ b/src/caffe/layers/deconv_layer.cpp @@ -39,13 +39,6 @@ void DeconvolutionLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->cpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_cpu_diff(); - if (this->param_propagate_down_[0]) { - caffe_set(this->blobs_[0]->count(), Dtype(0), weight_diff); - } - if (this->bias_term_ && this->param_propagate_down_[1]) { - caffe_set(this->blobs_[1]->count(), Dtype(0), - this->blobs_[1]->mutable_cpu_diff()); - } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->cpu_diff(); const Dtype* bottom_data = bottom[i]->cpu_data(); diff --git a/src/caffe/layers/deconv_layer.cu 
b/src/caffe/layers/deconv_layer.cu index 9198dd64c72..39bc4de8c66 100644 --- a/src/caffe/layers/deconv_layer.cu +++ b/src/caffe/layers/deconv_layer.cu @@ -31,13 +31,6 @@ void DeconvolutionLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); - if (this->param_propagate_down_[0]) { - caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); - } - if (this->bias_term_ && this->param_propagate_down_[1]) { - caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), - this->blobs_[1]->mutable_gpu_diff()); - } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); const Dtype* bottom_data = bottom[i]->gpu_data(); diff --git a/src/caffe/layers/flatten_layer.cpp b/src/caffe/layers/flatten_layer.cpp index 745f271ea45..f7e5c9c2172 100644 --- a/src/caffe/layers/flatten_layer.cpp +++ b/src/caffe/layers/flatten_layer.cpp @@ -9,9 +9,19 @@ namespace caffe { template void FlattenLayer::Reshape(const vector*>& bottom, const vector*>& top) { - vector top_shape(2); - top_shape[0] = bottom[0]->num(); - top_shape[1] = bottom[0]->count() / bottom[0]->num(); + const int start_axis = bottom[0]->CanonicalAxisIndex( + this->layer_param_.flatten_param().axis()); + const int end_axis = bottom[0]->CanonicalAxisIndex( + this->layer_param_.flatten_param().end_axis()); + vector top_shape; + for (int i = 0; i < start_axis; ++i) { + top_shape.push_back(bottom[0]->shape(i)); + } + const int flattened_dim = bottom[0]->count(start_axis, end_axis + 1); + top_shape.push_back(flattened_dim); + for (int i = end_axis + 1; i < bottom[0]->num_axes(); ++i) { + top_shape.push_back(bottom[0]->shape(i)); + } top[0]->Reshape(top_shape); CHECK_EQ(top[0]->count(), bottom[0]->count()); } diff --git a/src/caffe/layers/image_data_layer.cpp b/src/caffe/layers/image_data_layer.cpp index 38ebbd5ec14..18c035cba9d 100644 --- 
a/src/caffe/layers/image_data_layer.cpp +++ b/src/caffe/layers/image_data_layer.cpp @@ -62,21 +62,15 @@ void ImageDataLayer::DataLayerSetUp(const vector*>& bottom, // Read an image, and use it to initialize the top blob. cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, new_height, new_width, is_color); - const int channels = cv_img.channels(); - const int height = cv_img.rows; - const int width = cv_img.cols; - // image - const int crop_size = this->layer_param_.transform_param().crop_size(); + // Use data_transformer to infer the expected blob shape from a cv_image. + vector top_shape = this->data_transformer_->InferBlobShape(cv_img); + this->transformed_data_.Reshape(top_shape); + // Reshape prefetch_data and top[0] according to the batch_size. const int batch_size = this->layer_param_.image_data_param().batch_size(); - if (crop_size > 0) { - top[0]->Reshape(batch_size, channels, crop_size, crop_size); - this->prefetch_data_.Reshape(batch_size, channels, crop_size, crop_size); - this->transformed_data_.Reshape(1, channels, crop_size, crop_size); - } else { - top[0]->Reshape(batch_size, channels, height, width); - this->prefetch_data_.Reshape(batch_size, channels, height, width); - this->transformed_data_.Reshape(1, channels, height, width); - } + top_shape[0] = batch_size; + this->prefetch_data_.Reshape(top_shape); + top[0]->ReshapeLike(this->prefetch_data_); + LOG(INFO) << "output data size: " << top[0]->num() << "," << top[0]->channels() << "," << top[0]->height() << "," << top[0]->width(); @@ -107,19 +101,19 @@ void ImageDataLayer::InternalThreadEntry() { const int batch_size = image_data_param.batch_size(); const int new_height = image_data_param.new_height(); const int new_width = image_data_param.new_width(); - const int crop_size = this->layer_param_.transform_param().crop_size(); const bool is_color = image_data_param.is_color(); string root_folder = image_data_param.root_folder(); - // Reshape on single input batches for inputs of 
varying dimension. - if (batch_size == 1 && crop_size == 0 && new_height == 0 && new_width == 0) { - cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, - 0, 0, is_color); - this->prefetch_data_.Reshape(1, cv_img.channels(), - cv_img.rows, cv_img.cols); - this->transformed_data_.Reshape(1, cv_img.channels(), - cv_img.rows, cv_img.cols); - } + // Reshape according to the first image of each batch + // on single input batches allows for inputs of varying dimension. + cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, + new_height, new_width, is_color); + // Use data_transformer to infer the expected blob shape from a cv_img. + vector top_shape = this->data_transformer_->InferBlobShape(cv_img); + this->transformed_data_.Reshape(top_shape); + // Reshape prefetch_data according to the batch_size. + top_shape[0] = batch_size; + this->prefetch_data_.Reshape(top_shape); Dtype* prefetch_data = this->prefetch_data_.mutable_cpu_data(); Dtype* prefetch_label = this->prefetch_label_.mutable_cpu_data(); diff --git a/src/caffe/layers/inner_product_layer.cpp b/src/caffe/layers/inner_product_layer.cpp index 89e0c8fbad7..83c3235eb71 100644 --- a/src/caffe/layers/inner_product_layer.cpp +++ b/src/caffe/layers/inner_product_layer.cpp @@ -101,13 +101,13 @@ void InnerProductLayer::Backward_cpu(const vector*>& top, const Dtype* bottom_data = bottom[0]->cpu_data(); // Gradient with respect to weight caffe_cpu_gemm(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., - top_diff, bottom_data, (Dtype)0., this->blobs_[0]->mutable_cpu_diff()); + top_diff, bottom_data, (Dtype)1., this->blobs_[0]->mutable_cpu_diff()); } if (bias_term_ && this->param_propagate_down_[1]) { const Dtype* top_diff = top[0]->cpu_diff(); // Gradient with respect to bias caffe_cpu_gemv(CblasTrans, M_, N_, (Dtype)1., top_diff, - bias_multiplier_.cpu_data(), (Dtype)0., + bias_multiplier_.cpu_data(), (Dtype)1., this->blobs_[1]->mutable_cpu_diff()); } if (propagate_down[0]) { diff 
--git a/src/caffe/layers/inner_product_layer.cu b/src/caffe/layers/inner_product_layer.cu index a9e1784a205..dd90cac12a8 100644 --- a/src/caffe/layers/inner_product_layer.cu +++ b/src/caffe/layers/inner_product_layer.cu @@ -33,13 +33,13 @@ void InnerProductLayer::Backward_gpu(const vector*>& top, const Dtype* bottom_data = bottom[0]->gpu_data(); // Gradient with respect to weight caffe_gpu_gemm(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., - top_diff, bottom_data, (Dtype)0., this->blobs_[0]->mutable_gpu_diff()); + top_diff, bottom_data, (Dtype)1., this->blobs_[0]->mutable_gpu_diff()); } if (bias_term_ && this->param_propagate_down_[1]) { const Dtype* top_diff = top[0]->gpu_diff(); // Gradient with respect to bias caffe_gpu_gemv(CblasTrans, M_, N_, (Dtype)1., top_diff, - bias_multiplier_.gpu_data(), (Dtype)0., + bias_multiplier_.gpu_data(), (Dtype)1., this->blobs_[1]->mutable_gpu_diff()); } if (propagate_down[0]) { diff --git a/src/caffe/layers/lrn_layer.cu b/src/caffe/layers/lrn_layer.cu index 24aa6a30130..001b3c34ac1 100644 --- a/src/caffe/layers/lrn_layer.cu +++ b/src/caffe/layers/lrn_layer.cu @@ -7,44 +7,46 @@ namespace caffe { template -__global__ void LRNFillScale(const int nthreads, const Dtype* in, +__global__ void LRNFillScale(const int nthreads, const Dtype* const in, const int num, const int channels, const int height, const int width, const int size, const Dtype alpha_over_size, - const Dtype k, Dtype* scale) { + const Dtype k, Dtype* const scale) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset - int w = index % width; - int h = (index / width) % height; - int n = index / width / height; - int offset = (n * channels * height + h) * width + w; - int step = height * width; - in += offset; - scale += offset; + const int w = index % width; + const int h = (index / width) % height; + const int n = index / width / height; + const int offset = (n * channels * height + h) * width + w; + const int step = height * width; + const Dtype* const 
in_off = in + offset; + Dtype* const scale_off = scale + offset; int head = 0; - int pre_pad = (size - 1) / 2; - int post_pad = size - pre_pad - 1; + const int pre_pad = (size - 1) / 2; + const int post_pad = size - pre_pad - 1; Dtype accum_scale = 0; // fill the scale at [n, :, h, w] // accumulate values while (head < post_pad && head < channels) { - accum_scale += in[head * step] * in[head * step]; + accum_scale += in_off[head * step] * in_off[head * step]; ++head; } // both add and subtract while (head < channels) { - accum_scale += in[head * step] * in[head * step]; + accum_scale += in_off[head * step] * in_off[head * step]; if (head - size >= 0) { - accum_scale -= in[(head - size) * step] * in[(head - size) * step]; + accum_scale -= in_off[(head - size) * step] + * in_off[(head - size) * step]; } - scale[(head - post_pad) * step] = k + accum_scale * alpha_over_size; + scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; ++head; } // subtract only while (head < channels + post_pad) { if (head - size >= 0) { - accum_scale -= in[(head - size) * step] * in[(head - size) * step]; + accum_scale -= in_off[(head - size) * step] + * in_off[(head - size) * step]; } - scale[(head - post_pad) * step] = k + accum_scale * alpha_over_size; + scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; ++head; } } @@ -68,8 +70,8 @@ void LRNLayer::Forward_gpu(const vector*>& bottom, // TODO: check if it would be faster to just put it into the previous kernel. 
template -__global__ void LRNComputeOutput(const int nthreads, const Dtype* in, - const Dtype* scale, const Dtype negative_beta, Dtype* out) { +__global__ void LRNComputeOutput(const int nthreads, const Dtype* const in, + const Dtype* const scale, const Dtype negative_beta, Dtype* const out) { CUDA_KERNEL_LOOP(index, nthreads) { out[index] = in[index] * pow(scale[index], negative_beta); } @@ -118,56 +120,58 @@ void LRNLayer::Backward_gpu(const vector*>& top, } template -__global__ void LRNComputeDiff(const int nthreads, const Dtype* bottom_data, - const Dtype* top_data, const Dtype* scale, const Dtype* top_diff, +__global__ void LRNComputeDiff(const int nthreads, + const Dtype* const bottom_data, const Dtype* const top_data, + const Dtype* const scale, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int size, const Dtype negative_beta, - const Dtype cache_ratio, - Dtype* bottom_diff) { + const Dtype cache_ratio, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset - int w = index % width; - int h = (index / width) % height; - int n = index / width / height; - int offset = (n * channels * height + h) * width + w; - int step = height * width; - bottom_data += offset; - top_data += offset; - scale += offset; - top_diff += offset; - bottom_diff += offset; + const int w = index % width; + const int h = (index / width) % height; + const int n = index / width / height; + const int offset = (n * channels * height + h) * width + w; + const int step = height * width; + const Dtype* const bottom_off = bottom_data + offset; + const Dtype* const top_off = top_data + offset; + const Dtype* const scale_off = scale + offset; + const Dtype* const top_diff_off = top_diff + offset; + Dtype* const bottom_diff_off = bottom_diff + offset; int head = 0; - int pre_pad = size - (size + 1) / 2; - int post_pad = size - pre_pad - 1; + const int pre_pad = size - (size + 1) / 2; + const int post_pad 
= size - pre_pad - 1; Dtype accum_ratio = 0; // accumulate values while (head < post_pad && head < channels) { - accum_ratio += top_diff[head * step] * top_data[head * step] / - scale[head * step]; + accum_ratio += top_diff_off[head * step] * top_off[head * step] / + scale_off[head * step]; ++head; } // both add and subtract while (head < channels) { - accum_ratio += top_diff[head * step] * top_data[head * step] / - scale[head * step]; + accum_ratio += top_diff_off[head * step] * top_off[head * step] / + scale_off[head * step]; if (head - size >= 0) { - accum_ratio -= top_diff[(head - size) * step] * - top_data[(head - size) * step] / scale[(head - size) * step]; + accum_ratio -= top_diff_off[(head - size) * step] * + top_off[(head - size) * step] / scale_off[(head - size) * step]; } - bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] - * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * - bottom_data[(head - post_pad) * step] * accum_ratio; + bottom_diff_off[(head - post_pad) * step] = + top_diff_off[(head - post_pad) * step] + * pow(scale_off[(head - post_pad) * step], negative_beta) + - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; ++head; } // subtract only while (head < channels + post_pad) { if (head - size >= 0) { - accum_ratio -= top_diff[(head - size) * step] * - top_data[(head - size) * step] / scale[(head - size) * step]; + accum_ratio -= top_diff_off[(head - size) * step] * + top_off[(head - size) * step] / scale_off[(head - size) * step]; } - bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] - * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * - bottom_data[(head - post_pad) * step] * accum_ratio; + bottom_diff_off[(head - post_pad) * step] = + top_diff_off[(head - post_pad) * step] + * pow(scale_off[(head - post_pad) * step], negative_beta) + - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; ++head; } } diff --git 
a/src/caffe/layers/mvn_layer.cpp b/src/caffe/layers/mvn_layer.cpp index b74d7b4f300..3e79bddcdde 100644 --- a/src/caffe/layers/mvn_layer.cpp +++ b/src/caffe/layers/mvn_layer.cpp @@ -22,6 +22,7 @@ void MVNLayer::Reshape(const vector*>& bottom, bottom[0]->height(), bottom[0]->width()); Dtype* multiplier_data = sum_multiplier_.mutable_cpu_data(); caffe_set(sum_multiplier_.count(), Dtype(1), multiplier_data); + eps_ = this->layer_param_.mvn_param().eps(); } template @@ -36,7 +37,6 @@ void MVNLayer::Forward_cpu(const vector*>& bottom, num = bottom[0]->num() * bottom[0]->channels(); int dim = bottom[0]->count() / num; - Dtype eps = 1e-10; if (this->layer_param_.mvn_param().normalize_variance()) { // put the squares of bottom into temp_ @@ -66,7 +66,7 @@ void MVNLayer::Forward_cpu(const vector*>& bottom, caffe_powx(variance_.count(), variance_.cpu_data(), Dtype(0.5), variance_.mutable_cpu_data()); - caffe_add_scalar(variance_.count(), eps, variance_.mutable_cpu_data()); + caffe_add_scalar(variance_.count(), eps_, variance_.mutable_cpu_data()); caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., variance_.cpu_data(), sum_multiplier_.cpu_data(), 0., @@ -102,7 +102,6 @@ void MVNLayer::Backward_cpu(const vector*>& top, num = bottom[0]->num() * bottom[0]->channels(); int dim = bottom[0]->count() / num; - Dtype eps = 1e-10; if (this->layer_param_.mvn_param().normalize_variance()) { caffe_mul(temp_.count(), top_data, top_diff, bottom_diff); @@ -125,24 +124,6 @@ void MVNLayer::Backward_cpu(const vector*>& top, // put the squares of bottom into temp_ caffe_powx(temp_.count(), bottom_data, Dtype(2), temp_.mutable_cpu_data()); - - // computes variance using var(X) = E(X^2) - (EX)^2 - caffe_cpu_gemv(CblasNoTrans, num, dim, 1. / dim, bottom_data, - sum_multiplier_.cpu_data(), 0., mean_.mutable_cpu_data()); // EX - caffe_cpu_gemv(CblasNoTrans, num, dim, 1. 
/ dim, temp_.cpu_data(), - sum_multiplier_.cpu_data(), 0., - variance_.mutable_cpu_data()); // E(X^2) - caffe_powx(mean_.count(), mean_.cpu_data(), Dtype(2), - temp_.mutable_cpu_data()); // (EX)^2 - caffe_sub(mean_.count(), variance_.cpu_data(), temp_.cpu_data(), - variance_.mutable_cpu_data()); // variance - - // normalize variance - caffe_powx(variance_.count(), variance_.cpu_data(), Dtype(0.5), - variance_.mutable_cpu_data()); - - caffe_add_scalar(variance_.count(), eps, variance_.mutable_cpu_data()); - caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., variance_.cpu_data(), sum_multiplier_.cpu_data(), 0., temp_.mutable_cpu_data()); diff --git a/src/caffe/layers/mvn_layer.cu b/src/caffe/layers/mvn_layer.cu index 0667f50380f..3888a0c7106 100644 --- a/src/caffe/layers/mvn_layer.cu +++ b/src/caffe/layers/mvn_layer.cu @@ -36,8 +36,6 @@ void MVNLayer::Forward_gpu(const vector*>& bottom, caffe_gpu_sub(mean_.count(), variance_.gpu_data(), temp_.gpu_data(), variance_.mutable_gpu_data()); // variance - Dtype eps = 1e-10; - // do mean and variance normalization // subtract mean caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, -1., @@ -50,7 +48,7 @@ void MVNLayer::Forward_gpu(const vector*>& bottom, caffe_gpu_powx(variance_.count(), variance_.gpu_data(), Dtype(0.5), variance_.mutable_gpu_data()); - caffe_gpu_add_scalar(variance_.count(), eps, variance_.mutable_gpu_data()); + caffe_gpu_add_scalar(variance_.count(), eps_, variance_.mutable_gpu_data()); caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., variance_.gpu_data(), sum_multiplier_.gpu_data(), 0., @@ -87,8 +85,6 @@ void MVNLayer::Backward_gpu(const vector*>& top, int dim = bottom[0]->count() / num; - Dtype eps = 1e-10; - if (this->layer_param_.mvn_param().normalize_variance()) { caffe_gpu_mul(temp_.count(), top_data, top_diff, bottom_diff); caffe_gpu_gemv(CblasNoTrans, num, dim, 1., bottom_diff, @@ -111,23 +107,6 @@ void MVNLayer::Backward_gpu(const vector*>& top, 
caffe_gpu_powx(temp_.count(), bottom_data, Dtype(2), temp_.mutable_gpu_data()); - // computes variance using var(X) = E(X^2) - (EX)^2 - caffe_gpu_gemv(CblasNoTrans, num, dim, 1. / dim, bottom_data, - sum_multiplier_.gpu_data(), 0., mean_.mutable_gpu_data()); // EX - caffe_gpu_gemv(CblasNoTrans, num, dim, 1. / dim, temp_.gpu_data(), - sum_multiplier_.gpu_data(), 0., - variance_.mutable_gpu_data()); // E(X^2) - caffe_gpu_powx(mean_.count(), mean_.gpu_data(), Dtype(2), - temp_.mutable_gpu_data()); // (EX)^2 - caffe_gpu_sub(mean_.count(), variance_.gpu_data(), temp_.gpu_data(), - variance_.mutable_gpu_data()); // variance - - // normalize variance - caffe_gpu_powx(variance_.count(), variance_.gpu_data(), Dtype(0.5), - variance_.mutable_gpu_data()); - - caffe_gpu_add_scalar(variance_.count(), eps, variance_.mutable_gpu_data()); - caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., variance_.gpu_data(), sum_multiplier_.gpu_data(), 0., temp_.mutable_gpu_data()); diff --git a/src/caffe/layers/pooling_layer.cu b/src/caffe/layers/pooling_layer.cu index d1d48501af3..ca4b13f7c41 100644 --- a/src/caffe/layers/pooling_layer.cu +++ b/src/caffe/layers/pooling_layer.cu @@ -9,31 +9,32 @@ namespace caffe { template -__global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data, - const int num, const int channels, const int height, - const int width, const int pooled_height, const int pooled_width, - const int kernel_h, const int kernel_w, const int stride_h, - const int stride_w, const int pad_h, const int pad_w, Dtype* top_data, - int* mask, Dtype* top_mask) { +__global__ void MaxPoolForward(const int nthreads, + const Dtype* const bottom_data, const int num, const int channels, + const int height, const int width, const int pooled_height, + const int pooled_width, const int kernel_h, const int kernel_w, + const int stride_h, const int stride_w, const int pad_h, const int pad_w, + Dtype* const top_data, int* mask, Dtype* top_mask) { CUDA_KERNEL_LOOP(index, 
nthreads) { - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int c = (index / pooled_width / pooled_height) % channels; - int n = index / pooled_width / pooled_height / channels; + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; - int hend = min(hstart + kernel_h, height); - int wend = min(wstart + kernel_w, width); + const int hend = min(hstart + kernel_h, height); + const int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); Dtype maxval = -FLT_MAX; int maxidx = -1; - bottom_data += (n * channels + c) * height * width; + const Dtype* const bottom_slice = + bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - if (bottom_data[h * width + w] > maxval) { + if (bottom_slice[h * width + w] > maxval) { maxidx = h * width + w; - maxval = bottom_data[maxidx]; + maxval = bottom_slice[maxidx]; } } } @@ -47,30 +48,32 @@ __global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data, } template -__global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data, - const int num, const int channels, const int height, - const int width, const int pooled_height, const int pooled_width, - const int kernel_h, const int kernel_w, const int stride_h, - const int stride_w, const int pad_h, const int pad_w, Dtype* top_data) { +__global__ void AvePoolForward(const int nthreads, + const Dtype* const bottom_data, const int num, const int channels, + const int height, const int width, const int pooled_height, + const int pooled_width, const int kernel_h, const int kernel_w, + const int stride_h, const int stride_w, const int pad_h, const int pad_w, + Dtype* const top_data) 
{ CUDA_KERNEL_LOOP(index, nthreads) { - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int c = (index / pooled_width / pooled_height) % channels; - int n = index / pooled_width / pooled_height / channels; + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); - int pool_size = (hend - hstart) * (wend - wstart); + const int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); Dtype aveval = 0; - bottom_data += (n * channels + c) * height * width; + const Dtype* const bottom_slice = + bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - aveval += bottom_data[h * width + w]; + aveval += bottom_slice[h * width + w]; } } top_data[index] = aveval / pool_size; @@ -79,37 +82,38 @@ __global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data, template __global__ void StoPoolForwardTrain(const int nthreads, - const Dtype* bottom_data, + const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, - const int stride_w, Dtype* rand_idx, Dtype* top_data) { + const int stride_w, Dtype* const rand_idx, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int c = (index / pooled_width / pooled_height) % channels; - int n = index / pooled_width / pooled_height / channels; - int hstart 
= ph * stride_h; - int hend = min(hstart + kernel_h, height); - int wstart = pw * stride_w; - int wend = min(wstart + kernel_w, width); + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + const int hstart = ph * stride_h; + const int hend = min(hstart + kernel_h, height); + const int wstart = pw * stride_w; + const int wend = min(wstart + kernel_w, width); Dtype cumsum = 0.; - bottom_data += (n * channels + c) * height * width; + const Dtype* const bottom_slice = + bottom_data + (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - cumsum += bottom_data[h * width + w]; + cumsum += bottom_slice[h * width + w]; } } - float thres = rand_idx[index] * cumsum; + const float thres = rand_idx[index] * cumsum; // Second pass: get value, and set index. 
cumsum = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - cumsum += bottom_data[h * width + w]; + cumsum += bottom_slice[h * width + w]; if (cumsum >= thres) { rand_idx[index] = ((n * channels + c) * height + h) * width + w; - top_data[index] = bottom_data[h * width + w]; + top_data[index] = bottom_slice[h * width + w]; return; } } @@ -120,29 +124,30 @@ __global__ void StoPoolForwardTrain(const int nthreads, template __global__ void StoPoolForwardTest(const int nthreads, - const Dtype* bottom_data, + const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, - const int stride_w, Dtype* top_data) { + const int stride_w, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int c = (index / pooled_width / pooled_height) % channels; - int n = index / pooled_width / pooled_height / channels; - int hstart = ph * stride_h; - int hend = min(hstart + kernel_h, height); - int wstart = pw * stride_w; - int wend = min(wstart + kernel_w, width); + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + const int hstart = ph * stride_h; + const int hend = min(hstart + kernel_h, height); + const int wstart = pw * stride_w; + const int wend = min(wstart + kernel_w, width); // We set cumsum to be 0 to avoid divide-by-zero problems Dtype cumsum = FLT_MIN; Dtype cumvalues = 0.; - bottom_data += (n * channels + c) * height * width; + const Dtype* const bottom_slice = + bottom_data + (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - cumsum += bottom_data[h 
* width + w]; - cumvalues += bottom_data[h * width + w] * bottom_data[h * width + w]; + cumsum += bottom_slice[h * width + w]; + cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w]; } } top_data[index] = cumvalues / cumsum; @@ -210,43 +215,43 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, template -__global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, - const int* mask, const Dtype* top_mask, const int num, const int channels, - const int height, const int width, const int pooled_height, - const int pooled_width, const int kernel_h, const int kernel_w, - const int stride_h, const int stride_w, const int pad_h, const int pad_w, - Dtype* bottom_diff) { +__global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff, + const int* const mask, const Dtype* const top_mask, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset - int w = index % width; - int h = (index / width) % height; - int c = (index / width / height) % channels; - int n = index / width / height / channels; - int phstart = - (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; - int phend = min((h + pad_h) / stride_h + 1, pooled_height); - int pwstart = - (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; - int pwend = min((w + pad_w) / stride_w + 1, pooled_width); + const int w = index % width; + const int h = (index / width) % height; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = + (h + pad_h < kernel_h) ? 
0 : (h + pad_h - kernel_h) / stride_h + 1; + const int phend = min((h + pad_h) / stride_h + 1, pooled_height); + const int pwstart = + (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; + const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); Dtype gradient = 0; - int offset = (n * channels + c) * pooled_height * pooled_width; - top_diff += offset; + const int offset = (n * channels + c) * pooled_height * pooled_width; + const Dtype* const top_diff_slice = top_diff + offset; if (mask) { - mask += offset; + const int* const mask_slice = mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { - if (mask[ph * pooled_width + pw] == h * width + w) { - gradient += top_diff[ph * pooled_width + pw]; + if (mask_slice[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_slice[ph * pooled_width + pw]; } } } } else { - top_mask += offset; + const Dtype* const top_mask_slice = top_mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { - if (top_mask[ph * pooled_width + pw] == h * width + w) { - gradient += top_diff[ph * pooled_width + pw]; + if (top_mask_slice[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_slice[ph * pooled_width + pw]; } } } @@ -256,25 +261,26 @@ __global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, } template -__global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff, +__global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, - Dtype* bottom_diff) { + Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset - int w = index % width + pad_w; - int h = (index / 
width) % height + pad_h; - int c = (index / width / height) % channels; - int n = index / width / height / channels; - int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; - int phend = min(h / stride_h + 1, pooled_height); - int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; - int pwend = min(w / stride_w + 1, pooled_width); + const int w = index % width + pad_w; + const int h = (index / width) % height + pad_h; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + const int phend = min(h / stride_h + 1, pooled_height); + const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; + const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; - top_diff += (n * channels + c) * pooled_height * pooled_width; + const Dtype* const top_diff_slice = + top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size @@ -283,7 +289,7 @@ __global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff, int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); - gradient += top_diff[ph * pooled_width + pw] / pool_size; + gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; } } bottom_diff[index] = gradient; @@ -293,29 +299,31 @@ __global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff, template __global__ void StoPoolBackward(const int nthreads, - const Dtype* rand_idx, const Dtype* top_diff, + const Dtype* const rand_idx, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, - const int stride_w, Dtype* 
bottom_diff) { + const int stride_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset - int w = index % width; - int h = (index / width) % height; - int c = (index / width / height) % channels; - int n = index / width / height / channels; - int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; - int phend = min(h / stride_h + 1, pooled_height); - int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; - int pwend = min(w / stride_w + 1, pooled_width); + const int w = index % width; + const int h = (index / width) % height; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + const int phend = min(h / stride_h + 1, pooled_height); + const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; + const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; - rand_idx += (n * channels + c) * pooled_height * pooled_width; - top_diff += (n * channels + c) * pooled_height * pooled_width; + const Dtype* const rand_idx_slice = + rand_idx + (n * channels + c) * pooled_height * pooled_width; + const Dtype* const top_diff_slice = + top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { - gradient += top_diff[ph * pooled_width + pw] * - (index == static_cast(rand_idx[ph * pooled_width + pw])); + gradient += top_diff_slice[ph * pooled_width + pw] * + (index == static_cast(rand_idx_slice[ph * pooled_width + pw])); } } bottom_diff[index] = gradient; diff --git a/src/caffe/layers/prelu_layer.cpp b/src/caffe/layers/prelu_layer.cpp index 7119a274dd3..81831755512 100644 --- a/src/caffe/layers/prelu_layer.cpp +++ b/src/caffe/layers/prelu_layer.cpp @@ -45,7 +45,8 @@ void PReLULayer::LayerSetUp(const vector*>& bottom, // Propagate gradients to the 
parameters (as directed by backward pass). this->param_propagate_down_.resize(this->blobs_.size(), true); - multiplier_.Reshape(vector(1, bottom[0]->count() / bottom[0]->num())); + multiplier_.Reshape(vector(1, bottom[0]->count(1))); + backward_buff_.Reshape(vector(1, bottom[0]->count(1))); caffe_set(multiplier_.count(), Dtype(1), multiplier_.mutable_cpu_data()); } @@ -112,7 +113,6 @@ void PReLULayer::Backward_cpu(const vector*>& top, // keep top_diff unchanged. if (this->param_propagate_down_[0]) { Dtype* slope_diff = this->blobs_[0]->mutable_cpu_diff(); - caffe_set(this->blobs_[0]->count(), Dtype(0), slope_diff); for (int i = 0; i < count; ++i) { int c = (i / dim) % channels / div_factor; slope_diff[c] += top_diff[i] * bottom_data[i] * (bottom_data[i] <= 0); diff --git a/src/caffe/layers/prelu_layer.cu b/src/caffe/layers/prelu_layer.cu index fd0eda5d191..e1f20048f60 100644 --- a/src/caffe/layers/prelu_layer.cu +++ b/src/caffe/layers/prelu_layer.cu @@ -75,38 +75,36 @@ void PReLULayer::Backward_gpu(const vector*>& top, bottom_data = bottom_memory_.gpu_data(); } - // Propagte to param + // Propagate to param // Since to write bottom diff will affect top diff if top and bottom blobs // are identical (in-place computaion), we first compute param backward to // keep top_diff unchanged. 
if (this->param_propagate_down_[0]) { Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff(); - // slope_diff is set as 0, then accumulated over batches - caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), slope_diff); int cdim = channels * dim; Dtype dsum = 0.; for (int n = 0; n < bottom[0]->num(); ++n) { - Dtype* temp_buff = multiplier_.mutable_gpu_diff(); // compute element-wise diff // NOLINT_NEXT_LINE(whitespace/operators) - PReLUParamBackward<<<<>>( cdim, top_diff + top[0]->offset(n), - bottom_data + bottom[0]->offset(n), multiplier_.mutable_gpu_diff()); + bottom_data + bottom[0]->offset(n), + backward_buff_.mutable_gpu_diff()); CUDA_POST_KERNEL_CHECK; if (channel_shared_) { Dtype d; - caffe_gpu_dot(channels * dim, multiplier_.gpu_diff(), + caffe_gpu_dot(channels * dim, backward_buff_.gpu_diff(), multiplier_.gpu_data(), &d); dsum += d; } else { caffe_gpu_gemv(CblasNoTrans, channels, dim, 1., - multiplier_.gpu_diff(), multiplier_.gpu_data(), 1., + backward_buff_.gpu_diff(), multiplier_.gpu_data(), 1., slope_diff); } } if (channel_shared_) { - caffe_gpu_set(this->blobs_[0]->count(), Dtype(dsum), slope_diff); + caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum), slope_diff); } } // Propagate to bottom diff --git a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp index 077d949981c..cc236fe1e8e 100644 --- a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp +++ b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp @@ -71,7 +71,7 @@ void SigmoidCrossEntropyLossLayer::Backward_cpu( } #ifdef CPU_ONLY -STUB_GPU(SigmoidCrossEntropyLossLayer); +STUB_GPU_BACKWARD(SigmoidCrossEntropyLossLayer, Backward); #endif INSTANTIATE_CLASS(SigmoidCrossEntropyLossLayer); diff --git a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu index 08f7f492297..547fa80c72f 100644 --- a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu +++ 
b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu @@ -8,26 +8,6 @@ namespace caffe { -template -void SigmoidCrossEntropyLossLayer::Forward_gpu( - const vector*>& bottom, const vector*>& top) { - // The forward pass computes the sigmoid outputs. - sigmoid_bottom_vec_[0] = bottom[0]; - sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_); - // Compute the loss (negative log likelihood) - const int count = bottom[0]->count(); - const int num = bottom[0]->num(); - // Stable version of loss computation from input data - const Dtype* input_data = bottom[0]->cpu_data(); - const Dtype* target = bottom[1]->cpu_data(); - Dtype loss = 0; - for (int i = 0; i < count; ++i) { - loss -= input_data[i] * (target[i] - (input_data[i] >= 0)) - - log(1 + exp(input_data[i] - 2 * input_data[i] * (input_data[i] >= 0))); - } - top[0]->mutable_cpu_data()[0] = loss / num; -} - template void SigmoidCrossEntropyLossLayer::Backward_gpu( const vector*>& top, const vector& propagate_down, @@ -51,7 +31,7 @@ void SigmoidCrossEntropyLossLayer::Backward_gpu( } } -INSTANTIATE_LAYER_GPU_FUNCS(SigmoidCrossEntropyLossLayer); +INSTANTIATE_LAYER_GPU_BACKWARD(SigmoidCrossEntropyLossLayer); } // namespace caffe diff --git a/src/caffe/layers/slice_layer.cu b/src/caffe/layers/slice_layer.cu index e6e65677bd8..796841d3f52 100644 --- a/src/caffe/layers/slice_layer.cu +++ b/src/caffe/layers/slice_layer.cu @@ -6,22 +6,41 @@ namespace caffe { +template +__global__ void Slice(const int nthreads, const Dtype* in_data, + const bool forward, const int num_slices, const int slice_size, + const int bottom_slice_axis, const int top_slice_axis, + const int offset_slice_axis, Dtype* out_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int total_slice_size = slice_size * top_slice_axis; + const int slice_num = index / total_slice_size; + const int slice_index = index % total_slice_size; + const int bottom_index = slice_index + + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size; + if 
(forward) { + out_data[index] = in_data[bottom_index]; + } else { + out_data[bottom_index] = in_data[index]; + } + } +} + template void SliceLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { int offset_slice_axis = 0; const Dtype* bottom_data = bottom[0]->gpu_data(); const int bottom_slice_axis = bottom[0]->shape(slice_axis_); + const bool kForward = true; for (int i = 0; i < top.size(); ++i) { Dtype* top_data = top[i]->mutable_gpu_data(); const int top_slice_axis = top[i]->shape(slice_axis_); - for (int n = 0; n < num_slices_; ++n) { - const int top_offset = n * top_slice_axis * slice_size_; - const int bottom_offset = - (n * bottom_slice_axis + offset_slice_axis) * slice_size_; - caffe_copy(top_slice_axis * slice_size_, - bottom_data + bottom_offset, top_data + top_offset); - } + const int top_slice_size = top_slice_axis * slice_size_; + const int nthreads = top_slice_size * num_slices_; + Slice // NOLINT_NEXT_LINE(whitespace/operators) + <<>>( + nthreads, bottom_data, kForward, num_slices_, slice_size_, + bottom_slice_axis, top_slice_axis, offset_slice_axis, top_data); offset_slice_axis += top_slice_axis; } } @@ -33,16 +52,16 @@ void SliceLayer::Backward_gpu(const vector*>& top, int offset_slice_axis = 0; Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int bottom_slice_axis = bottom[0]->shape(slice_axis_); + const bool kForward = false; for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); const int top_slice_axis = top[i]->shape(slice_axis_); - for (int n = 0; n < num_slices_; ++n) { - const int top_offset = n * top_slice_axis * slice_size_; - const int bottom_offset = - (n * bottom_slice_axis + offset_slice_axis) * slice_size_; - caffe_copy(top_slice_axis * slice_size_, - top_diff + top_offset, bottom_diff + bottom_offset); - } + const int top_slice_size = top_slice_axis * slice_size_; + const int nthreads = top_slice_size * num_slices_; + Slice // NOLINT_NEXT_LINE(whitespace/operators) + <<>>( + nthreads, 
top_diff, kForward, num_slices_, slice_size_, + bottom_slice_axis, top_slice_axis, offset_slice_axis, bottom_diff); offset_slice_axis += top_slice_axis; } } diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp index 888eec1d501..a18ee63818e 100644 --- a/src/caffe/net.cpp +++ b/src/caffe/net.cpp @@ -79,10 +79,17 @@ void Net::Init(const NetParameter& in_param) { } // Setup layer. const LayerParameter& layer_param = param.layer(layer_id); + if (layer_param.propagate_down_size() > 0) { + CHECK_EQ(layer_param.propagate_down_size(), + layer_param.bottom_size()) + << "propagate_down param must be specified " + << "either 0 or bottom_size times "; + } layers_.push_back(LayerRegistry::CreateLayer(layer_param)); layer_names_.push_back(layer_param.name()); LOG(INFO) << "Creating Layer " << layer_param.name(); bool need_backward = false; + // Figure out this layer's input and output for (int bottom_id = 0; bottom_id < layer_param.bottom_size(); ++bottom_id) { @@ -151,15 +158,33 @@ void Net::Init(const NetParameter& in_param) { // Go through the net backwards to determine which blobs contribute to the // loss. We can skip backward computation for blobs that don't contribute // to the loss. 
+ // Also checks if all bottom blobs don't need backward computation (possible + // because the skip_propagate_down param) and so we can skip bacward + // computation for the entire layer set blobs_under_loss; + set blobs_skip_backp; for (int layer_id = layers_.size() - 1; layer_id >= 0; --layer_id) { bool layer_contributes_loss = false; + bool layer_skip_propagate_down = true; for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) { const string& blob_name = blob_names_[top_id_vecs_[layer_id][top_id]]; if (layers_[layer_id]->loss(top_id) || (blobs_under_loss.find(blob_name) != blobs_under_loss.end())) { layer_contributes_loss = true; + } + if (blobs_skip_backp.find(blob_name) == blobs_skip_backp.end()) { + layer_skip_propagate_down = false; + } + if (layer_contributes_loss && !layer_skip_propagate_down) break; + } + // If this layer can skip backward computation, also all his bottom blobs + // don't need backpropagation + if (layer_need_backward_[layer_id] && layer_skip_propagate_down) { + layer_need_backward_[layer_id] = false; + for (int bottom_id = 0; bottom_id < bottom_vecs_[layer_id].size(); + ++bottom_id) { + bottom_need_backward_[layer_id][bottom_id] = false; } } if (!layer_contributes_loss) { layer_need_backward_[layer_id] = false; } @@ -178,6 +203,11 @@ void Net::Init(const NetParameter& in_param) { } else { bottom_need_backward_[layer_id][bottom_id] = false; } + if (!bottom_need_backward_[layer_id][bottom_id]) { + const string& blob_name = + blob_names_[bottom_id_vecs_[layer_id][bottom_id]]; + blobs_skip_backp.insert(blob_name); + } } } // Handle force_backward if needed. @@ -367,9 +397,9 @@ void Net::AppendTop(const NetParameter& param, const int layer_id, // Helper for Net::Init: add a new bottom blob to the net. 
template -int Net::AppendBottom(const NetParameter& param, - const int layer_id, const int bottom_id, - set* available_blobs, map* blob_name_to_idx) { +int Net::AppendBottom(const NetParameter& param, const int layer_id, + const int bottom_id, set* available_blobs, + map* blob_name_to_idx) { const LayerParameter& layer_param = param.layer(layer_id); const string& blob_name = layer_param.bottom(bottom_id); if (available_blobs->find(blob_name) == available_blobs->end()) { @@ -381,7 +411,12 @@ int Net::AppendBottom(const NetParameter& param, bottom_vecs_[layer_id].push_back(blobs_[blob_id].get()); bottom_id_vecs_[layer_id].push_back(blob_id); available_blobs->erase(blob_name); - const bool need_backward = blob_need_backward_[blob_id]; + bool propagate_down = true; + // Check if the backpropagation on bottom_id should be skipped + if (layer_param.propagate_down_size() > 0) + propagate_down = layer_param.propagate_down(bottom_id); + const bool need_backward = blob_need_backward_[blob_id] && + propagate_down; bottom_need_backward_[layer_id].push_back(need_backward); return blob_id; } @@ -410,7 +445,7 @@ void Net::AppendParam(const NetParameter& param, const int layer_id, // (i.e., not given a param_name) or explicitly given a name that we // haven't already seen. 
param_owners_.push_back(-1); - if (param_size) { + if (param_name.size()) { param_names_index_[param_name] = net_param_id; } } else { @@ -470,7 +505,6 @@ Dtype Net::ForwardFromTo(int start, int end) { } for (int i = start; i <= end; ++i) { // LOG(ERROR) << "Forwarding " << layer_names_[i]; - layers_[i]->Reshape(bottom_vecs_[i], top_vecs_[i]); Dtype layer_loss = layers_[i]->Forward(bottom_vecs_[i], top_vecs_[i]); loss += layer_loss; if (debug_info_) { ForwardDebugInfo(i); } diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index edf7ae81d58..8a8196eb21c 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -49,6 +49,14 @@ message FillerParameter { // The expected number of non-zero output weights for a given input in // Gaussian filler -- the default -1 means don't perform sparsification. optional int32 sparse = 7 [default = -1]; + // Normalize the filler variance by fan_in, fan_out, or their average. + // Applies to 'xavier' and 'msra' fillers. + enum VarianceNorm { + FAN_IN = 0; + FAN_OUT = 1; + AVERAGE = 2; + } + optional VarianceNorm variance_norm = 8 [default = FAN_IN]; } message NetParameter { @@ -88,7 +96,7 @@ message NetParameter { // NOTE // Update the next available ID when you add a new SolverParameter field. // -// SolverParameter next available ID: 36 (last added: clip_gradients) +// SolverParameter next available ID: 37 (last added: iter_size) message SolverParameter { ////////////////////////////////////////////////////////////////////////////// // Specifying the train and test networks @@ -141,6 +149,8 @@ message SolverParameter { // Display the loss averaged over the last average_loss iterations optional int32 average_loss = 33 [default = 1]; optional int32 max_iter = 7; // the maximum number of iterations + // accumulate gradients over `iter_size` x `batch_size` instances + optional int32 iter_size = 36 [default = 1]; optional string lr_policy = 8; // The learning rate decay policy. 
optional float gamma = 9; // The parameter to compute the learning rate. optional float power = 10; // The parameter to compute the learning rate. @@ -259,7 +269,7 @@ message ParamSpec { // NOTE // Update the next available ID when you add a new LayerParameter field. // -// LayerParameter next available layer-specific ID: 132 (last added: prelu_param) +// LayerParameter next available layer-specific ID: 137 (last added: reduction_param) message LayerParameter { optional string name = 1; // the layer name optional string type = 2; // the layer type @@ -280,6 +290,10 @@ message LayerParameter { // The blobs containing the numeric parameters of the layer. repeated BlobProto blobs = 7; + + // Specifies on which bottoms the backpropagation should be skipped. + // The size must be either 0 or equal to the number of bottoms. + repeated bool propagate_down = 11; // Rules controlling whether and when a layer is included in the network, // based on the current NetState. You may specify a non-zero number of rules @@ -312,12 +326,14 @@ message LayerParameter { optional DummyDataParameter dummy_data_param = 109; optional EltwiseParameter eltwise_param = 110; optional ExpParameter exp_param = 111; + optional FlattenParameter flatten_param = 135; optional HDF5DataParameter hdf5_data_param = 112; optional HDF5OutputParameter hdf5_output_param = 113; optional HingeLossParameter hinge_loss_param = 114; optional ImageDataParameter image_data_param = 115; optional InfogainLossParameter infogain_loss_param = 116; optional InnerProductParameter inner_product_param = 117; + optional LogParameter log_param = 134; optional LRNParameter lrn_param = 118; optional MemoryDataParameter memory_data_param = 119; optional MVNParameter mvn_param = 120; @@ -325,14 +341,17 @@ message LayerParameter { optional PowerParameter power_param = 122; optional PReLUParameter prelu_param = 131; optional PythonParameter python_param = 130; + optional ReductionParameter reduction_param = 136; optional 
ReLUParameter relu_param = 123; + optional ReshapeParameter reshape_param = 133; optional SigmoidParameter sigmoid_param = 124; optional SoftmaxParameter softmax_param = 125; + optional SPPParameter spp_param = 132; optional SliceParameter slice_param = 126; optional TanHParameter tanh_param = 127; optional ThresholdParameter threshold_param = 128; optional WindowDataParameter window_data_param = 129; - optional TripletLossParameter triplet_loss_param = 132; + optional TripletLossParameter triplet_loss_param = 137; } // Message that stores parameters used to apply transformation @@ -352,6 +371,10 @@ message TransformationParameter { // or can be repeated the same number of times as channels // (would subtract them from the corresponding channel) repeated float mean_value = 5; + // Force the decoded image to have 3 color channels. + optional bool force_color = 6 [default = false]; + // Force the decoded image to have 1 color channels. + optional bool force_gray = 7 [default = false]; } // Message that stores parameters shared by loss layers @@ -363,7 +386,9 @@ message LossParameter { optional bool normalize = 2 [default = true]; } -// Message that stores parameters used by AccuracyLayer +// Messages that store parameters used by individual layer types follow, in +// alphabetical order. + message AccuracyParameter { // When computing accuracy, count as correct by comparing the true label to // the top k scoring classes. By default, only compare to the top scoring @@ -381,14 +406,12 @@ message AccuracyParameter { optional int32 ignore_label = 3; } -// Message that stores parameters used by ArgMaxLayer message ArgMaxParameter { // If true produce pairs (argmax, maxval) optional bool out_max_val = 1 [default = false]; optional uint32 top_k = 2 [default = 1]; } -// Message that stores parameters used by ConcatLayer message ConcatParameter { // The axis along which to concatenate -- may be negative to index from the // end (e.g., -1 for the last axis). 
Other axes must have the @@ -400,16 +423,23 @@ message ConcatParameter { optional uint32 concat_dim = 1 [default = 1]; } -// Message that stores parameters used by ContrastiveLossLayer message ContrastiveLossParameter { - //margin for dissimilar pair + // margin for dissimilar pair optional float margin = 1 [default = 1.0]; + // The first implementation of this cost did not exactly match the cost of + // Hadsell et al 2006 -- using (margin - d^2) instead of (margin - d)^2. + // legacy_version = false (the default) uses (margin - d)^2 as proposed in the + // Hadsell paper. New models should probably use this version. + // legacy_version = true uses (margin - d^2). This is kept to support / + // reproduce existing models and results + optional bool legacy_version = 2 [default = false]; } + message TripletLossParameter { //margin for negative triplet optional float margin = 1 [default = 1.0]; } -// Message that stores parameters used by ConvolutionLayer + message ConvolutionParameter { optional uint32 num_output = 1; // The number of outputs for the layer optional bool bias_term = 2 [default = true]; // whether to have bias terms @@ -435,7 +465,6 @@ message ConvolutionParameter { optional Engine engine = 15 [default = DEFAULT]; } -// Message that stores parameters used by DataLayer message DataParameter { enum DB { LEVELDB = 0; @@ -466,12 +495,10 @@ message DataParameter { optional bool force_encoded_color = 9 [default = false]; } -// Message that stores parameters used by DropoutLayer message DropoutParameter { optional float dropout_ratio = 1 [default = 0.5]; // dropout ratio } -// Message that stores parameters used by DummyDataLayer. // DummyDataLayer fills any number of arbitrarily shaped blobs with random // (or constant) data generated by "Fillers" (see "message FillerParameter"). 
message DummyDataParameter { @@ -491,7 +518,6 @@ message DummyDataParameter { repeated uint32 width = 5; } -// Message that stores parameters used by EltwiseLayer message EltwiseParameter { enum EltwiseOp { PROD = 0; @@ -506,7 +532,6 @@ message EltwiseParameter { optional bool stable_prod_grad = 3 [default = true]; } -// Message that stores parameters used by ExpLayer message ExpParameter { // ExpLayer computes outputs y = base ^ (shift + scale * x), for base > 0. // Or if base is set to the default (-1), base is set to e, @@ -516,6 +541,18 @@ message ExpParameter { optional float shift = 3 [default = 0.0]; } +/// Message that stores parameters used by FlattenLayer +message FlattenParameter { + // The first axis to flatten: all preceding axes are retained in the output. + // May be negative to index from the end (e.g., -1 for the last axis). + optional int32 axis = 1 [default = 1]; + + // The last axis to flatten: all following axes are retained in the output. + // May be negative to index from the end (e.g., the default -1 for the last + // axis). + optional int32 end_axis = 2 [default = -1]; +} + // Message that stores parameters used by HDF5DataLayer message HDF5DataParameter { // Specify the data source. @@ -531,7 +568,6 @@ message HDF5DataParameter { optional bool shuffle = 3 [default = false]; } -// Message that stores parameters used by HDF5OutputLayer message HDF5OutputParameter { optional string file_name = 1; } @@ -545,7 +581,6 @@ message HingeLossParameter { optional Norm norm = 1 [default = L1]; } -// Message that stores parameters used by ImageDataLayer message ImageDataParameter { // Specify the data source. optional string source = 1; @@ -577,13 +612,11 @@ message ImageDataParameter { optional string root_folder = 12 [default = ""]; } -// Message that stores parameters InfogainLossLayer message InfogainLossParameter { // Specify the infogain matrix source. 
optional string source = 1; } -// Message that stores parameters used by InnerProductLayer message InnerProductParameter { optional uint32 num_output = 1; // The number of outputs for the layer optional bool bias_term = 2 [default = true]; // whether to have bias terms @@ -596,6 +629,16 @@ message InnerProductParameter { optional int32 axis = 5 [default = 1]; } +// Message that stores parameters used by LogLayer +message LogParameter { + // LogLayer computes outputs y = log_base(shift + scale * x), for base > 0. + // Or if base is set to the default (-1), base is set to e, + // so y = ln(shift + scale * x) = log_e(shift + scale * x) + optional float base = 1 [default = -1.0]; + optional float scale = 2 [default = 1.0]; + optional float shift = 3 [default = 0.0]; +} + // Message that stores parameters used by LRNLayer message LRNParameter { optional uint32 local_size = 1 [default = 5]; @@ -609,7 +652,6 @@ message LRNParameter { optional float k = 5 [default = 1.]; } -// Message that stores parameters used by MemoryDataLayer message MemoryDataParameter { optional uint32 batch_size = 1; optional uint32 channels = 2; @@ -617,16 +659,17 @@ message MemoryDataParameter { optional uint32 width = 4; } -// Message that stores parameters used by MVNLayer message MVNParameter { // This parameter can be set to false to normalize mean only optional bool normalize_variance = 1 [default = true]; // This parameter can be set to true to perform DNN-like MVN optional bool across_channels = 2 [default = false]; + + // Epsilon for not dividing by zero while normalizing variance + optional float eps = 3 [default = 1e-9]; } -// Message that stores parameters used by PoolingLayer message PoolingParameter { enum PoolMethod { MAX = 0; @@ -656,7 +699,6 @@ message PoolingParameter { optional bool global_pooling = 12 [default = false]; } -// Message that stores parameters used by PowerLayer message PowerParameter { // PowerLayer computes outputs y = (shift + scale * x) ^ power. 
optional float power = 1 [default = 1.0]; @@ -664,12 +706,40 @@ message PowerParameter { optional float shift = 3 [default = 0.0]; } -// Message that stores parameters used by PythonLayer message PythonParameter { optional string module = 1; optional string layer = 2; } +// Message that stores parameters used by ReductionLayer +message ReductionParameter { + enum ReductionOp { + SUM = 1; + ASUM = 2; + SUMSQ = 3; + MEAN = 4; + } + + optional ReductionOp operation = 1 [default = SUM]; // reduction operation + + // The first axis to reduce to a scalar -- may be negative to index from the + // end (e.g., -1 for the last axis). + // (Currently, only reduction along ALL "tail" axes is supported; reduction + // of axis M through N, where N < num_axes - 1, is unsupported.) + // Suppose we have an n-axis bottom Blob with shape: + // (d0, d1, d2, ..., d(m-1), dm, d(m+1), ..., d(n-1)). + // If axis == m, the output Blob will have shape + // (d0, d1, d2, ..., d(m-1)), + // and the ReductionOp operation is performed (d0 * d1 * d2 * ... * d(m-1)) + // times, each including (dm * d(m+1) * ... * d(n-1)) individual data. + // If axis == 0 (the default), the output Blob always has the empty shape + // (count 1), performing reduction across the entire input -- + // often useful for creating new loss functions. + optional int32 axis = 2 [default = 0]; + + optional float coeff = 3 [default = 1.0]; // coefficient for output +} + // Message that stores parameters used by ReLULayer message ReLUParameter { // Allow non-zero slope for negative inputs to speed up optimization @@ -686,7 +756,70 @@ message ReLUParameter { optional Engine engine = 2 [default = DEFAULT]; } -// Message that stores parameters used by SigmoidLayer +message ReshapeParameter { + // Specify the output dimensions. If some of the dimensions are set to 0, + // the corresponding dimension from the bottom layer is used (unchanged). 
+ // Exactly one dimension may be set to -1, in which case its value is + // inferred from the count of the bottom blob and the remaining dimensions. + // For example, suppose we want to reshape a 2D blob "input" with shape 2 x 8: + // + // layer { + // type: "Reshape" bottom: "input" top: "output" + // reshape_param { ... } + // } + // + // If "input" is 2D with shape 2 x 8, then the following reshape_param + // specifications are all equivalent, producing a 3D blob "output" with shape + // 2 x 2 x 4: + // + // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } + // reshape_param { shape { dim: 0 dim: 2 dim: 4 } } + // reshape_param { shape { dim: 0 dim: 2 dim: -1 } } + // reshape_param { shape { dim: -1 dim: 0 dim: 2 } } + // + optional BlobShape shape = 1; + + // axis and num_axes control the portion of the bottom blob's shape that are + // replaced by (included in) the reshape. By default (axis == 0 and + // num_axes == -1), the entire bottom blob shape is included in the reshape, + // and hence the shape field must specify the entire output shape. + // + // axis may be non-zero to retain some portion of the beginning of the input + // shape (and may be negative to index from the end; e.g., -1 to begin the + // reshape after the last axis, including nothing in the reshape, + // -2 to include only the last axis, etc.). + // + // For example, suppose "input" is a 2D blob with shape 2 x 8. + // Then the following ReshapeLayer specifications are all equivalent, + // producing a blob "output" with shape 2 x 2 x 4: + // + // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } + // reshape_param { shape { dim: 2 dim: 4 } axis: 1 } + // reshape_param { shape { dim: 2 dim: 4 } axis: -3 } + // + // num_axes specifies the extent of the reshape. + // If num_axes >= 0 (and axis >= 0), the reshape will be performed only on + // input axes in the range [axis, axis+num_axes]. + // num_axes may also be -1, the default, to include all remaining axes + // (starting from axis). 
+ // + // For example, suppose "input" is a 2D blob with shape 2 x 8. + // Then the following ReshapeLayer specifications are equivalent, + // producing a blob "output" with shape 1 x 2 x 8. + // + // reshape_param { shape { dim: 1 dim: 2 dim: 8 } } + // reshape_param { shape { dim: 1 dim: 2 } num_axes: 1 } + // reshape_param { shape { dim: 1 } num_axes: 0 } + // + // On the other hand, these would produce output blob shape 2 x 1 x 8: + // + // reshape_param { shape { dim: 2 dim: 1 dim: 8 } } + // reshape_param { shape { dim: 1 } axis: 1 num_axes: 0 } + // + optional int32 axis = 2 [default = 0]; + optional int32 num_axes = 3 [default = -1]; +} + message SigmoidParameter { enum Engine { DEFAULT = 0; @@ -696,7 +829,6 @@ message SigmoidParameter { optional Engine engine = 1 [default = DEFAULT]; } -// Message that stores parameters used by SliceLayer message SliceParameter { // The axis along which to slice -- may be negative to index from the end // (e.g., -1 for the last axis). @@ -723,7 +855,6 @@ message SoftmaxParameter { optional int32 axis = 2 [default = 1]; } -// Message that stores parameters used by TanHLayer message TanHParameter { enum Engine { DEFAULT = 0; @@ -733,12 +864,10 @@ message TanHParameter { optional Engine engine = 1 [default = DEFAULT]; } -// Message that stores parameters used by ThresholdLayer message ThresholdParameter { optional float threshold = 1 [default = 0]; // Strictly positive values } -// Message that stores parameters used by WindowDataLayer message WindowDataParameter { // Specify the data source. 
optional string source = 1; @@ -772,6 +901,22 @@ message WindowDataParameter { optional string root_folder = 13 [default = ""]; } +message SPPParameter { + enum PoolMethod { + MAX = 0; + AVE = 1; + STOCHASTIC = 2; + } + optional uint32 pyramid_height = 1; + optional PoolMethod pool = 2 [default = MAX]; // The pooling method + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 6 [default = DEFAULT]; +} + // DEPRECATED: use LayerParameter. message V1LayerParameter { repeated string bottom = 2; @@ -821,6 +966,7 @@ message V1LayerParameter { TRIPLET_LOSS = 40; WINDOW_DATA = 24; THRESHOLD = 31; + } optional LayerType type = 5; repeated BlobProto blobs = 6; @@ -961,7 +1107,6 @@ message V0LayerParameter { optional HDF5OutputParameter hdf5_output_param = 1001; } -// Message that stores parameters used by PReLULayer message PReLUParameter { // Parametric ReLU described in K. He et al, Delving Deep into Rectifiers: // Surpassing Human-Level Performance on ImageNet Classification, 2015. 
diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp index 096980dd7af..aabe0edec80 100644 --- a/src/caffe/solver.cpp +++ b/src/caffe/solver.cpp @@ -167,7 +167,26 @@ void Solver::Step(int iters) { vector losses; Dtype smoothed_loss = 0; - for (; iter_ < stop_iter; ++iter_) { + while (iter_ < stop_iter) { + // zero-init the params + for (int i = 0; i < net_->params().size(); ++i) { + shared_ptr > blob = net_->params()[i]; + switch (Caffe::mode()) { + case Caffe::CPU: + caffe_set(blob->count(), static_cast(0), + blob->mutable_cpu_diff()); + break; + case Caffe::GPU: +#ifndef CPU_ONLY + caffe_gpu_set(blob->count(), static_cast(0), + blob->mutable_gpu_diff()); +#else + NO_GPU; +#endif + break; + } + } + if (param_.test_interval() && iter_ % param_.test_interval() == 0 && (iter_ > 0 || param_.test_initialization())) { TestAll(); @@ -175,7 +194,13 @@ void Solver::Step(int iters) { const bool display = param_.display() && iter_ % param_.display() == 0; net_->set_debug_info(display && param_.debug_info()); - Dtype loss = net_->ForwardBackward(bottom_vec); + // accumulate the loss and gradient + Dtype loss = 0; + for (int i = 0; i < param_.iter_size(); ++i) { + loss += net_->ForwardBackward(bottom_vec); + } + loss /= param_.iter_size(); + // average the loss across iterations for smoothed reporting if (losses.size() < average_loss) { losses.push_back(loss); int size = losses.size(); @@ -207,11 +232,14 @@ void Solver::Step(int iters) { } } } - ComputeUpdateValue(); - net_->Update(); + ApplyUpdate(); + + // Increment the internal iter_ counter -- its value should always indicate + // the number of times the weights have been updated. + ++iter_; // Save a snapshot if needed. 
- if (param_.snapshot() && (iter_ + 1) % param_.snapshot() == 0) { + if (param_.snapshot() && iter_ % param_.snapshot() == 0) { Snapshot(); } } @@ -327,15 +355,14 @@ void Solver::Snapshot() { string model_filename, snapshot_filename; const int kBufferSize = 20; char iter_str_buffer[kBufferSize]; - // Add one to iter_ to get the number of iterations that have completed. - snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_ + 1); + snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_); filename += iter_str_buffer; model_filename = filename + ".caffemodel"; LOG(INFO) << "Snapshotting to " << model_filename; WriteProtoToBinaryFile(net_param, model_filename.c_str()); SolverState state; SnapshotSolverState(&state); - state.set_iter(iter_ + 1); + state.set_iter(iter_); state.set_learned_net(model_filename); state.set_current_step(current_step_); snapshot_filename = filename + ".solverstate"; @@ -453,95 +480,138 @@ void SGDSolver::ClipGradients() { } template -void SGDSolver::ComputeUpdateValue() { - const vector > >& net_params = this->net_->params(); - const vector& net_params_lr = this->net_->params_lr(); - const vector& net_params_weight_decay = - this->net_->params_weight_decay(); - // get the learning rate +void SGDSolver::ApplyUpdate() { Dtype rate = GetLearningRate(); if (this->param_.display() && this->iter_ % this->param_.display() == 0) { LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; } ClipGradients(); - Dtype momentum = this->param_.momentum(); + for (int param_id = 0; param_id < this->net_->params().size(); ++param_id) { + Normalize(param_id); + Regularize(param_id); + ComputeUpdateValue(param_id, rate); + } + this->net_->Update(); +} + +template +void SGDSolver::Normalize(int param_id) { + if (this->param_.iter_size() == 1) { return; } + // Scale gradient to counterbalance accumulation. + const vector > >& net_params = this->net_->params(); + const Dtype accum_normalization = Dtype(1.) 
/ this->param_.iter_size(); + switch (Caffe::mode()) { + case Caffe::CPU: { + caffe_scal(net_params[param_id]->count(), accum_normalization, + net_params[param_id]->mutable_cpu_diff()); + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + caffe_gpu_scal(net_params[param_id]->count(), accum_normalization, + net_params[param_id]->mutable_gpu_diff()); +#else + NO_GPU; +#endif + break; + } + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +void SGDSolver::Regularize(int param_id) { + const vector > >& net_params = this->net_->params(); + const vector& net_params_weight_decay = + this->net_->params_weight_decay(); Dtype weight_decay = this->param_.weight_decay(); string regularization_type = this->param_.regularization_type(); + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; switch (Caffe::mode()) { - case Caffe::CPU: - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - // Compute the value to history, and then copy them to the blob's diff. 
- Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else if (regularization_type == "L1") { - caffe_cpu_sign(net_params[param_id]->count(), - net_params[param_id]->cpu_data(), - temp_[param_id]->mutable_cpu_data()); - caffe_axpy(net_params[param_id]->count(), - local_decay, - temp_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } + case Caffe::CPU: { + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else if (regularization_type == "L1") { + caffe_cpu_sign(net_params[param_id]->count(), + net_params[param_id]->cpu_data(), + temp_[param_id]->mutable_cpu_data()); + caffe_axpy(net_params[param_id]->count(), + local_decay, + temp_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; } - - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->cpu_diff(), momentum, - history_[param_id]->mutable_cpu_data()); - // copy - caffe_copy(net_params[param_id]->count(), - history_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); } break; - case Caffe::GPU: + } + case Caffe::GPU: { #ifndef CPU_ONLY - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - // Compute the value to history, and then copy them to the blob's diff. 
- Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else if (regularization_type == "L1") { - caffe_gpu_sign(net_params[param_id]->count(), - net_params[param_id]->gpu_data(), - temp_[param_id]->mutable_gpu_data()); - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - temp_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else if (regularization_type == "L1") { + caffe_gpu_sign(net_params[param_id]->count(), + net_params[param_id]->gpu_data(), + temp_[param_id]->mutable_gpu_data()); + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + temp_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; } - - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->gpu_diff(), momentum, - history_[param_id]->mutable_gpu_data()); - // copy - caffe_copy(net_params[param_id]->count(), - history_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); } #else NO_GPU; #endif break; + } + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { + const vector > >& net_params = this->net_->params(); + const vector& net_params_lr = this->net_->params_lr(); + Dtype momentum = this->param_.momentum(); + Dtype 
local_rate = rate * net_params_lr[param_id]; + // Compute the update to history, then copy it to the parameter diff. + switch (Caffe::mode()) { + case Caffe::CPU: { + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), momentum, + history_[param_id]->mutable_cpu_data()); + caffe_copy(net_params[param_id]->count(), + history_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), momentum, + history_[param_id]->mutable_gpu_data()); + caffe_copy(net_params[param_id]->count(), + history_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); +#else + NO_GPU; +#endif + break; + } default: LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); } @@ -568,252 +638,138 @@ void SGDSolver::RestoreSolverState(const SolverState& state) { } template -void NesterovSolver::ComputeUpdateValue() { +void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { const vector > >& net_params = this->net_->params(); const vector& net_params_lr = this->net_->params_lr(); - const vector& net_params_weight_decay = - this->net_->params_weight_decay(); - // get the learning rate - Dtype rate = this->GetLearningRate(); - if (this->param_.display() && this->iter_ % this->param_.display() == 0) { - LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; - } - SGDSolver::ClipGradients(); Dtype momentum = this->param_.momentum(); - Dtype weight_decay = this->param_.weight_decay(); - string regularization_type = this->param_.regularization_type(); + Dtype local_rate = rate * net_params_lr[param_id]; switch (Caffe::mode()) { - case Caffe::CPU: - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - // save history momentum for stepping back - caffe_copy(net_params[param_id]->count(), - this->history_[param_id]->cpu_data(), - 
this->update_[param_id]->mutable_cpu_data()); - - Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else if (regularization_type == "L1") { - caffe_cpu_sign(net_params[param_id]->count(), - net_params[param_id]->cpu_data(), - this->temp_[param_id]->mutable_cpu_data()); - caffe_axpy(net_params[param_id]->count(), - local_decay, - this->temp_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } - } - - // update history - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->cpu_diff(), momentum, - this->history_[param_id]->mutable_cpu_data()); - - // compute udpate: step back then over step - caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, - this->history_[param_id]->cpu_data(), -momentum, - this->update_[param_id]->mutable_cpu_data()); - - // copy - caffe_copy(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } + case Caffe::CPU: { + // save history momentum for stepping back + caffe_copy(net_params[param_id]->count(), + this->history_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + // update history + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), momentum, + this->history_[param_id]->mutable_cpu_data()); + + // compute update: step back then over step + caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, + this->history_[param_id]->cpu_data(), -momentum, + this->update_[param_id]->mutable_cpu_data()); + + // copy + caffe_copy(net_params[param_id]->count(), + 
this->update_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); break; - case Caffe::GPU: + } + case Caffe::GPU: { #ifndef CPU_ONLY - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - // save history momentum for stepping back - caffe_copy(net_params[param_id]->count(), - this->history_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else if (regularization_type == "L1") { - caffe_gpu_sign(net_params[param_id]->count(), - net_params[param_id]->gpu_data(), - this->temp_[param_id]->mutable_gpu_data()); - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - this->temp_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } - } - - // update history - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->gpu_diff(), momentum, - this->history_[param_id]->mutable_gpu_data()); - - // compute udpate: step back then over step - caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, - this->history_[param_id]->gpu_data(), -momentum, - this->update_[param_id]->mutable_gpu_data()); - - // copy - caffe_copy(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } + // save history momentum for stepping back + caffe_copy(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + // update history + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), 
momentum, + this->history_[param_id]->mutable_gpu_data()); + + // compute update: step back then over step + caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, + this->history_[param_id]->gpu_data(), -momentum, + this->update_[param_id]->mutable_gpu_data()); + + // copy + caffe_copy(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); #else NO_GPU; #endif break; + } default: LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); } } template -void AdaGradSolver::ComputeUpdateValue() { +void AdaGradSolver::ComputeUpdateValue(int param_id, Dtype rate) { const vector > >& net_params = this->net_->params(); const vector& net_params_lr = this->net_->params_lr(); - const vector& net_params_weight_decay = - this->net_->params_weight_decay(); - // get the learning rate - Dtype rate = this->GetLearningRate(); Dtype delta = this->param_.delta(); - if (this->param_.display() && this->iter_ % this->param_.display() == 0) { - LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; - } - SGDSolver::ClipGradients(); - Dtype weight_decay = this->param_.weight_decay(); - string regularization_type = this->param_.regularization_type(); + Dtype local_rate = rate * net_params_lr[param_id]; switch (Caffe::mode()) { - case Caffe::CPU: - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else if (regularization_type == "L1") { - caffe_cpu_sign(net_params[param_id]->count(), - net_params[param_id]->cpu_data(), - this->temp_[param_id]->mutable_cpu_data()); - caffe_axpy(net_params[param_id]->count(), - local_decay, - 
this->temp_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } - } - - // compute square of gradient in update - caffe_powx(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), Dtype(2), - this->update_[param_id]->mutable_cpu_data()); - - // update history - caffe_add(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), - this->history_[param_id]->cpu_data(), - this->history_[param_id]->mutable_cpu_data()); - - // prepare update - caffe_powx(net_params[param_id]->count(), - this->history_[param_id]->cpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_cpu_data()); - - caffe_add_scalar(net_params[param_id]->count(), - delta, this->update_[param_id]->mutable_cpu_data()); - - caffe_div(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), - this->update_[param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); - - // scale and copy - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->cpu_data(), Dtype(0), - net_params[param_id]->mutable_cpu_diff()); - } + case Caffe::CPU: { + // compute square of gradient in update + caffe_powx(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), Dtype(2), + this->update_[param_id]->mutable_cpu_data()); + + // update history + caffe_add(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), + this->history_[param_id]->cpu_data(), + this->history_[param_id]->mutable_cpu_data()); + + // prepare update + caffe_powx(net_params[param_id]->count(), + this->history_[param_id]->cpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_cpu_data()); + + caffe_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_cpu_data()); + + caffe_div(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), + this->update_[param_id]->cpu_data(), + 
this->update_[param_id]->mutable_cpu_data()); + + // scale and copy + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->cpu_data(), Dtype(0), + net_params[param_id]->mutable_cpu_diff()); break; - case Caffe::GPU: + } + case Caffe::GPU: { #ifndef CPU_ONLY - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else if (regularization_type == "L1") { - caffe_gpu_sign(net_params[param_id]->count(), - net_params[param_id]->gpu_data(), - this->temp_[param_id]->mutable_gpu_data()); - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - this->temp_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } - } - - // compute square of gradient in update - caffe_gpu_powx(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), Dtype(2), - this->update_[param_id]->mutable_gpu_data()); - - // update history - caffe_gpu_add(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), - this->history_[param_id]->gpu_data(), - this->history_[param_id]->mutable_gpu_data()); - - // prepare update - caffe_gpu_powx(net_params[param_id]->count(), - this->history_[param_id]->gpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_add_scalar(net_params[param_id]->count(), - delta, this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_div(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), - this->update_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - // scale and copy - 
caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->gpu_data(), Dtype(0), - net_params[param_id]->mutable_gpu_diff()); - } + // compute square of gradient in update + caffe_gpu_powx(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), Dtype(2), + this->update_[param_id]->mutable_gpu_data()); + + // update history + caffe_gpu_add(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + this->history_[param_id]->gpu_data(), + this->history_[param_id]->mutable_gpu_data()); + + // prepare update + caffe_gpu_powx(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_div(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), + this->update_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + // scale and copy + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->gpu_data(), Dtype(0), + net_params[param_id]->mutable_gpu_diff()); #else NO_GPU; #endif break; + } default: LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); } diff --git a/src/caffe/test/test_accuracy_layer.cpp b/src/caffe/test/test_accuracy_layer.cpp index 6cbf51df45e..c14b67cc0e9 100644 --- a/src/caffe/test/test_accuracy_layer.cpp +++ b/src/caffe/test/test_accuracy_layer.cpp @@ -16,7 +16,7 @@ namespace caffe { template -class AccuracyLayerTest : public ::testing::Test { +class AccuracyLayerTest : public CPUDeviceTest { protected: AccuracyLayerTest() : blob_bottom_data_(new Blob()), @@ -92,7 +92,6 @@ TYPED_TEST(AccuracyLayerTest, TestSetupTopK) { TYPED_TEST(AccuracyLayerTest, TestForwardCPU) { LayerParameter layer_param; - Caffe::set_mode(Caffe::CPU); AccuracyLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, 
this->blob_top_vec_); @@ -118,7 +117,6 @@ TYPED_TEST(AccuracyLayerTest, TestForwardCPU) { } TYPED_TEST(AccuracyLayerTest, TestForwardWithSpatialAxes) { - Caffe::set_mode(Caffe::CPU); this->blob_bottom_data_->Reshape(2, 10, 4, 5); vector label_shape(3); label_shape[0] = 2; label_shape[1] = 4; label_shape[2] = 5; @@ -162,7 +160,6 @@ TYPED_TEST(AccuracyLayerTest, TestForwardWithSpatialAxes) { } TYPED_TEST(AccuracyLayerTest, TestForwardIgnoreLabel) { - Caffe::set_mode(Caffe::CPU); LayerParameter layer_param; const TypeParam kIgnoreLabelValue = -1; layer_param.mutable_accuracy_param()->set_ignore_label(kIgnoreLabelValue); diff --git a/src/caffe/test/test_argmax_layer.cpp b/src/caffe/test/test_argmax_layer.cpp index 3487d42f21e..895c3d372ff 100644 --- a/src/caffe/test/test_argmax_layer.cpp +++ b/src/caffe/test/test_argmax_layer.cpp @@ -13,13 +13,12 @@ namespace caffe { template -class ArgMaxLayerTest : public ::testing::Test { +class ArgMaxLayerTest : public CPUDeviceTest { protected: ArgMaxLayerTest() : blob_bottom_(new Blob(10, 20, 1, 1)), blob_top_(new Blob()), top_k_(5) { - Caffe::set_mode(Caffe::CPU); Caffe::set_random_seed(1701); // fill the values FillerParameter filler_param; diff --git a/src/caffe/test/test_contrastive_loss_layer.cpp b/src/caffe/test/test_contrastive_loss_layer.cpp index d269fbc26f2..1e9447cbc51 100644 --- a/src/caffe/test/test_contrastive_loss_layer.cpp +++ b/src/caffe/test/test_contrastive_loss_layer.cpp @@ -22,15 +22,15 @@ class ContrastiveLossLayerTest : public MultiDeviceTest { protected: ContrastiveLossLayerTest() - : blob_bottom_data_i_(new Blob(128, 10, 1, 1)), - blob_bottom_data_j_(new Blob(128, 10, 1, 1)), - blob_bottom_y_(new Blob(128, 1, 1, 1)), + : blob_bottom_data_i_(new Blob(512, 2, 1, 1)), + blob_bottom_data_j_(new Blob(512, 2, 1, 1)), + blob_bottom_y_(new Blob(512, 1, 1, 1)), blob_top_loss_(new Blob()) { // fill the values FillerParameter filler_param; - filler_param.set_mean(0.0); - filler_param.set_std(0.3); // distances~=1.0 
to test both sides of margin - GaussianFiller filler(filler_param); + filler_param.set_min(-1.0); + filler_param.set_max(1.0); // distances~=1.0 to test both sides of margin + UniformFiller filler(filler_param); filler.Fill(this->blob_bottom_data_i_); blob_bottom_vec_.push_back(blob_bottom_data_i_); filler.Fill(this->blob_bottom_data_j_); @@ -79,7 +79,8 @@ TYPED_TEST(ContrastiveLossLayerTest, TestForward) { if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs loss += dist_sq; } else { - loss += std::max(margin-dist_sq, Dtype(0)); + Dtype dist = std::max(margin - sqrt(dist_sq), 0.0); + loss += dist*dist; } } loss /= static_cast(num) * Dtype(2); @@ -99,4 +100,47 @@ TYPED_TEST(ContrastiveLossLayerTest, TestGradient) { this->blob_top_vec_, 1); } +TYPED_TEST(ContrastiveLossLayerTest, TestForwardLegacy) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_contrastive_loss_param()->set_legacy_version(true); + ContrastiveLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // manually compute to compare + const Dtype margin = layer_param.contrastive_loss_param().margin(); + const int num = this->blob_bottom_data_i_->num(); + const int channels = this->blob_bottom_data_i_->channels(); + Dtype loss(0); + for (int i = 0; i < num; ++i) { + Dtype dist_sq(0); + for (int j = 0; j < channels; ++j) { + Dtype diff = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - + this->blob_bottom_data_j_->cpu_data()[i*channels+j]; + dist_sq += diff*diff; + } + if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs + loss += dist_sq; + } else { + loss += std::max(margin - dist_sq, Dtype(0.0)); + } + } + loss /= static_cast(num) * Dtype(2); + EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); +} + +TYPED_TEST(ContrastiveLossLayerTest, TestGradientLegacy) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; 
+ layer_param.mutable_contrastive_loss_param()->set_legacy_version(true); + ContrastiveLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + GradientChecker checker(1e-2, 1e-2, 1701); + // check the gradient for the first two bottom layers + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 1); +} + } // namespace caffe diff --git a/src/caffe/test/test_convolution_layer.cpp b/src/caffe/test/test_convolution_layer.cpp index c1fe3b58c58..67d41fff844 100644 --- a/src/caffe/test/test_convolution_layer.cpp +++ b/src/caffe/test/test_convolution_layer.cpp @@ -424,7 +424,7 @@ TYPED_TEST(ConvolutionLayerTest, TestGradientGroup) { #ifdef USE_CUDNN template -class CuDNNConvolutionLayerTest : public ::testing::Test { +class CuDNNConvolutionLayerTest : public GPUDeviceTest { protected: CuDNNConvolutionLayerTest() : blob_bottom_(new Blob(2, 3, 6, 4)), @@ -467,7 +467,6 @@ class CuDNNConvolutionLayerTest : public ::testing::Test { TYPED_TEST_CASE(CuDNNConvolutionLayerTest, TestDtypes); TYPED_TEST(CuDNNConvolutionLayerTest, TestSetupCuDNN) { - Caffe::set_mode(Caffe::GPU); this->blob_bottom_vec_.push_back(this->blob_bottom_2_); this->blob_top_vec_.push_back(this->blob_top_2_); LayerParameter layer_param; @@ -505,7 +504,6 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSetupCuDNN) { } TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionCuDNN) { - Caffe::set_mode(Caffe::GPU); this->blob_bottom_vec_.push_back(this->blob_bottom_2_); this->blob_top_vec_.push_back(this->blob_top_2_); LayerParameter layer_param; @@ -541,7 +539,6 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionCuDNN) { } TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionGroupCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); @@ -572,7 
+569,7 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSobelConvolutionCuDNN) { // Test separable convolution by computing the Sobel operator // as a single filter then comparing the result // as the convolution of two rectangular filters. - Caffe::set_mode(Caffe::GPU); + // Fill bottoms with identical Gaussian noise. shared_ptr > filler; FillerParameter filler_param; @@ -665,7 +662,6 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSobelConvolutionCuDNN) { } TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); @@ -683,7 +679,6 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientCuDNN) { } TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientGroupCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); diff --git a/src/caffe/test/test_data/generate_sample_data.py b/src/caffe/test/test_data/generate_sample_data.py index e5dbc3406d8..ab5572685cb 100644 --- a/src/caffe/test/test_data/generate_sample_data.py +++ b/src/caffe/test/test_data/generate_sample_data.py @@ -5,6 +5,8 @@ import numpy as np import h5py +script_dir = os.path.dirname(os.path.abspath(__file__)) + num_cols = 8 num_rows = 10 height = 6 @@ -27,12 +29,12 @@ print data print label -with h5py.File(os.path.dirname(__file__) + '/sample_data.h5', 'w') as f: +with h5py.File(script_dir + '/sample_data.h5', 'w') as f: f['data'] = data f['label'] = label f['label2'] = label2 -with h5py.File(os.path.dirname(__file__) + '/sample_data_2_gzip.h5', 'w') as f: +with h5py.File(script_dir + '/sample_data_2_gzip.h5', 'w') as f: f.create_dataset( 'data', data=data + total_size, compression='gzip', compression_opts=1 @@ -46,6 +48,6 @@ compression='gzip', compression_opts=1 ) -with open(os.path.dirname(__file__) + '/sample_data_list.txt', 'w') as f: - f.write(os.path.dirname(__file__) + 
'/sample_data.h5\n') - f.write(os.path.dirname(__file__) + '/sample_data_2_gzip.h5\n') +with open(script_dir + '/sample_data_list.txt', 'w') as f: + f.write(script_dir + '/sample_data.h5\n') + f.write(script_dir + '/sample_data_2_gzip.h5\n') diff --git a/src/caffe/test/test_dummy_data_layer.cpp b/src/caffe/test/test_dummy_data_layer.cpp index 99548352746..c9ed38db3a5 100644 --- a/src/caffe/test/test_dummy_data_layer.cpp +++ b/src/caffe/test/test_dummy_data_layer.cpp @@ -13,7 +13,7 @@ namespace caffe { template -class DummyDataLayerTest : public ::testing::Test { +class DummyDataLayerTest : public CPUDeviceTest { protected: DummyDataLayerTest() : blob_top_a_(new Blob()), @@ -44,7 +44,6 @@ class DummyDataLayerTest : public ::testing::Test { TYPED_TEST_CASE(DummyDataLayerTest, TestDtypes); TYPED_TEST(DummyDataLayerTest, TestOneTopConstant) { - Caffe::set_mode(Caffe::CPU); LayerParameter param; DummyDataParameter* dummy_data_param = param.mutable_dummy_data_param(); dummy_data_param->add_num(5); @@ -74,7 +73,6 @@ TYPED_TEST(DummyDataLayerTest, TestOneTopConstant) { } TYPED_TEST(DummyDataLayerTest, TestTwoTopConstant) { - Caffe::set_mode(Caffe::CPU); LayerParameter param; DummyDataParameter* dummy_data_param = param.mutable_dummy_data_param(); dummy_data_param->add_num(5); @@ -113,7 +111,6 @@ TYPED_TEST(DummyDataLayerTest, TestTwoTopConstant) { } TYPED_TEST(DummyDataLayerTest, TestThreeTopConstantGaussianConstant) { - Caffe::set_mode(Caffe::CPU); LayerParameter param; DummyDataParameter* dummy_data_param = param.mutable_dummy_data_param(); dummy_data_param->add_num(5); diff --git a/src/caffe/test/test_filler.cpp b/src/caffe/test/test_filler.cpp index e04b0fd22af..728b8dc5f0d 100644 --- a/src/caffe/test/test_filler.cpp +++ b/src/caffe/test/test_filler.cpp @@ -142,4 +142,102 @@ TYPED_TEST(GaussianFillerTest, TestFill) { EXPECT_LE(var, target_var * 5.); } +template +class XavierFillerTest : public ::testing::Test { + protected: + XavierFillerTest() + : blob_(new Blob(1000, 
2, 4, 5)), + filler_param_() { + } + virtual void test_params(FillerParameter_VarianceNorm variance_norm, + Dtype n) { + this->filler_param_.set_variance_norm(variance_norm); + this->filler_.reset(new XavierFiller(this->filler_param_)); + this->filler_->Fill(blob_); + EXPECT_TRUE(this->blob_); + const int count = this->blob_->count(); + const Dtype* data = this->blob_->cpu_data(); + Dtype mean = 0.; + Dtype ex2 = 0.; + for (int i = 0; i < count; ++i) { + mean += data[i]; + ex2 += data[i] * data[i]; + } + mean /= count; + ex2 /= count; + Dtype std = sqrt(ex2 - mean*mean); + Dtype target_std = sqrt(2.0 / n); + EXPECT_NEAR(mean, 0.0, 0.1); + EXPECT_NEAR(std, target_std, 0.1); + } + virtual ~XavierFillerTest() { delete blob_; } + Blob* const blob_; + FillerParameter filler_param_; + shared_ptr > filler_; +}; + +TYPED_TEST_CASE(XavierFillerTest, TestDtypes); + +TYPED_TEST(XavierFillerTest, TestFillFanIn) { + TypeParam n = 2*4*5; + this->test_params(FillerParameter_VarianceNorm_FAN_IN, n); +} +TYPED_TEST(XavierFillerTest, TestFillFanOut) { + TypeParam n = 1000*4*5; + this->test_params(FillerParameter_VarianceNorm_FAN_OUT, n); +} +TYPED_TEST(XavierFillerTest, TestFillAverage) { + TypeParam n = (2*4*5 + 1000*4*5) / 2.0; + this->test_params(FillerParameter_VarianceNorm_AVERAGE, n); +} + +template +class MSRAFillerTest : public ::testing::Test { + protected: + MSRAFillerTest() + : blob_(new Blob(1000, 2, 4, 5)), + filler_param_() { + } + virtual void test_params(FillerParameter_VarianceNorm variance_norm, + Dtype n) { + this->filler_param_.set_variance_norm(variance_norm); + this->filler_.reset(new MSRAFiller(this->filler_param_)); + this->filler_->Fill(blob_); + EXPECT_TRUE(this->blob_); + const int count = this->blob_->count(); + const Dtype* data = this->blob_->cpu_data(); + Dtype mean = 0.; + Dtype ex2 = 0.; + for (int i = 0; i < count; ++i) { + mean += data[i]; + ex2 += data[i] * data[i]; + } + mean /= count; + ex2 /= count; + Dtype std = sqrt(ex2 - mean*mean); + Dtype 
target_std = sqrt(2.0 / n); + EXPECT_NEAR(mean, 0.0, 0.1); + EXPECT_NEAR(std, target_std, 0.1); + } + virtual ~MSRAFillerTest() { delete blob_; } + Blob* const blob_; + FillerParameter filler_param_; + shared_ptr > filler_; +}; + +TYPED_TEST_CASE(MSRAFillerTest, TestDtypes); + +TYPED_TEST(MSRAFillerTest, TestFillFanIn) { + TypeParam n = 2*4*5; + this->test_params(FillerParameter_VarianceNorm_FAN_IN, n); +} +TYPED_TEST(MSRAFillerTest, TestFillFanOut) { + TypeParam n = 1000*4*5; + this->test_params(FillerParameter_VarianceNorm_FAN_OUT, n); +} +TYPED_TEST(MSRAFillerTest, TestFillAverage) { + TypeParam n = (2*4*5 + 1000*4*5) / 2.0; + this->test_params(FillerParameter_VarianceNorm_AVERAGE, n); +} + } // namespace caffe diff --git a/src/caffe/test/test_flatten_layer.cpp b/src/caffe/test/test_flatten_layer.cpp index 3042d293cf7..7b6757cba32 100644 --- a/src/caffe/test/test_flatten_layer.cpp +++ b/src/caffe/test/test_flatten_layer.cpp @@ -42,13 +42,48 @@ TYPED_TEST(FlattenLayerTest, TestSetup) { LayerParameter layer_param; FlattenLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - EXPECT_EQ(this->blob_top_->num(), 2); - EXPECT_EQ(this->blob_top_->channels(), 3 * 6 * 5); - EXPECT_EQ(this->blob_top_->height(), 1); - EXPECT_EQ(this->blob_top_->width(), 1); + ASSERT_EQ(this->blob_top_->num_axes(), 2); + EXPECT_EQ(this->blob_top_->shape(0), 2); + EXPECT_EQ(this->blob_top_->shape(1), 3 * 6 * 5); } -TYPED_TEST(FlattenLayerTest, Test) { +TYPED_TEST(FlattenLayerTest, TestSetupWithAxis) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_flatten_param()->set_axis(2); + FlattenLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + ASSERT_EQ(this->blob_top_->num_axes(), 3); + EXPECT_EQ(this->blob_top_->shape(0), 2); + EXPECT_EQ(this->blob_top_->shape(1), 3); + EXPECT_EQ(this->blob_top_->shape(2), 6 * 5); +} + +TYPED_TEST(FlattenLayerTest, TestSetupWithEndAxis) { + typedef 
typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_flatten_param()->set_end_axis(-2); + FlattenLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + ASSERT_EQ(this->blob_top_->num_axes(), 3); + EXPECT_EQ(this->blob_top_->shape(0), 2); + EXPECT_EQ(this->blob_top_->shape(1), 3 * 6); + EXPECT_EQ(this->blob_top_->shape(2), 5); +} + +TYPED_TEST(FlattenLayerTest, TestSetupWithStartAndEndAxis) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_flatten_param()->set_axis(0); + layer_param.mutable_flatten_param()->set_end_axis(-2); + FlattenLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + ASSERT_EQ(this->blob_top_->num_axes(), 2); + EXPECT_EQ(this->blob_top_->shape(0), 2 * 3 * 6); + EXPECT_EQ(this->blob_top_->shape(1), 5); +} + +TYPED_TEST(FlattenLayerTest, TestForward) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; FlattenLayer layer(layer_param); @@ -71,5 +106,4 @@ TYPED_TEST(FlattenLayerTest, TestGradient) { this->blob_top_vec_); } - } // namespace caffe diff --git a/src/caffe/test/test_gradient_based_solver.cpp b/src/caffe/test/test_gradient_based_solver.cpp index eb2569c04f2..c9135d64e70 100644 --- a/src/caffe/test/test_gradient_based_solver.cpp +++ b/src/caffe/test/test_gradient_based_solver.cpp @@ -23,7 +23,7 @@ class GradientBasedSolverTest : public MultiDeviceTest { protected: GradientBasedSolverTest() : - seed_(1701), num_(5), channels_(3), height_(10), width_(10) {} + seed_(1701), num_(4), channels_(3), height_(10), width_(10) {} shared_ptr > solver_; int seed_; @@ -56,19 +56,21 @@ class GradientBasedSolverTest : public MultiDeviceTest { } void RunLeastSquaresSolver(const Dtype learning_rate, - const Dtype weight_decay, const Dtype momentum, const int num_iters) { + const Dtype weight_decay, const Dtype momentum, const int num_iters, + const int iter_size = 1) { ostringstream proto; proto 
<< "max_iter: " << num_iters << " " "base_lr: " << learning_rate << " " "lr_policy: 'fixed' " + "iter_size: " << iter_size << " " "net_param { " " name: 'TestNetwork' " " layer { " " name: 'data' " " type: 'DummyData' " " dummy_data_param { " - " num: " << num_ << " " + " num: " << num_ / iter_size << " " " channels: " << channels_ << " " " height: " << height_ << " " " width: " << width_ << " " @@ -76,6 +78,10 @@ class GradientBasedSolverTest : public MultiDeviceTest { " height: 1 " " width: 1 " " data_filler { " + " type: 'constant' " + " value: 1.0 " + " } " + " data_filler { " " type: 'gaussian' " " std: 1.0 " " } " @@ -270,6 +276,45 @@ class GradientBasedSolverTest : public MultiDeviceTest { } } + void CheckAccumulation(const Dtype kLearningRate, const Dtype kWeightDecay, + const Dtype kMomentum, const int kNumIters, const int kIterSize) { + const double kPrecision = 1e-2; + const double kMinPrecision = 1e-7; + // Solve without accumulation and save parameters. + this->RunLeastSquaresSolver(kLearningRate, kWeightDecay, kMomentum, + kNumIters); + // Save parameters for comparison. + Net& net = *this->solver_->net(); + const vector > >& param_blobs = + net.layer_by_name("innerprod")->blobs(); + vector > > noaccum_params(param_blobs.size()); + for (int i = 0; i < param_blobs.size(); ++i) { + noaccum_params[i].reset(new Blob()); + noaccum_params[i]->CopyFrom(*param_blobs[i], false, true); + } + // Solve by equivalent accumulation of gradients over divided batches. + this->RunLeastSquaresSolver(kLearningRate, kWeightDecay, kMomentum, + kNumIters, kIterSize); + Net& net_accum = *this->solver_->net(); + const vector > >& accum_params = + net_accum.layer_by_name("innerprod")->blobs(); + // Compare accumulated parameters against no accumulation standard. 
+ const int D = this->channels_ * this->height_ * this->width_; + for (int i = 0; i < D; ++i) { + const Dtype expected_param = noaccum_params[0]->cpu_data()[i]; + const Dtype accum_param = accum_params[0]->cpu_data()[i]; + const Dtype error_margin = std::max(kMinPrecision, kPrecision * + std::min(fabs(expected_param), fabs(accum_param))); + EXPECT_NEAR(expected_param, accum_param, error_margin); + } + ASSERT_EQ(1, accum_params[1]->count()); + const Dtype expected_bias = noaccum_params[1]->cpu_data()[0]; + const Dtype accum_bias = accum_params[1]->cpu_data()[0]; + const Dtype error_margin = std::max(kMinPrecision, kPrecision * + std::min(fabs(expected_bias), fabs(accum_bias))); + EXPECT_NEAR(expected_bias, accum_bias, error_margin); + } + // Test that the correct update is computed for a regularized least squares // problem: // @@ -372,6 +417,16 @@ TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverything) { } } +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} template class AdaGradSolverTest : public GradientBasedSolverTest { @@ -416,6 +471,16 @@ TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateWithEverything) { } } +TYPED_TEST(AdaGradSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.0; + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} template class NesterovSolverTest : public GradientBasedSolverTest { @@ -482,4 +547,15 @@ TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithEverything) 
{ } } +TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + } // namespace caffe diff --git a/src/caffe/test/test_im2col_kernel.cu b/src/caffe/test/test_im2col_kernel.cu index ee684c00255..0017ac23e69 100644 --- a/src/caffe/test/test_im2col_kernel.cu +++ b/src/caffe/test/test_im2col_kernel.cu @@ -25,7 +25,7 @@ __global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; template -class Im2colKernelTest : public ::testing::Test { +class Im2colKernelTest : public GPUDeviceTest { protected: Im2colKernelTest() // big so launches > 1024 threads @@ -68,8 +68,6 @@ class Im2colKernelTest : public ::testing::Test { TYPED_TEST_CASE(Im2colKernelTest, TestDtypes); TYPED_TEST(Im2colKernelTest, TestGPU) { - Caffe::set_mode(Caffe::GPU); - // Reshape the blobs to correct size for im2col output this->blob_top_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, diff --git a/src/caffe/test/test_math_functions.cpp b/src/caffe/test/test_math_functions.cpp index 667f744bdd7..a095b544e17 100644 --- a/src/caffe/test/test_math_functions.cpp +++ b/src/caffe/test/test_math_functions.cpp @@ -15,8 +15,10 @@ namespace caffe { -template -class MathFunctionsTest : public ::testing::Test { +template +class MathFunctionsTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + protected: MathFunctionsTest() : blob_bottom_(new Blob()), @@ -64,14 +66,19 @@ class MathFunctionsTest : public ::testing::Test { Blob* const blob_top_; }; -TYPED_TEST_CASE(MathFunctionsTest, TestDtypes); +template +class CPUMathFunctionsTest + : public MathFunctionsTest > { +}; + 
+TYPED_TEST_CASE(CPUMathFunctionsTest, TestDtypes); -TYPED_TEST(MathFunctionsTest, TestNothing) { +TYPED_TEST(CPUMathFunctionsTest, TestNothing) { // The first test case of a test suite takes the longest time // due to the set up overhead. } -TYPED_TEST(MathFunctionsTest, TestHammingDistanceCPU) { +TYPED_TEST(CPUMathFunctionsTest, TestHammingDistance) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); const TypeParam* y = this->blob_top_->cpu_data(); @@ -79,7 +86,7 @@ TYPED_TEST(MathFunctionsTest, TestHammingDistanceCPU) { caffe_cpu_hamming_distance(n, x, y)); } -TYPED_TEST(MathFunctionsTest, TestAsumCPU) { +TYPED_TEST(CPUMathFunctionsTest, TestAsum) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); TypeParam std_asum = 0; @@ -90,7 +97,7 @@ TYPED_TEST(MathFunctionsTest, TestAsumCPU) { EXPECT_LT((cpu_asum - std_asum) / std_asum, 1e-2); } -TYPED_TEST(MathFunctionsTest, TestSignCPU) { +TYPED_TEST(CPUMathFunctionsTest, TestSign) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); caffe_cpu_sign(n, x, this->blob_bottom_->mutable_cpu_diff()); @@ -100,7 +107,7 @@ TYPED_TEST(MathFunctionsTest, TestSignCPU) { } } -TYPED_TEST(MathFunctionsTest, TestSgnbitCPU) { +TYPED_TEST(CPUMathFunctionsTest, TestSgnbit) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); caffe_cpu_sgnbit(n, x, this->blob_bottom_->mutable_cpu_diff()); @@ -110,7 +117,7 @@ TYPED_TEST(MathFunctionsTest, TestSgnbitCPU) { } } -TYPED_TEST(MathFunctionsTest, TestFabsCPU) { +TYPED_TEST(CPUMathFunctionsTest, TestFabs) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); caffe_abs(n, x, this->blob_bottom_->mutable_cpu_diff()); @@ -120,7 +127,7 @@ TYPED_TEST(MathFunctionsTest, TestFabsCPU) { } } -TYPED_TEST(MathFunctionsTest, TestScaleCPU) { +TYPED_TEST(CPUMathFunctionsTest, TestScale) { int n = 
this->blob_bottom_->count(); TypeParam alpha = this->blob_bottom_->cpu_diff()[caffe_rng_rand() % this->blob_bottom_->count()]; @@ -133,11 +140,10 @@ TYPED_TEST(MathFunctionsTest, TestScaleCPU) { } } -TYPED_TEST(MathFunctionsTest, TestCopyCPU) { +TYPED_TEST(CPUMathFunctionsTest, TestCopy) { const int n = this->blob_bottom_->count(); const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); TypeParam* top_data = this->blob_top_->mutable_cpu_data(); - Caffe::set_mode(Caffe::CPU); caffe_copy(n, bottom_data, top_data); for (int i = 0; i < n; ++i) { EXPECT_EQ(bottom_data[i], top_data[i]); @@ -146,8 +152,14 @@ TYPED_TEST(MathFunctionsTest, TestCopyCPU) { #ifndef CPU_ONLY +template +class GPUMathFunctionsTest : public MathFunctionsTest > { +}; + +TYPED_TEST_CASE(GPUMathFunctionsTest, TestDtypes); + // TODO: Fix caffe_gpu_hamming_distance and re-enable this test. -TYPED_TEST(MathFunctionsTest, DISABLED_TestHammingDistanceGPU) { +TYPED_TEST(GPUMathFunctionsTest, DISABLED_TestHammingDistance) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); const TypeParam* y = this->blob_top_->cpu_data(); @@ -158,7 +170,7 @@ TYPED_TEST(MathFunctionsTest, DISABLED_TestHammingDistanceGPU) { EXPECT_EQ(reference_distance, computed_distance); } -TYPED_TEST(MathFunctionsTest, TestAsumGPU) { +TYPED_TEST(GPUMathFunctionsTest, TestAsum) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); TypeParam std_asum = 0; @@ -170,7 +182,7 @@ TYPED_TEST(MathFunctionsTest, TestAsumGPU) { EXPECT_LT((gpu_asum - std_asum) / std_asum, 1e-2); } -TYPED_TEST(MathFunctionsTest, TestSignGPU) { +TYPED_TEST(GPUMathFunctionsTest, TestSign) { int n = this->blob_bottom_->count(); caffe_gpu_sign(n, this->blob_bottom_->gpu_data(), this->blob_bottom_->mutable_gpu_diff()); @@ -181,7 +193,7 @@ TYPED_TEST(MathFunctionsTest, TestSignGPU) { } } -TYPED_TEST(MathFunctionsTest, TestSgnbitGPU) { +TYPED_TEST(GPUMathFunctionsTest, TestSgnbit) { int n = 
this->blob_bottom_->count(); caffe_gpu_sgnbit(n, this->blob_bottom_->gpu_data(), this->blob_bottom_->mutable_gpu_diff()); @@ -192,7 +204,7 @@ TYPED_TEST(MathFunctionsTest, TestSgnbitGPU) { } } -TYPED_TEST(MathFunctionsTest, TestFabsGPU) { +TYPED_TEST(GPUMathFunctionsTest, TestFabs) { int n = this->blob_bottom_->count(); caffe_gpu_abs(n, this->blob_bottom_->gpu_data(), this->blob_bottom_->mutable_gpu_diff()); @@ -203,7 +215,7 @@ TYPED_TEST(MathFunctionsTest, TestFabsGPU) { } } -TYPED_TEST(MathFunctionsTest, TestScaleGPU) { +TYPED_TEST(GPUMathFunctionsTest, TestScale) { int n = this->blob_bottom_->count(); TypeParam alpha = this->blob_bottom_->cpu_diff()[caffe_rng_rand() % this->blob_bottom_->count()]; @@ -216,11 +228,10 @@ TYPED_TEST(MathFunctionsTest, TestScaleGPU) { } } -TYPED_TEST(MathFunctionsTest, TestCopyGPU) { +TYPED_TEST(GPUMathFunctionsTest, TestCopy) { const int n = this->blob_bottom_->count(); const TypeParam* bottom_data = this->blob_bottom_->gpu_data(); TypeParam* top_data = this->blob_top_->mutable_gpu_data(); - Caffe::set_mode(Caffe::GPU); caffe_copy(n, bottom_data, top_data); bottom_data = this->blob_bottom_->cpu_data(); top_data = this->blob_top_->mutable_cpu_data(); diff --git a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp index 9038017e3e2..b2db984feb1 100644 --- a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp +++ b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp @@ -16,7 +16,7 @@ namespace caffe { template -class MultinomialLogisticLossLayerTest : public ::testing::Test { +class MultinomialLogisticLossLayerTest : public CPUDeviceTest { protected: MultinomialLogisticLossLayerTest() : blob_bottom_data_(new Blob(10, 5, 1, 1)), @@ -51,7 +51,6 @@ TYPED_TEST_CASE(MultinomialLogisticLossLayerTest, TestDtypes); TYPED_TEST(MultinomialLogisticLossLayerTest, TestGradientCPU) { LayerParameter layer_param; - Caffe::set_mode(Caffe::CPU); MultinomialLogisticLossLayer 
layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); GradientChecker checker(1e-2, 2*1e-2, 1701, 0, 0.05); diff --git a/src/caffe/test/test_net.cpp b/src/caffe/test/test_net.cpp index 08106e79274..782a96bc9b6 100644 --- a/src/caffe/test/test_net.cpp +++ b/src/caffe/test/test_net.cpp @@ -613,6 +613,103 @@ class NetTest : public MultiDeviceTest { InitNetFromProtoString(proto); } + virtual void InitSkipPropNet(bool test_skip_true) { + string proto = + "name: 'SkipPropTestNetwork' " + "layer { " + " name: 'data' " + " type: 'DummyData' " + " dummy_data_param { " + " shape { " + " dim: 5 " + " dim: 2 " + " dim: 3 " + " dim: 4 " + " } " + " data_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " shape { " + " dim: 5 " + " } " + " data_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'silence' " + " bottom: 'label' " + " type: 'Silence' " + "} " + "layer { " + " name: 'innerproduct' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'data' " + " top: 'innerproduct' " + "} " + "layer { " + " name: 'ip_fake_labels' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " bottom: 'data' " + " top: 'fake_labels' " + "} " + "layer { " + " name: 'argmax' " + " bottom: 'fake_labels' " + " top: 'label_argmax' " + " type: 'ArgMax' " + "} " + "layer { " + " name: 'loss' " + " bottom: 'innerproduct' " + " bottom: 'label_argmax' "; + if (test_skip_true) + proto += " propagate_down: [true, false] "; + else + 
proto += " propagate_down: [true, true] "; + proto += + " top: 'cross_entropy_loss' " + " type: 'SigmoidCrossEntropyLoss' " + " loss_weight: 0.1 " + "} "; + InitNetFromProtoString(proto); + } + int seed_; shared_ptr > net_; }; @@ -2224,4 +2321,52 @@ TYPED_TEST(NetTest, TestReshape) { } } +TYPED_TEST(NetTest, TestSkipPropagateDown) { + // check bottom_need_backward if propagate_down is true + this->InitSkipPropNet(false); + vector vec_layer_need_backward = this->net_->layer_need_backward(); + for (int layer_id = 0; layer_id < this->net_->layers().size(); ++layer_id) { + string layer_name = this->net_->layer_names()[layer_id]; + if (layer_name == "loss") { + // access to bottom_need_backward coresponding to label's blob + bool need_back = this->net_->bottom_need_backward()[layer_id][1]; + // if propagate_down is true, the loss layer will try to + // backpropagate on labels + EXPECT_TRUE(need_back) << "bottom_need_backward should be True"; + } + // layer_need_backward should be True except for data and silence layers + if (layer_name.find("data") != std::string::npos || + layer_name == "silence") { + EXPECT_FALSE(vec_layer_need_backward[layer_id]) + << "layer_need_backward for " << layer_name << " should be False"; + } else { + EXPECT_TRUE(vec_layer_need_backward[layer_id]) + << "layer_need_backward for " << layer_name << " should be True"; + } + } + // check bottom_need_backward if propagat_down is false + this->InitSkipPropNet(true); + vec_layer_need_backward.clear(); + vec_layer_need_backward = this->net_->layer_need_backward(); + for (int layer_id = 0; layer_id < this->net_->layers().size(); ++layer_id) { + string layer_name = this->net_->layer_names()[layer_id]; + if (layer_name == "loss") { + // access to bottom_need_backward coresponding to label's blob + bool need_back = this->net_->bottom_need_backward()[layer_id][1]; + // if propagate_down is false, the loss layer will not try to + // backpropagate on labels + EXPECT_FALSE(need_back) << "bottom_need_backward 
should be False"; + } + // layer_need_backward should be False except for innerproduct and + // loss layers + if (layer_name == "innerproduct" || layer_name == "loss") { + EXPECT_TRUE(vec_layer_need_backward[layer_id]) + << "layer_need_backward for " << layer_name << " should be True"; + } else { + EXPECT_FALSE(vec_layer_need_backward[layer_id]) + << "layer_need_backward for " << layer_name << " should be False"; + } + } +} + } // namespace caffe diff --git a/src/caffe/test/test_neuron_layer.cpp b/src/caffe/test/test_neuron_layer.cpp index c9d52f247a6..c6e4d27b903 100644 --- a/src/caffe/test/test_neuron_layer.cpp +++ b/src/caffe/test/test_neuron_layer.cpp @@ -117,6 +117,49 @@ class NeuronLayerTest : public MultiDeviceTest { + slope_data[c] * std::min(bottom_data[i], (Dtype)(0))); } } + + void LogBottomInit() { + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + Dtype* bottom_data = this->blob_bottom_->mutable_cpu_data(); + caffe_exp(this->blob_bottom_->count(), bottom_data, bottom_data); + } + + void TestLogForward(const float base, const float scale, const float shift) { + LogBottomInit(); + LayerParameter layer_param; + layer_param.mutable_log_param()->set_base(base); + layer_param.mutable_log_param()->set_scale(scale); + layer_param.mutable_log_param()->set_shift(shift); + LogLayer layer(layer_param); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); + layer.Forward(blob_bottom_vec_, blob_top_vec_); + const Dtype kDelta = 2e-4; + const Dtype* bottom_data = blob_bottom_->cpu_data(); + const Dtype* top_data = blob_top_->cpu_data(); + for (int i = 0; i < blob_bottom_->count(); ++i) { + const Dtype bottom_val = bottom_data[i]; + const Dtype top_val = top_data[i]; + if (base == -1) { + EXPECT_NEAR(top_val, log(shift + scale * bottom_val), kDelta); + } else { + EXPECT_NEAR(top_val, log(shift + scale * bottom_val) / log(base), + kDelta); + } + } + } + + void TestLogGradient(const float base, const float scale, const 
float shift) { + LogBottomInit(); + LayerParameter layer_param; + layer_param.mutable_log_param()->set_base(base); + layer_param.mutable_log_param()->set_scale(scale); + layer_param.mutable_log_param()->set_shift(shift); + LogLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientEltwise(&layer, blob_bottom_vec_, blob_top_vec_); + } }; TYPED_TEST_CASE(NeuronLayerTest, TestDtypesAndDevices); @@ -339,6 +382,88 @@ TYPED_TEST(NeuronLayerTest, TestExpGradientBase2Shift1Scale3) { this->TestExpGradient(kBase, kScale, kShift); } +TYPED_TEST(NeuronLayerTest, TestLogLayer) { + typedef typename TypeParam::Dtype Dtype; + // Test default base of "-1" -- should actually set base := e. + const Dtype kBase = -1; + const Dtype kScale = 1; + const Dtype kShift = 0; + this->TestLogForward(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogGradient) { + typedef typename TypeParam::Dtype Dtype; + // Test default base of "-1" -- should actually set base := e. + const Dtype kBase = -1; + const Dtype kScale = 1; + const Dtype kShift = 0; + this->TestLogGradient(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogLayerBase2) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 1; + const Dtype kShift = 0; + this->TestLogForward(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogGradientBase2) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 1; + const Dtype kShift = 0; + this->TestLogGradient(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogLayerBase2Shift1) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 1; + const Dtype kShift = 1; + this->TestLogForward(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogGradientBase2Shift1) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 1; + const Dtype kShift = 1; + 
this->TestLogGradient(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogLayerBase2Scale3) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 3; + const Dtype kShift = 0; + this->TestLogForward(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogGradientBase2Scale3) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 3; + const Dtype kShift = 0; + this->TestLogGradient(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogLayerBase2Shift1Scale3) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 3; + const Dtype kShift = 1; + this->TestLogForward(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogGradientBase2Shift1Scale3) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 3; + const Dtype kShift = 1; + this->TestLogGradient(kBase, kScale, kShift); +} + TYPED_TEST(NeuronLayerTest, TestDropoutHalf) { const float kDropoutRatio = 0.5; this->TestDropoutForward(kDropoutRatio); @@ -541,14 +666,10 @@ TYPED_TEST(NeuronLayerTest, TestPReLUInPlace) { caffe_copy(ip2.blobs()[0]->count(), ip.blobs()[0]->cpu_data(), ip2.blobs()[0]->mutable_cpu_data()); // Forward in-place - ip.Reshape(this->blob_bottom_vec_, this->blob_top_vec_); ip.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - prelu.Reshape(this->blob_top_vec_, this->blob_top_vec_); prelu.Forward(this->blob_top_vec_, this->blob_top_vec_); // Forward non-in-place - ip2.Reshape(blob_bottom_vec_2, blob_middle_vec_2); ip2.Forward(blob_bottom_vec_2, blob_middle_vec_2); - prelu2.Reshape(blob_middle_vec_2, blob_top_vec_2); prelu2.Forward(blob_middle_vec_2, blob_top_vec_2); // Check numbers for (int s = 0; s < blob_top_2->count(); ++s) { @@ -590,7 +711,7 @@ TYPED_TEST(NeuronLayerTest, TestPReLUInPlace) { #ifdef USE_CUDNN template -class CuDNNNeuronLayerTest : public ::testing::Test { +class CuDNNNeuronLayerTest 
: public GPUDeviceTest { protected: CuDNNNeuronLayerTest() : blob_bottom_(new Blob(2, 3, 4, 5)), @@ -613,7 +734,6 @@ class CuDNNNeuronLayerTest : public ::testing::Test { TYPED_TEST_CASE(CuDNNNeuronLayerTest, TestDtypes); TYPED_TEST(CuDNNNeuronLayerTest, TestReLUCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNReLULayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -628,7 +748,6 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNReLULayer layer(layer_param); GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); @@ -637,7 +756,6 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestReLUWithNegativeSlopeCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CHECK(google::protobuf::TextFormat::ParseFromString( "relu_param { negative_slope: 0.01 }", &layer_param)); @@ -657,7 +775,6 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUWithNegativeSlopeCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientWithNegativeSlopeCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CHECK(google::protobuf::TextFormat::ParseFromString( "relu_param { negative_slope: 0.01 }", &layer_param)); @@ -668,7 +785,6 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientWithNegativeSlopeCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNSigmoidLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -685,7 +801,6 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidGradientCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNSigmoidLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); @@ -694,7 +809,6 @@ TYPED_TEST(CuDNNNeuronLayerTest, 
TestSigmoidGradientCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestTanHCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNTanHLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -717,7 +831,6 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestTanHCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestTanHGradientCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNTanHLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); diff --git a/src/caffe/test/test_pooling_layer.cpp b/src/caffe/test/test_pooling_layer.cpp index e9964e7f0b7..69f2d5c1135 100644 --- a/src/caffe/test/test_pooling_layer.cpp +++ b/src/caffe/test/test_pooling_layer.cpp @@ -608,7 +608,7 @@ TYPED_TEST(PoolingLayerTest, TestGradientAvePadded) { #ifdef USE_CUDNN template -class CuDNNPoolingLayerTest : public ::testing::Test { +class CuDNNPoolingLayerTest : public GPUDeviceTest { protected: CuDNNPoolingLayerTest() : blob_bottom_(new Blob()), @@ -963,7 +963,6 @@ class CuDNNPoolingLayerTest : public ::testing::Test { TYPED_TEST_CASE(CuDNNPoolingLayerTest, TestDtypes); TYPED_TEST(CuDNNPoolingLayerTest, TestSetupCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -977,7 +976,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestSetupCuDNN) { } TYPED_TEST(CuDNNPoolingLayerTest, TestSetupPaddedCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -994,7 +992,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestSetupPaddedCuDNN) { /* TYPED_TEST(CuDNNPoolingLayerTest, PrintBackwardCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; layer_param.set_kernelsize(3); layer_param.set_stride(2); @@ -1020,7 +1017,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, PrintBackwardCuDNN) { */ TYPED_TEST(CuDNNPoolingLayerTest, 
TestForwardMaxCuDNN) { - Caffe::set_mode(Caffe::GPU); this->TestForwardSquare(); this->TestForwardRectHigh(); this->TestForwardRectWide(); @@ -1030,7 +1026,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxCuDNN) { // the corresponding backward test. /* TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxTopMaskCuDNN) { - Caffe::set_mode(Caffe::GPU); this->blob_top_vec_.push_back(this->blob_top_mask_); this->TestForwardSquare(); this->TestForwardRectHigh(); @@ -1039,7 +1034,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxTopMaskCuDNN) { */ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxCuDNN) { - Caffe::set_mode(Caffe::GPU); for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; @@ -1059,7 +1053,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxCuDNN) { } TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxPaddedCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -1105,7 +1098,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxPaddedCuDNN) { /* TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxTopMaskCuDNN) { - Caffe::set_mode(Caffe::GPU); for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; @@ -1126,7 +1118,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxTopMaskCuDNN) { */ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardAveCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -1152,7 +1143,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardAveCuDNN) { } TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAveCuDNN) { - Caffe::set_mode(Caffe::GPU); for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter 
layer_param; @@ -1170,7 +1160,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAveCuDNN) { } TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAvePaddedCuDNN) { - Caffe::set_mode(Caffe::GPU); for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; diff --git a/src/caffe/test/test_softmax_layer.cpp b/src/caffe/test/test_softmax_layer.cpp index f6674422e56..996da4b8f7c 100644 --- a/src/caffe/test/test_softmax_layer.cpp +++ b/src/caffe/test/test_softmax_layer.cpp @@ -82,7 +82,7 @@ TYPED_TEST(SoftmaxLayerTest, TestGradient) { #ifdef USE_CUDNN template -class CuDNNSoftmaxLayerTest : public ::testing::Test { +class CuDNNSoftmaxLayerTest : public GPUDeviceTest { protected: CuDNNSoftmaxLayerTest() : blob_bottom_(new Blob(2, 10, 2, 3)), @@ -104,7 +104,6 @@ class CuDNNSoftmaxLayerTest : public ::testing::Test { TYPED_TEST_CASE(CuDNNSoftmaxLayerTest, TestDtypes); TYPED_TEST(CuDNNSoftmaxLayerTest, TestForwardCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNSoftmaxLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -138,7 +137,6 @@ TYPED_TEST(CuDNNSoftmaxLayerTest, TestForwardCuDNN) { } TYPED_TEST(CuDNNSoftmaxLayerTest, TestGradientCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNSoftmaxLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); diff --git a/src/caffe/test/test_stochastic_pooling.cpp b/src/caffe/test/test_stochastic_pooling.cpp index 12962c65d85..f84464c322c 100644 --- a/src/caffe/test/test_stochastic_pooling.cpp +++ b/src/caffe/test/test_stochastic_pooling.cpp @@ -16,8 +16,10 @@ using std::min; namespace caffe { -template -class StochasticPoolingLayerTest : public ::testing::Test { +template +class StochasticPoolingLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + protected: StochasticPoolingLayerTest() : blob_bottom_(new Blob()), @@ -45,9 +47,14 @@ class 
StochasticPoolingLayerTest : public ::testing::Test { vector*> blob_top_vec_; }; -TYPED_TEST_CASE(StochasticPoolingLayerTest, TestDtypes); +template +class CPUStochasticPoolingLayerTest + : public StochasticPoolingLayerTest > { +}; + +TYPED_TEST_CASE(CPUStochasticPoolingLayerTest, TestDtypes); -TYPED_TEST(StochasticPoolingLayerTest, TestSetup) { +TYPED_TEST(CPUStochasticPoolingLayerTest, TestSetup) { LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -60,8 +67,16 @@ TYPED_TEST(StochasticPoolingLayerTest, TestSetup) { EXPECT_EQ(this->blob_top_->width(), 2); } -TYPED_TEST(StochasticPoolingLayerTest, TestStochasticGPU) { - Caffe::set_mode(Caffe::GPU); +#ifndef CPU_ONLY + +template +class GPUStochasticPoolingLayerTest + : public StochasticPoolingLayerTest > { +}; + +TYPED_TEST_CASE(GPUStochasticPoolingLayerTest, TestDtypes); + +TYPED_TEST(GPUStochasticPoolingLayerTest, TestStochastic) { LayerParameter layer_param; layer_param.set_phase(TRAIN); PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); @@ -104,8 +119,7 @@ TYPED_TEST(StochasticPoolingLayerTest, TestStochasticGPU) { EXPECT_GE(total / this->blob_top_->count(), 0.55); } -TYPED_TEST(StochasticPoolingLayerTest, TestStochasticGPUTestPhase) { - Caffe::set_mode(Caffe::GPU); +TYPED_TEST(GPUStochasticPoolingLayerTest, TestStochasticTestPhase) { LayerParameter layer_param; layer_param.set_phase(TEST); PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); @@ -142,8 +156,7 @@ TYPED_TEST(StochasticPoolingLayerTest, TestStochasticGPUTestPhase) { } } -TYPED_TEST(StochasticPoolingLayerTest, TestGradientGPU) { - Caffe::set_mode(Caffe::GPU); +TYPED_TEST(GPUStochasticPoolingLayerTest, TestGradient) { LayerParameter layer_param; layer_param.set_phase(TRAIN); PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); @@ -158,6 +171,6 @@ TYPED_TEST(StochasticPoolingLayerTest, TestGradientGPU) { 
this->blob_top_vec_); } - +#endif } // namespace caffe diff --git a/src/caffe/test/test_triplet_loss_layer.cpp b/src/caffe/test/test_triplet_loss_layer.cpp deleted file mode 100644 index c8d9377fa23..00000000000 --- a/src/caffe/test/test_triplet_loss_layer.cpp +++ /dev/null @@ -1,107 +0,0 @@ -#include -#include -#include -#include -#include - -#include "gtest/gtest.h" - -#include "caffe/blob.hpp" -#include "caffe/common.hpp" -#include "caffe/filler.hpp" -#include "caffe/vision_layers.hpp" - -#include "caffe/test/test_caffe_main.hpp" -#include "caffe/test/test_gradient_check_util.hpp" - -namespace caffe { - -template -class TripletLossLayerTest : public MultiDeviceTest { - typedef typename TypeParam::Dtype Dtype; - - protected: - TripletLossLayerTest() - : blob_bottom_data_i_(new Blob(128, 10, 1, 1)), - blob_bottom_data_j_(new Blob(128, 10, 1, 1)), - blob_bottom_data_k_(new Blob(128, 10, 1, 1)), - blob_bottom_y_(new Blob(128, 1, 1, 1)), - blob_top_loss_(new Blob()) { - // fill the values - FillerParameter filler_param; - filler_param.set_mean(0.0); - filler_param.set_std(0.3); // distances~=1.0 to test both sides of margin - GaussianFiller filler(filler_param); - filler.Fill(this->blob_bottom_data_i_); - blob_bottom_vec_.push_back(blob_bottom_data_i_); - filler.Fill(this->blob_bottom_data_j_); - blob_bottom_vec_.push_back(blob_bottom_data_j_); - filler.Fill(this->blob_bottom_data_k_); - blob_bottom_vec_.push_back(blob_bottom_data_k_); - for (int i = 0; i < blob_bottom_y_->count(); ++i) { - blob_bottom_y_->mutable_cpu_data()[i] = caffe_rng_rand() % 2; // 0 or 1 - } - blob_bottom_vec_.push_back(blob_bottom_y_); - blob_top_vec_.push_back(blob_top_loss_); - } - virtual ~TripletLossLayerTest() { - delete blob_bottom_data_i_; - delete blob_bottom_data_j_; - delete blob_bottom_data_k_; - delete blob_bottom_y_; - delete blob_top_loss_; - } - - Blob* const blob_bottom_data_i_; - Blob* const blob_bottom_data_j_; - Blob* const blob_bottom_data_k_; - Blob* const blob_bottom_y_; 
- Blob* const blob_top_loss_; - vector*> blob_bottom_vec_; - vector*> blob_top_vec_; -}; - -TYPED_TEST_CASE(TripletLossLayerTest, TestDtypesAndDevices); - -TYPED_TEST(TripletLossLayerTest, TestForward) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - TripletLossLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - // manually compute to compare - const Dtype margin = layer_param.contrastive_loss_param().margin(); - const int num = this->blob_bottom_data_i_->num(); - const int channels = this->blob_bottom_data_i_->channels(); - Dtype loss(0); - for (int i = 0; i < num; ++i) { - Dtype dist_sq(0); - for (int j = 0; j < channels; ++j) { - Dtype diff = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - - this->blob_bottom_data_j_->cpu_data()[i*channels+j]; - dist_sq += diff*diff; - } - if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs - loss += dist_sq; - } else { - loss += std::max(margin-dist_sq, Dtype(0)); - } - } - loss /= static_cast(num) * Dtype(2); - EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); -} - -TYPED_TEST(TripletLossLayerTest, TestGradient) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - TripletLossLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - GradientChecker checker(1e-2, 1e-2, 1701); - // check the gradient for the first two bottom layers - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_, 0); - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_, 1); -} - -} // namespace caffe diff --git a/src/caffe/util/math_functions.cpp b/src/caffe/util/math_functions.cpp index 13e17be582b..0aab6b17b85 100644 --- a/src/caffe/util/math_functions.cpp +++ b/src/caffe/util/math_functions.cpp @@ -206,6 +206,16 @@ void caffe_exp(const int n, const double* a, double* y) { vdExp(n, a, 
y); } +template <> +void caffe_log(const int n, const float* a, float* y) { + vsLn(n, a, y); +} + +template <> +void caffe_log(const int n, const double* a, double* y) { + vdLn(n, a, y); +} + template <> void caffe_abs(const int n, const float* a, float* y) { vsAbs(n, a, y); diff --git a/src/caffe/util/math_functions.cu b/src/caffe/util/math_functions.cu index 43e65eb9a69..2631a0740d6 100644 --- a/src/caffe/util/math_functions.cu +++ b/src/caffe/util/math_functions.cu @@ -324,6 +324,27 @@ void caffe_gpu_exp(const int N, const double* a, double* y) { N, a, y); } +template +__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] = log(a[index]); + } +} + +template <> +void caffe_gpu_log(const int N, const float* a, float* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + log_kernel<<>>( + N, a, y); +} + +template <> +void caffe_gpu_log(const int N, const double* a, double* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + log_kernel<<>>( + N, a, y); +} + template __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { From fbcab79046ff0ea84e7dc8f656160a8b844bae17 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Mon, 15 Jun 2015 15:35:57 +0800 Subject: [PATCH 07/82] suit for opencv3.0.0 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index e4e66dfd138..140acba7b7b 100644 --- a/Makefile +++ b/Makefile @@ -171,7 +171,7 @@ ifneq ($(CPU_ONLY), 1) endif LIBRARIES += glog gflags protobuf leveldb snappy \ lmdb boost_system hdf5_hl hdf5 m \ - opencv_core opencv_highgui opencv_imgproc + opencv_core opencv_highgui opencv_imgproc opencv_imgcodecs PYTHON_LIBRARIES := boost_python python2.7 WARNINGS := -Wall -Wno-sign-compare From d92b0d421bc76fee94c1ea33484ede9a2db79a14 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Mon, 15 Jun 2015 16:13:24 +0800 Subject: [PATCH 08/82] delete spaces --- src/caffe/layers/triplet_loss_layer.cpp | 22 
++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/src/caffe/layers/triplet_loss_layer.cpp b/src/caffe/layers/triplet_loss_layer.cpp index de60a5f78d5..f963a7f9136 100644 --- a/src/caffe/layers/triplet_loss_layer.cpp +++ b/src/caffe/layers/triplet_loss_layer.cpp @@ -55,22 +55,18 @@ void TripletLossLayer::Forward_cpu( Dtype margin = this->layer_param_.triplet_loss_param().margin(); Dtype loss(0.0); - //Loss component calculated from ab + // Loss component calculated from ab for (int i = 0; i < bottom[0]->num(); ++i) { dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(channels, diff_pos.cpu_data() + (i*channels), diff_pos.cpu_data() + (i*channels)); - //ab is a similar pair + // ab is a similar pair dist_sq_.mutable_cpu_data()[i] += dist_sq_pos.cpu_data()[i]; - - - //Loss component calculated from ac + // Loss component calculated from ac dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(channels, diff_neg.cpu_data() + (i*channels), diff_neg.cpu_data() + (i*channels)); - //ac is a dissimilar pair + // ac is a dissimilar pair dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg.cpu_data()[i]; - loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); - } loss = loss / static_cast(bottom[0]->num()) / Dtype(2); top[0]->mutable_cpu_data()[0] = loss; @@ -80,7 +76,8 @@ template void TripletLossLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { Dtype margin = this->layer_param_.triplet_loss_param().margin(); - for (int i = 1; i < 3; ++i) {//there must be further check to ensure the gradient calc + for (int i = 1; i < 3; ++i) { +// there must be further check to ensure the gradient calc if (propagate_down[i]) { const Dtype sign = (i == 2) ? 
1 : -1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / @@ -97,17 +94,14 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, diff_pos.cpu_data() + (j*channels), Dtype(0.0), bout + (j*channels)); - - // dissimilar pairs - + // dissimilar pairs caffe_cpu_axpby( channels, -alpha, diff_neg.cpu_data() + (j*channels), Dtype(0.0), bout + (j*channels)); - - } else { + } else { caffe_set(channels, Dtype(0), bout + (j*channels)); } } From d45672c8257ced7c3508fb7018b35e00cff2085f Mon Sep 17 00:00:00 2001 From: Wangyida Date: Mon, 15 Jun 2015 16:38:27 +0800 Subject: [PATCH 09/82] delete libopenccodecsv --- Makefile | 2 +- src/caffe/layers/triplet_loss_layer.cpp | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index 140acba7b7b..2b918ffd26d 100644 --- a/Makefile +++ b/Makefile @@ -171,7 +171,7 @@ ifneq ($(CPU_ONLY), 1) endif LIBRARIES += glog gflags protobuf leveldb snappy \ lmdb boost_system hdf5_hl hdf5 m \ - opencv_core opencv_highgui opencv_imgproc opencv_imgcodecs + opencv_core opencv_highgui opencv_imgproc #opencv_imgcodecs PYTHON_LIBRARIES := boost_python python2.7 WARNINGS := -Wall -Wno-sign-compare diff --git a/src/caffe/layers/triplet_loss_layer.cpp b/src/caffe/layers/triplet_loss_layer.cpp index f963a7f9136..2ee8bc2037e 100644 --- a/src/caffe/layers/triplet_loss_layer.cpp +++ b/src/caffe/layers/triplet_loss_layer.cpp @@ -84,9 +84,9 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, static_cast(bottom[i]->num()); int num = bottom[i]->num(); int channels = bottom[i]->channels(); - for (int j = 0; j < num; ++j) { + for (int j = 0; j < num; ++j) { Dtype* bout = bottom[i]->mutable_cpu_diff(); - if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { // similar pairs caffe_cpu_axpby( channels, @@ -101,9 +101,9 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, diff_neg.cpu_data() + (j*channels), Dtype(0.0), bout + (j*channels)); - } else { + } 
else { caffe_set(channels, Dtype(0), bout + (j*channels)); - } + } } } } From b825d33cf064faaea2fc875db1976bddd036c2d7 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Mon, 15 Jun 2015 17:33:53 +0800 Subject: [PATCH 10/82] triplet tranining data code fix --- .../triplet/convert_mnist_triplet_data.cpp | 1 - src/caffe/test/test_triplet_loss_layer | 155 ++++++++++++++++++ 2 files changed, 155 insertions(+), 1 deletion(-) create mode 100644 src/caffe/test/test_triplet_loss_layer diff --git a/examples/triplet/convert_mnist_triplet_data.cpp b/examples/triplet/convert_mnist_triplet_data.cpp index e35e7f4f3bf..0cbab642b7c 100644 --- a/examples/triplet/convert_mnist_triplet_data.cpp +++ b/examples/triplet/convert_mnist_triplet_data.cpp @@ -97,7 +97,6 @@ void convert_dataset(const char* image_filename, const char* label_filename, datum.set_data(pixels, 3*rows*cols); if (label_i == label_j && label_i != label_k) { datum.set_label(1); - datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", itemid); db->Put(leveldb::WriteOptions(), std::string(key), value); diff --git a/src/caffe/test/test_triplet_loss_layer b/src/caffe/test/test_triplet_loss_layer new file mode 100644 index 00000000000..4050a35d80b --- /dev/null +++ b/src/caffe/test/test_triplet_loss_layer @@ -0,0 +1,155 @@ +#include +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class TripletLossLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + TripletLossLayerTest() + : blob_bottom_data_i_(new Blob(512, 2, 1, 1)), + blob_bottom_data_j_(new Blob(512, 2, 1, 1)), + blob_bottom_data_k_(new Blob(512, 2, 1, 1)), + blob_bottom_y_(new Blob(512, 1, 1, 1)), + blob_top_loss_(new Blob()) { + // fill the 
values + FillerParameter filler_param; + filler_param.set_min(-1.0); + filler_param.set_max(1.0); // distances~=1.0 to test both sides of margin + UniformFiller filler(filler_param); + filler.Fill(this->blob_bottom_data_i_); + blob_bottom_vec_.push_back(blob_bottom_data_i_); + filler.Fill(this->blob_bottom_data_j_); + blob_bottom_vec_.push_back(blob_bottom_data_j_); + filler.Fill(this->blob_bottom_data_k_); + blob_bottom_vec_.push_back(blob_bottom_data_k_); + for (int i = 0; i < blob_bottom_y_->count(); ++i) { + blob_bottom_y_->mutable_cpu_data()[i] = caffe_rng_rand() % 2; // 0 or 1 + } + blob_bottom_vec_.push_back(blob_bottom_y_); + blob_top_vec_.push_back(blob_top_loss_); + } + virtual ~TripletLossLayerTest() { + delete blob_bottom_data_i_; + delete blob_bottom_data_j_; + delete blob_bottom_data_k_; + delete blob_bottom_y_; + delete blob_top_loss_; + } + + Blob* const blob_bottom_data_i_; + Blob* const blob_bottom_data_j_; + Blob* const blob_bottom_data_k_; + Blob* const blob_bottom_y_; + Blob* const blob_top_loss_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(TripletLossLayerTest, TestDtypesAndDevices); + +TYPED_TEST(TripletLossLayerTest, TestForward) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + TripletLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // manually compute to compare + const Dtype margin = layer_param.triplet_loss_param().margin(); + const int num = this->blob_bottom_data_i_->num(); + const int channels = this->blob_bottom_data_i_->channels(); + Dtype loss(0); + for (int i = 0; i < num; ++i) { + Dtype dist_sq(0); + for (int j = 0; j < channels; ++j) { + Dtype diff_pos = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - + this->blob_bottom_data_j_->cpu_data()[i*channels+j]; + dist_sq += diff_pos*diff_pos; + Dtype diff_neg = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - + 
this->blob_bottom_data_j_->cpu_data()[i*channels+j]; + dist_sq -= diff_neg*diff_neg; + } + loss += std::max(margin + dist_sq, 0.0); + /*if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs + loss += dist_sq; + } else { + Dtype dist = std::max(margin - sqrt(dist_sq), 0.0); + loss += dist*dist; + }*/ + } + loss /= static_cast(num) * Dtype(2); + EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); +} + +TYPED_TEST(TripletLossLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + TripletLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + GradientChecker checker(1e-2, 1e-2, 1701); + // check the gradient for the first two bottom layers + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 1); +} + +/*TYPED_TEST(TripletLossLayerTest, TestForwardLegacy) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_triplet_loss_param()->set_legacy_version(true); + TripletLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // manually compute to compare + const Dtype margin = layer_param.triplet_loss_param().margin(); + const int num = this->blob_bottom_data_i_->num(); + const int channels = this->blob_bottom_data_i_->channels(); + Dtype loss(0); + for (int i = 0; i < num; ++i) { + Dtype dist_sq(0); + for (int j = 0; j < channels; ++j) { + Dtype diff = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - + this->blob_bottom_data_j_->cpu_data()[i*channels+j]; + dist_sq += diff*diff; + } + if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs + loss += dist_sq; + } else { + loss += std::max(margin - dist_sq, Dtype(0.0)); + } + } + loss /= static_cast(num) * Dtype(2); + 
EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); +} + +TYPED_TEST(TripletLossLayerTest, TestGradientLegacy) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_triplet_loss_param()->set_legacy_version(true); + TripletLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + GradientChecker checker(1e-2, 1e-2, 1701); + // check the gradient for the first two bottom layers + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 1); +}*/ + +} // namespace caffe From 273d375766e967e9667bb9de92e68ec88ff6b6d9 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Mon, 15 Jun 2015 20:43:15 +0800 Subject: [PATCH 11/82] cpu only --- Makefile | 631 ---------------------------------------- Makefile.config.example | 2 +- 2 files changed, 1 insertion(+), 632 deletions(-) delete mode 100644 Makefile diff --git a/Makefile b/Makefile deleted file mode 100644 index 2b918ffd26d..00000000000 --- a/Makefile +++ /dev/null @@ -1,631 +0,0 @@ -PROJECT := caffe - -CONFIG_FILE := Makefile.config -# Explicitly check for the config file, otherwise make -k will proceed anyway. -ifeq ($(wildcard $(CONFIG_FILE)),) -$(error $(CONFIG_FILE) not found. See $(CONFIG_FILE).example.) -endif -include $(CONFIG_FILE) - -BUILD_DIR_LINK := $(BUILD_DIR) -ifeq ($(RELEASE_BUILD_DIR),) - RELEASE_BUILD_DIR := .$(BUILD_DIR)_release -endif -ifeq ($(DEBUG_BUILD_DIR),) - DEBUG_BUILD_DIR := .$(BUILD_DIR)_debug -endif - -DEBUG ?= 0 -ifeq ($(DEBUG), 1) - BUILD_DIR := $(DEBUG_BUILD_DIR) - OTHER_BUILD_DIR := $(RELEASE_BUILD_DIR) -else - BUILD_DIR := $(RELEASE_BUILD_DIR) - OTHER_BUILD_DIR := $(DEBUG_BUILD_DIR) -endif - -# All of the directories containing code. -SRC_DIRS := $(shell find * -type d -exec bash -c "find {} -maxdepth 1 \ - \( -name '*.cpp' -o -name '*.proto' \) | grep -q ." 
\; -print) - -# The target shared library name -LIB_BUILD_DIR := $(BUILD_DIR)/lib -STATIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).a -DYNAMIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).so - -############################## -# Get all source files -############################## -# CXX_SRCS are the source files excluding the test ones. -CXX_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cpp" -name "*.cpp") -# CU_SRCS are the cuda source files -CU_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cu" -name "*.cu") -# TEST_SRCS are the test source files -TEST_MAIN_SRC := src/$(PROJECT)/test/test_caffe_main.cpp -TEST_SRCS := $(shell find src/$(PROJECT) -name "test_*.cpp") -TEST_SRCS := $(filter-out $(TEST_MAIN_SRC), $(TEST_SRCS)) -TEST_CU_SRCS := $(shell find src/$(PROJECT) -name "test_*.cu") -GTEST_SRC := src/gtest/gtest-all.cpp -# TOOL_SRCS are the source files for the tool binaries -TOOL_SRCS := $(shell find tools -name "*.cpp") -# EXAMPLE_SRCS are the source files for the example binaries -EXAMPLE_SRCS := $(shell find examples -name "*.cpp") -# BUILD_INCLUDE_DIR contains any generated header files we want to include. -BUILD_INCLUDE_DIR := $(BUILD_DIR)/src -# PROTO_SRCS are the protocol buffer definitions -PROTO_SRC_DIR := src/$(PROJECT)/proto -PROTO_SRCS := $(wildcard $(PROTO_SRC_DIR)/*.proto) -# PROTO_BUILD_DIR will contain the .cc and obj files generated from -# PROTO_SRCS; PROTO_BUILD_INCLUDE_DIR will contain the .h header files -PROTO_BUILD_DIR := $(BUILD_DIR)/$(PROTO_SRC_DIR) -PROTO_BUILD_INCLUDE_DIR := $(BUILD_INCLUDE_DIR)/$(PROJECT)/proto -# NONGEN_CXX_SRCS includes all source/header files except those generated -# automatically (e.g., by proto). 
-NONGEN_CXX_SRCS := $(shell find \ - src/$(PROJECT) \ - include/$(PROJECT) \ - python/$(PROJECT) \ - matlab/+$(PROJECT)/private \ - examples \ - tools \ - -name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh") -LINT_SCRIPT := scripts/cpp_lint.py -LINT_OUTPUT_DIR := $(BUILD_DIR)/.lint -LINT_EXT := lint.txt -LINT_OUTPUTS := $(addsuffix .$(LINT_EXT), $(addprefix $(LINT_OUTPUT_DIR)/, $(NONGEN_CXX_SRCS))) -EMPTY_LINT_REPORT := $(BUILD_DIR)/.$(LINT_EXT) -NONEMPTY_LINT_REPORT := $(BUILD_DIR)/$(LINT_EXT) -# PY$(PROJECT)_SRC is the python wrapper for $(PROJECT) -PY$(PROJECT)_SRC := python/$(PROJECT)/_$(PROJECT).cpp -PY$(PROJECT)_SO := python/$(PROJECT)/_$(PROJECT).so -PY$(PROJECT)_HXX := include/$(PROJECT)/python_layer.hpp -# MAT$(PROJECT)_SRC is the mex entrance point of matlab package for $(PROJECT) -MAT$(PROJECT)_SRC := matlab/+$(PROJECT)/private/$(PROJECT)_.cpp -ifneq ($(MATLAB_DIR),) - MAT_SO_EXT := $(shell $(MATLAB_DIR)/bin/mexext) -endif -MAT$(PROJECT)_SO := matlab/+$(PROJECT)/private/$(PROJECT)_.$(MAT_SO_EXT) - -############################## -# Derive generated files -############################## -# The generated files for protocol buffers -PROTO_GEN_HEADER_SRCS := $(addprefix $(PROTO_BUILD_DIR)/, \ - $(notdir ${PROTO_SRCS:.proto=.pb.h})) -PROTO_GEN_HEADER := $(addprefix $(PROTO_BUILD_INCLUDE_DIR)/, \ - $(notdir ${PROTO_SRCS:.proto=.pb.h})) -PROTO_GEN_CC := $(addprefix $(BUILD_DIR)/, ${PROTO_SRCS:.proto=.pb.cc}) -PY_PROTO_BUILD_DIR := python/$(PROJECT)/proto -PY_PROTO_INIT := python/$(PROJECT)/proto/__init__.py -PROTO_GEN_PY := $(foreach file,${PROTO_SRCS:.proto=_pb2.py}, \ - $(PY_PROTO_BUILD_DIR)/$(notdir $(file))) -# The objects corresponding to the source files -# These objects will be linked into the final shared library, so we -# exclude the tool, example, and test objects. 
-CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o}) -CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o}) -PROTO_OBJS := ${PROTO_GEN_CC:.cc=.o} -OBJS := $(PROTO_OBJS) $(CXX_OBJS) $(CU_OBJS) -# tool, example, and test objects -TOOL_OBJS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o}) -TOOL_BUILD_DIR := $(BUILD_DIR)/tools -TEST_CXX_BUILD_DIR := $(BUILD_DIR)/src/$(PROJECT)/test -TEST_CU_BUILD_DIR := $(BUILD_DIR)/cuda/src/$(PROJECT)/test -TEST_CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o}) -TEST_CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o}) -TEST_OBJS := $(TEST_CXX_OBJS) $(TEST_CU_OBJS) -GTEST_OBJ := $(addprefix $(BUILD_DIR)/, ${GTEST_SRC:.cpp=.o}) -EXAMPLE_OBJS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o}) -# Output files for automatic dependency generation -DEPS := ${CXX_OBJS:.o=.d} ${CU_OBJS:.o=.d} ${TEST_CXX_OBJS:.o=.d} \ - ${TEST_CU_OBJS:.o=.d} $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d} -# tool, example, and test bins -TOOL_BINS := ${TOOL_OBJS:.o=.bin} -EXAMPLE_BINS := ${EXAMPLE_OBJS:.o=.bin} -# symlinks to tool bins without the ".bin" extension -TOOL_BIN_LINKS := ${TOOL_BINS:.bin=} -# Put the test binaries in build/test for convenience. -TEST_BIN_DIR := $(BUILD_DIR)/test -TEST_CU_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \ - $(foreach obj,$(TEST_CU_OBJS),$(basename $(notdir $(obj)))))) -TEST_CXX_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \ - $(foreach obj,$(TEST_CXX_OBJS),$(basename $(notdir $(obj)))))) -TEST_BINS := $(TEST_CXX_BINS) $(TEST_CU_BINS) -# TEST_ALL_BIN is the test binary that links caffe dynamically. 
-TEST_ALL_BIN := $(TEST_BIN_DIR)/test_all.testbin - -############################## -# Derive compiler warning dump locations -############################## -WARNS_EXT := warnings.txt -CXX_WARNS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o.$(WARNS_EXT)}) -CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o.$(WARNS_EXT)}) -TOOL_WARNS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o.$(WARNS_EXT)}) -EXAMPLE_WARNS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o.$(WARNS_EXT)}) -TEST_WARNS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o.$(WARNS_EXT)}) -TEST_CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o.$(WARNS_EXT)}) -ALL_CXX_WARNS := $(CXX_WARNS) $(TOOL_WARNS) $(EXAMPLE_WARNS) $(TEST_WARNS) -ALL_CU_WARNS := $(CU_WARNS) $(TEST_CU_WARNS) -ALL_WARNS := $(ALL_CXX_WARNS) $(ALL_CU_WARNS) - -EMPTY_WARN_REPORT := $(BUILD_DIR)/.$(WARNS_EXT) -NONEMPTY_WARN_REPORT := $(BUILD_DIR)/$(WARNS_EXT) - -############################## -# Derive include and lib directories -############################## -CUDA_INCLUDE_DIR := $(CUDA_DIR)/include - -CUDA_LIB_DIR := -# add /lib64 only if it exists -ifneq ("$(wildcard $(CUDA_DIR)/lib64)","") - CUDA_LIB_DIR += $(CUDA_DIR)/lib64 -endif -CUDA_LIB_DIR += $(CUDA_DIR)/lib - -INCLUDE_DIRS += $(BUILD_INCLUDE_DIR) ./src ./include -ifneq ($(CPU_ONLY), 1) - INCLUDE_DIRS += $(CUDA_INCLUDE_DIR) - LIBRARY_DIRS += $(CUDA_LIB_DIR) - LIBRARIES := cudart cublas curand -endif -LIBRARIES += glog gflags protobuf leveldb snappy \ - lmdb boost_system hdf5_hl hdf5 m \ - opencv_core opencv_highgui opencv_imgproc #opencv_imgcodecs -PYTHON_LIBRARIES := boost_python python2.7 -WARNINGS := -Wall -Wno-sign-compare - -############################## -# Set build directories -############################## - -DISTRIBUTE_DIR ?= distribute -DISTRIBUTE_SUBDIRS := $(DISTRIBUTE_DIR)/bin $(DISTRIBUTE_DIR)/lib -DIST_ALIASES := dist -ifneq ($(strip $(DISTRIBUTE_DIR)),distribute) - DIST_ALIASES += distribute -endif - -ALL_BUILD_DIRS := $(sort 
$(BUILD_DIR) $(addprefix $(BUILD_DIR)/, $(SRC_DIRS)) \ - $(addprefix $(BUILD_DIR)/cuda/, $(SRC_DIRS)) \ - $(LIB_BUILD_DIR) $(TEST_BIN_DIR) $(PY_PROTO_BUILD_DIR) $(LINT_OUTPUT_DIR) \ - $(DISTRIBUTE_SUBDIRS) $(PROTO_BUILD_INCLUDE_DIR)) - -############################## -# Set directory for Doxygen-generated documentation -############################## -DOXYGEN_CONFIG_FILE ?= ./.Doxyfile -# should be the same as OUTPUT_DIRECTORY in the .Doxyfile -DOXYGEN_OUTPUT_DIR ?= ./doxygen -DOXYGEN_COMMAND ?= doxygen -# All the files that might have Doxygen documentation. -DOXYGEN_SOURCES := $(shell find \ - src/$(PROJECT) \ - include/$(PROJECT) \ - python/ \ - matlab/ \ - examples \ - tools \ - -name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh" -or \ - -name "*.py" -or -name "*.m") -DOXYGEN_SOURCES += $(DOXYGEN_CONFIG_FILE) - - -############################## -# Configure build -############################## - -# Determine platform -UNAME := $(shell uname -s) -ifeq ($(UNAME), Linux) - LINUX := 1 -else ifeq ($(UNAME), Darwin) - OSX := 1 -endif - -# Linux -ifeq ($(LINUX), 1) - CXX ?= /usr/bin/g++ - GCCVERSION := $(shell $(CXX) -dumpversion | cut -f1,2 -d.) - # older versions of gcc are too dumb to build boost with -Wuninitalized - ifeq ($(shell echo $(GCCVERSION) \< 4.6 | bc), 1) - WARNINGS += -Wno-uninitialized - endif - # boost::thread is reasonably called boost_thread (compare OS X) - # We will also explicitly add stdc++ to the link target. 
- LIBRARIES += boost_thread stdc++ -endif - -# OS X: -# clang++ instead of g++ -# libstdc++ for NVCC compatibility on OS X >= 10.9 with CUDA < 7.0 -ifeq ($(OSX), 1) - CXX := /usr/bin/clang++ - ifneq ($(CPU_ONLY), 1) - CUDA_VERSION := $(shell $(CUDA_DIR)/bin/nvcc -V | grep -o 'release \d' | grep -o '\d') - ifeq ($(shell echo $(CUDA_VERSION) \< 7.0 | bc), 1) - CXXFLAGS += -stdlib=libstdc++ - LINKFLAGS += -stdlib=libstdc++ - endif - # clang throws this warning for cuda headers - WARNINGS += -Wno-unneeded-internal-declaration - endif - # gtest needs to use its own tuple to not conflict with clang - COMMON_FLAGS += -DGTEST_USE_OWN_TR1_TUPLE=1 - # boost::thread is called boost_thread-mt to mark multithreading on OS X - LIBRARIES += boost_thread-mt - # we need to explicitly ask for the rpath to be obeyed - DYNAMIC_FLAGS := -install_name @rpath/libcaffe.so - ORIGIN := @loader_path -else - ORIGIN := \$$ORIGIN -endif - -# Custom compiler -ifdef CUSTOM_CXX - CXX := $(CUSTOM_CXX) -endif - -# Static linking -ifneq (,$(findstring clang++,$(CXX))) - STATIC_LINK_COMMAND := -Wl,-force_load $(STATIC_NAME) -else ifneq (,$(findstring g++,$(CXX))) - STATIC_LINK_COMMAND := -Wl,--whole-archive $(STATIC_NAME) -Wl,--no-whole-archive -else - # The following line must not be indented with a tab, since we are not inside a target - $(error Cannot static link with the $(CXX) compiler) -endif - -# Debugging -ifeq ($(DEBUG), 1) - COMMON_FLAGS += -DDEBUG -g -O0 - NVCCFLAGS += -G -else - COMMON_FLAGS += -DNDEBUG -O2 -endif - -# cuDNN acceleration configuration. 
-ifeq ($(USE_CUDNN), 1) - LIBRARIES += cudnn - COMMON_FLAGS += -DUSE_CUDNN -endif - -# CPU-only configuration -ifeq ($(CPU_ONLY), 1) - OBJS := $(PROTO_OBJS) $(CXX_OBJS) - TEST_OBJS := $(TEST_CXX_OBJS) - TEST_BINS := $(TEST_CXX_BINS) - ALL_WARNS := $(ALL_CXX_WARNS) - TEST_FILTER := --gtest_filter="-*GPU*" - COMMON_FLAGS += -DCPU_ONLY -endif - -# Python layer support -ifeq ($(WITH_PYTHON_LAYER), 1) - COMMON_FLAGS += -DWITH_PYTHON_LAYER - LIBRARIES += $(PYTHON_LIBRARIES) -endif - -# BLAS configuration (default = ATLAS) -BLAS ?= atlas -ifeq ($(BLAS), mkl) - # MKL - LIBRARIES += mkl_rt - COMMON_FLAGS += -DUSE_MKL - MKL_DIR ?= /opt/intel/mkl - BLAS_INCLUDE ?= $(MKL_DIR)/include - BLAS_LIB ?= $(MKL_DIR)/lib $(MKL_DIR)/lib/intel64 -else ifeq ($(BLAS), open) - # OpenBLAS - LIBRARIES += openblas -else - # ATLAS - ifeq ($(LINUX), 1) - ifeq ($(BLAS), atlas) - # Linux simply has cblas and atlas - LIBRARIES += cblas atlas - endif - else ifeq ($(OSX), 1) - # OS X packages atlas as the vecLib framework - LIBRARIES += cblas - # 10.10 has accelerate while 10.9 has veclib - XCODE_CLT_VER := $(shell pkgutil --pkg-info=com.apple.pkg.CLTools_Executables | grep -o 'version: 6') - ifneq (,$(findstring version: 6,$(XCODE_CLT_VER))) - BLAS_INCLUDE ?= /System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/ - LDFLAGS += -framework Accelerate - else - BLAS_INCLUDE ?= /System/Library/Frameworks/vecLib.framework/Versions/Current/Headers/ - LDFLAGS += -framework vecLib - endif - endif -endif -INCLUDE_DIRS += $(BLAS_INCLUDE) -LIBRARY_DIRS += $(BLAS_LIB) - -LIBRARY_DIRS += $(LIB_BUILD_DIR) - -# Automatic dependency generation (nvcc is handled separately) -CXXFLAGS += -MMD -MP - -# Complete build flags. 
-COMMON_FLAGS += $(foreach includedir,$(INCLUDE_DIRS),-I$(includedir)) -CXXFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) -NVCCFLAGS += -ccbin=$(CXX) -Xcompiler -fPIC $(COMMON_FLAGS) -# mex may invoke an older gcc that is too liberal with -Wuninitalized -MATLAB_CXXFLAGS := $(CXXFLAGS) -Wno-uninitialized -LINKFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) - -USE_PKG_CONFIG ?= 0 -ifeq ($(USE_PKG_CONFIG), 1) - PKG_CONFIG := $(shell pkg-config opencv --libs) -else - PKG_CONFIG := -endif -LDFLAGS += $(foreach librarydir,$(LIBRARY_DIRS),-L$(librarydir)) $(PKG_CONFIG) \ - $(foreach library,$(LIBRARIES),-l$(library)) -PYTHON_LDFLAGS := $(LDFLAGS) $(foreach library,$(PYTHON_LIBRARIES),-l$(library)) - -# 'superclean' target recursively* deletes all files ending with an extension -# in $(SUPERCLEAN_EXTS) below. This may be useful if you've built older -# versions of Caffe that do not place all generated files in a location known -# to the 'clean' target. -# -# 'supercleanlist' will list the files to be deleted by make superclean. -# -# * Recursive with the exception that symbolic links are never followed, per the -# default behavior of 'find'. -SUPERCLEAN_EXTS := .so .a .o .bin .testbin .pb.cc .pb.h _pb2.py .cuo - -# Set the sub-targets of the 'everything' target. -EVERYTHING_TARGETS := all py$(PROJECT) test warn lint -# Only build matcaffe as part of "everything" if MATLAB_DIR is specified. 
-ifneq ($(MATLAB_DIR),) - EVERYTHING_TARGETS += mat$(PROJECT) -endif - -############################## -# Define build targets -############################## -.PHONY: all test clean docs linecount lint lintclean tools examples $(DIST_ALIASES) \ - py mat py$(PROJECT) mat$(PROJECT) proto runtest \ - superclean supercleanlist supercleanfiles warn everything - -all: $(STATIC_NAME) $(DYNAMIC_NAME) tools examples - -everything: $(EVERYTHING_TARGETS) - -linecount: - cloc --read-lang-def=$(PROJECT).cloc \ - src/$(PROJECT) include/$(PROJECT) tools examples \ - python matlab - -lint: $(EMPTY_LINT_REPORT) - -lintclean: - @ $(RM) -r $(LINT_OUTPUT_DIR) $(EMPTY_LINT_REPORT) $(NONEMPTY_LINT_REPORT) - -docs: $(DOXYGEN_OUTPUT_DIR) - @ cd ./docs ; ln -sfn ../$(DOXYGEN_OUTPUT_DIR)/html doxygen - -$(DOXYGEN_OUTPUT_DIR): $(DOXYGEN_CONFIG_FILE) $(DOXYGEN_SOURCES) - $(DOXYGEN_COMMAND) $(DOXYGEN_CONFIG_FILE) - -$(EMPTY_LINT_REPORT): $(LINT_OUTPUTS) | $(BUILD_DIR) - @ cat $(LINT_OUTPUTS) > $@ - @ if [ -s "$@" ]; then \ - cat $@; \ - mv $@ $(NONEMPTY_LINT_REPORT); \ - echo "Found one or more lint errors."; \ - exit 1; \ - fi; \ - $(RM) $(NONEMPTY_LINT_REPORT); \ - echo "No lint errors!"; - -$(LINT_OUTPUTS): $(LINT_OUTPUT_DIR)/%.lint.txt : % $(LINT_SCRIPT) | $(LINT_OUTPUT_DIR) - @ mkdir -p $(dir $@) - @ python $(LINT_SCRIPT) $< 2>&1 \ - | grep -v "^Done processing " \ - | grep -v "^Total errors found: 0" \ - > $@ \ - || true - -test: $(TEST_ALL_BIN) $(TEST_ALL_DYNLINK_BIN) $(TEST_BINS) - -tools: $(TOOL_BINS) $(TOOL_BIN_LINKS) - -examples: $(EXAMPLE_BINS) - -py$(PROJECT): py - -py: $(PY$(PROJECT)_SO) $(PROTO_GEN_PY) - -$(PY$(PROJECT)_SO): $(PY$(PROJECT)_SRC) $(PY$(PROJECT)_HXX) | $(DYNAMIC_NAME) - @ echo CXX/LD -o $@ $< - $(Q)$(CXX) -shared -o $@ $(PY$(PROJECT)_SRC) \ - -o $@ $(LINKFLAGS) -l$(PROJECT) $(PYTHON_LDFLAGS) \ - -Wl,-rpath,$(ORIGIN)/../../build/lib - -mat$(PROJECT): mat - -mat: $(MAT$(PROJECT)_SO) - -$(MAT$(PROJECT)_SO): $(MAT$(PROJECT)_SRC) $(STATIC_NAME) - @ if [ -z 
"$(MATLAB_DIR)" ]; then \ - echo "MATLAB_DIR must be specified in $(CONFIG_FILE)" \ - "to build mat$(PROJECT)."; \ - exit 1; \ - fi - @ echo MEX $< - $(Q)$(MATLAB_DIR)/bin/mex $(MAT$(PROJECT)_SRC) \ - CXX="$(CXX)" \ - CXXFLAGS="\$$CXXFLAGS $(MATLAB_CXXFLAGS)" \ - CXXLIBS="\$$CXXLIBS $(STATIC_LINK_COMMAND) $(LDFLAGS)" -output $@ - @ if [ -f "$(PROJECT)_.d" ]; then \ - mv -f $(PROJECT)_.d $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d}; \ - fi - -runtest: $(TEST_ALL_BIN) - $(TOOL_BUILD_DIR)/caffe - $(TEST_ALL_BIN) $(TEST_GPUID) --gtest_shuffle $(TEST_FILTER) - -pytest: py - cd python; python -m unittest discover -s caffe/test - -mattest: mat - cd matlab; $(MATLAB_DIR)/bin/matlab -nodisplay -r 'caffe.run_tests(), exit()' - -warn: $(EMPTY_WARN_REPORT) - -$(EMPTY_WARN_REPORT): $(ALL_WARNS) | $(BUILD_DIR) - @ cat $(ALL_WARNS) > $@ - @ if [ -s "$@" ]; then \ - cat $@; \ - mv $@ $(NONEMPTY_WARN_REPORT); \ - echo "Compiler produced one or more warnings."; \ - exit 1; \ - fi; \ - $(RM) $(NONEMPTY_WARN_REPORT); \ - echo "No compiler warnings!"; - -$(ALL_WARNS): %.o.$(WARNS_EXT) : %.o - -$(BUILD_DIR_LINK): $(BUILD_DIR)/.linked - -# Create a target ".linked" in this BUILD_DIR to tell Make that the "build" link -# is currently correct, then delete the one in the OTHER_BUILD_DIR in case it -# exists and $(DEBUG) is toggled later. 
-$(BUILD_DIR)/.linked: - @ mkdir -p $(BUILD_DIR) - @ $(RM) $(OTHER_BUILD_DIR)/.linked - @ $(RM) -r $(BUILD_DIR_LINK) - @ ln -s $(BUILD_DIR) $(BUILD_DIR_LINK) - @ touch $@ - -$(ALL_BUILD_DIRS): | $(BUILD_DIR_LINK) - @ mkdir -p $@ - -$(DYNAMIC_NAME): $(OBJS) | $(LIB_BUILD_DIR) - @ echo LD -o $@ - $(Q)$(CXX) -shared -o $@ $(OBJS) $(LINKFLAGS) $(LDFLAGS) $(DYNAMIC_FLAGS) - -$(STATIC_NAME): $(OBJS) | $(LIB_BUILD_DIR) - @ echo AR -o $@ - $(Q)ar rcs $@ $(OBJS) - -$(BUILD_DIR)/%.o: %.cpp | $(ALL_BUILD_DIRS) - @ echo CXX $< - $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ - || (cat $@.$(WARNS_EXT); exit 1) - @ cat $@.$(WARNS_EXT) - -$(PROTO_BUILD_DIR)/%.pb.o: $(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_GEN_HEADER) \ - | $(PROTO_BUILD_DIR) - @ echo CXX $< - $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ - || (cat $@.$(WARNS_EXT); exit 1) - @ cat $@.$(WARNS_EXT) - -$(BUILD_DIR)/cuda/%.o: %.cu | $(ALL_BUILD_DIRS) - @ echo NVCC $< - $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -M $< -o ${@:.o=.d} \ - -odir $(@D) - $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -c $< -o $@ 2> $@.$(WARNS_EXT) \ - || (cat $@.$(WARNS_EXT); exit 1) - @ cat $@.$(WARNS_EXT) - -$(TEST_ALL_BIN): $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \ - | $(DYNAMIC_NAME) $(TEST_BIN_DIR) - @ echo CXX/LD -o $@ $< - $(Q)$(CXX) $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \ - -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib - -$(TEST_CU_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CU_BUILD_DIR)/%.o \ - $(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR) - @ echo LD $< - $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \ - -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib - -$(TEST_CXX_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CXX_BUILD_DIR)/%.o \ - $(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR) - @ echo LD $< - $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \ - -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib - -# Target for extension-less symlinks to tool 
binaries with extension '*.bin'. -$(TOOL_BUILD_DIR)/%: $(TOOL_BUILD_DIR)/%.bin | $(TOOL_BUILD_DIR) - @ $(RM) $@ - @ ln -s $(abspath $<) $@ - -$(TOOL_BINS): %.bin : %.o | $(DYNAMIC_NAME) - @ echo CXX/LD -o $@ - $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \ - -Wl,-rpath,$(ORIGIN)/../lib - -$(EXAMPLE_BINS): %.bin : %.o | $(DYNAMIC_NAME) - @ echo CXX/LD -o $@ - $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \ - -Wl,-rpath,$(ORIGIN)/../../lib - -proto: $(PROTO_GEN_CC) $(PROTO_GEN_HEADER) - -$(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_BUILD_DIR)/%.pb.h : \ - $(PROTO_SRC_DIR)/%.proto | $(PROTO_BUILD_DIR) - @ echo PROTOC $< - $(Q)protoc --proto_path=$(PROTO_SRC_DIR) --cpp_out=$(PROTO_BUILD_DIR) $< - -$(PY_PROTO_BUILD_DIR)/%_pb2.py : $(PROTO_SRC_DIR)/%.proto \ - $(PY_PROTO_INIT) | $(PY_PROTO_BUILD_DIR) - @ echo PROTOC \(python\) $< - $(Q)protoc --proto_path=$(PROTO_SRC_DIR) --python_out=$(PY_PROTO_BUILD_DIR) $< - -$(PY_PROTO_INIT): | $(PY_PROTO_BUILD_DIR) - touch $(PY_PROTO_INIT) - -clean: - @- $(RM) -rf $(ALL_BUILD_DIRS) - @- $(RM) -rf $(OTHER_BUILD_DIR) - @- $(RM) -rf $(BUILD_DIR_LINK) - @- $(RM) -rf $(DISTRIBUTE_DIR) - @- $(RM) $(PY$(PROJECT)_SO) - @- $(RM) $(MAT$(PROJECT)_SO) - -supercleanfiles: - $(eval SUPERCLEAN_FILES := $(strip \ - $(foreach ext,$(SUPERCLEAN_EXTS), $(shell find . 
-name '*$(ext)' \ - -not -path './data/*')))) - -supercleanlist: supercleanfiles - @ \ - if [ -z "$(SUPERCLEAN_FILES)" ]; then \ - echo "No generated files found."; \ - else \ - echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \ - fi - -superclean: clean supercleanfiles - @ \ - if [ -z "$(SUPERCLEAN_FILES)" ]; then \ - echo "No generated files found."; \ - else \ - echo "Deleting the following generated files:"; \ - echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \ - $(RM) $(SUPERCLEAN_FILES); \ - fi - -$(DIST_ALIASES): $(DISTRIBUTE_DIR) - -$(DISTRIBUTE_DIR): all py | $(DISTRIBUTE_SUBDIRS) - # add include - cp -r include $(DISTRIBUTE_DIR)/ - mkdir -p $(DISTRIBUTE_DIR)/include/caffe/proto - cp $(PROTO_GEN_HEADER_SRCS) $(DISTRIBUTE_DIR)/include/caffe/proto - # add tool and example binaries - cp $(TOOL_BINS) $(DISTRIBUTE_DIR)/bin - cp $(EXAMPLE_BINS) $(DISTRIBUTE_DIR)/bin - # add libraries - cp $(STATIC_NAME) $(DISTRIBUTE_DIR)/lib - cp $(DYNAMIC_NAME) $(DISTRIBUTE_DIR)/lib - # add python - it's not the standard way, indeed... - cp -r python $(DISTRIBUTE_DIR)/python - --include $(DEPS) diff --git a/Makefile.config.example b/Makefile.config.example index a873502559f..074fbc34285 100644 --- a/Makefile.config.example +++ b/Makefile.config.example @@ -5,7 +5,7 @@ # USE_CUDNN := 1 # CPU-only switch (uncomment to build without GPU support). -# CPU_ONLY := 1 +CPU_ONLY := 1 # To customize your choice of compiler, uncomment and set the following. # N.B. 
the default for Linux is g++ and the default for OSX is clang++ From da1e177e58298600a74e458c401fe366a7ad9c91 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Mon, 15 Jun 2015 20:44:00 +0800 Subject: [PATCH 12/82] cpu only --- Makefile.config.example | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile.config.example b/Makefile.config.example index 074fbc34285..a873502559f 100644 --- a/Makefile.config.example +++ b/Makefile.config.example @@ -5,7 +5,7 @@ # USE_CUDNN := 1 # CPU-only switch (uncomment to build without GPU support). -CPU_ONLY := 1 +# CPU_ONLY := 1 # To customize your choice of compiler, uncomment and set the following. # N.B. the default for Linux is g++ and the default for OSX is clang++ From 454323d3e7ec0f0eb01d0a43eaedd38154bb91bf Mon Sep 17 00:00:00 2001 From: Wangyida Date: Mon, 15 Jun 2015 20:44:21 +0800 Subject: [PATCH 13/82] cpu only --- Makefile.config.example | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile.config.example b/Makefile.config.example index a873502559f..074fbc34285 100644 --- a/Makefile.config.example +++ b/Makefile.config.example @@ -5,7 +5,7 @@ # USE_CUDNN := 1 # CPU-only switch (uncomment to build without GPU support). -# CPU_ONLY := 1 +CPU_ONLY := 1 # To customize your choice of compiler, uncomment and set the following. # N.B. 
the default for Linux is g++ and the default for OSX is clang++ From 04145dd0de00f16044738a2a9e9d429b2d6a01df Mon Sep 17 00:00:00 2001 From: Wangyida Date: Tue, 16 Jun 2015 08:44:04 +0800 Subject: [PATCH 14/82] bug fixes on triplet_loss_layer.cpp and the test on mnist works well --- .../triplet/mnist_triplet_solver.prototxt | 2 +- examples/triplet/train_mnist_triplet.sh | 2 +- src/caffe/layers/triplet_loss_layer.cpp | 39 +++++++++++++++++-- 3 files changed, 38 insertions(+), 5 deletions(-) diff --git a/examples/triplet/mnist_triplet_solver.prototxt b/examples/triplet/mnist_triplet_solver.prototxt index edd8e1e0338..39222b89cf0 100644 --- a/examples/triplet/mnist_triplet_solver.prototxt +++ b/examples/triplet/mnist_triplet_solver.prototxt @@ -7,7 +7,7 @@ test_iter: 100 # Carry out testing every 500 training iterations. test_interval: 500 # The base learning rate, momentum and the weight decay of the network. -base_lr: 0.001 +base_lr: 0.01 momentum: 0.9 weight_decay: 0.0000 # The learning rate policy diff --git a/examples/triplet/train_mnist_triplet.sh b/examples/triplet/train_mnist_triplet.sh index 683cda2963b..e005970824a 100755 --- a/examples/triplet/train_mnist_triplet.sh +++ b/examples/triplet/train_mnist_triplet.sh @@ -1,5 +1,5 @@ #!/usr/bin/env sh -TOOLS=./release/tools +TOOLS=./build/tools $TOOLS/caffe train --solver=examples/triplet/mnist_triplet_solver.prototxt diff --git a/src/caffe/layers/triplet_loss_layer.cpp b/src/caffe/layers/triplet_loss_layer.cpp index 2ee8bc2037e..a79b8700f90 100644 --- a/src/caffe/layers/triplet_loss_layer.cpp +++ b/src/caffe/layers/triplet_loss_layer.cpp @@ -60,7 +60,7 @@ void TripletLossLayer::Forward_cpu( dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(channels, diff_pos.cpu_data() + (i*channels), diff_pos.cpu_data() + (i*channels)); // ab is a similar pair - dist_sq_.mutable_cpu_data()[i] += dist_sq_pos.cpu_data()[i]; + dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i];// wrong!!! 
dist_sq = ***** // Loss component calculated from ac dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(channels, diff_neg.cpu_data() + (i*channels), diff_neg.cpu_data() + (i*channels)); @@ -76,10 +76,39 @@ template void TripletLossLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { Dtype margin = this->layer_param_.triplet_loss_param().margin(); +// there must be further check to ensure the gradient calc + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(bottom[0]->num()); + int num = bottom[0]->num(); + int channels = bottom[0]->channels(); + for (int j = 0; j < num; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // similar pairs + caffe_cpu_axpby( + channels, + alpha, + diff_pos.cpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + // dissimilar pairs + caffe_cpu_axpby( + channels, + -alpha, + diff_neg.cpu_data() + (j*channels), + Dtype(1.0), + bout + (j*channels)); + } else { + caffe_set(channels, Dtype(0), bout + (j*channels)); + } + } + } for (int i = 1; i < 3; ++i) { // there must be further check to ensure the gradient calc if (propagate_down[i]) { - const Dtype sign = (i == 2) ? 1 : -1; + const Dtype sign = (i == 1) ? 
-1 : 1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / static_cast(bottom[i]->num()); int num = bottom[i]->num(); @@ -87,6 +116,7 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, for (int j = 0; j < num; ++j) { Dtype* bout = bottom[i]->mutable_cpu_diff(); if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + if (i == 1) { // similar pairs caffe_cpu_axpby( channels, @@ -94,13 +124,16 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, diff_pos.cpu_data() + (j*channels), Dtype(0.0), bout + (j*channels)); + } + else { // dissimilar pairs caffe_cpu_axpby( channels, - -alpha, + alpha, diff_neg.cpu_data() + (j*channels), Dtype(0.0), bout + (j*channels)); + } } else { caffe_set(channels, Dtype(0), bout + (j*channels)); } From 17a0fd72ac638249a79e9106b956ae254b6ea49e Mon Sep 17 00:00:00 2001 From: Wangyida Date: Tue, 16 Jun 2015 11:40:05 +0800 Subject: [PATCH 15/82] GPU version added --- Makefile | 631 ++++++++++++++++++ Makefile.config.example | 2 +- .../triplet/mnist_triplet_solver.prototxt | 2 +- src/caffe/layers/triplet_loss_layer.cpp | 3 +- src/caffe/layers/triplet_loss_layer.cu | 128 ++++ 5 files changed, 762 insertions(+), 4 deletions(-) create mode 100644 Makefile create mode 100644 src/caffe/layers/triplet_loss_layer.cu diff --git a/Makefile b/Makefile new file mode 100644 index 00000000000..140acba7b7b --- /dev/null +++ b/Makefile @@ -0,0 +1,631 @@ +PROJECT := caffe + +CONFIG_FILE := Makefile.config +# Explicitly check for the config file, otherwise make -k will proceed anyway. +ifeq ($(wildcard $(CONFIG_FILE)),) +$(error $(CONFIG_FILE) not found. See $(CONFIG_FILE).example.) 
+endif +include $(CONFIG_FILE) + +BUILD_DIR_LINK := $(BUILD_DIR) +ifeq ($(RELEASE_BUILD_DIR),) + RELEASE_BUILD_DIR := .$(BUILD_DIR)_release +endif +ifeq ($(DEBUG_BUILD_DIR),) + DEBUG_BUILD_DIR := .$(BUILD_DIR)_debug +endif + +DEBUG ?= 0 +ifeq ($(DEBUG), 1) + BUILD_DIR := $(DEBUG_BUILD_DIR) + OTHER_BUILD_DIR := $(RELEASE_BUILD_DIR) +else + BUILD_DIR := $(RELEASE_BUILD_DIR) + OTHER_BUILD_DIR := $(DEBUG_BUILD_DIR) +endif + +# All of the directories containing code. +SRC_DIRS := $(shell find * -type d -exec bash -c "find {} -maxdepth 1 \ + \( -name '*.cpp' -o -name '*.proto' \) | grep -q ." \; -print) + +# The target shared library name +LIB_BUILD_DIR := $(BUILD_DIR)/lib +STATIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).a +DYNAMIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).so + +############################## +# Get all source files +############################## +# CXX_SRCS are the source files excluding the test ones. +CXX_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cpp" -name "*.cpp") +# CU_SRCS are the cuda source files +CU_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cu" -name "*.cu") +# TEST_SRCS are the test source files +TEST_MAIN_SRC := src/$(PROJECT)/test/test_caffe_main.cpp +TEST_SRCS := $(shell find src/$(PROJECT) -name "test_*.cpp") +TEST_SRCS := $(filter-out $(TEST_MAIN_SRC), $(TEST_SRCS)) +TEST_CU_SRCS := $(shell find src/$(PROJECT) -name "test_*.cu") +GTEST_SRC := src/gtest/gtest-all.cpp +# TOOL_SRCS are the source files for the tool binaries +TOOL_SRCS := $(shell find tools -name "*.cpp") +# EXAMPLE_SRCS are the source files for the example binaries +EXAMPLE_SRCS := $(shell find examples -name "*.cpp") +# BUILD_INCLUDE_DIR contains any generated header files we want to include. 
+BUILD_INCLUDE_DIR := $(BUILD_DIR)/src +# PROTO_SRCS are the protocol buffer definitions +PROTO_SRC_DIR := src/$(PROJECT)/proto +PROTO_SRCS := $(wildcard $(PROTO_SRC_DIR)/*.proto) +# PROTO_BUILD_DIR will contain the .cc and obj files generated from +# PROTO_SRCS; PROTO_BUILD_INCLUDE_DIR will contain the .h header files +PROTO_BUILD_DIR := $(BUILD_DIR)/$(PROTO_SRC_DIR) +PROTO_BUILD_INCLUDE_DIR := $(BUILD_INCLUDE_DIR)/$(PROJECT)/proto +# NONGEN_CXX_SRCS includes all source/header files except those generated +# automatically (e.g., by proto). +NONGEN_CXX_SRCS := $(shell find \ + src/$(PROJECT) \ + include/$(PROJECT) \ + python/$(PROJECT) \ + matlab/+$(PROJECT)/private \ + examples \ + tools \ + -name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh") +LINT_SCRIPT := scripts/cpp_lint.py +LINT_OUTPUT_DIR := $(BUILD_DIR)/.lint +LINT_EXT := lint.txt +LINT_OUTPUTS := $(addsuffix .$(LINT_EXT), $(addprefix $(LINT_OUTPUT_DIR)/, $(NONGEN_CXX_SRCS))) +EMPTY_LINT_REPORT := $(BUILD_DIR)/.$(LINT_EXT) +NONEMPTY_LINT_REPORT := $(BUILD_DIR)/$(LINT_EXT) +# PY$(PROJECT)_SRC is the python wrapper for $(PROJECT) +PY$(PROJECT)_SRC := python/$(PROJECT)/_$(PROJECT).cpp +PY$(PROJECT)_SO := python/$(PROJECT)/_$(PROJECT).so +PY$(PROJECT)_HXX := include/$(PROJECT)/python_layer.hpp +# MAT$(PROJECT)_SRC is the mex entrance point of matlab package for $(PROJECT) +MAT$(PROJECT)_SRC := matlab/+$(PROJECT)/private/$(PROJECT)_.cpp +ifneq ($(MATLAB_DIR),) + MAT_SO_EXT := $(shell $(MATLAB_DIR)/bin/mexext) +endif +MAT$(PROJECT)_SO := matlab/+$(PROJECT)/private/$(PROJECT)_.$(MAT_SO_EXT) + +############################## +# Derive generated files +############################## +# The generated files for protocol buffers +PROTO_GEN_HEADER_SRCS := $(addprefix $(PROTO_BUILD_DIR)/, \ + $(notdir ${PROTO_SRCS:.proto=.pb.h})) +PROTO_GEN_HEADER := $(addprefix $(PROTO_BUILD_INCLUDE_DIR)/, \ + $(notdir ${PROTO_SRCS:.proto=.pb.h})) +PROTO_GEN_CC := $(addprefix $(BUILD_DIR)/, ${PROTO_SRCS:.proto=.pb.cc}) 
+PY_PROTO_BUILD_DIR := python/$(PROJECT)/proto +PY_PROTO_INIT := python/$(PROJECT)/proto/__init__.py +PROTO_GEN_PY := $(foreach file,${PROTO_SRCS:.proto=_pb2.py}, \ + $(PY_PROTO_BUILD_DIR)/$(notdir $(file))) +# The objects corresponding to the source files +# These objects will be linked into the final shared library, so we +# exclude the tool, example, and test objects. +CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o}) +CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o}) +PROTO_OBJS := ${PROTO_GEN_CC:.cc=.o} +OBJS := $(PROTO_OBJS) $(CXX_OBJS) $(CU_OBJS) +# tool, example, and test objects +TOOL_OBJS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o}) +TOOL_BUILD_DIR := $(BUILD_DIR)/tools +TEST_CXX_BUILD_DIR := $(BUILD_DIR)/src/$(PROJECT)/test +TEST_CU_BUILD_DIR := $(BUILD_DIR)/cuda/src/$(PROJECT)/test +TEST_CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o}) +TEST_CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o}) +TEST_OBJS := $(TEST_CXX_OBJS) $(TEST_CU_OBJS) +GTEST_OBJ := $(addprefix $(BUILD_DIR)/, ${GTEST_SRC:.cpp=.o}) +EXAMPLE_OBJS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o}) +# Output files for automatic dependency generation +DEPS := ${CXX_OBJS:.o=.d} ${CU_OBJS:.o=.d} ${TEST_CXX_OBJS:.o=.d} \ + ${TEST_CU_OBJS:.o=.d} $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d} +# tool, example, and test bins +TOOL_BINS := ${TOOL_OBJS:.o=.bin} +EXAMPLE_BINS := ${EXAMPLE_OBJS:.o=.bin} +# symlinks to tool bins without the ".bin" extension +TOOL_BIN_LINKS := ${TOOL_BINS:.bin=} +# Put the test binaries in build/test for convenience. 
+TEST_BIN_DIR := $(BUILD_DIR)/test +TEST_CU_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \ + $(foreach obj,$(TEST_CU_OBJS),$(basename $(notdir $(obj)))))) +TEST_CXX_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \ + $(foreach obj,$(TEST_CXX_OBJS),$(basename $(notdir $(obj)))))) +TEST_BINS := $(TEST_CXX_BINS) $(TEST_CU_BINS) +# TEST_ALL_BIN is the test binary that links caffe dynamically. +TEST_ALL_BIN := $(TEST_BIN_DIR)/test_all.testbin + +############################## +# Derive compiler warning dump locations +############################## +WARNS_EXT := warnings.txt +CXX_WARNS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o.$(WARNS_EXT)}) +CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o.$(WARNS_EXT)}) +TOOL_WARNS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o.$(WARNS_EXT)}) +EXAMPLE_WARNS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o.$(WARNS_EXT)}) +TEST_WARNS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o.$(WARNS_EXT)}) +TEST_CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o.$(WARNS_EXT)}) +ALL_CXX_WARNS := $(CXX_WARNS) $(TOOL_WARNS) $(EXAMPLE_WARNS) $(TEST_WARNS) +ALL_CU_WARNS := $(CU_WARNS) $(TEST_CU_WARNS) +ALL_WARNS := $(ALL_CXX_WARNS) $(ALL_CU_WARNS) + +EMPTY_WARN_REPORT := $(BUILD_DIR)/.$(WARNS_EXT) +NONEMPTY_WARN_REPORT := $(BUILD_DIR)/$(WARNS_EXT) + +############################## +# Derive include and lib directories +############################## +CUDA_INCLUDE_DIR := $(CUDA_DIR)/include + +CUDA_LIB_DIR := +# add /lib64 only if it exists +ifneq ("$(wildcard $(CUDA_DIR)/lib64)","") + CUDA_LIB_DIR += $(CUDA_DIR)/lib64 +endif +CUDA_LIB_DIR += $(CUDA_DIR)/lib + +INCLUDE_DIRS += $(BUILD_INCLUDE_DIR) ./src ./include +ifneq ($(CPU_ONLY), 1) + INCLUDE_DIRS += $(CUDA_INCLUDE_DIR) + LIBRARY_DIRS += $(CUDA_LIB_DIR) + LIBRARIES := cudart cublas curand +endif +LIBRARIES += glog gflags protobuf leveldb snappy \ + lmdb boost_system hdf5_hl hdf5 m \ + opencv_core opencv_highgui opencv_imgproc 
opencv_imgcodecs +PYTHON_LIBRARIES := boost_python python2.7 +WARNINGS := -Wall -Wno-sign-compare + +############################## +# Set build directories +############################## + +DISTRIBUTE_DIR ?= distribute +DISTRIBUTE_SUBDIRS := $(DISTRIBUTE_DIR)/bin $(DISTRIBUTE_DIR)/lib +DIST_ALIASES := dist +ifneq ($(strip $(DISTRIBUTE_DIR)),distribute) + DIST_ALIASES += distribute +endif + +ALL_BUILD_DIRS := $(sort $(BUILD_DIR) $(addprefix $(BUILD_DIR)/, $(SRC_DIRS)) \ + $(addprefix $(BUILD_DIR)/cuda/, $(SRC_DIRS)) \ + $(LIB_BUILD_DIR) $(TEST_BIN_DIR) $(PY_PROTO_BUILD_DIR) $(LINT_OUTPUT_DIR) \ + $(DISTRIBUTE_SUBDIRS) $(PROTO_BUILD_INCLUDE_DIR)) + +############################## +# Set directory for Doxygen-generated documentation +############################## +DOXYGEN_CONFIG_FILE ?= ./.Doxyfile +# should be the same as OUTPUT_DIRECTORY in the .Doxyfile +DOXYGEN_OUTPUT_DIR ?= ./doxygen +DOXYGEN_COMMAND ?= doxygen +# All the files that might have Doxygen documentation. +DOXYGEN_SOURCES := $(shell find \ + src/$(PROJECT) \ + include/$(PROJECT) \ + python/ \ + matlab/ \ + examples \ + tools \ + -name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh" -or \ + -name "*.py" -or -name "*.m") +DOXYGEN_SOURCES += $(DOXYGEN_CONFIG_FILE) + + +############################## +# Configure build +############################## + +# Determine platform +UNAME := $(shell uname -s) +ifeq ($(UNAME), Linux) + LINUX := 1 +else ifeq ($(UNAME), Darwin) + OSX := 1 +endif + +# Linux +ifeq ($(LINUX), 1) + CXX ?= /usr/bin/g++ + GCCVERSION := $(shell $(CXX) -dumpversion | cut -f1,2 -d.) + # older versions of gcc are too dumb to build boost with -Wuninitalized + ifeq ($(shell echo $(GCCVERSION) \< 4.6 | bc), 1) + WARNINGS += -Wno-uninitialized + endif + # boost::thread is reasonably called boost_thread (compare OS X) + # We will also explicitly add stdc++ to the link target. 
+ LIBRARIES += boost_thread stdc++ +endif + +# OS X: +# clang++ instead of g++ +# libstdc++ for NVCC compatibility on OS X >= 10.9 with CUDA < 7.0 +ifeq ($(OSX), 1) + CXX := /usr/bin/clang++ + ifneq ($(CPU_ONLY), 1) + CUDA_VERSION := $(shell $(CUDA_DIR)/bin/nvcc -V | grep -o 'release \d' | grep -o '\d') + ifeq ($(shell echo $(CUDA_VERSION) \< 7.0 | bc), 1) + CXXFLAGS += -stdlib=libstdc++ + LINKFLAGS += -stdlib=libstdc++ + endif + # clang throws this warning for cuda headers + WARNINGS += -Wno-unneeded-internal-declaration + endif + # gtest needs to use its own tuple to not conflict with clang + COMMON_FLAGS += -DGTEST_USE_OWN_TR1_TUPLE=1 + # boost::thread is called boost_thread-mt to mark multithreading on OS X + LIBRARIES += boost_thread-mt + # we need to explicitly ask for the rpath to be obeyed + DYNAMIC_FLAGS := -install_name @rpath/libcaffe.so + ORIGIN := @loader_path +else + ORIGIN := \$$ORIGIN +endif + +# Custom compiler +ifdef CUSTOM_CXX + CXX := $(CUSTOM_CXX) +endif + +# Static linking +ifneq (,$(findstring clang++,$(CXX))) + STATIC_LINK_COMMAND := -Wl,-force_load $(STATIC_NAME) +else ifneq (,$(findstring g++,$(CXX))) + STATIC_LINK_COMMAND := -Wl,--whole-archive $(STATIC_NAME) -Wl,--no-whole-archive +else + # The following line must not be indented with a tab, since we are not inside a target + $(error Cannot static link with the $(CXX) compiler) +endif + +# Debugging +ifeq ($(DEBUG), 1) + COMMON_FLAGS += -DDEBUG -g -O0 + NVCCFLAGS += -G +else + COMMON_FLAGS += -DNDEBUG -O2 +endif + +# cuDNN acceleration configuration. 
+ifeq ($(USE_CUDNN), 1) + LIBRARIES += cudnn + COMMON_FLAGS += -DUSE_CUDNN +endif + +# CPU-only configuration +ifeq ($(CPU_ONLY), 1) + OBJS := $(PROTO_OBJS) $(CXX_OBJS) + TEST_OBJS := $(TEST_CXX_OBJS) + TEST_BINS := $(TEST_CXX_BINS) + ALL_WARNS := $(ALL_CXX_WARNS) + TEST_FILTER := --gtest_filter="-*GPU*" + COMMON_FLAGS += -DCPU_ONLY +endif + +# Python layer support +ifeq ($(WITH_PYTHON_LAYER), 1) + COMMON_FLAGS += -DWITH_PYTHON_LAYER + LIBRARIES += $(PYTHON_LIBRARIES) +endif + +# BLAS configuration (default = ATLAS) +BLAS ?= atlas +ifeq ($(BLAS), mkl) + # MKL + LIBRARIES += mkl_rt + COMMON_FLAGS += -DUSE_MKL + MKL_DIR ?= /opt/intel/mkl + BLAS_INCLUDE ?= $(MKL_DIR)/include + BLAS_LIB ?= $(MKL_DIR)/lib $(MKL_DIR)/lib/intel64 +else ifeq ($(BLAS), open) + # OpenBLAS + LIBRARIES += openblas +else + # ATLAS + ifeq ($(LINUX), 1) + ifeq ($(BLAS), atlas) + # Linux simply has cblas and atlas + LIBRARIES += cblas atlas + endif + else ifeq ($(OSX), 1) + # OS X packages atlas as the vecLib framework + LIBRARIES += cblas + # 10.10 has accelerate while 10.9 has veclib + XCODE_CLT_VER := $(shell pkgutil --pkg-info=com.apple.pkg.CLTools_Executables | grep -o 'version: 6') + ifneq (,$(findstring version: 6,$(XCODE_CLT_VER))) + BLAS_INCLUDE ?= /System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/ + LDFLAGS += -framework Accelerate + else + BLAS_INCLUDE ?= /System/Library/Frameworks/vecLib.framework/Versions/Current/Headers/ + LDFLAGS += -framework vecLib + endif + endif +endif +INCLUDE_DIRS += $(BLAS_INCLUDE) +LIBRARY_DIRS += $(BLAS_LIB) + +LIBRARY_DIRS += $(LIB_BUILD_DIR) + +# Automatic dependency generation (nvcc is handled separately) +CXXFLAGS += -MMD -MP + +# Complete build flags. 
+COMMON_FLAGS += $(foreach includedir,$(INCLUDE_DIRS),-I$(includedir)) +CXXFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) +NVCCFLAGS += -ccbin=$(CXX) -Xcompiler -fPIC $(COMMON_FLAGS) +# mex may invoke an older gcc that is too liberal with -Wuninitalized +MATLAB_CXXFLAGS := $(CXXFLAGS) -Wno-uninitialized +LINKFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) + +USE_PKG_CONFIG ?= 0 +ifeq ($(USE_PKG_CONFIG), 1) + PKG_CONFIG := $(shell pkg-config opencv --libs) +else + PKG_CONFIG := +endif +LDFLAGS += $(foreach librarydir,$(LIBRARY_DIRS),-L$(librarydir)) $(PKG_CONFIG) \ + $(foreach library,$(LIBRARIES),-l$(library)) +PYTHON_LDFLAGS := $(LDFLAGS) $(foreach library,$(PYTHON_LIBRARIES),-l$(library)) + +# 'superclean' target recursively* deletes all files ending with an extension +# in $(SUPERCLEAN_EXTS) below. This may be useful if you've built older +# versions of Caffe that do not place all generated files in a location known +# to the 'clean' target. +# +# 'supercleanlist' will list the files to be deleted by make superclean. +# +# * Recursive with the exception that symbolic links are never followed, per the +# default behavior of 'find'. +SUPERCLEAN_EXTS := .so .a .o .bin .testbin .pb.cc .pb.h _pb2.py .cuo + +# Set the sub-targets of the 'everything' target. +EVERYTHING_TARGETS := all py$(PROJECT) test warn lint +# Only build matcaffe as part of "everything" if MATLAB_DIR is specified. 
+ifneq ($(MATLAB_DIR),) + EVERYTHING_TARGETS += mat$(PROJECT) +endif + +############################## +# Define build targets +############################## +.PHONY: all test clean docs linecount lint lintclean tools examples $(DIST_ALIASES) \ + py mat py$(PROJECT) mat$(PROJECT) proto runtest \ + superclean supercleanlist supercleanfiles warn everything + +all: $(STATIC_NAME) $(DYNAMIC_NAME) tools examples + +everything: $(EVERYTHING_TARGETS) + +linecount: + cloc --read-lang-def=$(PROJECT).cloc \ + src/$(PROJECT) include/$(PROJECT) tools examples \ + python matlab + +lint: $(EMPTY_LINT_REPORT) + +lintclean: + @ $(RM) -r $(LINT_OUTPUT_DIR) $(EMPTY_LINT_REPORT) $(NONEMPTY_LINT_REPORT) + +docs: $(DOXYGEN_OUTPUT_DIR) + @ cd ./docs ; ln -sfn ../$(DOXYGEN_OUTPUT_DIR)/html doxygen + +$(DOXYGEN_OUTPUT_DIR): $(DOXYGEN_CONFIG_FILE) $(DOXYGEN_SOURCES) + $(DOXYGEN_COMMAND) $(DOXYGEN_CONFIG_FILE) + +$(EMPTY_LINT_REPORT): $(LINT_OUTPUTS) | $(BUILD_DIR) + @ cat $(LINT_OUTPUTS) > $@ + @ if [ -s "$@" ]; then \ + cat $@; \ + mv $@ $(NONEMPTY_LINT_REPORT); \ + echo "Found one or more lint errors."; \ + exit 1; \ + fi; \ + $(RM) $(NONEMPTY_LINT_REPORT); \ + echo "No lint errors!"; + +$(LINT_OUTPUTS): $(LINT_OUTPUT_DIR)/%.lint.txt : % $(LINT_SCRIPT) | $(LINT_OUTPUT_DIR) + @ mkdir -p $(dir $@) + @ python $(LINT_SCRIPT) $< 2>&1 \ + | grep -v "^Done processing " \ + | grep -v "^Total errors found: 0" \ + > $@ \ + || true + +test: $(TEST_ALL_BIN) $(TEST_ALL_DYNLINK_BIN) $(TEST_BINS) + +tools: $(TOOL_BINS) $(TOOL_BIN_LINKS) + +examples: $(EXAMPLE_BINS) + +py$(PROJECT): py + +py: $(PY$(PROJECT)_SO) $(PROTO_GEN_PY) + +$(PY$(PROJECT)_SO): $(PY$(PROJECT)_SRC) $(PY$(PROJECT)_HXX) | $(DYNAMIC_NAME) + @ echo CXX/LD -o $@ $< + $(Q)$(CXX) -shared -o $@ $(PY$(PROJECT)_SRC) \ + -o $@ $(LINKFLAGS) -l$(PROJECT) $(PYTHON_LDFLAGS) \ + -Wl,-rpath,$(ORIGIN)/../../build/lib + +mat$(PROJECT): mat + +mat: $(MAT$(PROJECT)_SO) + +$(MAT$(PROJECT)_SO): $(MAT$(PROJECT)_SRC) $(STATIC_NAME) + @ if [ -z 
"$(MATLAB_DIR)" ]; then \ + echo "MATLAB_DIR must be specified in $(CONFIG_FILE)" \ + "to build mat$(PROJECT)."; \ + exit 1; \ + fi + @ echo MEX $< + $(Q)$(MATLAB_DIR)/bin/mex $(MAT$(PROJECT)_SRC) \ + CXX="$(CXX)" \ + CXXFLAGS="\$$CXXFLAGS $(MATLAB_CXXFLAGS)" \ + CXXLIBS="\$$CXXLIBS $(STATIC_LINK_COMMAND) $(LDFLAGS)" -output $@ + @ if [ -f "$(PROJECT)_.d" ]; then \ + mv -f $(PROJECT)_.d $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d}; \ + fi + +runtest: $(TEST_ALL_BIN) + $(TOOL_BUILD_DIR)/caffe + $(TEST_ALL_BIN) $(TEST_GPUID) --gtest_shuffle $(TEST_FILTER) + +pytest: py + cd python; python -m unittest discover -s caffe/test + +mattest: mat + cd matlab; $(MATLAB_DIR)/bin/matlab -nodisplay -r 'caffe.run_tests(), exit()' + +warn: $(EMPTY_WARN_REPORT) + +$(EMPTY_WARN_REPORT): $(ALL_WARNS) | $(BUILD_DIR) + @ cat $(ALL_WARNS) > $@ + @ if [ -s "$@" ]; then \ + cat $@; \ + mv $@ $(NONEMPTY_WARN_REPORT); \ + echo "Compiler produced one or more warnings."; \ + exit 1; \ + fi; \ + $(RM) $(NONEMPTY_WARN_REPORT); \ + echo "No compiler warnings!"; + +$(ALL_WARNS): %.o.$(WARNS_EXT) : %.o + +$(BUILD_DIR_LINK): $(BUILD_DIR)/.linked + +# Create a target ".linked" in this BUILD_DIR to tell Make that the "build" link +# is currently correct, then delete the one in the OTHER_BUILD_DIR in case it +# exists and $(DEBUG) is toggled later. 
+$(BUILD_DIR)/.linked: + @ mkdir -p $(BUILD_DIR) + @ $(RM) $(OTHER_BUILD_DIR)/.linked + @ $(RM) -r $(BUILD_DIR_LINK) + @ ln -s $(BUILD_DIR) $(BUILD_DIR_LINK) + @ touch $@ + +$(ALL_BUILD_DIRS): | $(BUILD_DIR_LINK) + @ mkdir -p $@ + +$(DYNAMIC_NAME): $(OBJS) | $(LIB_BUILD_DIR) + @ echo LD -o $@ + $(Q)$(CXX) -shared -o $@ $(OBJS) $(LINKFLAGS) $(LDFLAGS) $(DYNAMIC_FLAGS) + +$(STATIC_NAME): $(OBJS) | $(LIB_BUILD_DIR) + @ echo AR -o $@ + $(Q)ar rcs $@ $(OBJS) + +$(BUILD_DIR)/%.o: %.cpp | $(ALL_BUILD_DIRS) + @ echo CXX $< + $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ + || (cat $@.$(WARNS_EXT); exit 1) + @ cat $@.$(WARNS_EXT) + +$(PROTO_BUILD_DIR)/%.pb.o: $(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_GEN_HEADER) \ + | $(PROTO_BUILD_DIR) + @ echo CXX $< + $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ + || (cat $@.$(WARNS_EXT); exit 1) + @ cat $@.$(WARNS_EXT) + +$(BUILD_DIR)/cuda/%.o: %.cu | $(ALL_BUILD_DIRS) + @ echo NVCC $< + $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -M $< -o ${@:.o=.d} \ + -odir $(@D) + $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -c $< -o $@ 2> $@.$(WARNS_EXT) \ + || (cat $@.$(WARNS_EXT); exit 1) + @ cat $@.$(WARNS_EXT) + +$(TEST_ALL_BIN): $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \ + | $(DYNAMIC_NAME) $(TEST_BIN_DIR) + @ echo CXX/LD -o $@ $< + $(Q)$(CXX) $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \ + -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib + +$(TEST_CU_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CU_BUILD_DIR)/%.o \ + $(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR) + @ echo LD $< + $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \ + -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib + +$(TEST_CXX_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CXX_BUILD_DIR)/%.o \ + $(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR) + @ echo LD $< + $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \ + -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib + +# Target for extension-less symlinks to tool 
binaries with extension '*.bin'. +$(TOOL_BUILD_DIR)/%: $(TOOL_BUILD_DIR)/%.bin | $(TOOL_BUILD_DIR) + @ $(RM) $@ + @ ln -s $(abspath $<) $@ + +$(TOOL_BINS): %.bin : %.o | $(DYNAMIC_NAME) + @ echo CXX/LD -o $@ + $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \ + -Wl,-rpath,$(ORIGIN)/../lib + +$(EXAMPLE_BINS): %.bin : %.o | $(DYNAMIC_NAME) + @ echo CXX/LD -o $@ + $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \ + -Wl,-rpath,$(ORIGIN)/../../lib + +proto: $(PROTO_GEN_CC) $(PROTO_GEN_HEADER) + +$(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_BUILD_DIR)/%.pb.h : \ + $(PROTO_SRC_DIR)/%.proto | $(PROTO_BUILD_DIR) + @ echo PROTOC $< + $(Q)protoc --proto_path=$(PROTO_SRC_DIR) --cpp_out=$(PROTO_BUILD_DIR) $< + +$(PY_PROTO_BUILD_DIR)/%_pb2.py : $(PROTO_SRC_DIR)/%.proto \ + $(PY_PROTO_INIT) | $(PY_PROTO_BUILD_DIR) + @ echo PROTOC \(python\) $< + $(Q)protoc --proto_path=$(PROTO_SRC_DIR) --python_out=$(PY_PROTO_BUILD_DIR) $< + +$(PY_PROTO_INIT): | $(PY_PROTO_BUILD_DIR) + touch $(PY_PROTO_INIT) + +clean: + @- $(RM) -rf $(ALL_BUILD_DIRS) + @- $(RM) -rf $(OTHER_BUILD_DIR) + @- $(RM) -rf $(BUILD_DIR_LINK) + @- $(RM) -rf $(DISTRIBUTE_DIR) + @- $(RM) $(PY$(PROJECT)_SO) + @- $(RM) $(MAT$(PROJECT)_SO) + +supercleanfiles: + $(eval SUPERCLEAN_FILES := $(strip \ + $(foreach ext,$(SUPERCLEAN_EXTS), $(shell find . 
-name '*$(ext)' \ + -not -path './data/*')))) + +supercleanlist: supercleanfiles + @ \ + if [ -z "$(SUPERCLEAN_FILES)" ]; then \ + echo "No generated files found."; \ + else \ + echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \ + fi + +superclean: clean supercleanfiles + @ \ + if [ -z "$(SUPERCLEAN_FILES)" ]; then \ + echo "No generated files found."; \ + else \ + echo "Deleting the following generated files:"; \ + echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \ + $(RM) $(SUPERCLEAN_FILES); \ + fi + +$(DIST_ALIASES): $(DISTRIBUTE_DIR) + +$(DISTRIBUTE_DIR): all py | $(DISTRIBUTE_SUBDIRS) + # add include + cp -r include $(DISTRIBUTE_DIR)/ + mkdir -p $(DISTRIBUTE_DIR)/include/caffe/proto + cp $(PROTO_GEN_HEADER_SRCS) $(DISTRIBUTE_DIR)/include/caffe/proto + # add tool and example binaries + cp $(TOOL_BINS) $(DISTRIBUTE_DIR)/bin + cp $(EXAMPLE_BINS) $(DISTRIBUTE_DIR)/bin + # add libraries + cp $(STATIC_NAME) $(DISTRIBUTE_DIR)/lib + cp $(DYNAMIC_NAME) $(DISTRIBUTE_DIR)/lib + # add python - it's not the standard way, indeed... + cp -r python $(DISTRIBUTE_DIR)/python + +-include $(DEPS) diff --git a/Makefile.config.example b/Makefile.config.example index 074fbc34285..a873502559f 100644 --- a/Makefile.config.example +++ b/Makefile.config.example @@ -5,7 +5,7 @@ # USE_CUDNN := 1 # CPU-only switch (uncomment to build without GPU support). -CPU_ONLY := 1 +# CPU_ONLY := 1 # To customize your choice of compiler, uncomment and set the following. # N.B. the default for Linux is g++ and the default for OSX is clang++ diff --git a/examples/triplet/mnist_triplet_solver.prototxt b/examples/triplet/mnist_triplet_solver.prototxt index 39222b89cf0..edd8e1e0338 100644 --- a/examples/triplet/mnist_triplet_solver.prototxt +++ b/examples/triplet/mnist_triplet_solver.prototxt @@ -7,7 +7,7 @@ test_iter: 100 # Carry out testing every 500 training iterations. test_interval: 500 # The base learning rate, momentum and the weight decay of the network. 
-base_lr: 0.01 +base_lr: 0.001 momentum: 0.9 weight_decay: 0.0000 # The learning rate policy diff --git a/src/caffe/layers/triplet_loss_layer.cpp b/src/caffe/layers/triplet_loss_layer.cpp index a79b8700f90..467c4a5736a 100644 --- a/src/caffe/layers/triplet_loss_layer.cpp +++ b/src/caffe/layers/triplet_loss_layer.cpp @@ -54,13 +54,12 @@ void TripletLossLayer::Forward_cpu( const int channels = bottom[0]->channels(); Dtype margin = this->layer_param_.triplet_loss_param().margin(); Dtype loss(0.0); - // Loss component calculated from ab for (int i = 0; i < bottom[0]->num(); ++i) { dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(channels, diff_pos.cpu_data() + (i*channels), diff_pos.cpu_data() + (i*channels)); // ab is a similar pair - dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i];// wrong!!! dist_sq = ***** + dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i]; // Loss component calculated from ac dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(channels, diff_neg.cpu_data() + (i*channels), diff_neg.cpu_data() + (i*channels)); diff --git a/src/caffe/layers/triplet_loss_layer.cu b/src/caffe/layers/triplet_loss_layer.cu new file mode 100644 index 00000000000..3ca1277e800 --- /dev/null +++ b/src/caffe/layers/triplet_loss_layer.cu @@ -0,0 +1,128 @@ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +void TripletLossLayer::Forward_gpu( + const vector*>& bottom, + const vector*>& top) { + int count = bottom[0]->count(); + caffe_gpu_sub( + count, + bottom[0]->gpu_data(), // a + bottom[1]->gpu_data(), // b + diff_pos.mutable_gpu_data()); // a_i-b_i + caffe_gpu_sub( + count, + bottom[0]->gpu_data(), // a + bottom[2]->gpu_data(), // c + diff_neg.mutable_gpu_data()); // a_i-c_i + caffe_gpu_powx( + count, + diff_pos.mutable_gpu_data(), // a_i-b_i + Dtype(2), + diff_sq_pos.mutable_gpu_data()); // (a_i-b_i)^2 + 
caffe_gpu_powx( + count, + diff_neg.mutable_gpu_data(), // a_i-c_i + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (a_i-c_i)^2 + const int channels = bottom[0]->channels(); + Dtype margin = this->layer_param_.triplet_loss_param().margin(); + Dtype loss(0.0); + // Loss component calculated from ab + for (int i = 0; i < bottom[0]->num(); ++i) { + /*dist_sq_pos.mutable_gpu_data()[i] = caffe_gpu_dot(channels, + diff_pos.gpu_data() + (i*channels), diff_pos.gpu_data() + (i*channels));*/ + // ab is a similar pair + dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.gpu_data()[i]; + // Loss component calculated from ac + /*dist_sq_neg.mutable_gpu_data()[i] = caffe_gpu_dot(channels, + diff_neg.gpu_data() + (i*channels), diff_neg.gpu_data() + (i*channels));*/ + // ac is a dissimilar pair + dist_sq_.mutable_gpu_data()[i] -= dist_sq_neg.gpu_data()[i]; + loss += std::max(margin + dist_sq_.gpu_data()[i], Dtype(0.0)); + } + loss = loss / static_cast(bottom[0]->num()) / Dtype(2); + top[0]->mutable_gpu_data()[0] = loss; +} + +template +void TripletLossLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + Dtype margin = this->layer_param_.triplet_loss_param().margin(); +// there must be further check to ensure the gradient calc + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->gpu_diff()[0] / + static_cast(bottom[0]->num()); + int num = bottom[0]->num(); + int channels = bottom[0]->channels(); + for (int j = 0; j < num; ++j) { + Dtype* bout = bottom[0]->mutable_gpu_diff(); + if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { + // similar pairs + caffe_gpu_axpby( + channels, + alpha, + diff_pos.gpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + // dissimilar pairs + caffe_gpu_axpby( + channels, + -alpha, + diff_neg.gpu_data() + (j*channels), + Dtype(1.0), + bout + (j*channels)); + } else { + caffe_set(channels, Dtype(0), bout + (j*channels)); + } + } + } + for (int i = 1; i < 3; ++i) { 
+// there must be further check to ensure the gradient calc + if (propagate_down[i]) { + const Dtype sign = (i == 1) ? -1 : 1; + const Dtype alpha = sign * top[0]->gpu_diff()[0] / + static_cast(bottom[i]->num()); + int num = bottom[i]->num(); + int channels = bottom[i]->channels(); + for (int j = 0; j < num; ++j) { + Dtype* bout = bottom[i]->mutable_gpu_diff(); + if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { + if (i == 1) { + // similar pairs + caffe_gpu_axpby( + channels, + alpha, + diff_pos.gpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + } + else { + // dissimilar pairs + caffe_gpu_axpby( + channels, + alpha, + diff_neg.gpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + } + } else { + caffe_set(channels, Dtype(0), bout + (j*channels)); + } + } + } + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(TripletLossLayer); + +} // namespace caffe From e7e1a03357d00ee3088b6f7b97901d06d95a98c3 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Tue, 16 Jun 2015 11:49:33 +0800 Subject: [PATCH 16/82] opencv 2.4x --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 140acba7b7b..2b918ffd26d 100644 --- a/Makefile +++ b/Makefile @@ -171,7 +171,7 @@ ifneq ($(CPU_ONLY), 1) endif LIBRARIES += glog gflags protobuf leveldb snappy \ lmdb boost_system hdf5_hl hdf5 m \ - opencv_core opencv_highgui opencv_imgproc opencv_imgcodecs + opencv_core opencv_highgui opencv_imgproc #opencv_imgcodecs PYTHON_LIBRARIES := boost_python python2.7 WARNINGS := -Wall -Wno-sign-compare From 48402b0fa72842eb8b50cf4ef603a938ce9825e8 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Tue, 16 Jun 2015 12:03:44 +0800 Subject: [PATCH 17/82] annangement of headers --- src/caffe/layers/triplet_loss_layer.cpp | 3 +-- src/caffe/layers/triplet_loss_layer.cu | 5 ++--- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/src/caffe/layers/triplet_loss_layer.cpp b/src/caffe/layers/triplet_loss_layer.cpp index 467c4a5736a..cbbb02b3b5c 100644 
--- a/src/caffe/layers/triplet_loss_layer.cpp +++ b/src/caffe/layers/triplet_loss_layer.cpp @@ -123,8 +123,7 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, diff_pos.cpu_data() + (j*channels), Dtype(0.0), bout + (j*channels)); - } - else { + } else { // dissimilar pairs caffe_cpu_axpby( channels, diff --git a/src/caffe/layers/triplet_loss_layer.cu b/src/caffe/layers/triplet_loss_layer.cu index 3ca1277e800..fa1259d0160 100644 --- a/src/caffe/layers/triplet_loss_layer.cu +++ b/src/caffe/layers/triplet_loss_layer.cu @@ -2,9 +2,9 @@ #include #include "caffe/layer.hpp" -#include "caffe/vision_layers.hpp" #include "caffe/util/io.hpp" #include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" namespace caffe { @@ -105,8 +105,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, diff_pos.gpu_data() + (j*channels), Dtype(0.0), bout + (j*channels)); - } - else { + } else { // dissimilar pairs caffe_gpu_axpby( channels, From 9e10e1736361d45bb36d2fe3199806dfc8cd1404 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Wed, 17 Jun 2015 21:10:32 +0800 Subject: [PATCH 18/82] parameter update in triplet --- .../triplet/mnist_orpe_train_test.prototxt | 536 ++++++++++++++++++ .../triplet/mnist_triplet_train_test.prototxt | 2 +- examples/triplet/readme.md | 50 +- 3 files changed, 561 insertions(+), 27 deletions(-) create mode 100644 examples/triplet/mnist_orpe_train_test.prototxt diff --git a/examples/triplet/mnist_orpe_train_test.prototxt b/examples/triplet/mnist_orpe_train_test.prototxt new file mode 100644 index 00000000000..afad6f9051e --- /dev/null +++ b/examples/triplet/mnist_orpe_train_test.prototxt @@ -0,0 +1,536 @@ +name: "mnist_triplet_train_test" +layer { + name: "triplet_data" + type: "Data" + top: "triplet_data" + top: "sim" + include { + phase: TRAIN + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/triplet/mnist_triplet_train_leveldb" + batch_size: 64 + } +} +layer { + name: "triplet_data" + type: "Data" + 
top: "triplet_data" + top: "sim" + include { + phase: TEST + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/triplet/mnist_triplet_test_leveldb" + batch_size: 100 + } +} +layer { + name: "slice_triplet" + type: "Slice" + bottom: "triplet_data" + top: "data" + top: "data_true" + top: "data_false" + slice_param { + slice_dim: 1 + slice_point: 1 + slice_point: 2 + } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 16 + kernel_size: 8 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "pool1" + top: "pool1" +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 7 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu2" + type: "ReLU" + bottom: "pool2" + top: "pool2" +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 256 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu3" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "ip1" + top: "ip2" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 
2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip2" + top: "feat" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "conv1_true" + type: "Convolution" + bottom: "data_true" + top: "conv1_true" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 15 + kernel_size: 8 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1_true" + type: "Pooling" + bottom: "conv1_true" + top: "pool1_true" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu1_true" + type: "ReLU" + bottom: "pool1_true" + top: "pool1_true" +} +layer { + name: "conv2_true" + type: "Convolution" + bottom: "pool1_true" + top: "conv2_true" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 7 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2_true" + type: "Pooling" + bottom: "conv2_true" + top: "pool2_true" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu2_true" + type: "ReLU" + bottom: "pool2_true" + top: "pool2_true" +} +layer { + name: "ip1_true" + type: "InnerProduct" + bottom: "pool2_true" + top: "ip1_true" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 256 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu3_true" + type: "ReLU" + bottom: "ip1_true" + top: "ip1_true" +} +layer { + 
name: "ip2_true" + type: "InnerProduct" + bottom: "ip1_true" + top: "ip2_true" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat_true" + type: "InnerProduct" + bottom: "ip2_true" + top: "feat_true" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "conv1_false" + type: "Convolution" + bottom: "data_false" + top: "conv1_false" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 16 + kernel_size: 8 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1_false" + type: "Pooling" + bottom: "conv1_false" + top: "pool1_false" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu1_false" + type: "ReLU" + bottom: "pool1_false" + top: "pool1_false" +} +layer { + name: "conv2_false" + type: "Convolution" + bottom: "pool1_false" + top: "conv2_false" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 7 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2_false" + type: "Pooling" + bottom: "conv2_false" + top: "pool2_false" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu2_false" + type: "ReLU" + bottom: "pool2_false" + top: "pool2_false" +} +layer { + name: "ip1_false" + type: "InnerProduct" + bottom: "pool2_false" + top: "ip1_false" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + 
num_output: 256 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu3_false" + type: "ReLU" + bottom: "ip1_false" + top: "ip1_false" +} +layer { + name: "ip2_false" + type: "InnerProduct" + bottom: "ip1_false" + top: "ip2_false" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat_false" + type: "InnerProduct" + bottom: "ip2_false" + top: "feat_false" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "loss" + type: "TripletLoss" + bottom: "feat" + bottom: "feat_true" + bottom: "feat_false" + bottom: "sim" + top: "loss" + triplet_loss_param { + margin: 3 + } +} + diff --git a/examples/triplet/mnist_triplet_train_test.prototxt b/examples/triplet/mnist_triplet_train_test.prototxt index da25dec31de..12b8e1072fd 100644 --- a/examples/triplet/mnist_triplet_train_test.prototxt +++ b/examples/triplet/mnist_triplet_train_test.prototxt @@ -494,7 +494,7 @@ layer { bottom: "sim" top: "loss" triplet_loss_param { - margin: 1 + margin: 0.2 } } diff --git a/examples/triplet/readme.md b/examples/triplet/readme.md index 524718ce2db..efcca277d7a 100644 --- a/examples/triplet/readme.md +++ b/examples/triplet/readme.md @@ -57,17 +57,16 @@ In this section we will define the triplet network used for training. The resulting network is defined in `./examples/triplet/mnist_triplet_train_test.prototxt`. -### Reading in the Pair Data +### Reading in the Triplet Data We start with a data layer that reads from the LevelDB database we created -earlier. 
Each entry in this database contains the image data for a pair of -images (`pair_data`) and a binary label saying if they belong to the same class -or different classes (`sim`). +earlier. Each entry in this database contains the image data for a triplet of +images (`triplet_data`) and the label (`sim`) is not nessesary in our method. layers { - name: "pair_data" + name: "triplet_data" type: DATA - top: "pair_data" + top: "triplet_data" top: "sim" data_param { source: "examples/triplet/mnist-triplet-train-leveldb" @@ -77,31 +76,33 @@ or different classes (`sim`). include: { phase: TRAIN } } -In order to pack a pair of images into the same blob in the database we pack one -image per channel. We want to be able to work with these two images separately, -so we add a slice layer after the data layer. This takes the `pair_data` and +In order to pack a triplet of images into the same blob in the database we pack one +image per channel. We want to be able to work with these three images separately, +so we add a slice layer after the data layer. This takes the `triplet_data` and slices it along the channel dimension so that we have a single image in `data` -and its paired image in `data_p.` +and its positive image in `data_pos.` & its negative image in `data_neg.` layers { - name: "slice_pair" + name: "slice_triplet" type: SLICE - bottom: "pair_data" + bottom: "triplet_data" top: "data" - top: "data_p" + top: "data_pos" + top: "data_neg" slice_param { - slice_dim: 1 - slice_point: 1 - } + slice_dim: 1 + slice_point: 1 + slice_point: 2 + } } -### Building the First Side of the triplet Net +### Building the First part of the triplet Net Now we can specify the first side of the triplet net. This side operates on `data` and produces `feat`. Starting from the net in `./examples/triplet/mnist_triplet.prototxt` we add default weight fillers. Then we name the parameters of the convolutional and inner product layers. 
Naming the -parameters allows Caffe to share the parameters between layers on both sides of +parameters allows Caffe to share the parameters between layers on three channels of the triplet net. In the definition this looks like: ... @@ -132,23 +133,20 @@ Now we need to create the second path that operates on `data_neg` and produces paste it. Then we change the name of each layer, input, and output by appending `_neg` to differentiate the "paired" layers from the originals. -### Adding the Contrastive Loss Function +### Adding the Triplet Loss Function -To train the network we will optimize a contrastive loss function proposed in: -Raia Hadsell, Sumit Chopra, and Yann LeCun "Dimensionality Reduction by Learning -an Invariant Mapping". This loss function encourages matching pairs to be close -together in feature space while pushing non-matching pairs apart. This cost -function is implemented with the `TRIPLET_LOSS` layer: +To train the network we will optimize a triplet loss function proposed in: +This cost function is implemented with the `TRIPLET_LOSS` layer: layers { name: "loss" type: TRIPLET_LOSS triplet_loss_param { - margin: 1.0 + margin: 0.2 } bottom: "feat" bottom: "feat_pos" - bottom: "feat_neg" + bottom: "feat_neg" bottom: "sim" top: "loss" } From 66e80a7250d9fded458ebad12c3fda5715d515bb Mon Sep 17 00:00:00 2001 From: Wangyida Date: Tue, 30 Jun 2015 21:15:13 +0800 Subject: [PATCH 19/82] master restore --- LICENSE | 4 +- docs/development.md | 33 +- docs/tutorial/layers.md | 6 +- examples/mnist/readme.md | 2 +- .../triplet/convert_mnist_triplet_data.cpp | 127 ---- examples/triplet/create_mnist_triplet.sh | 21 - examples/triplet/mnist_siamese.ipynb | 154 ----- examples/triplet/mnist_triplet.prototxt | 113 ---- .../triplet/mnist_triplet_solver.prototxt | 25 - .../triplet/mnist_triplet_train_test.prototxt | 498 ---------------- examples/triplet/readme.md | 179 ------ examples/triplet/train_mnist_triplet.sh | 5 - include/caffe/data_layers.hpp | 3 - 
include/caffe/data_transformer.hpp | 36 ++ include/caffe/filler.hpp | 71 ++- include/caffe/layer.hpp | 1 + include/caffe/loss_layers.hpp | 69 --- include/caffe/net.hpp | 3 + include/caffe/neuron_layers.hpp | 70 ++- include/caffe/python_layer.hpp | 13 +- include/caffe/solver.hpp | 15 +- include/caffe/test/test_caffe_main.hpp | 28 +- .../caffe/test/test_gradient_check_util.hpp | 7 +- include/caffe/util/db.hpp | 136 ----- include/caffe/util/db_leveldb.hpp | 73 +++ include/caffe/util/db_lmdb.hpp | 91 +++ include/caffe/util/math_functions.hpp | 6 + include/caffe/util/mkl_alternate.hpp | 1 + include/caffe/vision_layers.hpp | 66 +++ python/caffe/draw.py | 2 +- src/caffe/blob.cpp | 1 + src/caffe/data_transformer.cpp | 116 +++- src/caffe/layers/base_data_layer.cpp | 11 +- src/caffe/layers/base_data_layer.cu | 6 +- src/caffe/layers/concat_layer.cu | 44 +- src/caffe/layers/contrastive_loss_layer.cpp | 25 +- src/caffe/layers/contrastive_loss_layer.cu | 34 +- src/caffe/layers/conv_layer.cpp | 7 - src/caffe/layers/conv_layer.cu | 7 - src/caffe/layers/cudnn_conv_layer.cu | 2 - src/caffe/layers/data_layer.cpp | 90 +-- src/caffe/layers/deconv_layer.cpp | 7 - src/caffe/layers/deconv_layer.cu | 7 - src/caffe/layers/flatten_layer.cpp | 16 +- src/caffe/layers/image_data_layer.cpp | 42 +- src/caffe/layers/inner_product_layer.cpp | 4 +- src/caffe/layers/inner_product_layer.cu | 4 +- src/caffe/layers/log_layer.cpp | 47 -- src/caffe/layers/log_layer.cu | 57 ++ src/caffe/layers/lrn_layer.cu | 102 ++-- src/caffe/layers/mvn_layer.cpp | 23 +- src/caffe/layers/mvn_layer.cu | 23 +- src/caffe/layers/pooling_layer.cu | 218 +++---- src/caffe/layers/prelu_layer.cpp | 4 +- src/caffe/layers/prelu_layer.cu | 16 +- .../sigmoid_cross_entropy_loss_layer.cpp | 2 +- .../sigmoid_cross_entropy_loss_layer.cu | 22 +- src/caffe/layers/slice_layer.cu | 47 +- src/caffe/layers/triplet_loss_layer.cpp | 124 ---- src/caffe/net.cpp | 46 +- src/caffe/proto/caffe.proto | 206 +++++-- src/caffe/solver.cpp | 554 
++++++++---------- src/caffe/test/test_accuracy_layer.cpp | 5 +- src/caffe/test/test_argmax_layer.cpp | 3 +- .../test/test_contrastive_loss_layer.cpp | 58 +- src/caffe/test/test_convolution_layer.cpp | 9 +- .../test/test_data/generate_sample_data.py | 12 +- src/caffe/test/test_dummy_data_layer.cpp | 5 +- src/caffe/test/test_filler.cpp | 98 ++++ src/caffe/test/test_flatten_layer.cpp | 46 +- src/caffe/test/test_gradient_based_solver.cpp | 82 ++- src/caffe/test/test_im2col_kernel.cu | 4 +- src/caffe/test/test_math_functions.cpp | 51 +- .../test_multinomial_logistic_loss_layer.cpp | 3 +- src/caffe/test/test_net.cpp | 152 ++++- src/caffe/test/test_neuron_layer.cpp | 139 ++++- src/caffe/test/test_pooling_layer.cpp | 13 +- src/caffe/test/test_softmax_layer.cpp | 4 +- src/caffe/test/test_stochastic_pooling.cpp | 35 +- src/caffe/test/test_triplet_loss_layer.cpp | 107 ---- src/caffe/util/db.cpp | 57 +- src/caffe/util/db_leveldb.cpp | 21 + src/caffe/util/db_lmdb.cpp | 51 ++ src/caffe/util/math_functions.cpp | 10 + src/caffe/util/math_functions.cu | 21 + 85 files changed, 2184 insertions(+), 2574 deletions(-) delete mode 100644 examples/triplet/convert_mnist_triplet_data.cpp delete mode 100755 examples/triplet/create_mnist_triplet.sh delete mode 100644 examples/triplet/mnist_siamese.ipynb delete mode 100644 examples/triplet/mnist_triplet.prototxt delete mode 100644 examples/triplet/mnist_triplet_solver.prototxt delete mode 100644 examples/triplet/mnist_triplet_train_test.prototxt delete mode 100644 examples/triplet/readme.md delete mode 100755 examples/triplet/train_mnist_triplet.sh create mode 100644 include/caffe/util/db_leveldb.hpp create mode 100644 include/caffe/util/db_lmdb.hpp create mode 100644 src/caffe/layers/log_layer.cu delete mode 100644 src/caffe/layers/triplet_loss_layer.cpp delete mode 100644 src/caffe/test/test_triplet_loss_layer.cpp create mode 100644 src/caffe/util/db_leveldb.cpp create mode 100644 src/caffe/util/db_lmdb.cpp diff --git a/LICENSE b/LICENSE 
index efcc5c5b6b0..d69d16f5bc7 100644 --- a/LICENSE +++ b/LICENSE @@ -1,11 +1,11 @@ COPYRIGHT All contributions by the University of California: -Copyright (c) 2014, The Regents of the University of California (Regents) +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) All rights reserved. All other contributions: -Copyright (c) 2014, the respective contributors +Copyright (c) 2014, 2015, the respective contributors All rights reserved. Caffe uses a shared copyright model: each contributor holds copyright over diff --git a/docs/development.md b/docs/development.md index ccb6a29701d..107c2c3b281 100644 --- a/docs/development.md +++ b/docs/development.md @@ -62,28 +62,25 @@ The following is a poetic presentation of the protocol in code form. #### [Shelhamer's](https://github.com/shelhamer) “life of a branch in four acts” Make the `feature` branch off of the latest `bvlc/master` -``` -git checkout master -git pull upstream master -git checkout -b feature -# do your work, make commits -``` + + git checkout master + git pull upstream master + git checkout -b feature + # do your work, make commits Prepare to merge by rebasing your branch on the latest `bvlc/master` -``` -# make sure master is fresh -git checkout master -git pull upstream master -# rebase your branch on the tip of master -git checkout feature -git rebase master -``` + + # make sure master is fresh + git checkout master + git pull upstream master + # rebase your branch on the tip of master + git checkout feature + git rebase master Push your branch to pull request it into `BVLC/caffe:master` -``` -git push origin feature -# ...make pull request to master... -``` + + git push origin feature + # ...make pull request to master... Now make a pull request! You can do this from the command line (`git pull-request -b master`) if you install [hub](https://github.com/github/hub). Hub has many other magical uses. 
diff --git a/docs/tutorial/layers.md b/docs/tutorial/layers.md index ff2ee491244..806374e3f93 100644 --- a/docs/tutorial/layers.md +++ b/docs/tutorial/layers.md @@ -5,9 +5,7 @@ title: Layer Catalogue To create a Caffe model you need to define the model architecture in a protocol buffer definition file (prototxt). -Caffe layers and their parameters are defined in the protocol buffer definitions for the project in [caffe.proto](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto). The latest definitions are in the [dev caffe.proto](https://github.com/BVLC/caffe/blob/dev/src/caffe/proto/caffe.proto). - -TODO complete list of layers linking to headings +Caffe layers and their parameters are defined in the protocol buffer definitions for the project in [caffe.proto](https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto). ### Vision Layers @@ -507,7 +505,7 @@ The `Slice` layer is a utility layer that slices an input layer to multiple outp } } -`axis` indicates the target axis; `slice_point` indicates indexes in the selected dimension (the number of indices must be equal to the number of top blobs minus one). +`axis` indicates the target axis; `slice_point` indicates indexes in the selected dimension (the number of indices must be equal to the number of top blobs minus one). #### Elementwise Operations diff --git a/examples/mnist/readme.md b/examples/mnist/readme.md index 269e53ab9b9..413d4a1f40b 100644 --- a/examples/mnist/readme.md +++ b/examples/mnist/readme.md @@ -283,5 +283,5 @@ and you will be using CPU for training. Isn't that easy? MNIST is a small dataset, so training with GPU does not really introduce too much benefit due to communication overheads. On larger datasets with more complex models, such as ImageNet, the computation speed difference will be more significant. -### How to reduce the learning rate a fixed steps? +### How to reduce the learning rate at fixed steps? 
Look at lenet_multistep_solver.prototxt diff --git a/examples/triplet/convert_mnist_triplet_data.cpp b/examples/triplet/convert_mnist_triplet_data.cpp deleted file mode 100644 index d1eed30cba6..00000000000 --- a/examples/triplet/convert_mnist_triplet_data.cpp +++ /dev/null @@ -1,127 +0,0 @@ -// This script converts the MNIST dataset to the leveldb format used -// by caffe to train siamese network. -// Usage: -// convert_mnist_data input_image_file input_label_file output_db_file -// The MNIST dataset could be downloaded at -// http://yann.lecun.com/exdb/mnist/ -#include // NOLINT(readability/streams) -#include - -#include "glog/logging.h" -#include "google/protobuf/text_format.h" -#include "leveldb/db.h" -#include "stdint.h" - -#include "caffe/proto/caffe.pb.h" -#include "caffe/util/math_functions.hpp" - -uint32_t swap_endian(uint32_t val) { - val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); - return (val << 16) | (val >> 16); -} - -void read_image(std::ifstream* image_file, std::ifstream* label_file, - uint32_t index, uint32_t rows, uint32_t cols, - char* pixels, char* label) { - image_file->seekg(index * rows * cols + 16); - image_file->read(pixels, rows * cols); - label_file->seekg(index + 8); - label_file->read(label, 1); -} - -void convert_dataset(const char* image_filename, const char* label_filename, - const char* db_filename) { - // Open files - std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); - std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); - CHECK(image_file) << "Unable to open file " << image_filename; - CHECK(label_file) << "Unable to open file " << label_filename; - // Read the magic and the meta data - uint32_t magic; - uint32_t num_items; - uint32_t num_labels; - uint32_t rows; - uint32_t cols; - - image_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - CHECK_EQ(magic, 2051) << "Incorrect image file magic."; - label_file.read(reinterpret_cast(&magic), 4); - magic = 
swap_endian(magic); - CHECK_EQ(magic, 2049) << "Incorrect label file magic."; - image_file.read(reinterpret_cast(&num_items), 4); - num_items = swap_endian(num_items); - label_file.read(reinterpret_cast(&num_labels), 4); - num_labels = swap_endian(num_labels); - CHECK_EQ(num_items, num_labels); - image_file.read(reinterpret_cast(&rows), 4); - rows = swap_endian(rows); - image_file.read(reinterpret_cast(&cols), 4); - cols = swap_endian(cols); - - // Open leveldb - - leveldb::DB* db; - leveldb::Options options; - options.create_if_missing = true; - options.error_if_exists = true; - leveldb::Status status = leveldb::DB::Open( - options, db_filename, &db); - CHECK(status.ok()) << "Failed to open leveldb " << db_filename - << ". Is it already existing?"; - - char label_i; - char label_j; - char label_k; - char* pixels = new char[3 * rows * cols]; - const int kMaxKeyLength = 10; - char key[kMaxKeyLength]; - std::string value; - - caffe::Datum datum; - datum.set_channels(3); // one channel for each image in the pair - datum.set_height(rows); - datum.set_width(cols); - LOG(INFO) << "A total of " << num_items << " items."; - LOG(INFO) << "Rows: " << rows << " Cols: " << cols; - for (int itemid = 0; itemid < num_items; ++itemid) { - int i = caffe::caffe_rng_rand() % num_items; // pick a random pair - int j = caffe::caffe_rng_rand() % num_items; - int k = caffe::caffe_rng_rand() % num_items; - read_image(&image_file, &label_file, i, rows, cols, - pixels, &label_i); - read_image(&image_file, &label_file, j, rows, cols, - pixels + (rows * cols), &label_j); - read_image(&image_file, &label_file, k, rows, cols, - pixels + (2 * rows * cols), &label_k); - datum.set_data(pixels, 3*rows*cols); - if (label_i == label_j && label_i == label_k) { - datum.set_label(1); - } else { - datum.set_label(0); - } - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", itemid); - db->Put(leveldb::WriteOptions(), std::string(key), value); - } - - delete db; - delete pixels; -} - 
-int main(int argc, char** argv) { - if (argc != 4) { - printf("This script converts the MNIST dataset to the leveldb format used\n" - "by caffe to train a triplet network.\n" - "Usage:\n" - " convert_mnist_data input_image_file input_label_file " - "output_db_file\n" - "The MNIST dataset could be downloaded at\n" - " http://yann.lecun.com/exdb/mnist/\n" - "You should gunzip them after downloading.\n"); - } else { - google::InitGoogleLogging(argv[0]); - convert_dataset(argv[1], argv[2], argv[3]); - } - return 0; -} diff --git a/examples/triplet/create_mnist_triplet.sh b/examples/triplet/create_mnist_triplet.sh deleted file mode 100755 index f404f2aa255..00000000000 --- a/examples/triplet/create_mnist_triplet.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env sh -# This script converts the mnist data into leveldb format. - -EXAMPLES=./build/examples/triplet -DATA=./data/mnist - -echo "Creating leveldb..." - -rm -rf ./examples/triplet/mnist_siamese_train_leveldb -rm -rf ./examples/triplet/mnist_siamese_test_leveldb - -$EXAMPLES/convert_mnist_triplet_data.bin \ - $DATA/train-images-idx3-ubyte \ - $DATA/train-labels-idx1-ubyte \ - ./examples/triplet/mnist_triplet_train_leveldb -$EXAMPLES/convert_mnist_triplet_data.bin \ - $DATA/t10k-images-idx3-ubyte \ - $DATA/t10k-labels-idx1-ubyte \ - ./examples/triplet/mnist_triplet_test_leveldb - -echo "Done." 
diff --git a/examples/triplet/mnist_siamese.ipynb b/examples/triplet/mnist_siamese.ipynb deleted file mode 100644 index 8e076663ca6..00000000000 --- a/examples/triplet/mnist_siamese.ipynb +++ /dev/null @@ -1,154 +0,0 @@ -{ - "metadata": { - "description": "Extracting features and plotting the Siamese network embedding.", - "example_name": "Siamese network embedding", - "include_in_docs": true, - "priority": 6, - "signature": "sha256:845bb18929f96543ba2611eb5eca744fd98939cbef876df6bc319c29f616fc64" - }, - "nbformat": 3, - "nbformat_minor": 0, - "worksheets": [ - { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Setup\n", - "\n", - "Import Caffe and the usual modules." - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "import numpy as np\n", - "import matplotlib.pyplot as plt\n", - "%matplotlib inline\n", - "\n", - "# Make sure that caffe is on the python path:\n", - "caffe_root = '../../' # this file is expected to be in {caffe_root}/examples/siamese\n", - "import sys\n", - "sys.path.insert(0, caffe_root + 'python')\n", - "\n", - "import caffe" - ], - "language": "python", - "metadata": {}, - "outputs": [], - "prompt_number": 1 - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Load the trained net\n", - "\n", - "Load the model definition and weights and set to CPU mode TEST phase computation with input scaling." 
- ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "MODEL_FILE = 'mnist_siamese.prototxt'\n", - "# decrease if you want to preview during training\n", - "PRETRAINED_FILE = 'mnist_siamese_iter_50000.caffemodel' \n", - "caffe.set_mode_cpu()\n", - "net = caffe.Net(MODEL_FILE, PRETRAINED_FILE, caffe.TEST)" - ], - "language": "python", - "metadata": {}, - "outputs": [], - "prompt_number": 2 - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Load some MNIST test data" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "TEST_DATA_FILE = '../../data/mnist/t10k-images-idx3-ubyte'\n", - "TEST_LABEL_FILE = '../../data/mnist/t10k-labels-idx1-ubyte'\n", - "n = 10000\n", - "\n", - "with open(TEST_DATA_FILE, 'rb') as f:\n", - " f.read(16) # skip the header\n", - " raw_data = np.fromstring(f.read(n * 28*28), dtype=np.uint8)\n", - "\n", - "with open(TEST_LABEL_FILE, 'rb') as f:\n", - " f.read(8) # skip the header\n", - " labels = np.fromstring(f.read(n), dtype=np.uint8)" - ], - "language": "python", - "metadata": {}, - "outputs": [], - "prompt_number": 3 - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Generate the Siamese features" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "# reshape and preprocess\n", - "caffe_in = raw_data.reshape(n, 1, 28, 28) * 0.00390625 # manually scale data instead of using `caffe.io.Transformer`\n", - "out = net.forward_all(data=caffe_in)" - ], - "language": "python", - "metadata": {}, - "outputs": [], - "prompt_number": 4 - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Visualize the learned Siamese embedding" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "feat = out['feat']\n", - "f = plt.figure(figsize=(16,9))\n", - "c = ['#ff0000', '#ffff00', '#00ff00', '#00ffff', '#0000ff', \n", - " '#ff00ff', '#990000', '#999900', '#009900', '#009999']\n", - "for i in range(10):\n", - " 
plt.plot(feat[labels==i,0].flatten(), feat[labels==i,1].flatten(), '.', c=c[i])\n", - "plt.legend(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'])\n", - "plt.grid()\n", - "plt.show()" - ], - "language": "python", - "metadata": {}, - "outputs": [ - { - "metadata": {}, - "output_type": "display_data", - "png": "iVBORw0KGgoAAAANSUhEUgAAA54AAAIXCAYAAAD0R4FDAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvXtwXOWZr/usvurWUktqGdmxaawEHEMuthGXITiIyMaJ\nwbEMFmCTDMkkoyqSyTnZMwdqpmYyzEyS2ruKue2ZqSTHO/vYGQbhCxdjwI637ViWMEEEMJhgB4MB\ngSRLsizJkiypuyX1+WP1Wlp971YvSd3y+1S5rF69Lt/6+lOrf/2+v/dVgsEggiAIgiAIgiAIgjBT\nWOZ6AIIgCIIgCIIgCML8RoSnIAiCIAiCIAiCMKOI8BQEQRAEQRAEQRBmFBGegiAIgiAIgiAIwowi\nwlMQBEEQBEEQBEGYUUR4CoIgCIIgCIIgCDNKRsJTUZQ8RVFaFUV5U1GUU4qi/HezBiYIgiAIgiAI\ngiDMD5RM+3gqilIQDAZHFEWxAS8B/08wGHzJlNEJgiAIgiAIgiAIOU/GqbbBYHAk9KMDsAJ9mZ5T\nEARBEARBEARBmD9kLDwVRbEoivIm0A0cDQaDpzIfliAIgiAIgiAIgjBfMCPiORkMBlcAi4EvK4pS\nk/GoBEEQBEEQBEEQhHmDzawTBYPBi4qivAhUA03adkVRMjORCoIgCIIgCIIgCFlNMBhUEj2fkfBU\nFMUDjAeDwQFFUfKBtcDfxxhEJpcRhDC+9a1vsWPHjrkehjCPkDUlmImsJ8FsZE0JZiNrSjAbRUmo\nOYHMI54LgV8pimJBTdt9PBgMHsnwnIIgCIIgCIIgCMI8IiPhGQwG3wZWmTQWQUiJq666aq6HIMwz\nZE0JZiLrSTAbWVOC2ciaEuaCjIsLCcJsU1NTM9dDEOYZsqYEM5H1JJiNrCnBbGRNCXOBCE9BEARB\nEARBEARhRjGtqq0gCIIgCIIgCIIQTSrFd3KF6RaOVWa64qyiKEGpaisIgiAIgiAIwuWKoijzotNH\nvPsIbU+oriXVVhAEQRAEQRAEQZhRRHgKOUdTU9NcD0GYZ8iaEsxE1pNgNrKmBLORNSXMBSI8BUEQ\nBEEQBEEQhBlFPJ6CIAiCIAiCIAgziHg8JeIpCIIgCIIgCIJwWdPX18emTZsoKiriqquu4sknnzT9\nGiI8hZxDfAmC2ciaEsxE1pNgNrKmBLORNSVE8v3vf5+8vDx6enp44okneOihhzh16pSp1xDhKQiC\nIAiCIAiCcJly6dIlnnnmGX784x9TUFDAl770JTZu3Mjjjz9u6nXE4ykIgiAIgiAIgjCDJPV4NjTA\nmTNQUACNjeB2p3eBDI4/ceIEt956K5cuXdK3/fM//zNNTU3s27cvpfsQj6cgCIIgCIIgCEK2c+YM\nHDsGBw6oInIWjx8eHqa4uDhsm8vlYmhoKP1xJECEp5BziC9BMBtZU4KZyHoSzEbWlGA2sqaykIIC\n9f/qati2bVaPLyoqYnBwMGzbxYsXcblc6Y8jASI8BUEQBEEQBEEQ5pLGRqivh0OH0k+zzfD4a665\nhvHxcd5//31921tvvcXnPve59MeRAPF4CoIgCIIgCIIgzCDZ3sdzy5YtKIrCL3/5S9544w3uuusu\nfvvb37J8+fKw/cTjKQiCIAiCIAiCIEyLn/3sZ4yOjrJgwQK+
8Y1v8Itf/CJKdGaKCE8h5xBfgmA2\nsqYEM5H1JJiNrCnBbGRNCZGUlpby7LPPMjw8zEcffcT9999v+jVEeAqCIAiCIAiCIAgzing8BUEQ\nBEEQBEEQZpBs93iming8BUEQBEEQBEEQhKxFhKeQc4gvQTAbWVOCmch6EsxG1pRgNrKmhLlAhKcg\nCIIgCIIgCIIwo4jHUxAEQRAEQRAEYQYRj6dEPAVBEARBEARBEIQZRoSnkHOIL0EwG1lTgpnIehLM\nRtaUYDaypoS5QISnIAiCIAiCIAiCMKOIx1MQBEEQBEEQBGEGyWaP53/8x3+wY8cOfv/737Nlyxa2\nb98ed99MPJ62zIcqCIIgCIIgCIIg5CKf+tSn+NGPfsTBgwcZHR2dsetIqq2Qc4gvQTAbWVOCmch6\nEsxG1pRgNrKmBCObNm1i48aNlJeXz+h1RHgKgiAIgiAIgiDMKQ1ADbAeGJiD45nxVGDxeAqCIAiC\nIAiCIMwgyT2eNcCx0M/1wO40r5Dp8fCjH/2I9vZ28XgKgiAIgiBoNDc3MDBwBputgNraRpxO91wP\nSRAEIQMKQv9XA9vm4PiZj3hKqq2Qc4gvQTAbWVOCmch6mh0GBs7Q1XWM9vYDtLQ0zPVwZhRZU4LZ\nyJrKRhpRI5WHgOl8kZbp8WrUciaRiKcgCIIgCDmHzaZ+u+/xVLN69fS+3RcEQcge3EwnPdaM4ycm\nJggEAoyPjzMxMYHP58Nms2G1WjMYTzTi8RQEQRAEIefw+QZoaWlg9eptkmYrCELWk819PP/u7/6O\nf/iHf4ja9rd/+7dR+2bi8RThKQiCIAiCIAiCMINks/BMh0yEp3g8hZxDfAmC2ciaEswkcj01Nzew\nb18N+/evx+ebXon7TMmGMQjTR96jBLORNSXMBSI8BUEQBGEGyYYiONkwBkEQBOHyRlJtBUEQBGEG\n2b9/Pe3tB/B4qrnzzkNz4kfMhjEIgiBczkiqrQhPQRAEQZhRsqEITjaMQRAE4XJGhKek2go5iPgS\nBLORNSWYSeR6cjrdrFmze04FnxljEJ/o3CHvUYLZyJoS5gLp4ykIgiAIOUZzcwMDA2ew2QqorW2c\nFVGr+UQBWloaWLMmk35zU8zFvQiCIAizj6TaCoIgCEKOsW9fjS4Cq6rqTROBiUjHJ5qOmJyLexEE\nQZhtJNVWUm0FQRAEIeew2QoA8HiqWb16W9hzM5USW1vbSFVVfUrFidKpopvoXgRBEIT5gwhPIecQ\nX4JgNrKmBDOZjfWUSATOVOuUdHyi6YjJdATt5Yq8RwlmI2tKmAtEeAqCIAhCDmCMZAJxRWA2RBDz\n8ytwOj0pCclsKL4kCIJwOeP3+/nOd77DVVddRXFxMStXruTXv/616dcRj6cgCIJw2ZMLBW5S9UJm\nQ+sU8W0KgiCEk80ez5GRER577DG+/e1vc+WVV/Liiy+yZcsW3n77bbxeb9i+mXg8paqtIAiCcNkz\nUxVbzSTVSKYWQYyFWQI72XmyIeqqkQtfKgiCIMwlBQUFPProo/rjO++8k6VLl/LGG29ECc9MkFRb\nIecQX4JgNrKmBDOF0kytJzO8kGb5P5OdJ5t8mzPleZ1N5D1KMBtZU9lHAw3UUMN61jNA+oXhMj3e\nSHd3N2fOnOG6667L6DyRSMRTEARBuOyprW2c8/TUZLS2PsLISA9HjmxNO3KnRf36+98BUhPYiSKF\nyYR6oqhrJuza9VlGRrqwWOzcffdruFzJv4nPpuirIAhCPM5whmOomTcNNLCb9N5DMz1eIxAI8MAD\nD/Ctb32La665ZlrniId4PAVBEAQhy4gl+tLxTUYef/BgnX5sYeFiNm9+O6lwTXS9ufKRbt/uJhC4\nCKj38cADnyQ9Jhs8r4IgCMk8nutZzwEOUE01hziEm/TerzI9HmBycpKtW7cyPDzMc889h9VqTfk+\nxOMpCIIgCFlMvKhiLM9p
rMhdqscbj001/TVRpHCmIprJsFjsAFitBXz96y/F3U+bl6GhsxQWenE4\nimdriIIgCNOikUYaaGAb26YlGjM9PhgM8p3vfIfz58+zf//+mKIzU8TjKeQc4ksQzEbWlADh7Up8\nvun7Y9JZT/H8h4ODZ0M/WRkd7cHnG4jpm4x3fKRonI7nMpt8mhp33/0ahYWLuffeUwnTbLV5uXSp\nnZ6e4znt7wR5jxLMR9ZU9uHGzW52T0s0mnH8Qw89xB/+8Af27duH0+mc1jmSIcJTEARBuGxIJC7n\nogiNJjDt9mJuuukxffvkpD/00wTnzh2jpaUhZr/LeFHJSNE4nV6Z2dhf0+Xy8sADnyT1dmrzYreX\nAOLvFARBSERbWxvbtm3jrbfeorKyEpfLhcvl4sknnzT1OuLxFARBEC4bEvkW9+9fT3v7gbTSUdNF\nSwEdHDxLMBhgdLQXmIgaz44dZfj9/YAqnrZu/SjmeDL1L2baaiRbW5Vo83LTTY/R2vqw+DsFQZhz\nsrmPZzpk4vEU4SkIgiBcNiQSl7NRhMYofI1YLA48nhtwOIqprW3k0KF6OjsPY7eXsGTJ1xgZOYfN\nVsDg4HuMjp5PqaprKqIwlhBvbm6gre15JiZ8eDzXs3btnrjzkU7BI0EQhMsZEZ6SaivkIOJLEMxG\n1tTlQyLfolmppYnW01QK6FSxG4fDTVnZyjAv4tq1e6iqqmfr1o84d65JTwEeHPyIQOAiPl8vu3Zd\nE9eP2tzcwNmzu5OmDsdK1R0YOMPoaBd+fz+dnYdpaWngf/0vB9u2KWzbZuHcuZcSHp8uZnlr5zPy\nHiWYjawpYS4Q4SkIgiBcNqQjLmdCEGnCd/Pmk3i9G/F669iy5UPy8soAVcBZrfns3r2c9vbDHDpU\nz8TEmOEM4/pPk5P+uKJyYOCM3nYELHz88UF+9asKhobawu7twoW3cDrLcTiifaMAZWUrWL16G8Fg\nILQlyPPP36afIxAYJD+/krVrn5q2YJ8Lb60gCIIw+0iqrSAIgpDVzJWPcDbTSI8efZCPP96Px7OC\nCxdOMjbWoz+nKHaD8FOxWBxMTvqx20vYvPktPeVWm6uenleYnPShKDYsljwmJoaBqd6XsVJ+tXv0\n+QZoavo2EKSmZgdOp5tt2yyA+rd8w4YWFi681bT5mQ1vrSAIwlwjqbbSx1MQBEHIcmL1tJwNUk0j\nzUQYa8f29Z3E7++no+MwimIP20cVnQqa8AO1yTdAIHCRZ56ppqLiBmprG8PmSj12XK+Qa7UWYLE4\n2L7dzfj4SNg1HI5SrNZ89u2riXkfGzY08/zzt7FhwzEWLrw1rflJRm1t44x7awVBEIS5RyKeQs7R\n1NRETU3NXA9DmEfImspuzI6IpSoUUy02ZIz8uVxL+eCDYlauXER+vprammpxH1BFnN1eyLlz0QWI\nNCyWPK644ibOnTuGzVbE+LgazayqqsfvH6a9/YC+r93uprj40/T3/55Nm15l374vG1JwVRTFxt13\nv87x4/+XPpaioqUUFV2ZcOyx5idbq9zmOvIeJZiNrKnZRyKe4vEUBEEQspxEBYESEc+jmaqnMFU/\nqDHyV1CwiL6+t2hvP8DHHx9IubhPWdkKvN46Skuvpb//dNR+ZWVf4FOfWktBwSKuuqqOyclx8vMr\nqai4PnSeIsbG+lm9+udhxwUCA1y48DqTkz5eeGGNIdJpwWp1AWpU9MUX12G1qpFWRbExPNyWdOxO\npxuHw83Bg3X6HItfUxAEQYiHRDwFQRCEeYkxmpifX8m9957G6XSbHkE1Rv6OHNmqn9vhcNPZeRib\nrYgrrriZNWvUtiS7dn2WkZEuLBY7d911hBMnfqJHDR9/fCGjo11h53c4SrHZ8pmcHGdyMkAgMGzw\nfNqwWCx6Oq3XW0db296oMRojo/HwejfS1fUyPt/5sGtv2fJBVERzaOgsExMBxsbC+5BqEd
d05lai\npIIgXA5IxFOEpyAIgjBP0QSmhrF4jlmeQqNoys+v4OLFs1y48DplZV/Ebndx4cIJfL4LAOTlVeJw\nuBgcfE8/vrBwMUuWfE0/R0/Pb/H7p19B1+vdSFvb88Ckvq2gYBFu97V0dh4O29fpLMfvv0gwOE5Z\n2Qo2bDiqC2dQ27zcc8+bnDjxU318vb2vhxU+0tAEKpD23EovUEEQLgdEeIrwFHIQ8SUIZiNran7i\n8w2we/dyRke7oiJwZkXZjKLJ6fTg8/Xy7ruwbJn6fH5+ZVgEU1FsBINTLVEWLryNiYlxenqOT/Mu\nw1Er4E6iRSGt1gIqK2/hy1/+Jbt3f5aJiTHsdheVlas5f/41XUTabIWhCrg2PJ5V2GyF9Pf/ntHR\n84yPXzKMObzIkXbNpUvvYWTk3LTmczaq2uZ6VFXeowSzkTU1+2S78PzGN77BkSNHuHTpEh6Ph+98\n5zv89V//ddR+4vEUBEEQhAicTjf33ns6pj/ULC+i0d/p8awIe87hKGXhwhq9Sm1Z2QocjpKwfc6d\nO0ZPz8sJrpDen2k1BVcVnYpiZ2JihI6Ow7S2Poz2OSEQGAJgcnKqRcv4+BiBwEV8vgt0dBzmllv+\nldHR8wQCF8OEcqTotNuLuf/+9xgZOTft+dQ8vKWl14b5Rc1EvKeCIAiJ+au/+is+/PBDBgcHOXDg\nAP/+7//Or3/9a1OvIcJTyDnkGzrBbGRNzV/iFQjKtBWIVrgoGAxQVOTFanUCasXZZcvUyOaGDU2M\njJzT/ZiXLn1CWdnnKChYRH5+ZehMViLF3BQKxpRZu70ktC05ijLVLc1ud3HTTY8xOTmmb+vtfQOP\nRy1MVFa2ImJ+guzbdysWiyqYrdYCnE4PAOXlK1myZD1LlqynqMhLWdnnaWl5SN83cj7jFXgyor1G\nQ0PJCxpNF7Nav8wV8h4lmI2sKSGS6667jry8PP2xzWZjwYIFpl5DhKcgCIJw2THdSrkaWgSto+Mw\ngcAluruP09FxGLu9EFArxf6f/1NHX99J/Rif7wLnzh2jouIGios/Hdo6keAqU4JUUewsWfI1Uv2z\nHQyO64I3EBjihRduD3ve41nF2rV7qKqqZ8OGo9x99+toolZRrIyP+5icDKAoDurqfsv9979HUdFS\nrNYC+vpO4vNdxO8fpLv7OO3tB7DbizKOLM+kOMz09RYEQZhpGoAaYD0wnZyPTI8H+N73vkdhYSHX\nXXcdf/M3f8OqVaumeabYiMdTyDnElyCYjawpIV2MvkSn001Hx2G9ku3Ro4f50peqsVqddHdHezeX\nLFnPhQsnGRlpT+laDoebxYu/yocfPm2oZps6dnsJZWWf08eiKHYqKm7k/PlXCAaDKIqVu+9+DYej\nhH37bqWg4FOcP9+qH+90VlBRUY3fPxjTi5rIm5mOfzPdok/NzQ20tT3PxIQPj+d61q7dM29FpbxH\nCWYja2r2SebxrAG0Ds71QLpl1jI9XiMYDHLs2DE2b97M/v37ufHGG8Oez8TjaUv0pCAIgiBc7kRW\nrh0aasNqteP1bqSmZgcwVckV4N1367jzzr0cObI15vk6OtQWK6mgKDbKy79IZ+dvpiU6QSEYnKSn\n51VA9Z0WF18TJiCDwUmefnolDz54gQce+IT9+9eHHe/znae9/QB5eZVEUlCwKGHRptraRp55ZhVW\nq5MjR7YmLOyjpdymysDAGb1wU2fnYVpaGnA43DldREgQhMuXgtD/1cB0cj4yPV5DURRqamqor6/n\nySefjBKemSART0EQBEEwoImnwcGzuFxe+vtP4ff3A1rVWFUAxmr9YRReq1f/nGefvZmxsa6oa4Rj\nBSawWBx6P04zyMtbwPj4GOPjg/p1Fiy4md7e15ic9EWPwppPefkqbLZ8rFYH3d2t+P1qKxiHo5R7\n7jnBM8/coPf5dDjcbNnyYZi4i9UaxbitqGgpRUVXRg
nDVKrORu5jbP2itYM5eLBOWrMIgpCVJIt4\nDqCmy24DpvOVWabHR/Ld736XyspKfvKTn4Rtl6q2giAIgmASmi9xZKSd7u7juugEdNEZz4do9DS2\ntj7Mffed1gvzxGPDhiZcrqVYrXkJ90uXsbEeg+i0UF6+gp6e4zFFJ8DExCg9Pcfp7DxMd/crKIr6\n+cFicVBScg0tLQ9RXv5FQBOib6ZUtMm4raBgUUzPZype0Mh9amsb8Xrr8Ho3smHDUZxOd84XERLm\nK2a474T5jhs1PXa6ojGT48+fP8/OnTu5dOkSExMTHDx4kD179rBx48ZpjiY2IjyFnKOpqWmuhyDM\nM2RNXd5EVl7VxIvdXhy1b1nZCrzeurh+RZutgHffnRI+TqebioobEl7/nXf+jcLCKwkEBhPulxmT\n9PefSnlvv78Pn68Xi8VJeflKzp9v1YsIuVxLKS29lpaWh6Iq1cYq4mPc5nCocxopDCMFo/aaPPHE\nEvbuvZX9+9dHVc51Ot2sW/cs69btjXmt+ZRmK+9Ruc4ZVPfdAVQROvfImhKMKIrCL37xCxYvXkx5\neTk/+tGPePzxx7nhhsR/v9JFPJ6CIAhCzpFKamaqaJE0QI+ktbQ0cNNNj7F793ImJkax211UVq7m\nK195Iupakem1p08/SGmpl4MH6/Rte/fezOhoFzZbEePjw/qximJncPBjhobOTnv8qRLej1NDTfMF\nsNkKGR+/hN3uMvT69DE01AaA0+lhdPQ8Pt8AQ0MfAvDMM6soLAxPnXU43Pq9a55YTVhqcxtZQChy\nu/E1uXRJLcLk9dZRVLQUiyW+XzRdn6ggzA5mue8EYWbweDyz8mVERh5PRVGWAP8JLECt+74tGAz+\nW8Q+4vEUBEEQTCWWl3C6JKq8unfvrXohHqfTQ0XFDdTWNtLa+oguNgOBQb1irOZh7Os7qafoVlXV\nY7Xm8/HHBwgEBuOmuloszrjPzSRWax6LF3+VW275V1pbH+ammx4LE8oVFdczOPghPt+AIXUXbLYi\nJiZ8evqx11vHunXPhr02TmeF7gnNy6vkvvtOh81dvC8NtNfEbi8hELiovzbi4RRyE7Pdd0Iukszj\nmSvMZVXbAPDfgsHgm4qiFAGvK4pyKBgMns7wvIIgCIIQFzO9fPn5FTidnpgCSEsNtdmK8Pl6dX/h\nyEiPLoDy8yv1sVgsTn07qGJ1eLiTgYFTYV7RWMy86JyKbhqpqLiJmprtYdHCe+89zeOPVzI+Psy5\nc8dwOsvDRCcQFrkF9MJIxtfG4XDT2XkYgLGxrqi5i1eJ1hh1bm19GKs1n927lzM21guoKc/i4RRy\nB819l000AM8DPuB6YA8iioWZJiOPZzAY7AoGg2+Gfh4GTgOLzBiYIMRDfAmC2ciayj3M9PINDbXh\n8/XS0XE4qrCNdp0rrrgZmBK6mrjS2qIoioOLF9/j4sU/8O67YLUWkpe3AL9/iJ6e44aquOr3vQ6H\nO2nRIQ2bzWV4pKAKyPQxFt8x0tV1TL9vzVt55MhWrNb8qasqqV/T+NqsXbtHb8MSOXfaY2PRoJ07\nr9HbuaxZsxuXy8uaNbsZGmpjdLRLTxd2ua6aVx7OZMh7lGA2TU2vAl1AP3CYbPGeCvMb0zyeiqJc\nBawEWhPvKQiCIAiZYaaXL1H0VLuOzzcQ5kHMz69AUWyMjw/rkb9AQI34KYqViYlLTExcirpWMDiO\nzVaA232d7pNMRFnZ9fT1nTCegVhRy1Tw+S7E3K4oNnp732T7dneowJGaQuVweEJjWMG6dXtpbX2Y\njo7f4PNdwGrNZ9Gi22lvP6Sn2p4//zt8voGo1+a++06HzV2kn9Mo4rWeoTt3XkNFRbUeATV6YMvK\nvkBNzfZpzYEgCBpOw88rEe+pMBuY0sczlGbbBPwkGAzujXgu+OCDD3LVVVcB4Ha7WbFiBTU1NcDU\nt3jyWB7LY3ksj+
XxXDz+oz9aQUtLA5OTf4zDURT1vMXSyMDAGVpb3yE/v5JVq5YQCAzS3Kz6Opct\nA4B331X//+IXFzA21kNX1zWMjHRSVTWsP2+x5HH11T4gqO8febz2+IMPigkEBuM+b9bjlSsXMTLS\nzbvvToQ9399/EzZbHn/2Z2rV2KamJkZGuujvf5ivf/0lXn/9Q158cS1XXz2un2/Rotv4i79omtb8\n/+53Z+ntfYNlyyzAZNj59u69lZYWdb7vuGMj69btzZr1I4/ny+O7gHZqahYBjTQ1vZll44v1+B+p\nqRkGCmhq+h4Q/f4V//ELwP+gpqYC2J4j95vbj2+//fZ54/E8evQob775JgMDamXzjz76iF/96ldJ\nPZ4ZC09FUezAC8CBYDD4rzGel+JCgiAIQk7S3NzA2bO7CQQuhm3Pz69kdLQLh6OU0tJr9eJCpaWf\nZ3z8UiiaacHjqaav74Tuf0wVq1Ut6mP0i84csb2fDkcpixevY2TkXNxCQGqU9GJofzdbtnw47RRY\nn2+AnTuv0YsRORylbNnyAU6nO2EBqHQwsxqyMF9oQG13chI17RSgnuzzZMaiBrVNC+TOmC9fpLhQ\nhh5PRe0u/b+BU7FEpyDMBNq3SIJgFrKmhHgMDJzRhZXmz/R4qqmre4Wqqnq2bPmAr371BbzeOgoL\nr2R0tIfXXvsALSW2t7cVu10VN1N9QRP+XQZgYmKYrq7mNEdr0ceYHrFTd/3+fj74YLfuv2xq+nbU\nPgsWqD3eHA4399zzZlwhF9krNRZq2q2W/qdQXFzFkSNb8fkGTPP0Gv2kkX7ebEbeo2YSrcemJjpz\nqeXJ9Nu0TG9NNaCK3fWolXoFIT0yEp7Al4BvALcrinIi9O+rJoxLEARBEOYEo0gaHHwPUEXnXXcd\n1cWPVvTG6XTjdLpZt+5ZJif9jI11R51vwYJqqqrq2bz5JFVV9TgcpWHPxxeL6XwzbmHLlg/Iz1+Q\nxjGxiBTFxsfh42lubiAQGAWsTE5O8Mwz1XrPz0hSFXyFhV79Wr29r+v7a77RTCOUZlZDFuYLmnhb\nAdQBh8id6q6NqJHO2RqzJtIPIMWIhOlgiscz4QUk1VYQBEGYYcxMoQzvQ1luKMqjsHDhl7njjr0x\nz//LXzqjUmq1VNzh4TYKC704HMV0d78clbprJP1+nqrodLm8YX1H06WsbAWjo12MjnZFPWe3u9i8\n+W1OnPgpbW3P4/P1MTk5QWS0tLBwMQ888EnU8ammyk7171T9rZmm1kYSWSRKEOa+x6aW6luAKiTN\nHoOZ51+PKjqryS2Bnh1Iqm3mEU9BEARBmBOMkcm+vlOmpVAao2Iez0rDM0HOnTsW9/zGViWKYiMv\nbwElJdfQ3X2cS5fa6ek5Tnv7AaxWZ8zjVSw4naWk2jJFUWxcdVUdR48+yP7967HZ8pMfFPGn32Yr\nZMmS9ZSXf0Hvk2l8DiAQGKK19WEGBs4wOtoVEtjhotNqLeDrX38p5hXVXqkVOByJP6hqKbVadDhS\ndKaSspsIsyKnwnxC67E5V2vCGEW8mvTTWJOlv5oZpZztCKswF7z33nvk5eXxzW9+0/Rzi/AUcg7x\nughmI2u1e9m8AAAgAElEQVQqNzGmbw4Oqu02pptCaRQ0q1f/XBc9a9bsQVEc+n6lpZ9n9eptMQVQ\nRUU1AO+/n0cwOM7YWA+9vW8AasQQ1JYhbvdyLJZ44nMyFHFM1jJFFbb33/8+Y2MX9Hm4cOFE2Hhj\noSjhf/rHxy/R0XGE999v1Ptkqvs59HtSW530Y7Xaw46120vYsKGFwsLF3HvvKVwuL7FQe6Wep7Mz\nvFdq5DxqwtCYymwkVz2amZJb71HiA0wP7QurIqCX9AWiUViuInruY/tAp7em5lqkC7PB97//fW68\n8UbUUj7mYlofT0EQBEGYTYyRybVrn6K19eGUUygjU3M1QQPQ2vpwWB/KpUvv5sMP
n8HhKOarX30e\np9Mdtn9LSwMOh5tAYJS8vEqCwT79WK3HpcXixOFw4PerIlEtCK+hkJ6fE0AVti+//H9H9MGM3avT\niCoup65ptRYwMTESYz8/Docbp9ODz9dLR8dhFMXOpz61FovFjsVip6ZmB06nmwce+CRhunM8b2Xk\nPCbrzTpdj6ZUs51NNCEEqoCajUqrM52uaiaRY20MbesHDqMKxHxUAXkW8ALFxL8vTVh6gAuA1h94\nOXDacP65SiUWcomdO3dSWlrKtddey/vvv2/6+SXiKeQcWk8kQTCLy3VNZZq2ONcYK52eOPFTRkZ6\n9Cqoye4tMnKWSNCMjJwjGPTj8/XS2vowEC2ABgbO0NNznLGxLq65ZjLqej5fLxbLlNjUBGnoERZL\n4ihlOMY/3Qr5+RVYLE7Gx0dTOtpmK+aee97EYsnH4SiLm57rcJRSU7ODioobwsbd3/8OX/vai+Tn\nL+DgwTp9jo1zunPnNWFzH68qbbpCcrrVbXM9Uppb71HTr7Q6fXKp6M3zTI3128AjQE/oOa24UVto\nn3bgOInvS0t/tQCDhu1doWNiRymj15REqueahgaoqYH162FgGi9BpscPDg7y6KOP8i//8i8z5kUV\n4SkIwmVBQ3MzNfv2sX7/fgZ86RRumb9k84fxVNtvaOmYkfeiPf7v7eXcuPP/jXrdjYLHas3H7x/E\nas1DUay6eI21ryaOIgWQcR+HowQARbGiJRaVl69k06ZXyM+vDJ3VmMJkxWo1ir/o9Can04PDUUpe\n3gKuuOKPQtvK6e5+mfffbwwVI0qWnhu6mtVJUdGVLFhwI35/X8woqcNRyj33nMDpdFNb2xj2XHn5\nCiC+eFcjr+fD1lWkt1J7fScnA3i9dSkLyel6NKWa7WwyFz7AuRC706EBVRBq+JkSzYcBO+qcafej\nfVlVAjwWca7PAg6gAlW49kc8n+5c5JJ4n5+cOQPHjsGBA6qInO3jf/SjH/Hd736XRYsWzUiaLYjw\nFHKQ3PK6CNnCmYEBjnV1caC9nYaWlrDnLtc1lc0fxtMVxUbRMzY25UXss32ak75S/XXXBE8wGMDr\n3ciddx5iaKiNnp7jTEyMcf58a9Q1a2sbcbmWYrU6dVEaKYCMQrSi4j8oLFxMeXk1oHomh4ba2Lfv\n1lC7kMjU2gm9yq0qQKMLC/l8vfj9/YyN9dDd/dvQtgHGxnrCfJnxcDrLDec6T1PTt8KKIRlRFDsV\nFdfrAtrpdLNw4W2A6nHNy/Owb18N/f3vAFPrR5uDBQtu1rdbrfkxv0DQXt/OzsNYrfYZT301qw/o\nXJFb71Fz4QPMlaI3ZyIe24ktmrX7WRV6fBG4nfCIZBcQQH2POUb4e8oiEs9FA01NK0jFCyrMHgWh\nl6C6GrZN4yXI5Pg333yTI0eO8MMf/hBAIp6CIAiZUGBTI0/VHg/bVq+e49FkB9n8YTxSFCeLgKq+\nvQrGx4fp7DyMzVZIVVU9i69QP7hpr7smeDo6DmO1OsKilXZ7cdg1NZxON+Pjo3R3q1Vpn3zy01Hj\nMArRgoJKHnjgE/LyykL3UoTf38elS+309rYS6ee0Wl36ddUiRPGFpM1WBGipvKlFODdsaGFiIhCx\nVdFf/8g+osFggI6O8CJAd9yxl6qqejyelXz00XN0dR3D5+ulsHCxvn60OVi7do++roaG2vQvEHbv\nXq7P2Wx/6SHVbLMZM1I8s7nojfH+jN7uzwE7mBKZF4Ey1C+mqlAjnGWhfatRxaQxImk81xeAL4V+\nXgGsQU3bjTWnDaHrvhU617LQPrki3ucvjY1QXw+HDoF7Gi9BJscfO3aMjz76iCuvvJKFCxfyT//0\nTzz99NNUV1enP5AESB9PQRBynobmZs4MDFBgs9FYW4vbGV0xdMDno6GlhW2rV8d8XsguIvstGntr\nVlXVxyxCE6tXZOTrHmsf7Vo33fRYVIEirShN
d/fxqMhiVVU9Doc7btEa7bw+X79emEf1dlpRRaON\ngoIrmJjw4/cPUFl5C11dL0f4P9V9S0quxe2+hkBAFdbxsNmKGB8fDtvmci1laOjDsG2LFq1h7do9\nUXOrEa9/ZuS+DkcZ99zzBi6XV5+roaGzFBZ6GR5uY3x8GL9/6oOv9tql0k9TCgLlItMp8lPDVDGi\nemanGNFsUsPU/S1AFZG/B5agFg2qQPV0vkT4l0mLgHdQo56LgNcAX+iYk6F9bkEVmk+EHmtFhOoM\n16xELTKkvRbG8WjMx3nPPrK5j+fo6ChDQ0OAGu38x3/8Rz766CN+8YtfUF5eHrZvJn08RXgKgpDz\n1Ozbx7Eu1TdTX1XF7jVr5nhEgtnEEoyRpCJmUtnHSKTQsttdBAJD+jgOHqxLKIibmxvo6zvF4OBZ\nCgs/xYULrwNgsThYuPDLnDt3nMnJ5EWBvN6NrFu3F59vgJ07r8bn68VudxMIDGH8sLpkyXo++WR/\nxNFWYkVHi4qupKhoKRaLnXPnjhEMBrBa81m06Ha+8pUnosS3zVbA5GQgSvhaLE6++c2usLmIRaLX\nLhapfNkQOb54AtW4T35+BUNDbSJoZ4Qa0heR61Ejb9XkVrQtmcjWnn8FVTBqeFCLAPlDj50Rz2tY\nUFusjBCdBVEJ3IEqWGNdX5tTDeNrEfncCuBojPELZpPNwjOSv//7v+fs2bP853/+Z9RzmQhPSbUV\nco7c8roIs0GmabSyprKfVNKCW1sfCatsG4t0Uy61lNCyshV4vXVs3vx23KJCWsqocT0Zq90ODJwK\nbbUyOemno+NwSqLTYrHT3/8O27e72bnzaiorvxxKK76EUVDa7S5uvfVnRBcnMorOqeeGhz/WfZYO\nRwlWawF2exHd3b/l8OF6fQ6Nflu7vYiqqvowz+jkpI9du5brVXvt9pLQ/2rqcnn5St1Pm2zejSnV\n2vmSpeOm4gc27vPxxweytqhWPHLnPWo6PsFcTfE0FuOpRE2LXctUaqtWvdYoKq2ovTr9hm2xfz/V\nlPpBYqfedwFPEr8YUGNoTBD9WjQCG2lqugnYiIhOIRaPPvpoTNGZKSI8BUHIeRpra6mvquLQnXdK\nGu08JRXBmG5Boli+0chtmuAtL/8CPl8/LS0PhUVLkwliTZg6nR6Dl3IixjYj4cWFNm16jdHR8wQC\nF/H5emlrexaf73xESi4EAkO0tj4c8oFGoyhqlDUWPl8vExOjjI2dx+/vD/N4GsV1Tc121qzZzd13\nv47FMvW7NjbWpYvSzZvfCv1/kqqqeu666zesW7c3JbEfKXIjizrFIhW/qHGf8vIvJt1fmC7TEZHZ\n7M+MJJZf04YqLrU+nNp7T6woZnSrpaltVuBOwr2bidB+/2NVvHUDrtDYPkT1jxqf2wv8D9TU30Re\n0Jo4zwnC9JBUW0EQBGFekEo6LkylXfb1ncTvV1sQaKmc8dI7jdudzgoqKqqTpmka02xdLi/nz7cC\n4HCoVWLHxnrp7j6u719evpKioisZH79ER8dhLBY7mza9Rnn5F/jVryrw+XrDzq+l/Uam/2qpuEYs\nlnzuu+80DkcJO3deg893Puz5SG+oxeLA47kBh6OY1at/zvPP305BwSIcjmL9vn2+AXbtWs7YWJd+\n7dbWRzLyZUa+hslSmSH9FGsgrXRr4XLls6iRxTHUdNQy4HWmem6WoqbJjjDVP9OKWn12D6oAj+/H\nVlkGdAJDMZ6zMyUujdiIjoIuCe3rA64PXf8qpgTnYuCTGOeqIX5qdKLnhOmQS6m2iRCPpyAIOUMq\nhYCE7CJXiryk6t+M9G0aRdMHHzyF399PeflKyso+r3sBNW+jUaAl8h1GXic/v5LR0S69P6bL5dVF\nliY4a2p2hBU7Mt7H0FAbu3YtY3LSp+9/yy3/kxdeuJ28vAUMDbWxadMruFxehobaeO65W1AUK4HA\nCH5/H1dc
cQvFxZ9maKiN/v538Pl6sVjs3Hnnb3jnnX9jbKxf927a7SWUlHw2VIFXjcwGgxNRIj3W\nnCfzZSZbS5HnS/XLhFjkyroVspEG4P8jtcrRsTzUTtRIZjD0L955FFRBG91LN1pg5qOmMmuCNNYx\nGvWoKbS9ofFVAx2AF7U4keYJTeSvzVXvbfYiwlNSbYUcJHe8LkIsEvXTnCuycU01NDdTs28f6/fv\nZ8AXK2Vr9kg3hXWuSNW/Genb1CvgDpzRxVVR0ZVhrUD6+k7i9W7Ue1Rq/UJjpX9q68mY3llX9wpV\nVfVs2fIBLpcXmErTjUxFjXUfJ078FIejBEWx43AU43CUcPTog/h8A5w/38rYWBetrQ8D4HJ5+cY3\nOnC5qvD7LwBBuruP09b2ot4GxWJxct9977Fw4a16CxSvdyNebx1bt36kt4IBi95DVLuXyFYzxrEm\nS3uNtZaM6c1A3P6o6QrHXFm3qZCN71HzD2Nq6SkSi04tHd5D7PRZH1M9NhOdJxjneIiOavpRxWYX\niUXnClRP52uokc5qoBVoB46jeULVNZUoNTpXvbdCNiPCUxCEWUX6aaZGNgn02e65ONNoYmbDhqOs\nW/dsTNFUU7NDfwwwNtaD1eoItSCZ6heaSNDU1jbici3l0qVPePrplfh8/WHPp1PoaGDgDGNjPQSD\nAc6dO8b77z9JV9exuIJQTfM9GXYOi2XKOzo56dOFqjaWdev2sm7ds7S2PkIgMIii2NE+FCuKjSVL\n1idNYfb7B8nLq2Tt2qcSel6N400kEDPpvznf1q0w0xiLBZ1Nsm9F6N9FIvvypk9/jG2xgkbJoq/F\nqKL5C6i+zYeAt5nqBVoS+t9YbCiRvzaXvLdCriCptoIgzCrSTzM11u/fz4H2dqo9njkvmpRuC5Jc\nITIVE6a8f62tj9DXd4re3t8xOekPS/VMJ/0zMq03WXpuvPRQ7ZqRlJWtwO/vZ3x8lMnJABUV11NQ\nsIiPPnqOQGCqoEh5+UruuONZdu1azuTkKIpix+NZhdNZFtVeJF5bFK2lS7wxx/LMRhJrLaXrzU01\ndXa+rlshVRK1O9GeO8tU+mkA1ZPpCe0T7pMOJ57/ci7ZiFo0qIZwb+Y21Pt9DHg49Fh+H+YCSbUV\n4SkIgpCViECfeeL5EZubGzh7drcu3AoLF7N589u6eElH0BgFY3n5Su666zcpC9W8vEruu++07vts\navo23d0vMzbWE+YLjRSKTqdHLy5kt5ewaNHt1NRsp7X1Ec6e3UUgMBh2TaezQi825HRWAMGo4kQA\nXm8d69Y9m3DMkH6/zul4c5MJeCPi9cxmkvXCzIQawgWY23CtQdS0UyN1qIKyM8ZzELuoT7bgQo1u\n/hR4CjWKWgTcjFpoSNZ8NiDCU1JthRxEvC5CLDLxRJq5pszyZrqdTnavWSOic4YwpqKWla0IS8Uc\nGDiji06HozRMdELy9E9tPWmpp07nApYsWZ9UdAIR6b1d7Nz5Gd37uG7ds9x337u4XEux2QqYmPBH\nHVNevhKPZ4Vh7G/p6cTqfamiU2vjYrMVhQlRn++87gEFtXKudt6amu0Jx2zs19na+khUq5p4pOvN\nTTd1dj54Pefv3z1jeqtZr43m1Xwn9FhLLTVe69XQcy7DPsWoFWtfi3FOK9kpOrXWK0Oo0cwzTKXu\nDhPe3iWc+bumhGxGhKcgCPOCbPFEZss4hMQYCwmNjHSGPacJHK0C7XQjZAMDZ+jpOY7P14PdXpjS\neWprG0PeShWf7wLt7QfYufMaXYAWFl5Jd/dxfXswGGDJkvV4vXXcdddvWLNmj17IaP/+dWzf7uZX\nv6pACX0P7XCUcvfdr+N0ehgfH2ZyMvwLEoejlPvuezfUi/NtvQBSvPFrntmyss/j8w1w5MhW+vtP\nZdxTNd510i00NJ+8nqnMU26hfWli9B1miiYwe1GL62jFcbRrFTGVJrsaNR
p6LfBc6LhYXximUt12\nJlmAGnE14gZuC/2szZ92j8UR2wUhO5BUW0EQ5gXZ4onMlnEIiYn0TBYVLaWo6EpstgJWr/45ra0P\nZ+wNnG4rkBdfXEtHx2G9P2dkCxe/f5j29gMptXbZvt2tR28VxU5eXjl1da+EtXMxoig27r//fb3y\nbjoYU2Gt1nwmJkax24vZvPlk0vNNN402FeaT13Mm52luGECNyJnpO4zXBkS7Vj9qJND4fA1TabnZ\nSDmq8OwOPbYCbwBXEj5/2j2KnzMbyfZU25qaGlpbW7GFikAuXryY06dPR+0nHk9BEC57ssUTmS3j\nEBLj8w2wa9dyxsa68HiqsVic9PSovi6zPtD7fAM888wqCgoWYbcXR/kLY3kPm5sb6O8/RW/vG5SU\nXMvISAelpcs4d+6YLmBBLYKk9d50Oj1YLFYmJvxUVFzPmjV7aG19hIGBM3R3v0wwGECtkhkMuz/j\nHIDqB928+a24IjFyvNo1tMdHjmzVhbaiWDl/vjXl+cykX+flhMyTRiJvaDIxG/l8A1O+yGxm6ndY\nZSmq8JwJf6wwE2S78Lz99tv55je/yZ/8yZ8k3E88nsJlhfgShFhk4onMZE1FejrFm5kbOJ1u7rvv\ntJ666XCoqWlmpGNq68npdIelxUamnMbyHqpi8TgTE6P09b3O2FgXfX2/Jz+/kuLiz3DwYB1Hjmxl\n9eptrF2rptS63csYHe3G7++no0Nt8aKdOxgMYLXmsXDhl6PuT5sDr3cjRUVeyso+R0vLQ3FTOCPH\nG/k4PBW2LK35zKRfpxGzUlGzqY8uTK0ps+YpOzH20Uz22kV6Q43HQuI2IMY2IQ2hn7NddFoJF502\n1F6eifyxiedTPksJsZhpYRyZMC4IgnDZ8Y9vvcXfDQ5SYLPRWFsbJRobmps5MzAQ8/nn29roGh0F\n4NtNTTy7bt2sjj1byYVKolpRG1A/0E8nHVO7z8HBs7hcXuz2Ymy27+nPJ/IXDg6qvQLt9mJuuumx\niN6bFmASq7UQn09tFt/RcUSvPtvS0oDD4ebcuRbGxqYq0JaXr2T16m0cObJVv64xShp5f1r/TmMK\nZ0tLQ8wIZeS97Nnz+bDxZzKfxmMj5zadNaSJ4UT3kQqaVxugoaWF3WvWTOs8ZhNrnsxhJqvLpoom\nJrXxJLrPSG9oXZJjY7VPaQSeR+3Fma0UovbfXAK0GrYXMSUmS4nt40xnPoWsINNfQxN+jf/qr/6K\nv/zLv2TZsmX89Kc/5bbbbkt+UBpIqq0gCFlJPLGXSATGe17bdnZwEK/LRbHdHnZszb59+ofM+qoq\ndq9ZE3aewUCA492qt6YyP5/T996rH1u2Ywf9frW66JVFRfgnJvBNTHC9x8OetWsv28inUcg4nRVU\nVFRnrQBNF6Mg+tfea/loLIgDP9/lf1PAaFhqqeYvtFrzw3plOp1unnvuVrq7p9J7R0Z6ovpn5uUt\nYGysB4+nGofDTWfnYTyeakpLr43q1VlQsIj6+nf09iuJhF+kqDOmyRqjacb9Ir2vkeM3QxAZr+f3\nD6ad/qylojqdHkpKluFwRKc4p8L892pHfkI1Crd65kakxPNmQvR4tW1aumyyY3cQ3XfTgxo1zObP\nqG7gj4C3UNu8aCxArcBbCpxAFdORaHPiAZYxJbZz/z04V0maaltDZr+GGR7/6quvct111+FwOHjy\nySf5sz/7M958802qqqrC9pNUW0EQ5h3xqsMat6965pmodLhYx2nb2kdGON7dHXXOgpCRvtrjYdvq\n1VHnOTs41W6ia3Q07NjrPWqz8UKrlUG/n67RUfr9fg53dl7WVW216JjNVoTPd35GWllkklaZybHG\nFNOPfXbeYxnv8Hn+i29ERTa1CNXQUFtUWq3dHp7eq82Z3V6ib9+06VU9tVJLrb3zzkMMDbWFic7y\n8pW66DReN57oPHt2d4I02aljjPfa2v
pw2Dkjx28GxutpEeF0zq/dR0nJMnp6Yqc4p0JjbS31VVXz\nVHRCdKpq8uqyxt+Zo0cfnIHquo2on5YjhWOs8WrpsjeHfn4VVWjFEp27iRadCmrV27kUnZH3GOvz\n+gDqPY8ZtpWg3m898AGxRSdMzacVtS/pAeBb0x+uMPNkWuQ5w+NvvPFGCgsLsdvt/PEf/zFf+tKX\n2L9//zQGEh8RnkLOIb6E+UmkpyqWGIRwkbiooCBKZGrPF9ls9Pt8YefSKHU4ws75PZst6kOm8Tqv\n1NVRmZ8fczx71q7F43RyaWKCgVDkE2BFWVnYfmbMyVwxnXFoAmDBgpuBmWllkUl/xkyONaacXll5\nAwCryor5G+8wd955iN/+9s2Ex2jzECn2tMebN79FX1U9P7/zEPe5vFSHxJ5RTE61fHGn3CPUeO/G\nPqVaBDOWUE2UKjwTfkPj9TZteiXt82v3kalnd7a92sm+CDH/717kJ9REok8l7AuXj/fPQG/UR1Cj\neFuZSiON15NTows1VfYCcDLG2OOl0mZDlDPydS6JeKwJ0ULDz6XA14AHUft0JkIT537DtilxK5+l\nspDkv4Yze/wsIMJTEISsIDJSGS/iYNxebFf7HRrFYGNtLR6nk+HxcQ53dOjnyrdaAbApCk0bNoSd\ns8jhiPqQabyO1+Xi9L33xhyP2+nkhooKAFaWl7OksJBypxNPSKiaNSfLd+9OKPpmUqROpzepJgCM\nUTqz02wz6c+YybFGwbX7jjupr6riyIZN1K2Ln9IZS6RFij3tscvl5ddrdnPY6Y5bNkQ735YtH/K1\nr704rb6Wxj6l8YSPcdw/aD0Ztsa08ba2PmJa9Cs/vwKnswKHw43DURI3apuMXCvCk8kXIdMj8hOq\nseBObIy/Mx7PCv1n875QioxqGrdF9uTU0HreFgAvhY5bCJQBawmPFEZ+5E2YETgHXIp4HDRsv4B6\n/xtQ5ydRUaFIrg/9vxLYnvkwhZkj+a/hjB1/8eJFDh48yNjYGOPj4zzxxBO0tLTw1a9+dZqDiY14\nPAVByAqm46mK17ok1rlu3buX4z09wJSPM1WS+UoHfD5WPf00iwoKODUwoHs+66uqcDsc+rEV+fm0\nDQ3FPU+8OdGIHHeYD9Xvj7q/ZONOlWz1u2XSn9Hs3o6ZFlOKPH5TSHTGcqxlSqx7T6U/ZCwvdKrH\npsr861OZGpm1SZmdwkDGdQOxi1VlRiyfZiLvJkAbcCuq6Pwp6qduY4RT80LagMnQv2zAguq97Elx\n/xLgI8K9uA7gBuJ7N7V1YUctRrQ9xj7CbJLN7VR6e3tZv349f/jDH7BarSxfvpwf//jH1NbWRu2b\nicdTqtoKgjCrxBNDjbW1afW/NJ4nkljnKnY4gOhU2VTGF6/CpXHfRQUFuvAzXqfu4EH9WKfFgm9S\n/eCTSgXcxtpalu/eTdfoaMxxG8dVmZcXdX9mVeZM97WZLTKp8JnKsemIyUyrqUYe/98cbm4bOMNy\nWwH5tY2Q5MN9OmONde+pRIDjpb9nEj2OxMxz5RLTraqsMjvVSyPXjXlfChgFUh3hAqmR+D05teM+\njyrMzhAuOhXgfOjncZPGahZfBl5JY/8bUe9fS5EuBa5B9W5C7NfduC48qCnMUlxIiI3H4+HVV1+d\n8etIqq2Qc4gvIbeJl7aZyFMVK4001nm0/bYeORIlkhIVCzGuqcjzNjQ3c7KvD4Ayh4NjnZ2U7djB\n2hde4FR/f1QBohVlZdR5vfp1jB/W8w0iOZXvPB9pbeXTxcVU5ufzVIwKuWE+1E2b4vpUPU4nncPD\n007DNb422eI7nQ1STX80tkEpL1/J5OQfJ9w3VlpqpOAaHThDadcxulJMvcw0VTOV1NR4v0NmprXm\nWoqsWSQqBgXJ/u5lWpFkrtEE0mFU8Wmcg8jcQWNvylPELpCkESQ7vJyxOA7Ee/+0od5fsWGbdm9a\niv
QHqOnEMPW6R/bt1I4pQk1VDk/Nlc9SwlwgEU9BEGaMWNHDWFGTRC1QItuZLN+1i9P33ZewEi3A\noscfZ1VFhd465ZHWVnpGRth65EjCtNMwsXbpEofb2/XUWUVR6BlTPUOHOzv1gkMepxOvywWKwt51\n69SfQxijhfWHDnG4s5MVZWXsqKlJOn/GHqE/fPnlqAhpZCQyMhJrt1rZ6PXSOzqqR2Mz7Uk4nSiq\nWSm/s02q0beBgTP4/WoD+qKiK3E4ihLuGysyGhnxSjfyl2mkMJUIsHGNpXusmeMQIkkUFcwFEgln\nYxpxBfAcU1HNyhjHLUctOFQNvBbjWjayI/oZWWW3HNXHaWyPshZVjK9AbQcDU0Icol/3yMi39nx/\n6Dy5+sWEMJ8Qj6cgCDNGLE9YLF9mrP2M2yrz83UBBqrQW+HxUGizsaOmRj9PpCdSo76qip6RkZj+\ntEi08XVeuqSLXVAFrtvh4HCn2kttRVkZe9et4/bnn+fC2BiD4+Nxz20UgpFjbmhu5vm2NrX3Z0UF\newxRX2OP0I1eL3sTpOYm8nsO+/2meTSn4/eM5w1Mh0w9lNMhVR9oOv68VPdN14Nqhmc1kzmei9fH\nbF/t5RRhnXuMgvLnwMPEFs41TImpCqZSZzWBZjzus8A51IJCdwH7yA6RCWqCoZUpwWlFFYKtwBdQ\nxxo5BwOk94WC5octQm0zsyd0XLrnEWaKbPZ4poP08RQEISuJFZWMlVIba7+odiYhD2ORzUavz8fh\njg4cVmtUOq22n1bxVotcvtPfrx+vtVmJhTY+7fhyp5NyhwO3w8Evb7uNOq+XjV4vRzds4KcnTtDn\n8ySAV+QAACAASURBVOmiM7JNi4YWJTSOWUtZfeqDD6Z6f3Z0cPXOnXoaq9YjNJUIaay+o9p8mtmT\ncDrniucNTIfZr/qZPP1RI5300FT3TfXaxv0dDjcHD9ZNu7rsXLWnmS7Ga+7ceU3a9z0XY85+ItM1\nZwpjBduHiV+K0xgN/WLoZ2NU0HhcFzCI2j7kWbJHdK5FjWYaP48XAi6migVF3gukX6K0EdXLOYwa\n4dTWdKalUgXBPCTVVsg5mpqaqEkhTVGYe2IVpdEic2eHhvAWFlLscFDicFDhdOIOFQAyHptvtfLg\n0aN8rqyMm+12vU1KpIjRzvu58nJustn4n7fcwsOtrWGRSwvox3/xqadY6nJRYLPxPZuNu+64I+bY\nO4eHOd7Tw+HOTh5ubQ1Ldz0zMMDFwFTK1BNf+UpMMZYsLVijMCSqNX/pnrVrUy7qY7zGU2vX8nBr\nK9tWr+aR1ta4RZimQ7x0y0SYUZwom4vORKaHJnqPmslU0kwLHM1Vexoj6UQhtWvabEX4fOd1AZnq\nfWfzmorErL97yed3dgoVpe5LNaaTamPKR43o+VBbhWiRPWNrlQDR6azTwYIqwl+I87xWNTeSItQ2\nKM2oVXcJjVvrqTmIKg4row+dNm7UKrdaFeDEa1o+SwlzgaTaCjmHvFnmNsa0Sw2P00lvKAIZmYoZ\nmaa5bfXqmCImXjqnMTX0/YsXGQgJxXKnkwuha942NETTX/wFn921i66REcYmJii22xkPBrEoChd8\nPpwWC5PBIEHgS5WV7L3jDrYeORKW2msFbq2sjPIyxkov1sa1oqyMRYWFOCyWMFGdbnQyXmsZM9Jc\ns4FEqaTZljI5V+9RqabxxpuveHOcyvymkuqbynnSaaeiXXNsrJ/OzsNptyIxu6XOTGLWmko+v8na\nl5hFvPTPVFrD1DAljkEttrObqdYqt4Yem9U6xYNanCcSK3AWeDBiPEa0scGUZ9MFDMXZJ5J0W+Wk\nnlYrn6VmH0m1FeEpCMIsowmuErudi4GA6p10OuMKLm3/IpuNm6+4gkUFBTF7YUbut2fNGh5pbeVU\nXx9nBwd5ZdMmvtvczOGODlaWl9M9MkLn6CjFdjsnN2/G63Lh3r49
LIKpoQBWRWHc8F5W5/VS7HDw\nn++9p2+zMPVRR+vhqfs3PR72rF3LzXv30jUygs1i4aYFC8KipPHEoxnznaqYzcVCQJdr78dIjELq\nB0533I+r6c5XuvvHE5jJztPc3MAHHzyF399PeflK7rrrN7Pmb71cSP7lhFl+wOn2Fq1hSsTFE2Sa\nOAa1sutypnpZPkJ0L89MsKPeQ+T5lNC1J1E9mu8DHRH7lKJWn53ybDawijMsoIATNOLHnVTg15B8\nPoRcQYSneDwFQZhlNI/gW5s3617BPWvWxPUNNtbW4nE69Wjgk++/H7MdS0V+PjZF0fdraGnhzMAA\nx3t66Bob4+HWVv06v7nrLpYWq6XqBwMBlu3aRdmOHYyMx/YEBSFMdAI0nzvHzrNnw7ZpolNLqT0z\nMDDl3+zspKGlha6RES4GAlwI+VS3Hjmi+01j+V8zbV+SriczXrubbCaXUiZnEqMv1Oigi3Qvpjtf\nQ0PqOrfbS7jppseS7h/PO5nsupHVgdPxt6bjh72cSe4xTtS+JB3PZ6IVqBHr3Mkq3NagptCuBzai\nOsaOh67zbcJ7edpRo4vTQUvbDRAtOhcBt6D6NvtR7zNWRHQQ+AxqJBbAzRmu5BitHMBPA4tJHlU2\no1XObPl2BSE5IjyFnEN6T+U2mrjyuly6yErUw9PtdHJDRYX+OBASgJq404TZ821tujjUivyE9dC0\nWqk7eJDhUJXYtiE11ckK+E6fpt/vJxAM4rRYuL68POl99Pn9+CfDU7lcdjvrlyzh2tJS6g4e1Asa\ngVogaNvq1dgt6tuuBfBPTnKgvZ2rd+5kyX/9F7c+91yUwMxUCCaa21iYUQhotsm23o+pvEfN9EfB\nRB9X052vwkIvAIHARVpbH066fzyBmey6xuNqanYkvc7lhFl/96JFerKVmIqAjEUqginWubU+lbEE\nmbHf5+9Q/ZLGL+N+DbwV+tkOrCLc95kMLVCzErgt9LM1Yp+VwDuE99gsI7yQkXbNCVRxugxtbgtC\n46jGwza8wFYSvwMkmg8jiV7H2K+hfJYS5gIRnoIgZDUNzc0MBgI4QoJtZXk5VxYW4rRY2HrkCPva\n2jjW1aW3HSl1ODhxzz24nU4q8vPxhITt2YsXdQG36umnGQztPxFxva8tWcKCUH/OVLEp6geWodA4\n24aGONbVRa/Px6KCAr0Krtvp5LW772ZxYSGrFy7Uj+31+WgfGeF4d/eUEH3iCW7du1cXr5p4ziT6\nmQpmVsCdLcyKeM1mXGC6H+dTJdHH1XTny+FQP2SnGiGNJzCTXTfbvkC4PEi2EqcbcYu3Ao2/ZZpA\nM547UQVWbSxFqG1VDgDG96gxpn5zi1FblfQBi1EjlLEwCkstq+VK1KimhymB+TnUCrS/CY2tMfR4\nI2qqr/aXpIQp0arhQ5vbRhqpp55DLMOtR2qvJv67TqoVaRO9jmZETYXLhZ07d7J8+XKKior4zGc+\nw0svvWTq+cXjKQhCVhHpMVy+e7few3NRQQHv1NdTd/CgXjDHrih6FNSCWjRoPBjkeo+H0YkJvaJt\nZV4eXWNjVHs8OC0Wvc+l2+HQxd/K8nJ+c9ddACzftYuusTF9XEU2G8MxUnEVpj6uACxwOvmCx8Ph\njg48TifLSkoodjioyM8P86YCNLS0cKi9nQG/Xz++0Grl0kS4HF5cWMjbmzeH3XesQkHZVmQn16hh\n9txUs1XCxQw0D6XVms/QUFvY+pI1l+skW4lm94CsYeq3rA5VfCY7dwPwPDCKKjSXh85RDTyFWuG2\nK3SuAKqYLEZNg60GrkUtAvQuagQyiCrGalArzx4L7T/I1DxobU5AFa5vJxijNodam5cS4AHgCKro\njDW3xp6bw6FtmbzrJHodpY9ntpDtHs9Dhw7xp3/6p+zevZsbb7yRc+fOEQwGWbRoUdh+4vEUBGHe\nEJla6jOIsPFQaqvWp7LYbufG
BQv05yeB8z6f7qk09rOsWbRI7ek5MsKr59Um5FbgKwsXsqykhDyr\nFYfFwuefeoq7Dhzgc+XlrF+yhCWh6Gq8N8vIt94en49jnZ2sX7IEi6JwvKeHA+3tvBCKzGr3paXA\nVhvSiAHyQqmuJaE+otUeD29v3ozb6UyaBit9CTNjNuMCqSbQZQNapHJoqC1qfcmay3WSrcRMekAm\n83BuT/HcZ1CF5UXU1iWnUYXhIdT+l6dD97AqtP8EqujUPJRtqD7QXuBroe2ngBeBvaFjTxI+D8Zx\nJhKdMDWHH4TG4w6du4v4c6sdc7PhOpm86yR6HaWPp5Aajz76KI8++ig33ngjAAsXLowSnZkiwlPI\nOcSXML+JFFfXG4RZz9gYDS0teF1qwYjBQID3Ll5kQV4eoApRjRVlZbxSV6enjZ4bGaHX56NzZESP\nkE4A+z7+mOMtLYxNTNB6/jztly6pfTs7Oii026lyufBNTjIYp/BQLALBIEc6OsI8oFq01ON00jk8\nrKfL7omIWg4FAtR5vWHFl7SUV2Ma7COtrVFpt5pHbo/zh/zDpftZv38/Dx49OuPpufMFs8RgKu9R\nufhRMJZ383Iq7NTc3MC+fTXs378eny+zZOx0zzVzf/fMWomxRKYx/XMVU4WBNpLeb1mkP7MHNbr5\nCLAQqEIVlu8a9lnJlGDUisAVAz8DPkEViDB1/17C5yGdd4N4c5hobrXn9qRxnemMIT7yWSr7aG5o\nYF9NDfvXr8c3kP57TCbHT0xM8Prrr9PT08PVV1/NkiVL+MEPfsCYIfPLDER4CoKQVUR6DPesWUNl\nyHOpidG24WF9/56xMW654grqq6o4uXkzdV6v7qnUChjdvHcvL4VSVItCwhbUiGdktVoNh8XCsc5O\nPVU35j5K/IwS3+QkYwax6p+cxGGxMD45qUdBtchnucFL6Z+cxG618tMTJ+gZGYlb9dYYGb5m507W\n79/P9at3UFVVj8+9mpbuXg60t3Pg449zrkrtXHGyuYFv7KvhZROERTLMFDGxMd+xGsuDORu+zJmf\nq9QwRnd37rw6o/HMv0hxLI+hMWqopbQeRjUopLNWGlHFqpbdokUH/ws1qtgPdDK1ziuZ8mLClMgc\nBD6L2ucz2e/FbH01lItfQQkzxcCZM3QdO0b7gQO0NKT/vpDJ8d3d3QQCAZ5++mleeukl3nzzTU6c\nOMFPfvKTtMeRCBGeQs4hDY/nB/HahERWYXU7nZy+994wMeotLNT3L3U42F5Tg9vh4Oa9ezl27hyX\nDL04G5qbef/iRb30Q5HdrotTlxYhXbYsanzjk5P0jI3pwjRSYlqBP6qsxB5HfHqcTr2Crba/f3KS\ngdDYtCq3AK/ffTfO0L42ReHQJ5+w++xZXTBeHRKWxnnSIsNFNhvnfT4OtLfzg9aTrFmzmyK7GgGu\n9nj4YqhC70xUqc201Uu2YZYYSOU9amDgDI91LeGH7ctZvftnMzB/5pcvilUcaDZamWSLSNOiuzZb\nET5fb0bjSTdSnP1/92IlqmtRw2tRi/xopOtxc6OmxL5LeHQwuueyyirChZyxAu0YpFTUZ/6T/Wvq\n8sNWEHpfqK5m9bb0M0gyOT4/9AX/D37wA6644grKy8v58z//c/bv35/2OBIhwlMQhDkhnTYhRjHa\n0NzMqVAKiRX4QkhYRfbM/OLTT1Ozbx9PffCBLjqtwCt1dTy7bh17162jwJCaG8lkxOMg6OLQqihM\nAMfOnWPt4sUsLiykZcMGFhcWcrfXi8fp5ILPx1Ao4mlTFL0qr8ZVLpcurr0uF13f/CaeUGGkgUCA\niwbx3BsSlvcePqxvq8jPp8Lp1M9rFJbGqPGetWtnrEptLvb8TMRspo3abAV0s4D3WMbvRj0zMH/Z\nVckyk6il8XWxWvPnLPqpRXevuOJmfTzTXSfzr4JvrNRULZrXxlS7kRJgxzSvERkd1ASlhfB+nY
4Y\nY6s0XB/Uoj69qAJ0OdHiM52MAemTKZhDbWMjVfX13HnoEE53+u8LmRxfWlrK4sWL075mukhVWyHn\naGpqkm/q5gHr9+/nQHs71R5PWqKoZt8+vbKrxtL/n723D2/ivNNGb1lf/rZsy8QhBgU3hKYfCU7c\n0ha81tZOKSbUboKSJu1F0rO1djdtt/tuN+w53bNnu233fa/T9Lq63Z7Tbjh9NyRN/YKTNIEU3BQT\n/FGSOk1DIF+NuyTQGjDGIGHjD9mY3/lj5hk9Gs1IM9JIlsxzc+nCmo9nnueZkTT33L/f/SstRXhu\nTimpAsS7zTptNmxZuRI9IyOYW1iAx+3G9SUl+N2FC8A772iqnjxYmZaHhoYQmp1F7+nTcX0PDgyg\n+/jxGNIIALVFRTg/O6vkljptNtx7ww0xLrcetxsrfvYzjExNAZBupdTk111QgNkvfSluHpjrrVli\nuXv3+zE9PYqCAifuvPMVlJX5lHVqd+FjQ1+Ncy9N9RzmKph7a1PTjrTIgJHvqEgkjKbuH+G3M15l\n/rTmOHXklpPl3r1+jI5KLqH19QG0thp37+TPy/PPd6TcjlWw6joxg/z+3VO7vvoSb24YJyGFzf4a\nwJcghfE2IDbMFpA+B29ByvV8HsB3IIXn9nLbqB1l/TDucW1m29xBfl9T+Ylcd7X953/+Z/T09GDf\nvn1wOBz4zGc+g09+8pP4l3/5l5jt0nG1dSRaKSAgIGA1GKFx2u1o9/mw0+83RViKOdfXi/Pzkro4\nOxtn/sN/JfJlWGZkl9zzkQguGAxvdNhs6ONyRsORCIKDg9jR1KSosMPhMI5duBBHOovtdvymowM3\n7NqlLFtWVIQ9J04o2960ezfevuce+EpKFOKpJp0A8PJnPxs3D+mQvunpUczPXwQA7N27AZ///J+U\ndUzNBKSyL1+YHlZu+AcHg2ht7UZXS0vMPKihJq+5TkxZ2Gi2jvWru78eM38spBSIznHqkNShXDkH\n6ajJ/HnJBTMjq66TIIIYxjCKUYwudMGTAw8IMoMuZOYhiA+SURAgmfSojxGEFHJ+DBLRBCTS2Q3p\nwcxNkHJEtaICzEQM5FZ0gYBAqvinf/onjI+P48Ybb0RhYSHuuece/OM//qOlxxCKp4CAQFbBK3Va\ntSjVUN84f+3FF9Hzxz/iA5WVqHS7cW52VjEAsgMokOt68ornNYWFIADnZmeVZWpFlEeVy4Wpy5cR\n4Vxp230+PLtxY1x/tg8NYec77yhqJoPDZsNlIqnkS02NUlP05qoqjE5PY0zlFBeor8dLZ88qxPOD\nHg9WV1Tg6zffjNv378dQR4cSVgwgjvxqzZWaZKjX79lVh0hkHHZ7Me6++60YxVOtZr548LMYGemB\n19toODzQ7LnOZaRD4NjtbzGkW3C9mdu/v830HCdDrpwDq1TCxVAbMwU//OiXlbIAAujOE6Usf+BH\nVIkE4mtcakUFsE+rE0AJpLDgZNdZutEFRr8hjG4nkKvIdcXTKITiKSAgkJPQullPVotSDbXyNjY9\njXORCPpHRxGor1dKqNhtNiwQYYEIdSUluMjlWJ7VsAP3ut04F4koyimP8NxcnOI4cOYM2vbvx8Tc\nHA6PjSn9+cXJk3GkE4i65U7Mz6P39GksKyyEr7QUZQ4H3lIprWwu7ujpUYjnDRUVeGbjRgDAzF/8\nRcz2/Lzy749duKCEGwcHB+NIhnou/+edr2Dv3g04cM0j+Omhoyh2vKmcJ7Wa2dLSZfqG3+y5zmWo\n584MgWM2P4B066hHL1KZ42TIlXNglUqYTVU60yiWlbJGNGLHoiplmSY0VrQf28bAwHYDYelMiVwL\n4HpIdUP57VjOKA/+0xow2FetdszA6DeE0e0EBHIXwlxIIO8gak/lD7TMZ9TlUpJBfeN8fGICgFSz\n8+F165T2/vzaa5XtXt+6NaaGpvqLrqG6Gr+9804E6utxdO
tWOP7wB2WdQ8elNjQ3h56REYV0Mlfa\nuYUFze3VGJudxclLl3B4bEwhpaUOB9pWrFDmotzlUsZQ4XLpOsby83pTdzfeunAB/aOjCumskOdG\nDfXclZX58PnP/wknpi/HnSe1u3Aq7qVmz3UuwwyBU39HGQ3Ey4RD7FI6B0sNXehCAAEcwIGkYbaZ\n/d1L7ICcfjkbKxyWY9sw5nTMDI8OAXgGxkjkYoTNGj2mke2MGx2JeymBxYAgngICAhmD1s26mtAk\nA3Nv9cikzFcmuRdOzM/joaEhpb0nb78dq0pL4S4owH0HD+JDVVVKG7x6aYNEwD6xZw/6T5/Gjbt3\n4zJHUi8TaeZXqnHswgXUPP44ktHOKpfaYVEKCQaAS5cvo/fUKVyUCWNXSwtWlZXBbbfjuZMnFTJ4\n689/HkNCi7lapKMzMwqhLLFLLV+U50YN9dwx6JEqM+VStLY1e65zGekQODOl6K1GqueAkY3v7m/D\n7ZFw3vh15krNTyPwwINudOdAbmdiQpN+ORsryFxsG8ZyfVOpkbkYn1ajxzSynfVllAQErITI8RQQ\nEMgYtPIQjUIrfDRQX49LsvKoZarD57PVFhVhdGYG5U4nJubnUepw4JLKgCgTYLmdDO0+H4bOnsWo\nHO5rB1DucsU48BYVFOCjy5bh+MQEJubnMcGF/jZUV6PY4VDyWGvcbtzi9eLY+fMYm51Fo9eLp26/\nXXLbjUTQe+oUGr1eXJybw9jMDJwFBXjlzjvhKyvTdaHVO09m8gNzJZdQwBrwLrSv1Afw/7V254Vf\nZzruuVcvEucopp97bIXDcmwbkcj9GBzsQVPTLXC7n0yj3aUG5iCszmcVyAWIHE+heAoICGQQ6She\nLJyUEbRGrxdFdjsm5uZQW1iIp26/Pa5dXrn7TUcHAvX1OLZ1KwL19fjYsmUx25Y6MpPi/ufLl6NI\nVh6dNhtGp6bwoepqtK1YAXdBARaAGNIJQKoJOjqKkenpGNIJSPMwJIf3ljgcOCeTy49fc42i8G7Y\nswcDZ87gt2NjWFZYiBvKy/HuxAQuzs9jPBLBhr17AeirdnrnyUx4KdvW63bj9NSUIZVUIHfBFKWQ\ntxFPNO3IG7/OXHC9XSykrvYmVgbN1xxVh3smat9oaGhsG273SbS2noPb3Quh7PFYzPgKAYHkEIqn\nQN5B1J66OsDUuYbqalyIRFBXUoK3QiGFtBXZ7bjV60W506kY4oQjEdz6859jeXExyp1O1BQVKbUy\nf9zUhBt37cKc/H10+3XXoffUKcnZ1kAdTyP4QEUFGpctw7PvvRdX3qXa7cbFubkYNRSQyKmWOREg\nhelqGR1Vu914v8cTMx88vG43xmXSZwdw/N57lTBbI1BK3hQUoNTpxKMGSt4w1fT01JSizl6tyudS\n+I5i7rE3N+3AV9weU1rVwEDQwnqk5pCK620+lDUxck3ljtrrR+ZrYAplL10she+pfINQPIWrrYCA\nQI6Cd1XteP55JYyTgZUnAYA1u3fjnXvuwfahIVyYncV7k5MApLDUczIB+8jPfx5D4EocDlQ4nQir\nFMZUUVtUhMMdHVj+xBNKrVAe5zn1rwBS3qmroADuggLM64QAT12+rJtvysYOQAknBqTQ3OrCQvSe\nOgWnzaaE2ZoB7+AaqK/H9qGhpKVEmGratn8/gFiV1EgpksUkKwLx4N1jzdIXa+uRmkMqrrfDGFbK\nmgQRzNuyJrmj9maqBibvbPtjAA/B+tqgySBKmggIpAMRaiuQdxBP6K4O8OGfLIyz2u1WnpbZuW3H\nZmcRHBzEcydPKqVRKpxO3CLXvSyVQ1SZ2thQXY1ylytKOtNUO20A3r77bmwfGtIknWowMrlw5YpS\n8oX1k2FtVRWucE8UPXLZGK/bjQV5+YcrK9Hu8+HY1q1o9/nQ4fPhhTvuwJOtrQjU12Ns27aY2p9G\noQ6x1XIn1oNWOK+R/d
M3MEkPVprSsO+ofDK6sRKJCJBxz03rEEQQfvjRhjaENY6aO2VN9GHkd898\nSGymYCbc08y2vHHOhwD8CsBqACfT6axJLB3zHnEvJbAYEMRTQEAg58HIjMNmA6NpFTIRA4AyhwMP\nr1uHCEf67HJZlA6fT8nvLLHbsaywEM986lM4KauiPHgyW66TA1pss6FtxYqYZZUuF+7o6cFT775r\neEwFQIwj7u3XXYc3AgF0+Hxo9/lwaMsWlHB9KHO54HW7cZkIYTm8tnj6GIILP8C3XvkNwpGIMv7t\nQ0MYm57GfQcPKnmWZhxq1eTRTK6nVr6okf0XW62xjPgGg4DfD7S1ITz+VrTNbwWzy7YWEYkI0GLc\ntjNFswc9CGoc1UxZk1xGJsrxpAYzbrJmtuXV0QIAFwGMA9iA7D3SyGa5lcV4TCMgkFmIHE+BvIPI\nS8gv8GGWfM6lXshlonb+8/e/V8ia02ZDicOhqJZetxsEKaSVd7AN1NdjR1MTan/6U0TksikdPh9e\nOXcOI9PTUmOqHM+bystxXWkpek+fjutHAYCm2lq8eu4cJg3W8DSClSUlmLtyBZGFBdxWU4PlxcXY\ne/IkwnNzuLmqCmUOh1JDFAAKMYfr8Qd4cBG/wcch6a5A24oVmJqfj3OY5V1na9xuNNbUxJyDROGw\n6bgTG90/ldw8K5G+c6cMvx99/f3wA9j/r7UY8Y7Ce74Rm799AO4Zj7k0tiWIxcjMa0MbetCDRjTm\nLbm8un739MJZeWfb1QDGMTBgRzjcCIdjGC0tIUhfL5n8kFnh0GsUfqSW/2oMV9c1lRvI5RzP0tJS\n2Lg65jMzM3jwwQfx7//+73HbihxPAQGBnMVzJ09idGYGAFDtcuG8rNYFBwcV4xkjOYDD4XCMQjhP\nhGmZXJY6HIqZTl1JCd5fUYHe06cVh9X7Dh7EHFers+/MmYR9/v3EBMpdLk3jnytAXL6pHZJ6yXI3\nU8HU5ctKHmjvqVOocbsVZfOtCxdQLtcDXVtVhT9NTeF8BPg9PogyzICRTgB47fx53CLXMOUVRqY6\nsrBjFvbKzgGf18kvB6IqZqowsn8quXlWoqWlyxriWywrIo2NaPnSUxg89hCa9u2QSGe+WMNaAD3q\n0IXs3bazPjixB+3oxE78W16SzqsPTBcHpLPIvhc83N+vANiAcPg6jI5KNYkHB4HW1kx/yPg+ZBrZ\nVFcFrnZcunRJ+Xtqagq1tbW4++67LT+OUDwFBAQsg5pAbh8ailEplxUWKrUn+RxAIzUgmcstj2q3\nGxtqa/Hbc+dwenoa5U4njm3digqXC7c+/TTOz87GucuqcXNVFd4OhXSdZY2gyuXCBQ13WQZXQUEM\n8dXcxmZTHHdtALR6U1dSgte3bsV9Bw+iZ2QEN7ou4rrq1Th0RlJCCwsK8Pt77kGFy6UojMwYyGm3\no8ThwNT8PHpPn445B+/fvRv/dfEiFgB8yOPBYHt7QmXTyIOCqxbhsBRuu2MH4JFJTjZFEg0shnGT\nH/FaTbZtWbT6IKCP3DH4Mq6LRyMVGrB580q43TsTbp8vGAgGER5+C47i42jp+g3cHt9id0nAAuSy\n4snjsccew7e//W3813/9l+Z6oXgKCAjkBNSq2dj0tEI6PS4XXv7sZ/HQ0FBcyKVWDqCa3DCXW75c\nx/lIBIdHRxXToIn5edy4ezeG77kHK0tL8Z78BM9hs8WVMWGoKynBssJCzbBaI3DYbLipshKHz55F\nid2OKY3wW5fNBp6Waimpc9x7rZ6udk3hmyVP4sWDP0OV629Q43ZjZfVN+ElzMx789a/x2vnzeLG9\nXXGw1VIyA/X12On3x4W9jnLn6cLcXFIiqT7PHpdLEFEGjwfoVlGcbIokGlgMl1ktrUZPx7IKauJU\nLBMnoRcZw2/Dz6FM/lwfHPwi2lqfWaSeGNfFLYtUyDGEh4cx2n8YADAYfAit6u8UgSWJ
dB/+WPXw\n6LHHHsO2bdtS2jcZhLmQQN6hr69vsbsgoAM1gWTvK10uvHbXXfCVlcUZzwCxZjbbh4bg37sXMEr3\nVgAAIABJREFUT737ruKEeuOuXbjv4EHsaGrCLz79aRTZJRsgO4DxSEQJSQWAuStXsGHv3phjr7/m\nGmV9md0ec2xXQQEK/vCHpGMrQNRZlsdlIrw6Pg4boEk6AeCSarndFv9AkPWKN00qtNlwXXExql0u\nFNFFjI0dxv8YqcYz7x3HuUgEvadP46GhIezbtAmnvvCFmLIpzEzozVAIQPScaJn/zMr9KwDQs2lT\n0rlIx/X2akCufUcthnGTlldppgMH1QZR6j4EB4Lw7/WjbX8bwnnmMpyNa2rCIYX6v+cFnmhaTFXG\nuOHQ4hoqZc78xyGH7HsbG9G0IzOf2Vz7nhJI3+TOCpO8kydPYmBgAPfff39K+yeDIJ4CAgKWQe2G\nyt6/e++9CWtJ8mSIkZiQTCbVOYketxu3yiVCGJ1rqK6GUyZzxXY7fv2ZzyjHri4sxJHxcYk4ulyw\nq4hnKBLBb8+di+vT8uJiLCssVN5fAXRrfi5cuaKpUlbJeZnqZc6C+K/eBUjq69GtW9G2YgWK7Hbc\n4vVi+vJlnJ+bw7H55XgCX8AFx/swTRI5rXS5dF1i2TyORyKoKymJIfVqZ9u1ck7oFQDfOXJEsz0g\nSmbnr1xBh8+XkuutQPZRVFQDt9ub1ZtzLepgpnBGKlATbHUfhsPD6B/tR89ID4KLULJnMZCslAyP\n11puwyv1wMDmtfiRe2fGerR0nFqfQ9Sj+QFLW27p6kJ9IIDNBw7A7Vk6Sq5AYqT7kNCKh4w//elP\n0dTUBJ8vM+HdIsdTQEBgUcBCaY9PTsJXUoKTly7BV1aGd8JhjEciWFtVhevLynBJzklkxPHVu+7C\nXw8OomdkBCV2O0qcTrz82c8CADbs3YsN11yDM9PTStjn9V1dSm1PPahDcddWVeHQli0AgJt278bo\n7KyyzobYUigFAApU+zttNthtNly+cgV8hul1xcWYnJuLyTtlDrylDgc+ds01eFIm4HzeKwDcVl2J\n/7P0GXzl3Cacmp6Bw2bD7+68U7dOJ8uJ5XM59XJptbbVgt7+6breZgq5k7O2uNi716+E2tbXBxbV\nxCmTSOaM3La/DT0jPWj0NuLA5gPwXAXXgx9+9MsBzgEE0J0gwDmMMIIIYgd2ZNCEyY/sZ95mKru4\nCkBI/rsDwGKFJgvkC5LleKbr7m6FO/yNN96Ib3zjG3jggQd0t0knx1MQTwEBgZQQDALDw5KJZ1dX\n1Ecl6X4y4Tx24YKiaqrBTHQ8bjfCkQhqHn9cIXZs3epduxQnW54EqcnRk6ramg3V1Tg1NYUxjkzW\nFBbiHPfeV1qK60tLcXxyEtcVFWFofFxZV2a3JyyjoudsawdQ6nQqJNhhs+FTdXX40YYNuGX347h4\nRVIx7/TV4emNbQoZbKiuxsrSUuz0++Fxu7Fhzx4lx1XPiAnQJoN6BJPflpkRaeVrGiWouQKjhGup\nE1TLSsXkOcKRMIKDQexo2nFVkE4gF0vJpFtQJxUS6UdmyO7tAHoBNAB4wWBfBK5m5Lq50IsvvohP\nfepTOHv2LEpKSnS3E+ZCAlcVRO2p3MDwMNAv/5YHg/F+Knrgy6sAUk7jxfl5lDudmJifR6nDgfd7\nPPjaiy/iVyMjiCwsKMVCWBitx+3GR2pqFBLEh3eysE+v243TnD04Q3VhIZ751Kfw0WeewdjsLNZW\nVeHs0aPAihUAJHLI18EcmZqK2Z+RTlZChY2hwGZDaG5Ot5zKAqCQzgqnE0e3blXCj9/nGMOrc9fB\nh/cwef4U/Hsv4w8XL6La7Ua1262QTgAol3NA2bjVynG5y6UQRjUpZQZNjIxqudMmKqui3j/XYTTs\nyGrzHfYdlcj9NxnZtZIMLyUDlnS0K4/bg+48VXuN
/e7Fz04XurKgYppBugV1UrGoylR28ZNYVLvq\nNCHupQTUePzxx3HXXXclJJ3pQuR4CggIpASuXCHMeB9EOLXQXVCAgc98BoH6ehzbuhVetxuXLl9G\n76lT6PnjHzE6M4PQ3BzmiVBot+Otu+/Gvx45ouQZ+kpL4S4owH0HDyo5i10tLVhVWoq5hQUcHhuL\nO37vqVN4aGgI79xzDwL19Ti0ZQvGOCJ8aX4eFxOURgGAQrsdf37ddQAkc6L3V1QoNUWdNptiFGTX\n2f/Ply+PyXn9B+9ruA2v4NtVAzi2UI/+0VGcnpnBedlAqObxx1H4k5/gY888g3kitHP5lYwojkxN\n4fDYWEKDH4/bDY/LhY7nn0fb/v1468KFOFOgRPmaamMilvOpzhnNFbS0dKG+PpBU5cuU+U4i06Vk\nJhBWmEQwLK4Bi7VgtKMH0i1/MqgzCpdShmE84mfHAw+60Z0C6czUTBk3DtJGKiQyU9nF6Y5FQCC3\n8B//8R947LHHMnoMoXgK5B3EE7rcQFdXfLlCPfDKz83V1eg/cwYAELlyBd85ckRR1XgV0+NyKSVO\nGqqr8cIdd8Qpcl63WyGXK372M9htNjgLCvC+8vJoKRVIamOFy4Xw3BwavV4U2e3oeP55hWTZ1qwB\n5LARp82GIocD8/PzUt1LjTqgQx0dWFlaiuDgIPpPn44JxeXLpGgF5DZ6vXhUdQ0/X/IVzLtfxRNF\ndyBy6ULcPpeJcJkIQ7IJUm1RkbKOjYEpx8kMfvj5q5XNk7xuNwZOn0bVzp24uboa7T5fjMpqpC2m\njuZSjU9GuJLBakWQfUclIvHJyO5iONFaiUxl1ZmlHWp9bAyZLemSKah/97QVcSuVvUwXv0n1CklF\nMV3kekY5CnEvJbAYEIqngMASwGIoT6xcoZHcTl758bhcCnFS35DzrrhP3n472n0+dPh8CukEYm/m\nXbJDrdNmw/Tly7g4P4/xSASvnT8PQFIjFyDVxQzPzaG2qAgHNm/GycnJGCXKKxMwO4Cbq6owkcSM\naMsvf4mburvROzKCC6r5ZuVQtNTO5cXFmrmRxydncCxSiV+dGoVLdry9uaoK7T4fHBqlV0ZnZhQF\njc3Z0a1bYxyF9a6J4xMTAKSQ3ec3b0agvh5rKipwdnYWobk59J85A5fdbogwahGrfCytkilFUO3y\nzCOZGmtUreWRS+VCzCqTRmFWu1JTsePy+woAD1vYr+xBUiLD4ac0FHErlb3MFb+RnHZ3og39CJt2\nhBUqo4BAPkMQT4G8g6g9FY9cv9nnCcpOvx9v33235g05H8rpcbvx7MaNeGbjxpht+Jv5VXK46jyR\nkltpB/DyZz+LQH09PlJTE1PmpMBmiyn/wfJAJ994A4CkUL4qk1YboKl2AsDpqSklDJiZHhXb7VhW\nWKiEDm+49lrpmNx+H6mp0SxpwvpT6nCg4Gwlqv/kw7KfbsHOdRtjapDyGJueRjgSwfahIYxNT+Ov\nVbmXz508qVwTD/T1KUT0kkyqJ+bn0fqLX+DS3ByKHNHgl4bqasMlUbSIlSitEv2O0qqZypCM7KZC\nhnOpXEimaItZ2qGmYj4AXxgI4i/2+vGz/W2I5Ek9z+jvnkTpHQ7JTTVWEbeSlGWu+M0whtGPee6h\nREIvEoEMQdxLCSwGBPEUEEiAYBDw+4G2NiCcw/cnuX6zryYoiW7Ik4HflxntMNgAvHrXXfh/3nwT\nY9PTeIc7aYUFBXixvT2mP2sqKnB4bCyGYJLqfy3wdJQplNMLCxibncV3jhzBsfPnldqh7Eu21OHA\nDz7xCc2HBF0tLUp+62jFGZw/a0fvXjeCQeDZjRtjQmsZ+kdHERwc1H3owCuxg2fO4Kl330X/6KhS\ni5Svj1rqdGqqy8mgdR4TqXwCEjIVoVAsh+c2ehuxY5HDczNds9Mo1FSsHMCy8DDWjPbDa0H+rFUY\nGAhi714/9uuQ
4e/he3I9zjcRBtDSshb19R0ZdCnOnLJYLD+WkB5KfBjAo5YfwzyWdvavgECuQJRT\nERBIAL8/6twaCBh3bs02crWOYqYRjkRwU3c3RmdmUOly4chdd8FXVhZTUqW2qAgFNhtebG+PMfQB\nouVB1lZV4Y1QKKYWp91mw4LGdxdzs61wOrG+tha/PnNGqcvptNlw7w034Gd/+INmfqdDru8ZuXIF\ndgAbamvx7MaN2D40hKfefRehuTmUhasx+c074P3LIaxpCqO80IF3Ll7Eu5OTMW1VOJ04cd99uO/g\nQc0SJ5WPPqqQTB7q+qj5UhplKUGvHmq6SKVciFamXabyM5P3JYhhDKMYxehCV0ZcWMMAfrS/Dd4c\nKy+TrPRPbD3OOnTjdeRruKlUL/SL2AGCBzuRG+PwI/v1RQWuNuR6ORWjEHU8BQQyhLY2oKdHcm49\ncMB4rUqB5EhmQmPUpMZMvcpE+wZ6e9F76hQKIJHOPRs34rO/+hUiV2ILpNx+3XXwuN3K8Woeewzj\nkQhsAG71evHuxIRufVItOGSCy74l3QUFKL1Qg4WaEMLzc8o2PCkuANB07bV49lOfkuZK46HD7b/4\nBXpPn0a5w4GJy5fj6oHqPawIDgzg5ZO/hX1hAh77AubLb0Wps9BSo6BUa8DmMsyYKuVSPVQ/4m+3\ntZZlpy88uQqgO0NHNlNkPVskPFmt1dyrx7nUkG590Vhk4yGKQP5BEE9BPAXyENmsPRUOG3duFTCH\nZKrPtT/9qVLvs8PnwzMbNxpum5GqIrsdJycnY8gATxBqiopwcnISM2+9he4vfxk37NqlEDxXQQH+\n7Npr4SoowMFTpxC5cgVlTide5+pvAsDJyUls2LsX1xUXK66zDOu83hjHW0AijXq1PrXQ6PXivclJ\nnJdDMvn6oYnUMjYHD69bh4eGhgyr4fx5KcUELqE86bHMIt1IArWj51eHji26ky4/b82Tk+j7+td1\nt82lCAWt221rb8GNIYggnsJTCCGEBjTgBbyQlZv1ZMTSj+yQ8GRk+Bd9v8Dj/sdzqB7nUgMrtmNN\nTc5sPURJB6KOZ/YhiKfI8RQQSAgzzq0C2tDLZ0uWl8rX+zT7Nc1yD9XutUCsEVPPH/+I/tFRvHzu\nHB4aGoKdc5Cdu3IFvadOocTpRGNNDQBgcn4eDw0NxRxr4/79mJybw6sywWyorka1ywUAGBofh1Nu\n0wbA43Jpkk69L2Lmgvu7O+9Esd2OMrtdIZ0Omw0Pr1uXdA58ZWWm8mkVoyNM4gqkcVS5XDg9NZVW\nTiJ/HTgrpDbM1oBlUNe4zAVzLf56/vsPfzjhtunkOFsNrVxMftn2LDlmD2MYIUiGOSuxMmvkKpn7\nrtUmSXqZhMnMpEpRmmI9TgFjsDanNZrH2ogdFrsCCwjkM9Imnjab7T9tNttZm832uhUdEhBIBvGE\nLreQzChFjxQkM6G5zesFIOUkVrhcMccwas6iRW75ZbdUV0t/r1+PHU1NWCu/Z/C4XNjR1KSYGGmR\n5NHpaVycn8c8EWwAqt1uNMh997rduLm6Gu6CArx21134+LJlAKIlVz7k8WB5cTE6fL64osoN1dV4\nMxCAx+2Gr6wMH6mpwSRHxi8TxZFgK9DV0oI7fXXwuRYwDanMzNTlyzh89ix6RkbwRZNOiOxcMXOj\nnpERlP7lIAKB1MPX1TUu2Tm9wXEeW2d/uChOpfz1fIccAp0LSGaZonW7zS/LFqnnb9R3YmfGjhN/\nXMjHjRJLfs5+DGtNklItM5Pq755UusSPNrQhLExzsoYudCGAQE6HRYt7KYHFgBWK56MAPm1BOwIC\nAnmIZDemespmMtXnydtvR6C+Hoe2bIlRLmsefxyPvvOO8n71rl26BFSL3Kprha4qLYW7oAAffuop\n/F5lXfyJa66R8jiLilDjdsMjK5k8nLKrbQEkZbb39GmUOp0I1NejwGbD78bHEb
lyBf/8yitKO2u9\nXrT7fBhsb8epL3wB5yMRxSm3wunUdJdl88jqelrlYKwm8R63G09vbMPKZR9SjsOXW0mmPqvbY9cH\ny3ttqK7Goy1NSiRBKg6v6hqXXS0tWO8+iS9f/jbCp/cuilNpLqmYPNKtp5kJx2yteqOJbtQz6Teq\nVnyDkEg3m7NGAJcsOA4bw5vye+urY2pDKl3Sjx70IGhpRVUjiD1zi0WCkzkGZwIeeIRCLZB3GBkZ\nwZYtW1BdXY1rr70WX/3qV7GwoGWVmDrSJp5ENAjI8TECAlmAqD2VW0h2Y5pqeQ3+Rp4dowCS0sfy\nMO0AxuWSIFpKnBYZUNcKXVlaisODgxiZmsJFzgV2bVUVfvbJTwKQ8jjPRSLoPX06jly/cuedqCsp\nwTK55Em504lCux1j09MxJU2Ia6f/zBm47Pa42peVLhc2rViBUCSC+w4ejCFibB7/63Ofs7RcCV/v\nc83u3cox+fPWyKnPO5M8JecfRGzZ/d/htseuX1laGtNvvQcXiQipOizR43bjGzVHUIwZVV3DxUEu\nfUelGyqayuc3GVHUqjea6EY9XfKcCEzd3S73+SkAF+V1dgDjFh2XjWEcQB3iFdRkc5bqNbW4IZ+x\nZy4bJFiL3KpD8wUk5NL3lEBu4G/+5m/g9Xpx5swZvPbaa+jv78ePfvQjS48hcjwFBATSQrIbUyuU\nIHaMSlUbBVxOJq/E6ZEWreWM9LHw17VVVejw+XBoy5Y4YqhFrn1lZfjT5z+P95VLJjwT8/PoPXUK\n/aOjCkEusdsxdfmyoo6q22Hje/fee3FmelohYjdxRNBszqZ6rHpzwufSjs3OKuSPHW/70BBmLl9G\nbWEhnt24Melx2Vz58B7umn0Yf+3YhdrCQmXcjLiy/rwZCmnOidkQT7UKKiAh3XqaqXx+k+ZNquqN\napEutmwFgKPysrXInErI+syeolcCqJH/rgDwcJrt8w8AtAqhZIpcL27IZ+xjj2yQYC1yqw7NF+Ah\n6pcKRPHmm2/innvugcvlwjXXXINPf/rTePPNN5PvaALqtKKM4IEHHsD1118PAPB4PFi7dq0SW86e\nuIj34r2Z9wy50p9cfX/HD36AkUuXsLyhAV0tLXjtpZcycjzmdmpm/+DAAF4eHITbbsfz/+2/weN2\nJ9ze43Jh2cmTOH/xIrBmDRq9Xhz/7W+l2pcf/CB+8IlPKNsPT0xIDqPvvIOOt99WHEZfHhzE0QsX\ngDVrEBwcxIMOBxbeeQc1N98Me0EB6J13UHD+PB79u7+L6U9XSwuCg4O4+Prr8H/ve3Hz2VVQgLdC\nIeCdd/C+8nKsamxE76lTKHv3XUxdvoypG29E76lTWB8Oo9lux7P33x833u7WVvT19WHmrbeAqioA\nwOjRo+g4d07pv5n5HQ6H0S9bxwZdLoxNT8e8Z8dbdeYMQnJu6w1nzmCb/F3N2nv58GEclc2V7t+x\nA9+87TZ0FRRgOBzGzFtv4Z9uvVXJaezr68ODDgcmC0/hrtkfYGJ0BYqvvw9v33M7goOD2HblCl57\n6aW4/tXdeisObN4cc30WOxzAO+/gxooK7Lj//qTjdbs9cDgexEsvvZYznz+t998DcMnvRzGAB/v6\nUJqF43dnuP0uvx/DAGb6+vBPAIrl9Tf29WGbtEPs9i1dCA4Gse3KNrz20msY9vsl/8++PnQA6JPb\n62ff9/L+JX19+IKB+VP35w4D4ymWj38DgA/6/dgJoKmvD6MALvr9eEg+Xqrz1QWgo68Pfw/Ak+D4\nNwLYobHe7/encf67TffXmvcPApiG3/8sAA8e7HsQ05jGs/5n4YEnI8efwQzgl8jttr5t6EMfWlq6\nMDgYxJUr23L++yGb76VlL8PvPyr/3QHgmznTv6X6PhGCA0EMh4dR7ChGV0uX4XrMVu2/ceNGdHV1\nobm5GRcuXEBPTw++853vaG7b19eH1157DW
E5RenEiROGjmFJORWbzXY9gOeIKM7KT5RTERBYPGSq\nUL0VMNs3fvu6khK8vnUr7vjlL3H47Nm4NvTqJGot59tl0OsPv+2q0lKsLC1FscOBifl5pR9Omw2f\nuOYaVLrdODczg8NjYwCkMNp37703qXIUjkRw0+7dGJ2djet/OrUi7zt4UHNOwpEIHujrgw3Ao35/\nXJusHa97Cmsq9qPc5cTE/Jdw+Oy47lwZqZOYrJalVsmR4MAAnjt5EpGFBdxWU4MnczCnMhn8yE55\njiCsqz/Jt1UD4KSqXT+iY6oF8BsADwEoUm27XadPiUq6MNgB/DmAGQCH5WXq+WP9PIaocml0jrWK\naWSzrIy1xTz0YOVVkZsIy7mkouyMUSxG8aSrF8nKqfj3+tE/KpfhqQ+gu9XcL0S6+1+4cAGtra14\n/fXXsbCwgAceeAD/+Z//GbedKKcicFXByFMjAQks7NE75cXp/7sJbW1SbdJcgFnTEn7717duhcft\n1nWbVYf/srDO+StX0OHzxRAdpqwlcq5lOD45CUAKy11WVKSEgh6fmFC2mSdC/+gonHY7jly4oCzf\nu3Ejtg8NJTXS8bjdePueezTDl82En6rnQC8k2uN249mNG/GMThgt229NxX4cHutFz0gPjk/8Xneu\n3r97N67pegb3ntqM0Tl7XHt6/dOaB3WI53A4jNGZGYTm5tB76tSilU5JhkTfUVaX59CDlaGbfFv/\nS6PdYm7bUUiksxsS6eS3fQJh5f1qXFEC+7qgXdKllmt3AUAvgOPyey+A04gNEFSHy5bDeIislruv\nVr9SgRFTnWTFPKz53ctktmxuQJj6GId0TVl1lQtYAXUaQjb3JyJs3LgRgUAA09PTGB8fx4ULF/AP\n//APpvuRCGkTT5vN9r8AvAjgRpvN9iebzfbF9LslICBgBRTSsH8zDve60dMDBDN0vxEMShFxK74x\ngA0/T+5S2tXSojjKqo109LZP5FDLq2I3dXejd2QEgQMHEI5EFAOd3tOnQUAMmelqaUHz8uVoW7Ei\nzrlWnRfpKykBAFycn8fL584BkBTO64qL4SqIfp2W2O0IRSKwc08E733hhRgjnwcS3Ejq5dWZIese\neSw3dXejaudOBA4cUNRDM06yrC/lLkbMG/Gbji/pkkZWXmY8EsGGvXtNjzERijl33YbqastcVrOJ\nbN3mpUtwg5CUzDYATm45s98qhUTwwogliV5I1KYKUi4j34c57pZjHAW4Sd5fi3RtB/A+1bFdkAio\nTT72YWgTYHaUCfnYibLX+HGqt7GqsuPiOsvyyNxjD2scaxOdDYHMwNr6pQLpoaulC4H6AA5sPmA6\nTDbd/cfHx/G73/0OX/nKV+B0OlFVVYUHHngA+/fvN92PRLAk1DbhAUSorYDAoqOtDejpARobU6+d\nmAx+P9DfD+Dv9gJrjIXQZiIUWB06G6ivR+/IiFLOo8PnwzMbNxrqi3rZpbk5qQ6lw4FLly/HtbG8\nuBiRhQWcl8mcDZLpUbHdjrfuvhsNTz+dtB+AfkitVvip2bnwuFzoPn5ccfA1Ou/hSBjBwSB2NO1I\n+INW89hjGI9ElDH7ysqStm0UycKC9TAwEEQ4PAyHoxgtLV15bT5kNFgy1dBNrXDVlQDOQCKdHkjl\nRdjVz0JZ2fFOIxoKC0gOrsxMx4tXcR63xhyvBhINqgHwKwARALchNqQWkEinG8Ckqr+VAKoBnIMU\njgsALM7ADomo8v1Uw4/Mhz63oQ096EEjGhe5rmPmAnr98KNfnskAAuhOaSb9yE4guoDA4iBZqO1i\ngohQV1eHr33ta/j617+OyclJfPGLX0RJSQmeeOKJmG1FqK2AgEBCdHUBgUB6pDOZSnb8EwPA3+2F\nfaW2S6kWMlEjkFfF1lZVYUdTE26TzXEaqqvxKGeskKwv6mVMYf3YNdco+5VxIbpvBgL4aE2Nso4g\nfcm+1N
EBX1mZoX4A+iG1TMXseP75mPOgd2605mI4HFZIZ6XLZXjePW4Pulu7kz5FZeVlrCadUh8S\nhwXrYSmVUzAaLJmqjqEOV22E5CzLlE47oqSzElHdjB3vJNfWhxDr4Po7vA/FGIND9qAuhUQYewDs\nhxSmG0JsSG0DgHa5DTXp9AA4IrdxERLhnOL6toEbA+snr6ndD4lgA8Ycc1PV46xwlrWmFmXm1C1r\nHGuzFYguICCghs1mw89//nM899xz8Hq9WL16NdxuN77//e9behxBPAXyDiLH0zw8HqC7Oz2lM1l+\noa8xDKwZxUJRBHUlJYbq/tUUFcWFt6aLrpYWdPh8aOdKojzZ2opAfT1euOMOzT7d8YMfYGJ+HrVF\nRXjq9tt1Q3m3f9WNse+0Ajta0bbchw6fD5tXrIBXrgnK9mHlQwDgCoDvHDkCAEn7wZCIkGudB71z\nozUXfM3QI3fdZbk5DysvYzXpTAfZLqeQye+oTN+aHx8YAPbuRdn+/VgRicAN4B3umA3y35WQSJ+6\nFuUE9/4GROtjtgGoQAXKsQyXIT0QvyRv9yFIxI+hDMAnIKmg1QB2Ikp8AWAZAB8kFbQBwLS8vBjA\ny5C0sncBPItoWDNfp5PPV2UE+3qNsfghke4Ncv/fQmoZklbkHQ4Ovqz78MSaMNf0YE3ZFpFvmE2I\neykBNdatW4fBwUGEQiGcO3cOu3btQg33MN0KZKWcioCAQP4jmTpZXhhdb7TY/MnJSZyLRNB7+jSC\ng4OWhNp63O64EFaWT8iDD2f90+Qk3pBdaR8aGlK2Ve83PCyHE8ONVbcUYWVjGMcuXIgxu+lubcXb\n99wT40zL5otXLFkY7fahIQyHwzg+MQFfWRnKnU78uKkJDw0NaYbUJlJmSx0OhCIRhCMR6Vgac8FK\nw+iF65pxzs0XsHIKiVx28wVdsDZYUh266wuHMTI6ikkAhYODOCxf/3UAPgBJiWTOtT5VO92IEs9K\nAI8C6EA0ePJWSOqkGhcADEIiquchKZuD8ra98n6MpJZCIpf3c+0yPA/gZkQDNIMAxgDcB+B38t88\nGJllobor5DGVy+Ngob4j8v8sjzUR6TcSCp2Kt6zdLn0OtR6esBxSqe1gimGuZnoZv46R6/TAFFkB\nAYGlCpHjKSAgYAjJ8gvN5h8CyUtqWA1Gqo5PTmIiEsGEnKdZW1SE0ZmZpP3gc2Xd/8deHB6P5k/y\n+wYHBvBWKITjExP4jRxmy6DOGx2bnjZczgXQnudwJILVu3ZhXA6zTSdfVi/vNhiUiHfnNT18AAAg\nAElEQVRxsRS6nYk8YYHsw4/YrLpL3GfSs3kzet1updDCTZDCYQGJUD4D7ZxQJ4A/QCJxrFhDKaLk\nkUcxJCXxXwE8BmBOXs7yM9cCKEFsvqcbUiQBr4Kytj4CiRz75HZZn1i+NSAppXcPBLEsPIw5RzEe\na+nCpOqBRK081gpIYbyNkNTSh5CY9PuRPEvRyDZqJCpRlJkc0kS9TLRucRBEEMMYRjGK0YUu4Wor\nkJPI5RxPMxA5ngICArow42Cqub/sVnvfZ93Y0ajvQpqKS6le2ZNU+5oMLCR1ZGpKIZ2VLhd+09GR\nsLSHUo7lr/aj/d4IDhyIKrxrq6riSrQMh8M4fPYsRmdm8NDQUExbasWSvTdSzgXQnmeP242PyOEw\n6ebL6inbTO3NpDPyUkC++XKqQ3f5z+STbjcCkJROnnQCURKnzgmtBHAXJEWyDcCPIRFFLdIJSGZC\nHwOwG1HSCURNgV5HfGhWBPGks0DuYz8khfIw16dSxN7stAKoCw9jzWg/PjzSg8/JoavMnKgBkqIb\nAHAU0eBPH5JnSBoJhU4lXNrt9qC1tRtDQ9vjcj2tCXM108vcy8XMHedgAQGBRBDEUyDvIPISzMFM\n7UfN/TNIONQkKt2+JgMjVRUyybMDcNvtuOMHP8CluTnd/Vi/ekdH4Prf
BuHxRG/QD23ZgmdUNTr/\nINf1rHA68fC6dTFtsf0+UFmJjuefxzwRfKWluMnjicsxNQOj5WkSkfvgwIBmrisgKZ2ApPbuyI17\nzZzEMID+vr6crJSoJsUsJ7MWkprnAbDd7cZYayvuk889MwziSacTwDhiS600QHK//QCkkFeWC7kG\nElHUw4Lc9kSC9YMAEj5Ch6SAHtFYboNEehmRXQvgZwA+Luf9vudtxM+bdsDBbbMSUZJphGzyMJKl\nmEomI/vd0zLKykztykS9zJ1cTJbf+ibeBKBtbpQLObC5CHEvJbAYEMRTQGCJI13n2GwSjuOTkm+l\nFmEzAz1yxUjf0a1b4XW7pZvemRm8EQolJLtac5iINEcWpFvYi/PzcYon2+/k5KREZk+dwtT8PIbO\nndNUSOPGJivQbW1AmLuH8rjdWFlaisNjYwnHwvdz9a5dMXOkp9QGg8DEBFBbCzz1lAiz5aG+1qys\nn2n1LbLaEXcYkjI4CimEVGsbIKpvARLN8CBaQ9MFYBWAU4iWUuHDW62IW7iCqMKqhwpVP/n+AlF3\n3EOQjIZ+2NKFN+oD+H83H8B5t0dx6m2U2/IjtXNgxDc2HW/Z7BllJepl7tR+ZErnOMZRhzpN1Tdf\n1VBBmAWWIgTxFMg7+BOUoRCIhzqc1fT+cimWD/z3AXQMZC4MFgB8JSUAtAmbGTz32yi5+uLB2HIk\n3a2t8JWVKaGpFU4nsGZNQmJuZA55ctpQXa38rdcmv/1arzfp9gyJFGi+zaKnmzQJKm9ENB6JxJDU\nRGG2hw8Do6PAQw9BgINape8CEPD7U9aCjJZLSQVqUqx+H0S0vEgVgAH5/2lI5kLV8rbjsvMt9u/H\n85EILkAy7lFXtk01k6k0hX0uIlpKhUcIQCEkYjwA4IOQwnp73R78sLVbye30QCKmByApvJk6B6mC\n/e61tHShvj6AzZsP5L1RFo9USRZfxuV1vK6p+lpT6iX9vppFpgmzuJcSWAwIcyEBgTxHtkxf9Exn\nrIQRsyEjrqtV/7wfoetGgPe8aD+5Gc926ZshPbxuna6DrBnwpj8Akhotmd2egTc4Utdl5dvs2OiW\nHXilBwfd3bHbhCIR9J46FTPXauMiNtdvHnVg/HgRSq+fxMdudeDJjUvD7dYKWG2Qxcx4mKGPlR9n\nFl7LzHHY+yJIZIs3CHIglkjy5jzYu1d6CgEA9fWAxd8FyxDvQJsMMf0zCVYahrn0ZvIcLDYGBoII\nh4fhcBSjpaUrZ8irH37FmTeAgGGH3DDCCCKIHdihG2psZJts9NUsMmMaJbCYEOZCgngK5CH6+vrE\nkzoOfj80CYbVyIYDrRFnXCME+Pb2CHqvGcTaN5twaJ87KRnPp2sqHJYeNuzYkfghQyKCCsTPtRah\n5+caBCXRrsPniyvTcrVC65pN53pSk0OjYKGzxyGZ9MwDuA3AckikMlHpDj/iS5PwKIAU7qpg/35g\nZATweoHNmwELvwtskGp4HtZZH9eXNFEHycCInxc1ITdT9sQKaBUyseo7au9eP0ZHpbNdXx9Aa+vi\nO9IC2SFZVjnfZosQahFmK9178+l3b6lAEE8RaisgkPfIVg5muiG7RmDEGddIzuqTj7kRCLcaIp35\nBo9HeriQbFwsRFqLdALGjJ3YXAOIcXex+mcz027GmUQqbs4J20Nq2XMsRHcEkloYglQDczeiYaMP\naOzHh9d+GJIDrRpxRK+lRVI6LSadgHRt6ZFOzb4YAH+jUw5JUQWksU4AWA2JYDKwc7BYIbeZDLfO\nXo6oOWTGmTcWRkJXjYTRZqOvgLZplJnwW5EjKpCLEIqngECew6gClktIJzw4lXqhAsmhpWiHIxHc\n1N2N0ZkZlDmdmJyfx9qqKhzassXSuc9GGPdSBVPH3oTkNFuOqENsFaTcR+bWympv8vAjqnZ2ILY0\nihFUAbhgttMyqgGcT3HfVOCEVOrl
T5CU4SkAk/K6Onk5j8UKuc3kcRPVAzUKI6pbLtbVNKJUZiuM\nNlWYUVtzfSxXI3Jd8Xz77bfx5S9/Ga+++ipqamrw8MMPo6OjI247EWorICCQV8hUeLCR/M+rCWbm\nQ4/QW50Lq4VMhnFrhS0m3SdPrqMgJGXuovy+DsCvAfwtJOVwHFH10APgPcSPX01yApCUUjuihFUP\ntZAUSLP5mJmGOj+VwQ7JuIjNlwtSWHIxgLcQzfFk14wTQAmAnchunmeq4dbZghFCs5ikR4/0Gsn1\nzPW8SjP5qrk+lqsRuUw8L1++jA984AN48MEH8bWvfQ19fX3YsmULjhw5gtWrV8dsK4inwFWFxcxL\nyJcb0lSQ7tjM7J8s/zBVpKKcBYPAyy/3YflyvyXmTGbmIdG26ZyP4MAAnjt5EudmZhTykMtKohbp\nteqz5kdU0QsAhm5/01Vgs/Ud5Ud0bJUA3kUsUWGkUm2ew4PPZ/wVogpkCRKXErHJLyvzLdOBHUAZ\nJDJ5AMBnEBs+q0YFgF8AuBcSWefnxg/9a4aRmuP4B/hwO8rhQBekEi1mH3CYQS7l4xkhNItJetIh\nvVYbES0G2DXqhBMlKMFO7NQcSy5dU1cLcpl4vvHGG/j4xz+OyclJZdnGjRuxbt06fOtb34rZVuR4\nCghkCVp5cEsF/Nhu/f6gZikOo/snm5tk+YepIpWapcPDwNGj2uVJUsFzJ08q83DL008nzF1MNGda\n64zmQg6HwxjlSGely5VSDddsQStP0qrPWip1NdOtfZstsLExYqn+KHVBIk7vAvhXaNem5PMZRyGZ\nEs0jef1KQmZIZ8I7FhXs3N8FkPo8BuD/AnB9kvYvAvh3AJsAfAxSyPB1ADYA+I28TTmAh1X7sxy7\nERThMBwxNVHVeZmsJusKud1M1GZdDBjJccxkHuTAQBB79/qxf38bIpH4GU2nfIpWXmW+gV2jveiF\nCy7dsXwP3xM5oDmGdP0OrPZLuHLlCt5444202+HhSL6JgEBuIZNP6JLlHubLDalR8ON1/lV0bO4n\nm5RQ2GDQWCismblhBjnpgil7kYUF3Ob14ifNzYbCQXk1zVnRAsBvmTlTZCEaoDg1P68oZ8HBwTjl\nLNGcaa1jZEyvPfW+AOBxuXDkrrvyTp3XmxuzSmgXzIctdrW0pJVHbPV3lDpcmKlrTki1J3dCe2yM\nVAJRYgQAtwJYKbdXA4l0vmlpj1PH5weCWBYexpyjGP+zpQszCfIQ+VBgngTbECXlgERK/wySey1T\ndB2QSOXHIBFuQMptPc3tNyGvfxvR+WWkphxOTCD6QOM+eT3/gIOf8xH5fxYebRaZ+N1LJQwdiJKz\ndLdJFeHwsOLMOzgYjHPm7UJXTqmWeqG/mcqDNUq8L/kvKcpwEEGRA5oDMPobn4n916xZg2XLluHh\nhx/G3/7t3+LQoUMYGBjAJz/5SVN9SAaheAoIcBgelnIP9dQvI86uwSBMqYVmt7cS/HhLdkXHVu6U\nxmaUjAWDwMT3W1B7qh5Pbcic660aTNkLzc2h9/RpPDQ0ZMhhlFfTSv9y0FL19baaGgBAQ3U1Gqqr\nAeiT8a6WFqwqK4Pbbsd9Bw/GPKFUX2vBIHDsdxIZa6hMTO67WlrQ7vOhw+fDe/feC19ZWfoDyzL0\nPmtmldBUXGKtdqpNB0FIxJJ3pmWEphdSaKne2Jji1gbgD/Iy5urK2ntc/nscUrhtJcypjkk7zzpg\n8LttWXgYa0b78eGRHnxhsNPwodgcVAM4B0m1vQ4SOW+CZKr0UW77ywAeAsBrAuxxTTm3bBSxzrJM\nyTuGDyGAqPkPU5d5MyBGfp1cu2oFdTFh1j03V1xSkznz5ppqqedEa8ah1gyMqs3pKMMCmUG64kY6\n+zudTjz77LPYt28frr32Wnz/+9/H3Xffjbq6OtP9SAgiyuhLOoSAgHU4dOhQxtretIkIIGpsJAqF\n
UmujuVlqAyAKBKzf3gqsWUNUUUHkdMaOt7NT6k9rK1FHh/E5WIwxEBFt2reP8MgjhEceobVPPkmh\n2VlT+zU+/TSFZmctvaZCs7MUOHCAQrOzMX/roXnPHmUMgQMH9LdrJkLRLKHzALXfa2yci4nO/n5q\n3rOHNu3bZ/i8GIH63OUirLyemin2R7WDiDbJfzcSEfuIdsrbbuKW8fs6ub9dlPzHu8DANklfzdHv\nBQSM7fOVfZvokUdA//vTjVQ0GzJ8LJc8xnKd9W3yvNSq5q6Vm5/b5PVHNbZLBSEiChDROq4fqX49\nGr2mtK4DPWhdR4nQTM0E+V8g5ZGkhk7qpFqqpUqqpE2zzbTvQAfNzsb3upM6qZmaaRNtolDKZ858\n3xIdcxNtIhCokRpj1ustzxaeO/QcBSiwKMe+WpGMExm5Z8jk/mp8/OMfpx07dsQt1xuHvDwxL0y2\nQbovQTwFzIARn02b9ElPJolnKCQRp1RJJ5F58mp2eyNzlAwVFdEbwsLCaDupEkgrCHsqCM3OUscv\nf0ntv/xl0i9angidmJiI+XLe/G//lhGSpHX8uieeoPXPPKMcyyiRWqw5ThXJCHWqxNTqH9ZMwMrv\nKEYOQEQfJokgMELDXwbN3HYBjX3NEMsGIjpBREXcstVEVJVgH82XfM2ikQghY/sUzYao80DAFOks\n0VhWqnrvlOfjBDd3nUS0jOIJK1uvnmMz6O/vpD17mmnfvk30GXks6ZBYo9dUMxknuUbG2N/ZSXua\nm2nfpk30mVBr1oiSmszxpDcR8V0McpzsmCEKaRI8veXZQibvpQS0keuc6NixYzQzM0NTU1P08MMP\nU319Pc3NzcVtlw7xFK62AjmFTJXZyCbM1tU0u70Vc1RTA4yPS7mdb70F+GRLx1TdZtOtJZpOXU+j\nSORUmo06kvwxGAL19djR1GQonzDf6rUmK5EiancaQxjAFyGZ+eyEflitVu3HMICbIIWLNgA4Bcl8\npxGSMc+QTltVANZBqs/JtukA8AqiuYqGO5+huiCrAcwCmIY0N6yWaDGkkik3IZpfyWMVovmtE4iW\nm2EwUzszUY7k3r1+JQ+xrj6Ana3dWSmPYnUN0L1+P0blH5y6QAd2djtjciczlaeodqa9hEvoQQ8A\noAENeAEv5IybrihbImAUuexqCwDbt2/HT37yE8zPz+PP/uzP8MMf/hD19fVx26XjaivMhQRyCsVy\nUoxVRi9WwwhBSmaco9WGGfJoxRy98gqwYQPw619HSScg9UeL3AwEgwgPD8NRXIyWri645ZV682GW\nSLJcU7ZvJh44mDXyydTxK5xOXJyfV47F8gmTwSpDJjNIp6RJMoOepWbUlQqMmLt4IOUnJgMzUSqC\nRBKZcdD75PXPQCohwnggb4YzD+Ao19YFSOSlVn7vALBvIIgr4WHAUQy0dAEq058KROtjxnQ+Q9fs\nJIA1iCeX0/LrEwC8kHJXSwFcgjRWN7dPLbffhwHUQyL3Rkuj6Bk2dSE2D7GlaQfazA8xJfOfVMy0\nEsEh/+B4GxvRsuNRtKlaZXmKUn9TM6jRIq9a+YcP4AHYYMOjeDSG3PH7/xg/xkN4KGvGQgMDQXwp\nPIENjlp8qeUpeFSfi1SJeaL9MkX2BQS++93v4rvf/W5mD5JMEk33hRyXlQVyC0ZCXQ3nuqQZkqre\nv7MzNkQ11VxGo+Gsev1PNkdWhOKqsae5mR4B6BGADnCd5seyalX0uOvXm5unbISRbnvhBfLu3Emt\nv/hFXJjmc88/n/HwTRYiqg7zzWUYzT9NBfkQMpsqMhEWaRR8m16N9jtJCqG1ycvXUzTPkX9VkhSW\nWsOW7WkmPALpdSCQ2RsHAy87Nwb1y8uNq4Niw2v5vMYT8vp2Sh62rEanPEcgKTR5vWqf2dkQHTgQ\n0MxD5NtoJv18TL4fzYsUFjkbCtGBQIBmdb6YrchT1ApVNROGyu
9fS7VZDV3ds6eZHnkE9MgjoP9x\nYFVcrmeyMFy9/NBE+1kVTixCbbOPpcKJ9MYBA6G2QvEUyClYqeqkq6Kp9x8bAy7Kj/QrK1NXG5Mp\nlkwtPHYMCIXi+59sjsyM26iixT/1buI6zY/F7Y4et7Y28RjV0FNarcTJyUmMRyLoPXUqzma81OVC\nd4YLafPKZr6ElWZSlTSq9OYjvnf0KL45MZH0c5WsxqhRxYvfjjmoNsrb96rafwLADLfvYQBc0IOC\nmyGpmI2Q1E/ICh68jYCGk2i2saCxzAWgFZLyykJonQBehhRiex+AH0Nys2WKoJaabKT26zCk8iuA\npHTOqfZxuz1xZT602mCKabTMSvRsFmMPACcaAfx9wpYyB7fHg9YEPyJWlC7RUjfNlGMpVs4YMIpR\n3IpbsRIrs6II8sr2k03uOPVXzzmWqZYv4SXMyVfPA3gAz+LZmDFpOc7mixutUGYFNJGMmab7whJh\n9wL5h3RVNPX+7H1lJdGJE6n3S0ux5FVKXi1Mpf9mxm1U0dJ76s2PhT/uiRPpmzRZjXxwQ801LGVV\nMpMw+rlKZu7STMYUUX67Dq5NdfudFP8jXUX6TrBs33YiqpwNSUqnCdMfvZfX5PZOInJTcqfdFfJc\nuIkI/Z2SSrtvU0yfjehDRkx31I6wqZgR8W1sI6Z+vkQhqiAiUIjuT8vgKF9gVN3UUgc7qZPW03py\nkUtRXtfTeksUQSPglW0t9VdvbGqzJBCogzqU9Wy/bbQtbsyLbUpkFEaU2cVwIV5MLBVOpDcOCFdb\ngaWIRKGk/Lp0yY+aIOqFuFoR2sqHrNbWSv83NBC1t5tv04wzr5VkzApH4EziaiNRmQi5FjAGs58r\nPYdfo+Uu1NutIaIKkgge/4ysmWJ/oGsonnR6KEoO11M0DJQd40OUfqmVExRbYiTRS8uxFiSF2fLr\nKik23JUPDXbIocGJ5tFMGRKi9F1v1W00c30P0C6d3prtZXaRaRKhRWT4ZXVURyHSJoCZ6iPf3gk6\noUsI1cdlfSyjMgKBGqhBcz9+fF7y5hVBMxKGfbWR06XCiQTxFFhSSHbDfMsth3TzB9X5k9m4+bai\nhuViqYWMjG37q1nNedKbv6VGapZirsti1Va1ApmqAZotJMoZ1qy3qaOQbiOJALZSYpqhJkGSXia9\n3BRV09RKo149z+UUS+K88rIquS/JSKNe7iXkNowS1zqK5p8yglxBUk1Ovn9OksgsI8d2IknpfARU\n8XQjHZ0NJSWJzVx7i/FxiT48mKcQ3U/q3krfUc20uL1MjEyVMmHEw0veOCJjRmXMRB+NtqfejvUx\nEVnlx1dKpaYJWjLCls7vnhEyaESZtYqc5guWCicSxFNgSSHZDfNHP3pIN5RUHWaq15aVxKmuTmq/\noiL1ENxU1EIrSaHePJldnq/IBvHMNplKJ9Q8o301INpk0tQoG0h0PTVTPHXQU0i1ttWCekrVBJN/\nz0hhMRHVkvYPdztFiZC6HiYS7Gflq5QkMrmN/oZq6AVqpm3UQRHlkmH9YyZIJI9dMUOaDdHyAwEK\nceY+iS49I+pyJvXGZAqqdE0Z1cAlZFspStVoKFk/tVRNBrNhp6yPXvLSelqf9twYHXOqc8PG10rJ\na6iqCRr/3k1uqqRKaqVWZX/+e8rstVJLtUrbfIiwWVhFTvMFS4UTCeIpsGTQ2SnlULJQU60b5lBI\nclBdvz654yu7+fZ6Y7dXh7amQz7NOrjyY02H/FpJCvVIitnlVoLNT12d9rnOZWidW6vJVDJyyH8W\nzBLJjBK/ZkrKpqwKAefHrafqZxta1EEvDNwozeCJYDtJRMxNRI90Er3STHRwE1FFSFILB0lSEk+Q\nKjRVfn2AYnMW1Y635RR1ia3Q2N/Kl5eIKuklAlUoN9OM/LUSkY+IlpFEPpkqzBNmtVLczLXNu/yy\n9tqTzLPW/gzZCYI1F+Cbba
Uo1dxDvX4mUjrT7WO6eaCsb63USh3Uodsvre06qZNqqTaOCKr30cvr\n1COJalLN5o1XS/XGq3UOEpHRSqpUtm+ndtPzZwb5ktNqBJDKDi+Jl974SBBPgXwCT5raE3yXGSVX\n7OZbTQ4ZcbJCtUuVhKWrGlpJCvUUV7PL04GarPHzk+ghQS6G/WqdW6vNjcyQQ7NEku/rthdesEb9\nZHfmTH5LwKasysflx13zjQM5odKboQ5Gt2VlPUCkaA8hInqjObpiVyCeMG2i+B/t5Rp94NcvI+lU\nGlU97RQlgmZuHopj3u9SSAc3pDhll82VVhkZfrz8pdess60WEj0I4NtZRbmRiWm1UpQpBVWvn4mU\nTjN91iJ56c5NqiG26mVa+ydrW289I2jLaJmyfjktV9RSEGgtrY0ZbyJyn6gfrM0SKtEkzwJXJwTx\nFMg7GCFNhw4dMk2u1NuHQlETn3RVu1RJGOtTaSlRa6t1JkK5bvKjBzVZY/NTXp74IYEVYb9Wh9pq\nXZ9Wmht19vdT5aOPEh55hNY++WTSNs2SXr6vlqmfzRT9ZaijrNyR8+Nu/cxsxlV6hmznDC8naVqd\nJOVfKoRHZkpHGiXFk6mVRBJ5XE8SkXRSlOydoHj1jpFHkJRf2UzxP/YOjWXLKaqOnlCts9EsNdM2\naqMILdPYl4XMNtA8tdP9tI1m455b8GosPzY9gqhF5M0Er4ZIIpW86ZJWO+qanlYglWvKaqUoEwoq\nc6WtpVo6EWOFZX2NUL7f6c5Nsr5pETrmUMuW8USQJ/VaYbXJ1vNQq5HbaBtVUzUto2Uxc3zo0KGE\n5D7RGEMUihnHKoqvYZoqlpKZ0NUGQTwF8g4x4YE6StahQ4dMkyut7a3Mq0wFoZAUAsyTJr79bdsy\nq+Rl+lhac5Vo/rQeDgQCUt5soocEVoT9Wk0UMkn+OzuJKr4VJYMdv/xl8v6kQXotU2rNpafpwkzY\nMD/ubD6QySTx1Arp1AqZDRApTGtjKJ4INXPbekgKzT2qsS7AvS8hiey1EsWUKymYDSnmP4ykrqX4\n0xxLTs8RaB+10/0UIkmpdXLr2yiWJPJ9Ys8tQnK/2XIWJJOuqpzoxpfvRy23H9+O3qXe2d9JzXua\nadO+TTG5p0aQCwZomVBQK+Qwai0yawVxZn0G6TvHpoJkfdMidPyy5bSc2qldU11cSSuphmpilER+\nfTu1Jzw2I6aM1Oo9MDh06FBScmnE+MjqEjZLyUzoaoMgngJ5DSuULKthdZ/UxkR8+2pSajX4Yzkc\n1h9La64SzV8iYpDquqWI5mYifEUig5XfzXxNUsuUWjNsIAHy3XgoHXRSbF6lYk4kv2fr1IRHTYQ6\nKRqey5ckYURKnSd5gmKdaJ1EMeVKquRyJSBJqewg7dMcDc+9rGzfQRFlPVMwtUirHpnTCjNOF4lu\nfNXhyTz5ZNC71Gu5Oes4kCM/aiaQSQW1kipTbncNraEKqiA3uWkdrYvLjWyn9pg8zGwoalqETs/Y\nqJM6FZWygRpiSFwN1VAd1ZGHPIbIs5aCbIRcbqNtVERFZCc7VVN1nPqsFbLMXw9WPpRYSmZCVxsE\n8RTIa2TDwMYsrO6TXu5pY6MUfssfy+pcRj7Ul/WhslK77TVrJHLs9Rp37tWaq1w8p/mGTZuIUDRL\nldsP0Imz+VdqJF1YnSubVZhwoNEsu0LRH9dKbjkLAV1HEhFSf0TVRIjPz1TnSbL9a7hlAYoNtwWR\nUq6k8ulGWj4bilmnNu5hY1Arsw00HzMNfD/V++qRueX0JoGICmiKmmnO8G2qun3+fSttTXCjHp/f\napRCVspzhqcbqd2k4rkUwQhGJVXS5+hzKZNBXjXlCZtWW2qVdRWtSmj0YwTJzIDYNowQrqN1MQ82\n1GqmVgkVfr3WMdl7PszWTFixOiS5juoSrs+EOp2JtgSyC0E8BfIaekqWkZAjK0iaVhtWq2t6
4aXq\nv4ni1dB0CShzB/Z4KEZ15cHmgFdE6+o0m9Ns34rwZiuRKHx7sZAwdFSDfSz2HC42rMyVzRQObT6k\nTTCbyTBb0dpUq4SI2aY7KT5nU4tIsWN5SSKMcbU5Z0PkPBCgdbMh8nDLeUKs7lOd/HeZfNxEl7DR\n8Syj/QSKxGxrhN+r2+ffd1Ak4Y0vTz55FTnZMVtnQ4QDAVo7a/6WOhdCba0AT5j4GpbphFeyXMMC\nKogjbImMeyqpMkZdNHJsLZKpV1qE35Y/Dtte7T7LHnSoS6gwoszWa4Uoq4lhIzXSalpNFVRBXvIq\nCibfp+cOPaf0lQ9JLqIi3XxbPoTX7DwZhcjxzF8I4imwqMiE22hnJ9EttxxK2GZnp0Si1CGd6v4k\n6x/LKwSIOkzGcBkdu1ESwZeZKSmJH1uq4Mms1hj59QBRcXHqtUpzAXqhvmkV0k6z5mXC0NFmMi+r\n5BhSmZ9s1zxNB1p9PXTLIe3zZiLPVbPsCulHKxttupmiXfNQVE1UEyl2LK380ZGHiMoAACAASURB\nVOUk5VOq19nl9tnx1X0yYrpjwvyYiIgq6Sg3ngUKkf7HJlbVjG3fbAqy+lzoHTPRPmawqA/HEhAB\nsyRBTTD1XFXNtHuCTlAd1dFROhpD2LQUa15lPUEnEuaA8n1gxkBaiqLazIft5yKXsryQChUSyfrJ\nk1E3uWPIHq+Qsu218j0d5IgZRwM1KLmjPDllCmYM8T4UDW8OUYjaqI2W0/I40snWd1AHraSVhuqf\npvMgQeR45i8E8RRYVGQiR9NIm+rcRUaU1PtqtcUTRp68Jirtkmo/9aBZA5Jrb9kySilcVasupjqc\nVw2myN58M9Hy5eZIZy6WOclEqG+6OYcJQ0ctMuRZTKQyP2b3yU4NRW1o9tWMraoOkm1qNBRVDS3V\nlDncaoXpMpWSKZ5a7rFriaiaoj/8Xnk/deivVq4pPwaiWALnovhanGq00pxCOvWOw8Aru2rzonRI\nYaJj5iPU5yURETBLEtT5e1omPKm0yyNRqCaf09hMUn3NNmrTrMXJ96GGapS/Wf9ZG1VURUwJ3Ebb\nNEN/WY4mPx6e9IKiYb8ucpGNbMpyJzljyGAxFcfsx8ZaSqVUTuVKrquTnAQCFVOxEsrMO9GCQD7y\npfXggEj74YNenqaRBwp8qLEo1ZJfEMRTwDDSJQla+6dyk5+sH0ba1KvRqS5fokW6tAje2rXax0rk\nCsv306xjrBZp5dv73OeIamrMl2BRq5eMUCdSXNMJ68zEg4d0kYkw1XRzDhOGjqZ7N5wDSGV+zO7T\nTNEfHe1LLQE1TZO1avY1C+etmZKNWRtaXUvUlpbi2UHxZJU3JFJvz9pMpBJqGRsZGZ/WeEJEVEpn\nqZyOkZdephMUJqJYIyKTzxKTIh8/qnqXfjPFzn0isxezRjBqUqi3v5F2UwnJTJQLqQbfB94p1kc+\nWk/rY9oopuK4ZexfBVVoqrAhCilht2pnWPU/plh2UqcSUsz+HZX9qBnR5P8xJZUnjDypraZq5W+9\nkijJSrlokVE98q+neKvzY3mCLFTP/IEgngKGkS5J0NrfTBgpI2Zqsx01QiGi5uZDScNXtcpvhEJE\nbne0/ba2+NItzEm2sVFS99T95/vKiClAVF0d22+WP7l+fTRE1ujcataA5OaSn+tVq4yTWtYuU3L1\nyLtVSmU+GQmlE8aWDzmHhpEB6TCV+TG7T3K1qZl0aUyCVUawbXaWag4coNbZWeXY/PVk1ZSq27FS\nYePb2qY6DlM8+Vc7xU8bI16tqm35nE+94yZqJ9XxVdARpd06OkxEiV1zGbKlnps9jjWhtrFHbSbt\nS199bRlREFNVpfT2N2uIo2cmxKBFOJMRW74PvFKqVjS1SCMjdw5y0FE6SttoG3nJG6fgrabV5CAH\nVVN1TK6o+l8zNccpxOyfi1xxKij710ZtRBRLolkb7zv0
vhgiqVcShe9XG7XFnRczDx8SKd78MYWz\nbX5CEE8Bw0iXJKSzP0+kEtVrZND7AeYJkxZpJIolgcuWRUknH1ZbV6d/bC3VUC/8Vb0tv05PLd22\nTSKrtbXaYa18rmdDQzxRT0Qa+bqYeg8E9PJj9eY5lfzVXAzBXSrGHWmjmdIiYYuF5GpTApqWJoNr\npvgp468nrfWpQN1OojEnIzVsfR1JqmUrSWQypHEcXvF8pJPot81Ec5uItoa0yeoJig1p1TJCYghR\nfG4pwzaSnHWThdrqwUsvE4iomN5QFE8jqmQzZecjYPY41nxHxR7VgojwjCKZoqnl/qqnjqkJG58L\naaYP6vzKNmqjEIWojuoIBCqjMmqjthjn2lW0Ks4MiLVrJ7uynFcs+fxQfj92HPZPrX6q/7Gc0/W0\nngqpkNbROmqlVmqndnru0HMxhFhLzeykTnKQI649fk7MPHwwqngLZ9v8hCCeAoaRbghiOvvzpJWR\nIrPhqUTGVFsWXquX66lXTkTdV74EidNJtG5dPFlk27rdRHa7pIqy9bxxEa+WJqvdyfe1vT2e8GvN\ngVZupxFizeZCTRQzoY4L5AgskNE6+zupeU8zbdq3iUI5UzIiwa10mnfZyaYs1SlNR+FspsSkhl+v\n3k59HPa+gYhe53aMBKLTpj5eiIgc3LI6jfEw6E1/sjEkwwkKUx0dVkinUWjNs7UqqNTaJpkYZzcn\nNHZ0uUIw9ZAsz1Pt/qquj8mDN99hobJGQnTVfVDnZTrIQV7y0m10WwyBZCGsaiXRTnZqpVbNsFpG\nQhuoQXH8VZNBfj8b2RQFlyew7F8Jlegei82nlprJclfVbrwVVJFQpUwFIQrRKlpF62k91VGd4fMi\nkJsQxFPAFLKlRKmJUGur5Kj6/7P39tFtnfed55cEQIgvIgG+GaYp03QiK87YLhmxcRLGBVpT9ZB2\nQ9QTbhRvDtOzO+DO+GS3ezqxN+2cnHZ3JzOd05w5090507VmWuXNTCNbtWVFVhwqAWlVSezaieg0\nTc02Cd3IDi1LASVLFqm33/7x4Ln3dx889w24AEHpfnFwSAD3Pm/3Eryf+3vjffqBE9VNtrvbHrCm\npwUoSoshj8eMREQ7dq61vMRJX5/YJxol2rlTP1a5bXu7+bksRcItrxxg5batraIPdR5yrHKOY2MC\nQCWoc1dhO8ur05rK9pNJfVKmwUFz7F5iX8uN0w21QarkSrR4dZ7+XFrUKnwMNDV37d9ZcFsyr0u6\ng4g6SCTmWSZ/Fk5VXmG4XbOd2o/ltabhHJmxk8PFt3NkgmezzXwqnYNfGMxRjlL0DCVpkcYc6n3q\n1pmPfZD+xndcoVWitQJ10BQdq/Hldb2jplVO7pa6six2rqJEVgsaB6cttMUWdnKUM8Cui7polEap\nj/oMCyCPlYxTvATu+qiPClSgVmot+ayXeg04VD/roi4jk67MbCuTC6ngK/tZpMWShETy92ZqJgnJ\ncj0lXPJ9pFsuXx8JprzWqpxrO7VrM+C6ycmKHBTQhtoYheAZypdqZYnSgZBal9IJTqTLkQQcDnES\nZnXzUN1IJyfFe6OjJoyqlkL+Pi83wvuQ2wwNEW3fLl5HoybEShjkpUhUy6vbU42b5fGl2ax1TVVX\nYTXZkpNVV2e55seCz7urSw+XbueRrg+nRE1uCuKGSehqWyrfJU3SRASi8U+NEx4DjewfKbF4Vlom\npT6tqaUq53zqIPMfZz85g5cbdLnhxTQJwE2TSBTk2bKnaTjNxj2peW+i+F6l5UqsylGaxXB6+Xcl\nLm7zYp+FHHX7OI94wqMOepFQdJss7+K4PBt4kN9R1aiTWI02ndwtdfGdTmVUuHTwJsFMxmCqtTJ5\nEh75kG6ujdRIR+loSYxmL/XSNE1r3WFvpBstLqx8DLo+4xSnJmqidmov2UeCqlyTIRqiPuoz4JBb\nY2Xm4DSlCXlrO5PF
v2AO/Ha1Vvm4kpT0lH3WLrGT7E/OLcxmu3kVgmcoX1KtadWyfKpJbrjbqkyW\no7OCSt1/f74EODmkcndYbjXk0NTQIPrnYOlmKZQxoXwO0aj5+cSEFWwleO3eTdTUZLWmTk+XwmUk\nYl0Xaf3UwTefu87lVo13la693JLpVRwUda7GKlw6jcWLi29PjzO4Ou1b7g2TEDxL5VbSpAQii9fT\nhQ8VaOrQlPaCvtLSM+kD6U1hTS3nfOomAUQNB9KUPjROy2sFW/BKk/lPtpv8u4Dy/acc3ucgqiYd\nktJhlO69YG1taRqnQ8U+/sGjFXicUNyn9cCvOZ5HulI1PcQvbv6ygqQn/lciRzn6lfyvBAZ1Xlwl\n/VqUy3W/LBdYdfGdWcqWuIryWEVuIdVlgOXj5/ORYMXhaIRGLEAnrZLSkikfavkS+eD9N1ADdVAH\nbaEtFkiVbekAVffopE5KUYp2027LPvL3IRoy1qOf+i3geQfdYXymAr9aa5UDorpuXs8RuYbcqrtI\ni2E2202uEDxDeZIEA+m26ZZZtlKpSW54WRPed0+PsN7dcIMAJlk+RAXCoSErpHIrI39K+JKApz77\n+pwthdzKJ8eeSJifxWJWILzrLrJk2AWIBgZKrbT8GY8TLS7qkwBxF2UJpXfcYXUB1kFzLCZeT06W\ntuX35oLsx67+p7Qg83hXL2DIYdWttqjTvqHrbnByK2lSApEerqcrLT0zfsjemrrZtUxETQyse+am\ntBf93LW1lfQA6SY7m5v6fpq1H7PpS3fYvaCVDmy8w8540VX1m1TwGMNZoAJN0icpS+s05nIepal0\nrmas6yWapE/W1BrjDRQFHHiJk/OSMTRNzueWCozlZiH1A6y8z920m3qox6ih6VbeQ3Ufla9VeBqm\nYct8JHgu0iIN0iDdTXcbkKmrwzlKo0ZioBEasWSb3UpbCSRcX50y2Eq42027tS68bg872OU1O3ny\nI7kOHdRB3dRNy7RsWWvuwtxCLcYa8DWV6+YkHmcrEzvZxdCGVs/NqRA8Q3mSCga1uJC3y0Db3+8M\nh3x8w8MmTKkgpSsdooIuf27daoISB/GJiVKrKAcoO5fZjg4TlDlk8kRCdk/pdjw9LQBOQjd3r5XP\naNRaz1ONd9WNWXfM/Wp6WvTR12e9MaC7aeHlfOLg7DdRVTVqc4Yimv72v6KeL/wBjR38iPbivByI\nrLT0TGGtQFNzemvqtSAJ1m37RwhrBeIX/RLKeC3KXtIDpJq11mtCH/m+tG52s77k064vvwCZZm3K\nOaYWcoQDacKhcZp0PMaV2U/dzqPqW2z9yRsomhfwbiBX6sJaesRM9+K/ozH6qOF+qoMR2ZbqFuvF\nmsnnprNU2s2Rw5WsoamDYNmmCmO91EtZytIyLVOWsrSNtlEXddEYjRlWOL59IzVa3FwlfPI6nFto\ni+Xz7bS95Jg0UAMdpaOONTtBoAQlSkq/RClqicm0e/BtZNKhDuqwwCa3uLZRm8XS2kiNlmRFco7d\n1G1Zg17qpQmaoCxlPQGi7hxRz+0CFaiHelzPYT83Wpz2DxMZBasQPEN5kgoGlV7I+3Wt5ODDwYW7\nmwIi4c7YGNFXv5ovGZ/anlPpkELB6iKrjkNtSyYS4k/pNlsoEDU22kMkB92hIevvW7aY20nQ5i6s\n3OVUQqZTP+rYec1SmUjJ7pjbaccOAdHd3VYXXdXqLJ929VPrHQyr4mpbq4KAVZKbW+umqF/q9RgE\nfKzKPZ8kEI0VoZODT5pKAXCZ3DPCqnDnRXz/ePHnMJklV/hy8XIrU5r97frVwV2SnXPZDXSl3kjI\n1Gmapqkj3+FoAaosTi5N6hET7sXfJh7PqloNdTDsBKc62SX90W3PIcWp/iQvEaJmgVVBTq4Rt0Dq\n4jl1D2nhbKZmCyzJ9VHrfcpHH/UZc0lSsiRuUyYDUh8TNKGN8XR6SIuwtt186fbqGs
mkQj3Uo3VP\n5sfJDuacIM8LjOrPWO83Wtz2D116g1MInqE8iYNBEIla/LpW6oCoq0s802lhdeSWwnQ6b2yfywnY\nkVCmZlq1m48EwK1bS8fB4xjHxkSpFCfLJM9qC5ggOjJC9O53C3huahIutHKtp6etUN3dLdyFJeTy\nDLfqUwXdri4zjlXOq61NzHvbNvH52Jg1aY9TLU8utb6pepz4c3jYe7v1pqqAZ5rKu+rfQPG/l7ED\nG+fWWmkSIkNp8nYMvG7nUZWeTzrwkaA2RNaEQDrJbTuoFO68iEOhDm7TZC6Xrg6nl/Q5qnV1nIjS\nRYvv8DXoSl2J0pQ2IMEN4JZpuYw4Of0RUwHALulMyVjJhC83gLCOwhk4OKTw39X9dGNQXWr5Y4AG\ntO9voS22FsYRGimJ53QCOG5BnKAJCxyrbrcS8Pg+7dRO0zTtyeIpH13UZbS1lbZaYlJBKAHPOMUt\na5egBO2m3bYArbrX2sGcX3dqNZOvDlzlMZdj8+viXa5reChnheB5naoSeKzUBZPI2ZrG3Vh1yYMk\nmHHL5+CgADcJXdLaqI4XEJDqZT6yn927hWWRu6uqMaLSisctjq2t5u+qW/CuXWLMo6PWz+Jxsw8e\n98nHp1p8nZ6plFhDvk82ax27zuXW6diq544uIy+RtSxNY6NYQ79Ji+pVgUFPADUx7RTYGBXxv5fJ\nj2+cW2ulSYgMeT0GVTxWfuRkePVjhZPb6qDRi9z6cgPTAhENkrCG2rn7SqXJvGCYvMZdqcuV34tk\n/xfV+iOuWqOcsszq+raDU/tRuLevQkgHdVAjNRourHZjkBZS3cMteY+M2ZSPPurTutHaPSZowoCv\nO+nOEjjWWVjjFLdAZoYyWiufXZIk1TUYhBLA1Y1TxEJP0gANUC/1auuDykeEIkZMqLruXiyYO2iH\nJa6UyD0+d4qmLHC6SIu+zjE/51oo/wrB8zpVJfDoN75TB7mFggleKvx6HZtdCQ91X9XyFo1a3ULt\n5qMrxdLTY45XxprGYsKimUoJi2U2K+JKeQIcaTWV26sJhXTjdsvIq85Hvnay0Kpt8EQ9/Kkrp6Jb\nD+mq3N9fCpU6V9tqluCpVY1ZogChp4r+euWO0WuN1f3dRJecaKHKqjQJkSGHY2CB9zfX6sK3Mk3m\nP896NpJ7ObXTVHpBoJtTnTB/XcvvRfJGXlTbZUStVkZeDkZbaIt2DPI9p0y2TnCluuumKKUtkWIH\ngKq1NUYxo80RGjFKnzg9uHuu20Pn+ttIjTRKozRBE7SNtlGCEhSnuJHwCCQAWMZMqvGlTo9+6rdd\nd50FU4pbUmUbOkhV3+MAnaUshaofheB5naqS5EB+4/HsQFJ9X016Yzc2nUVUhbFbbskbbqNjY9ZY\nRvlsahJWOZ5hlccrqu6zlpIun12g9v/zAOFThwjNayVgytu99VZzv8ZGAae5nD45UiRC9OCDYtzS\ngtvWJqy0HNp57Ke0Yk5Oip/Ly6VQrx4zNVEPh+CODr1lUgVJt5I68pg4lXwJUkFY4p3EXSMDg54q\nynGMDmYzt3WUSaOOJ8gXAQVtga1F/GhgNxg08utqKw9ZNxE9liN6KU10kR+/HST8ZruJyqjXbtuf\n032FSsNeJVC2kzNYVvH+zDUlr+dUPSRN8Rvnqe7j1aJaoILFKigtnnaSVs8EJShWfIBEmREeCykB\nUwU3O/AqcWH18YhQRBu32U3dBlRL2L2b7naF50ZqpJ2009aKa4nVzcOSEMnro4EajLE1U7MFKFUr\nppObrXQJb6EWow27mwb8PQ7Fk0b1YH/nUajqKATP61S1TOYiLYPt7VagUeGXX/D299uPTXdhXChY\nYzwTibzFCiohTn3ybLRqoh4VVmXG2JERotH95gUpZuZKwJRbILn7bSoloFOXBddprKmUdT343HTW\nSb5GsZjVTVin6Wmxfr299u6w8ngNDYmSLzy+VN
ZWVa3adsmbpIK0UlY70zK/qNsMSXMcx5gmW2h0\nW0d5bh2S+3s0Q1UT4qqlat5g8AueaTIP2Xf4ix4SBNfO3uvXNlF2f3Yo4GUbJ1Xq7hvKKq/nlO5C\nv9YX417jPMsBVBVCOPQN0mDJPHn2U1kGhGd3lRlxJQQ5xYKqjxjFaJEWPVsivTyaqIkiFHF0//UT\n58kfUYoSj4m9LX9bSVkVdXu1LzU77gRZ45tUK6bOgimPSZrS1Ed9JZZQN8kbCLwuqe7c8xLfHIJq\nsArBM1SJOAzwZDPlXsyrsZh2yWu8goPcTrW4qVZPCXdDQ86gJy2R6ns33mju19oqxj0wIPqMf1pc\nkOIP9hsWTwmYXV1m7c7hYaLOTvG7jIHUuaByi6f8XE0cxK1Pcq5NTcIyK9fAa6kU9XjzBEHcasuP\nPb9ZwefQ1GQdqx9rY5BWys2QGddOtXQTJiJH30XtOjLT1keLrtmZIaJ1tww2vMtNYCVWFeQNhkou\nXnhdziEqWjpBRG1k/idtKv5sIcPiWYlF0ot767M5onya6K/HiVY34d/d9SrdhX6tM3h6jfO0SwIk\nS5o4/U3JvzkJjLrstmofur4SlLB8Zgd1W2mr4b56F91l1KEkElmHvbjx3k63G5ZT7iLcRm10E92k\nrdnpFGPpNOZO6ix5f5EWiciE92ma1pZsAYFaqVW7bk41W3OUM/aXllCdBdPufPT6PerkSu43vrnW\nfxvXukLwvMYUdMZZDjBBJBLS1XCU4+Yur06ySy40OmpNMCQ/y2ZNEFSf0WhpLCWHQPW9hobi781r\nhNycxc1WPrnltbdXuNbyGEhdtlf5HB01gXx5WV96ZMcOMwsuz5Y7NWU9dk6lUqRLcTxutcjyOXML\nsHrs5RySSatLMre+ejkXg7JS1hzcAlY5AF7RnP36LqbJ+MZezxLNDfqP79wMVuJqyrx4eYy66W99\nwWCazH+YWSLz+I2RSYeLJCydy/r9/H59ezlFLlXSQagNU7nlKao9Bp34uHbTbouVz62WIweGOMVp\nmZapn/oNWEtT2gJJEgxjFKOdtNN3vOdO2kljNEZt1Ebt1G6bEMfp0U/9xto8SA9aPnMaj5OFM0KR\nkqyzcYqXWDKTlDSATgKeXRxnX/Eh2weZGWydYjb5OqiWULvjbgekTtZrJzD1G98cZrcNViF4XmMK\nwoKkSzwjy4b4gQopbkFRy5DoMs+qQGpnfbUDWgGfeQMsl5f1CXTk0y7Jj/rkrrR2z85OPeRGIgJI\nl5fF2FU3XgNoYU1gpFqfcjnrfrIdCW7crXlx0Yz7VI8Rt3Cq41ePPYdCp/jaZNK+jqfduejXSml3\n3lU7vpOoSuVUiioHwGsxZ0Oq+StNIXD4lLx4aaOXxNLl856Xztb66EKH1UrKIy2pL1Y7608AtVOD\nLpWbW8hR+kCaxg+NVy2zbrnW8Uq+o+o1g6ddDc8kJS11Op0sWxxOucVTwouEJLs4TJ1lz+sjTnEL\n3PJEPeqjkRrpFrrFiH90cnH1+miiJtt27GC1mZrNmNK8eE+1qk7SpGUtpTtyP/Vb4lHVGwJeIY5b\nXPnfAt/fzXodlHWyXv82NqtC8LzGFIQFSU08wy1fEorsLJde2tZZ0uzGzS+u1f14vUtptRwelsCU\nN7aVgCQBk1tDYzEzGQ+3/KnPeNw6ltZWe0up01PWueT7NjSYbXO4jcfFdmNjRNu3C1jkgAoQ3XST\nsEr39YljwqGXx4WqwKZLtgSIJEb82KtQaBdfq8tQXI2YSzvYqnZ8J1F1wbMcN+FazNmQCjjXcppR\nN1Ipk2TkxcsYXSQQ0W35vOfdp0nkDBrz16Utl1YKY2kSh7+jQHSsmsGZsqMKbnAE0IS1vQNpwmMg\nPAaamqvOXZdyL56r+R1ViYKKkZPQIWFqjMYoS9mSNmV/YzRm1NFU64xKoORutNK9VloH26iNeqlX\na62MUMSSTM
jJ4ihrcd5MN9PddHdJ6RW7h86t1u8jRjHP/ekerfnWklIuHdRhWctGatTGmyYpabFE\npihFHdRBvdTrOWZT/Vtwqs3Kz5GNtE7KucqbIyGwWhWC5zWmasS5cSul1apoXvT6sYCqF8xObrY6\n66uM7ezvFz85xE1OijZ5TOfNN5tWuslJqyvs6GhpzCJA1NxcCqLqvioE8ufWraVxlg0NZkZbbnHs\n7RXuqlu2mLGSvAao3bOx0d6FmMOZ2t/UlNU9VkKo6o7r5dhJ2QFpENZML/1v5vjOcrWhc/brqltN\nFSlq4fdzdGB/mg4dGqe1MixREsYW0+RMKm6fu6icpauwS9/tuYFpze47BNCR1ya8wvj4oXHCY6CR\n/SNVs3jWw8VzkAoqmYuEDrckQ7y/ARowwG+apmmURqmXektAkceaLtOyxY13kiZLMtruol1G7GiE\nInSUjlIzNTtCHG/TS6mVXbSrpOSJ7M8NNu0+S1DCyFIr20lS0tbau4t2WWC9gzos2WVV4JTQnqSk\nBS5VeFePm90xd/pb8JLddiPkNtfrXSF4hnKVvMBV3VV55lmvbn86yFT3zeXE5zJpjcy0qovt5E8O\nI3KsQ0PW7WWtTZ45trvbhMTOTgGt6bR1XFu3ijHoINzumc2WWhYnJ52TC3mBWgmd/LVdPOrOnVYw\nlzGYHOCcss7anQvqtkFY33TngQqi1yNghnJRmohAdOD30vTYY6DHHgPNlWGJKjZDh9xIxQPJ6CCm\n2ol+gmwvTc5gWrP7DkpH5bi5eh1rmrzBfWGtQFNzU1WDTqL6uHgmKt9SqR4nGVfZTu2eLF1uoCph\nJE7xklhK/rnqjilBUX1Id1g+bxnbKOMWVbfdSZo0YkaXaZlylNOWPOHxjzImsp3ataDHYbid2mmU\nRn0nE2qmZlqkRUsdS/mIUpSWaZluoBuM9/qoT5tAiEPsIi1SlrJGsiR+XkhraDM10wRNWBJF8WzB\nPMZ0iIZKXGjtjjn/W9gs2WX5uSLPn1CmQvAM5VncXXVkxBpzqVoj7axWOriQYDQ0pLc+qjAr4xgl\nnG3daq1zSUR08GC+JK6Uw58OIKUFlYMjt3BKd9JUyjpGaaWMxcz95fqoVtOJCefkQl6fst22Nqul\n1OnJYzCle+wNN5juvF7Knehg0E9iKCc5ldepegxjUU61Ju+/P1+7BEZBB6ZdyypS1KF/O06PPQba\nv3+kLIunhLFMgWjdiVQ8kEyaSiFGfc+PW2TQoOfWXr16UlfTzbVe5+xHQbvaluvyqx4nr2VQpCSo\ncusaV4EKNEiDFqthP/VbXGylCy6HUB4Tyl1sG6mRuqhLmwhI1oAsUMFw2+2kThqlUQsAuSUPmqRJ\nC/DJtviji7pogiboZrpZC7FOjwZqoDjFLVlpB2jAso1M5sMhM0tZ57HnRdvSZTRHOQtETtCEAd9S\nTomJ+qhPC5perPzViN+shgpUoEma1LqBhwrBM5RP2ZXU6OwU4MFdOHWw4AQX2ayAGLWOZTQqXEol\nHOksnmpf/B/w9LR1295eot27rRldOzsFhMnX0u1UQm4kYoW7m282614++KAA7rEx03o4Pa1P4HPz\nzdbsu8mktV272Evd0642qe7Z11cKS2pSJd3xUuFPB4O8nWw2mHNLPVeCKOvjRbpakxK229rytQPh\nNJWSy/WqHFHu0wuU/twBGj9QekNAUtTamwWam5sqCzpZM74uE+wscDqIHj3k7QAAIABJREFUUd+r\n13g8ovrypOby4+bq995Nvc7Zj4I+p8p1+VWPk992dKCqWrs4hEQoQsu0bHlPlvUoUIHaqM2oNxml\nKKUpTYu0WBL72ERNNE7jtkmLnFx9dXU6pWVStsNBbIImaJImSwC0h3psrY8gZzdaPm8islg9pbWT\niCwW06N0VDt2Dp58rmqCJ102WTWBkwRVuQ47aIcxhjvoDuM4qVZ+bjHldVT9
nI+bxUp6PSkEz1Bl\nS2c11JX/4HKCC25RtXtyWJTupmqGXFU6CypPVARYLZuAsIoS6SFXzaLL40mlFVdXN7S93bqfdFWW\n1uLhYQGuXuAzEjH3c4sLvfNO/dpwF9xEQr+Nenx0LrUcgCfss6P7lt1NjmpCn67WpHr+1CSZz7Vg\nfglKaaL075XeEKgHqZYdCaJjh8Zpcq1Aa4x+VgtWsAmN2v7lx801TaX3bsKLUH+yc3N0q5+pHiev\n7pK6ups6i+IgDRpwFqUoLdKixT1WhUK1lIgENOn6CrK6uU7SpKOrswS1buo2Mrn2UE8JFMYoRgM0\nYFhH5RyGadhYwz7qc6yLyduyi/lUH73UWwK6EmrHabwkvvVBerAkVvNd9C7L6wQlLC65shyNnAfv\nSwJvK7VSL/XSIi1a1pMfjz7qsz3/dJZYWW7GqzaLlfR6UgieoRzllPBFjf30Gy8o2+AZUe3KfKiA\nq3uqQCLHLuM3pWtuR4cVJoaHhUVQvr7rLnP80uKpWg7V9zmQ2MVwdnaaUBmJiO102WNVS2ZLS2lb\nN91kurcuLpbG4N5xh4BAmWxJJ7l9ImG6yMr4Wul+qx5PXYwlT3DELZ5uyYL8fK4r7eKkcmtc6mpN\nStgeGtKXpqmKrgXzi07l0NY40finijcE9u33Vwe0ynSnWnZKXEHTVEo/RTl8VKvhbwqpoOI5CRCV\n3rtxvwgNV9xOfO14rKTfi3m7Y6C6cKqAYRe3mRWVbUsghceT2sV28mytchsJk17qQKqWPd2Dx2hO\n0IS2NIx8SCBspEZby6Yue+xtdFvJ9iKD9pjxuo3aXMfKH1nKGvsnKFFiUZYPtV+ZpImveYpSlpsV\nMlFTC7U4xvzKY65aTP3oWkvUdS0oBM9QjvJiaao04Qvvo7dXD209Pc5JeWS9Tql8Pm/ZXiYq4hbN\nyUmigQERI8nb2rLFBGHuOsz7UPeRGWbHxqwQK2HXzhoZi5mlUCQkqZlqJyas69LWZh3X4CAZWXud\nYFOFMbdyKV6ti9xia9eWHCMHQbdzS3XD9nOOBWkhlet08GC+soY2mcqFd0elyZ22VBWICh9fo6lD\nc/6gs9z+fEi17JS4gjpYrt+fz9NjOaKX0kQXbTinysPfFFJBJU3e1kR378b9ItRr6/Upe1fbyoHa\nLlYyKBDgbqEJSpS067WMBgcsCbbLtEx91Ee7aJelvAqfx27aTXGKW9xQdVDNb4RIiynfp5VaDRhT\nY0kbqIESlKCx4oNDle7RT/1a0OSPCEVojMYsfycyoQ2PNZT9eQHQERoxMgAn82Z2WhUE+WOYhi3J\nh1RrKwfRbbSNGqiBOqjDsdyIPOYyYZGbpd2pjRA660cheIZyVC1qBaoZVgcG9Flao1EBoNJNVk3c\nE4uZLrf33583XEnV7Vpbze2cYFYHwm1tzlZZoNRtV93faV9dW6OjYrzcNZaXs+HuuzrAk+JuzNKV\nWEq1DutA0k5eMt3yMcpasBLQ29v1SYn8nnuVWEid2pL7u8VPVQXUypRTkiSvqop7cz+Jb/12Io9l\n3CpTNV2WlWv53EKORp8apdQXU7R8tjg5B8v1wXye/jZNjpxT9vADNNw5NVUL11UVMio5pO4XoZvb\nx93+OypNlQB1jnI0SqOUohQt03JFF/N2+3JQSVLSk8VRF3/pBsV8X/67as2TtSpV8e04FHLLX5ay\nRrvc6sgf0vq5TMvaDLQRilCi+ODv30V3GRDHXXNl1lme0Ib/fUqwlmP+Z/TPjBjXrbSVQKA76U4D\nHo155k0An6Zp6qEeSlHKaOcOuoPaqM2SXVhdyxEaMSC9gzrobrrb8rkb4OvcrYN2mw3d8GunEDxD\nOapa5Sv4RbrqzukGg5OTArDuvlufYEdNgmP3bG52r4M5MWGN19QBMf/8rrtM6NHFecZiRNu2lcKw\n7iktofK1Gv8qY0TtAC+Vsh43vladnfbW
x74+/y6lbqVP5Bj5vDlI68BGdcN2Go9aq3RyMjgrvFfo\n0u1TVRh1IAJdkiS/qspNp1Fyvv5V51QpQFXgsszhfXptrXQYaSICUe4TOUr/cZqSe5P+M666cE7Z\nwy+OrUzO8NxU0BeCumRNKmRU1wv9WvVxrwyoaxEnJwGNw5TXWo9c8nxRrWNu+6oJdiZoQruPLhFP\nkpKOCYl0YCmz5eYoZ8l2207tJdbCFmqhJCWpl3ot4M8trmlKl8xL5xrLQVW1KHJglWNoozbDKqlr\nL0tZC3T3UZ+xRsM0TDfTzTRKoxYrKV+PIRpyBXw1gZO0yAYJimEsaO0UgmcoV1Vy8Wy3r5P1TS03\nwoFJJhLigMVrXnZ0WEHHCRL5Mx4XsZLc4ifb0sVY2j1lzVEny6bdGPizrc1aN3RkRGTilfvyhEo6\nwNNBkw6ypQVX164fOYEaL7fC4VBak2UJHlnOxo87rq5/Wau0EpUDXbp9qmI1lEqTLRHokiT5VVVu\nOvktIKm+rqE4vPfMzZUOoziX9B+mDeD0mnHV0DQR9RDRGAXLOgEa7pyaCjp+qprlUjazKrfGVAbU\ntYiTU2FKV0rFCQ6cst6q2VgHabBkPXm5FAlDuv4KVLBYOhuowYDBQRrUxocWqGCJJ+XWVBXmeqnX\nYiUdpmHbcjRqjKm6JnbZanlyI905pQNMXvJElnqR5wNfD2n1tLMkt1EbpSltZPX1k2DKzkIdBCgG\ndY6HllN3heAZylWVXDzb7cutXTIhjYTUsTErnMm4Re7CKsGVgyJAdPSoaOs3fzNvAGlbG9GuXaIN\nHhspP+eJfiQ8yJqXuZyZPdfrs6fHCsPlPmVdTHnxr8v4K7Pocuux3E6FSDUL7siINe6Vj1l3nNXE\nQxxInECNnwMSNmUG36kp5/I4XgFQPW6VSgddbq62un2q6qruQAS6JEl1IbfrX+mK20HCFXcDPR/H\n/2MR3v94P33kzbXSYRTnMn5AxHUOPzlMk9+Y9Ayd+Xy+emAdoOHOqamg46f8lEvxKruSN26f1ZO8\nXmRXq0RPLePknGp+6uBAVzNSVxfSLjkR70Odp6wnyhMVEZnJiiIUMepmEjkfJ9l/kpKWtnRw2Eu9\nNEET2lqk3HrL4yZlXCfXNE1b2o1SlCZowhXcLLGcebNfuT67aTf1UI+tJbSbug3wkm0N0ZAFvu3O\nY96WUwbboG+GBHWOh5ZTd4XgGcpVlVw82+2rS0ijA5TOTtMKpsueq0JLf79oK5nMW96XcMsBZedO\n676yFidPzuPkshuJlJY+0Vk6t261utfq3HXtnhzK1f10WXQl+HAglxAnwYjDrNyupcVshx8rDrXq\nWnM4dbKOqTG8dnDGgdgui66dBb1aLuFc5VzUyXGtTVNgMXdm47T5vAJzRJQioiQR9ZFwvR0nyn2z\n6Nb62UNUaF4zQczrHKuQjLQwtkZTuTkqNK/RlRTRJws28LVWoMHHB2n0qVFfAJPP5zd7SGHg8lMu\nxaucrKibxcLq9SK7nmvDepXdXNU4UyldPKEav0lkBQuv62kHqMu0TP3Ub4zDzkrHrV+qO6uUTACk\n1vPk/cnYSgl63FU1RSlLXCcXX5sYxSzrxqF6N+22WOm4C246X+rCy/uXVkv5Hk9eJMcst/Gy7l6P\nTb0mDQqz6LorBM9Qrqrkot5uX/n+9LQ+IYwOLHt7S2MPuWtpczPRrbfau7K2t4u+BgZEuxzOeNZZ\nHhtp57KrezY2Wi2IgJkJ160+KX9yy6N0Q+Zw2d5uhWM5RlnjNBo1LcpuNwuWlwWsLy/rjxXvl89h\naKi03XKhUAfEdqqq62o1lSbzG28zjTsoSTBMkva/QPrfspjU3Jx/EEuT4/qWZdmSUGjXLoPd9P4y\nAaZGNw9yCzlKfSlFyb1JGjs4VtfWvaDlZEWthoWVKyi3u6AusjeDG6DdXDnsyBIqRMxiuADCAVD7\noXYa
W9NnSpXz5zDkJAlnOrdfLg54TdSktQS6Wb84vKnQorbDrbsyVlQnbmXdTbspRSkjHpUn+OG1\nQb1Y6Xj/smaodDWWyZB02YW9nMf1CpRetdnHXwuF4HkdqxqJT3Rt2vWjJoTRlcxQwU9CIXfL3bVL\nJMRZXnbPOCthUP4uE+nwGpgc+NRapX7cbrkLL3ct5gApLbtyTCMjJlwNDQkwT6XMz3nNTSk5RhV6\nm5rKi9fkUq2V2awJvepx1UGhn/PB73g2OnOsL13vlq00lX7zbyVjTcY/W3Rr/cx+KrxrrXSN3Cya\n6voq25dl2SqQsM7aHTc2p/HPVRdgvMgJrvn8a2LdCzKrboXusE5WVDcLayV95xZy1HGgg3AIhLX6\ncLvbrG6AOcpZ4gg5bBkxhgfM8xtz+vnp5u8E405uv1x2pVzsXGT7qd82FlRN8qOzpMo42K20tcRa\nyeWUtZdDrt/yOGqmXrk2vA9etiaEsFBcIXhex+Kg4FSGo9w2JXzYWan4+3YJYXSxjWrWWt6macXM\na2GQu8LyupyFgtVS2tVlXYvpafdMtPLz4WFrQqQtW0wglv0nkyJZkEy6s7hoQje3BqsgzRMxqQDH\n3X6bm/Xr41fcWukGml6T61RitayFS62dKnJj24xusQ4yMr7+x0NUGFtzBwwJhkNEtI2IukiAyaTY\nr/CRolvrhzTQSaS3aHK4WSaiQTJcd9XsuY6WLe7+y5P85IrtpEhf+kXOqY2oMF6gqUP+XEQt51MA\noOYE13L+eAw09MRQ9eE4TaXHq9ymNtAdtpK++b7JuWRNLr5131EcrCqpv1lLOSUK0pU56aZuAfiP\ngbAfNLSmz5Sqy4qqxobabe/FSqeurwqSdkl7dHNWt+fxjhyI3ayVunjQLuqiu+luT+Vx8vl8ydjs\nrLN8vexci0OFCsHzOpZdGQ5dVlIvUJrLmZY9HrtpZ6Xi8Za7d9v3weFzZEQAmewnEhFWQGnZW14W\nVsydO/M0OSmArq9PWEWzWSuQybnK+cnkRRxOJZDrLJa6pyxxYrd9U5MA3HTaWiNUTbCki6lU3VtV\ngJP1TQETouWaB2HddgNNr8l1amG1rIY1//7787Wt01lSJ7Ly2pxBjSe9X3GNdQOMaSLqJgF2HApT\nJECrQFZwVNdXZzFOs3bUDLiKpdLRssX34/NQ21dVIJGRVreNB5C0QIJbXzopfTjBdWGtQNlvZH0l\nP6pIQWbVrUbCIY8up5X0LfdN7k/S8tpyTRIZ6cCTw8skTXqGgY10y1Utk3aJeaSWaZn61vpo19wu\nmlyzd6F1sgB2U3eJFVIHZE7r4uZmaUna4wGA5fZ8bNM0bWw7TMMW2JVtcYsqh9Q+6qMsZT1bconE\nOaUeDx5vyy2uEjaDLnUS6tpSCJ7XsXidRGkpdMtK6rWkBbfM2dVjtLPs2dV0lFDD+1EhkqgUOvjr\nrVutcKa2199vjTXVZVyVTw56gABgOUfdGNXEQ9yCqovllJAcjQpwVt2UJdxKIFVhV0Kwn2PoJC+g\n6XTcnN7zIj8wGcR8a9Gmc4dkgZEganP6FV/zSwwYxz9nZnwtNK+5A0ba3NeAQj+gp7MYq3DDXy9r\nti+ZXLHPbtZvJ5nwO6a0r5MdYDnNxU87TlL68J2YpwoJmQwFaOGvSsIhjy6n5SaOkvvycfuxngYF\nfQu5HH0unaRPjYM+VNBbAe3k1y21XDnVyrSzHPppy06yj67iQ8YmSpfZDuowSoNwleOuLMfVR33U\nRV2UprSREEgFYF35EA6KquWSx6vqLKrywbPe+k2Ao27P++HjaaZmGqVRRytyqFAheF7n4hfTjY2i\n3Ih6Ye+3pIUfeFXjPLnLLb/o1SUh4jGN3Bqo9sVfSxfYSERYQ/m4ZfkRnuSmv9+Ev2TSatFdXrbC\nI3evVcu/qJ8DJuzzsfM15GCbNXMplMxxYMBqsQUEYPNYULdj6AXq/A
BjNSyOfsCvGlbVmseXKjBi\nV5uzGmstxdf8ZQmMbUSF8TWaOjRHhTfXvAGGCoW62Ek3+FJBSYUbJ9jRQVYzmf+FeologIja2XsD\nZFpp7eZn16dfkCy2Y2T39WLVrtSqmCbPcFxNRt0I+bnwDsrV14/1tBy40elAOk2PAfQYQIemsu47\n8PFq1iiocXHp2iw3QYuf8emgTs5X1qmULq5cXhMO2Y2LAxt3fx6mYduER9zyKQG5kRpL5qrW2eQP\nNS7Wz/qq2/NzQ433tINoVZsh0VWo6igEz+tcdllbufVwdFRY33RQyuW1pIadu2gsZoUl/hmHsMlJ\n0c/u3QK2GhoEaHV3i/cEHOaNUizcmru4aGZx5ePm7XOo0Vk8uSVRth2JmCDc2ioAVk1Y1Nlp/t7R\noc/iyteQWzClRVRCBp8THyOPU/Va7kRda/XGQDlQE3R7RP7ArxqxoAcP5stus6x5K1BjV5uzmpZY\nvuary2R1LZVusl7E56LGTkqqGSOirEObHBQnfE6EW1nl9VeEvddHVhBLUkmcqC95sPhp3SL9WLUr\ntSr6ANc0lb8U9Si7JC66i+CgXH2dLLcq2JdbkkE9pw6Nj9NjAO0fGaE1n19cOjipRqmIctq0O17l\ntCX34eAnrXgt1FIClxxUB2nQm8u24mLLkxDp3J91MZU6SAYJ92PVQrqbdlOMYsY2uhqfXqX7nuLn\nBo/3lMDrBNFSXm4ShHB6baom4AngnwP4ewD/AOD/0Hxek8mGKhWPn5SWR7vkMJVc3NqBgLywjUbJ\nyACrfjYyYnV/dRqbaVXMWyyAHBZ1cotD5TUmZabZZFJAX1+fgHJ1LBMT1qy1sg6nhE439fUR4RML\n1PjoAUrvP0TT/2rNYh2Wc3JbJy/ycmPAz3EPuj2i6sCkH1WSXKhWcOhpbXyYr0rW3K3EiBellf3V\n13aKsu36HLbTzY+XcZGGn67i6xYSACznllReczDjbVdYm1V3PtlZtasiH+B6rSdldroIroarb2n/\n1j+Bci1+6jm1VijQ3NSUb+hUxcuQ2NWM5NtxUHCDh3Lmane8ymlLdxPibrqb4hSnRVos2Z7DrddY\nSdmHjIF0S/LES8d0UqexdqpF0y7mla9PH/VVBG1e/u+p8yvHfVenaljY60HXO1BXHTwBRAD8I4Bb\nAMQAHAdwu7JNjaYbSienOoryolYHparKseo4WRv5Ra9T4hqd+6pfCLODGt3aqMDLE+2o4KnOT+c2\na6fRUSL8nmkB6fmDOQtgy3jS5WUzhnZsrLTWqU7qsXK7MeAXZCttb8cOcc51d3uD9JqpTJ/Darrp\n+gbyNOlBz8vcCmRaD9vI2Q3VTk6xmbwtOZ5+EtZHCZ7NpM8yK/fpoNL5yXjN4WIfO0iUc2kgoqNs\nblNkAuUYGVl3DaVZ29z6202B+KHaWbX9qBoXNZUaV+tdG130vd7B3isA6LarBjx4OV7l/h24jZeD\narnnjRsg8xhJPhavgFeOO3Ct5eUmwUb/XVZL1ypQe1UtwPODAL7BXn8GwGeUbWoy2VD+5QSlqoK2\njnkdm3Q1veMO6ziDiEnUvc8hU8ZSTk9by5lw91jd9p7X5VPCAtL1+f3UceOacROAW1jVONaentK4\n2HITRvEEUEHEEXo9Jqplt26UJj20uWijrbUWVZoQp0DeXW51MMspxqlkCR+PfG5h2+na5vskbfok\nssKpen7xNtR14GsnYbbNYXsnVSlwkl/UDC4MBp5JNcjsrG5tldNXOcBRroUxCOUWcjR6IE2pQ+O0\nvEE1YN1kV4/Si6trNeDBy/Eq9+Lez3irdd5Ii+hW2lrW2vnJWstVb5a4jfy7rKauVaD2qlqA50cB\n/Df2+hMA/l9lm5pMNlT1ZFdKxYt0F+V+QFC3v3QP8dqOHYjp3i8UrPGa3d2lGWUjEWuNUO72K8HQ\nixV28uNrlD00R723rFksqSqs8w
RJvB87uNTBvpPF2glUq5HcRlquW1ocQL3GGU/y+Xz9mya8yM58\n5WZ55Ovs1eU2zbbpodJjxT8fJGs9TQl2EhKdQHmw+FpmqG0iors1/UnJ7aSbrZd1ILKunfxdzX7r\n8bzM/0q+PGB1kcUV8MCokRhn8PFgIDTIuppubZXT12axJsiL/OSBZGDrWVGtYQepAODH1XWj4KFa\n1shaqBzXVa5y5647rtU6p65n1cM5tpGqBXj+ixA8rz05gRsvpVKu/ICgTvLL0ms7bjGeaj1MCUZq\niRTVBVeulQRTvr1d+Red1ERDKmzL19y9WP4us/W6lTThayLrl8oxlZOxuBItLwtLp1N913Ktj+WC\ncj6fv7Z9DnmtTTvAky6lu4koVnyvtXQfucYvSsCzswr2F99rJwGK/L9HlES22UVyB2WeCKi/uJ98\nLV3bORAuFrfT3dTwe4zV7dM2c1WUf3++KjcxLK6ALDHO6FOjtoDjx7IYZF1Nt7bK6WuzWBOMi/xD\nCGw97//P91e9VijR5ljjal3clxPHWkvxGpt+3Wx1xzUEz1BBywt4RlGZXgewjb3eBuCEutHv/M7v\n4JZbbgEAJBIJDA0NIZPJAADm5+cBIHxdR69ffBFYXBSvs9l5XLgAABmMjAD/8l/OY37euv3nPw+c\nO5dBSwvw8MPzaGsTn8/MAC++OI94HHjuuQwSCbE9b296WrQ3O5vBK68AwDze9S5gzx7n8c7MwNj+\n3e+2bq+2DwBtbRns2QMcP262Nzsr5vfpTwOJRAZLS8DCgvi8vz+D97wHOHJEtL+6msGpU6X9vfji\nPAoF0d/amv7zxUXx+cyMWB91PoODQKGQwdCQWN/jx4F9+6zz3bcvg9VVc7wf/nAG27cDp07N48gR\n4PbbM/jxj835qfu3tIjXt902j6YmYGHBPL6f/rR+fQDgwgXxemQkg+ZmYGio9Hjqjo/b65//PINM\nxlzvmZkM9u1j2xfHO3/bPDANZOCtfS/r7fj64Xng+Mb8/dn9vQBAZjYDLAHzP5oHUkBmWwaYBeaP\nzwOfBzLnMkBLcfz/n/K6Dci8lgFOAfNH5oEskJkv9l88vpm24ueH54EOIHOp+Pn5eeAIkLktA4wA\n81fmcf+3gP9wJYMLAA5F59HaUDw+I8D89DwwX5zfADB/Yh44C2S+X2wPxf4uZ4CTwPz/Ng/8EZB5\nVJlfKgNkgfn/eR74v1n7fzgPfJydD9+ZB4aAzD9lgEKx/XeAzM9t1vv4PPAwkEl4PD7q9nK9RjLA\nHof9n8sAM8X1CPB8Oj5/HA/jYSQyCczeO4vsf8ni07d8Gv/18n8FANy2chumb5mG1Pz8PF489iIW\nexYBANn/ksUf7fwj2/Yfjj6Md95+B09/8mkk4omKxsvHl4gnfH+uHd/8w3gH7+DpzNNIIIEH/vQB\nnDh3An3DfZi9dxbHv3u8ovUN6nVLpgUA8K7ou5B6O4Wvf/LrFa/nucFzWFhYAADMNM1g39i+qoz/\nYTyMv8/8PeKI4775+/BZfBYPZB6oyno9MP8ATuAE+jJ9mMUsjs97P377YD//2cwslrCEC/MXfI3/\nxfkXsYhFIAPMYAYPzz+MF/EiFjPFv5/5LP4I9n8/1X7Nx/cIHsHD8w973n8Ws8jOZ/FpfBqJjPh7\nk9tsxPHbLK8/j8/jXOYcWtCCh+cfRhva6mp8G/36+PHjWF1dBQAsLy/Dk9zI1OkJIArgJxDJhZoQ\nJheqa3m1BqkWsELBTHDjx1XT7n03i5xM0OPVPVS1wpYbc8fnPT0t5ptKCQtdoSD6UZP76NxgeXkU\nLy7K09PCdVa1XHodr594TjcLp9N+QVs/ZR3V9naNy22Z1sea1+MMUI7rm6bSb+ApzWd2mWSl9bGD\nrJZAnUup6gbbyNrrptJxgIjiZO/Wyi2iTez3rWwfp/mp54Ic3xBZraHlWBj9unRXySpeqWe5U3bW
\nIK2YGyl1jXILOer4i47AXFmDVDUscrU8jrVyafbaj1+rY5AxoPVkAa6nsRBtHtf3SnQ9zDFIodqu\ntqIPjAN4FSK77e9rPq/JZEO5y2/SGa+lMry6sjpJt61dn/l83ti+u1sAYn8/0Q03CNDzC3C6eftd\nK7eSME4uyuUCHS+X4we01ONb7g2JSsVrlNrN26/rbLk3HerB5chxfSXE6WIinTLJyiyucj8OdFwc\nqKRbboqs9TBBlrInV9X/BmoiomkSQNpAJmguklnqhO/jND+nscr9hsi5Tqid0uS8LnZyIcWS88ll\ne2MYCwvUfeAAjR86FFjJlWqUDAkyCZFXpUm5v8JiRKN7orR8Vr3zUXtVc10OPnew6qVfpGoFOF77\n8XvxH2QM6DRNUzd10xiNbQjsceguNy7UTpX+36s3EK6Groc5BqmagKdrByF41o0qAQenfe0son4g\nwKmkitpnPp8vyXprF4NZrvyulZ/xV9qXW79+VckNiUrkZd7ViDHVqR7A03F9JWwtU6nFTbXC8ddp\nsn4jDyv75sia9Ee3j58nLz2ia2eSSpMXDZKZ/dYu5tNOfK7lmA3LTSiVJkdgLTmfXLY3jMMHzBJL\nU3NzPgZUW7klBqoGgJXcXylaAIO2eFYy9iCTM6kK4jvKq+WwVglSvPbj9+I/yPFvtMWrmv1Xek5d\nD4l0roc5BqkQPENZVAk4uO0rLW/cVdar7KxaXsar1iJ1c2v1YkHL5axutuXK63pvdDmOoC2ZXq2U\nfo6vtGwHmV3XrxznVY30v5UqRwIo+TdykpivIlktoSBhjZTutPIz1erZSPpv+1b2+6Cmb5CwSr6b\nvW4hyv1POUr/XprGPzVOhY9r1q7Ex5L0gJlm7Xq9PlOh3av8AqvL9nIYY4dEiaWR/fsDs3hWQxL6\n2v68jca+PlYCaNUAsJL7K2sFSn0pZet+Wi5AljN22Vf3F7rr2q2gjvvyAAAgAElEQVR5oyHKTm5A\nvJEX/xtt8apV/0ElUaqnZEyhaq8QPEP5VrnXz2pmVrUdv+U8/MLL7t2irElvrwmLuja8WND8lBfR\n9VGPDGKnHTtEjGVTE9HiYjBtBmml1Fm21ay8VRWDnPudXIMtaYPnaloKRjdWV8ulGguqPmVWWB7/\nKZ8xm33k+2omWv7cWfpe+vfYhf4fTJWuGR+nXQwrkb+SMZXKL7B63L6wtkZTc3PeoXODvmwKawUD\nsvAYqPsL3RbAq1U8opMbMQfI1FzK80Wwl7GrF9e8r/6v9FdtzpVakmsNUV7HW69ATLTxFq9a9R/U\nMajnYxmq+grBM5RvlQsNMsZxaEgfI+k3RtRpe517iG573Xt2JVT4dZuf8iJe+3XSRoIqtxT393vb\nx6l+aipFFI1az4UgxI+Jl9hQv7J1OUqT8W224BRPaxngJT0YBakdJCyS3STKn6TJamGcIhPEtpIV\nDGMkypvE2fYgUfYEJJIB9RHRDcU202zbDjKhspUsMZ8EEvGcaSqFVYfn+P9avND/zAgVmgti7BwW\n1VqadoBpB3dpqv7xUFQz1+1a+aJrxK2eqoXQa1ypDkyCctM1XHH3g7Dm7SI4t5Cj0adHKfWllGPM\nqHpxXQvQzufzWmus03qpgFxriPJqPd5oq6IfXUsWPf49FdQx2EzHMlTwCsEzlG+V63apuk2q7bjF\niPqJj9Rd1Om2172n9qW7bnNyAfUyLz9rmMtZ4a/G145GzdKWFu9uxXZu1Xwty3G5dpJbVt5K4d0W\nFMaJcp9YoPQfHqCx/Ydo8uNrNjGYfICkB6NKxWFTwqSEPf6tK/uVILZc/KnW0lSfSSJKlL6f+0SO\n0v+m6ArbXCiFTd1ThVqQ6aIrf24RPwvNBZrKTYm2ZQxqmu2XJfsYVjdxd2M1vrWKqhl4bmAaZwmX\nY18fc3S7dZIOTPSwkqPcQorSB5I0fshbH4W1AqXmUoQ194tg
CW/JvcmyQKkaCZxU5fN5LeA6wZ0K\nyLVOCuUVyDfaquhH1bLobQTQ8u+poI7BZjqWoYJXCJ6hfCuoeEO1Hb/tBrG9nxjCcpMIeenXq5tx\nMul9vkFZSZeXhaXTTywrd6vu7S0Fbrc420qlW/OqGX4KROk/9pnwxa8bpk7c4icz03Lgk7DZQtY4\nzRgJC+E02397cRs7F9lGTbvsaXGFzU25f+tr2qBeInpQmUNv8WcrCVDtJDPBUVDwnmb93czW5Fq5\nJtroAHEqdbv1E9OpAxM9rKQpfcBfIqHcQo5GD4xS6lCKltecv+A4vEnX4dGnRm0BTXdxXQuo0wGu\nE9ypgFzN5Edex7vZVS2LXuiiGupaUAieoTa1JFz191cvsUwtrtvsoIjX+ezo8Ad/G+hhZ7hV6yzF\nulqntZCvGwg+Y/7Giwlf2v79fhr7iI3FM2ilyfwWVWtnthDRUTLjMKUraqvDPk5POyAtPsc/pbjC\nSlj1Yvm0eyaLY9eNc5Lc4d3rMeS1RLk1N7yuC1TluprqwEQPK+M0fgjFPoY99SETD+ExUPYbzu4X\ncvxDTwxR9htZGn1q1Deg1RrqpJzgTgXka6Wm60aqWha90EU11LWgEDxDbapEN6pU100JOUG6sflZ\nn3LX0g6K+PyyWX/tO4FWtY95oVBe/VA3VTJuXzcQ0lQCIE7nVGFtjXr+YI7QvFZ90JdAJYFshEyw\nvItE7KV6g0JC2phmnwDA0+IKK9/XWTXVZ5fN+xI6mRvsFfb5+i4P65Rm7dkdjxyJOFW1/6BdoDVy\n+47aiDqY1ZQKP8HPr0CFtUmamst6bo+7zU5+Y1I7rtxCjlJfSlHHX3RQ7xd7jbhOP4DmlNE2yHUo\n9/+epQ7k2vI1Z4GsZ/lxn90IF9V6KCMW6tpSCJ6hNtQy5iY30JBw1d5uhZxKvyx5v06JatTxlbuW\ndlCkwqOf9p1AqxbHvFJLsVvG4VSqijdKNG6cJeeUYlHzHUrnN5Oq3J4nCOov7jtNRD0koHPUoU1u\nJSyQqItZ9W94h2cXETVr3lsujjdtvn+RbfNCn4f18uKKy9o3ni3kvIYBye07aqOsYxWDkMe7Q3x+\nasbbcsfhd5+xg2OGFdMuHlJ1sfWbHEltU81oG+RxLvf/XujCuXGq97UPwTNU0ArBM9RG5p5wlRsg\nSbhZXg7WHVYFHC8ZbFMp08U0qLV0S8hUrur5mEs5ZRyu+o0SLzGYaTK/xabKAG1lf1cQ5durQKV+\n5tRmjgTsRYioSbOf16edRdOre61drU+QAGIex9lGtFps9++aiVYlmNrNL03Copu1WUsp2b583knW\nMi/VOL883nDYKJdHJxCyAzvL+2PmnbrcZwdtQdAp463bOMoZu3UiRJQmKnykQFOH9PGQ3V/opt4v\n9lJ0T9Roc/hJby68qhxjLFl/o0/bx4zaTiUAi+lmceGsZXKdEst3FfrOUY6SlCQQaJiG63rtQ4UK\nSiF4hqqH3BO26u8nw6LpJ76xUnEwc4JaFYSy2equZbUSO9Wj7DIOB+XCW7G7caXJbdT90+QMPHL7\nISoFKhWgkpo2e0hYSNupgm9r0ma1tTy3FPuy+zzio68kWeB4rYFoldeS1a2Z2zpyFUjEi06QGTda\nrYzDUh7Ht1FJV5wgyQ7sLLGSf9hr/IGm99vHQaoZb9X+dONwgyxP9TUXcpT+v1gGZuUYFNYKNPj4\nILX/ebvF0tn35b6yj4VjjKWmPz+WzyAsppsly2gtrYMllu8q9M3bnKTJQNoMFareFYLnJtBmjsGs\nVOXWY6zUPcQrmFUrlnHTKsCT1e4YBAXNft2NS84pL1ZRJ+uWun8ReH7aTXS/LlGWU38FMl1Wo0S0\nWOy7Eoum3dMtdjPoPtX++LHSQaJXcLQ7Nl6OayUqji9/Wz64Pvy6bTtIgpAuY6sd2FliJQ9OGH+g\nfPvpb09rodEOynTvu0GW
E+DpyqGk/lOKCm+WQi1PHITHQJ17Ox2tkeVaHXVjSu5N+or/5Gt88LmD\nnvv2q3qoTVlLy6x6rlej71pbmss5hqGrbaigFYLnJlA9x2BWW+W6hNbyy3IzWA+rpRLO3ICTNeiE\nTka7yj/pss6pNOmBSaci8By+gSgPokMgyjuV98iRcElNkojt5HU7p5S+y4VI3fZbfe7j9PRi/eSu\nu3eQOyR6BUe+Pj1UuxIqxfHlD+aDazNN3s8zL83ZAJ4d2OliJdXty3HhVVWO+7EO7nQxm3x8qS8K\nC27HX3TQxLMTNPq0CaKpL6U8W4LtxiLnqcaPJvcmjeRFXtvla1zN/3v1EItYS8useq5Xo+9aW5rL\nOYYheIYKWl7As0FsVz01NDRQtfvYzJqYAA4fBkZGgLk5IJHY6BHVTqurwMwMsGePOe+ZGWBpCWhp\nAWZn62M96nFMQclpbpkMsLAgfp+aAvadq/3JWjKGfd72051blnaRwQJEw1OYwj54bJhrAsBhACMA\n5gB4WI5XOoG7CuL3q11A4+niB1PF/ZcAtAA4C+CYTSMdAKIATtt8Xom6fLTbDOCC5v1WAE0ACprP\nOgH80qa9LIA8gHMAGgH8VnEsLQBm4Wl9Dclj01ZsDxBrXMZh3nCVcZ7pNPP8DJZWl/Cjwo9wav0U\nRrpHMHf/HBJx5wZX11cxc3QGe+7ZY9lWttcSbcEluoQjrx+xtDmDGSxhCa888woKK+JkmLp1Comm\nhLHf7L2zRpt2/fD+Dr52EL9c/yVaIi24ePUi1q6s4SquGtsMdw3j9fOv4+TaSWMsj77wKJ786ZMo\nXCxgqHMIT9/3NB554RGjn4lnJ3D4xGGjjalbp7BvzDxR5Odu65V5JoOFFfGd0hPvAYFwav0U2qJt\naIm24MXffhEDWwd8t+tX/Ljw9XXSBCZwGIcxghHMYQ6Jck+yUBum8BiGqgc1NDSAiBoctwnBc2Pl\ndoF8valc0Kim6nFMQclpbiU3RVD7kzWQGzMzMIGuCDCB/JNeLba9B95gYAa4+gTQuApcvhOI3gDg\nCARQvBfAAQBnitumAKwo+0cBXGavGwA4fbW2A0gC6AbwsofxyT6uuLQLCDD8VQAv2HweA3CpuN1V\nm224hgF8G2KsV4rv8flxaHwPxNqsARiCgFkVTOWxKcBc4wqgbUPl9zyzEQej/tZ+/PCjP6wIdnh7\nkwOTaIo0Yc89e/DoC49iaXUJr0RfQeHeAvAtACeAtmgbPnDDB3Dh0gUcOynuqkjI8wJLN375Rqxc\nUP8ohKINUdzUehP6W/rRHG1GW6wNezN78egLj2LfT/bhzCXxh5UdyOKp+56y7Lu6vorb992OlQsr\nWgjkQPzoC4/i4GsHsX5lHTt7duKJsSeMbbc9vg0nzp9ABBFcKZ7ETY1NuHj1omWuunaDgk7AelzU\nPu20ilXMYAZ7sCcElk2q8BiGqgd5Ac9orQYTSq9E4toCmUrV0iJ+jowIvtFpfn4emUymrsa0WeU0\nt9lZlTNrf7KWjsGjOGxy6+EMgH3ALGYt/6TLOqcS8GdBWxLQCQDRdwHYC+B9AOIADsKEziSA7wF4\nP4CTEBBHsEKnnbWR63xxv9d8jPGy+yYABEy+aPNZFAI6ATEXaUHdCuBtzfYpCOhMQIDqFQjo7IGY\nfweACIAMxPH8BcQxBUzwLR5XQ/LY6KBNcyMiaAX6HeXzPLODuJao+GMPysLG2/tC5gtGe0urSwb4\n4CjQ2dyJsw1nce7yORx5/QhSW1LGfnvu2VOyz7bHtyHSEEGsMYaXHnzJsBKuX1m39M8BrzXailRz\nygK0ibiwrEroTDYlsTezt2QeiXgCP/4ffoyZozNojjQj+1wWLdEW9DT34LW3X7Os49LqkgG/R14/\ngtSXU2iJtmBn907c1HwTTpw/YYxppHsEiaYEjrxxxDJXqUdfeBQn3zmJh771kCfLpO6c0h
1rflzU\nPu2UQKI8r49QdaNyjmGtr6VChQLEv/lQoepGs7PC8lZPbsf1OKag5DQ3eVNkQ+Y8MwNkMkg8NIF9\ne1b9j2EJwAKEi+JPiu+NQAAIzH/SNbkzPAMBTT9i49gL4FEIt9NjMN1SkwB+AGAAwKsQlr5mlAKh\nA3Q+jxk8gwyevTKB9bdXncfW5XkWpeJWUf6fhI/1fRAutJMAfojSW52tAO5gr18CsAXAcQDbi++d\ngbCayeO5pvTJjqtFM8W+zynv83NjRrOf2szzM8g8k8HEsxNYXXdZzzqQhLjDJw5j5qg5wdl7ZzF1\n61Rgbp127UnwaY22one9F9vPbsdlEidFsimJ7/3290r247DUiEacuXQGp9ZPYcfXdhhrvrN7JwCg\nPdaOba3bMNQ1ZPR55tIZHD993GhDAtdP3hZ//A1owK1tt+Khbz2kPYaJeAL7xvbhmye+aazdV//x\nq8bvt++7Havrq8Y4AWHBXb+6jsLFAo68cQSvnRN3eIY6h5AdyGLu/jk8sesJ2zW3O06A93NO10bQ\nxzlUqFChglToahuqItVr/ONGjKte12JTqlL/Zh4X9ySAR1Cxq6Krpczu8wxQDCcF+iEALKG83wRh\nERwG8ITSdg+AU96H+QwyWCk2fCumMOZ0F1x139VJ59LLXWgjxedFzb6TAJ4u/i6tkmcg5neO9Z0C\n8GNY582PYQKmy+zNAJ6CWK8tEJbXAZQqA3N9uauuz5jJclwXN1J+YwdlLGYLWjCL2YpvxqyuryL1\n5RTWrwoLZe+WXpxcO4lkUxL39d+HX7zzC8f4zu1/uR2n1s0TXq453yb7XNa0qgJoibTgu9nv4t/9\n4N/h+KnjOHnhJNaurCHWGMO5y9Y7D3ZxpjPPz2Dvq3sNSFY1desUmiPNOPTaIUQaI7g9eTsWfiHG\noIsddZN6nKSLcku0BWcvncWxN63uyDpJ996OWAcWP7poiSENFSpUqFordLUNVXUtLZl8MDNTP27D\nGzGuel2LcrThEO3Vv9luoLOwulgGcSykpQwode10+lwaSVTQke8nAdwG4TZ6RNP2SwA+DOAd2Cfm\nkeoComdbgEtAN0Zwj9YUyOTFtTai2Y7HbV4BZj4+g6XeJbRcbMHsn88icSEhrJl/yrbj7sRnlTZW\nYM5bAnwMwmIpvSPl8cxCgPDZ4vN3YcItl1zfNgiL8irE2qvnhovKcV3cSM3eO+srdnAJS0airRnM\n+HbXs3P3XL8owPPq1avIDmSxN7PXAowzR2e0APjSgy9hx9d2YP3qumXNpVUSsFoyCYR4JI6Opg7s\nG9uHxN6E4V4r4TfaEMVluoxGNOKZ5WfQ1NCEt68Iv+/eL/Ui3ZfGhcsXLNB5V+ddWHlnBSfXTqIt\n2obCWgFvXHkDpy8K3/Erp6+gd0svRnpG8PhvPI5HX3gUR39xFLd+9daS+M+SNcMMzt57FqmjKTx5\nz5OGG69cm+ZIMwCgI9aBP7n7T2zXfqB1ACfOn8CZS2fwyAuP1P1NkVChQoUKXW1DVaSNiH+cn593\n3WYjxnUtxYJKiD58WLBdzeXVv9luoBI2PQKzl3PKApC641v8/MdtwFRBJA4DIEBnClbonIGAphSE\na21n8X0OSVL3QcRGcuiM2YzxNHBvxyxu7ZzC/ZhDPAhXYg9wunTDEhZ2LODwnYcx84nicTgPYWmW\n4uNXEw51AXgDwhr5dxAAfwRinglYj2eLsq/dvdVZiGRF52ACPeD73CjHddHT+RSAnp+ZwTOZDJ6d\nmMB68YSTgOZlrDPPz+CVZ14BngWG1oewx+1GhUY6d0/pFgsApy6eQiwS08Yf6vb93A8+h46mDsQa\nYmiNtRrtvOdr70FibwI9X+zBDVtuAABQ0RRfuFjAh5/5MAAg1mj944g1xvDygy8j2hDFVVzF+tV1\nAzoBGBl5v3/q+wBE7Oium3Zh4bcW8OrHXkVPvEfEp7
5xBD85K4C3LdqG0xdP4+TaSbTGWi3xn4WL\nBRx5/Qhmjs7YuswuYQnH4sew0rSCkedGMPHsBGKRmLE2dyXvAgADKAH9OdXe1G7s0xxp3lQu4aE2\nXrX6ngoViisEz1AVqZ7iH4thgZiYAP7sz2o/rmqsBZ/Tag2vJTYcohMJzCT2IZNNOM+9lgPVAaTy\neb4H+OA54MkjjIN1oLMEEdu5AgFnKiTdBgFhq8VtzsCqXcWx3Fd8zeArfiqBsXP7vEGnCm3N7ruU\nKAa0bCkCxekR7PlK8Ti0ohSipSKa947BNibXolkAvcXfh2FaRFUlIDLvOrVlJxmXOwEkLngHuVpr\ndWkJKwsLOHH4MI6WcYdoaXVJlDo5Adxy9Jay3GxVmJx5fgYXrlxAU0OT5X1AQPzg1kHEI3E89K2H\nDEhUEw2dXDuJS3QJC79YMIB05Z0VI/bzbwt/CwCINIgTqSXSgr/+yF8DAF568CXEG+MARFbZ93W+\nD5954TPoaOqwnUNXvAvRBuEAdgVX8N03v4tbZm9B6sspIyvtUOcQvpcV8akS+BrRiJMXTmJ1fdWw\nwgJAW6wNf3L3n1jAevtfbjegsKV496RttQ2nVk7h8InDaI22Gjc4Ord0lqyLTvymyGtvv2YbMxoq\nVKhQ9aIwxjPUNaNrsezJRs2pHsr8eJp7PQyUqaT8y6PQx32qcYYfgACuyxDAdr64XQrCUsjjJ+8A\ncBSlcaKq+iGAVZdJlisGkTm2ubitHeQPF+dxDNa4zwZgdcsqZj4xgz3P7UHijoRwG5bZbFMAfhPA\n4zBLpXQW57gOsQZvFJ/txbn9Ozi7wrqVGOHuum0QcOrn9MhAHx9aZ3p2YgInDh9G98gI7p+bQ9zn\n30AQtSTVsiBOZVtmnp8pKW8Si8QsbsG8ruZQ5xDyv5XHoy88asRfNjc2Y3zbOI6uHMVtidvws7d/\nhu9MfscS3yjH9Ma5N4xMtxPbJnDk9SMGSBprsG0CL731Ek6unQQg3FuJCGcvn7Vs1xXvwvt73o/Z\ne2fxwDceMGIwARGHyfuS73135bs48c4JNKLRqDea2pLC9z72PTwSfwTHnj2GN068gY7uDizev4iB\n+IB2Tb1IdyzLqekZKlSoUOXKS4xnaPEMdc1ow610VdBGzWlDM9oW5WnuNRqoV8tzidVbzaAqLWmX\nIBLixAE8BJHBVrq08uviFQhw4vp7CBhahel2OgTTXRcQcaM/hD7hj6pLEMmLfg576ERxLj+E+K/B\n/3NQ0Sr43/Yh8U/F2M73K3P4GkzoBARM//PiPN4LM/PsWQjodHOFdXOXlevO3XX9yM2tuk507+ws\nbp2aKgs6Z56fwdmLZ5HaksKTu54sG0pU115uAVVrherKm6jW5J7mHnTFu9C7pRdP3/e04cYq4y9/\n/aZfx+n103hr/S0ce/MYzl48i1958lfQ80VR/gQwS5W8euZVAMI19uLVi/i1G3/NMvYHtj2A85fO\n45frph/4aGoUTZGmknmeXj+NwycO47a/vA2vrr5qvD/cNYw99+wxrKD8PQnDV5lv+craCn59/6/j\n5DMncf7qeWAAOHP/GTwSFy61M8/PIPtcFucuqumYnaVzCXfKnBsqVKhQG6EQPENtOtnFJdST229Q\nuhbn5FW1nLtbrIvXmNcSDlYB5iBMIHodpnspF0FkuZX7vU/5/HJx/9sB/BkEvOVhlhkBgGcgYKsd\nztK5vOqULG4rkwJdcdhuD4R1N1V8bwQi+yxXG4TFcw9EnVFpXIoCsMulwtxfHQEZqBwc3dyqXVSr\n2Kl4IoGxfft8QycgoOTYyWNYWVvBB576gOe4QKdSHyrMPvrCo5ZtJZQmm5L4wb/4QQnsvudr78Hj\n//A4Tq+L+EkZ3yj3645348z6GfyoIGoTjXSPYO3ymuGC+6EDHwIAfOUfvoKFlQWcWj+FKKJGDdFX\nTr2CRJPZ5wsnX8
DCyoIBta2RVly8ehHfeuBb2rk3ohFvrb+FU+un0NTQhIltE/j2A99GIp7A7L2z\nmByYRHYga7zXHms32o01xIw2fn7+51hYWcCZN84AJ4Gt2Io/KZ74drDodk7pYnvLSYy12coHhSpf\nYYxnqI1QCJ6hrhnVg5UuaF2Lc/Kqepp72ZbnWQCDMC2bvP6mtHB2wATEhuL7FyHgKV58fwJmXKPU\nCoB3AXgOouYl/zZPQ2TCLcBZV2CfnEeqCSLm1KF2qDH2H8BMBvRjmPAmYy3vhEgkJGNZ+wAssjYu\nw5qQCDCB80l4r79ZITj6TUBUkfwAdYDiNSlX1lYMyJHgse0r2/DhAx8uTYyjAaOZ52dw45dvxF/8\n/V8YMPvIC49Ytn3fX70PZy+dRXOkGdGGKIb3D2PX13dZ2l55ZwVXinc1Yo0xS2zo1K1T2NGxA8dO\nHsOp9VPob+3H3P1zlvM30hDBjV++EReumCfrFXaX5OS6KLMCCJfa9ybfC0CAIQCcv3IeR14/gt9/\n8feNGFUubrm8SBfx49UfI/tcFhPPTgAAnr7vaTx131MG/M3eO4vueDfOXzmPS3TJaMNSsuUC8PbX\n3sbvrv+u5bi4waIXQCwnMVZoJQ0VKlQ1FYJnqE2nTCaz0UMI5UMblSDJj9zOKd/WVwkTD0HAlbRs\nxjXbnoGAxH4A0hNwBCKm8hgEoLVCuON2KvtegbAWnoIZFwoIq+QxuGekbURpjU6md1reAW0hEbN5\nyaWt4zDrac5AWGSPQABgd/F5A8S8pC7AClvDELGmGZggJt1mJUR7sWJK+M2i5kAH+PyOUt2xayBp\nmWxqLE0AJMHjxDsncOzNYyUAwsGoOdJsAOfKBRMak01J7Llnj2XbvpY+HHvzGC5cuYC31t8S2V/f\nOILbv3a7AU4y2VAEEbz02y8ZcYrS9bQ5JrJfNaIRFy5fwL8++q/REhF9vLfjvbh49SJWLqxY5krK\nCX71qoDHM5fO4Kdv/xTRhijOXzlv2eb7p76P4e5hy3vSeik11DmEvpY+R0hLxBP41Z5fLXm/OdKM\nni095htrQMNRQdCz985isG0Q8UaRgEmujTynJHA++dMnXQHRT4Zjqc1WPihU+QqvpUJthELwDBUq\nVFW14aVZApCr9VW1WnGY4Flaf0Oz7wgElL0LZu3KOZhWUAlaCQB3F98zKjAXL6ob1oBDZ0yw9epC\nKw04sr1GGBaky42XEb0YRcPZhtI22TUzUBzrv4EJeEsQFtkCBHwegYDjIxButnblYG6GcL3lICYN\nc8MAJuHdirkBQFeWNiCeVLrZXrx60bAcqjGajcXLg6HOIQuA8My0B187aAFOAEg0JQw3Wm5xk+Cm\nAtzK2gre/dV3Y+LZCXzrgW+hv7UfP/n4T3BX111GMiIJWPNvzAMQVsPT66fxV8t/ZSQB2p7YjsK6\ns4k/2hDFFTLH+sb5N6zWx6Le1/0+dMbFXZ7hrmFMDkzilY++gp64eeL/ePXHeOHkC8Y2KqRJQLx0\n9RJ6twh3hc6mTsQaYnh/7/vxN7/9N8b7w93D2HuPSM+ciCdwc9vNOHayFPoB88ZA4aKYa9CAWI6V\nNFSoUKG8KgTPUJtOYVzC5tJmSPpU8TnFIWc7gB8V3x8B8D2Ybp+/YPu0wwQpCVs8GY7OXbQHAlJH\nUYTFIhTSL4G9OQFugD4GU01SJNUB4OViX6cB/BK4HL2M6NUomi4X3Q2TMGEzBtFPb/F9QFhdea1M\nXmtzqPiU67EXwhUYEBl6e9lnX0ApiMl1+DaAp+Hd/XUDEwT5Op8qdQv2INUtk1u1fqPvNwx30dX1\nVcM9VLqV3rL1FguAJOIJ3Nx6M469ecyAH0AA5cS2Cfzs4z8zkupIi9ujLzyKl0+9jFhDDHcm78S2\n1m2W8Z2+KBL33HfoPvzwoz/EwNaBkgy4ACyQ2BJpMeCwPdaOP/3QnxrWTztdpssG
JDei0QLMHTFR\nbiWCCL6z8h384NQPEG+M42dnf4bzl8+jo6kD8YjpsrB+dd0Yz9LqEm6ZvQVb/vsWfOCpD2Db49vw\ntX/8GhZWFnDkjSP44A0fxNStU7g9ebtRJmb7X27H7cnbRUzo/d92jc2U55T8TAJxJYCoc9ctx0oa\nanMqvJYKtREKwTNUqFBV1XWRIEle77ZBWPZOQbjOzkG4n15tJGsAACAASURBVMp4QbldEsI6+gKA\nWyGyxQJWSNLFGX6z2PYCGFyeA+4olpQ5aTO+LRAlWwABmlH2WSuAu4p9PQogC0QuC/MmgYQl96cQ\nFllAuNy+DNEXNzANsbHPinYwUGwfMC25CQBPQMDWUQgXYg5eKoiVG29ZA6ALRDWIJ1Xj9pzqP3L3\n0JHuEezNlBZK5fAzsW0C2YEsXnvoNRwaP2QBFu4WKmtzHjt5DG9eeFM7zpW1Fdy+T7je8gy4XM2R\nZqSaU/jIwEcMt9qzl84i93zOErfqpq54l+W17OsKrmD10ipW1lawfnUdq5dWceT1I+j5Uk+JGy8g\ngDXSEMGZS2ewfnUdL7z1Ak6cP2FYYiOI4Pyl89hzzx68du41Yz8JoAAMl2IJgX92z59Z6p3yGE55\n7L79wLeNrL/lKoznDBUqVK0V1vEMFSpUIJqZEW61LS0CNq8pyJR1IdV6nFKyrqR0LZX1OdXttgNY\nhrB2bocAT6l+CAB1WrdOWGEvfhW491PA4/9eLHgPBJhyNUMk+umAWftye3G7SHGsncV53Q7hIsvV\nCGFl/SVEzOhWiHqfjTBddbdAWHPVsWdgrYcpS8z8BAJKXyv+bId+XUMFIqd6nbrP3OpIrq6v4n37\n34e+lj60N7Xb1ojkNT25Yg0xI9mOTk0NTfhg6oMGmAFAAxoQa4zhgW0P4PT6abzyy1csFtcGNKCj\nqQOrF70F9EYQwVVcLYkB9aNIQwTff/D7uPfr9+LUuvmHF22I4jJdNn4C+lqfANC7pRevfuxVZJ/L\nGms1desUTr5z0ngdb4yjJdqCnd078cSuJyx1USup0xlEHddQoUKFkvJSxzMEz1ChQgWiTEbEcgLC\nwrlv34YOJ1hlYAUou7lJAJUxmaoSEMmEABGPuV78fQiiJIrbdd8uCLCVygJ4qvj7DIBXIJL8HAHw\nH4q/fwfA5wB8BSKZTxTC4roKAdJyPFPF/dwy4U5AWDG/DeGaG4EA18sAdkJYMxNs28MwQTwLcx1V\nOa1rHer5mRmsLi0h2tKCe2dnyyppUitpQbJ4p2i1I4aZ/6UNe+7d6ws8bvzyjYYFMDuQxVP3PVWy\njQSboc4hvP7O63hr7S0Mdw2jPdaOhZUFNKDBFvzijXG0xdpwev205X0Oc3aKNkSRfyCPe79+Ly6S\nl2K25Wvq1im8/NbL+OnbPy2ZT7IpicLFAhrRiGQ8iVhjTGs1HWwbxOrFVRQuFtAWbcMHej8ANABH\nXj+Ctmgbzl0+Z+lv35j4Q+Fgz9/3KrcbDKH0msEMlrCEFrRgFrNIhHfMQoUC4A08Q1fbUJtOYVxC\nfWozxHLayfWc8hov6OY2KZPqtAB4EQLEbv4B0PoA8JBN2l+euOi/w6yJuRXAf2bbLUFYUNcB/D8A\nDkHUCh0ofiYrTFwG8BaEy6yETjmvncXXt8NMVMRzwQxBWD9PQsRn9kFA51swrb3cY091d5Xr2K78\n9BKHuUFlR+y0urSElYUFnDh8GEeVrFn19h2ljdsrZv1KPHME+74Q04KHU8mO9Svrxu928CjdQvO/\nlcfSx5YsLqLd8W7LfhElI9b61XX8cv2Xlvca0egKnYCI5dz17C7c2XknGlzrBZWvtmgbCmsF/NO5\nfwJQug7SIiuTIZ1eMyFajqs10orCesHY9tzlczjyhqg5mh3I4gM3FH3kXxWJnpojzcYxiUXEF0q5\nCYbCeM7ytIQlLGABh3EYM3WducxZ9fY9Fer6
UAieoUKFCkTXdCxnUPGCL0G41P4dRFzlUwAungCO\nfRo4/Cngd/730n144qJHYMLh28XXEsh4QiP1GtQu9K0NAuTU2MvvAPgYTPiMQ9TzXAbwTHE8CxCu\nttxjskPpWwVxuY6vABh8Cbj9Y0DqeeDJM+7rapeldoOANFq809I9MoJ7NtudFsDTnSK1DieH0J3d\n4kRsjbTi/OXzFjCVwPrQtx4yrGmJeAKJpgSyz2Xx0LcewlC3yDiVaEpgYtsEIg1W8GyLtpWAXKOP\nS5a1K2t4+fTLIBCaI83oineVwK1Ue6wdu2/d7bntCCKINkQNSOSZcrk6Yh1GPdBGNOL9ve8HYJ3b\n+SvnsXpp1RiH1Mn1k4hFYnhi7AlkB7IYvWEU+d/K45snvmkck6bGJl8ZaL3U/gzlrpbiF+oIRrCn\n1pnLQoXa5ApdbUOFClXfulaDR2Xc6LGzwOXiBWf2ItDTZI2DfBUiHlO6q94J4ASEtfAVAJ+E6b7a\nBFELlLu7AgLI/kcA34ewiHJ3WuniqsaxOrnFcm0BsAbhcvt9CKB2mq/Rfsafb7bqtivnl4E3N+iA\ntb66iqMzM7hnz566crP1HPe3uir+tvbssfxN8f0v0SUcef0IRrpHEI/EcexNEZ+Yak7he9nvYeSv\nRozYxsG2QVy4cgHr/z97bx8U13nne377HZoGGmhkhJBakkvWSyIZJBzJsRS1IyleEyd0XshcM3cs\nu2rdU8luJffurrh3tu7O3Jqb3Joqp27NTO2uK9pkxEzingQpkWLZZhRhCSThGFu2XhxJMQ6KiRBC\nvIgWIKBpoPePp5/T55w+p885/QIN+n1UlOjz8pznvAD97d/Ldy6MKKJCraVaami9tx5XR64KdaKd\ndzsxFmENeRxmB3Y9sktS4+k0OzE5P8nWmxwIR+MRVy2SpeceWHUALftbJDWWRpBfG47NZMN2z3a8\nN/SeIDJXOVcJ12ckPIIiWxHGImMoc5QhGo1ia9lW3Bi9gcHpQaHusqmrSXI/1//reiE66vf6UZ5f\nLlkv3158/9NNzSUYIYQQQACHcZjSbAlCBNV4EgSx9FmuxaM+SIVdzSxwxqos+KrAPEB7AVxBPLLX\nAGACTJC5Yt/z5fLLxIXfNcQbENWA1Wq6AaxEvLGQH0ygtsZe84ZCcmrBBCdvkpRM+InPtwHARB3Q\n6gdcO4Bd24CjtuRRT7X6WTVB+pCSrriQi0O7xY7Dew6j8e1GtPa1Cts1rG/AxMwEWvtaYYYZZlNi\nGqxcPF0bvYbh8DBcVheK7EWYmJkQur+uyFuBwel4W+byvHJE5iJCNJCLx+rSajgsDnQNdUGJUnsp\n7s3cU1wnF6Al9hLcfP4m3A43Vr+2Gn0P+gxdKwB43P04yp3luDBwAdPz03BZXfjCyi/g/sz9BDFq\nN9sxM89qTqsKqnDhqxdwqOuQpOmQ+Jq7HW7J/eDpySPhEaEWdGp2SthX3pTI4/DgifInBAFKzYQI\ngsgmVONJLEuoLkGFQICJtDqVWsGlygIUjy7YMyVOC+X1ntVgQu+MlYmmnthy/qu7CMAFxL0++a2V\n+1zuki2Xw1NVh8FqM/2Ii04g3ugIAKKIRz3rwbrt1iPuucnnfBqsxlN8XLXU1wR/ziDgeQqY2AG0\n2aBZKqVWP7tQtikGUnoX/HeUaG5CGqBK3Z9WuqXYQ7LZ1yzUAAb3BVGRXyEZO7gvCKvJinnMSwTd\nttJtEo9JnrI7HB6G3WzHxOwE+if7BdFZbCvGFyu/KJnH0PQQ8iysoNlldQnj90/249bELdVLoSQ6\nnWYnVuStSPD5HJ0ZxSP/8gieb3seY+Ex1TGTcSV0BW39bZienwbAajQvDl1EvjU/YdsnVzwJgF0/\n7lfasr8FRfYiYbn4mgPx+5H3hzwMh4cxEh4RrmFbfxt6xnqEfQ/vOSxs77K6MBwelliliG10SHQS\n9F6KWAxI
eBLEciHWLAStrUyELheUikeXqsgW1ym6AKwD6xDLRV8AAH//yxNFxsBqOfl75howESj3\nueT1mWoCTCz8roHVl4q347Wj1QCaY+uOAzgBlvJ7AnHPzbOi/eXCT3yOm8FSgX1gtaCSebuBJz4T\nn5PaZwpagq8JrNlRo8p6AyQVZWo1ppkYO11Ecwv+PLm40PJuVBMnbocbN751Q7KO120CgNPixIFV\nB+D3+tHxlQ6Jx6RYzH5h5RcSjllkL0LrrVbJspqyGvgqfXCYHZianRKWD04PYnpuWtdlMcMME0yY\nnJ/E4PSgYofbmegMfnHzF4IIlrPKuUry2mlOLJiWNzAanB5Ed6g7YS4wAWsK1sBhZv6cB88ehPMn\nTvym7zewmWz4yd6foKmrSfKc8PuxpWQLgNg1rPiC8P27X3tXck/49rwpkfgDCLVmQlT7SRDEQkGp\ntgSxXKirY6KztnaZdvgRsVTTb3laqAssQjkFgGfjNYAJKJ6OagXrQMtTSL8X2/dxJNZwqiGuq3wV\nTMCqWb1oWcHohZ8jx4N4aq88FVfPMX1IXsOptd4ASdNU00zpzWp9nYG58XRLj8ODje6NKLIp+3Dq\nrRXtHe/F7td348JXL8Bb6FUcw2axocBagGJ7MXrGevDe4HuCj6fVZMUOzw5J6mx5Xjne/9r72HVi\nl6L9iBVWzEK7u61RtKxaHGYHyvLK0D/Zn3ScbaXbUGgtTPDs5GOE59knTfLzqHRWYkPRBsXnRGx9\nAkDTBsWIVQrVfhIEkQn0pNpaF2oyBEFkmWBQsVnIsmSpercEATwGZj/SBqAitpxH/Bpjr0sAtAP4\nPuKirFe0XwDKDYHkt51HwgAmOpO9n3RrrBeT7LhBsEjnQOy83LE5K0U19RxTy8pGr9WNDsSRuYQ0\n1SDSEuZJx04XA3ML7gsicD6A/gf9Qg1i4HxAEBvcn/S3X7qK33lGE9bL8RZ6cevP46mvYsE6FhkT\njtGwvgG9470JdY+z0Vl8OPwhAMBismAuOoeh6SEc6joksWyR7JNh0WmGOSFdWIlCWyEGJhOFsJx1\nheswM5cYXXVZXZLorfw8qsuqcXXkKgDW4faVna8I65q6mjA4OYjGtxvRebcTDyIPcOKPJ3Dx6xex\nrSyxoxePbuohlWdTdxMrgiAIEZRqSyw5qC5BBbebRf6Wu+gEMu7dsmDPlBtMICH2/7uQpqnytNWb\nYN1hxTWNSgJLK/1TTZSla0GS7LhuADcQPy+tFGAttGo4y2NfGXjsk9bAqdWYKqD0PKVaXxc4dw6+\n119H3VtvIRRW6eRqYG5ckHDrDrnY4P6k0Tujiuu1ONl7Ukjl/eT+J5IxuMApc5RJ9olEI6gqqMLT\nK5+WbM8tW7LNPOZ1bTccHtbc1mayodnXjOC+IErtpZJ1E7MTmEPceoVbrQBAHvIwMz+DVQUstXcs\nMoZDXYeE9e+df0+4rmORMcxhDpFoBDtP7ExIlRW/Pnj2oGYabSrPplbKNpFZAgjABx/qUIdQhnyj\n6L0UsRhQxJMgiKUHF9nZJADgJFj95Q7oT2/VQh6dEp9GsgigUlSLC0sPgH4wISmOQKpFwsSRUB49\n1WITmCCeRbzxUQ2Uo4zy80jnVmlFRZUiwRyDVjy6okRaUeZ0xlagOxRCxwCLsgXOn0fL/v2Gx1CC\nRz7lqZjcn/Q/fViNE8+uxU/2HTEklMVRytryWhTYCnB4z2E0dTVhLDKGivwKeF1ejAyNCNtZYMFE\nZAKRaAT13no0+5rhdrhRWVAJj8ODueicYCHisrowMTuRcFwxWimz2cICCy5+/aLQxddsSvxs3wIL\niu3FsJqsmJqbwswsi4xOYxptt9sSGjhxHBaHsPzKyBVEohGYYEKXvwvf7fyukCq74ecbJNer3FGO\nofAQAPXIdSrPZlYj+EQC3ehGR+yXdgABtCyUbxRBZBiKeBJLDp/Pt9hTIB
aTdKN1Cig+U91g6aKj\niIuaTGAgOqW5H48GbgSrFZVHINWOlUp66gBYg6AoIAR9/gRpUx899ybT9y/ZuWSj4ZaOJkOZ/B3l\ntLLPh2s9Hhzes8fw/mqNY9QazewLBrG+oQHfevMsfll33HAK5Y5yFqWsKavBa198DS37W9DU1YSW\nnhZ03u3EwNQArt5j6aRWkxXFtmLMYQ6hmZDg28mP2Tvei+HwMEZnRuEwO7C6YDXyLdJusfJmP2ZI\nbV1K7aUosZdItrEqfOZugSWhSVDCNiYLDqw6gJ3lOxPWmWBCz/M92Fa2TdLFVz7mPOZxb+YeBsPx\nJknm2FuxWk8t3vW/KzQT8p/yC/dt085N8Dg8cDvcOPPcGeRZ8nD5G5exrWxbQidbLjprPbV4vOxx\n4ftMCkS9UVJqXJQZhG7VqMXhdGsKYtB7KWIxoOZCBEEsLXzIWDOZpIib5Ij9LnMRo41vUmkkVI54\nkyBA2TfUB+17o2cbI8jORVJ79qMI3K+3Zbbh1gL7hobCYQTOn8fhPXvgdjgM778QjWPE1/zVPa/i\nUNch5Fvy0Tvey2o9Z8aERjsl9hI8VvQYuoZZM6GK/ApJA6EyRxk+V/45BPcFE7xDxdE7Tt3qOpzu\nOy00K+LbiCOjJpgQRfx9yFv/01v4Hx/9D7TdbjN8rmL/Uo4JJkEEBs4FcOzmMYzOjMICiyS11hz7\nx2s7rbDCbDLj7efexj9e+0dJ9HnlT1cK16XeW49QOKR6H3kjodHwKNput6G6tBprC9fiiO8Iuz86\nmwxlA2pclBlCCCGAAA7jMNw5+4eIeNghH09iWUJ1CQ85GWwmw1F8poJg9h9+5LboBIx7WaYSdb0I\n5v95AOyaKPmGiu9NPvT5eRpBKVoqOxdJ7dlfujJaCwxA17XO5O8ot8OBlv37UxKdANAzznwei23F\nkmY1mSJwLoCWnhbhmh/qOoSW/S3oHe8VlnGvyRJ7CS594xJK81jtI4/wrchjBrEuqwsj4RG09rVi\n+6+2Y2xmDHazXdiWR+84BZYCXB65LLx22Vxoe65NYicCQCI6ASbEju4/mlBrqoXL6sJoeBSv7nkV\nDesbsKN0hzD+9y99HwB7/njEUZ5qO495mEzx92SzmMVMdAY/vPrDhOizOGX5VN8pXOy8CIBFkuWR\nSx69Prr/KBrWN+DsV87i+DPHBcsbpcj2QkEpuZnBDTda0JJR0UnvpYjFgGo8CYJYWqTZXVQ3bjDv\nyoXEaP1gKvWGKdYowgvgtui1UtRUfG/8UK4jTef+6ahNlbzR3XcEqMvwQ5Ks5pRf2ykAp5CR55N3\nmbU6ndgXDMJhUEB7C7zoe9CH+5H7gijUQq1jqdLy7lA37kfuA2DpqqPTowiFQ4LYLLIV4dSXT+H7\nl74vRN3k9aUf/9nH2P7L7bgXvgcAqC6tRoGtQOiAazfbcX30OiwmC2wmG56qeApXR67i3sw9PJh8\nIMx7IjKBr576KsJzYdybvqd6fo8WPYrvvfM9zEf1NRUqsBQgYolgYmYCbbfbsPZf16Iiv0LoUFtT\nVoPLw5fhPuLG5OwkAPb89T3ow8DUgBBxLbIVYWvp1oTOvkopvjvKdwgR2em5afAGuLcf3E7YlpNq\nHXE2UaslJgji4YRSbQmCIFIlVRGnhg/G0lCNbp/qPqmQjZRUHWMa8S/MOD6kdW2VhN3rPh8GYp61\n6xsasN9gUy3u21nrqcWWki1C+msyCwy19Eil5Xx8cUOfhvUNEruWdYXrsKZgTdLjisf2e/0Iz4XR\n2teq2EyoqqAKW0u2orWvFcW2YkH4AqxT7Ew00cpEjlLabqrUe+vR3t8umcfeir2YnpuW+JMC7Nyi\niOKdu+9gaHoIDrMDDosD4bkwqsuqUeooRXBfEACw+RebMTA9AJvJJqQSA5SyShBEbkKptgRBENlE\nR6MZQxhNQ00lbTULqcoJBACMgfmUHk
PmItM60lwXNbUwzWurZFHBu8x6amuxJwXPWnETGHH6azIL\nDLX0SKXlfHzfSp9kndiupdJZqXlc8dhHfEeEcXetYCmzVhNL0HJanNj9yG6hQ+6+yn1Cs6CtJVuF\ncZJRYi9JSNtNlc+6P4tmXzNsZptkecdAh+BPajOxdTazDXce3MHM3Aze/9r7aFjfAIfFgbHIGMLz\nYXQNdQnXyO1w48af3UDD+gZs92yXzJ1SVgmCWKqQ8CSWHFSXQGSalJ+pTIs4o7WaRrfX2idTHWe7\nwbrsDgA4pLGtEZRqU7PQ5ThlYte2/W/aUxLbSsKOd5n98unThtNsAakQ11tvp9axNLgvCJfVhe5Q\nNzb8fAN6x3vj9YUHjkr2EY+h5Bkq9yWUH1M+7gdf/wBVBVW4/q3ruDN5R+iQe37gvNCsZ33RetSu\nYEa5ltg/JbaVbcOP9/5YELNizAbfFl0PXcf6f12Pje6NcJildbjcn3R7GROOkfkIuoa7JLWwvIZV\n3NmWXyN+DW4/uA18zMR3+1fa0/5QhTrNEgC9lyIWB0q1JZYc7e3t1Aac0I8OL8eUn6lUusPmMj4k\npIp+5Qeb8Mf5AeTBhjf/8iIeWeHVHmchO7/6sDCpwwZI9XnKdppwJsZ3H3ELKaVVBVW49ee3Ujqu\nDz50nOsAQkCFtQI39t2QzClZbas4fdhtd6Otvw21nlqc/vJpAMBjP39MkkZbZCvCWGQMFpMFc1HW\nZdbj8OD+zH1JCmuRtQhOm1PSZVeMFVZB5Crh9/rxzt13MDg9CIDVqj6YfYCbYzcl3W1L7CW4+fxN\nNHU14cQfT2A4PIzPrfgc7jy4g9UFq1FkL5KkJO/+9W50nusENmYmzZY6zRIAvZciMo+eVFsSngRB\nLG98PublCLAOpwZr5B4qFATjZ/+bG9ceYULjC3er0PF/aQuNjAvyZLW0C2xv8rBT/s/lGA4Pw2lx\n4vq3rsNb6FVtRpSM1edWo6+nj3nDIlEA8drWn/57YOyzHqza+oQwtljIAol2IVyY1pTVYI1rDfIt\n+Wi52YL5mAGt0+LE5BxrAmSCCSX2EhTYCrDGtQbXRq8hNJMYBXRanHhixRPouNORYM8CANtKt6Hj\nK+z3zOrXVmNqdgpuhxszczMYnx0XtjPBhO2l27HCuQJjkTFJoyFx3an4eoiFtpZvph4yPR5BEARA\nwpMgCAKoqwNaWzPr5bhcURCMtf+tHB88MoxHB53oDFzXF/HMND6oRzWXWNQ53S61mSQVwdg73ovd\nr+/Gha9egLeQPQvyCJrb7lYdlx/zyr0rgsDjEUDxdm/V1aGvtRX/8DcuXK+cEMbWE52TR1jF8xMj\nblwkbo6khAUWFNmL8OQjT6JrsAsj4RGYYRbEbL23HivyV6A71I3Ou53CWEoilSP2MK0pq0GZo0wS\nvXU73AicC+D6vevoGevBu197V7jm6bCoDbgIgli2UHMhYllCdQnLiIWozwsGNb0cl/UzZeQaK9RQ\nvvmXF/GFu1UZFZ2Ga8yS1dKm4kmaZZI9T6Hubgx0dKCvtRXnA5noSJU6Ss2MtPAWenHrz29JBJC8\ndjTZuCd7T6JjoEMiOi9941KCAOK1rVXbWXMh7qEpfl7UniN5gymlhkNOixMWE6sBLbAWCEKx2FYM\nv9ePYluxZPs5zGF0ZhRX710V6k0dlnhN5/DUMF7vfR0dAx3CWE6LE+e+ck6o4xRTXVqNd/3vot5b\nD7/XjzPPnUmokwXYPeoc7MTAlQEc6ooXTBv5GZJvu9jenkRusKz/7hE5C/l4EgSxeIh9GbdfBNb8\nH0lrMVPC7V6c9FodtaULgg7vy2SprI+s8OpLrzUypZgwAViapGYUa6G8WxeAdLvUZhK9zYa0kHs1\nJhs3PBcWvq90VuJawzVFAeRwu9Hyv7rxYLQfNpMNE7PMQ1P8vIifo+2/2o6p2SmE58LY4dmByoJK\nwT
rm1T2v4onjT2BomqWxVpdWo8BagM5BluZaYC3Ag9kHggj2Fnpx4M0Dgo+mmP4H/dj4i42oLqvG\nnQd3hOWdg50SP06H2SGkIu+r3IfWvlbJOKMzozh49mBCVLhlf4skEm2zsI64RbYi9E/0o+6tOgT3\nBQ39DOnZNpXoN0EQhFEo1ZYgiMVDXJ/neA7ofJMtXw61mLlSW6qnBtKHBW3Q8zDXmIVDIZwPBLDn\n8GHNNFuxGPhfTpZj7kYvrE4nfvkfy9Ezpe3HqTXmq3texaGuQxlPueSpnPmW/ATfUC7oaspqcOa5\nM0mPK0+RlT8v4ufIYXFI6iXlvqKH9xzGS+0vIYoomn3N2Hp0K/om+1BkK8L5r57H9y99XzLfV/e8\nisd+/pguT1CA2arcenBLaLzk9/px/JnjwvXgnpwAS6t1WpyC8F3nWoc1rrjPqf+UXzjvem897BY7\n+if6he0b1jdgYmZC8WdISUDq+XmjhkMEQaQL1XgSBJHbiOvzGpdZLebq1UBfH1BUBFy9CnhFaarJ\nmuVkGn6N8wH0qhwz0w16NM7vYasxSzWaJBYDT/WW44X/ziJ2/8/feXC1ZBiAMZEQOBdAS0+LII6y\nLTCUxIyRey9vEtTsa5bsIx6r8e1GIapYYCnAg7kHAJTrR4FYp9i7cSHXsr8lYb6v7HwFW45uQWQu\nIul+y9lctBmjkVFYTVZ4XV78/v7vMRIeURTVoXAIL7a/CBNMOOI7Isy31lMLh9mhKSpX/2y1IJSv\nfvMqiu3FitfRyDUXP5eRaARtt9seyg+DCILIDFTjSSxLqC5hGSGuz9NRi5ktsvJMcaE5NgYckplZ\n8vTXVjCRlk34Ne5NcsxU/ECToXF+y73GTP48adVSqtXriVNWv/u7xwGwFN2KzdXCciMpst2hbkF0\nlthLDO2bivejUsptsnsvPwb39txauhWhcAiNbzeq1nIG9wXh9/pR761HsZ3VZybzveTeouLaUfl8\nvYVePOF5QlF0AsDGko248xd38GjRo+gc7MRIeARVBVWKkVy3w40Tz5zA8WeOY9eJXegc6ITdbMdP\n9v4ERXapz6mSj6r7U/b/WGQMh7oOqV5HI9dc/FwWWAsUvVuJ5Qu9lyIWAxKeBEHkBrwWc6lHOjlF\n7M0kamsBeS1fsmY52WIhG/QsxvnlMFq1lGrCVCxA6v/5KNY3NODLp0/jF88kNqExMg+1hj7JSKUR\nkZKAMnIMLph6x3s1j+12uHH8meM48cwJrCtaBwCYjc7i+5e+rzo3j8MjqR1Vmq9SYyKA1Yke8R2R\nbFPrqcVH3/xI81wHJgcwNjuGmfkZfPnfvpxwXCWhqLce18g1F4/Z7GvW/DAolQ8fCIIgxFCqLUEs\ndXKliQ0hJRRi9+bw4cR7YtQCJBP3eCFtR5aYxUm2EOlmbQAAIABJREFU0UovTaXmNZX03XRSnNOp\nyw0ggG50wwknggjCrfJQqB3D6LH1bq9nu1A4hJfaX8KD2Qf46N5H2Fq6FU6rU5L2K76uTV1NmlYy\n79x9B5FoROKFqoXeYxjB6PNAdaAEQSSDajwJ4mEgV5rY5DpLWaDTPRbIJR/MTJGKIFxoEaCnTlBN\nBPngQ0ese1UDGtCi0r1K7RjJmhUZGUc+32w0V0p2X8Tr8ix5+P23fp+SL+diCcCHuSkYQRDaUI0n\nsSyhugQZMXsGxZROIk53NxNvra1MhIrI+WdqOd9jg16uRn0wzwUCeN3nw1t1dQiHFiY90OjzlErN\na6asUPSip05QLQ3WGcu9rkUtDifJvVY7hpGU22TjyOd7qOtQwnbidNKDZw9mpK5Vad2df3/HkOgU\nP1MLfe85RlOnidwm5//uEcsS8vEkiKVOMKie0vmwI45y2pgfnm7xZrTzbDYjqnrvsWwOgSZ37gd5\n9fiMijDqg8mFKgCcDwSwfwGjxdn0RpR7Zy4kSj6TyURQEEEEEMBh
HFZNs00WyebHuzZ6TfNYWuit\ntwWAckc5hsKsk7Auv1kkvy+ZumeLde+5oCcIgkgVSrUlCCKz5FJKqzhF1e9n4lOvQPfBmLdlLqTD\nyubgG2xZ9ClpYtDKxYgPJgC8VVeHvtZWeGpr8eXTpxc0NTeXa+LSEcXi8+I+k+mKoNd9PuEDgvUN\nDZIPCMTHqyqo0tXARw0j9bZuuxtt/Zm3GMnmBxIEQRCLhZ5UW4p4EgSRWXhKK8BE6GKqHXGK6pEj\nxkSw0c6suZAOK5uDs1FlSgEAJwGEAewAcBRAExbOW1RMEIYaETncbkNRy33BoCGhyslELelipUTq\nQRzZ0xvN48i7oaoJJyMCK1kkW3w8w42NzgVwsvckwnNh7PDswNEDR5OeqziaCCArkcV0rj1BEMRS\nhiKexJKjvb0dPp9vsadBqFFXx+ooa2sXxZNTQrLOsiIUnymjnVl1HiuryOagOiUf4tFcgEV0B2Es\nwrvMSRaB04I/T+l0kVUjU9GydBrF6D0vIxHfZJHsdK6jeA565pEJ+D3qGe+Bt8CLInsRyveVo9fR\nCyeciLwVQVtfGzwODza6N6LIVqR5L+nvHpFp6JkiMg1FPAmCWHhyqeaUe4OmtC+Mia90jpUpZHNQ\nnZLYmrAaTFzHoqPkvckwWkuqRDZq4lKNlsktTdKpE9R7XkoRX7VIcrJIdjrXUezDWV1avSCRZ/E9\n6nvQBwAoP1+Oof2sXrR+Xz0azjeg/0E/Ou92AqDIJ0EQDwcU8SQIgnjYCAF4CUAUQDOYyF6i3pvZ\nslcxWkuaCfScS6qRSr2WJplEKVKZTiQ51Tm81P4SoogmTQtOF3EkOhKNoO12G2xmGyLzERTbilH9\nzWp0FHagFrU4jdNww032JARBLCvIx5MgiMyRS02DlhpGO+QSulloIZNN9JxLqmmndahDK1olwidb\nJEsHXsxmT3pINZVZqeHSn8b+hK7hLgCAf70ftv02SWffbKRiEwRBLBbk40ksS8h7apFI4oO5JAkE\nWBfYujq0v/FGWvtDyx+SW4a0gonQ5YxBX850yURKbDLEvo56vRyN/o7ix/jbfdcwmZ/8XJJ5VCbz\nLA0iiAY0ZF10Asm9PfcFg1jf0JCTohPQ50uqhLzhUsv+FpTmlQrLjuw5gha0SK69Ef9W+rtHZBp6\npojFgIQnQRD6yIWurZlELKR/+MP09tcS4kY75C4gRvSzLhZYZGdbyKQqRFI5xgePDOP4X1WlfC7c\ns7SvtRXnZc+kG+4E4ZMtknXz5bWciyk6AwjABx/qUIeQ7NORVDsRB/cF0bC+QZIyq7SMIAjiYYaE\nJ7HkoC5si0QwyMwgF7tTbaYQCWnfiRNp7a8pxINgnWJ1+FQuNBkPZC+wyM62kElFiBj9HSU+xq+b\nPkr5XLId/dVLrguubnSjAx1oRSsCsk9HUp17U1cTBicH0fh2oxAZNxLR1IL+7hGZhp4pYjGgGk+C\nIB5O0rU/yaB9SiYa5KRagptx9xuNJkXZagaUjHSOuRB1eJk6hpGGSJmyZVmKZKPe1Yh1DEEQxHKE\najyJZQnVJRC6SZZH6nazL78f7Tt3Gs8z5V4lGRBOqimSBvJgU41cZjyQzW1oVMZKlg6aLdI5ZipR\nK6O/ozIVGTMS/V2IFOJcJRv1rqmm6OqF/u4RmYaeKWIxIOFJEMTyYNM5wH0ZKH8f6L3PlmmpMb7+\nvfcWtWGSaoqkATWZagluBvWzLvSmg6bS1CfdYz5MZFso5TJ6612NPIO5nl5MEASRC1CqLUEQywP3\nZeB+Nfu+6h3g1ue180i11i+QhYxqiqSBPNgMZv5mFb3poJlMXVwMT85ch6w8tKH0WYIgCP2QjydB\nEA8P5e8Dw08AzmvA9SrAW6ytxrTW+3ws4giwfFS3e2G9TJeKmswCdW/VobWvFbWeWooiEYsCPYME\nQRD6oRpPYllCdQmEIhcfY5HO
r/7fwMF6Fi0EkueRxvJM2y9fVl7P81c9HqC/Hzh2bGG9TBc6DzaH\nWMqpi/Q7anmQS88gPVNEpqFnilgMUhaeJpOpwWQyXTOZTHMmk2l7JidFEMRDRKaMJL3FLL32zg1t\ncaj3mLzzzsaNQGcnMDrKli8XL9McJp2GO+cCAbzu8+GtujqEM2JOSjyMZNIOhSAIgkgj1dZkMm0C\nMA/gRwD+92g0+qHKdpRqSxBEHHndpN8fT2etqABu3EgvwqenLlKeQtuiUbvFx6ypAdasAZqbtee4\nQPWhRBxum3Lv6lXMxD4kWN/QgP1a93eRWAxrmUUnAKAbzO81iJzztSUIgiBSQ0+qrTXVwaPR6O/5\nQQiCWMIstEDinVr5sXk6KwAMDLBl6QiFYFC7LtJoC1i1MZNdO/l55qj4WYqoCTZum8LJ9S624vme\nDwQUBfKy89vsBsBvUQDMeocgCIJ4KKAaT2LJQXUJKqSaspqqAWSy4x88qD4XuegLBlmkU7wsHfTU\nRQaDwLp1gMMBNDai/Y03Uhsz2bVL1d+E0ETNl5PbppRWV8Pr9+PLp08vShRR7+8oPTYvy85vk3/O\nVAuAfix0Q3/3iExDzxSxGCSNeJpMptMAKhRW/Z/RaPSk3oO8+OKLWLt2LQDA7XajuroaPp8PQPzB\np9f0Wu/ry5cv59R8cuZ1dzfaY9ETXyzCpmv/qSn4AKC2Fu0vvAC0t6d2/JMn0T4wwF6XlQEjI2gH\nAL8fvth27e3twHe+A5/LBRw+LDT18X3pS0BrK9rn54ELF+B77rnsX681a4TrhclJ4LnnjI83NcVe\nx8Rle3s78MMfwjcxAdhsaH/kEWB6Gr7GRiAYjJ9vLjwvS/g1F2wDjz2GtS+8AI71O9/B+OQkDp44\nAYfbveDz+4fnnsNEXx8sDgeimzbhnStXYHE48B9PnVKcj575Tl2fAkqZ3+YL8y+gPdWfz1x5/R3A\n5/IBh4H2yzkwH3pNrx/S15fp7xG9TvP15cuXEYoFFz799FPoIW07FZPJdBZU40kQi48Bz0cJmbLs\nKC2NN99ZsQIYHIzPpakpeTqvz2es5jITpHq9xChdO/G5eDzA8DD7fqHO6yEgV305X/f5hNRZR3k5\nwkNDANKrMyW/TYIgCGIpsJB2KlToSRCLDe/AalREpWrZEQgAK1cywXngALBtG1teXQ289550Llrp\nvIuRlprq9RLDr11TU/xa/O53bF1tLbsW/HtKt9WNVldah9uN/S0tOSU6AWnqbNnjjwvfp1NnSp1V\nCYIgiOVCOl1tvwbgHwF4ANwHcCkajT6rsB1FPImM0i5KNSMWGHEznbExZjHC8fsBm005cqoVXcxU\n1DVF0nqmeOOg+/fjy6qqgI8+iq9fpPNaqogjh7nclVYOj8TOv/AC9u7enZNRWWJpQn/3iExDzxSR\nabLd1fY4gOOp7k8QxBJg0ybg5k0gGgWeegqYnY2LzQpR+XdNDXDkiLq40uo0yyOHegkEgJMngXAY\n2LEDOHpUeVx511mtlF+dh5YM0d0tFZ01NcCZM/Gxl4hoyiUSmu4EkLYFx0JYl/BIbHt7u/D9YqN1\n3g+lpQtBEASxKKRd46l5AIp4EsTSxe2WiiqbDYhEWArppk3Ab34DWK0stdbrTdxfrNLKy4He3tRF\nX7Joq1r9pLx2dHBQu5ZUw14moRx1IhbNdbuBz38eeO21hYtuZkCQsXGk53yuqWlRxUhCDacPcQuO\nBqRkwZHrUdRkAvBcIIDekycxFw7Ds2MHDhw9qvueaJ13rl8XgiAIYmmQ1YgnQRDLlE2bmJ+mzQaY\nRWXgBQXAgwfs+7VrgTt3gHv32Otdu4AbN9TtRuRs3qy8vRrydFZ5tFVWQye8ib92DfsAOHiNZWMj\n20Ct5lJ+HAX/zcRyVI1objbJlCeizHM0NDio6S+pRTqRNIfbDbvbjVN+P9vfFoQD7rQsOPRYly
wm\nSp6e/Breu3oVM7HGXf1tbYbuidZ5q61fdv6hBEEQxKKTqeZCBLFg8JbORJYYGGDCa3iY+VxWVgKr\nV7PIJhBPq+UKjO+TrGmQ0jGMeIaK01lLSoB33wXq61ldqTitNYbg8zg8jPMOB3DsGNtGpaGQ8EzJ\nj6PwRj1hiFSbM2WCTHkiytR0JkSamtdmSvu7AizSeRopR3X3BYNY39CwIN6een5HyRsoKV1zfg24\n6ASYR6mRe6J13mrrl51/6BKH/u4RmYaeKWIxoIgnQTwsJEshFa/jAtPpZALP65Xml65ZExdxmzcz\nEcnDf/JjBIPAI48AMzNsX7MZmJ833uWVC6OSEvb1+OMsInvxoqLgE97EA9gTDgOHDsXFoVKk6Ic/\nBP7rfwWuXYsf59IlxbGNlqNK0EjjNUwQLNJ5GMqCTG8qrqwGd18wmHZjnHTFq2T/I4dTTyOOsRg1\nl8mivvIIp9I159egrKYGzpUrYbbZ4GtuNhw9TnbeauudVnbsWk8tDu/JvQgxQRAEsQSJRqNZ/WKH\nIIglzssvR6N790ajzz4bjY6OLvz+mWDv3miUtQmKRhsapOsqKuLrDhyIRquqotFPP42vf/ZZtq62\nVjr/0VE21ugoO8fi4sRjfPppNFpZGY3W1bHv+fZKbNzIxvB4pMfnx3nhhWjUYokfo6pKcZjp0dHo\n6YqK6LTSnJWOI742VVXZu0fJ7kFWjheN/zZegMOJmR4djZ5uaIhOp3gt090/F/j13r3RHwHRHwHR\n07L7/eazz0Z/BER/WVureo4LfQ06Xn45+uu9e6NvPvtsdODup9GG0w3R0emle/0JgiCIhSOm+ZLq\nQmouRBB6SOgoYzByku7+mSCZpUlpKcDT+errgRMnpPvqsTsRn2NJCeuGqxWZkUcA166Np7pWVQG3\nbqkfw2IBenpYRFYpksjnnP9ToNchjfqJmyZVVQFbt8avzZYt6k2Q9EQsk22jZSuTaeoAtIKl4hpN\nU00zOqsW7XuYuqi+VVeHvtZWeGprE1JZExoo5QDUaIggCIJIFT3NhajGk1hyLEpdQmJHmYXd3wiB\nABNodXVMfHFU6hsBMEsSgHWrLS5O3F9cx6g2fk9P/PstW/TN6+RJJiRbW4GXXmLpswC7XhcuJO4j\nPkZBQfx73hyntTVeO8rn3OtgDXhawVJPgYTjtH/nO/Fr09ubOFay48hJtk2ye5ANgki9NlLPuSZB\nrcYz3drPdJDXVWYL/jtKrX7yXCCAU34/ZiYmMjrPdM9PnN5syc9fkGtF6IPq8YhMQ88UsRhQjSdB\n6EHLhzLb+2uhZjUi7sra1MTsRBobEyNYR4/GooP5wK9/HY8GirvP8mNcvRqPjm7YADzxBBvP6wX6\n+tjyzk7gsceY0ObHOnmS1YMCwIsvsqhqOByfwzvvAG+/DXz5y8Du3axT7vAw8w7l5yI+xtgY2+7W\nreTCXqkBz8WLwO7dOLd7N0IHD+L61BSePHWKiYOkY+n4ACHZNmkViKaAG6l3uk3zwxK1Gs/F7C6r\n1Dk2E8ijuBy1+kmteYjX/3zDBpQ/8QTyy8sx3tsriRTLj5vu+YnrTE/5/Vm5VgRBEMTDC6XaEsRy\nQJyCWlERb/gjjqzJ033d7sRUSvE2HL6t2GZETkMD8NvfxkWh1RoXjGVlTNDydQBw4ABLq5WP6fEw\nISv36eSpu1u3xsfJywN+/3smRg8eZJG5xx9nIlosqkNgkc5LTwBDn8SbEnm9yqmFydKKldbJU1L5\nssWwV8kketKrk6CWSprNFFOtNN5kqa/pYDRFVWsefL3V5cJsLCrq8HgQHh6WHEN+3JmJiYydX7au\nFUEQBLE8oVRbglguqKW3csTRqXffVU7nFG+Tn89EnzyVkm/DO9vyaNfJk3GBaLFIj81tR7ze+DIu\nOgFgZEQqOgHWPVZsXQKwjrfDw2w+4pRaiwVob2fnIj7GF7
8Yf93bCwwNAW1tiWmhPOo39EncJmb3\nbnaaPPrmcmHP6Ci7tsnsUZTWyVNSNexVFirdMxm65pCmTQyP9skFi9ryTKCVxptfXg6zw4GRK1cQ\nXLcObxw4IJx/OvdFLYrLx3xt9Wqc2L1bGFuvxckju3YJ43qqqxOOwY/r8Hgw0d+P+UgEXr8/I0JR\nj/1MLjzLBEEQxNKBhCex5Hgo6xK06u3EtYNer7JgEG/T2yv1q8zPB1auZFHLFSuADz4A1q1jPp6N\njUw8cubm4t8XF8dtR3p79Z1Lfj5Lq+Uit6aGRUXn59lru52dAxe/c3PA/v1MdOfns2W1tcBrr8XH\nFAvm06dZRFX+Rnh6Ov691wtwAVBeDtfEBBxKolUPBlNSF7PGMZfmkA200njHe3sxHw4jGokgEgqh\nv61NOP+k1yQAwAfWrElBX8lFGv8dxcd80NeHwc5OYWwuvruamhSFG1+//+hRYVzx91wI8uMWb9yI\nwc5O9Le1wWKzZUTU6/mAYLk+R7nIQ/l3j8gq9EwRiwEJT4JYCmiJGz3RKfE2fDy7HZicBP7lX1h6\nbijE6kD/6q+YX2dnJxO78nR5sxlwudjy2lomOsXRSCXsdpYGvHIlS4k9c4bNpayMiU++jcMBdHXF\no6ZmM4tmtrYykVtRARw7Jj3XYJCl6c7OsnNQEpGxiBEAdl5cANTWwp7s2mphsGHQYtY4pjqHpRLZ\n0orS8fPm2AoLsfOVVyTrFK9JNxIbVIlQE2l8TFtxseLYWsJNPK7SMRxuN+xuN0LXrwNgfp/yuWfz\n3uXCs0wQBEEsHajGkyAyRZr2E0nHtNlYF9fmZmlt4cmTrEHPjh3x2kaleciXfe97wC9+wYSaOILJ\nsdlYNHN4mAm6SESaFnvlCvCFL0iXlZTEmw6p0dDAmgpFItLlK1YATz7Jjieu7RRjscTnqmRJw61K\nACYyz55VtjKRr1erZczG/URu2GgYncNSsNlIVt/J11lsNpjtdtz97W8xE3tW8yoq8Gc3bgCA+jVJ\n0ZaGX+edr7yCrkOHEsbORB2l+N546+vxjMgK6VwggJ6WFkRiP6da986o1U0uPMsEQRBEbqCnxpOE\nJ0Fkimx4dSYbU94IaN06FqUUd53l+8jHOX8+3mE2GZWVbFwuBk0m4PJlYNs21txH3JVWiVWrWAQ1\nEmFRzTNngPJyaQ0op6EBmJhg4pA3JyoqYo2GLBb2/eiougdmKMQsWaJRJtB37WLnyJsJFRdL12u9\nUc4F79UcIZcbzXCxdO/qVUFMygWWWhMejqaY5g2qDsO4LU0SMiHckt0b8XnDYkHl00/jwNGjqsda\nCh8wEARBELkJNRciliU5W5eQDa9OPdYeABN1lZVMKHHR6fEA/f0s0sd9K/k4WoKR87nPMcHHx/v8\n54H//J+ZyBOnrirhcrHj8OjmypVM7D31FHv9mc+wSKd4XsEgE7o7drCU2vPnmVCdm2PnVVWlntLq\ndgPHj7OIqtvNRKe4mZB8PSA0bWrfuTOxJnQhvVdzHD2NZhYLnq7KRadS2qfcnzIyNgaT3a66fQK8\nQZXo1M8FAvjpypVoLi3Fm6ImRYD+31GZaLSU7N5IUovn5iQ1rUrkYursUknzzjY5+3ePWLLQM0Us\nBiQ8CSJTGKz1S3vMYBCorwf8fhZJ5AKxpoYt37gxXqPpcknH2bFD/ZguV3wcHnHMz2ciko+3eTNQ\nWJh87hMT0qZEnBMn2FwuXAA+/pgJzT/9CVi/nonRkRFW4zkwwIRvzEICRUVsH73Xlottp5PtpwRv\n2vTee4k1odm4n6mi1dU4y8d0AFnrRJsuXCyVVlerdnQVi7Px3l7c7exEdGYGBVVVKYvpUHc3pgYG\nMDM6itttbfjl9u2CQJqJWaAsBGLxKhdp+4JBOMrLhW3tJSVJBWUufsBADYwIgiCWD5RqSxALQZbq\nBYVxe3pYWuuVK6xxT2
kpizS2tbGI3ZYtrAGQ08kiiP/2b6xhj/xnc/VqlhobDrOmPkC826wSNhtb\nz2svS0uBe/fY99XVzHtzbIy9NplYumttLYvO8vnIPTs54ppOjpGU195eFum8cIE1PlK6B7zuUy19\nN1dYjLTfJZJqLE9XPRcIoPfkScyFw/Ds2JGQWpqptGE+DsCa+licTgzGnuNkaao8NXispweFXi9s\nRUXYFwyiq6nJUH2lEkqpsnye9pISfOPSJRRqNQHLMXI5zZsgCIKIQzWeBJErZPpNPBdR4npOJXh9\n43e/Gz++xxOPIsrhtZVGsNuBmRkm2jZuZOI3P599TUzEhaeY8nImfAGWUiuvNzWZWPMicQ1raSmL\ntBYVGRPvSteK3wO1xkK5xmII5KUiymVI6hqRKAL11lUqNdoRL9vz6qt453vfA0wm+I4cwduNjboE\nknx+fI6Tg4OK9ZVGGv6IRVrJli0Y7+2FxWaDtaAAvubmJSnatO6X0YZIBEEQRHYg4UksS9rb2+Hz\n+RZ7GsaimJl+Ey9vLKQUHeRUVbH/+/qYaKupie9rVGh+5jMsPVa8j9vN0m7v31cWmXK2bWPCt7+f\nRUDPnQP+5m+AN99kUVqLBfjwQ9Yo6cUX2TKbTdrx1oh4l18rhXvQ/txz8E1MZD4inSkWQyAvsihP\nVVCII5Gl1dX4ytmzhsRIsmZFyZrvcIFkyc/HO1euoKayUlGwRiMR3G5rg62oCJGxMUGoqglXpWOq\nXRuxSDvl9z8UjYIeloZIOfN3j1g20DNFZBo9wtO6UJMhiGUHrw8E2Bv0ZG94gkFjb+LVRC1ffu0a\ne22N/Qi7XEwoOJ1MqPGGPvn5LNV00yb2emyMRSi9XuDWLePRzU8+SdwnFFKuOzSZElN5AVbTWVjI\nhOf9+8AzzwA3brDvt2wBtm5lDYzKy+Pn1NwMNDay/fU0+xFfP17rWV0NrF0LHDmSeA/6+liklu+b\n7TevRlOvuQfrQqJxzGxHmnhtH8BsTvQKin3BINpj3YvVonzJ5i4+LgDYioo0vT7F6b2IRnEvFELf\nlSv45fbtcK1ZIxGx3vp6rG9okFisdDU1ITI2hvyKChw4dkwiVkdjP+viY6pdG17vmWyuelhKUcRc\nbIhEEARBqBCNRrP6xQ5BEMuQZ5+NRoFotLY2Gh0dTVz/8svR6N69bDul9cnYu5eNDUSjDQ3xsUpK\n4svlX3Z7/PuKimi0sjIa/fRTNp7JFF9nNkejFov6OJn4qqqKRgsLE5d7PNHoU09Fow6HdHlDQ+J5\nl5dL14+Oxv/Xus7icerrlfczci8zjfz+LkF+vXdv9EdA9EdA9HQWzuHNZ5+N/giI/rK2Njpt8J50\nvPxy9F8qKqJHSkqiJ/fvT9g/2dz5cX9kNidsMz06Gj3d0JB0PP71E5cr+k/FxZJlaueiNB/xsp9V\nVUn203Nt1Oaqh2zf20ySznkSBEEQmSOm+ZLqQop4EkSqaEUx5RFRt1t/lKunh/1vsbBmP/39yg14\nOG43iwTyZkI8lXTTJlY/KY48JmsWlAm4X+eGDcD4OFu2cyezU/ntbxPPw2pl5+nzxSO5tbVs/vx8\n+DUWR72Uajd5tFJshaLHs9NoRDpdloFVS7YjTfuCwZRrMXnHWQCChYg4Ypps7vuCQfx8wwaEY3XQ\nJosF06OjCIdCQkRRfswx/vPKMZsxK+psW1ZTA9eaNaoRWKX5iJd9+fRpSfOhPa++KkRL1a6NOPpp\nlGSR3VQjofJ9M9FMCUjvPAmCIIgFRkuZpvsFingSGebs2bOLOwG9kUweReNRPnG0ct06NkZVFVsn\nH+upp5SjmTU1iVFEkyka3bEjGt2/n0X3xOPYbOlFLs1m9XVWq/Jyj4fN4dNPpVHYhgb1iK04ullV\nxfZXi3ByxFFDebRydJRdY6Vrq0DCM5VOtFoPWue2BMiFSFPHyy9Looo8OidELYHo0erq
hDlqzV3Y\n32JRjPyJI4L/XFER/dXOncLr/89uj/5vJpPw+qeVlZrXSGk+8mXZikJ2vPxy9Nd790bffPZZ4Vh6\nIrtKc1AaS23fpRRVzQUW/e8eseygZ4rINNAR8SQfT4IwCo9ktrYmej+K4T6Q3E+TR+W4nUhHB6st\n5N6Y4rG4JydnZoY1CTpzJl7XyYlGgQ8+YNHBq1eZr+fq1cxKhNd6pkqy6KhafejwMIt2fvvbrDMt\nEI/sKfmHFhay2k6+3Ucfsagj//L7lf0redSwupptI24Y1NTEbF2Urq0WPGqq5x6nCo/eLnLtnNz3\n0Qhi/8jFItTdjcj9+wCkHpX7gkF4/X546+sTmgudCwRwyu9P6rXJ/SxXPf00gMTI37gowjk9MICJ\n3l5hDmU1NZIMg+INGzTPw+F2w+5245TfL9wL+fXNVoRZySdT7d5qzSGZ56Z8X6rNJAiCeAjRUqbp\nfoEinsRyQ289II+aiaOGxcUsMrl/P3vNay1raqLRF16IR9k+/ZRFL/Py4tvt3cu2KS6W7iv+2rEj\nvQhnJr7E8yovj0b9/vh1euGFaLSsLDFa6vcrRwArKuLb1NdL1yWLGoqjoSUlxiKL6ey7hFCLFi4l\neGTySElJdIzXM2sgjrQ1ezyK0TmOOPInjuZCKrKxAAAgAElEQVSJI5z82Hw7cbRVHBXVinpqRQCz\nFWE2UkurN1KsNJZ831yImBMEQRCZAzoinmSnQhBG0WszIbfxEOP3s26z3E+zvp6NK/f6fOQRVuPJ\nsdniUUwlKxTuqcntVZLZrKSLy8W65g4Nsbns3s3sUTo6pNFJsfWJ0jXZto1FLXt7E+tfS0vjkWK/\nHzh+XN/cuH1NSQlw6RLr4quXdPZdQohtKOwlJXj+5s2Uo5eZ6IJqdIxzgQBGr1/HWE8P/O++i0KN\n+yTuEhseHobV5RLqMPMrKvCtGzeSHlN8vfIrKjA1MAB7SQmqnnkGk3fuwOp0Ir+8HPd7ejD0/vuI\nzsxI9tey+hB7cCbzAc0Ecj9SrXrRhRqLIAiCWLrosVOhVFtiydHe3r64E9CbJslTQS0W6fKaGmbp\n8cQT7DVvgCNuOJOfz0TavXvSfbnoNJmUU13n59k6LjazJTpNJpY2+/77TFgODbH02lAIMIt+rRQU\nMOHIhai8CQvA7FV6e5VTW/Pz2f+FhcDf/33ivoEAu07yVFye5nzzpi7hKDxTgQCznKmoWNaiE4in\nPtpLSvCNS5fSEgrJUiy14Om+N48dSxgjWSpwqLsbdzs7MTUwgK5Dh3TPMTw8jIKqKjyya5ewbmpg\nQHPe/HpZXS64N26Et74ez9+8ick7d4R5/+Ff/xWDnZ34/cwMnJWVMDscAKSWLGrw9F7eSCjVFGgl\n5NdRfL9+vmEDpvmHOykgHqvr0KFFT79eriz63z1i2UHPFLEYkPAkiGzBxc+HH7JIJGfNGiZa+Xpe\nmyh+zYWYWh2lWhbB7Kz6OjW4f6URolE2v8ceA155Jd6xt6ODiWWnkwnuBw9Y7WkgEBd1YqxW4B/+\nQb3LKxfO4+PA976XOA+1etvYhwPnjL6B7+5mdaEDA4AOMbOU4ULn+Zs3NaOFWqRTr8eFC/e5FI/R\ne/KkIGraX3oprWOKt//mRx9h/9GjyK+oUBxDSfDuCwbh8HgwOzGBOx0dGHjnHbzd2Agz94kFEI19\nMFT82GNouHYN5bW1AIDI2JimOBbXVeoR8mqiXGm5fDx+LficeeffVKBaTYIgCEIvJDyJJYfP51vs\nKeiDR0a3bQP27WPLeHRTvJ5HB8Sv+RvDmhqWbqqF1crScI1gsQD/7t8BzzxjbD8xMzMsxTYQYFYp\nAItObt0aF40lJSxy2dKSKDxnZ5nAk4tw8fgck0L2hoYtid5InPBMLQObE72k2hxITZTxaJ3R8bhw\nKa2uhtfvl4wxFw7HN5R9oKJ1TPk85ds73G5868YN
xTHUGu5Y8/KEbcJDQ+hrbYXN5YJJ9LPnrKzE\nf+rqgsPtxnis6ZCtqAiwWJJ+CMLn+7PVqzES+zCotLpaVcypPdtKy+XicF8wKIhureMoXUsx6dx7\nQj9L5u8esWSgZ4pYDEh4EsRCoCasxIjTRouLgfJyoKwM2L6drd+4Mb5tYaF03y99KS6a9GC1srTX\nO3dYdM8oXAQ6nUzw/tM/xUXi+DiL2ALxOsneXiDWfVQ4PpDo0Sm/NrwLLk9PlqNxXQ1HY/Tcp4cc\nI11QAe3OuVy4fOXsWTxz/LhkDE/s/pdWV8NeXCwZR0s4y+fZ1dSEycFBvN3YKMzDSPfWc4EAwvIP\nTiwWRCYmUPH5zwNgfp0N164J4/FIcmRsDLfb2pJ+CMLnO9nXh0hsfoVr10rm9otNm3DE7cY/l5eD\nfwwjf7aV5q4mutU6/2pdSzG50N2YIAiCWBpQcyFiydHe3r60P6kLBFhKp7yRzsqVcRFYVgaMjLDv\n6+uZTUplJYscfvIJS2HljYkA1njnvfeA/n59c9i5k0VSOzqAyUlj83e7gS9+EXjjDeDJJ5mwFL8h\nt1qZcB4bAz7/eeDECaCxkaXDihsiVVXFrVPU0NvISYVwKITzgYBms5OMPFNq93UByUSTHy2MNsER\nN+XRarAjR3z/Tvn9muOIz38+lkLK56lnf6Xj8vMTn4ccl9eLyIMHsNjtcK1bh99HIti5aRN6T57E\nzOgoSqurke/x4LZoPvLrxq+rragIkbExxe2OuN2CfYyzshIVTz2FPYcPo6upKasNfhay8RGhzJL/\nu0fkHPRMEZlGT3Mha7KVBEGkiZIY4XWJAItmrlnD1k9Px/fjzT6sVuBv/xb47nfj+4g72wIsZfbU\nKUCclqhFV1d8fD3w7rglJezr+PF4nac8xXd2Ni6aOzqAF19k5x4IsPNqa2ORTj1RRR4JTREejVkQ\nxPeVe4EuMDwyBQDnA4G0z11JyO4LBnWJeY44AmfJz8frPp9uYSy+f3qi1+Lz9/r9WN/QIMxTvr/8\n3MTibV8wmHDtrCoZBVaXCzP372MmFqWc7O/HEIBP3ntP2KZw7Vr4jhwRrpv8WOLruvOVVwTh2NXU\nhN6TJzEXDqN8xw6YYo3KLE4n6t95R4iois/7V088IdSWZgqj95wgCIIglKCIJ0Gkgt7oltg+pKGB\nbXfsGBNgSnYoAEujjUYBbnBvtwNFRdIIZyawWlkHWpntAwAgL48dl0cyy8pYLWdzM7B2rTRt9sAB\ndo5K4wCsM+zatSy1d9Uqlnb77rvGO8bmQEQxKdyGRa+ozgKZjkylE63kGI1aJhvnl9u3w1lZCXtR\nkaJwVTp/LjDvf/IJ5sNhWBwOFK5bh9Hr14WGRo7yckRnZ4XXJquV+Y2Zzfj6xYso27YN4VAIv9i8\nGdOxrAT3Zz+LqTt3EOYfsqhhsaDy6adRUFmJ8d5eWJ1ODH/wAaZjNkkurxeutWsFOxa+zb5gUHK9\nAGB1XR3uXb2Kr164IGkIxc9bbBGT6v0iCIIgiFTQE/Ek4UkQRuHRLC6+xD6VcrgY8XhY1HB4WJ/F\nicnExCf/P1uYzcyCRUxBAUuhjUSknpvl5Uz4bdgQF8EWC/PztFji1i9btjB7laGh+DqxUAWSXzM1\n5CI+195Up5kWrIaR9NlwKIRfxcSZTUWcGUGPkFWbn9Jy8XglW7ZIRJaeeWoJYaUU2Z84nZibmlId\nUyzWABYRHf7wQ+HnwpKXhw1/8RcIdXfDbLPBYrfDbLPB19yMtxsb0dfaitLqajy4dStBhJosFkRj\nP++O8nKEh4bYcptN6IDrKCsT9hNv4ygvR2RsDPOxTIbSbdvwlY4OxevEz3t6dFSSXkzRSYIgCGKh\nIB9PYlmyKN5TgQCrwSwtlYpOi0XqUynfh3tCPvoocPductHpcsW/56JzxYr4cZKxebOx8+FjykWn\n2Ry3QCkokK4b
GmLndPEiqze129n53L/PRGdFBas17exkArW8nEVt+bUqLmb/p9oxNosdZzPyTOn1\ndzWIWmMXpaY9DrcbBWvW4G5np2YnX62mP4C+jqVGuquKxxvv7ZWs1zMftXRbvu/bjY0J6aBzski8\nragIAJjHpsWC2ViKO++qW7Jli+TnwrNjB+5dv46Bjg70t7XBVlCAZ06cENJjC9etg62gQNJ1+Q9O\nJ1bX1WHl008L8y17/HHh+0dizYhKq6vhqalJ2MbqciE8NCSITgAoXLdOiOAq3ff9LS0oiHmH3v/D\nH3C6oUFYr+fayklln6XCUjw38lwkMg09U8RiQMKTIPTQ3c0a/4yOSkXn3Fzcp1JpH+4JKar3SsBu\nZ+mqzz0nXR6NxlNxuWC125VF6I0bxs7HYmHpu0C8ztPlkgrRU6ek+xQVMcHn9QK3bycK0127WO2n\n282+HA62vLCQRX6vXEmvY+xD2nFWTWypCT69nXz1WM3o6Viqdryxnh4ATOjtfOWVhPHk++mZj5oQ\n1uq6CgDmvDysrqvDN69exfqGBiY85+aA2VlY8vKErrrcAoVzt7MTY598IpkrFy7Htm7F1MgI7nZ2\nIjw8DJPdjrwVK+D7yU9QsGoVZqemkFdRgQPHjuHA0aMoXLcOD27dwr0rV5C3YgXcmzYhIttmfUMD\nVuzaJZmDvaQEvuZmnAsE8HFzs6q36XhvL+bDYURCIfS3taFl82aEQyHdtkJiUtlnqbCcz40gCCKX\nIeFJLDkWpQubuLHI1q2s02wsmpEQgeO2KNeuxZclS5edmWEi7s4d6fKaGvYlJhLRl6qrhLgJ0Nxc\nvIGRy8UaBonSDYVtOEVFTDz6/ez/UChudQIwr1K53QmvQRsfZ+fn9TLBuHkzixwfOBBPT+U2Msmi\nD1mKKAK57WemJrbUBJ9eX0XDVjMa8yvZsgUtmzejubQUbxw4gIJVqwAwK5GuQ4c0z0vPfMTCVRy1\nssSebaV9v/7BByioqsKf/f73ePbNN1Ho9WJ/SwssdjsAlg5b+vjjgs2KUhOhsscfl8yVC5cHfX2Y\nFXV0js7MYHpwEJaf/xyh7m4MdnZiemAAXYcOCdHoqbt3MRMKYXpwELfffluyDbd8MQFwxLIdzHY7\nih97DG83NmL0+nUhRZcdUPp7RT73qYEBnA8EUrrXmXo+cpGleG65/DuKWJrQM0UsBlTjSRB6CIWA\nl15ib/Sam5n4UavpE9cickwmZi3yySdArKmIhKoqZoUijjiaTCwyqdSAKBWS1Ys6HKwrrrzhkdnM\nROLFiyyiye1e/H4mNF98kY175EiiIFRqtiO/NuXl7HhcBOdi7WaOotcqJp39jdSXyu1G8isqMDUw\noLvekM/Hkp+vWvspns/M2BgGOzsBAN76eljs9qT7yml7/nl8+qtfwZKfL1iU8C645wMB/OnUKUFU\neuvrkb9iBULd3Rjv6cHM+LiwjxJevx/DFy/iQV8fYLFg5e7d+NKJE0JNKMDSaedmZhCdmYGtuBjf\nvHIFZw8elHTlvd3WJqk/zVuxQmhK5P7sZ1F//rzkHMOhENpfegl333kH04ODcHg8KN64Edb8fNhc\nLviOHNH9rKT7fOUyy/ncCIIgFgtqLkQsS3Lee4oLLpcrMYro97N1cusTLvz0UlwMrF4N/O536c/X\nbGbNhPr6WK3m+HjiNuvWAX/8Y/y11cpE5NGj6hFIJWHOrw3AoqAPHsS3V+sGuwDdbHP+mVokjHS1\n5Y2DAFa7+MyJEyn5SSY7pnhdXkUFpmXCVu98zwUC6GlpkYhHuUB+48AB9Le1wVJQAEdxMaYGBxHV\n8SFQ6bZtKPrBDzD5d38nCGM+nz2HD6P9xRcxcOFCQiOi9Q0NmJmYkDRzCq5dK5nj6ro6mO12IBqF\nr7k5QZT3njyJ8L17sOTnwxzr3jscs06iLrdLG/odRWQaeqaITEM+ngRhlEyIHL
lnJaekhKWsyhv6\nFBayqKER4enzAR98YHxuQLyTLY+Azs+zWtSyMmXRWVLCmgmJhefsLDu3DRuAJ55QvlZKHpzBYDxy\nzJsYVVczuxWlqCmQE/6YDytGUhL3BYOs5lAkivQKHXEk05wkbVY8nwPHjqHr0CFY8vNxyu9nkcjY\nBz1WlwufvvEGjhQXw2y34+sXL+LSD34giZZyQWcrLkbl008L0UA+F4vNBntpKWbu3cOk+AOSGLai\nIkTGxmCy2WArKGAdb/PyMDkwgIvPP49NsVReACirqREE+DMnTkhEOgDAYkF4dBRf+PGPcfLpp2F2\nOPB2YyPMIp9dW3Exwvfvw15UhPzycpzy+yWR3d6TJzEVy0iYjzVUMpnNqteSIAiCIBYaingShBgt\nyw49wpRvY7MBV6+y1NqSEuDSJVbfqGTtoGRrokZNDXDmDGtGJIqoaKJlzbJiBZurON3W7QYuXwa+\n/e14pJIjjlimkiKr134kB/wxH1YynY6rtq0kkrliBR558smEiB4AnD14EH9qbUXZ44/jwNGjCVFO\nNQqqqjA9MiLYqpgdDsyHwzBZrfj6Bx+gbNs2YdufrlwpCDie2spFJsAEoMlqRelnPwtHSQmmh4Zw\nN/ZzaLJaJVFRS34+rE4nympqhPny69qyeTOmBgYklivrGxowOTgonI/JaoXJbMbKvXsRmZwUIqgO\njwfhmKURj2Q2l5YKPqQAizq7N23C7bY2eKqrsV90fIIgCILINGSnQhBG0bLs4NG31lblTrYAcPIk\n26atjY2zbh3ztvz2t1kjoXT53e9YZ13elZajZbnCRSfvNiumpoZ13m1oAP7wB9Y8ye9nUU6vl4ls\nbu1iszEhnZfHXqdqb6K3WZBWN1u9zYkIw+jpamukQ6hWJ14ArDmP3a54zL7f/AbhoSH0t7Wh/cUX\nAQDjse65ptjzb5P/XAB40Ncn8fLkNiXR2Vlc/Ou/lmwb5n60iDcVWl1XB0dZGfJWrEDxpk2YGRnB\nQEcHbre14e4777CNzWaJ6LQVFqJ02zaER0bQ39YmOV+H241v3biB9Q0NEsuVPYcPS65FdHYW8zMz\ncLjdsMfOy1NbC091tWQfACiPNfsy2Wywud3I93iYt+jwMG7Ljp8NlqJFCUEQBLGwkPAklhwZ8Z5S\nEyvl5eyLv+mVb6fHS1KcMtvWBty6xSKTra0sssnhvp1ud/JIpJxIBHjsMfZ/XR378npZ859kIq6m\nhglKufCsq2Odeg8eZDWpxcXAiRNxaxQ+x48/ZgLwc59jacQjI6wpUrajkFoCVc+HARqQn1nqGEnH\nTdaJ15KfD4BFFGGx4KcrV6K5tBQ/W7UKJ3bvxlt1dYLnJgDBN7Mg1j05OjeHgqoqwS7FKvbFTcLA\nhQv42erV+PXu3Xht9WohTRUAzDYb9re0YPLOHYRHRjA9OIiJmN2Kp7YWc+Fw/GdXlLHwMYDI+Dju\nXbkiLLv1m9/gzQMHEA6F8ItNm/AvK1bgj8ePY25qCt76eqG+dF8wiLyKivg1KyhAeHQUe159Veis\nuz9muyKuSXVWVsJRXg6r04lIKITbbW2CpY3V5UJ4dFRTEKYjHsmiJLvQ7ygi09AzRSwGJDyJhxM1\nsdLbCwwNxb055dvp8ZLkNiMFBSzCyaMgJSWsO2xVFbBzZ7zx0NSUMeFpNrNx29rYMVatYqK4s5P9\nz43sucjlgvPMGSYoRbVnOH8eePNNdt5a4o0LQB5Rqq0FPvoo+6mvWhFNPR8GEFlDr31Lsm0dbjfK\ntm8HAETu38fttjZMDQxgZnQUk/39GOzsRF9rq2CBUlZTA3tREV73+TD4298K40zevYtjjz+O6dFR\nWJQi+3LMZoRHRjDZ14e7nZ2sC62IW7/5DX62apUkqln86KOCUNT6uXV/5jPC9/y82l98EZMDA4hG\nIojOzuJuZ6ckwutwu7H6S1+CKXausw8e4H
ZbG7oOHYLd7cYpvx9vNzYK6c9cLPaePInw0JBQu2p1\nueDeuBGOsjLMTkzoinqmIx6XokUJQRAEsbBQjSfxcKJWNyhf3thovL6Q1y52djKLFIClwX74IfO7\nlB/nD38wliJaUsIijnxOmzfHbU4A4K232PHffBP4/vcTayh7e4Hdu4ELF4Af/ICJ62vXgOFhfZ1l\nX30VOHRIuzbTKGr1s1p1t3prRYkFwUjNpxjecMdTWwuH243b4sZcgGA5wjvl8hpJNRxlZZidnITZ\nbsfc1JQkkmkpLMScqJGWvDZTC0teHsp27EDo+nVWV2mxKPrrmmw2qe8mgILVqzF5545wPJPVCmtB\nAeYjEZisVljsdhQ9+iiGYt1oAcBeUoLnb97EKb8/oWuvvMbV5nLBZLMJ9Z6Ttgo4IwMoqanFV88k\n/3BAfA/0fJAghixKCIIgHm7IToUg1FASK4EAcP060NMDvPsuE2Xi17GUPmFbuUjiy3p62La8FpPj\n9bLurVy8Pf00a85z7x6Liqq8eVWkspKJRbebpc6Ka0erqlh6rx7Eoq6qSj2CqSX+MoHaMai5kCp6\nRF6qQjDVfY1YsIgRCxcAaH/pJfS+8YaQMbC6rg7PvvmmsL28mY4SjrKyuG2JqGkW9xi1FBRgTqFj\nrRgtUWp1OmEpKEB4aEjzHIF4Y6PkB403AjPZbPBs3w5HaSnmIxH0t7UJwrCrqQk3jx1LuA7cambE\nVYsfThzDN3AIE/WHETyhz0uVxCNBEARhFGouRCxLMlKXoFQ32N3NopQDAyyiJ38tRilVly/r62P7\nyQ3mx8fj+xw6BKxZw7rI8je1zzyj3PhHic99Lj73WG2cgOjNuSbiNNVkabPZSmcVp9HGbDQSjqEn\nvTlNlmqti57UyHTSJ1PZVy3lUqt+UNzIyOF245njx7Eq5jFXVlODL772mmR7TyylvWjzZpjz82Er\nKYGJP0MxTOKGW7GfM09tLfzvvov1DQ2oePJJzfOp2L0bXr9f+VwLC1GydWuC6PwYTLACLOXVHEub\nNVmtmJdFQBWJRuGsrIS1oADRSARDXV1CqvH6hgaUbNmCU36/ougsq6nB12Ln9+6u07gHLy7VtuD/\nbU7ebfh1nw9vNzYK9jTUJCi3WKq/o4jchZ4pYjEgH09i+WLUk1Murhobpa/FY167xl57PCyddvXq\neM2mEtXVbFve6VY8Pl//2mvA+vXafp5mM0u1fewxJlwnJ6Xr//qvWS2nEvJrwj1HtdJU9W5nBO7J\nyQV6fT0TmPJjKPmBLkNSiS7qqatLp/YulX33BYOKUTMuYgHgfCCgGAnl12CspweFXi+s+fnw1tcr\nWqscOHoUv9q+HfmlpZjo6UGEC7BYtNBRVobCdesQDoUQnZlBWU0NXGvWwNfcjK6mJtw5fx6zU1OK\n6bBiBi5cQP6KFYp2RLPj47h39ariftHZWZjtdsyKfi8ki5yu9Plw97e/xXw4LEQ0g2vXSra529UF\na34+ImNjgr0LwDrorti1C9aCAsGP1O5242C/HzsrnPifjwXhFnmUyp8x8b0RW7Wo3SeCIAiCSAWK\neBJLDl8sCqIJtzVpbQVi1gtJkUfWlCJtPKo5PMxSUzduZNHNvj7lOk2Xi0Xztm1jTYQqKoBjx+Lj\n+/1McJ09y5bxxkQAi2TyRkDV1ay2E2DdMzs6WG3o/fusu60YU5IsB3mkVq+lid7tdHIuEMDrLS14\n6/59hAF2bs3NGT2GEczB4KJbQaQSXdTT2MdI8x+1fXmETc/14aJHvr0eEcuvAW/2c7utTdVaxeF2\no2DNGtzt7JTUbyIaRUFVFdybNmGoqwvRmRlYnU5YnU7MxbYLdXdjamAAkfv3EY1EYLLZhAilnOjs\nLCb7+1UbCc2Ju+Da7XCUl2MjWKSTd9a1FRezDVQsjyxOJ8xWK/7s44+Fe9XV1CQRl6b8fMzEGiEJ\ny2PjRc
bHhSixWEwOd3bAM9CKq4cCkuurZmejZtVCLD66/+4RhE7omSIWAxKexPJFHDlMJsY4bjf7\n8vuZWOTL+Gu5ncpHH8U7vPL/a2rYtqWl7PXEBOs829ubmLbrdjPLkhUr4sf48Y/Z93Y7E6ozM6ye\n8+xZZpciPh/xG+Gysvjxi4rUu8DmSAfYUHc3Bu7fRx+A8zYbcOnSotZu5oIVRCrRRT0+m3q20dp3\nvLfX0PVRup77gkEUrlsHi8OBtxsbFQUsvwbci9Ph8aC/owPNpaV4I2ZFwjkXCMQ72ooEXWl1Nb75\n0UfCGJ7aWpTV1OBurDPuzyorMXL5srC9yWoVOsymhKgue35mBiaTSegkO3PvHqxOJ9ybNiG/ogJ5\n/OdUPsTkJG63teGd//AfsL+lBV1NTehpaZH8jDsKCyXXxl5SgpW7d7Pr5nJhWmaXovQ8cXsVW1ER\ndr7yirCt+MMJJasWgiAIgsgEJDyJJYfuugRe+1hYCPz93+vbRx4R1LJT4a+vXmX/nznDaiy5wCsu\nBl55Jf7a5WJpsuI33eJjHDrExGhBQXw9r+cMBuMNjsSi0+Vix+XHF1ujbN8uFaELUC+pB+FNcUkJ\n9nzyibRxUwYw6kd4fWqKzWcRozzpRCazjVFRrLS9OEKpJmD5NeBenCazGdODg5gZHUW/zA4k1N0d\nj3TOzcFst2N1XR2+cvas4IfpWrcOZocDoY8/Fvabm5oSLEdgMglzNYJadBQApgcHcZU3NAIwGw5j\nqKsLUwMDmB4cTD5G7Oc61N0dnyOYsCzZvBkurxfuzZuRX1GBb1y6hC+dOAFHeTlmJyYSro/S81QY\n+zmLjI2hS1S3Lq+vTfWDCiJ7UD0ekWnomSIWAxKexPJl3Tr2//h4YnMgNd5/n/1vtQL/5b9II4T/\nf3v3HhzVeeZ5/PdKfdENqYUkLMsYGceY4AQb2fgaKGvWJo4xDp148SSe3eCdyqomrtp1qiZ4s5PL\nTtXEtalJpWaSmirXpioLGSfEBmKIMSYuZK7GNg4bcBJDjA22bAxCCCSEuLRuZ/84fY5Ot7p1aZ1W\nq8X3U0WZVp8+5+3Tr4Ueve/zPMXF9mqkN5fzqafsPMtFi+xcz8ceswM8J5A6d86+9tq1Uk2N/drm\nZmnOnNSrqM4P9c6W24YGafVq+++RiF0VN1l3t902xdmm6g1yz57NbGttlrk/FB87prDPQac09hXM\nW7/3vZwHfZP5B/7kIGakwD5dED1SAOvcg2n19bp/3bqEQjzBioqE1yQHjAM9PTr9hz8knKts1iy1\n7d3r5iwOYVmD21a9uyIKhv+ncUyro2kqVYeSPufK+fPVuGaNJM/Kb0WFVFCgvu5undy1S93Hj7tB\n7Kb4DoiahQsl2avDF06ccD+T5Pm0u6lJHYcOSbILELGNFgAw0WingsllrAWBhpNJG46KCsn5QdRp\nL+IU1YlGB9t91NZKhw8nfs2xYoUdDCZf2xlPWdlg8BoMSvfcYz+/Zs3gGJPbvXiLGrW326+74w57\n+27y++vstAsPeSttLlwo3XSTvRrqx72d5MbTjxAj86Nlymg+k5eWLNGJ5mbJGLdCbe3nPqfPx4tn\nPTd3rmKeVUTJbj9SXFOjstmz1b5//8itS5IU19YqMneuTib/f+2nggLNuPtuheO5nwWhkFsUSJJ2\nrFypo88/r8LiYvUOs2I/bfZsldTVqevoUQ309bkBdn00qgc2bkw49tmrr3b7nia3pgEAYLxop4L8\nk6pNSaa820qfeip93qOXU8ynpER67bXEFULvCktrq11YyGnf4OR4OquWqba0Ol/z5mr29trvNxRK\nXcnVCTrXrUssatTWJr30Uupts5GIPffB4sIAACAASURBVA7JXjFdvtw+xrsFN/neetuaTIEWCpls\nWx3r9tx8Nt736qzIhaur1e1ZZRvJWFd1S+vqFK6pkQoKZPX1yerr08ld
u7SnqUnhSERfefddlc2a\nlbBt1ert1cUTJ9S2d2/KoNMEg27Rn1Rm3HmnHdiOJi98rJyV1IEBte3dq/Y//EHn3ntPJ3bs0HNz\n5uh8S4sk6XxLiwZisZRBp/NeqxcuVO+FCzq1d68utbaqx3PsqddfV6yzM+Fz7otvJ5fktncZyZX0\n/wQAIPtY8cTkMopVyp07d469Gltj4+DK5IoV6dtztLTY22Zfe21o3mFnp10IyFtFNhCwCwlt22Zv\nd03VbiR5FVeS5s2zg1fJDg63b098nfc1XV32yqZkV389diz9sc5KZvKKqTT8vR3t/Zmidu7cqa5/\n/MeMVvHy0XArlqNp6+KsXHbHA7zk84ylNUy6Y3c3NenounUJuY6SvSW1uqFB51taFCgpUW9Xl045\n/384Cgrs6s/Ofx2FhTIFBWnbpwQjEVV+5jPqbmnRxZMn026TTSdQXq6+ri69K2muJBMKSZalUHm5\nZtx5p/p7euwVXA8TCLhbdwMlJaq+/XZ1vPPO0O3BhYUyxmjGnXeqqLpajWvW6NfXX+/28fS2QZHs\n1dDLZ8+6969oxgxdbmtTVUODlm3fPqrgP9OVbfgvo3/3gGEwp+A3VjyRf7JV/Ga01Vzr66WPPx4a\ndDY12dtq45UlXX199uqjN8cyWaoWJocP2yuR0ejQoDP5NfFKlKqsTF39NdUqcXIuZ1OTHcB627lk\ncn+msPH0u8w3w73X0eTHOiuXIU/lWO95xpJjm+5Yb4GdYHm5CouLFaqsVMlVV+nc0aPua5xKrdMX\nLNC1S5cqVFXlBptOFdnpN99sf72/P2XQaYJBFc2YoYJgUG179+ri8eNu0BmKRNxKsl4F4fCQr/Wd\nP5/w2OrpkdXbq9iZMwqWlmrJ+vUKJ1W2teLXKSwpUaC0VK27dtkBZHzFtaCkxF7l7O+X1denU3v3\nui1mquO54NMXLNCX9+9XcW2tJPvzKKmrc+9fqLJSX3rrLV2/YsWog04p9TxhFRQAkCkCT0wuoyh+\nk9Fv6JyA9qabEtujjJYT3J09a2+vdbbYSnaPzeEClVRBXSRir552dAwWJHI0NdlVciV7NbW+3g4Y\nDxxIXf11NEHjkSND27l4TZJqt7nS2Ng4qavK+m249zqWADzTIkKjud75eEBpAgF9cc8e1dxxh3o6\nOvRJc7O7yjp9wQJF33xT169YoYd37NCDW7YoGK9mHayo0EPNzfZzu3YpEP+6kyvqLSBk9fbqclub\nYt686Pi1p99yi6obGoaMO5hqu258d8/cFO+z4bvfVTgSUc0ddwx5TWFRkR49dEgD3qJF8XMNXLyY\nUMzIWxhoSbz1ycM7dmhafb0ePXzY/Ty649t2TSCgh3fudAs2jWVup/p8J0ProSsRK1PwG3MKuUDg\niSuDE9AOl+c4HG+l2N5e+09dnb1quWPH8MFauqAuXT7rkSN2QCrZqx779qUNGHc3NenFri69XFur\nWKqVzOTxpwtOJ0m121yazFVl/Zbuve5ualJvV5eKa2u1ZMOGEe9FuvOk69mZarUsXfBaGv8li9XX\npwM/+EHKticXT5xQqKJCoUhEr0SjennpUhVfc40kqffcOR34wQ/c8V08ccI+X3+/CouKFHT6YsYD\nSG/epyksVKiyUlZfn1p37VIoEhmywhnztEwZjd899JB7fwuKitxczekLFug/nTypA08/LSctxRlL\nMF58SIWFUiCggnBYJhRy72ny/Q9HIgpFIlo3b54uOO83fv9SGWn1MtXneyXtDAAA+Ct9MzJgkso4\nL8G7ktjQMLYtpWvX2q/v6LDboYylUq4T1HnH4VSolYYGg94gMRIZvF6K8XYeOaLW+OrPnlWr0udg\nOeNPlYMKcl3iOo8ccfMl9w03n1JIztUsnTXLzQ/c09Sk+9etc1fLvF9zgptkqbbxPj9vni47udGy\ne2b+e02NpMEWJ0We7aaLf/Yzd1ze
XM/+y5fVf/myJNnVZSMRxeKrqZIdnDqBZvXChWpcs0a/W7Zs\naC5pGk6Op1fJNdeo49ChIeeYdt11Ckci9tbiePAXKC7WNffdp3t+8hO9sHChm7s50Nen9n37Eu6f\n974X19Tow9/+NiEvNlRZOSRAdF5z9o9/dHNEnfOl4r3G4mee0b5Vq9zKxGPJ50Xm+B4FvzGnkAsE\nnrhyeFcSZ80aWwDmBI+pivZIY2sD46x0SnaF2uQA1hskOudOEzAG4tsRqysqtPhHPxp5/JOFn21z\n4JvxrGYlB5WpzjXS+Xc3Nall82b1x2Kquvlm1Uejaly9WvueekqdR46o6rOfVcGtt+r0/v263Nbm\nVrt1FRaq4lOf0lV33qlQRYVeiUbV9sYbGujpGfY9hyIRdZ84oYJQSAM9PQpXV2tafb2MpPIbbtAr\n0ag633039QmSCxilcXrfPhV6tvta/f12UBvv0+td0b18+rQ+2rJFH//udxrwFDgKlJWpr7tbgbIy\nxTo6FOvsTLjv4ZqahKAzWFGhRw4cGBIMel8jjfx5e49P/oVEql8mAACQClVtceXIpK/naHmrwobD\ndkB1223S+vVDr+PjOGKLFmnP3r1aLCk8nmq0Ex0IXuFVdCcrb59NJ9gb7UpWcu9USQk9O3c3Nanj\n0CF1HT2q6JtvalpSvnKqKrZOJdXk6qrtBw+q6/333TzI5ODv+hUrdLGtLSG4SiUYiWggFlO/p9VI\n6cyZKquvd1cmvVVnTTA4WJyooEChigrJGPWcPatQZaWqbr45ff/PggIFy8rUG+8TXDpzpv7jn/7k\n3tdYZ6fWzZvn9tpMJVRZqd7ubncM4epq9Z4/b7eNKSxUqLxcPR0dCkUiuuqee/QffvWrlJ+b81lV\nNTSobNYsNa5ZM+znm/zZeufGQG+vTjQ30zMXAK5wVLXFlWe4fpRr10qzZ9uBYXJBn/FyVisCASkW\nG9ySmyqP1MdCPuHyct0vKTzaarTp7o+f/VNHgyq6WTHeiqPenL6xFpFJztVMzg90tvFeam3VvhT5\nyt4qtlJiEZ3kvqFdx44NBp2SwtOnu3+fvmCBCouLddbZVl+Q+p+5wqIiTf/MZxKCTkn64muvuf00\nvSuqocpK1d177+CBAwPq6ehQz9mzCpSUKHLTTTLBYPoemQMDbtAZLC/XF197LSFIC0cievTwYYWr\nq1O+vKCkRD0dHW7QGSgrU6y9fbBXaX+/ejo6VFJXp69+8IEe3LLFDfjT5dUu275dD2zaNGKwmPzZ\neudGsKzsiinKBQAYHwJP5J2dO3emf3K4ACoSsbfY7t3rT4DlDeKeecYOJr2VLisqsl/IZ6xBbLr7\nM9GB4CSrojvsnMojflYcHeu225GKM410Puf5YEWFrl26NKHtR9f778sEAurp7LQr2nq2n1bcdJO+\nvH+/yurrFaqqUlF1tbqOHh3sb+kJSh2FJSV69C9/Sdkm5fUnnxxcjY2vooYqK/XIgQO6f/16t2WJ\niVe2DpaXq3L+fLXt3asTzc0a6O1Vmk25dpEgSb1dXUOC791NTVo3b5560vzCIOQUQSotlQoK1BcP\nmANJ1XVrbr894TNINSfGUkhrd1OTXolG1dPd7X7N+1k2rl59xRTlyqWp8j0KkwdzCrlA4ImpJVUA\n5Q0QnTYofgRYmzcPBnFPPmkHkwsX2s8VFtptVrJtrEFsugBzogNBquhmhZ8VR/1uLzPS+ZznH/vw\nQ3e1znGprU1WX5+7+ljozYc8dUp7vvENlcycqZ4zZ3SiuVltb70labC/pVNwqHL+fJXU1enRQ4c0\nrb7eLoI0c2biQIxxA9LC0lIVzZihRw4c0LT6endV8voVK1R9662S7CDSWSFNDgK9XwtFIiqOF0IK\nVlRIhYXuSuSOlSt1dN06XWptdd9jcW2tTHy11gQCWvKb3yhcXa2+CxfsgDgefAfLyhSeMcN9v41r\n
1iRef5xzIlXgeiW1HgIA+IccT0wNTo5iMCiVlkpr1gwGNd58wuXLpVDIn+qu06cPFiuKRqWNG+3t\nq3PmSPEqlJMufzFdcSRMCd4czYkOCLJZ3fQXNTWKtbersKREdY2NGujp0SfNzW6xHUl26yHLSsj3\nrI9G9cDGjdqxcqU+2rpVVbfcoiXr1yeMzZs/agIBfeX997X/+9/Xe88+627nrV++XA9s2pTwPjve\neUex9nZ7a6wxQ3qASlLRjBn60ltvuVVgty5b5vYg9eaOhmtqEl5f1dCgZdu361f19erz5IRWzp+v\n41u3uq8tLClR/Re/qAsff5wyd3Z3U5POxvNqvxR/bqyfU3J+J4EmACAVcjxx5XC2kDY324Gl94cj\n7yrfmjX+rbTddpv934YGKV6ZUpGIdPvtg9cb6wrDcDmqfmClcUrLZS9Sv7b5pspJ/PL+/SqdOVOP\nHjqkB7ds0f3r16ts9myZ+NbVwtLSwZzPeNBZvXChQuXlerGxUe/98peKnT6tE83N+vWcOQn5r95q\nslZfn15/8kl7BdPzC9OBeF6lUwCpddcuxdrbVRAKyerrSxl0StJVd9+tA08/rYttbXr1scd05sCB\nhGtJ9kpo1S23SBq6zbgwni9aWFKiqxYtUk9Xl4pqa7Vsxw73flw8edLNnX3h9tsT7lvnkSNq27tX\nlz15tePN3QUAIFMEnsg7KfMShstRzNY20vXr7fNu3z60HUqm15voIj+QRK7LcEZbsCiTLZ3ec+9Y\nuVIvNjbq2IYNQwKjafX1+puPP3ZX7F6JRtXT2ekWIwqWlrrnrJw/X/XRqB7atk3nW1rs1UxPxdue\n9nY9W1en3y5apJeXLtXiZ56xV0vjBnp7E4JRSTr7xz+6Y3MLIBUWaqCnJyEnM1hRoaIZM/SupOC0\nabrnJz9JCPRStXW56p57VFpXZ/cNNUb9nmO8AffFkyfdIPKdn/7UvR/OWANlZYqdPq3jW7dq3bx5\ninV2ZtTSJlkuf5mBQXyPgt+YU8gF+nhiavD2vkz+ASlbPSzTnXc816PaKyaZ0fZpvG/t2jFv803o\nQVldrZizRV3pA6NUPSiXbNig1598UjJGjatXu9d3giynb6Zj4NIlt13KvlWrFKqocAPIglBIjatX\n6xc1NVJ8VfLC8eO6cPy4+3pv65SqhgZdbm9X78WLqm5oUO/581Jbm3rPn9e+VasSAr3zH3yg2Jkz\ng9uCJX28dav794FYTCeam/X83Ln663ffdQNuSeqK9+wNlpfrTk/PXue+X+7o0InmZknSpdZW7Wlq\nSvmZZPI5AQDgB3I8gVxK7p/pfM3vHMyJ7tOJKSObOX7ec4cjEX3S3Dykt2RyTuKG+fN14fhxBadN\nU+3ixWl7VUqDOa8N3/2utj74oPp6etTjCW5DlZX66rFjal6xwr32su3bte+pp3Rs/fohFWZDkYiu\nvvdet4CPE8C9Eo26wXBxba0utbYqXF0tU1CggZ4eFYRC+lK84NGLixbpC1u26KX77ksItJM5PUwd\nv120yA2Wk59z3qvTB9Tbb7Nl82b1x2Kqvu22IfmtAAD4ZTQ5ngSeQC55Cx9lsxDRRF0HEy6bRX2k\n7BYs8p5bUsrreIv/XL9ihbpPnHAL9JTNnq2yWbNG/d5jnZ16ft48XW5ttStPO/82FRTomr/6K3dL\nqfeajlAkokcOHkwo3iPZ9//Yhg3q6eiQCQQUKClRYVGRps2erdP79rnHeYNF72tchYVupVonAPa+\nn9H8AiD5s0p+H6kCVgAA/EBxIUxJ485LyHYBn7EYaWutX2NlC++w8jnXxc/enamMJ8dvpPxQ77nT\nXSc5JzEUb3VSvXChSurqRvXedzc16dmrr9avr79elXPnKlxVZQd5AwP2n74+ffLqq0OuWdXQoGuX\nLlV9NKqvfvCBDjz99JD303nkiBtAWn196u3q0tttbeqOt1iR7D
Yn3m3D3tdI0jVLluirR4+qfvly\n1UejQ4JOyd4iO232bBWGw3r1sccU6+wccn+T76E3V7WwtFSxjo5h83QxeeXz9yhMTswp5AKBJ648\nk6mAz0iFiPwa60T36byCjLb4Trb42bvTT94KsOMJipOrqnofe4PQ4d5755EjutTaqp6ODp3ctUsF\nTj9fr4GBIX0ql23frge3bNEDGzcqHIkkBPnP3XijXl661D2X0/tTkspvuEHRN99UWX29QlVVKqqu\ndu/Ji42Nat2zJ+HS4UhE0+rr9cCmTe61vMe/vHSpJKl01iyd2rvXvZ8j/dLhvrVrVR+NKlxVpf4L\nF/RJc3NWfjkBAMBosNUWV56lS+1AbuHCkQOxXOdGjmWsyInkraATvZUxl707h+O9L04uZfL4xrtN\neLTv3dmmKtmrmJ/fuFH7Vq3S+Y8+GtwOW1CgqxcvVll9vY6tX6/+S5dkAgFd9bnP6YFNmxSORNzz\nePuHmkBA4enT9dC2bdr//e8nFDjy3oPi2lpZlqXLp04ljM0Eg/paW1vK8SfPrZ7ubvf6M+66S5J0\norl5xPxbenECALKNHE8glc7O0RfwyXVu5FjGipzgh/rUnPsSqqzUIwcODMmNlMYftI82cI11dmrn\n448PqXob6+zUr2+4QT1nzrjHhquq7MqzHsW1tXr08GFJGlJB1nvMzM9/XudbWhQoKVFxTY1aNm9O\n2FJrgkFZ8Z6gjof37NHVixalHHfy3JKk52680e0bWh+NqjAYHDHwnqy/nAAATB3keGJKGndegtPu\nZDQ/gOU6N3IsY0XGxjOnkreCwlZcU6NwTY2qb7tNoYqKlMckbxMe67bl0ea3hiMRFc+YobY339Sz\nV12l1ZGIXlqyRJI04447Eo41hYVDXn+ptVXPz5snSbp/3TotWb9exbW1Q475aOtWte7apVe3btVH\nW7cmBJ3B8nIVTZ8ev8jgv8vv/PSnacedPLfCkYhqFi6UZN+zxtWrR5V/Sy/O/Ec+HvzGnEIuZBx4\nGmN+ZIw5bIx52xjzgjEm9U8WQD4jNxIj4If61M63tCh2+rRODJNXmBxYjbVQ0ljyW508z4GeHvWe\nO+eO6761a1VQVGSfb9o0PbRtm0qvvVYmGJQCg62uL8d7Y0r2Z/7o4cOqX75cRTNmuGOouuUWSVLF\njTeq0MkjLbD/me3t6tKltjb7a/FdQN5xpwq6U80tftEBAMhXGW+1NcYskfSqZVkDxpgfSpJlWd9O\ncRxbbQHgCpPJFuSxvmbHypX66OWXVb1ggUrq6txtrqm23XrzPCUpNH26KufNU7C8XKd//3u3p2Z9\nNKpYR4e7BbggHNZALJZ2TOlawmxdtsxt+5JKSV2dVrzzjnu+Z6++WpdaWyXZ231r7rgjK+1xAADI\nhtFstQ0M9+RwLMva5nm4T9IjmZ4LADC13Ld27ajyCr15moufeUb7Vq1yXzNSDuf5lhbF2tv1SXOz\nwjU1bu7jnqYm3b9u3ZBzv/7Nb2qgp0cFwaAut7frlBMYera+DvT0JKyklt9wg33+NO/BWZV0OH93\nKu4Wlpaq/8KFhNdMX7BAVTffrFeiUfe99cdi7vOxM2fcVV/6bgIApgq/cjz/VtLLPp0LGNao8hIm\nU69OTHrkuvhvtFuQvdtr961alfCakbbenj96VJIUrKjQ9JtukpS4fbVl82b39a9/85t6YONGuz3K\npk1u+5NwdXVC4FkQDCZsZ7148qQb3I62FcnOnTvdc9TefXfCc6UzZ+rhHTt0vqUl4b1V33abJCkw\nTIuYXLfuSWeyjmsq4XsU/MacQi4Mu+JpjNkmqTbFU/9gWdbm+DHfkdRjWdbadOd5/PHHdd1110mS\nIpGIFixYoMbGRkmDE5/HPB7t44MHD458fLz/5U5JikbVGP/6ZBg/jyffY8dkGc+V9PjQpUuaLjvQ\nGvja17Rz5073+UOXLum0pM
/Fg7Dk138Qiajj+HHNPXdOoUhE5++9V9d961tu4PpOd7d6Jc2VdHL3\nbv3wzjt16/e+p88vW6b71q7Vv0WjajtzRjPi22yPlpbquq9/3Q2aveMLlJXp90ePauCll1T04ovq\nPHJEhy5dcs/nfX+SHXgHnnhCA93dKvrzn3W5tVWtN96ou378Y/u5khK9KzsfdGU8wPy3aFTz/u7v\nFHrhBS3+2c/0xsGDCe93z1tv6ezbb2uu7FXdwBNP5Pzzk6Su+C8I3pV0OBrV3/P9lsc8nvSPDyZ9\nf8n1eHicf48PHjyozvgvGz/88EONxrjaqRhjHpf0XyXdZ1nW5TTHkOOJiUf/y7HJdb9SXLGGa/Ux\nUhuQkXJCX1qyRCeamxO2u16/YoVC8UJGgZISDfT26kRzs0KVlZr5wAO6ePJkwtbeWGennpszx80B\nLZs9WxeOH3fbotQvX64HNm3S85/+tC62tqogGNSX9+9PaB+T6n1k0uJksrbumazjAgBMnKz28TTG\nfEHSjyXda1lW+zDHEXhi4tH/cmwaG3PbrxTwGEt/zuGCN+f5WEeHPmluVqCsTDPuukv9ly65+Z3e\nXpivRKMp+4p6A6uCcDihaFB9NKoHNm7U6khEvefOSbK30/7Nxx/7ek9G835zZbKOCwAwcbIdeL4n\nKSTpbPxLb1iW9USK4wg84audnq148MkVvkLMnJpcXmxsTBkAjiRdwBrr7NRzN97oFh8qrq3VpdbW\nISt06VbuvIHVq4895lbHnX7zzXp41y6FIxH9oqZGsfZ2FZaU6Oqf/1xLv/KVMY9zrMeM5/zIL3yP\ngt+YU/DbaALPgkxPblnWHMuy6i3Laoj/GRJ0AsgT9CvFJDKW/pxe6YoRhSMR1Sxc6J4z+uabKXth\npuuR6S2UdN/atapfvlz10agbdO5uatK0T31KBeGwom+8oZLaVKURRh7nWI8Zz/kBAJho48rxHNUF\nWPEEAIxBpls3h8s1zOZ20LGu0CaPc99TTw1ZoRxP3iQ5lwCAiZbVFU8AALJhtK1YkqVbsRzPOVNJ\nbh8y1hXa5HGmWqEc7r2MpLimRuHqagJOAMCkQuCJvOOUdAb8wpyaGvwMLoeTHCgmB4kjzafkcaYK\nXMfzXs63tIy59ygmN75HwW/MKeQCgScAAGOQHCiON+Adz+rmaMYHAMBkQI4nAABjMNnbh0z28QEA\npp6stlMZwyAIPAEAAABgiqK4EKYk8hLgN+YU/MR8gt+YU/Abcwq5QOAJAAAAAMgqttoCAAAAADLG\nVlsAAAAAQM4ReCLvkJcAvzGn4CfmE/zGnILfmFPIBQJPAAAAAEBWkeMJAAAAAMgYOZ4AAAAAgJwj\n8ETeIS8BfmNOwU/MJ/iNOQW/MaeQCwSeAAAAAICsIscTAAAAAJAxcjwBAAAAADlH4Im8Q14C/Mac\ngp+YT/Abcwp+Y04hFwg8AQAAAABZRY4nAAAAACBj5HgCAAAAAHKOwBN5h7wE+I05BT8xn+A35hT8\nxpxCLhB4AgAAAACyihxPAAAAAEDGyPEEAAAAAOQcgSfyDnkJ8BtzCn5iPsFvzCn4jTmFXCDwBAAA\nAABkFTmeAAAAAICMkeMJAAAAAMg5Ak/kHfIS4DfmFPzEfILfmFPwG3MKuUDgCQAAAADIKnI8AQAA\nAAAZI8cTAAAAAJBzBJ7IO+QlwG/MKfiJ+QS/MafgN+YUcoHAEwAAAACQVeR4AgAAAAAyRo4nAAAA\nACDnCDyRd8hLgN+YU/AT8wl+Y07Bb8wp5AKBJwAAAAAgq8jxBAAAAABkjBxPAAAAAEDOEXgi75CX\nAL8xp+An5hP8xpyC35hTyAUCTwAAAABAVpHjCQAAAADIGDmeAAAAAICcI/BE3iEvAX5jTsFPzCf4\njTkFvzGnkAsEngAAAACArCLHEwAAAACQMXI8AQAAAAA5R+CJvENeAvzGnIKfmE/wG3MKfmNO
IRcI\nPAEAAAAAWUWOJwAAAAAgY+R4AgAAAAByjsATeYe8BPiNOQU/MZ/gN+YU/MacQi4QeAIAAAAAsooc\nTwAAAABAxsjxBAAAAADkHIEn8g55CfAbcwp+Yj7Bb8wp+I05hVwg8AQAAAAAZBU5ngAAAACAjJHj\nCQAAAADIOQJP5B3yEuA35hT8xHyC35hT8BtzCrlA4AkAAAAAyCpyPAEAAAAAGSPHEwAAAACQcwSe\nyDvkJcBvzCn4ifkEvzGn4DfmFHKBwBMAAAAAkFXkeAIAAAAAMkaOJwAAAAAg5wg8kXfIS4DfmFPw\nE/MJfmNOwW/MKeQCgScAAAAAIKvI8QQAAAAAZIwcTwAAAABAzmUceBpj/skY87Yx5qAx5lVjzLV+\nDgxIh7wE+I05BT8xn+A35hT8xpxCLoxnxfOfLcu6xbKsBZI2SfpfPo0JGNbBgwdzPQRMMcwp+In5\nBL8xp+A35hRyIePA07Ks856HZZLaxz8cYGSdnZ25HgKmGOYU/MR8gt+YU/Abcwq5EBjPi40xT0v6\nz5IuSrrLlxEBAAAAAKaUYVc8jTHbjDF/SvHnYUmyLOs7lmXNkrRG0r9MwHgBffjhh7keAqYY5hT8\nxHyC35hT8BtzCrngSzsVY8wsSS9blvXZFM/RSwUAAAAAprCR2qlkvNXWGDPHsqz34g+XSzqQyQAA\nAAAAAFNbxiuexpgNkuZK6pd0VNI3LMtq83FsAAAAAIApwJettgAAAAAApDOePp6jZoz5J2PM28aY\ng8aYV40x107EdTE1GWN+ZIw5HJ9TLxhjKnI9JuQ3Y8wKY8w7xph+Y8ytuR4P8pcx5gvGmL8YY94z\nxvyPXI8H+c0Y83+NMaeMMX/K9VgwNRhjrjXG7Ij/m/dnY8x/z/WYkL+MMUXGmH3xGO+QMeZ/D3v8\nRKx4GmOmOX0/jTH/TdItlmV9PesXxpRkjFki6VXLsgaMMT+UJMuyvp3jYSGPGWM+LWlA0v+R9PeW\nZf0hx0NCHjLGFEp6V9L9kj6R9HtJX7Us63BOB4a8ZYxZLKlb0r9bljU/1+NB/jPG1EqqtSzroDGm\nTNL/kxTl+xQyZYwpsSzrojEm47bo4wAAAphJREFUIOk1Sd+yLOu1VMdOyIqnE3TGlUlqn4jrYmqy\nLGubZVkD8Yf7JM3M5XiQ/yzL+otlWUdyPQ7kvTskvW9Z1oeWZfVKek528T0gI5Zl7ZHUketxYOqw\nLKvVsqyD8b93SzosqS63o0I+syzrYvyvIUmFks6mO3ZCAk9JMsY8bYz5SNJKST+cqOtiyvtbSS/n\nehAAIOkaSR97Hh+Pfw0AJh1jzHWSGmT/Eh/IiDGmwBhzUNIpSTssyzqU7tiM26mkuOg2SbUpnvoH\ny7I2W5b1HUnfMcZ8W9K/SPovfl0bU89I8yl+zHck9ViWtXZCB4e8NJo5BYwT1foA5IX4NtsNkp6M\nr3wCGYnvQlwQr7nyijGm0bKsnamO9S3wtCxrySgPXStWqDCCkeaTMeZxSUsl3TchA0LeG8P3KCBT\nn0jyFs+7VvaqJwBMGsaYoKTfSPqlZVmbcj0eTA2WZZ0zxmyRtFDSzlTHTFRV2zmeh8slHZiI62Jq\nMsZ8QdIqScsty7qc6/FgyjG5HgDy1n5Jc4wx1xljQpL+WtKLOR4TALiMMUbSzyUdsizrX3M9HuQ3\nY0y1MSYS/3uxpCUaJs6bqKq2GyTNldQv6aikb1iW1Zb1C2NKMsa8JzuB2UlefsOyrCdyOCTkOWPM\nlyT9VFK1pHOSDliW9WBuR4V8ZIx5UNK/yi6w8HPLsoYtLQ8Mxxjza0n3SqqS1Cbp+5Zlrc7tqJDP\njDGLJO2W9EcNpgf8T8uyfpe7USFfGWPmS/qF7MXMAknPWpb1o7THT0TgCQAAAAC4ck1YVVsAAAAA\nwJWJwBMAAAAAkFUEngAAAACArCLwBAAAAABkFYEnAAAA
ACCrCDwBAAAAAFlF4AkAAAAAyCoCTwAA\nAABAVv1/lzHCzGUnjVoAAAAASUVORK5CYII=\n", - "text": [ - "" - ] - } - ], - "prompt_number": 5 - } - ], - "metadata": {} - } - ] -} \ No newline at end of file diff --git a/examples/triplet/mnist_triplet.prototxt b/examples/triplet/mnist_triplet.prototxt deleted file mode 100644 index 0e903f85909..00000000000 --- a/examples/triplet/mnist_triplet.prototxt +++ /dev/null @@ -1,113 +0,0 @@ -name: "mnist_siamese" -input: "data" -input_dim: 10000 -input_dim: 1 -input_dim: 28 -input_dim: 28 -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 500 - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "ip2" - type: "InnerProduct" - bottom: "ip1" - top: "ip2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 10 - } -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip2" - top: "feat" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 2 - } -} diff --git a/examples/triplet/mnist_triplet_solver.prototxt b/examples/triplet/mnist_triplet_solver.prototxt deleted file mode 100644 index 39222b89cf0..00000000000 --- 
a/examples/triplet/mnist_triplet_solver.prototxt +++ /dev/null @@ -1,25 +0,0 @@ -# The train/test net protocol buffer definition -net: "examples/triplet/mnist_triplet_train_test.prototxt" -# test_iter specifies how many forward passes the test should carry out. -# In the case of MNIST, we have test batch size 100 and 100 test iterations, -# covering the full 10,000 testing images. -test_iter: 100 -# Carry out testing every 500 training iterations. -test_interval: 500 -# The base learning rate, momentum and the weight decay of the network. -base_lr: 0.01 -momentum: 0.9 -weight_decay: 0.0000 -# The learning rate policy -lr_policy: "inv" -gamma: 0.0001 -power: 0.75 -# Display every 100 iterations -display: 100 -# The maximum number of iterations -max_iter: 50000 -# snapshot intermediate results -snapshot: 5000 -snapshot_prefix: "examples/triplet/mnist_triplet" -# solver mode: CPU or GPU -solver_mode: CPU diff --git a/examples/triplet/mnist_triplet_train_test.prototxt b/examples/triplet/mnist_triplet_train_test.prototxt deleted file mode 100644 index 3cea2376c11..00000000000 --- a/examples/triplet/mnist_triplet_train_test.prototxt +++ /dev/null @@ -1,498 +0,0 @@ -name: "mnist_triplet_train_test" -layer { - name: "triplet_data" - type: "Data" - top: "triplet_data" - top: "sim" - include { - phase: TRAIN - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/triplet/mnist_triplet_train_leveldb" - batch_size: 64 - } -} -layer { - name: "triplet_data" - type: "Data" - top: "triplet_data" - top: "sim" - include { - phase: TEST - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/triplet/mnist_triplet_test_leveldb" - batch_size: 100 - } -} -layer { - name: "slice_triplet" - type: "Slice" - bottom: "triplet_data" - top: "data" - top: "data_true" - top: "data_false" - slice_param { - slice_dim: 1 - slice_point: 1 - slice_point: 2 - } -} -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - 
param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "ip2" - type: "InnerProduct" - bottom: "ip1" - top: "ip2" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip2" - top: "feat" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "conv1_true" - type: "Convolution" - bottom: "data_true" - top: "conv1_true" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - 
num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1_true" - type: "Pooling" - bottom: "conv1_true" - top: "pool1_true" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2_true" - type: "Convolution" - bottom: "pool1_true" - top: "conv2_true" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2_true" - type: "Pooling" - bottom: "conv2_true" - top: "pool2_true" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1_true" - type: "InnerProduct" - bottom: "pool2_true" - top: "ip1_true" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1_true" - type: "ReLU" - bottom: "ip1_true" - top: "ip1_true" -} -layer { - name: "ip2_true" - type: "InnerProduct" - bottom: "ip1_true" - top: "ip2_true" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat_true" - type: "InnerProduct" - bottom: "ip2_true" - top: "feat_true" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "conv1_false" - type: "Convolution" - bottom: "data_false" - top: "conv1_false" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - 
num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1_false" - type: "Pooling" - bottom: "conv1_false" - top: "pool1_false" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2_false" - type: "Convolution" - bottom: "pool1_false" - top: "conv2_false" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2_false" - type: "Pooling" - bottom: "conv2_false" - top: "pool2_false" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1_false" - type: "InnerProduct" - bottom: "pool2_false" - top: "ip1_false" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1_false" - type: "ReLU" - bottom: "ip1_false" - top: "ip1_false" -} -layer { - name: "ip2_false" - type: "InnerProduct" - bottom: "ip1_false" - top: "ip2_false" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat_false" - type: "InnerProduct" - bottom: "ip2_false" - top: "feat_false" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "loss" - type: "ContrastiveLoss" - bottom: "feat" - bottom: "feat_false" - bottom: "sim" - top: "loss" - contrastive_loss_param { - margin: 1 - } -} diff --git 
a/examples/triplet/readme.md b/examples/triplet/readme.md deleted file mode 100644 index ce98ec10819..00000000000 --- a/examples/triplet/readme.md +++ /dev/null @@ -1,179 +0,0 @@ ---- -title: Siamese Network Tutorial -description: Train and test a siamese network on MNIST data. -category: example -include_in_docs: true -layout: default -priority: 100 ---- - -# Siamese Network Training with Caffe -This example shows how you can use weight sharing and a contrastive loss -function to learn a model using a siamese network in Caffe. - -We will assume that you have caffe successfully compiled. If not, please refer -to the [Installation page](../../installation.html). This example builds on the -[MNIST tutorial](mnist.html) so it would be a good idea to read that before -continuing. - -*The guide specifies all paths and assumes all commands are executed from the -root caffe directory* - -## Prepare Datasets - -You will first need to download and convert the data from the MNIST -website. To do this, simply run the following commands: - - ./data/mnist/get_mnist.sh - ./examples/siamese/create_mnist_siamese.sh - -After running the script there should be two datasets, -`./examples/siamese/mnist_siamese_train_leveldb`, and -`./examples/siamese/mnist_siamese_test_leveldb`. - -## The Model -First, we will define the model that we want to train using the siamese network. -We will use the convolutional net defined in -`./examples/siamese/mnist_siamese.prototxt`. This model is almost -exactly the same as the [LeNet model](mnist.html), the only difference is that -we have replaced the top layers that produced probabilities over the 10 digit -classes with a linear "feature" layer that produces a 2 dimensional vector. - - layers { - name: "feat" - type: INNER_PRODUCT - bottom: "ip2" - top: "feat" - blobs_lr: 1 - blobs_lr: 2 - inner_product_param { - num_output: 2 - } - } - -## Define the Siamese Network - -In this section we will define the siamese network used for training. 
The -resulting network is defined in -`./examples/siamese/mnist_siamese_train_test.prototxt`. - -### Reading in the Pair Data - -We start with a data layer that reads from the LevelDB database we created -earlier. Each entry in this database contains the image data for a pair of -images (`pair_data`) and a binary label saying if they belong to the same class -or different classes (`sim`). - - layers { - name: "pair_data" - type: DATA - top: "pair_data" - top: "sim" - data_param { - source: "examples/siamese/mnist-siamese-train-leveldb" - scale: 0.00390625 - batch_size: 64 - } - include: { phase: TRAIN } - } - -In order to pack a pair of images into the same blob in the database we pack one -image per channel. We want to be able to work with these two images separately, -so we add a slice layer after the data layer. This takes the `pair_data` and -slices it along the channel dimension so that we have a single image in `data` -and its paired image in `data_p.` - - layers { - name: "slice_pair" - type: SLICE - bottom: "pair_data" - top: "data" - top: "data_p" - slice_param { - slice_dim: 1 - slice_point: 1 - } - } - -### Building the First Side of the Siamese Net - -Now we can specify the first side of the siamese net. This side operates on -`data` and produces `feat`. Starting from the net in -`./examples/siamese/mnist_siamese.prototxt` we add default weight fillers. Then -we name the parameters of the convolutional and inner product layers. Naming the -parameters allows Caffe to share the parameters between layers on both sides of -the siamese net. In the definition this looks like: - - ... - param: "conv1_w" - param: "conv1_b" - ... - param: "conv2_w" - param: "conv2_b" - ... - param: "ip1_w" - param: "ip1_b" - ... - param: "ip2_w" - param: "ip2_b" - ... - -### Building the Second Side of the Siamese Net - -Now we need to create the second path that operates on `data_p` and produces -`feat_p`. This path is exactly the same as the first. 
So we can just copy and -paste it. Then we change the name of each layer, input, and output by appending -`_p` to differentiate the "paired" layers from the originals. - -### Adding the Contrastive Loss Function - -To train the network we will optimize a contrastive loss function proposed in: -Raia Hadsell, Sumit Chopra, and Yann LeCun "Dimensionality Reduction by Learning -an Invariant Mapping". This loss function encourages matching pairs to be close -together in feature space while pushing non-matching pairs apart. This cost -function is implemented with the `CONTRASTIVE_LOSS` layer: - - layers { - name: "loss" - type: CONTRASTIVE_LOSS - contrastive_loss_param { - margin: 1.0 - } - bottom: "feat" - bottom: "feat_p" - bottom: "sim" - top: "loss" - } - -## Define the Solver - -Nothing special needs to be done to the solver besides pointing it at the -correct model file. The solver is defined in -`./examples/siamese/mnist_siamese_solver.prototxt`. - -## Training and Testing the Model - -Training the model is simple after you have written the network definition -protobuf and solver protobuf files. 
Simply run -`./examples/siamese/train_mnist_siamese.sh`: - - ./examples/siamese/train_mnist_siamese.sh - -# Plotting the results - -First, we can draw the model and siamese networks by running the following -commands that draw the DAGs defined in the .prototxt files: - - ./python/draw_net.py \ - ./examples/siamese/mnist_siamese.prototxt \ - ./examples/siamese/mnist_siamese.png - - ./python/draw_net.py \ - ./examples/siamese/mnist_siamese_train_test.prototxt \ - ./examples/siamese/mnist_siamese_train_test.png - -Second, we can load the learned model and plot the features using the iPython -notebook: - - ipython notebook ./examples/siamese/mnist_siamese.ipynb - diff --git a/examples/triplet/train_mnist_triplet.sh b/examples/triplet/train_mnist_triplet.sh deleted file mode 100755 index e005970824a..00000000000 --- a/examples/triplet/train_mnist_triplet.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env sh - -TOOLS=./build/tools - -$TOOLS/caffe train --solver=examples/triplet/mnist_triplet_solver.prototxt diff --git a/include/caffe/data_layers.hpp b/include/caffe/data_layers.hpp index 2bb9d948169..3958cb7ecb0 100644 --- a/include/caffe/data_layers.hpp +++ b/include/caffe/data_layers.hpp @@ -14,7 +14,6 @@ #include "caffe/filler.hpp" #include "caffe/internal_thread.hpp" #include "caffe/layer.hpp" -#include "caffe/net.hpp" #include "caffe/proto/caffe.pb.h" #include "caffe/util/db.hpp" @@ -29,7 +28,6 @@ template class BaseDataLayer : public Layer { public: explicit BaseDataLayer(const LayerParameter& param); - virtual ~BaseDataLayer() {} // LayerSetUp: implements common data layer setup functionality, and calls // DataLayerSetUp to do special data layer setup for individual layer types. // This method may not be overridden except by the BasePrefetchingDataLayer. 
@@ -58,7 +56,6 @@ class BasePrefetchingDataLayer : public: explicit BasePrefetchingDataLayer(const LayerParameter& param) : BaseDataLayer(param) {} - virtual ~BasePrefetchingDataLayer() {} // LayerSetUp: implements common data layer setup functionality, and calls // DataLayerSetUp to do special data layer setup for individual layer types. // This method may not be overridden. diff --git a/include/caffe/data_transformer.hpp b/include/caffe/data_transformer.hpp index 880356601a4..0ad68c80216 100644 --- a/include/caffe/data_transformer.hpp +++ b/include/caffe/data_transformer.hpp @@ -62,6 +62,7 @@ class DataTransformer { */ void Transform(const vector & mat_vector, Blob* transformed_blob); + /** * @brief Applies the transformation defined in the data layer's * transform_param block to a cv::Mat @@ -87,6 +88,41 @@ class DataTransformer { */ void Transform(Blob* input_blob, Blob* transformed_blob); + /** + * @brief Infers the shape of transformed_blob will have when + * the transformation is applied to the data. + * + * @param datum + * Datum containing the data to be transformed. + */ + vector InferBlobShape(const Datum& datum); + /** + * @brief Infers the shape of transformed_blob will have when + * the transformation is applied to the data. + * It uses the first element to infer the shape of the blob. + * + * @param datum_vector + * A vector of Datum containing the data to be transformed. + */ + vector InferBlobShape(const vector & datum_vector); + /** + * @brief Infers the shape of transformed_blob will have when + * the transformation is applied to the data. + * It uses the first element to infer the shape of the blob. + * + * @param mat_vector + * A vector of Mat containing the data to be transformed. + */ + vector InferBlobShape(const vector & mat_vector); + /** + * @brief Infers the shape of transformed_blob will have when + * the transformation is applied to the data. + * + * @param cv_img + * cv::Mat containing the data to be transformed. 
+ */ + vector InferBlobShape(const cv::Mat& cv_img); + protected: /** * @brief Generates a random integer from Uniform({0, 1, ..., n-1}). diff --git a/include/caffe/filler.hpp b/include/caffe/filler.hpp index bb18e8e1e28..ff3542e1f99 100644 --- a/include/caffe/filler.hpp +++ b/include/caffe/filler.hpp @@ -126,17 +126,18 @@ class PositiveUnitballFiller : public Filler { }; /** - * @brief Fills a Blob with values @f$ x \sim U(-a, +a) @f$ where @f$ a @f$ - * is set inversely proportional to the number of incoming nodes. + * @brief Fills a Blob with values @f$ x \sim U(-a, +a) @f$ where @f$ a @f$ is + * set inversely proportional to number of incoming nodes, outgoing + * nodes, or their average. * * A Filler based on the paper [Bengio and Glorot 2010]: Understanding - * the difficulty of training deep feedforward neuralnetworks, but does not - * use the fan_out value. + * the difficulty of training deep feedforward neuralnetworks. * - * It fills the incoming matrix by randomly sampling uniform data from - * [-scale, scale] where scale = sqrt(3 / fan_in) where fan_in is the number - * of input nodes. You should make sure the input blob has shape (num, a, b, c) - * where a * b * c = fan_in. + * It fills the incoming matrix by randomly sampling uniform data from [-scale, + * scale] where scale = sqrt(3 / n) where n is the fan_in, fan_out, or their + * average, depending on the variance_norm option. You should make sure the + * input blob has shape (num, a, b, c) where a * b * c = fan_in and num * b * c + * = fan_out. Note that this is currently not the case for inner product layers. * * TODO(dox): make notation in above comment consistent with rest & use LaTeX. 
*/ @@ -148,7 +149,16 @@ class XavierFiller : public Filler { virtual void Fill(Blob* blob) { CHECK(blob->count()); int fan_in = blob->count() / blob->num(); - Dtype scale = sqrt(Dtype(3) / fan_in); + int fan_out = blob->count() / blob->channels(); + Dtype n = fan_in; // default to fan_in + if (this->filler_param_.variance_norm() == + FillerParameter_VarianceNorm_AVERAGE) { + n = (fan_in + fan_out) / Dtype(2); + } else if (this->filler_param_.variance_norm() == + FillerParameter_VarianceNorm_FAN_OUT) { + n = fan_out; + } + Dtype scale = sqrt(Dtype(3) / n); caffe_rng_uniform(blob->count(), -scale, scale, blob->mutable_cpu_data()); CHECK_EQ(this->filler_param_.sparse(), -1) @@ -156,6 +166,47 @@ class XavierFiller : public Filler { } }; +/** + * @brief Fills a Blob with values @f$ x \sim N(0, \sigma^2) @f$ where + * @f$ \sigma^2 @f$ is set inversely proportional to number of incoming + * nodes, outgoing nodes, or their average. + * + * A Filler based on the paper [He, Zhang, Ren and Sun 2015]: Specifically + * accounts for ReLU nonlinearities. + * + * Aside: for another perspective on the scaling factor, see the derivation of + * [Saxe, McClelland, and Ganguli 2013 (v3)]. + * + * It fills the incoming matrix by randomly sampling Gaussian data with std = + * sqrt(2 / n) where n is the fan_in, fan_out, or their average, depending on + * the variance_norm option. You should make sure the input blob has shape (num, + * a, b, c) where a * b * c = fan_in and num * b * c = fan_out. Note that this + * is currently not the case for inner product layers. 
+ */ +template +class MSRAFiller : public Filler { + public: + explicit MSRAFiller(const FillerParameter& param) + : Filler(param) {} + virtual void Fill(Blob* blob) { + CHECK(blob->count()); + int fan_in = blob->count() / blob->num(); + int fan_out = blob->count() / blob->channels(); + Dtype n = fan_in; // default to fan_in + if (this->filler_param_.variance_norm() == + FillerParameter_VarianceNorm_AVERAGE) { + n = (fan_in + fan_out) / Dtype(2); + } else if (this->filler_param_.variance_norm() == + FillerParameter_VarianceNorm_FAN_OUT) { + n = fan_out; + } + Dtype std = sqrt(Dtype(2) / n); + caffe_rng_gaussian(blob->count(), Dtype(0), std, + blob->mutable_cpu_data()); + CHECK_EQ(this->filler_param_.sparse(), -1) + << "Sparsity not supported by this Filler."; + } +}; /** * @brief Get a specific filler from the specification given in FillerParameter. @@ -176,6 +227,8 @@ Filler* GetFiller(const FillerParameter& param) { return new UniformFiller(param); } else if (type == "xavier") { return new XavierFiller(param); + } else if (type == "msra") { + return new MSRAFiller(param); } else { CHECK(false) << "Unknown filler name: " << param.type(); } diff --git a/include/caffe/layer.hpp b/include/caffe/layer.hpp index 2d13ef97c05..8f924a75755 100644 --- a/include/caffe/layer.hpp +++ b/include/caffe/layer.hpp @@ -406,6 +406,7 @@ template inline Dtype Layer::Forward(const vector*>& bottom, const vector*>& top) { Dtype loss = 0; + Reshape(bottom, top); switch (Caffe::mode()) { case Caffe::CPU: Forward_cpu(bottom, top); diff --git a/include/caffe/loss_layers.hpp b/include/caffe/loss_layers.hpp index 2f9c1f567a1..86c34241168 100644 --- a/include/caffe/loss_layers.hpp +++ b/include/caffe/loss_layers.hpp @@ -210,73 +210,6 @@ class ContrastiveLossLayer : public LossLayer { Blob summer_vec_; // tmp storage for gpu forward pass }; -template -class TripletLossLayer : public LossLayer { - public: - explicit TripletLossLayer(const LayerParameter& param) - : LossLayer(param), diff_() {} - 
virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - - virtual inline int ExactNumBottomBlobs() const { return 4; } - virtual inline const char* type() const { return "TripletLoss"; } - /** - * Unlike most loss layers, in the TripletLossLayer we can backpropagate - * to the first three inputs. - */ - virtual inline bool AllowForceBackward(const int bottom_index) const { - return bottom_index != 3; - } - - protected: - /// @copydoc TripletLossLayer - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - - /** - * @brief Computes the Triplet error gradient w.r.t. the inputs. - * - * Computes the gradients with respect to the two input vectors (bottom[0] and - * bottom[1]), but not the similarity label (bottom[2]). - * - * @param top output Blob vector (length 1), providing the error gradient with - * respect to the outputs - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, - * as @f$ \lambda @f$ is the coefficient of this layer's output - * @f$\ell_i@f$ in the overall Net loss - * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence - * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. - * (*Assuming that this top Blob is not used as a bottom (input) by any - * other layer of the Net.) - * @param propagate_down see Layer::Backward. 
- * @param bottom input Blob vector (length 2) - * -# @f$ (N \times C \times 1 \times 1) @f$ - * the features @f$a@f$; Backward fills their diff with - * gradients if propagate_down[0] - * -# @f$ (N \times C \times 1 \times 1) @f$ - * the features @f$b@f$; Backward fills their diff with gradients if - * propagate_down[1] - */ - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - - Blob diff_; // cached for backward pass - Blob diff_pos; - Blob diff_neg; - Blob dist_sq_; // cached for backward pass - Blob dist_sq_pos; - Blob dist_sq_neg; - Blob diff_sq_; // tmp storage for gpu forward pass - Blob diff_sq_pos; - Blob diff_sq_neg; - Blob summer_vec_; // tmp storage for gpu forward pass -}; - /** * @brief Computes the Euclidean (L2) loss @f$ * E = \frac{1}{2N} \sum\limits_{n=1}^N \left| \left| \hat{y}_n - y_n @@ -672,8 +605,6 @@ class SigmoidCrossEntropyLossLayer : public LossLayer { /// @copydoc SigmoidCrossEntropyLossLayer virtual void Forward_cpu(const vector*>& bottom, const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); /** * @brief Computes the sigmoid cross-entropy loss error gradient w.r.t. 
the diff --git a/include/caffe/net.hpp b/include/caffe/net.hpp index 075afebc9b0..5665df1edf2 100644 --- a/include/caffe/net.hpp +++ b/include/caffe/net.hpp @@ -137,6 +137,9 @@ class Net { inline const vector& blob_loss_weights() const { return blob_loss_weights_; } + inline const vector& layer_need_backward() const { + return layer_need_backward_; + } /// @brief returns the parameters inline const vector > >& params() const { return params_; diff --git a/include/caffe/neuron_layers.hpp b/include/caffe/neuron_layers.hpp index 323215134c7..c2e0774aaa2 100644 --- a/include/caffe/neuron_layers.hpp +++ b/include/caffe/neuron_layers.hpp @@ -8,7 +8,6 @@ #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/layer.hpp" -#include "caffe/net.hpp" #include "caffe/proto/caffe.pb.h" #define HDF5_DATA_DATASET_NAME "data" @@ -268,6 +267,72 @@ class ExpLayer : public NeuronLayer { Dtype inner_scale_, outer_scale_; }; +/** + * @brief Computes @f$ y = log_{\gamma}(\alpha x + \beta) @f$, + * as specified by the scale @f$ \alpha @f$, shift @f$ \beta @f$, + * and base @f$ \gamma @f$. 
+ */ +template +class LogLayer : public NeuronLayer { + public: + /** + * @param param provides LogParameter log_param, + * with LogLayer options: + * - scale (\b optional, default 1) the scale @f$ \alpha @f$ + * - shift (\b optional, default 0) the shift @f$ \beta @f$ + * - base (\b optional, default -1 for a value of @f$ e \approx 2.718 @f$) + * the base @f$ \gamma @f$ + */ + explicit LogLayer(const LayerParameter& param) + : NeuronLayer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Log"; } + + protected: + /** + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$ + * @param top output Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the computed outputs @f$ + * y = log_{\gamma}(\alpha x + \beta) + * @f$ + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the error gradient w.r.t. the exp inputs. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (N \times C \times H \times W) @f$ + * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ + * with respect to computed outputs @f$ y @f$ + * @param propagate_down see Layer::Backward. 
+ * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$; Backward fills their diff with + * gradients @f$ + * \frac{\partial E}{\partial x} = + * \frac{\partial E}{\partial y} y \alpha \log_e(gamma) + * @f$ if propagate_down[0] + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + Dtype base_scale_; + Dtype input_scale_, input_shift_; + Dtype backward_num_scale_; +}; + /** * @brief Computes @f$ y = (\alpha x + \beta) ^ \gamma @f$, * as specified by the scale @f$ \alpha @f$, shift @f$ \beta @f$, @@ -734,7 +799,8 @@ class PReLULayer : public NeuronLayer { const vector& propagate_down, const vector*>& bottom); bool channel_shared_; - Blob multiplier_; // dot multipler for backward computation of params + Blob multiplier_; // dot multiplier for backward computation of params + Blob backward_buff_; // temporary buffer for backward computation Blob bottom_memory_; // memory for in-place computation }; diff --git a/include/caffe/python_layer.hpp b/include/caffe/python_layer.hpp index 816ef453720..19cf18c9742 100644 --- a/include/caffe/python_layer.hpp +++ b/include/caffe/python_layer.hpp @@ -14,12 +14,12 @@ template class PythonLayer : public Layer { public: PythonLayer(PyObject* self, const LayerParameter& param) - : Layer(param), self_(self) { } + : Layer(param), self_(bp::handle<>(bp::borrowed(self))) { } virtual void LayerSetUp(const vector*>& bottom, const vector*>& top) { try { - bp::call_method(self_, "setup", bottom, top); + self_.attr("setup")(bottom, top); } catch (bp::error_already_set) { PyErr_Print(); throw; @@ -29,7 +29,7 @@ class PythonLayer : public Layer { virtual void Reshape(const vector*>& bottom, const vector*>& top) { try { - bp::call_method(self_, "reshape", bottom, top); + self_.attr("reshape")(bottom, top); } catch 
(bp::error_already_set) { PyErr_Print(); throw; @@ -42,7 +42,7 @@ class PythonLayer : public Layer { virtual void Forward_cpu(const vector*>& bottom, const vector*>& top) { try { - bp::call_method(self_, "forward", bottom, top); + self_.attr("forward")(bottom, top); } catch (bp::error_already_set) { PyErr_Print(); throw; @@ -51,8 +51,7 @@ class PythonLayer : public Layer { virtual void Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { try { - bp::call_method(self_, "backward", top, propagate_down, - bottom); + self_.attr("backward")(top, propagate_down, bottom); } catch (bp::error_already_set) { PyErr_Print(); throw; @@ -60,7 +59,7 @@ class PythonLayer : public Layer { } private: - PyObject* self_; + bp::object self_; }; } // namespace caffe diff --git a/include/caffe/solver.hpp b/include/caffe/solver.hpp index 4dcdc3dc20b..c2ced487d6f 100644 --- a/include/caffe/solver.hpp +++ b/include/caffe/solver.hpp @@ -11,7 +11,7 @@ namespace caffe { /** * @brief An interface for classes that perform optimization on Net%s. * - * Requires implementation of ComputeUpdateValue to compute a parameter update + * Requires implementation of ApplyUpdate to compute a parameter update * given the current state of the Net parameters. */ template @@ -39,8 +39,8 @@ class Solver { int iter() { return iter_; } protected: - // Get the update value for the current iteration. - virtual void ComputeUpdateValue() = 0; + // Make and apply the update value for the current iteration. + virtual void ApplyUpdate() = 0; // The Solver::Snapshot function implements the basic snapshotting utility // that stores the learned net. 
You should implement the SnapshotSolverState() // function that produces a SolverState protocol buffer that needs to be @@ -80,7 +80,10 @@ class SGDSolver : public Solver { protected: void PreSolve(); Dtype GetLearningRate(); - virtual void ComputeUpdateValue(); + virtual void ApplyUpdate(); + virtual void Normalize(int param_id); + virtual void Regularize(int param_id); + virtual void ComputeUpdateValue(int param_id, Dtype rate); virtual void ClipGradients(); virtual void SnapshotSolverState(SolverState * state); virtual void RestoreSolverState(const SolverState& state); @@ -102,7 +105,7 @@ class NesterovSolver : public SGDSolver { : SGDSolver(param_file) {} protected: - virtual void ComputeUpdateValue(); + virtual void ComputeUpdateValue(int param_id, Dtype rate); DISABLE_COPY_AND_ASSIGN(NesterovSolver); }; @@ -116,7 +119,7 @@ class AdaGradSolver : public SGDSolver { : SGDSolver(param_file) { constructor_sanity_check(); } protected: - virtual void ComputeUpdateValue(); + virtual void ComputeUpdateValue(int param_id, Dtype rate); void constructor_sanity_check() { CHECK_EQ(0, this->param_.momentum()) << "Momentum cannot be used with AdaGrad."; diff --git a/include/caffe/test/test_caffe_main.hpp b/include/caffe/test/test_caffe_main.hpp index bd5f31e063f..fc156091476 100644 --- a/include/caffe/test/test_caffe_main.hpp +++ b/include/caffe/test/test_caffe_main.hpp @@ -40,34 +40,36 @@ class MultiDeviceTest : public ::testing::Test { typedef ::testing::Types TestDtypes; -struct FloatCPU { - typedef float Dtype; +template +struct CPUDevice { + typedef TypeParam Dtype; static const Caffe::Brew device = Caffe::CPU; }; -struct DoubleCPU { - typedef double Dtype; - static const Caffe::Brew device = Caffe::CPU; +template +class CPUDeviceTest : public MultiDeviceTest > { }; #ifdef CPU_ONLY -typedef ::testing::Types TestDtypesAndDevices; +typedef ::testing::Types, + CPUDevice > TestDtypesAndDevices; #else -struct FloatGPU { - typedef float Dtype; +template +struct GPUDevice { + 
typedef TypeParam Dtype; static const Caffe::Brew device = Caffe::GPU; }; -struct DoubleGPU { - typedef double Dtype; - static const Caffe::Brew device = Caffe::GPU; +template +class GPUDeviceTest : public MultiDeviceTest > { }; -typedef ::testing::Types - TestDtypesAndDevices; +typedef ::testing::Types, CPUDevice, + GPUDevice, GPUDevice > + TestDtypesAndDevices; #endif diff --git a/include/caffe/test/test_gradient_check_util.hpp b/include/caffe/test/test_gradient_check_util.hpp index 22937711b58..cc5dcbad0ee 100644 --- a/include/caffe/test/test_gradient_check_util.hpp +++ b/include/caffe/test/test_gradient_check_util.hpp @@ -80,11 +80,14 @@ void GradientChecker::CheckGradientSingle(Layer* layer, CHECK_EQ(top_count, bottom[blob_id]->count()); } } - // First, figure out what blobs we need to check against. + // First, figure out what blobs we need to check against, and zero init + // parameter blobs. vector*> blobs_to_check; vector propagate_down(bottom.size(), check_bottom < 0); for (int i = 0; i < layer->blobs().size(); ++i) { - blobs_to_check.push_back(layer->blobs()[i].get()); + Blob* blob = layer->blobs()[i].get(); + caffe_set(blob->count(), static_cast(0), blob->mutable_cpu_diff()); + blobs_to_check.push_back(blob); } if (check_bottom < 0) { for (int i = 0; i < bottom.size(); ++i) { diff --git a/include/caffe/util/db.hpp b/include/caffe/util/db.hpp index afdb8d2c4f8..59ec3d390ba 100644 --- a/include/caffe/util/db.hpp +++ b/include/caffe/util/db.hpp @@ -3,10 +3,6 @@ #include -#include "leveldb/db.h" -#include "leveldb/write_batch.h" -#include "lmdb.h" - #include "caffe/common.hpp" #include "caffe/proto/caffe.pb.h" @@ -49,138 +45,6 @@ class DB { DISABLE_COPY_AND_ASSIGN(DB); }; -class LevelDBCursor : public Cursor { - public: - explicit LevelDBCursor(leveldb::Iterator* iter) - : iter_(iter) { SeekToFirst(); } - ~LevelDBCursor() { delete iter_; } - virtual void SeekToFirst() { iter_->SeekToFirst(); } - virtual void Next() { iter_->Next(); } - virtual string key() 
{ return iter_->key().ToString(); } - virtual string value() { return iter_->value().ToString(); } - virtual bool valid() { return iter_->Valid(); } - - private: - leveldb::Iterator* iter_; -}; - -class LevelDBTransaction : public Transaction { - public: - explicit LevelDBTransaction(leveldb::DB* db) : db_(db) { CHECK_NOTNULL(db_); } - virtual void Put(const string& key, const string& value) { - batch_.Put(key, value); - } - virtual void Commit() { - leveldb::Status status = db_->Write(leveldb::WriteOptions(), &batch_); - CHECK(status.ok()) << "Failed to write batch to leveldb " - << std::endl << status.ToString(); - } - - private: - leveldb::DB* db_; - leveldb::WriteBatch batch_; - - DISABLE_COPY_AND_ASSIGN(LevelDBTransaction); -}; - -class LevelDB : public DB { - public: - LevelDB() : db_(NULL) { } - virtual ~LevelDB() { Close(); } - virtual void Open(const string& source, Mode mode); - virtual void Close() { - if (db_ != NULL) { - delete db_; - db_ = NULL; - } - } - virtual LevelDBCursor* NewCursor() { - return new LevelDBCursor(db_->NewIterator(leveldb::ReadOptions())); - } - virtual LevelDBTransaction* NewTransaction() { - return new LevelDBTransaction(db_); - } - - private: - leveldb::DB* db_; -}; - -inline void MDB_CHECK(int mdb_status) { - CHECK_EQ(mdb_status, MDB_SUCCESS) << mdb_strerror(mdb_status); -} - -class LMDBCursor : public Cursor { - public: - explicit LMDBCursor(MDB_txn* mdb_txn, MDB_cursor* mdb_cursor) - : mdb_txn_(mdb_txn), mdb_cursor_(mdb_cursor), valid_(false) { - SeekToFirst(); - } - virtual ~LMDBCursor() { - mdb_cursor_close(mdb_cursor_); - mdb_txn_abort(mdb_txn_); - } - virtual void SeekToFirst() { Seek(MDB_FIRST); } - virtual void Next() { Seek(MDB_NEXT); } - virtual string key() { - return string(static_cast(mdb_key_.mv_data), mdb_key_.mv_size); - } - virtual string value() { - return string(static_cast(mdb_value_.mv_data), - mdb_value_.mv_size); - } - virtual bool valid() { return valid_; } - - private: - void Seek(MDB_cursor_op op) { - 
int mdb_status = mdb_cursor_get(mdb_cursor_, &mdb_key_, &mdb_value_, op); - if (mdb_status == MDB_NOTFOUND) { - valid_ = false; - } else { - MDB_CHECK(mdb_status); - valid_ = true; - } - } - - MDB_txn* mdb_txn_; - MDB_cursor* mdb_cursor_; - MDB_val mdb_key_, mdb_value_; - bool valid_; -}; - -class LMDBTransaction : public Transaction { - public: - explicit LMDBTransaction(MDB_dbi* mdb_dbi, MDB_txn* mdb_txn) - : mdb_dbi_(mdb_dbi), mdb_txn_(mdb_txn) { } - virtual void Put(const string& key, const string& value); - virtual void Commit() { MDB_CHECK(mdb_txn_commit(mdb_txn_)); } - - private: - MDB_dbi* mdb_dbi_; - MDB_txn* mdb_txn_; - - DISABLE_COPY_AND_ASSIGN(LMDBTransaction); -}; - -class LMDB : public DB { - public: - LMDB() : mdb_env_(NULL) { } - virtual ~LMDB() { Close(); } - virtual void Open(const string& source, Mode mode); - virtual void Close() { - if (mdb_env_ != NULL) { - mdb_dbi_close(mdb_env_, mdb_dbi_); - mdb_env_close(mdb_env_); - mdb_env_ = NULL; - } - } - virtual LMDBCursor* NewCursor(); - virtual LMDBTransaction* NewTransaction(); - - private: - MDB_env* mdb_env_; - MDB_dbi mdb_dbi_; -}; - DB* GetDB(DataParameter::DB backend); DB* GetDB(const string& backend); diff --git a/include/caffe/util/db_leveldb.hpp b/include/caffe/util/db_leveldb.hpp new file mode 100644 index 00000000000..10623554b67 --- /dev/null +++ b/include/caffe/util/db_leveldb.hpp @@ -0,0 +1,73 @@ +#ifndef CAFFE_UTIL_DB_LEVELDB_HPP +#define CAFFE_UTIL_DB_LEVELDB_HPP + +#include + +#include "leveldb/db.h" +#include "leveldb/write_batch.h" + +#include "caffe/util/db.hpp" + +namespace caffe { namespace db { + +class LevelDBCursor : public Cursor { + public: + explicit LevelDBCursor(leveldb::Iterator* iter) + : iter_(iter) { SeekToFirst(); } + ~LevelDBCursor() { delete iter_; } + virtual void SeekToFirst() { iter_->SeekToFirst(); } + virtual void Next() { iter_->Next(); } + virtual string key() { return iter_->key().ToString(); } + virtual string value() { return iter_->value().ToString(); 
} + virtual bool valid() { return iter_->Valid(); } + + private: + leveldb::Iterator* iter_; +}; + +class LevelDBTransaction : public Transaction { + public: + explicit LevelDBTransaction(leveldb::DB* db) : db_(db) { CHECK_NOTNULL(db_); } + virtual void Put(const string& key, const string& value) { + batch_.Put(key, value); + } + virtual void Commit() { + leveldb::Status status = db_->Write(leveldb::WriteOptions(), &batch_); + CHECK(status.ok()) << "Failed to write batch to leveldb " + << std::endl << status.ToString(); + } + + private: + leveldb::DB* db_; + leveldb::WriteBatch batch_; + + DISABLE_COPY_AND_ASSIGN(LevelDBTransaction); +}; + +class LevelDB : public DB { + public: + LevelDB() : db_(NULL) { } + virtual ~LevelDB() { Close(); } + virtual void Open(const string& source, Mode mode); + virtual void Close() { + if (db_ != NULL) { + delete db_; + db_ = NULL; + } + } + virtual LevelDBCursor* NewCursor() { + return new LevelDBCursor(db_->NewIterator(leveldb::ReadOptions())); + } + virtual LevelDBTransaction* NewTransaction() { + return new LevelDBTransaction(db_); + } + + private: + leveldb::DB* db_; +}; + + +} // namespace db +} // namespace caffe + +#endif // CAFFE_UTIL_DB_LEVELDB_HPP diff --git a/include/caffe/util/db_lmdb.hpp b/include/caffe/util/db_lmdb.hpp new file mode 100644 index 00000000000..cc7c90afc4c --- /dev/null +++ b/include/caffe/util/db_lmdb.hpp @@ -0,0 +1,91 @@ +#ifndef CAFFE_UTIL_DB_LMDB_HPP +#define CAFFE_UTIL_DB_LMDB_HPP + +#include + +#include "lmdb.h" + +#include "caffe/util/db.hpp" + +namespace caffe { namespace db { + +inline void MDB_CHECK(int mdb_status) { + CHECK_EQ(mdb_status, MDB_SUCCESS) << mdb_strerror(mdb_status); +} + +class LMDBCursor : public Cursor { + public: + explicit LMDBCursor(MDB_txn* mdb_txn, MDB_cursor* mdb_cursor) + : mdb_txn_(mdb_txn), mdb_cursor_(mdb_cursor), valid_(false) { + SeekToFirst(); + } + virtual ~LMDBCursor() { + mdb_cursor_close(mdb_cursor_); + mdb_txn_abort(mdb_txn_); + } + virtual void SeekToFirst() 
{ Seek(MDB_FIRST); } + virtual void Next() { Seek(MDB_NEXT); } + virtual string key() { + return string(static_cast(mdb_key_.mv_data), mdb_key_.mv_size); + } + virtual string value() { + return string(static_cast(mdb_value_.mv_data), + mdb_value_.mv_size); + } + virtual bool valid() { return valid_; } + + private: + void Seek(MDB_cursor_op op) { + int mdb_status = mdb_cursor_get(mdb_cursor_, &mdb_key_, &mdb_value_, op); + if (mdb_status == MDB_NOTFOUND) { + valid_ = false; + } else { + MDB_CHECK(mdb_status); + valid_ = true; + } + } + + MDB_txn* mdb_txn_; + MDB_cursor* mdb_cursor_; + MDB_val mdb_key_, mdb_value_; + bool valid_; +}; + +class LMDBTransaction : public Transaction { + public: + explicit LMDBTransaction(MDB_dbi* mdb_dbi, MDB_txn* mdb_txn) + : mdb_dbi_(mdb_dbi), mdb_txn_(mdb_txn) { } + virtual void Put(const string& key, const string& value); + virtual void Commit() { MDB_CHECK(mdb_txn_commit(mdb_txn_)); } + + private: + MDB_dbi* mdb_dbi_; + MDB_txn* mdb_txn_; + + DISABLE_COPY_AND_ASSIGN(LMDBTransaction); +}; + +class LMDB : public DB { + public: + LMDB() : mdb_env_(NULL) { } + virtual ~LMDB() { Close(); } + virtual void Open(const string& source, Mode mode); + virtual void Close() { + if (mdb_env_ != NULL) { + mdb_dbi_close(mdb_env_, mdb_dbi_); + mdb_env_close(mdb_env_); + mdb_env_ = NULL; + } + } + virtual LMDBCursor* NewCursor(); + virtual LMDBTransaction* NewTransaction(); + + private: + MDB_env* mdb_env_; + MDB_dbi mdb_dbi_; +}; + +} // namespace db +} // namespace caffe + +#endif // CAFFE_UTIL_DB_LMDB_HPP diff --git a/include/caffe/util/math_functions.hpp b/include/caffe/util/math_functions.hpp index f43036fcebc..2cacd8e72cd 100644 --- a/include/caffe/util/math_functions.hpp +++ b/include/caffe/util/math_functions.hpp @@ -88,6 +88,9 @@ void caffe_rng_bernoulli(const int n, const Dtype p, unsigned int* r); template void caffe_exp(const int n, const Dtype* a, Dtype* y); +template +void caffe_log(const int n, const Dtype* a, Dtype* y); + template void 
caffe_abs(const int n, const Dtype* a, Dtype* y); @@ -203,6 +206,9 @@ void caffe_gpu_abs(const int n, const Dtype* a, Dtype* y); template void caffe_gpu_exp(const int n, const Dtype* a, Dtype* y); +template +void caffe_gpu_log(const int n, const Dtype* a, Dtype* y); + template void caffe_gpu_powx(const int n, const Dtype* a, const Dtype b, Dtype* y); diff --git a/include/caffe/util/mkl_alternate.hpp b/include/caffe/util/mkl_alternate.hpp index 32fdbf79932..3355b6658a3 100644 --- a/include/caffe/util/mkl_alternate.hpp +++ b/include/caffe/util/mkl_alternate.hpp @@ -33,6 +33,7 @@ extern "C" { DEFINE_VSL_UNARY_FUNC(Sqr, y[i] = a[i] * a[i]); DEFINE_VSL_UNARY_FUNC(Exp, y[i] = exp(a[i])); +DEFINE_VSL_UNARY_FUNC(Ln, y[i] = log(a[i])); DEFINE_VSL_UNARY_FUNC(Abs, y[i] = fabs(a[i])); // A simple way to define the vsl unary functions with singular parameter b. diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index cd0ab8babb0..a6bd86a93f5 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -453,6 +453,72 @@ class CuDNNPoolingLayer : public PoolingLayer { }; #endif +/** + * @brief Does spatial pyramid pooling on the input image + * by taking the max, average, etc. within regions + * so that the result vector of different sized + * images are of the same size. + */ +template +class SPPLayer : public Layer { + public: + explicit SPPLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "SPP"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int MinTopBlobs() const { return 1; } + // MAX POOL layers can output an extra top blob for the mask; + // others can only output the pooled inputs. 
+ virtual inline int MaxTopBlobs() const { + return (this->layer_param_.pooling_param().pool() == + PoolingParameter_PoolMethod_MAX) ? 2 : 1; + } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + // calculates the kernel and stride dimensions for the pooling layer, + // returns a correctly configured LayerParameter for a PoolingLayer + virtual LayerParameter GetPoolingParam(const int pyramid_level, + const int bottom_h, const int bottom_w, const SPPParameter spp_param); + + int pyramid_height_; + int bottom_h_, bottom_w_; + int channels_; + int kernel_h_, kernel_w_; + int pad_h_, pad_w_; + + /// the internal Split layer that feeds the pooling layers + shared_ptr > split_layer_; + /// top vector holder used in call to the underlying SplitLayer::Forward + vector*> split_top_vec_; + /// bottom vector holder used in call to the underlying PoolingLayer::Forward + vector*>*> pooling_bottom_vecs_; + /// the internal Pooling layers of different kernel sizes + vector > > pooling_layers_; + /// top vector holders used in call to the underlying PoolingLayer::Forward + vector*>*> pooling_top_vecs_; + /// pooling_outputs stores the outputs of the PoolingLayers + vector*> pooling_outputs_; + /// the internal Flatten layers that the Pooling layers feed into + vector*> flatten_layers_; + /// top vector holders used in call to the underlying FlattenLayer::Forward + vector*>*> flatten_top_vecs_; + /// flatten_outputs stores the outputs of the FlattenLayers + vector*> flatten_outputs_; + /// bottom vector holder used in call to the underlying ConcatLayer::Forward + vector*> concat_bottom_vec_; + /// the internal Concat layers that the Flatten layers feed into + shared_ptr > concat_layer_; +}; + } // namespace caffe #endif // CAFFE_VISION_LAYERS_HPP_ diff --git a/python/caffe/draw.py b/python/caffe/draw.py index 834ea15ac9a..324929deca4 
100644 --- a/python/caffe/draw.py +++ b/python/caffe/draw.py @@ -72,7 +72,7 @@ def get_layer_label(layer, rankdir): else: # If graph orientation is horizontal, vertical space is free and # horizontal space is not; separate words with newlines - separator = '\n' + separator = '\\n' if layer.type == 'Convolution': # Outer double quotes needed or else colon characters don't parse diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp index 6d2b3f502d9..94fdcc35fb6 100644 --- a/src/caffe/blob.cpp +++ b/src/caffe/blob.cpp @@ -26,6 +26,7 @@ void Blob::Reshape(const vector& shape) { shape_.resize(shape.size()); for (int i = 0; i < shape.size(); ++i) { CHECK_GE(shape[i], 0); + CHECK_LE(shape[i], INT_MAX / count_) << "blob size exceeds INT_MAX"; count_ *= shape[i]; shape_[i] = shape[i]; } diff --git a/src/caffe/data_transformer.cpp b/src/caffe/data_transformer.cpp index b0b98e478c1..22633922a01 100644 --- a/src/caffe/data_transformer.cpp +++ b/src/caffe/data_transformer.cpp @@ -125,10 +125,31 @@ void DataTransformer::Transform(const Datum& datum, template void DataTransformer::Transform(const Datum& datum, Blob* transformed_blob) { + // If datum is encoded, decoded and transform the cv::image. + if (datum.encoded()) { + CHECK(!(param_.force_color() && param_.force_gray())) + << "cannot set both force_color and force_gray"; + cv::Mat cv_img; + if (param_.force_color() || param_.force_gray()) { + // If force_color then decode in color otherwise decode in gray. + cv_img = DecodeDatumToCVMat(datum, param_.force_color()); + } else { + cv_img = DecodeDatumToCVMatNative(datum); + } + // Transform the cv::image into blob. 
+ return Transform(cv_img, transformed_blob); + } else { + if (param_.force_color() || param_.force_gray()) { + LOG(ERROR) << "force_color and force_gray only for encoded datum"; + } + } + + const int crop_size = param_.crop_size(); const int datum_channels = datum.channels(); const int datum_height = datum.height(); const int datum_width = datum.width(); + // Check dimensions. const int channels = transformed_blob->channels(); const int height = transformed_blob->height(); const int width = transformed_blob->width(); @@ -139,8 +160,6 @@ void DataTransformer::Transform(const Datum& datum, CHECK_LE(width, datum_width); CHECK_GE(num, 1); - const int crop_size = param_.crop_size(); - if (crop_size) { CHECK_EQ(crop_size, height); CHECK_EQ(crop_size, width); @@ -196,10 +215,12 @@ void DataTransformer::Transform(const vector & mat_vector, template void DataTransformer::Transform(const cv::Mat& cv_img, Blob* transformed_blob) { + const int crop_size = param_.crop_size(); const int img_channels = cv_img.channels(); const int img_height = cv_img.rows; const int img_width = cv_img.cols; + // Check dimensions. 
const int channels = transformed_blob->channels(); const int height = transformed_blob->height(); const int width = transformed_blob->width(); @@ -212,7 +233,6 @@ void DataTransformer::Transform(const cv::Mat& cv_img, CHECK(cv_img.depth() == CV_8U) << "Image data type must be unsigned byte"; - const int crop_size = param_.crop_size(); const Dtype scale = param_.scale(); const bool do_mirror = param_.mirror() && Rand(2); const bool has_mean_file = param_.has_mean_file(); @@ -297,11 +317,23 @@ void DataTransformer::Transform(const cv::Mat& cv_img, template void DataTransformer::Transform(Blob* input_blob, Blob* transformed_blob) { + const int crop_size = param_.crop_size(); const int input_num = input_blob->num(); const int input_channels = input_blob->channels(); const int input_height = input_blob->height(); const int input_width = input_blob->width(); + if (transformed_blob->count() == 0) { + // Initialize transformed_blob with the right shape. + if (crop_size) { + transformed_blob->Reshape(input_num, input_channels, + crop_size, crop_size); + } else { + transformed_blob->Reshape(input_num, input_channels, + input_height, input_width); + } + } + const int num = transformed_blob->num(); const int channels = transformed_blob->channels(); const int height = transformed_blob->height(); @@ -313,7 +345,7 @@ void DataTransformer::Transform(Blob* input_blob, CHECK_GE(input_height, height); CHECK_GE(input_width, width); - const int crop_size = param_.crop_size(); + const Dtype scale = param_.scale(); const bool do_mirror = param_.mirror() && Rand(2); const bool has_mean_file = param_.has_mean_file(); @@ -395,6 +427,82 @@ void DataTransformer::Transform(Blob* input_blob, } } +template +vector DataTransformer::InferBlobShape(const Datum& datum) { + if (datum.encoded()) { + CHECK(!(param_.force_color() && param_.force_gray())) + << "cannot set both force_color and force_gray"; + cv::Mat cv_img; + if (param_.force_color() || param_.force_gray()) { + // If force_color then 
decode in color otherwise decode in gray. + cv_img = DecodeDatumToCVMat(datum, param_.force_color()); + } else { + cv_img = DecodeDatumToCVMatNative(datum); + } + // InferBlobShape using the cv::image. + return InferBlobShape(cv_img); + } + + const int crop_size = param_.crop_size(); + const int datum_channels = datum.channels(); + const int datum_height = datum.height(); + const int datum_width = datum.width(); + // Check dimensions. + CHECK_GT(datum_channels, 0); + CHECK_GE(datum_height, crop_size); + CHECK_GE(datum_width, crop_size); + // Build BlobShape. + vector shape(4); + shape[0] = 1; + shape[1] = datum_channels; + shape[2] = (crop_size)? crop_size: datum_height; + shape[3] = (crop_size)? crop_size: datum_width; + return shape; +} + +template +vector DataTransformer::InferBlobShape( + const vector & datum_vector) { + const int num = datum_vector.size(); + CHECK_GT(num, 0) << "There is no datum to in the vector"; + // Use first datum in the vector to InferBlobShape. + vector shape = InferBlobShape(datum_vector[0]); + // Adjust num to the size of the vector. + shape[0] = num; + return shape; +} + +template +vector DataTransformer::InferBlobShape(const cv::Mat& cv_img) { + const int crop_size = param_.crop_size(); + const int img_channels = cv_img.channels(); + const int img_height = cv_img.rows; + const int img_width = cv_img.cols; + // Check dimensions. + CHECK_GT(img_channels, 0); + CHECK_GE(img_height, crop_size); + CHECK_GE(img_width, crop_size); + // Build BlobShape. + vector shape(4); + shape[0] = 1; + shape[1] = img_channels; + shape[2] = (crop_size)? crop_size: img_height; + shape[3] = (crop_size)? crop_size: img_width; + return shape; +} + +template +vector DataTransformer::InferBlobShape( + const vector & mat_vector) { + const int num = mat_vector.size(); + CHECK_GT(num, 0) << "There is no cv_img to in the vector"; + // Use first cv_img in the vector to InferBlobShape. 
+ vector shape = InferBlobShape(mat_vector[0]); + // Adjust num to the size of the vector. + shape[0] = num; + return shape; +} + template void DataTransformer::InitRand() { const bool needs_rand = param_.mirror() || diff --git a/src/caffe/layers/base_data_layer.cpp b/src/caffe/layers/base_data_layer.cpp index 352200915d7..26a1118282f 100644 --- a/src/caffe/layers/base_data_layer.cpp +++ b/src/caffe/layers/base_data_layer.cpp @@ -2,7 +2,6 @@ #include #include "caffe/data_layers.hpp" -#include "caffe/net.hpp" #include "caffe/util/io.hpp" namespace caffe { @@ -21,11 +20,11 @@ void BaseDataLayer::LayerSetUp(const vector*>& bottom, } else { output_labels_ = true; } - // The subclasses should setup the size of bottom and top - DataLayerSetUp(bottom, top); data_transformer_.reset( new DataTransformer(transform_param_, this->phase_)); data_transformer_->InitRand(); + // The subclasses should setup the size of bottom and top + DataLayerSetUp(bottom, top); } template @@ -63,13 +62,15 @@ void BasePrefetchingDataLayer::Forward_cpu( JoinPrefetchThread(); DLOG(INFO) << "Thread joined"; // Reshape to loaded data. - top[0]->Reshape(this->prefetch_data_.num(), this->prefetch_data_.channels(), - this->prefetch_data_.height(), this->prefetch_data_.width()); + top[0]->ReshapeLike(prefetch_data_); // Copy the data caffe_copy(prefetch_data_.count(), prefetch_data_.cpu_data(), top[0]->mutable_cpu_data()); DLOG(INFO) << "Prefetch copied"; if (this->output_labels_) { + // Reshape to loaded labels. + top[1]->ReshapeLike(prefetch_label_); + // Copy the labels. 
caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(), top[1]->mutable_cpu_data()); } diff --git a/src/caffe/layers/base_data_layer.cu b/src/caffe/layers/base_data_layer.cu index 775f6c47f7e..9335a5bc9a9 100644 --- a/src/caffe/layers/base_data_layer.cu +++ b/src/caffe/layers/base_data_layer.cu @@ -10,12 +10,14 @@ void BasePrefetchingDataLayer::Forward_gpu( // First, join the thread JoinPrefetchThread(); // Reshape to loaded data. - top[0]->Reshape(this->prefetch_data_.num(), this->prefetch_data_.channels(), - this->prefetch_data_.height(), this->prefetch_data_.width()); + top[0]->ReshapeLike(this->prefetch_data_); // Copy the data caffe_copy(prefetch_data_.count(), prefetch_data_.cpu_data(), top[0]->mutable_gpu_data()); if (this->output_labels_) { + // Reshape to loaded labels. + top[1]->ReshapeLike(prefetch_label_); + // Copy the labels. caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(), top[1]->mutable_gpu_data()); } diff --git a/src/caffe/layers/concat_layer.cu b/src/caffe/layers/concat_layer.cu index dbadb5aeb30..8f2e85d8f52 100644 --- a/src/caffe/layers/concat_layer.cu +++ b/src/caffe/layers/concat_layer.cu @@ -6,21 +6,41 @@ namespace caffe { +template +__global__ void Concat(const int nthreads, const Dtype* in_data, + const bool forward, const int num_concats, const int concat_size, + const int top_concat_axis, const int bottom_concat_axis, + const int offset_concat_axis, Dtype* out_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int total_concat_size = concat_size * bottom_concat_axis; + const int concat_num = index / total_concat_size; + const int concat_index = index % total_concat_size; + const int top_index = concat_index + + (concat_num * top_concat_axis + offset_concat_axis) * concat_size; + if (forward) { + out_data[top_index] = in_data[index]; + } else { + out_data[index] = in_data[top_index]; + } + } +} + template void ConcatLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { Dtype* top_data = 
top[0]->mutable_gpu_data(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); + const bool kForward = true; for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); const int bottom_concat_axis = bottom[i]->shape(concat_axis_); - for (int n = 0; n < num_concats_; ++n) { - caffe_copy(bottom_concat_axis * concat_input_size_, - bottom_data + n * bottom_concat_axis * concat_input_size_, - top_data + (n * top_concat_axis + offset_concat_axis) - * concat_input_size_); - } + const int bottom_concat_size = bottom_concat_axis * concat_input_size_; + const int nthreads = bottom_concat_size * num_concats_; + Concat // NOLINT_NEXT_LINE(whitespace/operators) + <<>>( + nthreads, bottom_data, kForward, num_concats_, concat_input_size_, + top_concat_axis, bottom_concat_axis, offset_concat_axis, top_data); offset_concat_axis += bottom_concat_axis; } } @@ -31,15 +51,17 @@ void ConcatLayer::Backward_gpu(const vector*>& top, const Dtype* top_diff = top[0]->gpu_diff(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); + const bool kForward = false; for (int i = 0; i < bottom.size(); ++i) { if (!propagate_down[i]) { continue; } Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); const int bottom_concat_axis = bottom[i]->shape(concat_axis_); - for (int n = 0; n < num_concats_; ++n) { - caffe_copy(bottom_concat_axis * concat_input_size_, top_diff + - (n * top_concat_axis + offset_concat_axis) * concat_input_size_, - bottom_diff + n * bottom_concat_axis * concat_input_size_); - } + const int bottom_concat_size = bottom_concat_axis * concat_input_size_; + const int nthreads = bottom_concat_size * num_concats_; + Concat // NOLINT_NEXT_LINE(whitespace/operators) + <<>>( + nthreads, top_diff, kForward, num_concats_, concat_input_size_, + top_concat_axis, bottom_concat_axis, offset_concat_axis, bottom_diff); offset_concat_axis += bottom_concat_axis; } } diff --git 
a/src/caffe/layers/contrastive_loss_layer.cpp b/src/caffe/layers/contrastive_loss_layer.cpp index 0692c11c257..25e167819d3 100644 --- a/src/caffe/layers/contrastive_loss_layer.cpp +++ b/src/caffe/layers/contrastive_loss_layer.cpp @@ -41,6 +41,8 @@ void ContrastiveLossLayer::Forward_cpu( diff_.mutable_cpu_data()); // a_i-b_i const int channels = bottom[0]->channels(); Dtype margin = this->layer_param_.contrastive_loss_param().margin(); + bool legacy_version = + this->layer_param_.contrastive_loss_param().legacy_version(); Dtype loss(0.0); for (int i = 0; i < bottom[0]->num(); ++i) { dist_sq_.mutable_cpu_data()[i] = caffe_cpu_dot(channels, @@ -48,7 +50,12 @@ void ContrastiveLossLayer::Forward_cpu( if (static_cast(bottom[2]->cpu_data()[i])) { // similar pairs loss += dist_sq_.cpu_data()[i]; } else { // dissimilar pairs - loss += std::max(margin-dist_sq_.cpu_data()[i], Dtype(0.0)); + if (legacy_version) { + loss += std::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0)); + } else { + Dtype dist = std::max(margin - sqrt(dist_sq_.cpu_data()[i]), 0.0); + loss += dist*dist; + } } } loss = loss / static_cast(bottom[0]->num()) / Dtype(2); @@ -59,6 +66,8 @@ template void ContrastiveLossLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { Dtype margin = this->layer_param_.contrastive_loss_param().margin(); + bool legacy_version = + this->layer_param_.contrastive_loss_param().legacy_version(); for (int i = 0; i < 2; ++i) { if (propagate_down[i]) { const Dtype sign = (i == 0) ? 
1 : -1; @@ -76,10 +85,20 @@ void ContrastiveLossLayer::Backward_cpu(const vector*>& top, Dtype(0.0), bout + (j*channels)); } else { // dissimilar pairs - if ((margin-dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + Dtype mdist(0.0); + Dtype beta(0.0); + if (legacy_version) { + mdist = margin - dist_sq_.cpu_data()[j]; + beta = -alpha; + } else { + Dtype dist = sqrt(dist_sq_.cpu_data()[j]); + mdist = margin - dist; + beta = -alpha * mdist / (dist + Dtype(1e-4)); + } + if (mdist > Dtype(0.0)) { caffe_cpu_axpby( channels, - -alpha, + beta, diff_.cpu_data() + (j*channels), Dtype(0.0), bout + (j*channels)); diff --git a/src/caffe/layers/contrastive_loss_layer.cu b/src/caffe/layers/contrastive_loss_layer.cu index 78a55995a0a..931239316ac 100644 --- a/src/caffe/layers/contrastive_loss_layer.cu +++ b/src/caffe/layers/contrastive_loss_layer.cu @@ -32,12 +32,20 @@ void ContrastiveLossLayer::Forward_gpu( Dtype(0.0), dist_sq_.mutable_gpu_data()); // \Sum (a_i-b_i)^2 Dtype margin = this->layer_param_.contrastive_loss_param().margin(); + bool legacy_version = + this->layer_param_.contrastive_loss_param().legacy_version(); Dtype loss(0.0); for (int i = 0; i < bottom[0]->num(); ++i) { if (static_cast(bottom[2]->cpu_data()[i])) { // similar pairs loss += dist_sq_.cpu_data()[i]; } else { // dissimilar pairs - loss += std::max(margin-dist_sq_.cpu_data()[i], Dtype(0.0)); + if (legacy_version) { + loss += std::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0)); + } else { + Dtype dist = std::max(margin - sqrt(dist_sq_.cpu_data()[i]), + Dtype(0.0)); + loss += dist*dist; + } } } loss = loss / static_cast(bottom[0]->num()) / Dtype(2); @@ -45,8 +53,8 @@ void ContrastiveLossLayer::Forward_gpu( } template -__global__ void CLLForward(const int count, const int channels, - const Dtype margin, const Dtype alpha, +__global__ void CLLBackward(const int count, const int channels, + const Dtype margin, const bool legacy_version, const Dtype alpha, const Dtype* y, const Dtype* diff, const Dtype* dist_sq, 
Dtype *bottom_diff) { CUDA_KERNEL_LOOP(i, count) { @@ -54,8 +62,18 @@ __global__ void CLLForward(const int count, const int channels, if (static_cast(y[n])) { // similar pairs bottom_diff[i] = alpha * diff[i]; } else { // dissimilar pairs - if ((margin-dist_sq[n]) > 0.0) { - bottom_diff[i] = -alpha * diff[i]; + Dtype mdist(0.0); + Dtype beta(0.0); + if (legacy_version) { + mdist = (margin - dist_sq[n]); + beta = -alpha; + } else { + Dtype dist = sqrt(dist_sq[n]); + mdist = (margin - dist); + beta = -alpha * mdist / (dist + Dtype(1e-4)) * diff[i]; + } + if (mdist > 0.0) { + bottom_diff[i] = beta; } else { bottom_diff[i] = 0; } @@ -71,12 +89,14 @@ void ContrastiveLossLayer::Backward_gpu(const vector*>& top, const int count = bottom[0]->count(); const int channels = bottom[0]->channels(); Dtype margin = this->layer_param_.contrastive_loss_param().margin(); + const bool legacy_version = + this->layer_param_.contrastive_loss_param().legacy_version(); const Dtype sign = (i == 0) ? 1 : -1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / static_cast(bottom[0]->num()); // NOLINT_NEXT_LINE(whitespace/operators) - CLLForward<<>>( - count, channels, margin, alpha, + CLLBackward<<>>( + count, channels, margin, legacy_version, alpha, bottom[2]->gpu_data(), // pair similarity 0 or 1 diff_.gpu_data(), // the cached eltwise difference between a and b dist_sq_.gpu_data(), // the cached square distance between a and b diff --git a/src/caffe/layers/conv_layer.cpp b/src/caffe/layers/conv_layer.cpp index c0c9f6f3371..928ef5ee468 100644 --- a/src/caffe/layers/conv_layer.cpp +++ b/src/caffe/layers/conv_layer.cpp @@ -39,13 +39,6 @@ void ConvolutionLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->cpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_cpu_diff(); - if (this->param_propagate_down_[0]) { - caffe_set(this->blobs_[0]->count(), Dtype(0), weight_diff); - } - if (this->bias_term_ && 
this->param_propagate_down_[1]) { - caffe_set(this->blobs_[1]->count(), Dtype(0), - this->blobs_[1]->mutable_cpu_diff()); - } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->cpu_diff(); const Dtype* bottom_data = bottom[i]->cpu_data(); diff --git a/src/caffe/layers/conv_layer.cu b/src/caffe/layers/conv_layer.cu index 3902fdf3930..b8a98ff7cc9 100644 --- a/src/caffe/layers/conv_layer.cu +++ b/src/caffe/layers/conv_layer.cu @@ -31,13 +31,6 @@ void ConvolutionLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); - if (this->param_propagate_down_[0]) { - caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); - } - if (this->bias_term_ && this->param_propagate_down_[1]) { - caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), - this->blobs_[1]->mutable_gpu_diff()); - } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); // Bias gradient, if necessary. 
diff --git a/src/caffe/layers/cudnn_conv_layer.cu b/src/caffe/layers/cudnn_conv_layer.cu index 4a1a4c4f4f2..b4e802e13d1 100644 --- a/src/caffe/layers/cudnn_conv_layer.cu +++ b/src/caffe/layers/cudnn_conv_layer.cu @@ -101,12 +101,10 @@ void CuDNNConvolutionLayer::Backward_gpu(const vector*>& top, if (this->param_propagate_down_[0]) { weight = this->blobs_[0]->gpu_data(); weight_diff = this->blobs_[0]->mutable_gpu_diff(); - caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); } Dtype* bias_diff = NULL; if (this->bias_term_ && this->param_propagate_down_[1]) { bias_diff = this->blobs_[1]->mutable_gpu_diff(); - caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), bias_diff); } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); diff --git a/src/caffe/layers/data_layer.cpp b/src/caffe/layers/data_layer.cpp index 0f2d66776a9..161a75e0c8c 100644 --- a/src/caffe/layers/data_layer.cpp +++ b/src/caffe/layers/data_layer.cpp @@ -38,32 +38,17 @@ void DataLayer::DataLayerSetUp(const vector*>& bottom, cursor_->Next(); } } - // Read a data point, and use it to initialize the top blob. + // Read a data point, to initialize the prefetch and top blobs. Datum datum; datum.ParseFromString(cursor_->value()); + // Use data_transformer to infer the expected blob shape from datum. + vector top_shape = this->data_transformer_->InferBlobShape(datum); + this->transformed_data_.Reshape(top_shape); + // Reshape top[0] and prefetch_data according to the batch_size. 
+ top_shape[0] = this->layer_param_.data_param().batch_size(); + this->prefetch_data_.Reshape(top_shape); + top[0]->ReshapeLike(this->prefetch_data_); - bool force_color = this->layer_param_.data_param().force_encoded_color(); - if ((force_color && DecodeDatum(&datum, true)) || - DecodeDatumNative(&datum)) { - LOG(INFO) << "Decoding Datum"; - } - // image - int crop_size = this->layer_param_.transform_param().crop_size(); - if (crop_size > 0) { - top[0]->Reshape(this->layer_param_.data_param().batch_size(), - datum.channels(), crop_size, crop_size); - this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(), - datum.channels(), crop_size, crop_size); - this->transformed_data_.Reshape(1, datum.channels(), crop_size, crop_size); - } else { - top[0]->Reshape( - this->layer_param_.data_param().batch_size(), datum.channels(), - datum.height(), datum.width()); - this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(), - datum.channels(), datum.height(), datum.width()); - this->transformed_data_.Reshape(1, datum.channels(), - datum.height(), datum.width()); - } LOG(INFO) << "output data size: " << top[0]->num() << "," << top[0]->channels() << "," << top[0]->height() << "," << top[0]->width(); @@ -86,25 +71,17 @@ void DataLayer::InternalThreadEntry() { CHECK(this->prefetch_data_.count()); CHECK(this->transformed_data_.count()); - // Reshape on single input batches for inputs of varying dimension. + // Reshape according to the first datum of each batch + // on single input batches allows for inputs of varying dimension. 
const int batch_size = this->layer_param_.data_param().batch_size(); - const int crop_size = this->layer_param_.transform_param().crop_size(); - bool force_color = this->layer_param_.data_param().force_encoded_color(); - if (batch_size == 1 && crop_size == 0) { - Datum datum; - datum.ParseFromString(cursor_->value()); - if (datum.encoded()) { - if (force_color) { - DecodeDatum(&datum, true); - } else { - DecodeDatumNative(&datum); - } - } - this->prefetch_data_.Reshape(1, datum.channels(), - datum.height(), datum.width()); - this->transformed_data_.Reshape(1, datum.channels(), - datum.height(), datum.width()); - } + Datum datum; + datum.ParseFromString(cursor_->value()); + // Use data_transformer to infer the expected blob shape from datum. + vector top_shape = this->data_transformer_->InferBlobShape(datum); + this->transformed_data_.Reshape(top_shape); + // Reshape prefetch_data according to the batch_size. + top_shape[0] = batch_size; + this->prefetch_data_.Reshape(top_shape); Dtype* top_data = this->prefetch_data_.mutable_cpu_data(); Dtype* top_label = NULL; // suppress warnings about uninitialized variables @@ -112,48 +89,31 @@ void DataLayer::InternalThreadEntry() { if (this->output_labels_) { top_label = this->prefetch_label_.mutable_cpu_data(); } + timer.Start(); for (int item_id = 0; item_id < batch_size; ++item_id) { - timer.Start(); - // get a blob + // get a datum Datum datum; datum.ParseFromString(cursor_->value()); - - cv::Mat cv_img; - if (datum.encoded()) { - if (force_color) { - cv_img = DecodeDatumToCVMat(datum, true); - } else { - cv_img = DecodeDatumToCVMatNative(datum); - } - if (cv_img.channels() != this->transformed_data_.channels()) { - LOG(WARNING) << "Your dataset contains encoded images with mixed " - << "channel sizes. 
Consider adding a 'force_color' flag to the " - << "model definition, or rebuild your dataset using " - << "convert_imageset."; - } - } read_time += timer.MicroSeconds(); timer.Start(); - // Apply data transformations (mirror, scale, crop...) int offset = this->prefetch_data_.offset(item_id); this->transformed_data_.set_cpu_data(top_data + offset); - if (datum.encoded()) { - this->data_transformer_->Transform(cv_img, &(this->transformed_data_)); - } else { - this->data_transformer_->Transform(datum, &(this->transformed_data_)); - } + this->data_transformer_->Transform(datum, &(this->transformed_data_)); + // Copy label. if (this->output_labels_) { top_label[item_id] = datum.label(); } trans_time += timer.MicroSeconds(); - // go to the next iter + timer.Start(); + // go to the next item. cursor_->Next(); if (!cursor_->valid()) { DLOG(INFO) << "Restarting data prefetching from start."; cursor_->SeekToFirst(); } } + timer.Stop(); batch_timer.Stop(); DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms."; DLOG(INFO) << " Read time: " << read_time / 1000 << " ms."; diff --git a/src/caffe/layers/deconv_layer.cpp b/src/caffe/layers/deconv_layer.cpp index e6d65ab526b..a4612963b6b 100644 --- a/src/caffe/layers/deconv_layer.cpp +++ b/src/caffe/layers/deconv_layer.cpp @@ -39,13 +39,6 @@ void DeconvolutionLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->cpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_cpu_diff(); - if (this->param_propagate_down_[0]) { - caffe_set(this->blobs_[0]->count(), Dtype(0), weight_diff); - } - if (this->bias_term_ && this->param_propagate_down_[1]) { - caffe_set(this->blobs_[1]->count(), Dtype(0), - this->blobs_[1]->mutable_cpu_diff()); - } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->cpu_diff(); const Dtype* bottom_data = bottom[i]->cpu_data(); diff --git a/src/caffe/layers/deconv_layer.cu 
b/src/caffe/layers/deconv_layer.cu index 9198dd64c72..39bc4de8c66 100644 --- a/src/caffe/layers/deconv_layer.cu +++ b/src/caffe/layers/deconv_layer.cu @@ -31,13 +31,6 @@ void DeconvolutionLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); - if (this->param_propagate_down_[0]) { - caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); - } - if (this->bias_term_ && this->param_propagate_down_[1]) { - caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), - this->blobs_[1]->mutable_gpu_diff()); - } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); const Dtype* bottom_data = bottom[i]->gpu_data(); diff --git a/src/caffe/layers/flatten_layer.cpp b/src/caffe/layers/flatten_layer.cpp index 745f271ea45..f7e5c9c2172 100644 --- a/src/caffe/layers/flatten_layer.cpp +++ b/src/caffe/layers/flatten_layer.cpp @@ -9,9 +9,19 @@ namespace caffe { template void FlattenLayer::Reshape(const vector*>& bottom, const vector*>& top) { - vector top_shape(2); - top_shape[0] = bottom[0]->num(); - top_shape[1] = bottom[0]->count() / bottom[0]->num(); + const int start_axis = bottom[0]->CanonicalAxisIndex( + this->layer_param_.flatten_param().axis()); + const int end_axis = bottom[0]->CanonicalAxisIndex( + this->layer_param_.flatten_param().end_axis()); + vector top_shape; + for (int i = 0; i < start_axis; ++i) { + top_shape.push_back(bottom[0]->shape(i)); + } + const int flattened_dim = bottom[0]->count(start_axis, end_axis + 1); + top_shape.push_back(flattened_dim); + for (int i = end_axis + 1; i < bottom[0]->num_axes(); ++i) { + top_shape.push_back(bottom[0]->shape(i)); + } top[0]->Reshape(top_shape); CHECK_EQ(top[0]->count(), bottom[0]->count()); } diff --git a/src/caffe/layers/image_data_layer.cpp b/src/caffe/layers/image_data_layer.cpp index 38ebbd5ec14..18c035cba9d 100644 --- 
a/src/caffe/layers/image_data_layer.cpp +++ b/src/caffe/layers/image_data_layer.cpp @@ -62,21 +62,15 @@ void ImageDataLayer::DataLayerSetUp(const vector*>& bottom, // Read an image, and use it to initialize the top blob. cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, new_height, new_width, is_color); - const int channels = cv_img.channels(); - const int height = cv_img.rows; - const int width = cv_img.cols; - // image - const int crop_size = this->layer_param_.transform_param().crop_size(); + // Use data_transformer to infer the expected blob shape from a cv_image. + vector top_shape = this->data_transformer_->InferBlobShape(cv_img); + this->transformed_data_.Reshape(top_shape); + // Reshape prefetch_data and top[0] according to the batch_size. const int batch_size = this->layer_param_.image_data_param().batch_size(); - if (crop_size > 0) { - top[0]->Reshape(batch_size, channels, crop_size, crop_size); - this->prefetch_data_.Reshape(batch_size, channels, crop_size, crop_size); - this->transformed_data_.Reshape(1, channels, crop_size, crop_size); - } else { - top[0]->Reshape(batch_size, channels, height, width); - this->prefetch_data_.Reshape(batch_size, channels, height, width); - this->transformed_data_.Reshape(1, channels, height, width); - } + top_shape[0] = batch_size; + this->prefetch_data_.Reshape(top_shape); + top[0]->ReshapeLike(this->prefetch_data_); + LOG(INFO) << "output data size: " << top[0]->num() << "," << top[0]->channels() << "," << top[0]->height() << "," << top[0]->width(); @@ -107,19 +101,19 @@ void ImageDataLayer::InternalThreadEntry() { const int batch_size = image_data_param.batch_size(); const int new_height = image_data_param.new_height(); const int new_width = image_data_param.new_width(); - const int crop_size = this->layer_param_.transform_param().crop_size(); const bool is_color = image_data_param.is_color(); string root_folder = image_data_param.root_folder(); - // Reshape on single input batches for inputs of 
varying dimension. - if (batch_size == 1 && crop_size == 0 && new_height == 0 && new_width == 0) { - cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, - 0, 0, is_color); - this->prefetch_data_.Reshape(1, cv_img.channels(), - cv_img.rows, cv_img.cols); - this->transformed_data_.Reshape(1, cv_img.channels(), - cv_img.rows, cv_img.cols); - } + // Reshape according to the first image of each batch + // on single input batches allows for inputs of varying dimension. + cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, + new_height, new_width, is_color); + // Use data_transformer to infer the expected blob shape from a cv_img. + vector top_shape = this->data_transformer_->InferBlobShape(cv_img); + this->transformed_data_.Reshape(top_shape); + // Reshape prefetch_data according to the batch_size. + top_shape[0] = batch_size; + this->prefetch_data_.Reshape(top_shape); Dtype* prefetch_data = this->prefetch_data_.mutable_cpu_data(); Dtype* prefetch_label = this->prefetch_label_.mutable_cpu_data(); diff --git a/src/caffe/layers/inner_product_layer.cpp b/src/caffe/layers/inner_product_layer.cpp index 89e0c8fbad7..83c3235eb71 100644 --- a/src/caffe/layers/inner_product_layer.cpp +++ b/src/caffe/layers/inner_product_layer.cpp @@ -101,13 +101,13 @@ void InnerProductLayer::Backward_cpu(const vector*>& top, const Dtype* bottom_data = bottom[0]->cpu_data(); // Gradient with respect to weight caffe_cpu_gemm(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., - top_diff, bottom_data, (Dtype)0., this->blobs_[0]->mutable_cpu_diff()); + top_diff, bottom_data, (Dtype)1., this->blobs_[0]->mutable_cpu_diff()); } if (bias_term_ && this->param_propagate_down_[1]) { const Dtype* top_diff = top[0]->cpu_diff(); // Gradient with respect to bias caffe_cpu_gemv(CblasTrans, M_, N_, (Dtype)1., top_diff, - bias_multiplier_.cpu_data(), (Dtype)0., + bias_multiplier_.cpu_data(), (Dtype)1., this->blobs_[1]->mutable_cpu_diff()); } if (propagate_down[0]) { diff 
--git a/src/caffe/layers/inner_product_layer.cu b/src/caffe/layers/inner_product_layer.cu index a9e1784a205..dd90cac12a8 100644 --- a/src/caffe/layers/inner_product_layer.cu +++ b/src/caffe/layers/inner_product_layer.cu @@ -33,13 +33,13 @@ void InnerProductLayer::Backward_gpu(const vector*>& top, const Dtype* bottom_data = bottom[0]->gpu_data(); // Gradient with respect to weight caffe_gpu_gemm(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., - top_diff, bottom_data, (Dtype)0., this->blobs_[0]->mutable_gpu_diff()); + top_diff, bottom_data, (Dtype)1., this->blobs_[0]->mutable_gpu_diff()); } if (bias_term_ && this->param_propagate_down_[1]) { const Dtype* top_diff = top[0]->gpu_diff(); // Gradient with respect to bias caffe_gpu_gemv(CblasTrans, M_, N_, (Dtype)1., top_diff, - bias_multiplier_.gpu_data(), (Dtype)0., + bias_multiplier_.gpu_data(), (Dtype)1., this->blobs_[1]->mutable_gpu_diff()); } if (propagate_down[0]) { diff --git a/src/caffe/layers/log_layer.cpp b/src/caffe/layers/log_layer.cpp index 1e86a751e9e..55a227f6226 100644 --- a/src/caffe/layers/log_layer.cpp +++ b/src/caffe/layers/log_layer.cpp @@ -79,53 +79,6 @@ void LogLayer::Backward_cpu(const vector*>& top, #ifdef CPU_ONLY STUB_GPU(LogLayer); -#else - -template -void LogLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { - const int count = bottom[0]->count(); - const Dtype* bottom_data = bottom[0]->gpu_data(); - Dtype* top_data = top[0]->mutable_gpu_data(); - if (input_scale_ == Dtype(1) && input_shift_ == Dtype(0)) { - caffe_gpu_log(count, bottom_data, top_data); - } else { - caffe_copy(count, bottom_data, top_data); - if (input_scale_ != Dtype(1)) { - caffe_gpu_scal(count, input_scale_, top_data); - } - if (input_shift_ != Dtype(0)) { - caffe_gpu_add_scalar(count, input_shift_, top_data); - } - caffe_gpu_log(count, top_data, top_data); - } - if (base_scale_ != Dtype(1)) { - caffe_gpu_scal(count, base_scale_, top_data); - } -} - -template -void LogLayer::Backward_gpu(const vector*>& 
top, - const vector& propagate_down, const vector*>& bottom) { - if (!propagate_down[0]) { return; } - const int count = bottom[0]->count(); - const Dtype* bottom_data = bottom[0]->gpu_data(); - const Dtype* top_diff = top[0]->gpu_diff(); - Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - caffe_copy(count, bottom_data, bottom_diff); - if (input_scale_ != Dtype(1)) { - caffe_gpu_scal(count, input_scale_, bottom_diff); - } - if (input_shift_ != Dtype(0)) { - caffe_gpu_add_scalar(count, input_shift_, bottom_diff); - } - caffe_gpu_powx(count, bottom_diff, Dtype(-1), bottom_diff); - if (backward_num_scale_ != Dtype(1)) { - caffe_gpu_scal(count, backward_num_scale_, bottom_diff); - } - caffe_gpu_mul(count, top_diff, bottom_diff, bottom_diff); -} - #endif INSTANTIATE_CLASS(LogLayer); diff --git a/src/caffe/layers/log_layer.cu b/src/caffe/layers/log_layer.cu new file mode 100644 index 00000000000..847c86cd10c --- /dev/null +++ b/src/caffe/layers/log_layer.cu @@ -0,0 +1,57 @@ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/neuron_layers.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +void LogLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + const int count = bottom[0]->count(); + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + if (input_scale_ == Dtype(1) && input_shift_ == Dtype(0)) { + caffe_gpu_log(count, bottom_data, top_data); + } else { + caffe_copy(count, bottom_data, top_data); + if (input_scale_ != Dtype(1)) { + caffe_gpu_scal(count, input_scale_, top_data); + } + if (input_shift_ != Dtype(0)) { + caffe_gpu_add_scalar(count, input_shift_, top_data); + } + caffe_gpu_log(count, top_data, top_data); + } + if (base_scale_ != Dtype(1)) { + caffe_gpu_scal(count, base_scale_, top_data); + } +} + +template +void LogLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + if (!propagate_down[0]) { 
return; } + const int count = bottom[0]->count(); + const Dtype* bottom_data = bottom[0]->gpu_data(); + const Dtype* top_diff = top[0]->gpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + caffe_copy(count, bottom_data, bottom_diff); + if (input_scale_ != Dtype(1)) { + caffe_gpu_scal(count, input_scale_, bottom_diff); + } + if (input_shift_ != Dtype(0)) { + caffe_gpu_add_scalar(count, input_shift_, bottom_diff); + } + caffe_gpu_powx(count, bottom_diff, Dtype(-1), bottom_diff); + if (backward_num_scale_ != Dtype(1)) { + caffe_gpu_scal(count, backward_num_scale_, bottom_diff); + } + caffe_gpu_mul(count, top_diff, bottom_diff, bottom_diff); +} + +INSTANTIATE_LAYER_GPU_FUNCS(LogLayer); + +} // namespace caffe diff --git a/src/caffe/layers/lrn_layer.cu b/src/caffe/layers/lrn_layer.cu index 24aa6a30130..001b3c34ac1 100644 --- a/src/caffe/layers/lrn_layer.cu +++ b/src/caffe/layers/lrn_layer.cu @@ -7,44 +7,46 @@ namespace caffe { template -__global__ void LRNFillScale(const int nthreads, const Dtype* in, +__global__ void LRNFillScale(const int nthreads, const Dtype* const in, const int num, const int channels, const int height, const int width, const int size, const Dtype alpha_over_size, - const Dtype k, Dtype* scale) { + const Dtype k, Dtype* const scale) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset - int w = index % width; - int h = (index / width) % height; - int n = index / width / height; - int offset = (n * channels * height + h) * width + w; - int step = height * width; - in += offset; - scale += offset; + const int w = index % width; + const int h = (index / width) % height; + const int n = index / width / height; + const int offset = (n * channels * height + h) * width + w; + const int step = height * width; + const Dtype* const in_off = in + offset; + Dtype* const scale_off = scale + offset; int head = 0; - int pre_pad = (size - 1) / 2; - int post_pad = size - pre_pad - 1; + const int pre_pad = (size - 1) / 2; + const int 
post_pad = size - pre_pad - 1; Dtype accum_scale = 0; // fill the scale at [n, :, h, w] // accumulate values while (head < post_pad && head < channels) { - accum_scale += in[head * step] * in[head * step]; + accum_scale += in_off[head * step] * in_off[head * step]; ++head; } // both add and subtract while (head < channels) { - accum_scale += in[head * step] * in[head * step]; + accum_scale += in_off[head * step] * in_off[head * step]; if (head - size >= 0) { - accum_scale -= in[(head - size) * step] * in[(head - size) * step]; + accum_scale -= in_off[(head - size) * step] + * in_off[(head - size) * step]; } - scale[(head - post_pad) * step] = k + accum_scale * alpha_over_size; + scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; ++head; } // subtract only while (head < channels + post_pad) { if (head - size >= 0) { - accum_scale -= in[(head - size) * step] * in[(head - size) * step]; + accum_scale -= in_off[(head - size) * step] + * in_off[(head - size) * step]; } - scale[(head - post_pad) * step] = k + accum_scale * alpha_over_size; + scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; ++head; } } @@ -68,8 +70,8 @@ void LRNLayer::Forward_gpu(const vector*>& bottom, // TODO: check if it would be faster to just put it into the previous kernel. 
template -__global__ void LRNComputeOutput(const int nthreads, const Dtype* in, - const Dtype* scale, const Dtype negative_beta, Dtype* out) { +__global__ void LRNComputeOutput(const int nthreads, const Dtype* const in, + const Dtype* const scale, const Dtype negative_beta, Dtype* const out) { CUDA_KERNEL_LOOP(index, nthreads) { out[index] = in[index] * pow(scale[index], negative_beta); } @@ -118,56 +120,58 @@ void LRNLayer::Backward_gpu(const vector*>& top, } template -__global__ void LRNComputeDiff(const int nthreads, const Dtype* bottom_data, - const Dtype* top_data, const Dtype* scale, const Dtype* top_diff, +__global__ void LRNComputeDiff(const int nthreads, + const Dtype* const bottom_data, const Dtype* const top_data, + const Dtype* const scale, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int size, const Dtype negative_beta, - const Dtype cache_ratio, - Dtype* bottom_diff) { + const Dtype cache_ratio, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset - int w = index % width; - int h = (index / width) % height; - int n = index / width / height; - int offset = (n * channels * height + h) * width + w; - int step = height * width; - bottom_data += offset; - top_data += offset; - scale += offset; - top_diff += offset; - bottom_diff += offset; + const int w = index % width; + const int h = (index / width) % height; + const int n = index / width / height; + const int offset = (n * channels * height + h) * width + w; + const int step = height * width; + const Dtype* const bottom_off = bottom_data + offset; + const Dtype* const top_off = top_data + offset; + const Dtype* const scale_off = scale + offset; + const Dtype* const top_diff_off = top_diff + offset; + Dtype* const bottom_diff_off = bottom_diff + offset; int head = 0; - int pre_pad = size - (size + 1) / 2; - int post_pad = size - pre_pad - 1; + const int pre_pad = size - (size + 1) / 2; + const int post_pad 
= size - pre_pad - 1; Dtype accum_ratio = 0; // accumulate values while (head < post_pad && head < channels) { - accum_ratio += top_diff[head * step] * top_data[head * step] / - scale[head * step]; + accum_ratio += top_diff_off[head * step] * top_off[head * step] / + scale_off[head * step]; ++head; } // both add and subtract while (head < channels) { - accum_ratio += top_diff[head * step] * top_data[head * step] / - scale[head * step]; + accum_ratio += top_diff_off[head * step] * top_off[head * step] / + scale_off[head * step]; if (head - size >= 0) { - accum_ratio -= top_diff[(head - size) * step] * - top_data[(head - size) * step] / scale[(head - size) * step]; + accum_ratio -= top_diff_off[(head - size) * step] * + top_off[(head - size) * step] / scale_off[(head - size) * step]; } - bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] - * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * - bottom_data[(head - post_pad) * step] * accum_ratio; + bottom_diff_off[(head - post_pad) * step] = + top_diff_off[(head - post_pad) * step] + * pow(scale_off[(head - post_pad) * step], negative_beta) + - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; ++head; } // subtract only while (head < channels + post_pad) { if (head - size >= 0) { - accum_ratio -= top_diff[(head - size) * step] * - top_data[(head - size) * step] / scale[(head - size) * step]; + accum_ratio -= top_diff_off[(head - size) * step] * + top_off[(head - size) * step] / scale_off[(head - size) * step]; } - bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] - * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * - bottom_data[(head - post_pad) * step] * accum_ratio; + bottom_diff_off[(head - post_pad) * step] = + top_diff_off[(head - post_pad) * step] + * pow(scale_off[(head - post_pad) * step], negative_beta) + - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; ++head; } } diff --git 
a/src/caffe/layers/mvn_layer.cpp b/src/caffe/layers/mvn_layer.cpp index b74d7b4f300..3e79bddcdde 100644 --- a/src/caffe/layers/mvn_layer.cpp +++ b/src/caffe/layers/mvn_layer.cpp @@ -22,6 +22,7 @@ void MVNLayer::Reshape(const vector*>& bottom, bottom[0]->height(), bottom[0]->width()); Dtype* multiplier_data = sum_multiplier_.mutable_cpu_data(); caffe_set(sum_multiplier_.count(), Dtype(1), multiplier_data); + eps_ = this->layer_param_.mvn_param().eps(); } template @@ -36,7 +37,6 @@ void MVNLayer::Forward_cpu(const vector*>& bottom, num = bottom[0]->num() * bottom[0]->channels(); int dim = bottom[0]->count() / num; - Dtype eps = 1e-10; if (this->layer_param_.mvn_param().normalize_variance()) { // put the squares of bottom into temp_ @@ -66,7 +66,7 @@ void MVNLayer::Forward_cpu(const vector*>& bottom, caffe_powx(variance_.count(), variance_.cpu_data(), Dtype(0.5), variance_.mutable_cpu_data()); - caffe_add_scalar(variance_.count(), eps, variance_.mutable_cpu_data()); + caffe_add_scalar(variance_.count(), eps_, variance_.mutable_cpu_data()); caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., variance_.cpu_data(), sum_multiplier_.cpu_data(), 0., @@ -102,7 +102,6 @@ void MVNLayer::Backward_cpu(const vector*>& top, num = bottom[0]->num() * bottom[0]->channels(); int dim = bottom[0]->count() / num; - Dtype eps = 1e-10; if (this->layer_param_.mvn_param().normalize_variance()) { caffe_mul(temp_.count(), top_data, top_diff, bottom_diff); @@ -125,24 +124,6 @@ void MVNLayer::Backward_cpu(const vector*>& top, // put the squares of bottom into temp_ caffe_powx(temp_.count(), bottom_data, Dtype(2), temp_.mutable_cpu_data()); - - // computes variance using var(X) = E(X^2) - (EX)^2 - caffe_cpu_gemv(CblasNoTrans, num, dim, 1. / dim, bottom_data, - sum_multiplier_.cpu_data(), 0., mean_.mutable_cpu_data()); // EX - caffe_cpu_gemv(CblasNoTrans, num, dim, 1. 
/ dim, temp_.cpu_data(), - sum_multiplier_.cpu_data(), 0., - variance_.mutable_cpu_data()); // E(X^2) - caffe_powx(mean_.count(), mean_.cpu_data(), Dtype(2), - temp_.mutable_cpu_data()); // (EX)^2 - caffe_sub(mean_.count(), variance_.cpu_data(), temp_.cpu_data(), - variance_.mutable_cpu_data()); // variance - - // normalize variance - caffe_powx(variance_.count(), variance_.cpu_data(), Dtype(0.5), - variance_.mutable_cpu_data()); - - caffe_add_scalar(variance_.count(), eps, variance_.mutable_cpu_data()); - caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., variance_.cpu_data(), sum_multiplier_.cpu_data(), 0., temp_.mutable_cpu_data()); diff --git a/src/caffe/layers/mvn_layer.cu b/src/caffe/layers/mvn_layer.cu index 0667f50380f..3888a0c7106 100644 --- a/src/caffe/layers/mvn_layer.cu +++ b/src/caffe/layers/mvn_layer.cu @@ -36,8 +36,6 @@ void MVNLayer::Forward_gpu(const vector*>& bottom, caffe_gpu_sub(mean_.count(), variance_.gpu_data(), temp_.gpu_data(), variance_.mutable_gpu_data()); // variance - Dtype eps = 1e-10; - // do mean and variance normalization // subtract mean caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, -1., @@ -50,7 +48,7 @@ void MVNLayer::Forward_gpu(const vector*>& bottom, caffe_gpu_powx(variance_.count(), variance_.gpu_data(), Dtype(0.5), variance_.mutable_gpu_data()); - caffe_gpu_add_scalar(variance_.count(), eps, variance_.mutable_gpu_data()); + caffe_gpu_add_scalar(variance_.count(), eps_, variance_.mutable_gpu_data()); caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., variance_.gpu_data(), sum_multiplier_.gpu_data(), 0., @@ -87,8 +85,6 @@ void MVNLayer::Backward_gpu(const vector*>& top, int dim = bottom[0]->count() / num; - Dtype eps = 1e-10; - if (this->layer_param_.mvn_param().normalize_variance()) { caffe_gpu_mul(temp_.count(), top_data, top_diff, bottom_diff); caffe_gpu_gemv(CblasNoTrans, num, dim, 1., bottom_diff, @@ -111,23 +107,6 @@ void MVNLayer::Backward_gpu(const vector*>& top, 
caffe_gpu_powx(temp_.count(), bottom_data, Dtype(2), temp_.mutable_gpu_data()); - // computes variance using var(X) = E(X^2) - (EX)^2 - caffe_gpu_gemv(CblasNoTrans, num, dim, 1. / dim, bottom_data, - sum_multiplier_.gpu_data(), 0., mean_.mutable_gpu_data()); // EX - caffe_gpu_gemv(CblasNoTrans, num, dim, 1. / dim, temp_.gpu_data(), - sum_multiplier_.gpu_data(), 0., - variance_.mutable_gpu_data()); // E(X^2) - caffe_gpu_powx(mean_.count(), mean_.gpu_data(), Dtype(2), - temp_.mutable_gpu_data()); // (EX)^2 - caffe_gpu_sub(mean_.count(), variance_.gpu_data(), temp_.gpu_data(), - variance_.mutable_gpu_data()); // variance - - // normalize variance - caffe_gpu_powx(variance_.count(), variance_.gpu_data(), Dtype(0.5), - variance_.mutable_gpu_data()); - - caffe_gpu_add_scalar(variance_.count(), eps, variance_.mutable_gpu_data()); - caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., variance_.gpu_data(), sum_multiplier_.gpu_data(), 0., temp_.mutable_gpu_data()); diff --git a/src/caffe/layers/pooling_layer.cu b/src/caffe/layers/pooling_layer.cu index d1d48501af3..ca4b13f7c41 100644 --- a/src/caffe/layers/pooling_layer.cu +++ b/src/caffe/layers/pooling_layer.cu @@ -9,31 +9,32 @@ namespace caffe { template -__global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data, - const int num, const int channels, const int height, - const int width, const int pooled_height, const int pooled_width, - const int kernel_h, const int kernel_w, const int stride_h, - const int stride_w, const int pad_h, const int pad_w, Dtype* top_data, - int* mask, Dtype* top_mask) { +__global__ void MaxPoolForward(const int nthreads, + const Dtype* const bottom_data, const int num, const int channels, + const int height, const int width, const int pooled_height, + const int pooled_width, const int kernel_h, const int kernel_w, + const int stride_h, const int stride_w, const int pad_h, const int pad_w, + Dtype* const top_data, int* mask, Dtype* top_mask) { CUDA_KERNEL_LOOP(index, 
nthreads) { - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int c = (index / pooled_width / pooled_height) % channels; - int n = index / pooled_width / pooled_height / channels; + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; - int hend = min(hstart + kernel_h, height); - int wend = min(wstart + kernel_w, width); + const int hend = min(hstart + kernel_h, height); + const int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); Dtype maxval = -FLT_MAX; int maxidx = -1; - bottom_data += (n * channels + c) * height * width; + const Dtype* const bottom_slice = + bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - if (bottom_data[h * width + w] > maxval) { + if (bottom_slice[h * width + w] > maxval) { maxidx = h * width + w; - maxval = bottom_data[maxidx]; + maxval = bottom_slice[maxidx]; } } } @@ -47,30 +48,32 @@ __global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data, } template -__global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data, - const int num, const int channels, const int height, - const int width, const int pooled_height, const int pooled_width, - const int kernel_h, const int kernel_w, const int stride_h, - const int stride_w, const int pad_h, const int pad_w, Dtype* top_data) { +__global__ void AvePoolForward(const int nthreads, + const Dtype* const bottom_data, const int num, const int channels, + const int height, const int width, const int pooled_height, + const int pooled_width, const int kernel_h, const int kernel_w, + const int stride_h, const int stride_w, const int pad_h, const int pad_w, + Dtype* const top_data) 
{ CUDA_KERNEL_LOOP(index, nthreads) { - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int c = (index / pooled_width / pooled_height) % channels; - int n = index / pooled_width / pooled_height / channels; + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); - int pool_size = (hend - hstart) * (wend - wstart); + const int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); Dtype aveval = 0; - bottom_data += (n * channels + c) * height * width; + const Dtype* const bottom_slice = + bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - aveval += bottom_data[h * width + w]; + aveval += bottom_slice[h * width + w]; } } top_data[index] = aveval / pool_size; @@ -79,37 +82,38 @@ __global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data, template __global__ void StoPoolForwardTrain(const int nthreads, - const Dtype* bottom_data, + const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, - const int stride_w, Dtype* rand_idx, Dtype* top_data) { + const int stride_w, Dtype* const rand_idx, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int c = (index / pooled_width / pooled_height) % channels; - int n = index / pooled_width / pooled_height / channels; - int hstart 
= ph * stride_h; - int hend = min(hstart + kernel_h, height); - int wstart = pw * stride_w; - int wend = min(wstart + kernel_w, width); + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + const int hstart = ph * stride_h; + const int hend = min(hstart + kernel_h, height); + const int wstart = pw * stride_w; + const int wend = min(wstart + kernel_w, width); Dtype cumsum = 0.; - bottom_data += (n * channels + c) * height * width; + const Dtype* const bottom_slice = + bottom_data + (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - cumsum += bottom_data[h * width + w]; + cumsum += bottom_slice[h * width + w]; } } - float thres = rand_idx[index] * cumsum; + const float thres = rand_idx[index] * cumsum; // Second pass: get value, and set index. 
cumsum = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - cumsum += bottom_data[h * width + w]; + cumsum += bottom_slice[h * width + w]; if (cumsum >= thres) { rand_idx[index] = ((n * channels + c) * height + h) * width + w; - top_data[index] = bottom_data[h * width + w]; + top_data[index] = bottom_slice[h * width + w]; return; } } @@ -120,29 +124,30 @@ __global__ void StoPoolForwardTrain(const int nthreads, template __global__ void StoPoolForwardTest(const int nthreads, - const Dtype* bottom_data, + const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, - const int stride_w, Dtype* top_data) { + const int stride_w, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int c = (index / pooled_width / pooled_height) % channels; - int n = index / pooled_width / pooled_height / channels; - int hstart = ph * stride_h; - int hend = min(hstart + kernel_h, height); - int wstart = pw * stride_w; - int wend = min(wstart + kernel_w, width); + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + const int hstart = ph * stride_h; + const int hend = min(hstart + kernel_h, height); + const int wstart = pw * stride_w; + const int wend = min(wstart + kernel_w, width); // We set cumsum to be 0 to avoid divide-by-zero problems Dtype cumsum = FLT_MIN; Dtype cumvalues = 0.; - bottom_data += (n * channels + c) * height * width; + const Dtype* const bottom_slice = + bottom_data + (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - cumsum += bottom_data[h 
* width + w]; - cumvalues += bottom_data[h * width + w] * bottom_data[h * width + w]; + cumsum += bottom_slice[h * width + w]; + cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w]; } } top_data[index] = cumvalues / cumsum; @@ -210,43 +215,43 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, template -__global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, - const int* mask, const Dtype* top_mask, const int num, const int channels, - const int height, const int width, const int pooled_height, - const int pooled_width, const int kernel_h, const int kernel_w, - const int stride_h, const int stride_w, const int pad_h, const int pad_w, - Dtype* bottom_diff) { +__global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff, + const int* const mask, const Dtype* const top_mask, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset - int w = index % width; - int h = (index / width) % height; - int c = (index / width / height) % channels; - int n = index / width / height / channels; - int phstart = - (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; - int phend = min((h + pad_h) / stride_h + 1, pooled_height); - int pwstart = - (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; - int pwend = min((w + pad_w) / stride_w + 1, pooled_width); + const int w = index % width; + const int h = (index / width) % height; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = + (h + pad_h < kernel_h) ? 
0 : (h + pad_h - kernel_h) / stride_h + 1; + const int phend = min((h + pad_h) / stride_h + 1, pooled_height); + const int pwstart = + (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; + const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); Dtype gradient = 0; - int offset = (n * channels + c) * pooled_height * pooled_width; - top_diff += offset; + const int offset = (n * channels + c) * pooled_height * pooled_width; + const Dtype* const top_diff_slice = top_diff + offset; if (mask) { - mask += offset; + const int* const mask_slice = mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { - if (mask[ph * pooled_width + pw] == h * width + w) { - gradient += top_diff[ph * pooled_width + pw]; + if (mask_slice[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_slice[ph * pooled_width + pw]; } } } } else { - top_mask += offset; + const Dtype* const top_mask_slice = top_mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { - if (top_mask[ph * pooled_width + pw] == h * width + w) { - gradient += top_diff[ph * pooled_width + pw]; + if (top_mask_slice[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_slice[ph * pooled_width + pw]; } } } @@ -256,25 +261,26 @@ __global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, } template -__global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff, +__global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, - Dtype* bottom_diff) { + Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset - int w = index % width + pad_w; - int h = (index / 
width) % height + pad_h; - int c = (index / width / height) % channels; - int n = index / width / height / channels; - int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; - int phend = min(h / stride_h + 1, pooled_height); - int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; - int pwend = min(w / stride_w + 1, pooled_width); + const int w = index % width + pad_w; + const int h = (index / width) % height + pad_h; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + const int phend = min(h / stride_h + 1, pooled_height); + const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; + const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; - top_diff += (n * channels + c) * pooled_height * pooled_width; + const Dtype* const top_diff_slice = + top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size @@ -283,7 +289,7 @@ __global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff, int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); - gradient += top_diff[ph * pooled_width + pw] / pool_size; + gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; } } bottom_diff[index] = gradient; @@ -293,29 +299,31 @@ __global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff, template __global__ void StoPoolBackward(const int nthreads, - const Dtype* rand_idx, const Dtype* top_diff, + const Dtype* const rand_idx, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, - const int stride_w, Dtype* 
bottom_diff) { + const int stride_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset - int w = index % width; - int h = (index / width) % height; - int c = (index / width / height) % channels; - int n = index / width / height / channels; - int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; - int phend = min(h / stride_h + 1, pooled_height); - int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; - int pwend = min(w / stride_w + 1, pooled_width); + const int w = index % width; + const int h = (index / width) % height; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + const int phend = min(h / stride_h + 1, pooled_height); + const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; + const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; - rand_idx += (n * channels + c) * pooled_height * pooled_width; - top_diff += (n * channels + c) * pooled_height * pooled_width; + const Dtype* const rand_idx_slice = + rand_idx + (n * channels + c) * pooled_height * pooled_width; + const Dtype* const top_diff_slice = + top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { - gradient += top_diff[ph * pooled_width + pw] * - (index == static_cast(rand_idx[ph * pooled_width + pw])); + gradient += top_diff_slice[ph * pooled_width + pw] * + (index == static_cast(rand_idx_slice[ph * pooled_width + pw])); } } bottom_diff[index] = gradient; diff --git a/src/caffe/layers/prelu_layer.cpp b/src/caffe/layers/prelu_layer.cpp index 7119a274dd3..81831755512 100644 --- a/src/caffe/layers/prelu_layer.cpp +++ b/src/caffe/layers/prelu_layer.cpp @@ -45,7 +45,8 @@ void PReLULayer::LayerSetUp(const vector*>& bottom, // Propagate gradients to the 
parameters (as directed by backward pass). this->param_propagate_down_.resize(this->blobs_.size(), true); - multiplier_.Reshape(vector(1, bottom[0]->count() / bottom[0]->num())); + multiplier_.Reshape(vector(1, bottom[0]->count(1))); + backward_buff_.Reshape(vector(1, bottom[0]->count(1))); caffe_set(multiplier_.count(), Dtype(1), multiplier_.mutable_cpu_data()); } @@ -112,7 +113,6 @@ void PReLULayer::Backward_cpu(const vector*>& top, // keep top_diff unchanged. if (this->param_propagate_down_[0]) { Dtype* slope_diff = this->blobs_[0]->mutable_cpu_diff(); - caffe_set(this->blobs_[0]->count(), Dtype(0), slope_diff); for (int i = 0; i < count; ++i) { int c = (i / dim) % channels / div_factor; slope_diff[c] += top_diff[i] * bottom_data[i] * (bottom_data[i] <= 0); diff --git a/src/caffe/layers/prelu_layer.cu b/src/caffe/layers/prelu_layer.cu index fd0eda5d191..e1f20048f60 100644 --- a/src/caffe/layers/prelu_layer.cu +++ b/src/caffe/layers/prelu_layer.cu @@ -75,38 +75,36 @@ void PReLULayer::Backward_gpu(const vector*>& top, bottom_data = bottom_memory_.gpu_data(); } - // Propagte to param + // Propagate to param // Since to write bottom diff will affect top diff if top and bottom blobs // are identical (in-place computaion), we first compute param backward to // keep top_diff unchanged. 
if (this->param_propagate_down_[0]) { Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff(); - // slope_diff is set as 0, then accumulated over batches - caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), slope_diff); int cdim = channels * dim; Dtype dsum = 0.; for (int n = 0; n < bottom[0]->num(); ++n) { - Dtype* temp_buff = multiplier_.mutable_gpu_diff(); // compute element-wise diff // NOLINT_NEXT_LINE(whitespace/operators) - PReLUParamBackward<<<<>>( cdim, top_diff + top[0]->offset(n), - bottom_data + bottom[0]->offset(n), multiplier_.mutable_gpu_diff()); + bottom_data + bottom[0]->offset(n), + backward_buff_.mutable_gpu_diff()); CUDA_POST_KERNEL_CHECK; if (channel_shared_) { Dtype d; - caffe_gpu_dot(channels * dim, multiplier_.gpu_diff(), + caffe_gpu_dot(channels * dim, backward_buff_.gpu_diff(), multiplier_.gpu_data(), &d); dsum += d; } else { caffe_gpu_gemv(CblasNoTrans, channels, dim, 1., - multiplier_.gpu_diff(), multiplier_.gpu_data(), 1., + backward_buff_.gpu_diff(), multiplier_.gpu_data(), 1., slope_diff); } } if (channel_shared_) { - caffe_gpu_set(this->blobs_[0]->count(), Dtype(dsum), slope_diff); + caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum), slope_diff); } } // Propagate to bottom diff --git a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp index 077d949981c..cc236fe1e8e 100644 --- a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp +++ b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp @@ -71,7 +71,7 @@ void SigmoidCrossEntropyLossLayer::Backward_cpu( } #ifdef CPU_ONLY -STUB_GPU(SigmoidCrossEntropyLossLayer); +STUB_GPU_BACKWARD(SigmoidCrossEntropyLossLayer, Backward); #endif INSTANTIATE_CLASS(SigmoidCrossEntropyLossLayer); diff --git a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu index 08f7f492297..547fa80c72f 100644 --- a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu +++ 
b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu @@ -8,26 +8,6 @@ namespace caffe { -template -void SigmoidCrossEntropyLossLayer::Forward_gpu( - const vector*>& bottom, const vector*>& top) { - // The forward pass computes the sigmoid outputs. - sigmoid_bottom_vec_[0] = bottom[0]; - sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_); - // Compute the loss (negative log likelihood) - const int count = bottom[0]->count(); - const int num = bottom[0]->num(); - // Stable version of loss computation from input data - const Dtype* input_data = bottom[0]->cpu_data(); - const Dtype* target = bottom[1]->cpu_data(); - Dtype loss = 0; - for (int i = 0; i < count; ++i) { - loss -= input_data[i] * (target[i] - (input_data[i] >= 0)) - - log(1 + exp(input_data[i] - 2 * input_data[i] * (input_data[i] >= 0))); - } - top[0]->mutable_cpu_data()[0] = loss / num; -} - template void SigmoidCrossEntropyLossLayer::Backward_gpu( const vector*>& top, const vector& propagate_down, @@ -51,7 +31,7 @@ void SigmoidCrossEntropyLossLayer::Backward_gpu( } } -INSTANTIATE_LAYER_GPU_FUNCS(SigmoidCrossEntropyLossLayer); +INSTANTIATE_LAYER_GPU_BACKWARD(SigmoidCrossEntropyLossLayer); } // namespace caffe diff --git a/src/caffe/layers/slice_layer.cu b/src/caffe/layers/slice_layer.cu index e6e65677bd8..796841d3f52 100644 --- a/src/caffe/layers/slice_layer.cu +++ b/src/caffe/layers/slice_layer.cu @@ -6,22 +6,41 @@ namespace caffe { +template +__global__ void Slice(const int nthreads, const Dtype* in_data, + const bool forward, const int num_slices, const int slice_size, + const int bottom_slice_axis, const int top_slice_axis, + const int offset_slice_axis, Dtype* out_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int total_slice_size = slice_size * top_slice_axis; + const int slice_num = index / total_slice_size; + const int slice_index = index % total_slice_size; + const int bottom_index = slice_index + + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size; + if 
(forward) { + out_data[index] = in_data[bottom_index]; + } else { + out_data[bottom_index] = in_data[index]; + } + } +} + template void SliceLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { int offset_slice_axis = 0; const Dtype* bottom_data = bottom[0]->gpu_data(); const int bottom_slice_axis = bottom[0]->shape(slice_axis_); + const bool kForward = true; for (int i = 0; i < top.size(); ++i) { Dtype* top_data = top[i]->mutable_gpu_data(); const int top_slice_axis = top[i]->shape(slice_axis_); - for (int n = 0; n < num_slices_; ++n) { - const int top_offset = n * top_slice_axis * slice_size_; - const int bottom_offset = - (n * bottom_slice_axis + offset_slice_axis) * slice_size_; - caffe_copy(top_slice_axis * slice_size_, - bottom_data + bottom_offset, top_data + top_offset); - } + const int top_slice_size = top_slice_axis * slice_size_; + const int nthreads = top_slice_size * num_slices_; + Slice // NOLINT_NEXT_LINE(whitespace/operators) + <<>>( + nthreads, bottom_data, kForward, num_slices_, slice_size_, + bottom_slice_axis, top_slice_axis, offset_slice_axis, top_data); offset_slice_axis += top_slice_axis; } } @@ -33,16 +52,16 @@ void SliceLayer::Backward_gpu(const vector*>& top, int offset_slice_axis = 0; Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int bottom_slice_axis = bottom[0]->shape(slice_axis_); + const bool kForward = false; for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); const int top_slice_axis = top[i]->shape(slice_axis_); - for (int n = 0; n < num_slices_; ++n) { - const int top_offset = n * top_slice_axis * slice_size_; - const int bottom_offset = - (n * bottom_slice_axis + offset_slice_axis) * slice_size_; - caffe_copy(top_slice_axis * slice_size_, - top_diff + top_offset, bottom_diff + bottom_offset); - } + const int top_slice_size = top_slice_axis * slice_size_; + const int nthreads = top_slice_size * num_slices_; + Slice // NOLINT_NEXT_LINE(whitespace/operators) + <<>>( + nthreads, 
top_diff, kForward, num_slices_, slice_size_, + bottom_slice_axis, top_slice_axis, offset_slice_axis, bottom_diff); offset_slice_axis += top_slice_axis; } } diff --git a/src/caffe/layers/triplet_loss_layer.cpp b/src/caffe/layers/triplet_loss_layer.cpp deleted file mode 100644 index fc8b9fe036f..00000000000 --- a/src/caffe/layers/triplet_loss_layer.cpp +++ /dev/null @@ -1,124 +0,0 @@ -#include -#include - -#include "caffe/layer.hpp" -#include "caffe/loss_layers.hpp" -#include "caffe/util/io.hpp" -#include "caffe/util/math_functions.hpp" - -namespace caffe { - -template -void TripletLossLayer::LayerSetUp( - const vector*>& bottom, const vector*>& top) { - LossLayer::LayerSetUp(bottom, top); - CHECK_EQ(bottom[0]->channels(), bottom[1]->channels()); - CHECK_EQ(bottom[0]->channels(), bottom[2]->channels()); - CHECK_EQ(bottom[0]->height(), 1); - CHECK_EQ(bottom[0]->width(), 1); - CHECK_EQ(bottom[1]->height(), 1); - CHECK_EQ(bottom[1]->width(), 1); - CHECK_EQ(bottom[2]->height(), 1); - CHECK_EQ(bottom[2]->width(), 1); - CHECK_EQ(bottom[3]->channels(), 1); - CHECK_EQ(bottom[3]->height(), 1); - CHECK_EQ(bottom[3]->width(), 1); - diff_pos.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); - diff_neg.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); - diff_sq_pos.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); - diff_sq_neg.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); - dist_sq_.Reshape(bottom[0]->num(), 1, 1, 1); - dist_sq_pos.Reshape(bottom[0]->num(), 1, 1, 1); - dist_sq_neg.Reshape(bottom[0]->num(), 1, 1, 1); - // vector of ones used to sum along channels - summer_vec_.Reshape(bottom[0]->channels(), 1, 1, 1); - for (int i = 0; i < bottom[0]->channels(); ++i) - summer_vec_.mutable_cpu_data()[i] = Dtype(1); -} - -template -void TripletLossLayer::Forward_cpu( - const vector*>& bottom, - const vector*>& top) { - int count = bottom[0]->count(); - caffe_sub( - count, - bottom[0]->cpu_data(), // a - bottom[1]->cpu_data(), // b - 
diff_pos.mutable_cpu_data()); // a_i-b_i - caffe_sub( - count, - bottom[0]->cpu_data(), // a - bottom[2]->cpu_data(), // c - diff_neg.mutable_cpu_data()); // a_i-c_i - const int channels = bottom[0]->channels(); - Dtype margin = this->layer_param_.triplet_loss_param().margin(); - Dtype loss(0.0); - - //Loss component calculated from ab - for (int i = 0; i < bottom[0]->num(); ++i) { - dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(channels, - diff_pos.cpu_data() + (i*channels), diff_pos.cpu_data() + (i*channels)); - //ab is a similar pair - dist_sq_ += dist_sq_pos.cpu_data()[i]; - - - //Loss component calculated from ac - dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(channels, - diff_neg.cpu_data() + (i*channels), diff_neg.cpu_data() + (i*channels)); - //ac is a dissimilar pair - dist_sq_ -= dist_sq_neg.cpu_data()[i]; - loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); - - } - loss = loss / static_cast(bottom[0]->num()) / Dtype(2); - top[0]->mutable_cpu_data()[0] = loss; -} - -template -void TripletLossLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - Dtype margin = this->layer_param_.triplet_loss_param().margin(); - for (int i = 0; i < 3; ++i) { - if (propagate_down[i]) { - const Dtype sign = (i == 0) ? 
1 : -1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(bottom[i]->num()); - int num = bottom[i]->num(); - int channels = bottom[i]->channels(); - for (int j = 0; j < num; ++j) { - Dtype* bout = bottom[i]->mutable_cpu_diff(); - if ((margin-dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - // similar pairs - caffe_cpu_axpby( - channels, - alpha, - diff_pos.cpu_data() + (j*channels), - Dtype(0.0), - bout + (j*channels)); - - // dissimilar pairs - - caffe_cpu_axpby( - channels, - -alpha, - diff_neg.cpu_data() + (j*channels), - Dtype(0.0), - bout + (j*channels)); - - } else { - caffe_set(channels, Dtype(0), bout + (j*channels)); - } - } - } - } -} - -#ifdef CPU_ONLY -STUB_GPU(TripletLossLayer); -#endif - -INSTANTIATE_CLASS(TripletLossLayer); -REGISTER_LAYER_CLASS(TripletLoss); - -} // namespace caffe diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp index 888eec1d501..a18ee63818e 100644 --- a/src/caffe/net.cpp +++ b/src/caffe/net.cpp @@ -79,10 +79,17 @@ void Net::Init(const NetParameter& in_param) { } // Setup layer. const LayerParameter& layer_param = param.layer(layer_id); + if (layer_param.propagate_down_size() > 0) { + CHECK_EQ(layer_param.propagate_down_size(), + layer_param.bottom_size()) + << "propagate_down param must be specified " + << "either 0 or bottom_size times "; + } layers_.push_back(LayerRegistry::CreateLayer(layer_param)); layer_names_.push_back(layer_param.name()); LOG(INFO) << "Creating Layer " << layer_param.name(); bool need_backward = false; + // Figure out this layer's input and output for (int bottom_id = 0; bottom_id < layer_param.bottom_size(); ++bottom_id) { @@ -151,15 +158,33 @@ void Net::Init(const NetParameter& in_param) { // Go through the net backwards to determine which blobs contribute to the // loss. We can skip backward computation for blobs that don't contribute // to the loss. 
+ // Also checks if all bottom blobs don't need backward computation (possible + because of the skip_propagate_down param) and so we can skip backward + computation for the entire layer set blobs_under_loss; + set blobs_skip_backp; for (int layer_id = layers_.size() - 1; layer_id >= 0; --layer_id) { bool layer_contributes_loss = false; + bool layer_skip_propagate_down = true; for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) { const string& blob_name = blob_names_[top_id_vecs_[layer_id][top_id]]; if (layers_[layer_id]->loss(top_id) || (blobs_under_loss.find(blob_name) != blobs_under_loss.end())) { layer_contributes_loss = true; + } + if (blobs_skip_backp.find(blob_name) == blobs_skip_backp.end()) { + layer_skip_propagate_down = false; + } + if (layer_contributes_loss && !layer_skip_propagate_down) break; + } + // If this layer can skip backward computation, also all its bottom blobs + // don't need backpropagation + if (layer_need_backward_[layer_id] && layer_skip_propagate_down) { + layer_need_backward_[layer_id] = false; + for (int bottom_id = 0; bottom_id < bottom_vecs_[layer_id].size(); + ++bottom_id) { + bottom_need_backward_[layer_id][bottom_id] = false; } } if (!layer_contributes_loss) { layer_need_backward_[layer_id] = false; } @@ -178,6 +203,11 @@ void Net::Init(const NetParameter& in_param) { } else { bottom_need_backward_[layer_id][bottom_id] = false; } + if (!bottom_need_backward_[layer_id][bottom_id]) { + const string& blob_name = + blob_names_[bottom_id_vecs_[layer_id][bottom_id]]; + blobs_skip_backp.insert(blob_name); + } } } // Handle force_backward if needed. @@ -367,9 +397,9 @@ void Net::AppendTop(const NetParameter& param, const int layer_id, // Helper for Net::Init: add a new bottom blob to the net. 
template -int Net::AppendBottom(const NetParameter& param, - const int layer_id, const int bottom_id, - set* available_blobs, map* blob_name_to_idx) { +int Net::AppendBottom(const NetParameter& param, const int layer_id, + const int bottom_id, set* available_blobs, + map* blob_name_to_idx) { const LayerParameter& layer_param = param.layer(layer_id); const string& blob_name = layer_param.bottom(bottom_id); if (available_blobs->find(blob_name) == available_blobs->end()) { @@ -381,7 +411,12 @@ int Net::AppendBottom(const NetParameter& param, bottom_vecs_[layer_id].push_back(blobs_[blob_id].get()); bottom_id_vecs_[layer_id].push_back(blob_id); available_blobs->erase(blob_name); - const bool need_backward = blob_need_backward_[blob_id]; + bool propagate_down = true; + // Check if the backpropagation on bottom_id should be skipped + if (layer_param.propagate_down_size() > 0) + propagate_down = layer_param.propagate_down(bottom_id); + const bool need_backward = blob_need_backward_[blob_id] && + propagate_down; bottom_need_backward_[layer_id].push_back(need_backward); return blob_id; } @@ -410,7 +445,7 @@ void Net::AppendParam(const NetParameter& param, const int layer_id, // (i.e., not given a param_name) or explicitly given a name that we // haven't already seen. 
param_owners_.push_back(-1); - if (param_size) { + if (param_name.size()) { param_names_index_[param_name] = net_param_id; } } else { @@ -470,7 +505,6 @@ Dtype Net::ForwardFromTo(int start, int end) { } for (int i = start; i <= end; ++i) { // LOG(ERROR) << "Forwarding " << layer_names_[i]; - layers_[i]->Reshape(bottom_vecs_[i], top_vecs_[i]); Dtype layer_loss = layers_[i]->Forward(bottom_vecs_[i], top_vecs_[i]); loss += layer_loss; if (debug_info_) { ForwardDebugInfo(i); } diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index edf7ae81d58..81a8c69d88e 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -49,6 +49,14 @@ message FillerParameter { // The expected number of non-zero output weights for a given input in // Gaussian filler -- the default -1 means don't perform sparsification. optional int32 sparse = 7 [default = -1]; + // Normalize the filler variance by fan_in, fan_out, or their average. + // Applies to 'xavier' and 'msra' fillers. + enum VarianceNorm { + FAN_IN = 0; + FAN_OUT = 1; + AVERAGE = 2; + } + optional VarianceNorm variance_norm = 8 [default = FAN_IN]; } message NetParameter { @@ -88,7 +96,7 @@ message NetParameter { // NOTE // Update the next available ID when you add a new SolverParameter field. // -// SolverParameter next available ID: 36 (last added: clip_gradients) +// SolverParameter next available ID: 37 (last added: iter_size) message SolverParameter { ////////////////////////////////////////////////////////////////////////////// // Specifying the train and test networks @@ -141,6 +149,8 @@ message SolverParameter { // Display the loss averaged over the last average_loss iterations optional int32 average_loss = 33 [default = 1]; optional int32 max_iter = 7; // the maximum number of iterations + // accumulate gradients over `iter_size` x `batch_size` instances + optional int32 iter_size = 36 [default = 1]; optional string lr_policy = 8; // The learning rate decay policy. 
optional float gamma = 9; // The parameter to compute the learning rate. optional float power = 10; // The parameter to compute the learning rate. @@ -259,7 +269,7 @@ message ParamSpec { // NOTE // Update the next available ID when you add a new LayerParameter field. // -// LayerParameter next available layer-specific ID: 132 (last added: prelu_param) +// LayerParameter next available layer-specific ID: 137 (last added: reduction_param) message LayerParameter { optional string name = 1; // the layer name optional string type = 2; // the layer type @@ -280,6 +290,10 @@ message LayerParameter { // The blobs containing the numeric parameters of the layer. repeated BlobProto blobs = 7; + + // Specifies on which bottoms the backpropagation should be skipped. + // The size must be either 0 or equal to the number of bottoms. + repeated bool propagate_down = 11; // Rules controlling whether and when a layer is included in the network, // based on the current NetState. You may specify a non-zero number of rules @@ -312,12 +326,14 @@ message LayerParameter { optional DummyDataParameter dummy_data_param = 109; optional EltwiseParameter eltwise_param = 110; optional ExpParameter exp_param = 111; + optional FlattenParameter flatten_param = 135; optional HDF5DataParameter hdf5_data_param = 112; optional HDF5OutputParameter hdf5_output_param = 113; optional HingeLossParameter hinge_loss_param = 114; optional ImageDataParameter image_data_param = 115; optional InfogainLossParameter infogain_loss_param = 116; optional InnerProductParameter inner_product_param = 117; + optional LogParameter log_param = 134; optional LRNParameter lrn_param = 118; optional MemoryDataParameter memory_data_param = 119; optional MVNParameter mvn_param = 120; @@ -325,14 +341,16 @@ message LayerParameter { optional PowerParameter power_param = 122; optional PReLUParameter prelu_param = 131; optional PythonParameter python_param = 130; + optional ReductionParameter reduction_param = 136; optional 
ReLUParameter relu_param = 123; + optional ReshapeParameter reshape_param = 133; optional SigmoidParameter sigmoid_param = 124; optional SoftmaxParameter softmax_param = 125; + optional SPPParameter spp_param = 132; optional SliceParameter slice_param = 126; optional TanHParameter tanh_param = 127; optional ThresholdParameter threshold_param = 128; optional WindowDataParameter window_data_param = 129; - optional TripletLossParameter triplet_loss_param = 132; } // Message that stores parameters used to apply transformation @@ -352,6 +370,10 @@ message TransformationParameter { // or can be repeated the same number of times as channels // (would subtract them from the corresponding channel) repeated float mean_value = 5; + // Force the decoded image to have 3 color channels. + optional bool force_color = 6 [default = false]; + // Force the decoded image to have 1 color channels. + optional bool force_gray = 7 [default = false]; } // Message that stores parameters shared by loss layers @@ -363,7 +385,9 @@ message LossParameter { optional bool normalize = 2 [default = true]; } -// Message that stores parameters used by AccuracyLayer +// Messages that store parameters used by individual layer types follow, in +// alphabetical order. + message AccuracyParameter { // When computing accuracy, count as correct by comparing the true label to // the top k scoring classes. By default, only compare to the top scoring @@ -381,14 +405,12 @@ message AccuracyParameter { optional int32 ignore_label = 3; } -// Message that stores parameters used by ArgMaxLayer message ArgMaxParameter { // If true produce pairs (argmax, maxval) optional bool out_max_val = 1 [default = false]; optional uint32 top_k = 2 [default = 1]; } -// Message that stores parameters used by ConcatLayer message ConcatParameter { // The axis along which to concatenate -- may be negative to index from the // end (e.g., -1 for the last axis). 
Other axes must have the @@ -400,16 +422,18 @@ message ConcatParameter { optional uint32 concat_dim = 1 [default = 1]; } -// Message that stores parameters used by ContrastiveLossLayer message ContrastiveLossParameter { - //margin for dissimilar pair - optional float margin = 1 [default = 1.0]; -} -message TripletLossParameter { - //margin for negative triplet + // margin for dissimilar pair optional float margin = 1 [default = 1.0]; + // The first implementation of this cost did not exactly match the cost of + // Hadsell et al 2006 -- using (margin - d^2) instead of (margin - d)^2. + // legacy_version = false (the default) uses (margin - d)^2 as proposed in the + // Hadsell paper. New models should probably use this version. + // legacy_version = true uses (margin - d^2). This is kept to support / + // reproduce existing models and results + optional bool legacy_version = 2 [default = false]; } -// Message that stores parameters used by ConvolutionLayer + message ConvolutionParameter { optional uint32 num_output = 1; // The number of outputs for the layer optional bool bias_term = 2 [default = true]; // whether to have bias terms @@ -435,7 +459,6 @@ message ConvolutionParameter { optional Engine engine = 15 [default = DEFAULT]; } -// Message that stores parameters used by DataLayer message DataParameter { enum DB { LEVELDB = 0; @@ -466,12 +489,10 @@ message DataParameter { optional bool force_encoded_color = 9 [default = false]; } -// Message that stores parameters used by DropoutLayer message DropoutParameter { optional float dropout_ratio = 1 [default = 0.5]; // dropout ratio } -// Message that stores parameters used by DummyDataLayer. // DummyDataLayer fills any number of arbitrarily shaped blobs with random // (or constant) data generated by "Fillers" (see "message FillerParameter"). 
message DummyDataParameter { @@ -491,7 +512,6 @@ message DummyDataParameter { repeated uint32 width = 5; } -// Message that stores parameters used by EltwiseLayer message EltwiseParameter { enum EltwiseOp { PROD = 0; @@ -506,7 +526,6 @@ message EltwiseParameter { optional bool stable_prod_grad = 3 [default = true]; } -// Message that stores parameters used by ExpLayer message ExpParameter { // ExpLayer computes outputs y = base ^ (shift + scale * x), for base > 0. // Or if base is set to the default (-1), base is set to e, @@ -516,6 +535,18 @@ message ExpParameter { optional float shift = 3 [default = 0.0]; } +/// Message that stores parameters used by FlattenLayer +message FlattenParameter { + // The first axis to flatten: all preceding axes are retained in the output. + // May be negative to index from the end (e.g., -1 for the last axis). + optional int32 axis = 1 [default = 1]; + + // The last axis to flatten: all following axes are retained in the output. + // May be negative to index from the end (e.g., the default -1 for the last + // axis). + optional int32 end_axis = 2 [default = -1]; +} + // Message that stores parameters used by HDF5DataLayer message HDF5DataParameter { // Specify the data source. @@ -531,7 +562,6 @@ message HDF5DataParameter { optional bool shuffle = 3 [default = false]; } -// Message that stores parameters used by HDF5OutputLayer message HDF5OutputParameter { optional string file_name = 1; } @@ -545,7 +575,6 @@ message HingeLossParameter { optional Norm norm = 1 [default = L1]; } -// Message that stores parameters used by ImageDataLayer message ImageDataParameter { // Specify the data source. optional string source = 1; @@ -577,13 +606,11 @@ message ImageDataParameter { optional string root_folder = 12 [default = ""]; } -// Message that stores parameters InfogainLossLayer message InfogainLossParameter { // Specify the infogain matrix source. 
optional string source = 1; } -// Message that stores parameters used by InnerProductLayer message InnerProductParameter { optional uint32 num_output = 1; // The number of outputs for the layer optional bool bias_term = 2 [default = true]; // whether to have bias terms @@ -596,6 +623,16 @@ message InnerProductParameter { optional int32 axis = 5 [default = 1]; } +// Message that stores parameters used by LogLayer +message LogParameter { + // LogLayer computes outputs y = log_base(shift + scale * x), for base > 0. + // Or if base is set to the default (-1), base is set to e, + // so y = ln(shift + scale * x) = log_e(shift + scale * x) + optional float base = 1 [default = -1.0]; + optional float scale = 2 [default = 1.0]; + optional float shift = 3 [default = 0.0]; +} + // Message that stores parameters used by LRNLayer message LRNParameter { optional uint32 local_size = 1 [default = 5]; @@ -609,7 +646,6 @@ message LRNParameter { optional float k = 5 [default = 1.]; } -// Message that stores parameters used by MemoryDataLayer message MemoryDataParameter { optional uint32 batch_size = 1; optional uint32 channels = 2; @@ -617,16 +653,17 @@ message MemoryDataParameter { optional uint32 width = 4; } -// Message that stores parameters used by MVNLayer message MVNParameter { // This parameter can be set to false to normalize mean only optional bool normalize_variance = 1 [default = true]; // This parameter can be set to true to perform DNN-like MVN optional bool across_channels = 2 [default = false]; + + // Epsilon for not dividing by zero while normalizing variance + optional float eps = 3 [default = 1e-9]; } -// Message that stores parameters used by PoolingLayer message PoolingParameter { enum PoolMethod { MAX = 0; @@ -656,7 +693,6 @@ message PoolingParameter { optional bool global_pooling = 12 [default = false]; } -// Message that stores parameters used by PowerLayer message PowerParameter { // PowerLayer computes outputs y = (shift + scale * x) ^ power. 
optional float power = 1 [default = 1.0]; @@ -664,12 +700,40 @@ message PowerParameter { optional float shift = 3 [default = 0.0]; } -// Message that stores parameters used by PythonLayer message PythonParameter { optional string module = 1; optional string layer = 2; } +// Message that stores parameters used by ReductionLayer +message ReductionParameter { + enum ReductionOp { + SUM = 1; + ASUM = 2; + SUMSQ = 3; + MEAN = 4; + } + + optional ReductionOp operation = 1 [default = SUM]; // reduction operation + + // The first axis to reduce to a scalar -- may be negative to index from the + // end (e.g., -1 for the last axis). + // (Currently, only reduction along ALL "tail" axes is supported; reduction + // of axis M through N, where N < num_axes - 1, is unsupported.) + // Suppose we have an n-axis bottom Blob with shape: + // (d0, d1, d2, ..., d(m-1), dm, d(m+1), ..., d(n-1)). + // If axis == m, the output Blob will have shape + // (d0, d1, d2, ..., d(m-1)), + // and the ReductionOp operation is performed (d0 * d1 * d2 * ... * d(m-1)) + // times, each including (dm * d(m+1) * ... * d(n-1)) individual data. + // If axis == 0 (the default), the output Blob always has the empty shape + // (count 1), performing reduction across the entire input -- + // often useful for creating new loss functions. + optional int32 axis = 2 [default = 0]; + + optional float coeff = 3 [default = 1.0]; // coefficient for output +} + // Message that stores parameters used by ReLULayer message ReLUParameter { // Allow non-zero slope for negative inputs to speed up optimization @@ -686,7 +750,70 @@ message ReLUParameter { optional Engine engine = 2 [default = DEFAULT]; } -// Message that stores parameters used by SigmoidLayer +message ReshapeParameter { + // Specify the output dimensions. If some of the dimensions are set to 0, + // the corresponding dimension from the bottom layer is used (unchanged). 
+ // Exactly one dimension may be set to -1, in which case its value is + // inferred from the count of the bottom blob and the remaining dimensions. + // For example, suppose we want to reshape a 2D blob "input" with shape 2 x 8: + // + // layer { + // type: "Reshape" bottom: "input" top: "output" + // reshape_param { ... } + // } + // + // If "input" is 2D with shape 2 x 8, then the following reshape_param + // specifications are all equivalent, producing a 3D blob "output" with shape + // 2 x 2 x 4: + // + // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } + // reshape_param { shape { dim: 0 dim: 2 dim: 4 } } + // reshape_param { shape { dim: 0 dim: 2 dim: -1 } } + // reshape_param { shape { dim: -1 dim: 0 dim: 2 } } + // + optional BlobShape shape = 1; + + // axis and num_axes control the portion of the bottom blob's shape that are + // replaced by (included in) the reshape. By default (axis == 0 and + // num_axes == -1), the entire bottom blob shape is included in the reshape, + // and hence the shape field must specify the entire output shape. + // + // axis may be non-zero to retain some portion of the beginning of the input + // shape (and may be negative to index from the end; e.g., -1 to begin the + // reshape after the last axis, including nothing in the reshape, + // -2 to include only the last axis, etc.). + // + // For example, suppose "input" is a 2D blob with shape 2 x 8. + // Then the following ReshapeLayer specifications are all equivalent, + // producing a blob "output" with shape 2 x 2 x 4: + // + // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } + // reshape_param { shape { dim: 2 dim: 4 } axis: 1 } + // reshape_param { shape { dim: 2 dim: 4 } axis: -3 } + // + // num_axes specifies the extent of the reshape. + // If num_axes >= 0 (and axis >= 0), the reshape will be performed only on + // input axes in the range [axis, axis+num_axes]. + // num_axes may also be -1, the default, to include all remaining axes + // (starting from axis). 
+ // + // For example, suppose "input" is a 2D blob with shape 2 x 8. + // Then the following ReshapeLayer specifications are equivalent, + // producing a blob "output" with shape 1 x 2 x 8. + // + // reshape_param { shape { dim: 1 dim: 2 dim: 8 } } + // reshape_param { shape { dim: 1 dim: 2 } num_axes: 1 } + // reshape_param { shape { dim: 1 } num_axes: 0 } + // + // On the other hand, these would produce output blob shape 2 x 1 x 8: + // + // reshape_param { shape { dim: 2 dim: 1 dim: 8 } } + // reshape_param { shape { dim: 1 } axis: 1 num_axes: 0 } + // + optional int32 axis = 2 [default = 0]; + optional int32 num_axes = 3 [default = -1]; +} + message SigmoidParameter { enum Engine { DEFAULT = 0; @@ -696,7 +823,6 @@ message SigmoidParameter { optional Engine engine = 1 [default = DEFAULT]; } -// Message that stores parameters used by SliceLayer message SliceParameter { // The axis along which to slice -- may be negative to index from the end // (e.g., -1 for the last axis). @@ -723,7 +849,6 @@ message SoftmaxParameter { optional int32 axis = 2 [default = 1]; } -// Message that stores parameters used by TanHLayer message TanHParameter { enum Engine { DEFAULT = 0; @@ -733,12 +858,10 @@ message TanHParameter { optional Engine engine = 1 [default = DEFAULT]; } -// Message that stores parameters used by ThresholdLayer message ThresholdParameter { optional float threshold = 1 [default = 0]; // Strictly positive values } -// Message that stores parameters used by WindowDataLayer message WindowDataParameter { // Specify the data source. 
optional string source = 1; @@ -772,6 +895,22 @@ message WindowDataParameter { optional string root_folder = 13 [default = ""]; } +message SPPParameter { + enum PoolMethod { + MAX = 0; + AVE = 1; + STOCHASTIC = 2; + } + optional uint32 pyramid_height = 1; + optional PoolMethod pool = 2 [default = MAX]; // The pooling method + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 6 [default = DEFAULT]; +} + // DEPRECATED: use LayerParameter. message V1LayerParameter { repeated string bottom = 2; @@ -818,7 +957,6 @@ message V1LayerParameter { SPLIT = 22; SLICE = 33; TANH = 23; - TRIPLET_LOSS = 40; WINDOW_DATA = 24; THRESHOLD = 31; } @@ -864,7 +1002,6 @@ message V1LayerParameter { optional TransformationParameter transform_param = 36; optional LossParameter loss_param = 42; optional V0LayerParameter layer = 1; - optional TripletLossParameter triplet_loss_param = 43; } // DEPRECATED: V0LayerParameter is the old way of specifying layer parameters @@ -961,7 +1098,6 @@ message V0LayerParameter { optional HDF5OutputParameter hdf5_output_param = 1001; } -// Message that stores parameters used by PReLULayer message PReLUParameter { // Parametric ReLU described in K. He et al, Delving Deep into Rectifiers: // Surpassing Human-Level Performance on ImageNet Classification, 2015. 
diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp index 096980dd7af..aabe0edec80 100644 --- a/src/caffe/solver.cpp +++ b/src/caffe/solver.cpp @@ -167,7 +167,26 @@ void Solver::Step(int iters) { vector losses; Dtype smoothed_loss = 0; - for (; iter_ < stop_iter; ++iter_) { + while (iter_ < stop_iter) { + // zero-init the params + for (int i = 0; i < net_->params().size(); ++i) { + shared_ptr > blob = net_->params()[i]; + switch (Caffe::mode()) { + case Caffe::CPU: + caffe_set(blob->count(), static_cast(0), + blob->mutable_cpu_diff()); + break; + case Caffe::GPU: +#ifndef CPU_ONLY + caffe_gpu_set(blob->count(), static_cast(0), + blob->mutable_gpu_diff()); +#else + NO_GPU; +#endif + break; + } + } + if (param_.test_interval() && iter_ % param_.test_interval() == 0 && (iter_ > 0 || param_.test_initialization())) { TestAll(); @@ -175,7 +194,13 @@ void Solver::Step(int iters) { const bool display = param_.display() && iter_ % param_.display() == 0; net_->set_debug_info(display && param_.debug_info()); - Dtype loss = net_->ForwardBackward(bottom_vec); + // accumulate the loss and gradient + Dtype loss = 0; + for (int i = 0; i < param_.iter_size(); ++i) { + loss += net_->ForwardBackward(bottom_vec); + } + loss /= param_.iter_size(); + // average the loss across iterations for smoothed reporting if (losses.size() < average_loss) { losses.push_back(loss); int size = losses.size(); @@ -207,11 +232,14 @@ void Solver::Step(int iters) { } } } - ComputeUpdateValue(); - net_->Update(); + ApplyUpdate(); + + // Increment the internal iter_ counter -- its value should always indicate + // the number of times the weights have been updated. + ++iter_; // Save a snapshot if needed. 
- if (param_.snapshot() && (iter_ + 1) % param_.snapshot() == 0) { + if (param_.snapshot() && iter_ % param_.snapshot() == 0) { Snapshot(); } } @@ -327,15 +355,14 @@ void Solver::Snapshot() { string model_filename, snapshot_filename; const int kBufferSize = 20; char iter_str_buffer[kBufferSize]; - // Add one to iter_ to get the number of iterations that have completed. - snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_ + 1); + snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_); filename += iter_str_buffer; model_filename = filename + ".caffemodel"; LOG(INFO) << "Snapshotting to " << model_filename; WriteProtoToBinaryFile(net_param, model_filename.c_str()); SolverState state; SnapshotSolverState(&state); - state.set_iter(iter_ + 1); + state.set_iter(iter_); state.set_learned_net(model_filename); state.set_current_step(current_step_); snapshot_filename = filename + ".solverstate"; @@ -453,95 +480,138 @@ void SGDSolver::ClipGradients() { } template -void SGDSolver::ComputeUpdateValue() { - const vector > >& net_params = this->net_->params(); - const vector& net_params_lr = this->net_->params_lr(); - const vector& net_params_weight_decay = - this->net_->params_weight_decay(); - // get the learning rate +void SGDSolver::ApplyUpdate() { Dtype rate = GetLearningRate(); if (this->param_.display() && this->iter_ % this->param_.display() == 0) { LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; } ClipGradients(); - Dtype momentum = this->param_.momentum(); + for (int param_id = 0; param_id < this->net_->params().size(); ++param_id) { + Normalize(param_id); + Regularize(param_id); + ComputeUpdateValue(param_id, rate); + } + this->net_->Update(); +} + +template +void SGDSolver::Normalize(int param_id) { + if (this->param_.iter_size() == 1) { return; } + // Scale gradient to counterbalance accumulation. + const vector > >& net_params = this->net_->params(); + const Dtype accum_normalization = Dtype(1.) 
/ this->param_.iter_size(); + switch (Caffe::mode()) { + case Caffe::CPU: { + caffe_scal(net_params[param_id]->count(), accum_normalization, + net_params[param_id]->mutable_cpu_diff()); + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + caffe_gpu_scal(net_params[param_id]->count(), accum_normalization, + net_params[param_id]->mutable_gpu_diff()); +#else + NO_GPU; +#endif + break; + } + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +void SGDSolver::Regularize(int param_id) { + const vector > >& net_params = this->net_->params(); + const vector& net_params_weight_decay = + this->net_->params_weight_decay(); Dtype weight_decay = this->param_.weight_decay(); string regularization_type = this->param_.regularization_type(); + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; switch (Caffe::mode()) { - case Caffe::CPU: - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - // Compute the value to history, and then copy them to the blob's diff. 
- Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else if (regularization_type == "L1") { - caffe_cpu_sign(net_params[param_id]->count(), - net_params[param_id]->cpu_data(), - temp_[param_id]->mutable_cpu_data()); - caffe_axpy(net_params[param_id]->count(), - local_decay, - temp_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } + case Caffe::CPU: { + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else if (regularization_type == "L1") { + caffe_cpu_sign(net_params[param_id]->count(), + net_params[param_id]->cpu_data(), + temp_[param_id]->mutable_cpu_data()); + caffe_axpy(net_params[param_id]->count(), + local_decay, + temp_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; } - - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->cpu_diff(), momentum, - history_[param_id]->mutable_cpu_data()); - // copy - caffe_copy(net_params[param_id]->count(), - history_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); } break; - case Caffe::GPU: + } + case Caffe::GPU: { #ifndef CPU_ONLY - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - // Compute the value to history, and then copy them to the blob's diff. 
- Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else if (regularization_type == "L1") { - caffe_gpu_sign(net_params[param_id]->count(), - net_params[param_id]->gpu_data(), - temp_[param_id]->mutable_gpu_data()); - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - temp_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else if (regularization_type == "L1") { + caffe_gpu_sign(net_params[param_id]->count(), + net_params[param_id]->gpu_data(), + temp_[param_id]->mutable_gpu_data()); + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + temp_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; } - - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->gpu_diff(), momentum, - history_[param_id]->mutable_gpu_data()); - // copy - caffe_copy(net_params[param_id]->count(), - history_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); } #else NO_GPU; #endif break; + } + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { + const vector > >& net_params = this->net_->params(); + const vector& net_params_lr = this->net_->params_lr(); + Dtype momentum = this->param_.momentum(); + Dtype 
local_rate = rate * net_params_lr[param_id]; + // Compute the update to history, then copy it to the parameter diff. + switch (Caffe::mode()) { + case Caffe::CPU: { + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), momentum, + history_[param_id]->mutable_cpu_data()); + caffe_copy(net_params[param_id]->count(), + history_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), momentum, + history_[param_id]->mutable_gpu_data()); + caffe_copy(net_params[param_id]->count(), + history_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); +#else + NO_GPU; +#endif + break; + } default: LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); } @@ -568,252 +638,138 @@ void SGDSolver::RestoreSolverState(const SolverState& state) { } template -void NesterovSolver::ComputeUpdateValue() { +void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { const vector > >& net_params = this->net_->params(); const vector& net_params_lr = this->net_->params_lr(); - const vector& net_params_weight_decay = - this->net_->params_weight_decay(); - // get the learning rate - Dtype rate = this->GetLearningRate(); - if (this->param_.display() && this->iter_ % this->param_.display() == 0) { - LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; - } - SGDSolver::ClipGradients(); Dtype momentum = this->param_.momentum(); - Dtype weight_decay = this->param_.weight_decay(); - string regularization_type = this->param_.regularization_type(); + Dtype local_rate = rate * net_params_lr[param_id]; switch (Caffe::mode()) { - case Caffe::CPU: - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - // save history momentum for stepping back - caffe_copy(net_params[param_id]->count(), - this->history_[param_id]->cpu_data(), - 
this->update_[param_id]->mutable_cpu_data()); - - Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else if (regularization_type == "L1") { - caffe_cpu_sign(net_params[param_id]->count(), - net_params[param_id]->cpu_data(), - this->temp_[param_id]->mutable_cpu_data()); - caffe_axpy(net_params[param_id]->count(), - local_decay, - this->temp_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } - } - - // update history - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->cpu_diff(), momentum, - this->history_[param_id]->mutable_cpu_data()); - - // compute udpate: step back then over step - caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, - this->history_[param_id]->cpu_data(), -momentum, - this->update_[param_id]->mutable_cpu_data()); - - // copy - caffe_copy(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } + case Caffe::CPU: { + // save history momentum for stepping back + caffe_copy(net_params[param_id]->count(), + this->history_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + // update history + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), momentum, + this->history_[param_id]->mutable_cpu_data()); + + // compute update: step back then over step + caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, + this->history_[param_id]->cpu_data(), -momentum, + this->update_[param_id]->mutable_cpu_data()); + + // copy + caffe_copy(net_params[param_id]->count(), + 
this->update_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); break; - case Caffe::GPU: + } + case Caffe::GPU: { #ifndef CPU_ONLY - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - // save history momentum for stepping back - caffe_copy(net_params[param_id]->count(), - this->history_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else if (regularization_type == "L1") { - caffe_gpu_sign(net_params[param_id]->count(), - net_params[param_id]->gpu_data(), - this->temp_[param_id]->mutable_gpu_data()); - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - this->temp_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } - } - - // update history - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->gpu_diff(), momentum, - this->history_[param_id]->mutable_gpu_data()); - - // compute udpate: step back then over step - caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, - this->history_[param_id]->gpu_data(), -momentum, - this->update_[param_id]->mutable_gpu_data()); - - // copy - caffe_copy(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } + // save history momentum for stepping back + caffe_copy(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + // update history + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), 
momentum, + this->history_[param_id]->mutable_gpu_data()); + + // compute update: step back then over step + caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, + this->history_[param_id]->gpu_data(), -momentum, + this->update_[param_id]->mutable_gpu_data()); + + // copy + caffe_copy(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); #else NO_GPU; #endif break; + } default: LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); } } template -void AdaGradSolver::ComputeUpdateValue() { +void AdaGradSolver::ComputeUpdateValue(int param_id, Dtype rate) { const vector > >& net_params = this->net_->params(); const vector& net_params_lr = this->net_->params_lr(); - const vector& net_params_weight_decay = - this->net_->params_weight_decay(); - // get the learning rate - Dtype rate = this->GetLearningRate(); Dtype delta = this->param_.delta(); - if (this->param_.display() && this->iter_ % this->param_.display() == 0) { - LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; - } - SGDSolver::ClipGradients(); - Dtype weight_decay = this->param_.weight_decay(); - string regularization_type = this->param_.regularization_type(); + Dtype local_rate = rate * net_params_lr[param_id]; switch (Caffe::mode()) { - case Caffe::CPU: - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else if (regularization_type == "L1") { - caffe_cpu_sign(net_params[param_id]->count(), - net_params[param_id]->cpu_data(), - this->temp_[param_id]->mutable_cpu_data()); - caffe_axpy(net_params[param_id]->count(), - local_decay, - 
this->temp_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } - } - - // compute square of gradient in update - caffe_powx(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), Dtype(2), - this->update_[param_id]->mutable_cpu_data()); - - // update history - caffe_add(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), - this->history_[param_id]->cpu_data(), - this->history_[param_id]->mutable_cpu_data()); - - // prepare update - caffe_powx(net_params[param_id]->count(), - this->history_[param_id]->cpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_cpu_data()); - - caffe_add_scalar(net_params[param_id]->count(), - delta, this->update_[param_id]->mutable_cpu_data()); - - caffe_div(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), - this->update_[param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); - - // scale and copy - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->cpu_data(), Dtype(0), - net_params[param_id]->mutable_cpu_diff()); - } + case Caffe::CPU: { + // compute square of gradient in update + caffe_powx(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), Dtype(2), + this->update_[param_id]->mutable_cpu_data()); + + // update history + caffe_add(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), + this->history_[param_id]->cpu_data(), + this->history_[param_id]->mutable_cpu_data()); + + // prepare update + caffe_powx(net_params[param_id]->count(), + this->history_[param_id]->cpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_cpu_data()); + + caffe_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_cpu_data()); + + caffe_div(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), + this->update_[param_id]->cpu_data(), + 
this->update_[param_id]->mutable_cpu_data()); + + // scale and copy + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->cpu_data(), Dtype(0), + net_params[param_id]->mutable_cpu_diff()); break; - case Caffe::GPU: + } + case Caffe::GPU: { #ifndef CPU_ONLY - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else if (regularization_type == "L1") { - caffe_gpu_sign(net_params[param_id]->count(), - net_params[param_id]->gpu_data(), - this->temp_[param_id]->mutable_gpu_data()); - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - this->temp_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } - } - - // compute square of gradient in update - caffe_gpu_powx(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), Dtype(2), - this->update_[param_id]->mutable_gpu_data()); - - // update history - caffe_gpu_add(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), - this->history_[param_id]->gpu_data(), - this->history_[param_id]->mutable_gpu_data()); - - // prepare update - caffe_gpu_powx(net_params[param_id]->count(), - this->history_[param_id]->gpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_add_scalar(net_params[param_id]->count(), - delta, this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_div(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), - this->update_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - // scale and copy - 
caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->gpu_data(), Dtype(0), - net_params[param_id]->mutable_gpu_diff()); - } + // compute square of gradient in update + caffe_gpu_powx(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), Dtype(2), + this->update_[param_id]->mutable_gpu_data()); + + // update history + caffe_gpu_add(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + this->history_[param_id]->gpu_data(), + this->history_[param_id]->mutable_gpu_data()); + + // prepare update + caffe_gpu_powx(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_div(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), + this->update_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + // scale and copy + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->gpu_data(), Dtype(0), + net_params[param_id]->mutable_gpu_diff()); #else NO_GPU; #endif break; + } default: LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); } diff --git a/src/caffe/test/test_accuracy_layer.cpp b/src/caffe/test/test_accuracy_layer.cpp index 6cbf51df45e..c14b67cc0e9 100644 --- a/src/caffe/test/test_accuracy_layer.cpp +++ b/src/caffe/test/test_accuracy_layer.cpp @@ -16,7 +16,7 @@ namespace caffe { template -class AccuracyLayerTest : public ::testing::Test { +class AccuracyLayerTest : public CPUDeviceTest { protected: AccuracyLayerTest() : blob_bottom_data_(new Blob()), @@ -92,7 +92,6 @@ TYPED_TEST(AccuracyLayerTest, TestSetupTopK) { TYPED_TEST(AccuracyLayerTest, TestForwardCPU) { LayerParameter layer_param; - Caffe::set_mode(Caffe::CPU); AccuracyLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, 
this->blob_top_vec_); @@ -118,7 +117,6 @@ TYPED_TEST(AccuracyLayerTest, TestForwardCPU) { } TYPED_TEST(AccuracyLayerTest, TestForwardWithSpatialAxes) { - Caffe::set_mode(Caffe::CPU); this->blob_bottom_data_->Reshape(2, 10, 4, 5); vector label_shape(3); label_shape[0] = 2; label_shape[1] = 4; label_shape[2] = 5; @@ -162,7 +160,6 @@ TYPED_TEST(AccuracyLayerTest, TestForwardWithSpatialAxes) { } TYPED_TEST(AccuracyLayerTest, TestForwardIgnoreLabel) { - Caffe::set_mode(Caffe::CPU); LayerParameter layer_param; const TypeParam kIgnoreLabelValue = -1; layer_param.mutable_accuracy_param()->set_ignore_label(kIgnoreLabelValue); diff --git a/src/caffe/test/test_argmax_layer.cpp b/src/caffe/test/test_argmax_layer.cpp index 3487d42f21e..895c3d372ff 100644 --- a/src/caffe/test/test_argmax_layer.cpp +++ b/src/caffe/test/test_argmax_layer.cpp @@ -13,13 +13,12 @@ namespace caffe { template -class ArgMaxLayerTest : public ::testing::Test { +class ArgMaxLayerTest : public CPUDeviceTest { protected: ArgMaxLayerTest() : blob_bottom_(new Blob(10, 20, 1, 1)), blob_top_(new Blob()), top_k_(5) { - Caffe::set_mode(Caffe::CPU); Caffe::set_random_seed(1701); // fill the values FillerParameter filler_param; diff --git a/src/caffe/test/test_contrastive_loss_layer.cpp b/src/caffe/test/test_contrastive_loss_layer.cpp index d269fbc26f2..1e9447cbc51 100644 --- a/src/caffe/test/test_contrastive_loss_layer.cpp +++ b/src/caffe/test/test_contrastive_loss_layer.cpp @@ -22,15 +22,15 @@ class ContrastiveLossLayerTest : public MultiDeviceTest { protected: ContrastiveLossLayerTest() - : blob_bottom_data_i_(new Blob(128, 10, 1, 1)), - blob_bottom_data_j_(new Blob(128, 10, 1, 1)), - blob_bottom_y_(new Blob(128, 1, 1, 1)), + : blob_bottom_data_i_(new Blob(512, 2, 1, 1)), + blob_bottom_data_j_(new Blob(512, 2, 1, 1)), + blob_bottom_y_(new Blob(512, 1, 1, 1)), blob_top_loss_(new Blob()) { // fill the values FillerParameter filler_param; - filler_param.set_mean(0.0); - filler_param.set_std(0.3); // distances~=1.0 
to test both sides of margin - GaussianFiller filler(filler_param); + filler_param.set_min(-1.0); + filler_param.set_max(1.0); // distances~=1.0 to test both sides of margin + UniformFiller filler(filler_param); filler.Fill(this->blob_bottom_data_i_); blob_bottom_vec_.push_back(blob_bottom_data_i_); filler.Fill(this->blob_bottom_data_j_); @@ -79,7 +79,8 @@ TYPED_TEST(ContrastiveLossLayerTest, TestForward) { if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs loss += dist_sq; } else { - loss += std::max(margin-dist_sq, Dtype(0)); + Dtype dist = std::max(margin - sqrt(dist_sq), 0.0); + loss += dist*dist; } } loss /= static_cast(num) * Dtype(2); @@ -99,4 +100,47 @@ TYPED_TEST(ContrastiveLossLayerTest, TestGradient) { this->blob_top_vec_, 1); } +TYPED_TEST(ContrastiveLossLayerTest, TestForwardLegacy) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_contrastive_loss_param()->set_legacy_version(true); + ContrastiveLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // manually compute to compare + const Dtype margin = layer_param.contrastive_loss_param().margin(); + const int num = this->blob_bottom_data_i_->num(); + const int channels = this->blob_bottom_data_i_->channels(); + Dtype loss(0); + for (int i = 0; i < num; ++i) { + Dtype dist_sq(0); + for (int j = 0; j < channels; ++j) { + Dtype diff = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - + this->blob_bottom_data_j_->cpu_data()[i*channels+j]; + dist_sq += diff*diff; + } + if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs + loss += dist_sq; + } else { + loss += std::max(margin - dist_sq, Dtype(0.0)); + } + } + loss /= static_cast(num) * Dtype(2); + EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); +} + +TYPED_TEST(ContrastiveLossLayerTest, TestGradientLegacy) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; 
+ layer_param.mutable_contrastive_loss_param()->set_legacy_version(true); + ContrastiveLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + GradientChecker checker(1e-2, 1e-2, 1701); + // check the gradient for the first two bottom layers + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 1); +} + } // namespace caffe diff --git a/src/caffe/test/test_convolution_layer.cpp b/src/caffe/test/test_convolution_layer.cpp index c1fe3b58c58..67d41fff844 100644 --- a/src/caffe/test/test_convolution_layer.cpp +++ b/src/caffe/test/test_convolution_layer.cpp @@ -424,7 +424,7 @@ TYPED_TEST(ConvolutionLayerTest, TestGradientGroup) { #ifdef USE_CUDNN template -class CuDNNConvolutionLayerTest : public ::testing::Test { +class CuDNNConvolutionLayerTest : public GPUDeviceTest { protected: CuDNNConvolutionLayerTest() : blob_bottom_(new Blob(2, 3, 6, 4)), @@ -467,7 +467,6 @@ class CuDNNConvolutionLayerTest : public ::testing::Test { TYPED_TEST_CASE(CuDNNConvolutionLayerTest, TestDtypes); TYPED_TEST(CuDNNConvolutionLayerTest, TestSetupCuDNN) { - Caffe::set_mode(Caffe::GPU); this->blob_bottom_vec_.push_back(this->blob_bottom_2_); this->blob_top_vec_.push_back(this->blob_top_2_); LayerParameter layer_param; @@ -505,7 +504,6 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSetupCuDNN) { } TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionCuDNN) { - Caffe::set_mode(Caffe::GPU); this->blob_bottom_vec_.push_back(this->blob_bottom_2_); this->blob_top_vec_.push_back(this->blob_top_2_); LayerParameter layer_param; @@ -541,7 +539,6 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionCuDNN) { } TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionGroupCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); @@ -572,7 
+569,7 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSobelConvolutionCuDNN) { // Test separable convolution by computing the Sobel operator // as a single filter then comparing the result // as the convolution of two rectangular filters. - Caffe::set_mode(Caffe::GPU); + // Fill bottoms with identical Gaussian noise. shared_ptr > filler; FillerParameter filler_param; @@ -665,7 +662,6 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSobelConvolutionCuDNN) { } TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); @@ -683,7 +679,6 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientCuDNN) { } TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientGroupCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); diff --git a/src/caffe/test/test_data/generate_sample_data.py b/src/caffe/test/test_data/generate_sample_data.py index e5dbc3406d8..ab5572685cb 100644 --- a/src/caffe/test/test_data/generate_sample_data.py +++ b/src/caffe/test/test_data/generate_sample_data.py @@ -5,6 +5,8 @@ import numpy as np import h5py +script_dir = os.path.dirname(os.path.abspath(__file__)) + num_cols = 8 num_rows = 10 height = 6 @@ -27,12 +29,12 @@ print data print label -with h5py.File(os.path.dirname(__file__) + '/sample_data.h5', 'w') as f: +with h5py.File(script_dir + '/sample_data.h5', 'w') as f: f['data'] = data f['label'] = label f['label2'] = label2 -with h5py.File(os.path.dirname(__file__) + '/sample_data_2_gzip.h5', 'w') as f: +with h5py.File(script_dir + '/sample_data_2_gzip.h5', 'w') as f: f.create_dataset( 'data', data=data + total_size, compression='gzip', compression_opts=1 @@ -46,6 +48,6 @@ compression='gzip', compression_opts=1 ) -with open(os.path.dirname(__file__) + '/sample_data_list.txt', 'w') as f: - f.write(os.path.dirname(__file__) + 
'/sample_data.h5\n') - f.write(os.path.dirname(__file__) + '/sample_data_2_gzip.h5\n') +with open(script_dir + '/sample_data_list.txt', 'w') as f: + f.write(script_dir + '/sample_data.h5\n') + f.write(script_dir + '/sample_data_2_gzip.h5\n') diff --git a/src/caffe/test/test_dummy_data_layer.cpp b/src/caffe/test/test_dummy_data_layer.cpp index 99548352746..c9ed38db3a5 100644 --- a/src/caffe/test/test_dummy_data_layer.cpp +++ b/src/caffe/test/test_dummy_data_layer.cpp @@ -13,7 +13,7 @@ namespace caffe { template -class DummyDataLayerTest : public ::testing::Test { +class DummyDataLayerTest : public CPUDeviceTest { protected: DummyDataLayerTest() : blob_top_a_(new Blob()), @@ -44,7 +44,6 @@ class DummyDataLayerTest : public ::testing::Test { TYPED_TEST_CASE(DummyDataLayerTest, TestDtypes); TYPED_TEST(DummyDataLayerTest, TestOneTopConstant) { - Caffe::set_mode(Caffe::CPU); LayerParameter param; DummyDataParameter* dummy_data_param = param.mutable_dummy_data_param(); dummy_data_param->add_num(5); @@ -74,7 +73,6 @@ TYPED_TEST(DummyDataLayerTest, TestOneTopConstant) { } TYPED_TEST(DummyDataLayerTest, TestTwoTopConstant) { - Caffe::set_mode(Caffe::CPU); LayerParameter param; DummyDataParameter* dummy_data_param = param.mutable_dummy_data_param(); dummy_data_param->add_num(5); @@ -113,7 +111,6 @@ TYPED_TEST(DummyDataLayerTest, TestTwoTopConstant) { } TYPED_TEST(DummyDataLayerTest, TestThreeTopConstantGaussianConstant) { - Caffe::set_mode(Caffe::CPU); LayerParameter param; DummyDataParameter* dummy_data_param = param.mutable_dummy_data_param(); dummy_data_param->add_num(5); diff --git a/src/caffe/test/test_filler.cpp b/src/caffe/test/test_filler.cpp index e04b0fd22af..728b8dc5f0d 100644 --- a/src/caffe/test/test_filler.cpp +++ b/src/caffe/test/test_filler.cpp @@ -142,4 +142,102 @@ TYPED_TEST(GaussianFillerTest, TestFill) { EXPECT_LE(var, target_var * 5.); } +template +class XavierFillerTest : public ::testing::Test { + protected: + XavierFillerTest() + : blob_(new Blob(1000, 
2, 4, 5)), + filler_param_() { + } + virtual void test_params(FillerParameter_VarianceNorm variance_norm, + Dtype n) { + this->filler_param_.set_variance_norm(variance_norm); + this->filler_.reset(new XavierFiller(this->filler_param_)); + this->filler_->Fill(blob_); + EXPECT_TRUE(this->blob_); + const int count = this->blob_->count(); + const Dtype* data = this->blob_->cpu_data(); + Dtype mean = 0.; + Dtype ex2 = 0.; + for (int i = 0; i < count; ++i) { + mean += data[i]; + ex2 += data[i] * data[i]; + } + mean /= count; + ex2 /= count; + Dtype std = sqrt(ex2 - mean*mean); + Dtype target_std = sqrt(2.0 / n); + EXPECT_NEAR(mean, 0.0, 0.1); + EXPECT_NEAR(std, target_std, 0.1); + } + virtual ~XavierFillerTest() { delete blob_; } + Blob* const blob_; + FillerParameter filler_param_; + shared_ptr > filler_; +}; + +TYPED_TEST_CASE(XavierFillerTest, TestDtypes); + +TYPED_TEST(XavierFillerTest, TestFillFanIn) { + TypeParam n = 2*4*5; + this->test_params(FillerParameter_VarianceNorm_FAN_IN, n); +} +TYPED_TEST(XavierFillerTest, TestFillFanOut) { + TypeParam n = 1000*4*5; + this->test_params(FillerParameter_VarianceNorm_FAN_OUT, n); +} +TYPED_TEST(XavierFillerTest, TestFillAverage) { + TypeParam n = (2*4*5 + 1000*4*5) / 2.0; + this->test_params(FillerParameter_VarianceNorm_AVERAGE, n); +} + +template +class MSRAFillerTest : public ::testing::Test { + protected: + MSRAFillerTest() + : blob_(new Blob(1000, 2, 4, 5)), + filler_param_() { + } + virtual void test_params(FillerParameter_VarianceNorm variance_norm, + Dtype n) { + this->filler_param_.set_variance_norm(variance_norm); + this->filler_.reset(new MSRAFiller(this->filler_param_)); + this->filler_->Fill(blob_); + EXPECT_TRUE(this->blob_); + const int count = this->blob_->count(); + const Dtype* data = this->blob_->cpu_data(); + Dtype mean = 0.; + Dtype ex2 = 0.; + for (int i = 0; i < count; ++i) { + mean += data[i]; + ex2 += data[i] * data[i]; + } + mean /= count; + ex2 /= count; + Dtype std = sqrt(ex2 - mean*mean); + Dtype 
target_std = sqrt(2.0 / n); + EXPECT_NEAR(mean, 0.0, 0.1); + EXPECT_NEAR(std, target_std, 0.1); + } + virtual ~MSRAFillerTest() { delete blob_; } + Blob* const blob_; + FillerParameter filler_param_; + shared_ptr > filler_; +}; + +TYPED_TEST_CASE(MSRAFillerTest, TestDtypes); + +TYPED_TEST(MSRAFillerTest, TestFillFanIn) { + TypeParam n = 2*4*5; + this->test_params(FillerParameter_VarianceNorm_FAN_IN, n); +} +TYPED_TEST(MSRAFillerTest, TestFillFanOut) { + TypeParam n = 1000*4*5; + this->test_params(FillerParameter_VarianceNorm_FAN_OUT, n); +} +TYPED_TEST(MSRAFillerTest, TestFillAverage) { + TypeParam n = (2*4*5 + 1000*4*5) / 2.0; + this->test_params(FillerParameter_VarianceNorm_AVERAGE, n); +} + } // namespace caffe diff --git a/src/caffe/test/test_flatten_layer.cpp b/src/caffe/test/test_flatten_layer.cpp index 3042d293cf7..7b6757cba32 100644 --- a/src/caffe/test/test_flatten_layer.cpp +++ b/src/caffe/test/test_flatten_layer.cpp @@ -42,13 +42,48 @@ TYPED_TEST(FlattenLayerTest, TestSetup) { LayerParameter layer_param; FlattenLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - EXPECT_EQ(this->blob_top_->num(), 2); - EXPECT_EQ(this->blob_top_->channels(), 3 * 6 * 5); - EXPECT_EQ(this->blob_top_->height(), 1); - EXPECT_EQ(this->blob_top_->width(), 1); + ASSERT_EQ(this->blob_top_->num_axes(), 2); + EXPECT_EQ(this->blob_top_->shape(0), 2); + EXPECT_EQ(this->blob_top_->shape(1), 3 * 6 * 5); } -TYPED_TEST(FlattenLayerTest, Test) { +TYPED_TEST(FlattenLayerTest, TestSetupWithAxis) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_flatten_param()->set_axis(2); + FlattenLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + ASSERT_EQ(this->blob_top_->num_axes(), 3); + EXPECT_EQ(this->blob_top_->shape(0), 2); + EXPECT_EQ(this->blob_top_->shape(1), 3); + EXPECT_EQ(this->blob_top_->shape(2), 6 * 5); +} + +TYPED_TEST(FlattenLayerTest, TestSetupWithEndAxis) { + typedef 
typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_flatten_param()->set_end_axis(-2); + FlattenLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + ASSERT_EQ(this->blob_top_->num_axes(), 3); + EXPECT_EQ(this->blob_top_->shape(0), 2); + EXPECT_EQ(this->blob_top_->shape(1), 3 * 6); + EXPECT_EQ(this->blob_top_->shape(2), 5); +} + +TYPED_TEST(FlattenLayerTest, TestSetupWithStartAndEndAxis) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_flatten_param()->set_axis(0); + layer_param.mutable_flatten_param()->set_end_axis(-2); + FlattenLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + ASSERT_EQ(this->blob_top_->num_axes(), 2); + EXPECT_EQ(this->blob_top_->shape(0), 2 * 3 * 6); + EXPECT_EQ(this->blob_top_->shape(1), 5); +} + +TYPED_TEST(FlattenLayerTest, TestForward) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; FlattenLayer layer(layer_param); @@ -71,5 +106,4 @@ TYPED_TEST(FlattenLayerTest, TestGradient) { this->blob_top_vec_); } - } // namespace caffe diff --git a/src/caffe/test/test_gradient_based_solver.cpp b/src/caffe/test/test_gradient_based_solver.cpp index eb2569c04f2..c9135d64e70 100644 --- a/src/caffe/test/test_gradient_based_solver.cpp +++ b/src/caffe/test/test_gradient_based_solver.cpp @@ -23,7 +23,7 @@ class GradientBasedSolverTest : public MultiDeviceTest { protected: GradientBasedSolverTest() : - seed_(1701), num_(5), channels_(3), height_(10), width_(10) {} + seed_(1701), num_(4), channels_(3), height_(10), width_(10) {} shared_ptr > solver_; int seed_; @@ -56,19 +56,21 @@ class GradientBasedSolverTest : public MultiDeviceTest { } void RunLeastSquaresSolver(const Dtype learning_rate, - const Dtype weight_decay, const Dtype momentum, const int num_iters) { + const Dtype weight_decay, const Dtype momentum, const int num_iters, + const int iter_size = 1) { ostringstream proto; proto 
<< "max_iter: " << num_iters << " " "base_lr: " << learning_rate << " " "lr_policy: 'fixed' " + "iter_size: " << iter_size << " " "net_param { " " name: 'TestNetwork' " " layer { " " name: 'data' " " type: 'DummyData' " " dummy_data_param { " - " num: " << num_ << " " + " num: " << num_ / iter_size << " " " channels: " << channels_ << " " " height: " << height_ << " " " width: " << width_ << " " @@ -76,6 +78,10 @@ class GradientBasedSolverTest : public MultiDeviceTest { " height: 1 " " width: 1 " " data_filler { " + " type: 'constant' " + " value: 1.0 " + " } " + " data_filler { " " type: 'gaussian' " " std: 1.0 " " } " @@ -270,6 +276,45 @@ class GradientBasedSolverTest : public MultiDeviceTest { } } + void CheckAccumulation(const Dtype kLearningRate, const Dtype kWeightDecay, + const Dtype kMomentum, const int kNumIters, const int kIterSize) { + const double kPrecision = 1e-2; + const double kMinPrecision = 1e-7; + // Solve without accumulation and save parameters. + this->RunLeastSquaresSolver(kLearningRate, kWeightDecay, kMomentum, + kNumIters); + // Save parameters for comparison. + Net& net = *this->solver_->net(); + const vector > >& param_blobs = + net.layer_by_name("innerprod")->blobs(); + vector > > noaccum_params(param_blobs.size()); + for (int i = 0; i < param_blobs.size(); ++i) { + noaccum_params[i].reset(new Blob()); + noaccum_params[i]->CopyFrom(*param_blobs[i], false, true); + } + // Solve by equivalent accumulation of gradients over divided batches. + this->RunLeastSquaresSolver(kLearningRate, kWeightDecay, kMomentum, + kNumIters, kIterSize); + Net& net_accum = *this->solver_->net(); + const vector > >& accum_params = + net_accum.layer_by_name("innerprod")->blobs(); + // Compare accumulated parameters against no accumulation standard. 
+ const int D = this->channels_ * this->height_ * this->width_; + for (int i = 0; i < D; ++i) { + const Dtype expected_param = noaccum_params[0]->cpu_data()[i]; + const Dtype accum_param = accum_params[0]->cpu_data()[i]; + const Dtype error_margin = std::max(kMinPrecision, kPrecision * + std::min(fabs(expected_param), fabs(accum_param))); + EXPECT_NEAR(expected_param, accum_param, error_margin); + } + ASSERT_EQ(1, accum_params[1]->count()); + const Dtype expected_bias = noaccum_params[1]->cpu_data()[0]; + const Dtype accum_bias = accum_params[1]->cpu_data()[0]; + const Dtype error_margin = std::max(kMinPrecision, kPrecision * + std::min(fabs(expected_bias), fabs(accum_bias))); + EXPECT_NEAR(expected_bias, accum_bias, error_margin); + } + // Test that the correct update is computed for a regularized least squares // problem: // @@ -372,6 +417,16 @@ TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverything) { } } +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} template class AdaGradSolverTest : public GradientBasedSolverTest { @@ -416,6 +471,16 @@ TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateWithEverything) { } } +TYPED_TEST(AdaGradSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.0; + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} template class NesterovSolverTest : public GradientBasedSolverTest { @@ -482,4 +547,15 @@ TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithEverything) 
{ } } +TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + } // namespace caffe diff --git a/src/caffe/test/test_im2col_kernel.cu b/src/caffe/test/test_im2col_kernel.cu index ee684c00255..0017ac23e69 100644 --- a/src/caffe/test/test_im2col_kernel.cu +++ b/src/caffe/test/test_im2col_kernel.cu @@ -25,7 +25,7 @@ __global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; template -class Im2colKernelTest : public ::testing::Test { +class Im2colKernelTest : public GPUDeviceTest { protected: Im2colKernelTest() // big so launches > 1024 threads @@ -68,8 +68,6 @@ class Im2colKernelTest : public ::testing::Test { TYPED_TEST_CASE(Im2colKernelTest, TestDtypes); TYPED_TEST(Im2colKernelTest, TestGPU) { - Caffe::set_mode(Caffe::GPU); - // Reshape the blobs to correct size for im2col output this->blob_top_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, diff --git a/src/caffe/test/test_math_functions.cpp b/src/caffe/test/test_math_functions.cpp index 667f744bdd7..a095b544e17 100644 --- a/src/caffe/test/test_math_functions.cpp +++ b/src/caffe/test/test_math_functions.cpp @@ -15,8 +15,10 @@ namespace caffe { -template -class MathFunctionsTest : public ::testing::Test { +template +class MathFunctionsTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + protected: MathFunctionsTest() : blob_bottom_(new Blob()), @@ -64,14 +66,19 @@ class MathFunctionsTest : public ::testing::Test { Blob* const blob_top_; }; -TYPED_TEST_CASE(MathFunctionsTest, TestDtypes); +template +class CPUMathFunctionsTest + : public MathFunctionsTest > { +}; + 
+TYPED_TEST_CASE(CPUMathFunctionsTest, TestDtypes); -TYPED_TEST(MathFunctionsTest, TestNothing) { +TYPED_TEST(CPUMathFunctionsTest, TestNothing) { // The first test case of a test suite takes the longest time // due to the set up overhead. } -TYPED_TEST(MathFunctionsTest, TestHammingDistanceCPU) { +TYPED_TEST(CPUMathFunctionsTest, TestHammingDistance) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); const TypeParam* y = this->blob_top_->cpu_data(); @@ -79,7 +86,7 @@ TYPED_TEST(MathFunctionsTest, TestHammingDistanceCPU) { caffe_cpu_hamming_distance(n, x, y)); } -TYPED_TEST(MathFunctionsTest, TestAsumCPU) { +TYPED_TEST(CPUMathFunctionsTest, TestAsum) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); TypeParam std_asum = 0; @@ -90,7 +97,7 @@ TYPED_TEST(MathFunctionsTest, TestAsumCPU) { EXPECT_LT((cpu_asum - std_asum) / std_asum, 1e-2); } -TYPED_TEST(MathFunctionsTest, TestSignCPU) { +TYPED_TEST(CPUMathFunctionsTest, TestSign) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); caffe_cpu_sign(n, x, this->blob_bottom_->mutable_cpu_diff()); @@ -100,7 +107,7 @@ TYPED_TEST(MathFunctionsTest, TestSignCPU) { } } -TYPED_TEST(MathFunctionsTest, TestSgnbitCPU) { +TYPED_TEST(CPUMathFunctionsTest, TestSgnbit) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); caffe_cpu_sgnbit(n, x, this->blob_bottom_->mutable_cpu_diff()); @@ -110,7 +117,7 @@ TYPED_TEST(MathFunctionsTest, TestSgnbitCPU) { } } -TYPED_TEST(MathFunctionsTest, TestFabsCPU) { +TYPED_TEST(CPUMathFunctionsTest, TestFabs) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); caffe_abs(n, x, this->blob_bottom_->mutable_cpu_diff()); @@ -120,7 +127,7 @@ TYPED_TEST(MathFunctionsTest, TestFabsCPU) { } } -TYPED_TEST(MathFunctionsTest, TestScaleCPU) { +TYPED_TEST(CPUMathFunctionsTest, TestScale) { int n = 
this->blob_bottom_->count(); TypeParam alpha = this->blob_bottom_->cpu_diff()[caffe_rng_rand() % this->blob_bottom_->count()]; @@ -133,11 +140,10 @@ TYPED_TEST(MathFunctionsTest, TestScaleCPU) { } } -TYPED_TEST(MathFunctionsTest, TestCopyCPU) { +TYPED_TEST(CPUMathFunctionsTest, TestCopy) { const int n = this->blob_bottom_->count(); const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); TypeParam* top_data = this->blob_top_->mutable_cpu_data(); - Caffe::set_mode(Caffe::CPU); caffe_copy(n, bottom_data, top_data); for (int i = 0; i < n; ++i) { EXPECT_EQ(bottom_data[i], top_data[i]); @@ -146,8 +152,14 @@ TYPED_TEST(MathFunctionsTest, TestCopyCPU) { #ifndef CPU_ONLY +template +class GPUMathFunctionsTest : public MathFunctionsTest > { +}; + +TYPED_TEST_CASE(GPUMathFunctionsTest, TestDtypes); + // TODO: Fix caffe_gpu_hamming_distance and re-enable this test. -TYPED_TEST(MathFunctionsTest, DISABLED_TestHammingDistanceGPU) { +TYPED_TEST(GPUMathFunctionsTest, DISABLED_TestHammingDistance) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); const TypeParam* y = this->blob_top_->cpu_data(); @@ -158,7 +170,7 @@ TYPED_TEST(MathFunctionsTest, DISABLED_TestHammingDistanceGPU) { EXPECT_EQ(reference_distance, computed_distance); } -TYPED_TEST(MathFunctionsTest, TestAsumGPU) { +TYPED_TEST(GPUMathFunctionsTest, TestAsum) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); TypeParam std_asum = 0; @@ -170,7 +182,7 @@ TYPED_TEST(MathFunctionsTest, TestAsumGPU) { EXPECT_LT((gpu_asum - std_asum) / std_asum, 1e-2); } -TYPED_TEST(MathFunctionsTest, TestSignGPU) { +TYPED_TEST(GPUMathFunctionsTest, TestSign) { int n = this->blob_bottom_->count(); caffe_gpu_sign(n, this->blob_bottom_->gpu_data(), this->blob_bottom_->mutable_gpu_diff()); @@ -181,7 +193,7 @@ TYPED_TEST(MathFunctionsTest, TestSignGPU) { } } -TYPED_TEST(MathFunctionsTest, TestSgnbitGPU) { +TYPED_TEST(GPUMathFunctionsTest, TestSgnbit) { int n = 
this->blob_bottom_->count(); caffe_gpu_sgnbit(n, this->blob_bottom_->gpu_data(), this->blob_bottom_->mutable_gpu_diff()); @@ -192,7 +204,7 @@ TYPED_TEST(MathFunctionsTest, TestSgnbitGPU) { } } -TYPED_TEST(MathFunctionsTest, TestFabsGPU) { +TYPED_TEST(GPUMathFunctionsTest, TestFabs) { int n = this->blob_bottom_->count(); caffe_gpu_abs(n, this->blob_bottom_->gpu_data(), this->blob_bottom_->mutable_gpu_diff()); @@ -203,7 +215,7 @@ TYPED_TEST(MathFunctionsTest, TestFabsGPU) { } } -TYPED_TEST(MathFunctionsTest, TestScaleGPU) { +TYPED_TEST(GPUMathFunctionsTest, TestScale) { int n = this->blob_bottom_->count(); TypeParam alpha = this->blob_bottom_->cpu_diff()[caffe_rng_rand() % this->blob_bottom_->count()]; @@ -216,11 +228,10 @@ TYPED_TEST(MathFunctionsTest, TestScaleGPU) { } } -TYPED_TEST(MathFunctionsTest, TestCopyGPU) { +TYPED_TEST(GPUMathFunctionsTest, TestCopy) { const int n = this->blob_bottom_->count(); const TypeParam* bottom_data = this->blob_bottom_->gpu_data(); TypeParam* top_data = this->blob_top_->mutable_gpu_data(); - Caffe::set_mode(Caffe::GPU); caffe_copy(n, bottom_data, top_data); bottom_data = this->blob_bottom_->cpu_data(); top_data = this->blob_top_->mutable_cpu_data(); diff --git a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp index 9038017e3e2..b2db984feb1 100644 --- a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp +++ b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp @@ -16,7 +16,7 @@ namespace caffe { template -class MultinomialLogisticLossLayerTest : public ::testing::Test { +class MultinomialLogisticLossLayerTest : public CPUDeviceTest { protected: MultinomialLogisticLossLayerTest() : blob_bottom_data_(new Blob(10, 5, 1, 1)), @@ -51,7 +51,6 @@ TYPED_TEST_CASE(MultinomialLogisticLossLayerTest, TestDtypes); TYPED_TEST(MultinomialLogisticLossLayerTest, TestGradientCPU) { LayerParameter layer_param; - Caffe::set_mode(Caffe::CPU); MultinomialLogisticLossLayer 
layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); GradientChecker checker(1e-2, 2*1e-2, 1701, 0, 0.05); diff --git a/src/caffe/test/test_net.cpp b/src/caffe/test/test_net.cpp index 08106e79274..56959f4793b 100644 --- a/src/caffe/test/test_net.cpp +++ b/src/caffe/test/test_net.cpp @@ -288,6 +288,7 @@ class NetTest : public MultiDeviceTest { const bool force_backward = false, const bool bias_term = false, const Dtype blobs_lr_w1 = 1, const Dtype blobs_lr_b1 = 2, const Dtype blobs_lr_w2 = 1, const Dtype blobs_lr_b2 = 2) { + string bias_str = bias_term ? "true ":"false "; ostringstream proto; proto << "name: 'UnsharedWeightsNetwork' "; if (force_backward) { @@ -314,7 +315,7 @@ class NetTest : public MultiDeviceTest { " type: 'InnerProduct' " " inner_product_param { " " num_output: 10 " - " bias_term: " << bias_term << + " bias_term: " << bias_str << " weight_filler { " " type: 'gaussian' " " std: 10 " @@ -340,7 +341,7 @@ class NetTest : public MultiDeviceTest { " type: 'InnerProduct' " " inner_product_param { " " num_output: 10 " - " bias_term: " << bias_term << + " bias_term: " << bias_str << " weight_filler { " " type: 'gaussian' " " std: 10 " @@ -613,6 +614,105 @@ class NetTest : public MultiDeviceTest { InitNetFromProtoString(proto); } + virtual void InitSkipPropNet(bool test_skip_true) { + string proto = + "name: 'SkipPropTestNetwork' " + "layer { " + " name: 'data' " + " type: 'DummyData' " + " dummy_data_param { " + " shape { " + " dim: 5 " + " dim: 2 " + " dim: 3 " + " dim: 4 " + " } " + " data_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " shape { " + " dim: 5 " + " } " + " data_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'silence' " + " bottom: 'label' " + " type: 'Silence' " + "} " + "layer { " + " name: 'innerproduct' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 1 " + " weight_filler { " + " type: 
'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'data' " + " top: 'innerproduct' " + "} " + "layer { " + " name: 'ip_fake_labels' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " bottom: 'data' " + " top: 'fake_labels' " + "} " + "layer { " + " name: 'argmax' " + " bottom: 'fake_labels' " + " top: 'label_argmax' " + " type: 'ArgMax' " + "} " + "layer { " + " name: 'loss' " + " bottom: 'innerproduct' " + " bottom: 'label_argmax' "; + if (test_skip_true) + proto += " propagate_down: true " + " propagate_down: false "; + else + proto += " propagate_down: true " + " propagate_down: true "; + proto += + " top: 'cross_entropy_loss' " + " type: 'SigmoidCrossEntropyLoss' " + " loss_weight: 0.1 " + "} "; + InitNetFromProtoString(proto); + } + int seed_; shared_ptr > net_; }; @@ -2224,4 +2324,52 @@ TYPED_TEST(NetTest, TestReshape) { } } +TYPED_TEST(NetTest, TestSkipPropagateDown) { + // check bottom_need_backward if propagate_down is true + this->InitSkipPropNet(false); + vector vec_layer_need_backward = this->net_->layer_need_backward(); + for (int layer_id = 0; layer_id < this->net_->layers().size(); ++layer_id) { + string layer_name = this->net_->layer_names()[layer_id]; + if (layer_name == "loss") { + // access to bottom_need_backward coresponding to label's blob + bool need_back = this->net_->bottom_need_backward()[layer_id][1]; + // if propagate_down is true, the loss layer will try to + // backpropagate on labels + EXPECT_TRUE(need_back) << "bottom_need_backward should be True"; + } + // layer_need_backward should be True except for data and silence layers + if (layer_name.find("data") != std::string::npos 
|| + layer_name == "silence") { + EXPECT_FALSE(vec_layer_need_backward[layer_id]) + << "layer_need_backward for " << layer_name << " should be False"; + } else { + EXPECT_TRUE(vec_layer_need_backward[layer_id]) + << "layer_need_backward for " << layer_name << " should be True"; + } + } + // check bottom_need_backward if propagat_down is false + this->InitSkipPropNet(true); + vec_layer_need_backward.clear(); + vec_layer_need_backward = this->net_->layer_need_backward(); + for (int layer_id = 0; layer_id < this->net_->layers().size(); ++layer_id) { + string layer_name = this->net_->layer_names()[layer_id]; + if (layer_name == "loss") { + // access to bottom_need_backward coresponding to label's blob + bool need_back = this->net_->bottom_need_backward()[layer_id][1]; + // if propagate_down is false, the loss layer will not try to + // backpropagate on labels + EXPECT_FALSE(need_back) << "bottom_need_backward should be False"; + } + // layer_need_backward should be False except for innerproduct and + // loss layers + if (layer_name == "innerproduct" || layer_name == "loss") { + EXPECT_TRUE(vec_layer_need_backward[layer_id]) + << "layer_need_backward for " << layer_name << " should be True"; + } else { + EXPECT_FALSE(vec_layer_need_backward[layer_id]) + << "layer_need_backward for " << layer_name << " should be False"; + } + } +} + } // namespace caffe diff --git a/src/caffe/test/test_neuron_layer.cpp b/src/caffe/test/test_neuron_layer.cpp index c9d52f247a6..c6e4d27b903 100644 --- a/src/caffe/test/test_neuron_layer.cpp +++ b/src/caffe/test/test_neuron_layer.cpp @@ -117,6 +117,49 @@ class NeuronLayerTest : public MultiDeviceTest { + slope_data[c] * std::min(bottom_data[i], (Dtype)(0))); } } + + void LogBottomInit() { + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + Dtype* bottom_data = this->blob_bottom_->mutable_cpu_data(); + caffe_exp(this->blob_bottom_->count(), bottom_data, bottom_data); + } + + void 
TestLogForward(const float base, const float scale, const float shift) { + LogBottomInit(); + LayerParameter layer_param; + layer_param.mutable_log_param()->set_base(base); + layer_param.mutable_log_param()->set_scale(scale); + layer_param.mutable_log_param()->set_shift(shift); + LogLayer layer(layer_param); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); + layer.Forward(blob_bottom_vec_, blob_top_vec_); + const Dtype kDelta = 2e-4; + const Dtype* bottom_data = blob_bottom_->cpu_data(); + const Dtype* top_data = blob_top_->cpu_data(); + for (int i = 0; i < blob_bottom_->count(); ++i) { + const Dtype bottom_val = bottom_data[i]; + const Dtype top_val = top_data[i]; + if (base == -1) { + EXPECT_NEAR(top_val, log(shift + scale * bottom_val), kDelta); + } else { + EXPECT_NEAR(top_val, log(shift + scale * bottom_val) / log(base), + kDelta); + } + } + } + + void TestLogGradient(const float base, const float scale, const float shift) { + LogBottomInit(); + LayerParameter layer_param; + layer_param.mutable_log_param()->set_base(base); + layer_param.mutable_log_param()->set_scale(scale); + layer_param.mutable_log_param()->set_shift(shift); + LogLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientEltwise(&layer, blob_bottom_vec_, blob_top_vec_); + } }; TYPED_TEST_CASE(NeuronLayerTest, TestDtypesAndDevices); @@ -339,6 +382,88 @@ TYPED_TEST(NeuronLayerTest, TestExpGradientBase2Shift1Scale3) { this->TestExpGradient(kBase, kScale, kShift); } +TYPED_TEST(NeuronLayerTest, TestLogLayer) { + typedef typename TypeParam::Dtype Dtype; + // Test default base of "-1" -- should actually set base := e. + const Dtype kBase = -1; + const Dtype kScale = 1; + const Dtype kShift = 0; + this->TestLogForward(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogGradient) { + typedef typename TypeParam::Dtype Dtype; + // Test default base of "-1" -- should actually set base := e. 
+ const Dtype kBase = -1; + const Dtype kScale = 1; + const Dtype kShift = 0; + this->TestLogGradient(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogLayerBase2) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 1; + const Dtype kShift = 0; + this->TestLogForward(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogGradientBase2) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 1; + const Dtype kShift = 0; + this->TestLogGradient(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogLayerBase2Shift1) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 1; + const Dtype kShift = 1; + this->TestLogForward(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogGradientBase2Shift1) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 1; + const Dtype kShift = 1; + this->TestLogGradient(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogLayerBase2Scale3) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 3; + const Dtype kShift = 0; + this->TestLogForward(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogGradientBase2Scale3) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 3; + const Dtype kShift = 0; + this->TestLogGradient(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogLayerBase2Shift1Scale3) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 3; + const Dtype kShift = 1; + this->TestLogForward(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogGradientBase2Shift1Scale3) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 3; + const Dtype kShift = 1; + this->TestLogGradient(kBase, kScale, kShift); +} + TYPED_TEST(NeuronLayerTest, 
TestDropoutHalf) { const float kDropoutRatio = 0.5; this->TestDropoutForward(kDropoutRatio); @@ -541,14 +666,10 @@ TYPED_TEST(NeuronLayerTest, TestPReLUInPlace) { caffe_copy(ip2.blobs()[0]->count(), ip.blobs()[0]->cpu_data(), ip2.blobs()[0]->mutable_cpu_data()); // Forward in-place - ip.Reshape(this->blob_bottom_vec_, this->blob_top_vec_); ip.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - prelu.Reshape(this->blob_top_vec_, this->blob_top_vec_); prelu.Forward(this->blob_top_vec_, this->blob_top_vec_); // Forward non-in-place - ip2.Reshape(blob_bottom_vec_2, blob_middle_vec_2); ip2.Forward(blob_bottom_vec_2, blob_middle_vec_2); - prelu2.Reshape(blob_middle_vec_2, blob_top_vec_2); prelu2.Forward(blob_middle_vec_2, blob_top_vec_2); // Check numbers for (int s = 0; s < blob_top_2->count(); ++s) { @@ -590,7 +711,7 @@ TYPED_TEST(NeuronLayerTest, TestPReLUInPlace) { #ifdef USE_CUDNN template -class CuDNNNeuronLayerTest : public ::testing::Test { +class CuDNNNeuronLayerTest : public GPUDeviceTest { protected: CuDNNNeuronLayerTest() : blob_bottom_(new Blob(2, 3, 4, 5)), @@ -613,7 +734,6 @@ class CuDNNNeuronLayerTest : public ::testing::Test { TYPED_TEST_CASE(CuDNNNeuronLayerTest, TestDtypes); TYPED_TEST(CuDNNNeuronLayerTest, TestReLUCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNReLULayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -628,7 +748,6 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNReLULayer layer(layer_param); GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); @@ -637,7 +756,6 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestReLUWithNegativeSlopeCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CHECK(google::protobuf::TextFormat::ParseFromString( "relu_param { negative_slope: 0.01 }", 
&layer_param)); @@ -657,7 +775,6 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUWithNegativeSlopeCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientWithNegativeSlopeCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CHECK(google::protobuf::TextFormat::ParseFromString( "relu_param { negative_slope: 0.01 }", &layer_param)); @@ -668,7 +785,6 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientWithNegativeSlopeCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNSigmoidLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -685,7 +801,6 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidGradientCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNSigmoidLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); @@ -694,7 +809,6 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidGradientCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestTanHCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNTanHLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -717,7 +831,6 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestTanHCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestTanHGradientCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNTanHLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); diff --git a/src/caffe/test/test_pooling_layer.cpp b/src/caffe/test/test_pooling_layer.cpp index e9964e7f0b7..69f2d5c1135 100644 --- a/src/caffe/test/test_pooling_layer.cpp +++ b/src/caffe/test/test_pooling_layer.cpp @@ -608,7 +608,7 @@ TYPED_TEST(PoolingLayerTest, TestGradientAvePadded) { #ifdef USE_CUDNN template -class CuDNNPoolingLayerTest : public ::testing::Test { +class CuDNNPoolingLayerTest : public GPUDeviceTest { protected: CuDNNPoolingLayerTest() : blob_bottom_(new Blob()), @@ -963,7 +963,6 @@ class 
CuDNNPoolingLayerTest : public ::testing::Test { TYPED_TEST_CASE(CuDNNPoolingLayerTest, TestDtypes); TYPED_TEST(CuDNNPoolingLayerTest, TestSetupCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -977,7 +976,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestSetupCuDNN) { } TYPED_TEST(CuDNNPoolingLayerTest, TestSetupPaddedCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -994,7 +992,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestSetupPaddedCuDNN) { /* TYPED_TEST(CuDNNPoolingLayerTest, PrintBackwardCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; layer_param.set_kernelsize(3); layer_param.set_stride(2); @@ -1020,7 +1017,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, PrintBackwardCuDNN) { */ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxCuDNN) { - Caffe::set_mode(Caffe::GPU); this->TestForwardSquare(); this->TestForwardRectHigh(); this->TestForwardRectWide(); @@ -1030,7 +1026,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxCuDNN) { // the corresponding backward test. 
/* TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxTopMaskCuDNN) { - Caffe::set_mode(Caffe::GPU); this->blob_top_vec_.push_back(this->blob_top_mask_); this->TestForwardSquare(); this->TestForwardRectHigh(); @@ -1039,7 +1034,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxTopMaskCuDNN) { */ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxCuDNN) { - Caffe::set_mode(Caffe::GPU); for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; @@ -1059,7 +1053,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxCuDNN) { } TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxPaddedCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -1105,7 +1098,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxPaddedCuDNN) { /* TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxTopMaskCuDNN) { - Caffe::set_mode(Caffe::GPU); for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; @@ -1126,7 +1118,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxTopMaskCuDNN) { */ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardAveCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -1152,7 +1143,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardAveCuDNN) { } TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAveCuDNN) { - Caffe::set_mode(Caffe::GPU); for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; @@ -1170,7 +1160,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAveCuDNN) { } TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAvePaddedCuDNN) { - Caffe::set_mode(Caffe::GPU); for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { for (int 
kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; diff --git a/src/caffe/test/test_softmax_layer.cpp b/src/caffe/test/test_softmax_layer.cpp index f6674422e56..996da4b8f7c 100644 --- a/src/caffe/test/test_softmax_layer.cpp +++ b/src/caffe/test/test_softmax_layer.cpp @@ -82,7 +82,7 @@ TYPED_TEST(SoftmaxLayerTest, TestGradient) { #ifdef USE_CUDNN template -class CuDNNSoftmaxLayerTest : public ::testing::Test { +class CuDNNSoftmaxLayerTest : public GPUDeviceTest { protected: CuDNNSoftmaxLayerTest() : blob_bottom_(new Blob(2, 10, 2, 3)), @@ -104,7 +104,6 @@ class CuDNNSoftmaxLayerTest : public ::testing::Test { TYPED_TEST_CASE(CuDNNSoftmaxLayerTest, TestDtypes); TYPED_TEST(CuDNNSoftmaxLayerTest, TestForwardCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNSoftmaxLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -138,7 +137,6 @@ TYPED_TEST(CuDNNSoftmaxLayerTest, TestForwardCuDNN) { } TYPED_TEST(CuDNNSoftmaxLayerTest, TestGradientCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNSoftmaxLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); diff --git a/src/caffe/test/test_stochastic_pooling.cpp b/src/caffe/test/test_stochastic_pooling.cpp index 12962c65d85..f84464c322c 100644 --- a/src/caffe/test/test_stochastic_pooling.cpp +++ b/src/caffe/test/test_stochastic_pooling.cpp @@ -16,8 +16,10 @@ using std::min; namespace caffe { -template -class StochasticPoolingLayerTest : public ::testing::Test { +template +class StochasticPoolingLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + protected: StochasticPoolingLayerTest() : blob_bottom_(new Blob()), @@ -45,9 +47,14 @@ class StochasticPoolingLayerTest : public ::testing::Test { vector*> blob_top_vec_; }; -TYPED_TEST_CASE(StochasticPoolingLayerTest, TestDtypes); +template +class CPUStochasticPoolingLayerTest + : public StochasticPoolingLayerTest > { +}; + 
+TYPED_TEST_CASE(CPUStochasticPoolingLayerTest, TestDtypes); -TYPED_TEST(StochasticPoolingLayerTest, TestSetup) { +TYPED_TEST(CPUStochasticPoolingLayerTest, TestSetup) { LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -60,8 +67,16 @@ TYPED_TEST(StochasticPoolingLayerTest, TestSetup) { EXPECT_EQ(this->blob_top_->width(), 2); } -TYPED_TEST(StochasticPoolingLayerTest, TestStochasticGPU) { - Caffe::set_mode(Caffe::GPU); +#ifndef CPU_ONLY + +template +class GPUStochasticPoolingLayerTest + : public StochasticPoolingLayerTest > { +}; + +TYPED_TEST_CASE(GPUStochasticPoolingLayerTest, TestDtypes); + +TYPED_TEST(GPUStochasticPoolingLayerTest, TestStochastic) { LayerParameter layer_param; layer_param.set_phase(TRAIN); PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); @@ -104,8 +119,7 @@ TYPED_TEST(StochasticPoolingLayerTest, TestStochasticGPU) { EXPECT_GE(total / this->blob_top_->count(), 0.55); } -TYPED_TEST(StochasticPoolingLayerTest, TestStochasticGPUTestPhase) { - Caffe::set_mode(Caffe::GPU); +TYPED_TEST(GPUStochasticPoolingLayerTest, TestStochasticTestPhase) { LayerParameter layer_param; layer_param.set_phase(TEST); PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); @@ -142,8 +156,7 @@ TYPED_TEST(StochasticPoolingLayerTest, TestStochasticGPUTestPhase) { } } -TYPED_TEST(StochasticPoolingLayerTest, TestGradientGPU) { - Caffe::set_mode(Caffe::GPU); +TYPED_TEST(GPUStochasticPoolingLayerTest, TestGradient) { LayerParameter layer_param; layer_param.set_phase(TRAIN); PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); @@ -158,6 +171,6 @@ TYPED_TEST(StochasticPoolingLayerTest, TestGradientGPU) { this->blob_top_vec_); } - +#endif } // namespace caffe diff --git a/src/caffe/test/test_triplet_loss_layer.cpp b/src/caffe/test/test_triplet_loss_layer.cpp deleted file mode 100644 index c8d9377fa23..00000000000 --- 
a/src/caffe/test/test_triplet_loss_layer.cpp +++ /dev/null @@ -1,107 +0,0 @@ -#include -#include -#include -#include -#include - -#include "gtest/gtest.h" - -#include "caffe/blob.hpp" -#include "caffe/common.hpp" -#include "caffe/filler.hpp" -#include "caffe/vision_layers.hpp" - -#include "caffe/test/test_caffe_main.hpp" -#include "caffe/test/test_gradient_check_util.hpp" - -namespace caffe { - -template -class TripletLossLayerTest : public MultiDeviceTest { - typedef typename TypeParam::Dtype Dtype; - - protected: - TripletLossLayerTest() - : blob_bottom_data_i_(new Blob(128, 10, 1, 1)), - blob_bottom_data_j_(new Blob(128, 10, 1, 1)), - blob_bottom_data_k_(new Blob(128, 10, 1, 1)), - blob_bottom_y_(new Blob(128, 1, 1, 1)), - blob_top_loss_(new Blob()) { - // fill the values - FillerParameter filler_param; - filler_param.set_mean(0.0); - filler_param.set_std(0.3); // distances~=1.0 to test both sides of margin - GaussianFiller filler(filler_param); - filler.Fill(this->blob_bottom_data_i_); - blob_bottom_vec_.push_back(blob_bottom_data_i_); - filler.Fill(this->blob_bottom_data_j_); - blob_bottom_vec_.push_back(blob_bottom_data_j_); - filler.Fill(this->blob_bottom_data_k_); - blob_bottom_vec_.push_back(blob_bottom_data_k_); - for (int i = 0; i < blob_bottom_y_->count(); ++i) { - blob_bottom_y_->mutable_cpu_data()[i] = caffe_rng_rand() % 2; // 0 or 1 - } - blob_bottom_vec_.push_back(blob_bottom_y_); - blob_top_vec_.push_back(blob_top_loss_); - } - virtual ~TripletLossLayerTest() { - delete blob_bottom_data_i_; - delete blob_bottom_data_j_; - delete blob_bottom_data_k_; - delete blob_bottom_y_; - delete blob_top_loss_; - } - - Blob* const blob_bottom_data_i_; - Blob* const blob_bottom_data_j_; - Blob* const blob_bottom_data_k_; - Blob* const blob_bottom_y_; - Blob* const blob_top_loss_; - vector*> blob_bottom_vec_; - vector*> blob_top_vec_; -}; - -TYPED_TEST_CASE(TripletLossLayerTest, TestDtypesAndDevices); - -TYPED_TEST(TripletLossLayerTest, TestForward) { - typedef 
typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - TripletLossLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - // manually compute to compare - const Dtype margin = layer_param.contrastive_loss_param().margin(); - const int num = this->blob_bottom_data_i_->num(); - const int channels = this->blob_bottom_data_i_->channels(); - Dtype loss(0); - for (int i = 0; i < num; ++i) { - Dtype dist_sq(0); - for (int j = 0; j < channels; ++j) { - Dtype diff = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - - this->blob_bottom_data_j_->cpu_data()[i*channels+j]; - dist_sq += diff*diff; - } - if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs - loss += dist_sq; - } else { - loss += std::max(margin-dist_sq, Dtype(0)); - } - } - loss /= static_cast(num) * Dtype(2); - EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); -} - -TYPED_TEST(TripletLossLayerTest, TestGradient) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - TripletLossLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - GradientChecker checker(1e-2, 1e-2, 1701); - // check the gradient for the first two bottom layers - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_, 0); - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_, 1); -} - -} // namespace caffe diff --git a/src/caffe/util/db.cpp b/src/caffe/util/db.cpp index 7f7018107ec..f55420e9840 100644 --- a/src/caffe/util/db.cpp +++ b/src/caffe/util/db.cpp @@ -1,64 +1,11 @@ #include "caffe/util/db.hpp" +#include "caffe/util/db_leveldb.hpp" +#include "caffe/util/db_lmdb.hpp" -#include #include namespace caffe { namespace db { -const size_t LMDB_MAP_SIZE = 1099511627776; // 1 TB - -void LevelDB::Open(const string& source, Mode mode) { - leveldb::Options options; - options.block_size = 65536; - 
options.write_buffer_size = 268435456; - options.max_open_files = 100; - options.error_if_exists = mode == NEW; - options.create_if_missing = mode != READ; - leveldb::Status status = leveldb::DB::Open(options, source, &db_); - CHECK(status.ok()) << "Failed to open leveldb " << source - << std::endl << status.ToString(); - LOG(INFO) << "Opened leveldb " << source; -} - -void LMDB::Open(const string& source, Mode mode) { - MDB_CHECK(mdb_env_create(&mdb_env_)); - MDB_CHECK(mdb_env_set_mapsize(mdb_env_, LMDB_MAP_SIZE)); - if (mode == NEW) { - CHECK_EQ(mkdir(source.c_str(), 0744), 0) << "mkdir " << source << "failed"; - } - int flags = 0; - if (mode == READ) { - flags = MDB_RDONLY | MDB_NOTLS; - } - MDB_CHECK(mdb_env_open(mdb_env_, source.c_str(), flags, 0664)); - LOG(INFO) << "Opened lmdb " << source; -} - -LMDBCursor* LMDB::NewCursor() { - MDB_txn* mdb_txn; - MDB_cursor* mdb_cursor; - MDB_CHECK(mdb_txn_begin(mdb_env_, NULL, MDB_RDONLY, &mdb_txn)); - MDB_CHECK(mdb_dbi_open(mdb_txn, NULL, 0, &mdb_dbi_)); - MDB_CHECK(mdb_cursor_open(mdb_txn, mdb_dbi_, &mdb_cursor)); - return new LMDBCursor(mdb_txn, mdb_cursor); -} - -LMDBTransaction* LMDB::NewTransaction() { - MDB_txn* mdb_txn; - MDB_CHECK(mdb_txn_begin(mdb_env_, NULL, 0, &mdb_txn)); - MDB_CHECK(mdb_dbi_open(mdb_txn, NULL, 0, &mdb_dbi_)); - return new LMDBTransaction(&mdb_dbi_, mdb_txn); -} - -void LMDBTransaction::Put(const string& key, const string& value) { - MDB_val mdb_key, mdb_value; - mdb_key.mv_data = const_cast(key.data()); - mdb_key.mv_size = key.size(); - mdb_value.mv_data = const_cast(value.data()); - mdb_value.mv_size = value.size(); - MDB_CHECK(mdb_put(mdb_txn_, *mdb_dbi_, &mdb_key, &mdb_value, 0)); -} - DB* GetDB(DataParameter::DB backend) { switch (backend) { case DataParameter_DB_LEVELDB: diff --git a/src/caffe/util/db_leveldb.cpp b/src/caffe/util/db_leveldb.cpp new file mode 100644 index 00000000000..06c46627d31 --- /dev/null +++ b/src/caffe/util/db_leveldb.cpp @@ -0,0 +1,21 @@ +#include 
"caffe/util/db_leveldb.hpp" + +#include + +namespace caffe { namespace db { + +void LevelDB::Open(const string& source, Mode mode) { + leveldb::Options options; + options.block_size = 65536; + options.write_buffer_size = 268435456; + options.max_open_files = 100; + options.error_if_exists = mode == NEW; + options.create_if_missing = mode != READ; + leveldb::Status status = leveldb::DB::Open(options, source, &db_); + CHECK(status.ok()) << "Failed to open leveldb " << source + << std::endl << status.ToString(); + LOG(INFO) << "Opened leveldb " << source; +} + +} // namespace db +} // namespace caffe diff --git a/src/caffe/util/db_lmdb.cpp b/src/caffe/util/db_lmdb.cpp new file mode 100644 index 00000000000..a054b796806 --- /dev/null +++ b/src/caffe/util/db_lmdb.cpp @@ -0,0 +1,51 @@ +#include "caffe/util/db_lmdb.hpp" + +#include + +#include + +namespace caffe { namespace db { + +const size_t LMDB_MAP_SIZE = 1099511627776; // 1 TB + +void LMDB::Open(const string& source, Mode mode) { + MDB_CHECK(mdb_env_create(&mdb_env_)); + MDB_CHECK(mdb_env_set_mapsize(mdb_env_, LMDB_MAP_SIZE)); + if (mode == NEW) { + CHECK_EQ(mkdir(source.c_str(), 0744), 0) << "mkdir " << source << "failed"; + } + int flags = 0; + if (mode == READ) { + flags = MDB_RDONLY | MDB_NOTLS; + } + MDB_CHECK(mdb_env_open(mdb_env_, source.c_str(), flags, 0664)); + LOG(INFO) << "Opened lmdb " << source; +} + +LMDBCursor* LMDB::NewCursor() { + MDB_txn* mdb_txn; + MDB_cursor* mdb_cursor; + MDB_CHECK(mdb_txn_begin(mdb_env_, NULL, MDB_RDONLY, &mdb_txn)); + MDB_CHECK(mdb_dbi_open(mdb_txn, NULL, 0, &mdb_dbi_)); + MDB_CHECK(mdb_cursor_open(mdb_txn, mdb_dbi_, &mdb_cursor)); + return new LMDBCursor(mdb_txn, mdb_cursor); +} + +LMDBTransaction* LMDB::NewTransaction() { + MDB_txn* mdb_txn; + MDB_CHECK(mdb_txn_begin(mdb_env_, NULL, 0, &mdb_txn)); + MDB_CHECK(mdb_dbi_open(mdb_txn, NULL, 0, &mdb_dbi_)); + return new LMDBTransaction(&mdb_dbi_, mdb_txn); +} + +void LMDBTransaction::Put(const string& key, const string& value) { 
+ MDB_val mdb_key, mdb_value; + mdb_key.mv_data = const_cast(key.data()); + mdb_key.mv_size = key.size(); + mdb_value.mv_data = const_cast(value.data()); + mdb_value.mv_size = value.size(); + MDB_CHECK(mdb_put(mdb_txn_, *mdb_dbi_, &mdb_key, &mdb_value, 0)); +} + +} // namespace db +} // namespace caffe diff --git a/src/caffe/util/math_functions.cpp b/src/caffe/util/math_functions.cpp index 13e17be582b..0aab6b17b85 100644 --- a/src/caffe/util/math_functions.cpp +++ b/src/caffe/util/math_functions.cpp @@ -206,6 +206,16 @@ void caffe_exp(const int n, const double* a, double* y) { vdExp(n, a, y); } +template <> +void caffe_log(const int n, const float* a, float* y) { + vsLn(n, a, y); +} + +template <> +void caffe_log(const int n, const double* a, double* y) { + vdLn(n, a, y); +} + template <> void caffe_abs(const int n, const float* a, float* y) { vsAbs(n, a, y); diff --git a/src/caffe/util/math_functions.cu b/src/caffe/util/math_functions.cu index 43e65eb9a69..2631a0740d6 100644 --- a/src/caffe/util/math_functions.cu +++ b/src/caffe/util/math_functions.cu @@ -324,6 +324,27 @@ void caffe_gpu_exp(const int N, const double* a, double* y) { N, a, y); } +template +__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] = log(a[index]); + } +} + +template <> +void caffe_gpu_log(const int N, const float* a, float* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + log_kernel<<>>( + N, a, y); +} + +template <> +void caffe_gpu_log(const int N, const double* a, double* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + log_kernel<<>>( + N, a, y); +} + template __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { From 00f25376cda4866a2fde448efea2e27cd51745a0 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Tue, 30 Jun 2015 21:29:31 +0800 Subject: [PATCH 20/82] lfw test added in examples of triplet --- examples/siamese/convert_lfw_siamese_data.cpp | 123 +++++ 
examples/siamese/create_lfw_siamese.sh | 21 + examples/siamese/lfw_siamese.prototxt | 113 ++++ examples/siamese/lfw_siamese_solver.prototxt | 25 + .../siamese/lfw_siamese_train_test.prototxt | 349 ++++++++++++ examples/siamese/train_lfw_siamese.sh | 5 + examples/triplet/convert_lfw_triplet_data.cpp | 128 +++++ examples/triplet/create_lfw_triplet.sh | 21 + examples/triplet/lfw_triplet.prototxt | 113 ++++ examples/triplet/lfw_triplet_solver.prototxt | 25 + .../triplet/lfw_triplet_train_test.prototxt | 500 ++++++++++++++++++ examples/triplet/train_lfw_triplet.sh | 5 + 12 files changed, 1428 insertions(+) create mode 100644 examples/siamese/convert_lfw_siamese_data.cpp create mode 100755 examples/siamese/create_lfw_siamese.sh create mode 100644 examples/siamese/lfw_siamese.prototxt create mode 100644 examples/siamese/lfw_siamese_solver.prototxt create mode 100644 examples/siamese/lfw_siamese_train_test.prototxt create mode 100755 examples/siamese/train_lfw_siamese.sh create mode 100644 examples/triplet/convert_lfw_triplet_data.cpp create mode 100755 examples/triplet/create_lfw_triplet.sh create mode 100644 examples/triplet/lfw_triplet.prototxt create mode 100644 examples/triplet/lfw_triplet_solver.prototxt create mode 100644 examples/triplet/lfw_triplet_train_test.prototxt create mode 100755 examples/triplet/train_lfw_triplet.sh diff --git a/examples/siamese/convert_lfw_siamese_data.cpp b/examples/siamese/convert_lfw_siamese_data.cpp new file mode 100644 index 00000000000..d6b5e59a002 --- /dev/null +++ b/examples/siamese/convert_lfw_siamese_data.cpp @@ -0,0 +1,123 @@ +// +// This script converts the MNIST dataset to the leveldb format used +// by caffe to train siamese network. 
+// Usage: +// convert_mnist_data input_image_file input_label_file output_db_file +// The MNIST dataset could be downloaded at +// http://yann.lecun.com/exdb/mnist/ +#include // NOLINT(readability/streams) +#include + +#include "glog/logging.h" +#include "google/protobuf/text_format.h" +#include "leveldb/db.h" +#include "stdint.h" + +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/math_functions.hpp" + +uint32_t swap_endian(uint32_t val) { + val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); + return (val << 16) | (val >> 16); +} + +void read_image(std::ifstream* image_file, std::ifstream* label_file, + uint32_t index, uint32_t rows, uint32_t cols, + char* pixels, char* label) { + image_file->seekg(index * rows * cols + 16); + image_file->read(pixels, rows * cols); + label_file->seekg(index + 8); + label_file->read(label, 1); +} + +void convert_dataset(const char* image_filename, const char* label_filename, + const char* db_filename) { + // Open files + std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); + std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); + CHECK(image_file) << "Unable to open file " << image_filename; + CHECK(label_file) << "Unable to open file " << label_filename; + // Read the magic and the meta data + uint32_t magic; + uint32_t num_items; + uint32_t num_labels; + uint32_t rows; + uint32_t cols; + + image_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + //CHECK_EQ(magic, 2051) << "Incorrect image file magic."; + label_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + //CHECK_EQ(magic, 2049) << "Incorrect label file magic."; + image_file.read(reinterpret_cast(&num_items), 4); + num_items = swap_endian(num_items); + label_file.read(reinterpret_cast(&num_labels), 4); + num_labels = swap_endian(num_labels); + CHECK_EQ(num_items, num_labels); + image_file.read(reinterpret_cast(&rows), 4); + rows = swap_endian(rows); + 
image_file.read(reinterpret_cast(&cols), 4); + cols = swap_endian(cols); + + // Open leveldb + leveldb::DB* db; + leveldb::Options options; + options.create_if_missing = true; + options.error_if_exists = true; + leveldb::Status status = leveldb::DB::Open( + options, db_filename, &db); + CHECK(status.ok()) << "Failed to open leveldb " << db_filename + << ". Is it already existing?"; + + char label_i; + char label_j; + char* pixels = new char[2 * rows * cols]; + const int kMaxKeyLength = 10; + char key[kMaxKeyLength]; + std::string value; + + caffe::Datum datum; + datum.set_channels(2); // one channel for each image in the pair + datum.set_height(rows); + datum.set_width(cols); + LOG(INFO) << "A total of " << num_items << " items."; + LOG(INFO) << "Rows: " << rows << " Cols: " << cols; + for (int itemid = 0; itemid < num_items; ++itemid) { + int i = caffe::caffe_rng_rand() % num_items; // pick a random pair + int j = caffe::caffe_rng_rand() % num_items; + read_image(&image_file, &label_file, i, rows, cols, + pixels, &label_i); + read_image(&image_file, &label_file, j, rows, cols, + pixels + (rows * cols), &label_j); + datum.set_data(pixels, 2*rows*cols); + if (label_i == label_j) { + datum.set_label(1); + } else { + datum.set_label(0); + } + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", itemid); + db->Put(leveldb::WriteOptions(), std::string(key), value); + } + + delete db; + delete pixels; +} + +int main(int argc, char** argv) { + if (argc != 4) { + printf("This script converts the MNIST dataset to the leveldb format used\n" + "by caffe to train a siamese network.\n" + "Usage:\n" + " convert_mnist_data input_image_file input_label_file " + "output_db_file\n" + "The MNIST dataset could be downloaded at\n" + " http://yann.lecun.com/exdb/mnist/\n" + "You should gunzip them after downloading.\n"); + } else { + google::InitGoogleLogging(argv[0]); + convert_dataset(argv[1], argv[2], argv[3]); + } + return 0; +} diff --git 
a/examples/siamese/create_lfw_siamese.sh b/examples/siamese/create_lfw_siamese.sh new file mode 100755 index 00000000000..3790b9eb2a0 --- /dev/null +++ b/examples/siamese/create_lfw_siamese.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env sh +# This script converts the mnist data into leveldb format. + +EXAMPLES=./build/examples/siamese +DATA=./data/lfw + +echo "Creating leveldb..." + +rm -rf ./examples/siamese/lfw_siamese_train_leveldb +rm -rf ./examples/siamese/lfw_siamese_test_leveldb + +$EXAMPLES/convert_lfw_siamese_data.bin \ + $DATA/traindata \ + $DATA/trainlabel \ + ./examples/siamese/lfw_siamese_train_leveldb +$EXAMPLES/convert_mnist_siamese_data.bin \ + $DATA/testdata \ + $DATA/testlabel \ + ./examples/siamese/lfw_siamese_test_leveldb + +echo "Done." diff --git a/examples/siamese/lfw_siamese.prototxt b/examples/siamese/lfw_siamese.prototxt new file mode 100644 index 00000000000..106d9aa76f4 --- /dev/null +++ b/examples/siamese/lfw_siamese.prototxt @@ -0,0 +1,113 @@ +name: "lfw_siamese" +input: "data" +input_dim: 10000 +input_dim: 1 +input_dim: 150 +input_dim: 80 +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 500 + } +} +layer { + name: "relu1" + type: "ReLU" + 
bottom: "ip1" + top: "ip1" +} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "ip1" + top: "ip2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 10 + } +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip2" + top: "feat" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 2 + } +} diff --git a/examples/siamese/lfw_siamese_solver.prototxt b/examples/siamese/lfw_siamese_solver.prototxt new file mode 100644 index 00000000000..2aaafb63c1f --- /dev/null +++ b/examples/siamese/lfw_siamese_solver.prototxt @@ -0,0 +1,25 @@ +# The train/test net protocol buffer definition +net: "examples/siamese/lfw_siamese_train_test.prototxt" +# test_iter specifies how many forward passes the test should carry out. +# In the case of MNIST, we have test batch size 100 and 100 test iterations, +# covering the full 10,000 testing images. +test_iter: 100 +# Carry out testing every 500 training iterations. +test_interval: 500 +# The base learning rate, momentum and the weight decay of the network. 
+base_lr: 0.01 +momentum: 0.9 +weight_decay: 0.0000 +# The learning rate policy +lr_policy: "inv" +gamma: 0.0001 +power: 0.75 +# Display every 100 iterations +display: 100 +# The maximum number of iterations +max_iter: 50000 +# snapshot intermediate results +snapshot: 5000 +snapshot_prefix: "examples/siamese/lfw_siamese" +# solver mode: CPU or GPU +solver_mode: CPU diff --git a/examples/siamese/lfw_siamese_train_test.prototxt b/examples/siamese/lfw_siamese_train_test.prototxt new file mode 100644 index 00000000000..049187bf3d4 --- /dev/null +++ b/examples/siamese/lfw_siamese_train_test.prototxt @@ -0,0 +1,349 @@ +name: "lfw_siamese_train_test" +layer { + name: "pair_data" + type: "Data" + top: "pair_data" + top: "sim" + include { + phase: TRAIN + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/siamese/lfw_siamese_train_leveldb" + batch_size: 64 + } +} +layer { + name: "pair_data" + type: "Data" + top: "pair_data" + top: "sim" + include { + phase: TEST + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/siamese/lfw_siamese_test_leveldb" + batch_size: 100 + } +} +layer { + name: "slice_pair" + type: "Slice" + bottom: "pair_data" + top: "data" + top: "data_p" + slice_param { + slice_dim: 1 + slice_point: 1 + } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + 
weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "ip1" + top: "ip2" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip2" + top: "feat" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "conv1_p" + type: "Convolution" + bottom: "data_p" + top: "conv1_p" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1_p" + type: "Pooling" + bottom: "conv1_p" + top: "pool1_p" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2_p" + type: "Convolution" + bottom: "pool1_p" + top: "conv2_p" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2_p" + 
type: "Pooling" + bottom: "conv2_p" + top: "pool2_p" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1_p" + type: "InnerProduct" + bottom: "pool2_p" + top: "ip1_p" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1_p" + type: "ReLU" + bottom: "ip1_p" + top: "ip1_p" +} +layer { + name: "ip2_p" + type: "InnerProduct" + bottom: "ip1_p" + top: "ip2_p" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat_p" + type: "InnerProduct" + bottom: "ip2_p" + top: "feat_p" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "loss" + type: "ContrastiveLoss" + bottom: "feat" + bottom: "feat_p" + bottom: "sim" + top: "loss" + contrastive_loss_param { + margin: 1 + } +} diff --git a/examples/siamese/train_lfw_siamese.sh b/examples/siamese/train_lfw_siamese.sh new file mode 100755 index 00000000000..0a879a65419 --- /dev/null +++ b/examples/siamese/train_lfw_siamese.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env sh + +TOOLS=./build/tools + +$TOOLS/caffe train --solver=examples/siamese/lfw_siamese_solver.prototxt diff --git a/examples/triplet/convert_lfw_triplet_data.cpp b/examples/triplet/convert_lfw_triplet_data.cpp new file mode 100644 index 00000000000..5efb3090a43 --- /dev/null +++ b/examples/triplet/convert_lfw_triplet_data.cpp @@ -0,0 +1,128 @@ +// This script converts the MNIST dataset to the leveldb format used +// by caffe to train siamese network. 
+// Usage: +// convert_mnist_data input_image_file input_label_file output_db_file +// The MNIST dataset could be downloaded at +// http://yann.lecun.com/exdb/mnist/ +#include // NOLINT(readability/streams) +#include + +#include "glog/logging.h" +#include "google/protobuf/text_format.h" +#include "leveldb/db.h" +#include "stdint.h" + +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/math_functions.hpp" + +uint32_t swap_endian(uint32_t val) { + val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); + return (val << 16) | (val >> 16); +} + +void read_image(std::ifstream* image_file, std::ifstream* label_file, + uint32_t index, uint32_t rows, uint32_t cols, + char* pixels, char* label) { + image_file->seekg(index * rows * cols + 16); + image_file->read(pixels, rows * cols); + label_file->seekg(index + 8); + label_file->read(label, 1); +} + +void convert_dataset(const char* image_filename, const char* label_filename, + const char* db_filename) { + // Open files + std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); + std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); + CHECK(image_file) << "Unable to open file " << image_filename; + CHECK(label_file) << "Unable to open file " << label_filename; + // Read the magic and the meta data + uint32_t magic; + uint32_t num_items; + uint32_t num_labels; + uint32_t rows; + uint32_t cols; + + image_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + //CHECK_EQ(magic, 2051) << "Incorrect image file magic."; + label_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + //CHECK_EQ(magic, 2049) << "Incorrect label file magic."; + image_file.read(reinterpret_cast(&num_items), 4); + num_items = swap_endian(num_items); + label_file.read(reinterpret_cast(&num_labels), 4); + num_labels = swap_endian(num_labels); + CHECK_EQ(num_items, num_labels); + image_file.read(reinterpret_cast(&rows), 4); + rows = swap_endian(rows); + 
image_file.read(reinterpret_cast(&cols), 4); + cols = swap_endian(cols); + + // Open leveldb + leveldb::DB* db; + leveldb::Options options; + options.create_if_missing = true; + options.error_if_exists = true; + leveldb::Status status = leveldb::DB::Open( + options, db_filename, &db); + CHECK(status.ok()) << "Failed to open leveldb " << db_filename + << ". Is it already existing?"; + + char label_i; + char label_j; + char label_k; + char* pixels = new char[3 * rows * cols]; + const int kMaxKeyLength = 10; + char key[kMaxKeyLength]; + std::string value; + + caffe::Datum datum; + datum.set_channels(3); // one channel for each image in the pair + datum.set_height(rows); + datum.set_width(cols); + LOG(INFO) << "A total of " << num_items << " items."; + LOG(INFO) << "Rows: " << rows << " Cols: " << cols; + for (int itemid = 0; itemid < num_items; ++itemid) { + int i = caffe::caffe_rng_rand() % num_items; // pick triplet groups + int j = caffe::caffe_rng_rand() % num_items; + int k = caffe::caffe_rng_rand() % num_items; + read_image(&image_file, &label_file, i, rows, cols, + pixels, &label_i); + read_image(&image_file, &label_file, j, rows, cols, + pixels + (rows * cols), &label_j); + read_image(&image_file, &label_file, k, rows, cols, + pixels + (2 * rows * cols), &label_k); + + datum.set_data(pixels, 3*rows*cols); + if (label_i == label_j && label_i != label_k) { + datum.set_label(1); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", itemid); + db->Put(leveldb::WriteOptions(), std::string(key), value); + } else { + itemid--; + datum.set_label(0); + } + } + + delete db; + delete pixels; +} + +int main(int argc, char** argv) { + if (argc != 4) { + printf("This script converts the MNIST dataset to the leveldb format used\n" + "by caffe to train a siamese network.\n" + "Usage:\n" + " convert_mnist_data input_image_file input_label_file " + "output_db_file\n" + "The MNIST dataset could be downloaded at\n" + " http://yann.lecun.com/exdb/mnist/\n" + 
"You should gunzip them after downloading.\n"); + } else { + google::InitGoogleLogging(argv[0]); + convert_dataset(argv[1], argv[2], argv[3]); + } + return 0; +} diff --git a/examples/triplet/create_lfw_triplet.sh b/examples/triplet/create_lfw_triplet.sh new file mode 100755 index 00000000000..382a9021f10 --- /dev/null +++ b/examples/triplet/create_lfw_triplet.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env sh +# This script converts the lfw data into leveldb format. + +EXAMPLES=./build/examples/triplet +DATA=./data/lfw + +echo "Creating leveldb..." + +rm -rf ./examples/triplet/lfw_triplet_train_leveldb +rm -rf ./examples/triplet/lfw_triplet_test_leveldb + +$EXAMPLES/convert_lfw_triplet_data.bin \ + $DATA/traindata \ + $DATA/trainlabel \ + ./examples/triplet/lfw_triplet_train_leveldb +$EXAMPLES/convert_lfw_triplet_data.bin \ + $DATA/testdata \ + $DATA/testlabel \ + ./examples/triplet/lfw_triplet_test_leveldb + +echo "Done." diff --git a/examples/triplet/lfw_triplet.prototxt b/examples/triplet/lfw_triplet.prototxt new file mode 100644 index 00000000000..9537d1feb8b --- /dev/null +++ b/examples/triplet/lfw_triplet.prototxt @@ -0,0 +1,113 @@ +name: "lfw_siamese" +input: "data" +input_dim: 10000 +input_dim: 1 +input_dim: 150 +input_dim: 130 +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1" + type: "InnerProduct" + 
bottom: "pool2" + top: "ip1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 500 + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "ip1" + top: "ip2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 10 + } +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip2" + top: "feat" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 2 + } +} diff --git a/examples/triplet/lfw_triplet_solver.prototxt b/examples/triplet/lfw_triplet_solver.prototxt new file mode 100644 index 00000000000..eb4c2c369e9 --- /dev/null +++ b/examples/triplet/lfw_triplet_solver.prototxt @@ -0,0 +1,25 @@ +# The train/test net protocol buffer definition +net: "examples/triplet/lfw_triplet_train_test.prototxt" +# test_iter specifies how many forward passes the test should carry out. +# In the case of lfw, we have test batch size 100 and 100 test iterations, +# covering the full 10,000 testing images. +test_iter: 100 +# Carry out testing every 500 training iterations. +test_interval: 500 +# The base learning rate, momentum and the weight decay of the network. 
+base_lr: 0.01 +momentum: 0.9 +weight_decay: 0.0000 +# The learning rate policy +lr_policy: "inv" +gamma: 0.0001 +power: 0.75 +# Display every 100 iterations +display: 100 +# The maximum number of iterations +max_iter: 50000 +# snapshot intermediate results +snapshot: 5000 +snapshot_prefix: "examples/triplet/lfw_triplet" +# solver mode: CPU or GPU +solver_mode: CPU diff --git a/examples/triplet/lfw_triplet_train_test.prototxt b/examples/triplet/lfw_triplet_train_test.prototxt new file mode 100644 index 00000000000..59ef26e90a4 --- /dev/null +++ b/examples/triplet/lfw_triplet_train_test.prototxt @@ -0,0 +1,500 @@ +name: "lfw_triplet_train_test" +layer { + name: "triplet_data" + type: "Data" + top: "triplet_data" + top: "sim" + include { + phase: TRAIN + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/triplet/lfw_triplet_train_leveldb" + batch_size: 64 + } +} +layer { + name: "triplet_data" + type: "Data" + top: "triplet_data" + top: "sim" + include { + phase: TEST + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/triplet/lfw_triplet_test_leveldb" + batch_size: 100 + } +} +layer { + name: "slice_triplet" + type: "Slice" + bottom: "triplet_data" + top: "data" + top: "data_true" + top: "data_false" + slice_param { + slice_dim: 1 + slice_point: 1 + slice_point: 2 + } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + 
convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "ip1" + top: "ip2" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip2" + top: "feat" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "conv1_true" + type: "Convolution" + bottom: "data_true" + top: "conv1_true" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1_true" + type: "Pooling" + bottom: "conv1_true" + top: "pool1_true" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2_true" + type: "Convolution" + bottom: "pool1_true" + top: "conv2_true" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + 
type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2_true" + type: "Pooling" + bottom: "conv2_true" + top: "pool2_true" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1_true" + type: "InnerProduct" + bottom: "pool2_true" + top: "ip1_true" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1_true" + type: "ReLU" + bottom: "ip1_true" + top: "ip1_true" +} +layer { + name: "ip2_true" + type: "InnerProduct" + bottom: "ip1_true" + top: "ip2_true" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat_true" + type: "InnerProduct" + bottom: "ip2_true" + top: "feat_true" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "conv1_false" + type: "Convolution" + bottom: "data_false" + top: "conv1_false" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1_false" + type: "Pooling" + bottom: "conv1_false" + top: "pool1_false" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2_false" + type: "Convolution" + bottom: "pool1_false" + top: "conv2_false" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + 
type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2_false" + type: "Pooling" + bottom: "conv2_false" + top: "pool2_false" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1_false" + type: "InnerProduct" + bottom: "pool2_false" + top: "ip1_false" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1_false" + type: "ReLU" + bottom: "ip1_false" + top: "ip1_false" +} +layer { + name: "ip2_false" + type: "InnerProduct" + bottom: "ip1_false" + top: "ip2_false" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat_false" + type: "InnerProduct" + bottom: "ip2_false" + top: "feat_false" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "loss" + type: "TripletLoss" + bottom: "feat" + bottom: "feat_true" + bottom: "feat_false" + bottom: "sim" + top: "loss" + triplet_loss_param { + margin: 0.2 + } +} + diff --git a/examples/triplet/train_lfw_triplet.sh b/examples/triplet/train_lfw_triplet.sh new file mode 100755 index 00000000000..076738a5e63 --- /dev/null +++ b/examples/triplet/train_lfw_triplet.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env sh + +TOOLS=./build/tools + +$TOOLS/caffe train --solver=examples/triplet/lfw_triplet_solver.prototxt From 455c96ba3f44233c82e272e3786bf76ab168e74f Mon Sep 17 00:00:00 2001 From: Wangyida Date: Thu, 2 Jul 2015 12:35:16 +0800 Subject: [PATCH 21/82] delete unuseful codes --- examples/siamese/convert_lfw_siamese_data.cpp | 2 -- 
examples/triplet/convert_lfw_triplet_data.cpp | 2 -- 2 files changed, 4 deletions(-) diff --git a/examples/siamese/convert_lfw_siamese_data.cpp b/examples/siamese/convert_lfw_siamese_data.cpp index d6b5e59a002..fe134ca9b4e 100644 --- a/examples/siamese/convert_lfw_siamese_data.cpp +++ b/examples/siamese/convert_lfw_siamese_data.cpp @@ -46,10 +46,8 @@ void convert_dataset(const char* image_filename, const char* label_filename, image_file.read(reinterpret_cast(&magic), 4); magic = swap_endian(magic); - //CHECK_EQ(magic, 2051) << "Incorrect image file magic."; label_file.read(reinterpret_cast(&magic), 4); magic = swap_endian(magic); - //CHECK_EQ(magic, 2049) << "Incorrect label file magic."; image_file.read(reinterpret_cast(&num_items), 4); num_items = swap_endian(num_items); label_file.read(reinterpret_cast(&num_labels), 4); diff --git a/examples/triplet/convert_lfw_triplet_data.cpp b/examples/triplet/convert_lfw_triplet_data.cpp index 5efb3090a43..9f65fab76b4 100644 --- a/examples/triplet/convert_lfw_triplet_data.cpp +++ b/examples/triplet/convert_lfw_triplet_data.cpp @@ -45,10 +45,8 @@ void convert_dataset(const char* image_filename, const char* label_filename, image_file.read(reinterpret_cast(&magic), 4); magic = swap_endian(magic); - //CHECK_EQ(magic, 2051) << "Incorrect image file magic."; label_file.read(reinterpret_cast(&magic), 4); magic = swap_endian(magic); - //CHECK_EQ(magic, 2049) << "Incorrect label file magic."; image_file.read(reinterpret_cast(&num_items), 4); num_items = swap_endian(num_items); label_file.read(reinterpret_cast(&num_labels), 4); From 15e09ce7b7288f89334a9a7c86937589ca064cfe Mon Sep 17 00:00:00 2001 From: Wangyida Date: Mon, 6 Jul 2015 14:49:43 +0800 Subject: [PATCH 22/82] Pair wise part combined with Triplrt training --- .../triplet/convert_mnist_triplet_data.cpp | 20 +- .../triplet/mnist_triplet_train_test.prototxt | 300 ++++++++++++++++++ include/caffe/loss_layers.hpp | 7 +- src/caffe/layers/triplet_loss_layer.cpp | 61 +++- 4 files 
changed, 370 insertions(+), 18 deletions(-) diff --git a/examples/triplet/convert_mnist_triplet_data.cpp b/examples/triplet/convert_mnist_triplet_data.cpp index 0cbab642b7c..66fb9abaf14 100644 --- a/examples/triplet/convert_mnist_triplet_data.cpp +++ b/examples/triplet/convert_mnist_triplet_data.cpp @@ -69,16 +69,18 @@ void convert_dataset(const char* image_filename, const char* label_filename, CHECK(status.ok()) << "Failed to open leveldb " << db_filename << ". Is it already existing?"; - char label_i; + char label_i; // label for triplet char label_j; char label_k; - char* pixels = new char[3 * rows * cols]; + char label_l; // label for pair wise + char label_m; + char* pixels = new char[5 * rows * cols]; const int kMaxKeyLength = 10; char key[kMaxKeyLength]; std::string value; caffe::Datum datum; - datum.set_channels(3); // one channel for each image in the pair + datum.set_channels(5); // one channel for each image in the triplet and pair datum.set_height(rows); datum.set_width(cols); LOG(INFO) << "A total of " << num_items << " items."; @@ -87,15 +89,21 @@ void convert_dataset(const char* image_filename, const char* label_filename, int i = caffe::caffe_rng_rand() % num_items; // pick triplet groups int j = caffe::caffe_rng_rand() % num_items; int k = caffe::caffe_rng_rand() % num_items; - read_image(&image_file, &label_file, i, rows, cols, + int l = caffe::caffe_rng_rand() % num_items; // pick pair wise groups + int m = caffe::caffe_rng_rand() % num_items; + read_image(&image_file, &label_file, i, rows, cols, // read triplet groups pixels, &label_i); read_image(&image_file, &label_file, j, rows, cols, pixels + (rows * cols), &label_j); read_image(&image_file, &label_file, k, rows, cols, pixels + (2 * rows * cols), &label_k); + read_image(&image_file, &label_file, l, rows, cols, // read pair wise groups + pixels + (3 * rows * cols), &label_l); + read_image(&image_file, &label_file, m, rows, cols, + pixels + (4 * rows * cols), &label_m); - datum.set_data(pixels, 
3*rows*cols); - if (label_i == label_j && label_i != label_k) { + datum.set_data(pixels, 5*rows*cols); // set data + if ((label_i == label_j && label_i != label_k) && (label_l == label_m)) { datum.set_label(1); datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", itemid); diff --git a/examples/triplet/mnist_triplet_train_test.prototxt b/examples/triplet/mnist_triplet_train_test.prototxt index 12b8e1072fd..c86ef933045 100644 --- a/examples/triplet/mnist_triplet_train_test.prototxt +++ b/examples/triplet/mnist_triplet_train_test.prototxt @@ -38,10 +38,14 @@ layer { top: "data" top: "data_true" top: "data_false" + top: "data_p1" + top: "data_p2" slice_param { slice_dim: 1 slice_point: 1 slice_point: 2 + slice_point: 3 + slice_point: 4 } } layer { @@ -485,12 +489,308 @@ layer { } } } +layer { + name: "conv1_p1" + type: "Convolution" + bottom: "data_p1" + top: "conv1_p1" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1_p1" + type: "Pooling" + bottom: "conv1_p1" + top: "pool1_p1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2_p1" + type: "Convolution" + bottom: "pool1_p1" + top: "conv2_p1" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2_p1" + type: "Pooling" + bottom: "conv2_p1" + top: "pool2_p1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1_p1" + type: "InnerProduct" + bottom: "pool2_p1" + top: "ip1_p1" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + 
weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1_p1" + type: "ReLU" + bottom: "ip1_p1" + top: "ip1_p1" +} +layer { + name: "ip2_p1" + type: "InnerProduct" + bottom: "ip1_p1" + top: "ip2_p1" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat_p1" + type: "InnerProduct" + bottom: "ip2_p1" + top: "feat_p1" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "conv1_p2" + type: "Convolution" + bottom: "data_p2" + top: "conv1_p2" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1_p2" + type: "Pooling" + bottom: "conv1_p2" + top: "pool1_p2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2_p2" + type: "Convolution" + bottom: "pool1_p2" + top: "conv2_p2" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2_p2" + type: "Pooling" + bottom: "conv2_p2" + top: "pool2_p2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1_p2" + type: "InnerProduct" + bottom: "pool2_p2" + top: "ip1_p2" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + 
type: "constant" + } + } +} +layer { + name: "relu1_p2" + type: "ReLU" + bottom: "ip1_p2" + top: "ip1_p2" +} +layer { + name: "ip2_p2" + type: "InnerProduct" + bottom: "ip1_p2" + top: "ip2_p2" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat_p2" + type: "InnerProduct" + bottom: "ip2_p2" + top: "feat_p2" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} layer { name: "loss" type: "TripletLoss" bottom: "feat" bottom: "feat_true" bottom: "feat_false" + bottom: "feat_p1" + bottom: "feat_p2" bottom: "sim" top: "loss" triplet_loss_param { diff --git a/include/caffe/loss_layers.hpp b/include/caffe/loss_layers.hpp index 8993dd70910..3e163790af8 100644 --- a/include/caffe/loss_layers.hpp +++ b/include/caffe/loss_layers.hpp @@ -218,14 +218,14 @@ class TripletLossLayer : public LossLayer { virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); - virtual inline int ExactNumBottomBlobs() const { return 4; } + virtual inline int ExactNumBottomBlobs() const { return 6; } virtual inline const char* type() const { return "TripletLoss"; } /** * Unlike most loss layers, in the TripletLossLayer we can backpropagate * to the first three inputs. 
*/ virtual inline bool AllowForceBackward(const int bottom_index) const { - return bottom_index != 3; + return bottom_index != 5; } protected: @@ -268,12 +268,15 @@ class TripletLossLayer : public LossLayer { Blob diff_; // cached for backward pass Blob diff_pos; Blob diff_neg; + Blob diff_par; Blob dist_sq_; // cached for backward pass Blob dist_sq_pos; Blob dist_sq_neg; + Blob dist_sq_par; Blob diff_sq_; // tmp storage for gpu forward pass Blob diff_sq_pos; Blob diff_sq_neg; + Blob diff_sq_par; Blob summer_vec_; // tmp storage for gpu forward pass }; diff --git a/src/caffe/layers/triplet_loss_layer.cpp b/src/caffe/layers/triplet_loss_layer.cpp index cbbb02b3b5c..a4de6d4ba5f 100644 --- a/src/caffe/layers/triplet_loss_layer.cpp +++ b/src/caffe/layers/triplet_loss_layer.cpp @@ -14,22 +14,31 @@ void TripletLossLayer::LayerSetUp( LossLayer::LayerSetUp(bottom, top); CHECK_EQ(bottom[0]->channels(), bottom[1]->channels()); CHECK_EQ(bottom[0]->channels(), bottom[2]->channels()); + CHECK_EQ(bottom[0]->channels(), bottom[3]->channels()); + CHECK_EQ(bottom[0]->channels(), bottom[4]->channels()); CHECK_EQ(bottom[0]->height(), 1); CHECK_EQ(bottom[0]->width(), 1); CHECK_EQ(bottom[1]->height(), 1); CHECK_EQ(bottom[1]->width(), 1); CHECK_EQ(bottom[2]->height(), 1); CHECK_EQ(bottom[2]->width(), 1); - CHECK_EQ(bottom[3]->channels(), 1); CHECK_EQ(bottom[3]->height(), 1); CHECK_EQ(bottom[3]->width(), 1); + CHECK_EQ(bottom[4]->height(), 1); + CHECK_EQ(bottom[4]->width(), 1); + CHECK_EQ(bottom[5]->channels(), 1); + CHECK_EQ(bottom[5]->height(), 1); + CHECK_EQ(bottom[5]->width(), 1); diff_pos.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); diff_neg.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); + diff_par.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); diff_sq_pos.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); diff_sq_neg.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); + diff_sq_par.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); 
dist_sq_.Reshape(bottom[0]->num(), 1, 1, 1); dist_sq_pos.Reshape(bottom[0]->num(), 1, 1, 1); dist_sq_neg.Reshape(bottom[0]->num(), 1, 1, 1); + dist_sq_par.Reshape(bottom[0]->num(), 1, 1, 1); // vector of ones used to sum along channels summer_vec_.Reshape(bottom[0]->channels(), 1, 1, 1); for (int i = 0; i < bottom[0]->channels(); ++i) @@ -45,27 +54,40 @@ void TripletLossLayer::Forward_cpu( count, bottom[0]->cpu_data(), // a bottom[1]->cpu_data(), // b - diff_pos.mutable_cpu_data()); // a_i-b_i + diff_pos.mutable_cpu_data()); // a_i-b_i: positive caffe_sub( count, bottom[0]->cpu_data(), // a bottom[2]->cpu_data(), // c - diff_neg.mutable_cpu_data()); // a_i-c_i + diff_neg.mutable_cpu_data()); // a_i-c_i: negative + caffe_sub( + count, + bottom[3]->cpu_data(), // d + bottom[4]->cpu_data(), // e + diff_par.mutable_cpu_data()); // d_i-e_i: pair wise const int channels = bottom[0]->channels(); Dtype margin = this->layer_param_.triplet_loss_param().margin(); Dtype loss(0.0); - // Loss component calculated from ab + for (int i = 0; i < bottom[0]->num(); ++i) { + // Triplet loss accumulation + // Loss component calculated from a and b dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(channels, diff_pos.cpu_data() + (i*channels), diff_pos.cpu_data() + (i*channels)); - // ab is a similar pair + // a b is a similar pair for triplet dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i]; - // Loss component calculated from ac + // Loss component calculated from a and c dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(channels, diff_neg.cpu_data() + (i*channels), diff_neg.cpu_data() + (i*channels)); - // ac is a dissimilar pair + // a c is a dissimilar pair for triplet dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg.cpu_data()[i]; - loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); + loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); // loss accumulated accumulated by the triplet part + // Pair wise loss accumulation + // Loss component calculated 
from d and e + dist_sq_par.mutable_cpu_data()[i] = caffe_cpu_dot(channels, + diff_par.cpu_data() + (i*channels), diff_par.cpu_data() + (i*channels)); + // d e is a similar pair for pair wise + loss += dist_sq_par.cpu_data()[i]; // loss accumulated by the pair wise part } loss = loss / static_cast(bottom[0]->num()) / Dtype(2); top[0]->mutable_cpu_data()[0] = loss; @@ -105,7 +127,6 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, } } for (int i = 1; i < 3; ++i) { -// there must be further check to ensure the gradient calc if (propagate_down[i]) { const Dtype sign = (i == 1) ? -1 : 1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / @@ -123,7 +144,8 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, diff_pos.cpu_data() + (j*channels), Dtype(0.0), bout + (j*channels)); - } else { + } + else { // dissimilar pairs caffe_cpu_axpby( channels, @@ -138,6 +160,25 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, } } } + // pair wise back + for (int i = 3; i < 5; ++i) { + if (propagate_down[i]) { + const Dtype sign = (i == 3) ? 
1 : -1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(bottom[i]->num()); + int num = bottom[i]->num(); + int channels = bottom[i]->channels(); + for (int j = 0; j < num; ++j) { + Dtype* bout = bottom[i]->mutable_cpu_diff(); // similar pairs + caffe_cpu_axpby( + channels, + alpha, + diff_par.cpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + } + } + } } #ifdef CPU_ONLY From 9fbee514d1fdb3ad542d9b2e0b8edd0b787e3984 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Mon, 13 Jul 2015 12:10:05 +0800 Subject: [PATCH 23/82] add feature extraction codes in samples --- examples/triplet/extract_triplet_feature.cpp | 195 +++++++++++++++++++ src/caffe/layers/triplet_loss_layer.cpp | 10 +- 2 files changed, 200 insertions(+), 5 deletions(-) create mode 100644 examples/triplet/extract_triplet_feature.cpp diff --git a/examples/triplet/extract_triplet_feature.cpp b/examples/triplet/extract_triplet_feature.cpp new file mode 100644 index 00000000000..aaa9c12bd63 --- /dev/null +++ b/examples/triplet/extract_triplet_feature.cpp @@ -0,0 +1,195 @@ +#include // for snprintf +#include +#include + +#include "boost/algorithm/string.hpp" +#include "google/protobuf/text_format.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/net.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/db.hpp" +#include "caffe/util/io.hpp" +#include "caffe/vision_layers.hpp" + +using caffe::Blob; +using caffe::Caffe; +using caffe::Datum; +using caffe::Net; +using boost::shared_ptr; +using std::string; +namespace db = caffe::db; + +template +int feature_extraction_pipeline(int argc, char** argv); + +int main(int argc, char** argv) { + return feature_extraction_pipeline(argc, argv); +// return feature_extraction_pipeline(argc, argv); +} + +template +int feature_extraction_pipeline(int argc, char** argv) { + ::google::InitGoogleLogging(argv[0]); + const int num_required_args = 7; + if (argc < num_required_args) { + LOG(ERROR)<< + "This program takes 
in a trained network and an input data layer, and then" + " extract features of the input data produced by the net.\n" + "Usage: extract_features pretrained_net_param" + " feature_extraction_proto_file extract_feature_blob_name1[,name2,...]" + " save_feature_dataset_name1[,name2,...] num_mini_batches db_type" + " [CPU/GPU] [DEVICE_ID=0]\n" + "Note: you can extract multiple features in one pass by specifying" + " multiple feature blob names and dataset names seperated by ','." + " The names cannot contain white space characters and the number of blobs" + " and datasets must be equal."; + return 1; + } + int arg_pos = num_required_args; + + arg_pos = num_required_args; + if (argc > arg_pos && strcmp(argv[arg_pos], "GPU") == 0) { + LOG(ERROR)<< "Using GPU"; + int device_id = 0; + if (argc > arg_pos + 1) { + device_id = atoi(argv[arg_pos + 1]); + CHECK_GE(device_id, 0); + } + LOG(ERROR) << "Using Device_id=" << device_id; + Caffe::SetDevice(device_id); + Caffe::set_mode(Caffe::GPU); + } else { + LOG(ERROR) << "Using CPU"; + Caffe::set_mode(Caffe::CPU); + } + + arg_pos = 0; // the name of the executable + std::string pretrained_binary_proto(argv[++arg_pos]); + + // Expected prototxt contains at least one data layer such as + // the layer data_layer_name and one feature blob such as the + // fc7 top blob to extract features. 
+ /* + layers { + name: "data_layer_name" + type: DATA + data_param { + source: "/path/to/your/images/to/extract/feature/images_leveldb" + mean_file: "/path/to/your/image_mean.binaryproto" + batch_size: 128 + crop_size: 227 + mirror: false + } + top: "data_blob_name" + top: "label_blob_name" + } + layers { + name: "drop7" + type: DROPOUT + dropout_param { + dropout_ratio: 0.5 + } + bottom: "fc7" + top: "fc7" + } + */ + std::string feature_extraction_proto(argv[++arg_pos]); + boost::shared_ptr > feature_extraction_net( + new Net(feature_extraction_proto, caffe::TEST)); + feature_extraction_net->CopyTrainedLayersFrom(pretrained_binary_proto); + + std::string extract_feature_blob_names(argv[++arg_pos]); + std::vector blob_names; + boost::split(blob_names, extract_feature_blob_names, boost::is_any_of(",")); + + std::string save_feature_dataset_names(argv[++arg_pos]); + std::vector dataset_names; + boost::split(dataset_names, save_feature_dataset_names, + boost::is_any_of(",")); + CHECK_EQ(blob_names.size(), dataset_names.size()) << + " the number of blob names and dataset names must be equal"; + size_t num_features = blob_names.size(); + + for (size_t i = 0; i < num_features; i++) { + CHECK(feature_extraction_net->has_blob(blob_names[i])) + << "Unknown feature blob name " << blob_names[i] + << " in the network " << feature_extraction_proto; + } + + int num_mini_batches = atoi(argv[++arg_pos]); + + /*std::vector > feature_dbs; + std::vector > txns; + const char* db_type = argv[++arg_pos]; + for (size_t i = 0; i < num_features; ++i) { + LOG(INFO)<< "Opening dataset " << dataset_names[i]; + shared_ptr db(db::GetDB(db_type)); + db->Open(dataset_names.at(i), db::NEW); + feature_dbs.push_back(db); + shared_ptr txn(db->NewTransaction()); + txns.push_back(txn); + }*/ + std::vector files; + for (size_t i = 0; i < num_features; ++i) + { + LOG(INFO) << "Opening file " << dataset_names[i]; + FILE * temp = NULL; + fopen_s(&temp,dataset_names[i].c_str(), "wb"); + 
files.push_back(temp); + } + + + LOG(ERROR)<< "Extacting Features"; + + bool header_flag = true; + + Datum datum; + const int kMaxKeyStrLength = 100; + char key_str[kMaxKeyStrLength]; + std::vector*> input_vec; + std::vector image_indices(num_features, 0); + for (int batch_index = 0; batch_index < num_mini_batches; ++batch_index) { + feature_extraction_net->Forward(input_vec); + for (int i = 0; i < num_features; ++i) { + const boost::shared_ptr > feature_blob = feature_extraction_net + ->blob_by_name(blob_names[i]); + int batch_size = feature_blob->num(); + int dim_features = feature_blob->count() / batch_size; + if (batch_index == 0) + { + int fea_num = batch_size*num_mini_batches; + fwrite(&dim_features, sizeof(int), 1, files[i]); + fwrite(&fea_num, sizeof(int), 1, files[i]); + header_flag = false; + } + const Dtype* feature_blob_data; + for (int n = 0; n < batch_size; ++n) { + + feature_blob_data = feature_blob->cpu_data() + + feature_blob->offset(n); + fwrite(feature_blob_data, sizeof(Dtype), dim_features, files[i]); + + ++image_indices[i]; + if (image_indices[i] % 1000 == 0) { + LOG(ERROR)<< "Extracted features of " << image_indices[i] << + " query images for feature blob " << blob_names[i]; + } + } // for (int n = 0; n < batch_size; ++n) + } // for (int i = 0; i < num_features; ++i) + } // for (int batch_index = 0; batch_index < num_mini_batches; ++batch_index) + // write the last batch + for (int i = 0; i < num_features; ++i) { + /* if (image_indices[i] % 1000 != 0) { + txns.at(i)->Commit(); + } + LOG(ERROR)<< "Extracted features of " << image_indices[i] << + " query images for feature blob " << blob_names[i]; + feature_dbs.at(i)->Close();*/ + fclose(files[i]); + } + + LOG(ERROR)<< "Successfully extracted the features!"; + return 0; +} diff --git a/src/caffe/layers/triplet_loss_layer.cpp b/src/caffe/layers/triplet_loss_layer.cpp index a4de6d4ba5f..16082620a01 100644 --- a/src/caffe/layers/triplet_loss_layer.cpp +++ b/src/caffe/layers/triplet_loss_layer.cpp 
@@ -81,13 +81,15 @@ void TripletLossLayer::Forward_cpu( diff_neg.cpu_data() + (i*channels), diff_neg.cpu_data() + (i*channels)); // a c is a dissimilar pair for triplet dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg.cpu_data()[i]; - loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); // loss accumulated accumulated by the triplet part + // loss accumulated accumulated by the triplet part + loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); // Pair wise loss accumulation // Loss component calculated from d and e dist_sq_par.mutable_cpu_data()[i] = caffe_cpu_dot(channels, diff_par.cpu_data() + (i*channels), diff_par.cpu_data() + (i*channels)); // d e is a similar pair for pair wise - loss += dist_sq_par.cpu_data()[i]; // loss accumulated by the pair wise part + // loss accumulated by the pair wise part + loss += dist_sq_par.cpu_data()[i]; } loss = loss / static_cast(bottom[0]->num()) / Dtype(2); top[0]->mutable_cpu_data()[0] = loss; @@ -97,7 +99,6 @@ template void TripletLossLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { Dtype margin = this->layer_param_.triplet_loss_param().margin(); -// there must be further check to ensure the gradient calc if (propagate_down[0]) { const Dtype sign = 1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / @@ -144,8 +145,7 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, diff_pos.cpu_data() + (j*channels), Dtype(0.0), bout + (j*channels)); - } - else { + } else { // dissimilar pairs caffe_cpu_axpby( channels, From 4a351a33a1a8fbfc8434044433d275d4f4ccbb6f Mon Sep 17 00:00:00 2001 From: Wangyida Date: Wed, 15 Jul 2015 10:42:04 +0800 Subject: [PATCH 24/82] add 3d network training param --- examples/triplet/3d_triplet.prototxt | 110 ++++++ examples/triplet/3d_triplet_solver.prototxt | 25 ++ ...rototxt => 3d_triplet_train_test.prototxt} | 341 ++++++++++++++---- examples/triplet/convert_3d_triplet_data.cpp | 136 +++++++ examples/triplet/create_3d_triplet.sh 
| 21 ++ examples/triplet/extract_triplet_feature.cpp | 195 ---------- examples/triplet/train_3d_triplet.sh | 5 + 7 files changed, 572 insertions(+), 261 deletions(-) create mode 100644 examples/triplet/3d_triplet.prototxt create mode 100644 examples/triplet/3d_triplet_solver.prototxt rename examples/triplet/{mnist_orpe_train_test.prototxt => 3d_triplet_train_test.prototxt} (66%) create mode 100644 examples/triplet/convert_3d_triplet_data.cpp create mode 100755 examples/triplet/create_3d_triplet.sh delete mode 100644 examples/triplet/extract_triplet_feature.cpp create mode 100755 examples/triplet/train_3d_triplet.sh diff --git a/examples/triplet/3d_triplet.prototxt b/examples/triplet/3d_triplet.prototxt new file mode 100644 index 00000000000..6865d67a6c3 --- /dev/null +++ b/examples/triplet/3d_triplet.prototxt @@ -0,0 +1,110 @@ +name: "3d_triplet" +input: "data" +input_dim: 276 +input_dim: 1 +input_dim: 64 +input_dim: 64 +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 16 + kernel_size: 8 + stride: 1 + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "pool1" + top: "pool1" +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 7 + kernel_size: 5 + stride: 1 + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu2" + type: "ReLU" + bottom: "pool2" + top: "pool2" +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 256 + } +} +layer { + name: "relu3" + type: 
"ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip1" + top: "feat" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 4 + } +} diff --git a/examples/triplet/3d_triplet_solver.prototxt b/examples/triplet/3d_triplet_solver.prototxt new file mode 100644 index 00000000000..8dd159e92c5 --- /dev/null +++ b/examples/triplet/3d_triplet_solver.prototxt @@ -0,0 +1,25 @@ +# The train/test net protocol buffer definition +net: "examples/triplet/lfw_triplet_train_test.prototxt" +# test_iter specifies how many forward passes the test should carry out. +# In the case of lfw, we have test batch size 23 and 23 test iterations, +# covering the full 276 testing images. +test_iter: 23 +# Carry out testing every 500 training iterations. +test_interval: 23 +# The base learning rate, momentum and the weight decay of the network. +base_lr: 0.01 +momentum: 0.9 +weight_decay: 0.0000 +# The learning rate policy +lr_policy: "inv" +gamma: 0.0001 +power: 0.75 +# Display every 100 iterations +display: 100 +# The maximum number of iterations +max_iter: 1000 +# snapshot intermediate results +snapshot: 200 +snapshot_prefix: "examples/triplet/3d_triplet" +# solver mode: CPU or GPU +solver_mode: CPU diff --git a/examples/triplet/mnist_orpe_train_test.prototxt b/examples/triplet/3d_triplet_train_test.prototxt similarity index 66% rename from examples/triplet/mnist_orpe_train_test.prototxt rename to examples/triplet/3d_triplet_train_test.prototxt index afad6f9051e..bff19047ab8 100644 --- a/examples/triplet/mnist_orpe_train_test.prototxt +++ b/examples/triplet/3d_triplet_train_test.prototxt @@ -1,4 +1,4 @@ -name: "mnist_triplet_train_test" +name: "3d_triplet_train_test" layer { name: "triplet_data" type: "Data" @@ -11,8 +11,8 @@ layer { scale: 0.00390625 } data_param { - source: "examples/triplet/mnist_triplet_train_leveldb" - batch_size: 64 + source: "examples/triplet/3d_triplet_train_leveldb" + batch_size: 23 } } 
layer { @@ -27,8 +27,8 @@ layer { scale: 0.00390625 } data_param { - source: "examples/triplet/mnist_triplet_test_leveldb" - batch_size: 100 + source: "examples/triplet/3d_triplet_test_leveldb" + batch_size: 23 } } layer { @@ -38,10 +38,14 @@ layer { top: "data" top: "data_true" top: "data_false" + top: "data_p1" + top: "data_p2" slice_param { slice_dim: 1 slice_point: 1 slice_point: 2 + slice_point: 3 + slice_point: 4 } } layer { @@ -157,33 +161,10 @@ layer { bottom: "ip1" top: "ip1" } -layer { - name: "ip2" - type: "InnerProduct" - bottom: "ip1" - top: "ip2" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} layer { name: "feat" type: "InnerProduct" - bottom: "ip2" + bottom: "ip1" top: "feat" param { name: "feat_w" @@ -194,7 +175,7 @@ layer { lr_mult: 2 } inner_product_param { - num_output: 2 + num_output: 4 weight_filler { type: "xavier" } @@ -217,7 +198,7 @@ layer { lr_mult: 2 } convolution_param { - num_output: 15 + num_output: 16 kernel_size: 8 stride: 1 weight_filler { @@ -316,33 +297,10 @@ layer { bottom: "ip1_true" top: "ip1_true" } -layer { - name: "ip2_true" - type: "InnerProduct" - bottom: "ip1_true" - top: "ip2_true" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} layer { name: "feat_true" type: "InnerProduct" - bottom: "ip2_true" + bottom: "ip1_true" top: "feat_true" param { name: "feat_w" @@ -353,7 +311,7 @@ layer { lr_mult: 2 } inner_product_param { - num_output: 2 + num_output: 4 weight_filler { type: "xavier" } @@ -476,20 +434,20 @@ layer { top: "ip1_false" } layer { - name: "ip2_false" + name: "feat_false" type: "InnerProduct" bottom: "ip1_false" - top: "ip2_false" + top: "feat_false" param { - name: "ip2_w" + name: "feat_w" 
lr_mult: 1 } param { - name: "ip2_b" + name: "feat_b" lr_mult: 2 } inner_product_param { - num_output: 10 + num_output: 4 weight_filler { type: "xavier" } @@ -499,10 +457,259 @@ layer { } } layer { - name: "feat_false" + name: "conv1_p1" + type: "Convolution" + bottom: "data_p1" + top: "conv1_p1" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 16 + kernel_size: 8 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1_p1" + type: "Pooling" + bottom: "conv1_p1" + top: "pool1_p1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu1_p1" + type: "ReLU" + bottom: "pool1_p1" + top: "pool1_p1" +} +layer { + name: "conv2_p1" + type: "Convolution" + bottom: "pool1_p1" + top: "conv2_p1" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 7 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2_p1" + type: "Pooling" + bottom: "conv2_p1" + top: "pool2_p1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu2_p1" + type: "ReLU" + bottom: "pool2_p1" + top: "pool2_p1" +} +layer { + name: "ip1_p1" type: "InnerProduct" - bottom: "ip2_false" - top: "feat_false" + bottom: "pool2_p1" + top: "ip1_p1" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 256 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu3_p1" + type: "ReLU" + bottom: "ip1_p1" + top: "ip1_p1" +} +layer { + name: "feat_p1" + type: "InnerProduct" + bottom: "ip1_p1" + top: "feat_p1" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 4 + weight_filler { + 
type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "conv1_p2" + type: "Convolution" + bottom: "data_p2" + top: "conv1_p2" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 16 + kernel_size: 8 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1_p2" + type: "Pooling" + bottom: "conv1_p2" + top: "pool1_p2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu1_p2" + type: "ReLU" + bottom: "pool1_p2" + top: "pool1_p2" +} +layer { + name: "conv2_p2" + type: "Convolution" + bottom: "pool1_p2" + top: "conv2_p2" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 7 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2_p2" + type: "Pooling" + bottom: "conv2_p2" + top: "pool2_p2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu2_p2" + type: "ReLU" + bottom: "pool2_p2" + top: "pool2_p2" +} +layer { + name: "ip1_p2" + type: "InnerProduct" + bottom: "pool2_p2" + top: "ip1_p2" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 256 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu3_p2" + type: "ReLU" + bottom: "ip1_p2" + top: "ip1_p2" +} +layer { + name: "feat_p2" + type: "InnerProduct" + bottom: "ip1_p2" + top: "feat_p2" param { name: "feat_w" lr_mult: 1 @@ -512,7 +719,7 @@ layer { lr_mult: 2 } inner_product_param { - num_output: 2 + num_output: 4 weight_filler { type: "xavier" } @@ -527,10 +734,12 @@ layer { bottom: "feat" bottom: "feat_true" bottom: "feat_false" + bottom: "feat_p1" + bottom: "feat_p2" bottom: "sim" top: "loss" triplet_loss_param { 
- margin: 3 + margin: 1 } } diff --git a/examples/triplet/convert_3d_triplet_data.cpp b/examples/triplet/convert_3d_triplet_data.cpp new file mode 100644 index 00000000000..66fb9abaf14 --- /dev/null +++ b/examples/triplet/convert_3d_triplet_data.cpp @@ -0,0 +1,136 @@ +// This script converts the MNIST dataset to the leveldb format used +// by caffe to train siamese network. +// Usage: +// convert_mnist_data input_image_file input_label_file output_db_file +// The MNIST dataset could be downloaded at +// http://yann.lecun.com/exdb/mnist/ +#include // NOLINT(readability/streams) +#include + +#include "glog/logging.h" +#include "google/protobuf/text_format.h" +#include "leveldb/db.h" +#include "stdint.h" + +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/math_functions.hpp" + +uint32_t swap_endian(uint32_t val) { + val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); + return (val << 16) | (val >> 16); +} + +void read_image(std::ifstream* image_file, std::ifstream* label_file, + uint32_t index, uint32_t rows, uint32_t cols, + char* pixels, char* label) { + image_file->seekg(index * rows * cols + 16); + image_file->read(pixels, rows * cols); + label_file->seekg(index + 8); + label_file->read(label, 1); +} + +void convert_dataset(const char* image_filename, const char* label_filename, + const char* db_filename) { + // Open files + std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); + std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); + CHECK(image_file) << "Unable to open file " << image_filename; + CHECK(label_file) << "Unable to open file " << label_filename; + // Read the magic and the meta data + uint32_t magic; + uint32_t num_items; + uint32_t num_labels; + uint32_t rows; + uint32_t cols; + + image_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + CHECK_EQ(magic, 2051) << "Incorrect image file magic."; + label_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + 
CHECK_EQ(magic, 2049) << "Incorrect label file magic."; + image_file.read(reinterpret_cast(&num_items), 4); + num_items = swap_endian(num_items); + label_file.read(reinterpret_cast(&num_labels), 4); + num_labels = swap_endian(num_labels); + CHECK_EQ(num_items, num_labels); + image_file.read(reinterpret_cast(&rows), 4); + rows = swap_endian(rows); + image_file.read(reinterpret_cast(&cols), 4); + cols = swap_endian(cols); + + // Open leveldb + leveldb::DB* db; + leveldb::Options options; + options.create_if_missing = true; + options.error_if_exists = true; + leveldb::Status status = leveldb::DB::Open( + options, db_filename, &db); + CHECK(status.ok()) << "Failed to open leveldb " << db_filename + << ". Is it already existing?"; + + char label_i; // label for triplet + char label_j; + char label_k; + char label_l; // label for pair wise + char label_m; + char* pixels = new char[5 * rows * cols]; + const int kMaxKeyLength = 10; + char key[kMaxKeyLength]; + std::string value; + + caffe::Datum datum; + datum.set_channels(5); // one channel for each image in the triplet and pair + datum.set_height(rows); + datum.set_width(cols); + LOG(INFO) << "A total of " << num_items << " items."; + LOG(INFO) << "Rows: " << rows << " Cols: " << cols; + for (int itemid = 0; itemid < num_items; ++itemid) { + int i = caffe::caffe_rng_rand() % num_items; // pick triplet groups + int j = caffe::caffe_rng_rand() % num_items; + int k = caffe::caffe_rng_rand() % num_items; + int l = caffe::caffe_rng_rand() % num_items; // pick pair wise groups + int m = caffe::caffe_rng_rand() % num_items; + read_image(&image_file, &label_file, i, rows, cols, // read triplet groups + pixels, &label_i); + read_image(&image_file, &label_file, j, rows, cols, + pixels + (rows * cols), &label_j); + read_image(&image_file, &label_file, k, rows, cols, + pixels + (2 * rows * cols), &label_k); + read_image(&image_file, &label_file, l, rows, cols, // read pair wise groups + pixels + (3 * rows * cols), &label_l); + 
read_image(&image_file, &label_file, m, rows, cols, + pixels + (4 * rows * cols), &label_m); + + datum.set_data(pixels, 5*rows*cols); // set data + if ((label_i == label_j && label_i != label_k) && (label_l == label_m)) { + datum.set_label(1); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", itemid); + db->Put(leveldb::WriteOptions(), std::string(key), value); + } else { + itemid--; + datum.set_label(0); + } + } + + delete db; + delete pixels; +} + +int main(int argc, char** argv) { + if (argc != 4) { + printf("This script converts the MNIST dataset to the leveldb format used\n" + "by caffe to train a siamese network.\n" + "Usage:\n" + " convert_mnist_data input_image_file input_label_file " + "output_db_file\n" + "The MNIST dataset could be downloaded at\n" + " http://yann.lecun.com/exdb/mnist/\n" + "You should gunzip them after downloading.\n"); + } else { + google::InitGoogleLogging(argv[0]); + convert_dataset(argv[1], argv[2], argv[3]); + } + return 0; +} diff --git a/examples/triplet/create_3d_triplet.sh b/examples/triplet/create_3d_triplet.sh new file mode 100755 index 00000000000..4d5e647db88 --- /dev/null +++ b/examples/triplet/create_3d_triplet.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env sh +# This script converts the mnist data into leveldb format. + +EXAMPLES=./build/examples/triplet +DATA=./data/linemod + +echo "Creating leveldb..." + +rm -rf ./examples/triplet/mnist_3d_train_leveldb +rm -rf ./examples/triplet/mnist_3d_test_leveldb + +$EXAMPLES/convert_3d_triplet_data.bin \ + $DATA/train-images-idx3-ubyte \ + $DATA/train-labels-idx1-ubyte \ + ./examples/triplet/3d_triplet_train_leveldb +$EXAMPLES/convert_3d_triplet_data.bin \ + $DATA/test-images-idx3-ubyte \ + $DATA/test-labels-idx1-ubyte \ + ./examples/triplet/3d_triplet_test_leveldb + +echo "Done." 
diff --git a/examples/triplet/extract_triplet_feature.cpp b/examples/triplet/extract_triplet_feature.cpp deleted file mode 100644 index aaa9c12bd63..00000000000 --- a/examples/triplet/extract_triplet_feature.cpp +++ /dev/null @@ -1,195 +0,0 @@ -#include // for snprintf -#include -#include - -#include "boost/algorithm/string.hpp" -#include "google/protobuf/text_format.h" - -#include "caffe/blob.hpp" -#include "caffe/common.hpp" -#include "caffe/net.hpp" -#include "caffe/proto/caffe.pb.h" -#include "caffe/util/db.hpp" -#include "caffe/util/io.hpp" -#include "caffe/vision_layers.hpp" - -using caffe::Blob; -using caffe::Caffe; -using caffe::Datum; -using caffe::Net; -using boost::shared_ptr; -using std::string; -namespace db = caffe::db; - -template -int feature_extraction_pipeline(int argc, char** argv); - -int main(int argc, char** argv) { - return feature_extraction_pipeline(argc, argv); -// return feature_extraction_pipeline(argc, argv); -} - -template -int feature_extraction_pipeline(int argc, char** argv) { - ::google::InitGoogleLogging(argv[0]); - const int num_required_args = 7; - if (argc < num_required_args) { - LOG(ERROR)<< - "This program takes in a trained network and an input data layer, and then" - " extract features of the input data produced by the net.\n" - "Usage: extract_features pretrained_net_param" - " feature_extraction_proto_file extract_feature_blob_name1[,name2,...]" - " save_feature_dataset_name1[,name2,...] num_mini_batches db_type" - " [CPU/GPU] [DEVICE_ID=0]\n" - "Note: you can extract multiple features in one pass by specifying" - " multiple feature blob names and dataset names seperated by ','." 
- " The names cannot contain white space characters and the number of blobs" - " and datasets must be equal."; - return 1; - } - int arg_pos = num_required_args; - - arg_pos = num_required_args; - if (argc > arg_pos && strcmp(argv[arg_pos], "GPU") == 0) { - LOG(ERROR)<< "Using GPU"; - int device_id = 0; - if (argc > arg_pos + 1) { - device_id = atoi(argv[arg_pos + 1]); - CHECK_GE(device_id, 0); - } - LOG(ERROR) << "Using Device_id=" << device_id; - Caffe::SetDevice(device_id); - Caffe::set_mode(Caffe::GPU); - } else { - LOG(ERROR) << "Using CPU"; - Caffe::set_mode(Caffe::CPU); - } - - arg_pos = 0; // the name of the executable - std::string pretrained_binary_proto(argv[++arg_pos]); - - // Expected prototxt contains at least one data layer such as - // the layer data_layer_name and one feature blob such as the - // fc7 top blob to extract features. - /* - layers { - name: "data_layer_name" - type: DATA - data_param { - source: "/path/to/your/images/to/extract/feature/images_leveldb" - mean_file: "/path/to/your/image_mean.binaryproto" - batch_size: 128 - crop_size: 227 - mirror: false - } - top: "data_blob_name" - top: "label_blob_name" - } - layers { - name: "drop7" - type: DROPOUT - dropout_param { - dropout_ratio: 0.5 - } - bottom: "fc7" - top: "fc7" - } - */ - std::string feature_extraction_proto(argv[++arg_pos]); - boost::shared_ptr > feature_extraction_net( - new Net(feature_extraction_proto, caffe::TEST)); - feature_extraction_net->CopyTrainedLayersFrom(pretrained_binary_proto); - - std::string extract_feature_blob_names(argv[++arg_pos]); - std::vector blob_names; - boost::split(blob_names, extract_feature_blob_names, boost::is_any_of(",")); - - std::string save_feature_dataset_names(argv[++arg_pos]); - std::vector dataset_names; - boost::split(dataset_names, save_feature_dataset_names, - boost::is_any_of(",")); - CHECK_EQ(blob_names.size(), dataset_names.size()) << - " the number of blob names and dataset names must be equal"; - size_t num_features = 
blob_names.size(); - - for (size_t i = 0; i < num_features; i++) { - CHECK(feature_extraction_net->has_blob(blob_names[i])) - << "Unknown feature blob name " << blob_names[i] - << " in the network " << feature_extraction_proto; - } - - int num_mini_batches = atoi(argv[++arg_pos]); - - /*std::vector > feature_dbs; - std::vector > txns; - const char* db_type = argv[++arg_pos]; - for (size_t i = 0; i < num_features; ++i) { - LOG(INFO)<< "Opening dataset " << dataset_names[i]; - shared_ptr db(db::GetDB(db_type)); - db->Open(dataset_names.at(i), db::NEW); - feature_dbs.push_back(db); - shared_ptr txn(db->NewTransaction()); - txns.push_back(txn); - }*/ - std::vector files; - for (size_t i = 0; i < num_features; ++i) - { - LOG(INFO) << "Opening file " << dataset_names[i]; - FILE * temp = NULL; - fopen_s(&temp,dataset_names[i].c_str(), "wb"); - files.push_back(temp); - } - - - LOG(ERROR)<< "Extacting Features"; - - bool header_flag = true; - - Datum datum; - const int kMaxKeyStrLength = 100; - char key_str[kMaxKeyStrLength]; - std::vector*> input_vec; - std::vector image_indices(num_features, 0); - for (int batch_index = 0; batch_index < num_mini_batches; ++batch_index) { - feature_extraction_net->Forward(input_vec); - for (int i = 0; i < num_features; ++i) { - const boost::shared_ptr > feature_blob = feature_extraction_net - ->blob_by_name(blob_names[i]); - int batch_size = feature_blob->num(); - int dim_features = feature_blob->count() / batch_size; - if (batch_index == 0) - { - int fea_num = batch_size*num_mini_batches; - fwrite(&dim_features, sizeof(int), 1, files[i]); - fwrite(&fea_num, sizeof(int), 1, files[i]); - header_flag = false; - } - const Dtype* feature_blob_data; - for (int n = 0; n < batch_size; ++n) { - - feature_blob_data = feature_blob->cpu_data() + - feature_blob->offset(n); - fwrite(feature_blob_data, sizeof(Dtype), dim_features, files[i]); - - ++image_indices[i]; - if (image_indices[i] % 1000 == 0) { - LOG(ERROR)<< "Extracted features of " << 
image_indices[i] << - " query images for feature blob " << blob_names[i]; - } - } // for (int n = 0; n < batch_size; ++n) - } // for (int i = 0; i < num_features; ++i) - } // for (int batch_index = 0; batch_index < num_mini_batches; ++batch_index) - // write the last batch - for (int i = 0; i < num_features; ++i) { - /* if (image_indices[i] % 1000 != 0) { - txns.at(i)->Commit(); - } - LOG(ERROR)<< "Extracted features of " << image_indices[i] << - " query images for feature blob " << blob_names[i]; - feature_dbs.at(i)->Close();*/ - fclose(files[i]); - } - - LOG(ERROR)<< "Successfully extracted the features!"; - return 0; -} diff --git a/examples/triplet/train_3d_triplet.sh b/examples/triplet/train_3d_triplet.sh new file mode 100755 index 00000000000..e421af54493 --- /dev/null +++ b/examples/triplet/train_3d_triplet.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env sh + +TOOLS=./build/tools + +$TOOLS/caffe train --solver=examples/triplet/3d_triplet_solver.prototxt From 02bae98536e5dea47511c8ced8a7b642776f0033 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Fri, 24 Jul 2015 11:36:02 +0800 Subject: [PATCH 25/82] ready for a review on triplet training using the 3D data, net work definition is the same as described in paper --- examples/triplet/create_3d_triplet.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/examples/triplet/create_3d_triplet.sh b/examples/triplet/create_3d_triplet.sh index 4d5e647db88..3cd8ee469ce 100755 --- a/examples/triplet/create_3d_triplet.sh +++ b/examples/triplet/create_3d_triplet.sh @@ -6,16 +6,16 @@ DATA=./data/linemod echo "Creating leveldb..." 
-rm -rf ./examples/triplet/mnist_3d_train_leveldb -rm -rf ./examples/triplet/mnist_3d_test_leveldb +rm -rf ./examples/triplet/3d_triplet_train_leveldb +rm -rf ./examples/triplet/3d_triplet_test_leveldb $EXAMPLES/convert_3d_triplet_data.bin \ - $DATA/train-images-idx3-ubyte \ - $DATA/train-labels-idx1-ubyte \ + $DATA/binary_image_train \ + $DATA/binary_label_train \ ./examples/triplet/3d_triplet_train_leveldb $EXAMPLES/convert_3d_triplet_data.bin \ - $DATA/test-images-idx3-ubyte \ - $DATA/test-labels-idx1-ubyte \ + $DATA/binary_image_test \ + $DATA/binary_label_test \ ./examples/triplet/3d_triplet_test_leveldb echo "Done." From 050b57fa2a96c7b855370159576d3c41e38e2ac4 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Fri, 24 Jul 2015 21:43:44 +0800 Subject: [PATCH 26/82] delete spaces --- examples/triplet/convert_3d_triplet_data.cpp | 14 +++++++++----- examples/triplet/convert_mnist_triplet_data.cpp | 14 +++++++++----- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/examples/triplet/convert_3d_triplet_data.cpp b/examples/triplet/convert_3d_triplet_data.cpp index 66fb9abaf14..0fb87a17b4b 100644 --- a/examples/triplet/convert_3d_triplet_data.cpp +++ b/examples/triplet/convert_3d_triplet_data.cpp @@ -72,7 +72,7 @@ void convert_dataset(const char* image_filename, const char* label_filename, char label_i; // label for triplet char label_j; char label_k; - char label_l; // label for pair wise + char label_l; // label for pair wise char label_m; char* pixels = new char[5 * rows * cols]; const int kMaxKeyLength = 10; @@ -86,18 +86,22 @@ void convert_dataset(const char* image_filename, const char* label_filename, LOG(INFO) << "A total of " << num_items << " items."; LOG(INFO) << "Rows: " << rows << " Cols: " << cols; for (int itemid = 0; itemid < num_items; ++itemid) { - int i = caffe::caffe_rng_rand() % num_items; // pick triplet groups + // pick triplet groups + int i = caffe::caffe_rng_rand() % num_items; int j = caffe::caffe_rng_rand() % num_items; int k = 
caffe::caffe_rng_rand() % num_items; - int l = caffe::caffe_rng_rand() % num_items; // pick pair wise groups + // pick pair wise groups + int l = caffe::caffe_rng_rand() % num_items; int m = caffe::caffe_rng_rand() % num_items; - read_image(&image_file, &label_file, i, rows, cols, // read triplet groups + // read triplet groups + read_image(&image_file, &label_file, i, rows, cols, pixels, &label_i); read_image(&image_file, &label_file, j, rows, cols, pixels + (rows * cols), &label_j); read_image(&image_file, &label_file, k, rows, cols, pixels + (2 * rows * cols), &label_k); - read_image(&image_file, &label_file, l, rows, cols, // read pair wise groups + // read pair wise groups + read_image(&image_file, &label_file, l, rows, cols, pixels + (3 * rows * cols), &label_l); read_image(&image_file, &label_file, m, rows, cols, pixels + (4 * rows * cols), &label_m); diff --git a/examples/triplet/convert_mnist_triplet_data.cpp b/examples/triplet/convert_mnist_triplet_data.cpp index 66fb9abaf14..c59a75efe01 100644 --- a/examples/triplet/convert_mnist_triplet_data.cpp +++ b/examples/triplet/convert_mnist_triplet_data.cpp @@ -72,7 +72,7 @@ void convert_dataset(const char* image_filename, const char* label_filename, char label_i; // label for triplet char label_j; char label_k; - char label_l; // label for pair wise + char label_l; // label for pair wise char label_m; char* pixels = new char[5 * rows * cols]; const int kMaxKeyLength = 10; @@ -86,18 +86,22 @@ void convert_dataset(const char* image_filename, const char* label_filename, LOG(INFO) << "A total of " << num_items << " items."; LOG(INFO) << "Rows: " << rows << " Cols: " << cols; for (int itemid = 0; itemid < num_items; ++itemid) { - int i = caffe::caffe_rng_rand() % num_items; // pick triplet groups + // pick triplet groups + int i = caffe::caffe_rng_rand() % num_items; int j = caffe::caffe_rng_rand() % num_items; int k = caffe::caffe_rng_rand() % num_items; - int l = caffe::caffe_rng_rand() % num_items; // pick pair 
wise groups + // pick pair wise groups + int l = caffe::caffe_rng_rand() % num_items; int m = caffe::caffe_rng_rand() % num_items; - read_image(&image_file, &label_file, i, rows, cols, // read triplet groups + // read triplet groups + read_image(&image_file, &label_file, i, rows, cols, pixels, &label_i); read_image(&image_file, &label_file, j, rows, cols, pixels + (rows * cols), &label_j); read_image(&image_file, &label_file, k, rows, cols, pixels + (2 * rows * cols), &label_k); - read_image(&image_file, &label_file, l, rows, cols, // read pair wise groups + // pair wise groups + read_image(&image_file, &label_file, l, rows, cols, pixels + (3 * rows * cols), &label_l); read_image(&image_file, &label_file, m, rows, cols, pixels + (4 * rows * cols), &label_m); From 1f6c87a19f75f5466151616c48697c077f13cf82 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Wed, 29 Jul 2015 14:20:05 +0800 Subject: [PATCH 27/82] rewrite the prototxt for 3d model training --- examples/triplet/3d_triplet_solver.prototxt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/triplet/3d_triplet_solver.prototxt b/examples/triplet/3d_triplet_solver.prototxt index 8dd159e92c5..d61a6c42f4b 100644 --- a/examples/triplet/3d_triplet_solver.prototxt +++ b/examples/triplet/3d_triplet_solver.prototxt @@ -1,7 +1,7 @@ # The train/test net protocol buffer definition -net: "examples/triplet/lfw_triplet_train_test.prototxt" +net: "examples/triplet/3d_triplet_train_test.prototxt" # test_iter specifies how many forward passes the test should carry out. -# In the case of lfw, we have test batch size 23 and 23 test iterations, +# In the case of 3d database, we have test batch size 23 and 23 test iterations, # covering the full 276 testing images. test_iter: 23 # Carry out testing every 500 training iterations. 
From 254673633a8f1ecaba3faca56a7193b5b5456ed6 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Thu, 30 Jul 2015 15:16:19 +0800 Subject: [PATCH 28/82] add pose information for training data arrangement in triplet training --- examples/triplet/convert_3d_triplet_data.cpp | 73 +++++++++----------- 1 file changed, 31 insertions(+), 42 deletions(-) diff --git a/examples/triplet/convert_3d_triplet_data.cpp b/examples/triplet/convert_3d_triplet_data.cpp index 0fb87a17b4b..07ba92f1471 100644 --- a/examples/triplet/convert_3d_triplet_data.cpp +++ b/examples/triplet/convert_3d_triplet_data.cpp @@ -1,17 +1,14 @@ -// This script converts the MNIST dataset to the leveldb format used -// by caffe to train siamese network. // Usage: // convert_mnist_data input_image_file input_label_file output_db_file // The MNIST dataset could be downloaded at // http://yann.lecun.com/exdb/mnist/ #include // NOLINT(readability/streams) #include - #include "glog/logging.h" #include "google/protobuf/text_format.h" #include "leveldb/db.h" #include "stdint.h" - +#define CPU_ONLY #include "caffe/proto/caffe.pb.h" #include "caffe/util/math_functions.hpp" @@ -22,11 +19,13 @@ uint32_t swap_endian(uint32_t val) { void read_image(std::ifstream* image_file, std::ifstream* label_file, uint32_t index, uint32_t rows, uint32_t cols, - char* pixels, char* label) { + char* pixels, char* label_temp, signed char* label) { image_file->seekg(index * rows * cols + 16); image_file->read(pixels, rows * cols); - label_file->seekg(index + 8); - label_file->read(label, 1); + label_file->seekg(index * 4 + 8); + label_file->read(label_temp, 4); + for (int i = 0; i < 4; i++) + *(label+i) = (signed char)*(label_temp+i); } void convert_dataset(const char* image_filename, const char* label_filename, @@ -48,7 +47,7 @@ void convert_dataset(const char* image_filename, const char* label_filename, CHECK_EQ(magic, 2051) << "Incorrect image file magic."; label_file.read(reinterpret_cast(&magic), 4); magic = swap_endian(magic); - 
CHECK_EQ(magic, 2049) << "Incorrect label file magic."; + CHECK_EQ(magic, 2050) << "Incorrect label file magic."; image_file.read(reinterpret_cast(&num_items), 4); num_items = swap_endian(num_items); label_file.read(reinterpret_cast(&num_labels), 4); @@ -69,11 +68,12 @@ void convert_dataset(const char* image_filename, const char* label_filename, CHECK(status.ok()) << "Failed to open leveldb " << db_filename << ". Is it already existing?"; - char label_i; // label for triplet - char label_j; - char label_k; - char label_l; // label for pair wise - char label_m; + char* label_temp = new char[4]; // label for unsigned char* + signed char* label_i = new signed char[4]; // label for triplet + signed char* label_j = new signed char[4]; + signed char* label_k = new signed char[4]; + signed char* label_l = new signed char[4]; // label for pair wise + signed char* label_m = new signed char[4]; char* pixels = new char[5 * rows * cols]; const int kMaxKeyLength = 10; char key[kMaxKeyLength]; @@ -85,29 +85,30 @@ void convert_dataset(const char* image_filename, const char* label_filename, datum.set_width(cols); LOG(INFO) << "A total of " << num_items << " items."; LOG(INFO) << "Rows: " << rows << " Cols: " << cols; - for (int itemid = 0; itemid < num_items; ++itemid) { - // pick triplet groups - int i = caffe::caffe_rng_rand() % num_items; + for (unsigned int itemid = 0; itemid < 10 * num_items; ++itemid) { + int i = caffe::caffe_rng_rand() % num_items; // pick triplet groups int j = caffe::caffe_rng_rand() % num_items; int k = caffe::caffe_rng_rand() % num_items; - // pick pair wise groups - int l = caffe::caffe_rng_rand() % num_items; + int l = caffe::caffe_rng_rand() % num_items; // pick pair wise groups int m = caffe::caffe_rng_rand() % num_items; - // read triplet groups - read_image(&image_file, &label_file, i, rows, cols, - pixels, &label_i); + read_image(&image_file, &label_file, i, rows, cols, // read triplet groups + pixels, label_temp, label_i); 
read_image(&image_file, &label_file, j, rows, cols, - pixels + (rows * cols), &label_j); + pixels + (rows * cols), label_temp, label_j); read_image(&image_file, &label_file, k, rows, cols, - pixels + (2 * rows * cols), &label_k); - // read pair wise groups - read_image(&image_file, &label_file, l, rows, cols, - pixels + (3 * rows * cols), &label_l); + pixels + (2 * rows * cols), label_temp, label_k); + read_image(&image_file, &label_file, l, rows, cols, // read pair wise groups + pixels + (3 * rows * cols), label_temp, label_l); read_image(&image_file, &label_file, m, rows, cols, - pixels + (4 * rows * cols), &label_m); + pixels + (4 * rows * cols), label_temp, label_m); datum.set_data(pixels, 5*rows*cols); // set data - if ((label_i == label_j && label_i != label_k) && (label_l == label_m)) { + bool pose_pass; + int dist_ij = (int)((*(label_i+1)-*(label_j+1))*(*(label_i+1)-*(label_j+1)) + (*(label_i+2)-*(label_j+2))*(*(label_i+2)-*(label_j+2)) + (*(label_i+3)-*(label_j+3))*(*(label_i+3)-*(label_j+3))); + int dist_ik = (int)((*(label_i+1)-*(label_k+1))*(*(label_i+1)-*(label_k+1)) + (*(label_i+2)-*(label_k+2))*(*(label_i+2)-*(label_k+2)) + (*(label_i+3)-*(label_k+3))*(*(label_i+3)-*(label_k+3))); + if (dist_ij < dist_ik ) + pose_pass = true; + if (((*label_i == *label_j && *label_i != *label_k) || ((*label_i == *label_j && *label_i == *label_k) && pose_pass)) && (*label_l == *label_m)) { datum.set_label(1); datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", itemid); @@ -117,24 +118,12 @@ void convert_dataset(const char* image_filename, const char* label_filename, datum.set_label(0); } } - delete db; delete pixels; } int main(int argc, char** argv) { - if (argc != 4) { - printf("This script converts the MNIST dataset to the leveldb format used\n" - "by caffe to train a siamese network.\n" - "Usage:\n" - " convert_mnist_data input_image_file input_label_file " - "output_db_file\n" - "The MNIST dataset could be downloaded at\n" - " 
http://yann.lecun.com/exdb/mnist/\n" - "You should gunzip them after downloading.\n"); - } else { - google::InitGoogleLogging(argv[0]); - convert_dataset(argv[1], argv[2], argv[3]); - } + convert_dataset("/home/wangyida/Desktop/caffe/data/linemod/binary_image_train", "/home/wangyida/Desktop/caffe/data/linemod/binary_label_train", "/home/wangyida/Desktop/caffe/data/linemod/leveldb"); return 0; } + From 8842528de333d174de2efa0dc3c5551c6b0cca6f Mon Sep 17 00:00:00 2001 From: Wangyida Date: Thu, 30 Jul 2015 15:30:54 +0800 Subject: [PATCH 29/82] delete macro --- examples/triplet/convert_3d_triplet_data.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/examples/triplet/convert_3d_triplet_data.cpp b/examples/triplet/convert_3d_triplet_data.cpp index 07ba92f1471..94414887995 100644 --- a/examples/triplet/convert_3d_triplet_data.cpp +++ b/examples/triplet/convert_3d_triplet_data.cpp @@ -8,7 +8,6 @@ #include "google/protobuf/text_format.h" #include "leveldb/db.h" #include "stdint.h" -#define CPU_ONLY #include "caffe/proto/caffe.pb.h" #include "caffe/util/math_functions.hpp" From bbb366473de496f4aaf3536d5ac96f278a22b061 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Thu, 30 Jul 2015 21:39:01 +0800 Subject: [PATCH 30/82] add the codes wrongly deteted... 
--- examples/triplet/convert_3d_triplet_data.cpp | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/examples/triplet/convert_3d_triplet_data.cpp b/examples/triplet/convert_3d_triplet_data.cpp index 94414887995..53e81f972ce 100644 --- a/examples/triplet/convert_3d_triplet_data.cpp +++ b/examples/triplet/convert_3d_triplet_data.cpp @@ -122,7 +122,19 @@ void convert_dataset(const char* image_filename, const char* label_filename, } int main(int argc, char** argv) { - convert_dataset("/home/wangyida/Desktop/caffe/data/linemod/binary_image_train", "/home/wangyida/Desktop/caffe/data/linemod/binary_label_train", "/home/wangyida/Desktop/caffe/data/linemod/leveldb"); + if (argc != 4) { + printf("This script converts the MNIST dataset to the leveldb format used\n" + "by caffe to train a siamese network.\n" + "Usage:\n" + " convert_mnist_data input_image_file input_label_file " + "output_db_file\n" + "The MNIST dataset could be downloaded at\n" + " http://yann.lecun.com/exdb/mnist/\n" + "You should gunzip them after downloading.\n"); + } else { + google::InitGoogleLogging(argv[0]); + convert_dataset(argv[1], argv[2], argv[3]); + } return 0; } From 0e9273c771a10e3f43a0e01e5f0bb6b1afc16aea Mon Sep 17 00:00:00 2001 From: Wangyida Date: Fri, 31 Jul 2015 14:08:02 +0800 Subject: [PATCH 31/82] rearrange the training samples selection codes --- examples/triplet/convert_3d_triplet_data.cpp | 35 +++++++++++++++----- 1 file changed, 26 insertions(+), 9 deletions(-) diff --git a/examples/triplet/convert_3d_triplet_data.cpp b/examples/triplet/convert_3d_triplet_data.cpp index 53e81f972ce..6a01e19aa3f 100644 --- a/examples/triplet/convert_3d_triplet_data.cpp +++ b/examples/triplet/convert_3d_triplet_data.cpp @@ -4,12 +4,12 @@ // http://yann.lecun.com/exdb/mnist/ #include // NOLINT(readability/streams) #include +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/math_functions.hpp" #include "glog/logging.h" #include "google/protobuf/text_format.h" #include 
"leveldb/db.h" #include "stdint.h" -#include "caffe/proto/caffe.pb.h" -#include "caffe/util/math_functions.hpp" uint32_t swap_endian(uint32_t val) { val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); @@ -90,24 +90,41 @@ void convert_dataset(const char* image_filename, const char* label_filename, int k = caffe::caffe_rng_rand() % num_items; int l = caffe::caffe_rng_rand() % num_items; // pick pair wise groups int m = caffe::caffe_rng_rand() % num_items; - read_image(&image_file, &label_file, i, rows, cols, // read triplet groups + read_image(&image_file, &label_file, i, rows, cols, // read triplet pixels, label_temp, label_i); read_image(&image_file, &label_file, j, rows, cols, pixels + (rows * cols), label_temp, label_j); read_image(&image_file, &label_file, k, rows, cols, pixels + (2 * rows * cols), label_temp, label_k); - read_image(&image_file, &label_file, l, rows, cols, // read pair wise groups + read_image(&image_file, &label_file, l, rows, cols, // read pair wise pixels + (3 * rows * cols), label_temp, label_l); read_image(&image_file, &label_file, m, rows, cols, pixels + (4 * rows * cols), label_temp, label_m); datum.set_data(pixels, 5*rows*cols); // set data - bool pose_pass; - int dist_ij = (int)((*(label_i+1)-*(label_j+1))*(*(label_i+1)-*(label_j+1)) + (*(label_i+2)-*(label_j+2))*(*(label_i+2)-*(label_j+2)) + (*(label_i+3)-*(label_j+3))*(*(label_i+3)-*(label_j+3))); - int dist_ik = (int)((*(label_i+1)-*(label_k+1))*(*(label_i+1)-*(label_k+1)) + (*(label_i+2)-*(label_k+2))*(*(label_i+2)-*(label_k+2)) + (*(label_i+3)-*(label_k+3))*(*(label_i+3)-*(label_k+3))); + bool triplet_class_pass; + bool triplet_class_same; + bool triplet_pose_pass; + bool pair_class_pass; + static_cast ij_x, ij_y, ij_z; + static_cast ik_x, ik_y, ik_z; + ij_x = (*(label_i+1)-*(label_j+1))*(*(label_i+1)-*(label_j+1)); + ij_y = (*(label_i+2)-*(label_j+2))*(*(label_i+2)-*(label_j+2)); + ij_z = (*(label_i+3)-*(label_j+3))*(*(label_i+3)-*(label_j+3)); + ik_x = 
(*(label_i+1)-*(label_k+1))*(*(label_i+1)-*(label_k+1)); + ik_y = (*(label_i+2)-*(label_k+2))*(*(label_i+2)-*(label_k+2)); + ik_z = (*(label_i+3)-*(label_k+3))*(*(label_i+3)-*(label_k+3)); + static_cast dist_ij = ij_x + ij_y + ij_z; + static_cast dist_ik = ik_x + ik_y + ik_z; if (dist_ij < dist_ik ) - pose_pass = true; - if (((*label_i == *label_j && *label_i != *label_k) || ((*label_i == *label_j && *label_i == *label_k) && pose_pass)) && (*label_l == *label_m)) { + triplet_pose_pass = true; + if ((*label_i == *label_j) && (*label_i != *label_k)) + triplet_class_pass = true; + if ((*label_i == *label_j) && (*label_i == *label_k)) + triplet_class_same = true; + if (*label_l == *label_m) + pair_class_pass = true; + if (( triplet_class_pass || (triplet_class_same && triplet_pose_pass)) && pair_class_pass) { datum.set_label(1); datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", itemid); From a327b5c6be02ea1402bd6dda536c3d6297724301 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Fri, 31 Jul 2015 14:23:11 +0800 Subject: [PATCH 32/82] remove static_cast declare --- examples/triplet/convert_3d_triplet_data.cpp | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/examples/triplet/convert_3d_triplet_data.cpp b/examples/triplet/convert_3d_triplet_data.cpp index 6a01e19aa3f..5990da5c0e8 100644 --- a/examples/triplet/convert_3d_triplet_data.cpp +++ b/examples/triplet/convert_3d_triplet_data.cpp @@ -106,16 +106,16 @@ void convert_dataset(const char* image_filename, const char* label_filename, bool triplet_class_same; bool triplet_pose_pass; bool pair_class_pass; - static_cast ij_x, ij_y, ij_z; - static_cast ik_x, ik_y, ik_z; - ij_x = (*(label_i+1)-*(label_j+1))*(*(label_i+1)-*(label_j+1)); - ij_y = (*(label_i+2)-*(label_j+2))*(*(label_i+2)-*(label_j+2)); - ij_z = (*(label_i+3)-*(label_j+3))*(*(label_i+3)-*(label_j+3)); - ik_x = (*(label_i+1)-*(label_k+1))*(*(label_i+1)-*(label_k+1)); - ik_y = 
(*(label_i+2)-*(label_k+2))*(*(label_i+2)-*(label_k+2)); - ik_z = (*(label_i+3)-*(label_k+3))*(*(label_i+3)-*(label_k+3)); - static_cast dist_ij = ij_x + ij_y + ij_z; - static_cast dist_ik = ik_x + ik_y + ik_z; + int ij_x, ij_y, ij_z; + int ik_x, ik_y, ik_z; + ij_x = static_cast(*(label_i+1)-*(label_j+1))*(*(label_i+1)-*(label_j+1)); + ij_y = static_cast(*(label_i+2)-*(label_j+2))*(*(label_i+2)-*(label_j+2)); + ij_z = static_cast(*(label_i+3)-*(label_j+3))*(*(label_i+3)-*(label_j+3)); + ik_x = static_cast(*(label_i+1)-*(label_k+1))*(*(label_i+1)-*(label_k+1)); + ik_y = static_cast(*(label_i+2)-*(label_k+2))*(*(label_i+2)-*(label_k+2)); + ik_z = static_cast(*(label_i+3)-*(label_k+3))*(*(label_i+3)-*(label_k+3)); + int dist_ij = ij_x + ij_y + ij_z; + int dist_ik = ik_x + ik_y + ik_z; if (dist_ij < dist_ik ) triplet_pose_pass = true; if ((*label_i == *label_j) && (*label_i != *label_k)) From 3fa4ba25f929720eaf6aac1260f8add9752c046f Mon Sep 17 00:00:00 2001 From: Wangyida Date: Sat, 1 Aug 2015 11:24:48 +0800 Subject: [PATCH 33/82] modify the data generation condition modify the data generation condition --- examples/triplet/3d_triplet.prototxt | 2 +- examples/triplet/3d_triplet_solver.prototxt | 10 ++--- .../triplet/3d_triplet_train_test.prototxt | 5 +-- examples/triplet/convert_3d_triplet_data.cpp | 42 +++++++++++-------- 4 files changed, 32 insertions(+), 27 deletions(-) diff --git a/examples/triplet/3d_triplet.prototxt b/examples/triplet/3d_triplet.prototxt index 6865d67a6c3..2732b9a0e82 100644 --- a/examples/triplet/3d_triplet.prototxt +++ b/examples/triplet/3d_triplet.prototxt @@ -1,6 +1,6 @@ name: "3d_triplet" input: "data" -input_dim: 276 +input_dim: 2760 input_dim: 1 input_dim: 64 input_dim: 64 diff --git a/examples/triplet/3d_triplet_solver.prototxt b/examples/triplet/3d_triplet_solver.prototxt index d61a6c42f4b..f1dba141539 100644 --- a/examples/triplet/3d_triplet_solver.prototxt +++ b/examples/triplet/3d_triplet_solver.prototxt @@ -2,10 +2,10 @@ net: 
"examples/triplet/3d_triplet_train_test.prototxt" # test_iter specifies how many forward passes the test should carry out. # In the case of 3d database, we have test batch size 23 and 23 test iterations, -# covering the full 276 testing images. -test_iter: 23 +# covering the full 2760 testing images. +test_iter: 100 # Carry out testing every 500 training iterations. -test_interval: 23 +test_interval: 100 # The base learning rate, momentum and the weight decay of the network. base_lr: 0.01 momentum: 0.9 @@ -17,9 +17,9 @@ power: 0.75 # Display every 100 iterations display: 100 # The maximum number of iterations -max_iter: 1000 +max_iter: 20000 # snapshot intermediate results -snapshot: 200 +snapshot: 2000 snapshot_prefix: "examples/triplet/3d_triplet" # solver mode: CPU or GPU solver_mode: CPU diff --git a/examples/triplet/3d_triplet_train_test.prototxt b/examples/triplet/3d_triplet_train_test.prototxt index bff19047ab8..dc5373dd125 100644 --- a/examples/triplet/3d_triplet_train_test.prototxt +++ b/examples/triplet/3d_triplet_train_test.prototxt @@ -12,7 +12,7 @@ layer { } data_param { source: "examples/triplet/3d_triplet_train_leveldb" - batch_size: 23 + batch_size: 69 } } layer { @@ -28,7 +28,7 @@ layer { } data_param { source: "examples/triplet/3d_triplet_test_leveldb" - batch_size: 23 + batch_size: 69 } } layer { @@ -742,4 +742,3 @@ layer { margin: 1 } } - diff --git a/examples/triplet/convert_3d_triplet_data.cpp b/examples/triplet/convert_3d_triplet_data.cpp index 5990da5c0e8..280755fddab 100644 --- a/examples/triplet/convert_3d_triplet_data.cpp +++ b/examples/triplet/convert_3d_triplet_data.cpp @@ -84,7 +84,7 @@ void convert_dataset(const char* image_filename, const char* label_filename, datum.set_width(cols); LOG(INFO) << "A total of " << num_items << " items."; LOG(INFO) << "Rows: " << rows << " Cols: " << cols; - for (unsigned int itemid = 0; itemid < 10 * num_items; ++itemid) { + for (unsigned int itemid = 0; itemid < 10 * num_items; ++itemid) {\ int i = 
caffe::caffe_rng_rand() % num_items; // pick triplet groups int j = caffe::caffe_rng_rand() % num_items; int k = caffe::caffe_rng_rand() % num_items; @@ -102,29 +102,36 @@ void convert_dataset(const char* image_filename, const char* label_filename, pixels + (4 * rows * cols), label_temp, label_m); datum.set_data(pixels, 5*rows*cols); // set data - bool triplet_class_pass; - bool triplet_class_same; - bool triplet_pose_pass; - bool pair_class_pass; - int ij_x, ij_y, ij_z; - int ik_x, ik_y, ik_z; - ij_x = static_cast(*(label_i+1)-*(label_j+1))*(*(label_i+1)-*(label_j+1)); - ij_y = static_cast(*(label_i+2)-*(label_j+2))*(*(label_i+2)-*(label_j+2)); - ij_z = static_cast(*(label_i+3)-*(label_j+3))*(*(label_i+3)-*(label_j+3)); - ik_x = static_cast(*(label_i+1)-*(label_k+1))*(*(label_i+1)-*(label_k+1)); - ik_y = static_cast(*(label_i+2)-*(label_k+2))*(*(label_i+2)-*(label_k+2)); - ik_z = static_cast(*(label_i+3)-*(label_k+3))*(*(label_i+3)-*(label_k+3)); + bool triplet_class_pass = false; + bool triplet_class_same = false; + bool triplet_pose_pass = false; + bool pair_class_pass = false; + + int ij_diff_x = static_cast(*(label_i+1)-*(label_j+1)); + int ij_diff_y = static_cast(*(label_i+2)-*(label_j+2)); + int ij_diff_z = static_cast(*(label_i+3)-*(label_j+3)); + int ik_diff_x = static_cast(*(label_i+1)-*(label_k+1)); + int ik_diff_y = static_cast(*(label_i+2)-*(label_k+2)); + int ik_diff_z = static_cast(*(label_i+3)-*(label_k+3)); + + int ij_x = ij_diff_x*ij_diff_x; + int ij_y = ij_diff_y*ij_diff_y; + int ij_z = ij_diff_z*ij_diff_z; + int ik_x = ik_diff_x*ik_diff_x; + int ik_y = ik_diff_y*ik_diff_y; + int ik_z = ik_diff_z*ik_diff_z; + int dist_ij = ij_x + ij_y + ij_z; int dist_ik = ik_x + ik_y + ik_z; - if (dist_ij < dist_ik ) + if ((*label_i == *label_j) && (*label_i == *label_k)) + triplet_class_same = true; + if ((dist_ij < dist_ik) && (triplet_class_same)) triplet_pose_pass = true; if ((*label_i == *label_j) && (*label_i != *label_k)) triplet_class_pass = true; - if 
((*label_i == *label_j) && (*label_i == *label_k)) - triplet_class_same = true; if (*label_l == *label_m) pair_class_pass = true; - if (( triplet_class_pass || (triplet_class_same && triplet_pose_pass)) && pair_class_pass) { + if ((triplet_class_pass || triplet_pose_pass) && pair_class_pass) { datum.set_label(1); datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", itemid); @@ -154,4 +161,3 @@ int main(int argc, char** argv) { } return 0; } - From 03e16df9a8836a3c48539a8eb1bfc76fd02e3914 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Sun, 2 Aug 2015 10:36:54 +0800 Subject: [PATCH 34/82] the soft style triplet loss as added in triplet loss layer as an option the soft style triplet loss as added in triplet loss layer as an option --- .../triplet/3d_triplet_train_test.prototxt | 1 + examples/triplet/mnist_siamese.ipynb | 154 ------------------ examples/triplet/readme.md | 131 +++++++-------- src/caffe/layers/triplet_loss_layer.cpp | 109 ++++++++++++- src/caffe/proto/caffe.proto | 1 + 5 files changed, 176 insertions(+), 220 deletions(-) delete mode 100644 examples/triplet/mnist_siamese.ipynb diff --git a/examples/triplet/3d_triplet_train_test.prototxt b/examples/triplet/3d_triplet_train_test.prototxt index dc5373dd125..db9b786e110 100644 --- a/examples/triplet/3d_triplet_train_test.prototxt +++ b/examples/triplet/3d_triplet_train_test.prototxt @@ -740,5 +740,6 @@ layer { top: "loss" triplet_loss_param { margin: 1 + losstype: 1 } } diff --git a/examples/triplet/mnist_siamese.ipynb b/examples/triplet/mnist_siamese.ipynb deleted file mode 100644 index 8e076663ca6..00000000000 --- a/examples/triplet/mnist_siamese.ipynb +++ /dev/null @@ -1,154 +0,0 @@ -{ - "metadata": { - "description": "Extracting features and plotting the Siamese network embedding.", - "example_name": "Siamese network embedding", - "include_in_docs": true, - "priority": 6, - "signature": "sha256:845bb18929f96543ba2611eb5eca744fd98939cbef876df6bc319c29f616fc64" - }, - "nbformat": 3, - 
"nbformat_minor": 0, - "worksheets": [ - { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Setup\n", - "\n", - "Import Caffe and the usual modules." - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "import numpy as np\n", - "import matplotlib.pyplot as plt\n", - "%matplotlib inline\n", - "\n", - "# Make sure that caffe is on the python path:\n", - "caffe_root = '../../' # this file is expected to be in {caffe_root}/examples/siamese\n", - "import sys\n", - "sys.path.insert(0, caffe_root + 'python')\n", - "\n", - "import caffe" - ], - "language": "python", - "metadata": {}, - "outputs": [], - "prompt_number": 1 - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Load the trained net\n", - "\n", - "Load the model definition and weights and set to CPU mode TEST phase computation with input scaling." - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "MODEL_FILE = 'mnist_siamese.prototxt'\n", - "# decrease if you want to preview during training\n", - "PRETRAINED_FILE = 'mnist_siamese_iter_50000.caffemodel' \n", - "caffe.set_mode_cpu()\n", - "net = caffe.Net(MODEL_FILE, PRETRAINED_FILE, caffe.TEST)" - ], - "language": "python", - "metadata": {}, - "outputs": [], - "prompt_number": 2 - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Load some MNIST test data" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "TEST_DATA_FILE = '../../data/mnist/t10k-images-idx3-ubyte'\n", - "TEST_LABEL_FILE = '../../data/mnist/t10k-labels-idx1-ubyte'\n", - "n = 10000\n", - "\n", - "with open(TEST_DATA_FILE, 'rb') as f:\n", - " f.read(16) # skip the header\n", - " raw_data = np.fromstring(f.read(n * 28*28), dtype=np.uint8)\n", - "\n", - "with open(TEST_LABEL_FILE, 'rb') as f:\n", - " f.read(8) # skip the header\n", - " labels = np.fromstring(f.read(n), dtype=np.uint8)" - ], - "language": "python", - "metadata": {}, - "outputs": [], - 
"prompt_number": 3 - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Generate the Siamese features" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "# reshape and preprocess\n", - "caffe_in = raw_data.reshape(n, 1, 28, 28) * 0.00390625 # manually scale data instead of using `caffe.io.Transformer`\n", - "out = net.forward_all(data=caffe_in)" - ], - "language": "python", - "metadata": {}, - "outputs": [], - "prompt_number": 4 - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Visualize the learned Siamese embedding" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "feat = out['feat']\n", - "f = plt.figure(figsize=(16,9))\n", - "c = ['#ff0000', '#ffff00', '#00ff00', '#00ffff', '#0000ff', \n", - " '#ff00ff', '#990000', '#999900', '#009900', '#009999']\n", - "for i in range(10):\n", - " plt.plot(feat[labels==i,0].flatten(), feat[labels==i,1].flatten(), '.', c=c[i])\n", - "plt.legend(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'])\n", - "plt.grid()\n", - "plt.show()" - ], - "language": "python", - "metadata": {}, - "outputs": [ - { - "metadata": {}, - "output_type": "display_data", - "png": 
"iVBORw0KGgoAAAANSUhEUgAAA54AAAIXCAYAAAD0R4FDAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvXtwXOWZr/usvurWUktqGdmxaawEHEMuthGXITiIyMaJ\nwbEMFmCTDMkkoyqSyTnZMwdqpmYyzEyS2ruKue2ZqSTHO/vYGQbhCxdjwI637ViWMEEEMJhgB4MB\ngSRLsizJkiypuyX1+WP1Wlp971YvSd3y+1S5rF69Lt/6+lOrf/2+v/dVgsEggiAIgiAIgiAIgjBT\nWOZ6AIIgCIIgCIIgCML8RoSnIAiCIAiCIAiCMKOI8BQEQRAEQRAEQRBmFBGegiAIgiAIgiAIwowi\nwlMQBEEQBEEQBEGYUUR4CoIgCIIgCIIgCDNKRsJTUZQ8RVFaFUV5U1GUU4qi/HezBiYIgiAIgiAI\ngiDMD5RM+3gqilIQDAZHFEWxAS8B/08wGHzJlNEJgiAIgiAIgiAIOU/GqbbBYHAk9KMDsAJ9mZ5T\nEARBEARBEARBmD9kLDwVRbEoivIm0A0cDQaDpzIfliAIgiAIgiAIgjBfMCPiORkMBlcAi4EvK4pS\nk/GoBEEQBEEQBEEQhHmDzawTBYPBi4qivAhUA03adkVRMjORCoIgCIIgCIIgCFlNMBhUEj2fkfBU\nFMUDjAeDwQFFUfKBtcDfxxhEJpcRhDC+9a1vsWPHjrkehjCPkDUlmImsJ8FsZE0JZiNrSjAbRUmo\nOYHMI54LgV8pimJBTdt9PBgMHsnwnIIgCIIgCIIgCMI8IiPhGQwG3wZWmTQWQUiJq666aq6HIMwz\nZE0JZiLrSTAbWVOC2ciaEuaCjIsLCcJsU1NTM9dDEOYZsqYEM5H1JJiNrCnBbGRNCXOBCE9BEARB\nEARBEARhRjGtqq0gCIIgCIIgCIIQTSrFd3KF6RaOVWa64qyiKEGpaisIgiAIgiAIwuWKoijzotNH\nvPsIbU+oriXVVhAEQRAEQRAEQZhRRHgKOUdTU9NcD0GYZ8iaEsxE1pNgNrKmBLORNSXMBSI8BUEQ\nBEEQBEEQhBlFPJ6CIAiCIAiCIAgziHg8JeIpCIIgCIIgCIJwWdPX18emTZsoKiriqquu4sknnzT9\nGiI8hZxDfAmC2ciaEsxE1pNgNrKmBLORNSVE8v3vf5+8vDx6enp44okneOihhzh16pSp1xDhKQiC\nIAiCIAiCcJly6dIlnnnmGX784x9TUFDAl770JTZu3Mjjjz9u6nXE4ykIgiAIgiAIgjCDJPV4NjTA\nmTNQUACNjeB2p3eBDI4/ceIEt956K5cuXdK3/fM//zNNTU3s27cvpfsQj6cgCIIgCIIgCEK2c+YM\nHDsGBw6oInIWjx8eHqa4uDhsm8vlYmhoKP1xJECEp5BziC9BMBtZU4KZyHoSzEbWlGA2sqaykIIC\n9f/qati2bVaPLyoqYnBwMGzbxYsXcblc6Y8jASI8BUEQBEEQBEEQ5pLGRqivh0OH0k+zzfD4a665\nhvHxcd5//31921tvvcXnPve59MeRAPF4CoIgCIIgCIIgzCDZ3sdzy5YtKIrCL3/5S9544w3uuusu\nfvvb37J8+fKw/cTjKQiCIAiCIAiCIEyLn/3sZ4yOjrJgwQK+8Y1v8Itf/CJKdGaKCE8h5xBfgmA2\nsqYEM5H1JJiNrCnBbGRNCZGUlpby7LPPMjw8zEcffcT9999v+jVEeAqCIAiCIAiCIAgzing8BUEQ\nBEEQBEEQZpBs93iming8BUEQBEEQBEEQhKxFhKeQc4gvQTAbWVOCmch6EsxG1pRgNrKmhLlAhKcg\nCIIgCIIgCIIwo4jHUxAEQRAEQRAEYQYRj6dEPAVBEARBEARBEIQZRoSnkHOIL0EwG1lTgpnIehLM\nRtaUYDaypoS5QISnIAiCIAiCIAiCMKOIx1MQBEEQBEEQBGEGy
WaP53/8x3+wY8cOfv/737Nlyxa2\nb98ed99MPJ62zIcqCIIgCIIgCIIg5CKf+tSn+NGPfsTBgwcZHR2dsetIqq2Qc4gvQTAbWVOCmch6\nEsxG1pRgNrKmBCObNm1i48aNlJeXz+h1RHgKgiAIgiAIgiDMKQ1ADbAeGJiD45nxVGDxeAqCIAiC\nIAiCIMwgyT2eNcCx0M/1wO40r5Dp8fCjH/2I9vZ28XgKgiAIgiBoNDc3MDBwBputgNraRpxO91wP\nSRAEIQMKQv9XA9vm4PiZj3hKqq2Qc4gvQTAbWVOCmch6mh0GBs7Q1XWM9vYDtLQ0zPVwZhRZU4LZ\nyJrKRhpRI5WHgOl8kZbp8WrUciaRiKcgCIIgCDmHzaZ+u+/xVLN69fS+3RcEQcge3EwnPdaM4ycm\nJggEAoyPjzMxMYHP58Nms2G1WjMYTzTi8RQEQRAEIefw+QZoaWlg9eptkmYrCELWk819PP/u7/6O\nf/iHf4ja9rd/+7dR+2bi8RThKQiCIAiCIAiCMINks/BMh0yEp3g8hZxDfAmC2ciaEswkcj01Nzew\nb18N+/evx+ebXon7TMmGMQjTR96jBLORNSXMBSI8BUEQBGEGyYYiONkwBkEQBOHyRlJtBUEQBGEG\n2b9/Pe3tB/B4qrnzzkNz4kfMhjEIgiBczkiqrQhPQRAEQZhRsqEITjaMQRAE4XJGhKek2go5iPgS\nBLORNSWYSeR6cjrdrFmze04FnxljEJ/o3CHvUYLZyJoS5gLp4ykIgiAIOUZzcwMDA2ew2QqorW2c\nFVGr+UQBWloaWLMmk35zU8zFvQiCIAizj6TaCoIgCEKOsW9fjS4Cq6rqTROBiUjHJ5qOmJyLexEE\nQZhtJNVWUm0FQRAEIeew2QoA8HiqWb16W9hzM5USW1vbSFVVfUrFidKpopvoXgRBEIT5gwhPIecQ\nX4JgNrKmBDOZjfWUSATOVOuUdHyi6YjJdATt5Yq8RwlmI2tKmAtEeAqCIAhCDmCMZAJxRWA2RBDz\n8ytwOj0pCclsKL4kCIJwOeP3+/nOd77DVVddRXFxMStXruTXv/616dcRj6cgCIJw2ZMLBW5S9UJm\nQ+sU8W0KgiCEk80ez5GRER577DG+/e1vc+WVV/Liiy+yZcsW3n77bbxeb9i+mXg8paqtIAiCcNkz\nUxVbzSTVSKYWQYyFWQI72XmyIeqqkQtfKgiCIMwlBQUFPProo/rjO++8k6VLl/LGG29ECc9MkFRb\nIecQX4JgNrKmBDOF0kytJzO8kGb5P5OdJ5t8mzPleZ1N5D1KMBtZU9lHAw3UUMN61jNA+oXhMj3e\nSHd3N2fOnOG6667L6DyRSMRTEARBuOyprW2c8/TUZLS2PsLISA9HjmxNO3KnRf36+98BUhPYiSKF\nyYR6oqhrJuza9VlGRrqwWOzcffdruFzJv4nPpuirIAhCPM5whmOomTcNNLCb9N5DMz1eIxAI8MAD\nD/Ctb32La665ZlrniId4PAVBEAQhy4gl+tLxTUYef/BgnX5sYeFiNm9+O6lwTXS9ufKRbt/uJhC4\nCKj38cADnyQ9Jhs8r4IgCMk8nutZzwEOUE01hziEm/TerzI9HmBycpKtW7cyPDzMc889h9VqTfk+\nxOMpCIIgCFlMvKhiLM9prMhdqscbj001/TVRpHCmIprJsFjsAFitBXz96y/F3U+bl6GhsxQWenE4\nimdriIIgCNOikUYaaGAb26YlGjM9PhgM8p3vfIfz58+zf//+mKIzU8TjKeQc4ksQzEbWlADh7Up8\nvun7Y9JZT/H8h4ODZ0M/WRkd7cHnG4jpm4x3fKRonI7nMpt8mhp33/0ahYWLuffeUwnTbLV5uXSp\nnZ6e4znt7wR5jxLMR9ZU9uHGzW52T0s0mnH8Qw89xB/+8Af27duH0+mc1jmSIcJTEARBuGxIJC7n\nogiNJjDt9mJuuukxffvkp
D/00wTnzh2jpaUhZr/LeFHJSNE4nV6Z2dhf0+Xy8sADnyT1dmrzYreX\nAOLvFARBSERbWxvbtm3jrbfeorKyEpfLhcvl4sknnzT1OuLxFARBEC4bEvkW9+9fT3v7gbTSUdNF\nSwEdHDxLMBhgdLQXmIgaz44dZfj9/YAqnrZu/SjmeDL1L2baaiRbW5Vo83LTTY/R2vqw+DsFQZhz\nsrmPZzpk4vEU4SkIgiBcNiQSl7NRhMYofI1YLA48nhtwOIqprW3k0KF6OjsPY7eXsGTJ1xgZOYfN\nVsDg4HuMjp5PqaprKqIwlhBvbm6gre15JiZ8eDzXs3btnrjzkU7BI0EQhMsZEZ6SaivkIOJLEMxG\n1tTlQyLfolmppYnW01QK6FSxG4fDTVnZyjAv4tq1e6iqqmfr1o84d65JTwEeHPyIQOAiPl8vu3Zd\nE9eP2tzcwNmzu5OmDsdK1R0YOMPoaBd+fz+dnYdpaWngf/0vB9u2KWzbZuHcuZcSHp8uZnlr5zPy\nHiWYjawpYS4Q4SkIgiBcNqQjLmdCEGnCd/Pmk3i9G/F669iy5UPy8soAVcBZrfns3r2c9vbDHDpU\nz8TEmOEM4/pPk5P+uKJyYOCM3nYELHz88UF+9asKhobawu7twoW3cDrLcTiifaMAZWUrWL16G8Fg\nILQlyPPP36afIxAYJD+/krVrn5q2YJ8Lb60gCIIw+0iqrSAIgpDVzJWPcDbTSI8efZCPP96Px7OC\nCxdOMjbWoz+nKHaD8FOxWBxMTvqx20vYvPktPeVWm6uenleYnPShKDYsljwmJoaBqd6XsVJ+tXv0\n+QZoavo2EKSmZgdOp5tt2yyA+rd8w4YWFi681bT5mQ1vrSAIwlwjqbbSx1MQBEHIcmL1tJwNUk0j\nzUQYa8f29Z3E7++no+MwimIP20cVnQqa8AO1yTdAIHCRZ56ppqLiBmprG8PmSj12XK+Qa7UWYLE4\n2L7dzfj4SNg1HI5SrNZ89u2riXkfGzY08/zzt7FhwzEWLrw1rflJRm1t44x7awVBEIS5RyKeQs7R\n1NRETU3NXA9DmEfImspuzI6IpSoUUy02ZIz8uVxL+eCDYlauXER+vprammpxH1BFnN1eyLlz0QWI\nNCyWPK644ibOnTuGzVbE+LgazayqqsfvH6a9/YC+r93uprj40/T3/55Nm15l374vG1JwVRTFxt13\nv87x4/+XPpaioqUUFV2ZcOyx5idbq9zmOvIeJZiNrKnZRyKe4vEUBEEQspxEBYESEc+jmaqnMFU/\nqDHyV1CwiL6+t2hvP8DHHx9IubhPWdkKvN46Skuvpb//dNR+ZWVf4FOfWktBwSKuuqqOyclx8vMr\nqai4PnSeIsbG+lm9+udhxwUCA1y48DqTkz5eeGGNIdJpwWp1AWpU9MUX12G1qpFWRbExPNyWdOxO\npxuHw83Bg3X6HItfUxAEQYiHRDwFQRCEeYkxmpifX8m9957G6XSbHkE1Rv6OHNmqn9vhcNPZeRib\nrYgrrriZNWvUtiS7dn2WkZEuLBY7d911hBMnfqJHDR9/fCGjo11h53c4SrHZ8pmcHGdyMkAgMGzw\nfNqwWCx6Oq3XW0db296oMRojo/HwejfS1fUyPt/5sGtv2fJBVERzaOgsExMBxsbC+5BqEdd05lai\npIIgXA5IxFOEpyAIgjBP0QSmhrF4jlmeQqNoys+v4OLFs1y48DplZV/Ebndx4cIJfL4LAOTlVeJw\nuBgcfE8/vrBwMUuWfE0/R0/Pb/H7p19B1+vdSFvb88Ckvq2gYBFu97V0dh4O29fpLMfvv0gwOE5Z\n2Qo2bDiqC2dQ27zcc8+bnDjxU318vb2vhxU+0tAEKpD23EovUEEQLgdEeIrwFHIQ8SUIZiNran7i\n8w2we/dyRke7oiJwZkXZjKLJ6fTg8/Xy7ruwbJn6fH5+ZVgEU1FsBINTLVEWLryNiYlxenq
OT/Mu\nw1Er4E6iRSGt1gIqK2/hy1/+Jbt3f5aJiTHsdheVlas5f/41XUTabIWhCrg2PJ5V2GyF9Pf/ntHR\n84yPXzKMObzIkXbNpUvvYWTk3LTmczaq2uZ6VFXeowSzkTU1+2S78PzGN77BkSNHuHTpEh6Ph+98\n5zv89V//ddR+4vEUBEEQhAicTjf33ns6pj/ULC+i0d/p8awIe87hKGXhwhq9Sm1Z2QocjpKwfc6d\nO0ZPz8sJrpDen2k1BVcVnYpiZ2JihI6Ow7S2Poz2OSEQGAJgcnKqRcv4+BiBwEV8vgt0dBzmllv+\nldHR8wQCF8OEcqTotNuLuf/+9xgZOTft+dQ8vKWl14b5Rc1EvKeCIAiJ+au/+is+/PBDBgcHOXDg\nAP/+7//Or3/9a1OvIcJTyDnkGzrBbGRNzV/iFQjKtBWIVrgoGAxQVOTFanUCasXZZcvUyOaGDU2M\njJzT/ZiXLn1CWdnnKChYRH5+ZehMViLF3BQKxpRZu70ktC05ijLVLc1ud3HTTY8xOTmmb+vtfQOP\nRy1MVFa2ImJ+guzbdysWiyqYrdYCnE4PAOXlK1myZD1LlqynqMhLWdnnaWl5SN83cj7jFXgyor1G\nQ0PJCxpNF7Nav8wV8h4lmI2sKSGS6667jry8PP2xzWZjwYIFpl5DhKcgCIJw2THdSrkaWgSto+Mw\ngcAluruP09FxGLu9EFArxf6f/1NHX99J/Rif7wLnzh2jouIGios/Hdo6keAqU4JUUewsWfI1Uv2z\nHQyO64I3EBjihRduD3ve41nF2rV7qKqqZ8OGo9x99+toolZRrIyP+5icDKAoDurqfsv9979HUdFS\nrNYC+vpO4vNdxO8fpLv7OO3tB7DbizKOLM+kOMz09RYEQZhpGoAaYD0wnZyPTI8H+N73vkdhYSHX\nXXcdf/M3f8OqVaumeabYiMdTyDnElyCYjawpIV2MvkSn001Hx2G9ku3Ro4f50peqsVqddHdHezeX\nLFnPhQsnGRlpT+laDoebxYu/yocfPm2oZps6dnsJZWWf08eiKHYqKm7k/PlXCAaDKIqVu+9+DYej\nhH37bqWg4FOcP9+qH+90VlBRUY3fPxjTi5rIm5mOfzPdok/NzQ20tT3PxIQPj+d61q7dM29FpbxH\nCWYja2r2SebxrAG0Ds71QLpl1jI9XiMYDHLs2DE2b97M/v37ufHGG8Oez8TjaUv0pCAIgiBc7kRW\nrh0aasNqteP1bqSmZgcwVckV4N1367jzzr0cObI15vk6OtQWK6mgKDbKy79IZ+dvpiU6QSEYnKSn\n51VA9Z0WF18TJiCDwUmefnolDz54gQce+IT9+9eHHe/znae9/QB5eZVEUlCwKGHRptraRp55ZhVW\nq5MjR7YmLOyjpdymysDAGb1wU2fnYVpaGnA43DldREgQhMuXgtD/1cB0cj4yPV5DURRqamqor6/n\nySefjBKemSART0EQBEEwoImnwcGzuFxe+vtP4ff3A1rVWFUAxmr9YRReq1f/nGefvZmxsa6oa4Rj\nBSawWBx6P04zyMtbwPj4GOPjg/p1Fiy4md7e15ic9EWPwppPefkqbLZ8rFYH3d2t+P1qKxiHo5R7\n7jnBM8/coPf5dDjcbNnyYZi4i9UaxbitqGgpRUVXRgnDVKrORu5jbP2itYM5eLBOWrMIgpCVJIt4\nDqCmy24DpvOVWabHR/Ld736XyspKfvKTn4Rtl6q2giAIgmASmi9xZKSd7u7juugEdNEZz4do9DS2\ntj7Mffed1gvzxGPDhiZcrqVYrXkJ90uXsbEeg+i0UF6+gp6e4zFFJ8DExCg9Pcfp7DxMd/crKIr6\n+cFicVBScg0tLQ9RXv5FQBOib6ZUtMm4raBgUUzPZype0Mh9amsb8Xrr8Ho3smHDUZxOd84XERLm\nK2a474T5jhs1PXa6ojGT48+fP8/OnTu5dOkSExMTHDx
4kD179rBx48ZpjiY2IjyFnKOpqWmuhyDM\nM2RNXd5EVl7VxIvdXhy1b1nZCrzeurh+RZutgHffnRI+TqebioobEl7/nXf+jcLCKwkEBhPulxmT\n9PefSnlvv78Pn68Xi8VJeflKzp9v1YsIuVxLKS29lpaWh6Iq1cYq4mPc5nCocxopDCMFo/aaPPHE\nEvbuvZX9+9dHVc51Ot2sW/cs69btjXmt+ZRmK+9Ruc4ZVPfdAVQROvfImhKMKIrCL37xCxYvXkx5\neTk/+tGPePzxx7nhhsR/v9JFPJ6CIAhCzpFKamaqaJE0QI+ktbQ0cNNNj7F793ImJkax211UVq7m\nK195Iupakem1p08/SGmpl4MH6/Rte/fezOhoFzZbEePjw/qximJncPBjhobOTnv8qRLej1NDTfMF\nsNkKGR+/hN3uMvT69DE01AaA0+lhdPQ8Pt8AQ0MfAvDMM6soLAxPnXU43Pq9a55YTVhqcxtZQChy\nu/E1uXRJLcLk9dZRVLQUiyW+XzRdn6ggzA5mue8EYWbweDyz8mVERh5PRVGWAP8JLECt+74tGAz+\nW8Q+4vEUBEEQTCWWl3C6JKq8unfvrXohHqfTQ0XFDdTWNtLa+oguNgOBQb1irOZh7Os7qafoVlXV\nY7Xm8/HHBwgEBuOmuloszrjPzSRWax6LF3+VW275V1pbH+ammx4LE8oVFdczOPghPt+AIXUXbLYi\nJiZ8evqx11vHunXPhr02TmeF7gnNy6vkvvtOh81dvC8NtNfEbi8hELiovzbi4RRyE7Pdd0Iukszj\nmSvMZVXbAPDfgsHgm4qiFAGvK4pyKBgMns7wvIIgCIIQFzO9fPn5FTidnpgCSEsNtdmK8Pl6dX/h\nyEiPLoDy8yv1sVgsTn07qGJ1eLiTgYFTYV7RWMy86JyKbhqpqLiJmprtYdHCe+89zeOPVzI+Psy5\nc8dwOsvDRCcQFrkF9MJIxtfG4XDT2XkYgLGxrqi5i1eJ1hh1bm19GKs1n927lzM21guoKc/i4RRy\nB819l000AM8DPuB6YA8iioWZJiOPZzAY7AoGg2+Gfh4GTgOLzBiYIMRDfAmC2ciayj3M9PINDbXh\n8/XS0XE4qrCNdp0rrrgZmBK6mrjS2qIoioOLF9/j4sU/8O67YLUWkpe3AL9/iJ6e44aquOr3vQ6H\nO2nRIQ2bzWV4pKAKyPQxFt8x0tV1TL9vzVt55MhWrNb8qasqqV/T+NqsXbtHb8MSOXfaY2PRoJ07\nr9HbuaxZsxuXy8uaNbsZGmpjdLRLTxd2ua6aVx7OZMh7lGA2TU2vAl1AP3CYbPGeCvMb0zyeiqJc\nBawEWhPvKQiCIAiZYaaXL1H0VLuOzzcQ5kHMz69AUWyMjw/rkb9AQI34KYqViYlLTExcirpWMDiO\nzVaA232d7pNMRFnZ9fT1nTCegVhRy1Tw+S7E3K4oNnp732T7dneowJGaQuVweEJjWMG6dXtpbX2Y\njo7f4PNdwGrNZ9Gi22lvP6Sn2p4//zt8voGo1+a++06HzV2kn9Mo4rWeoTt3XkNFRbUeATV6YMvK\nvkBNzfZpzYEgCBpOw88rEe+pMBuY0sczlGbbBPwkGAzujXgu+OCDD3LVVVcB4Ha7WbFiBTU1NcDU\nt3jyWB7LY3ksj+XxXDz+oz9aQUtLA5OTf4zDURT1vMXSyMDAGVpb3yE/v5JVq5YQCAzS3Kz6Opct\nA4B331X//+IXFzA21kNX1zWMjHRSVTWsP2+x5HH11T4gqO8febz2+IMPigkEBuM+b9bjlSsXMTLS\nzbvvToQ9399/EzZbHn/2Z2rV2KamJkZGuujvf5ivf/0lXn/9Q158cS1XXz2un2/Rotv4i79omtb8\n/+53Z+ntfYNlyyzAZNj59u69lZYWdb7vuGMj69btzZr1I4/ny+O7gHZqahYBjTQ1vZll44v1+B+p\nqRkGCmhq+h4Q/f4
V//ELwP+gpqYC2J4j95vbj2+//fZ54/E8evQob775JgMDamXzjz76iF/96ldJ\nPZ4ZC09FUezAC8CBYDD4rzGel+JCgiAIQk7S3NzA2bO7CQQuhm3Pz69kdLQLh6OU0tJr9eJCpaWf\nZ3z8UiiaacHjqaav74Tuf0wVq1Ut6mP0i84csb2fDkcpixevY2TkXNxCQGqU9GJofzdbtnw47RRY\nn2+AnTuv0YsRORylbNnyAU6nO2EBqHQwsxqyMF9oQG13chI17RSgnuzzZMaiBrVNC+TOmC9fpLhQ\nhh5PRe0u/b+BU7FEpyDMBNq3SIJgFrKmhHgMDJzRhZXmz/R4qqmre4Wqqnq2bPmAr371BbzeOgoL\nr2R0tIfXXvsALSW2t7cVu10VN1N9QRP+XQZgYmKYrq7mNEdr0ceYHrFTd/3+fj74YLfuv2xq+nbU\nPgsWqD3eHA4399zzZlwhF9krNRZq2q2W/qdQXFzFkSNb8fkGTPP0Gv2kkX7ebEbeo2YSrcemJjpz\nqeXJ9Nu0TG9NNaCK3fWolXoFIT0yEp7Al4BvALcrinIi9O+rJoxLEARBEOYEo0gaHHwPUEXnXXcd\n1cWPVvTG6XTjdLpZt+5ZJif9jI11R51vwYJqqqrq2bz5JFVV9TgcpWHPxxeL6XwzbmHLlg/Iz1+Q\nxjGxiBTFxsfh42lubiAQGAWsTE5O8Mwz1XrPz0hSFXyFhV79Wr29r+v7a77RTCOUZlZDFuYLmnhb\nAdQBh8id6q6NqJHO2RqzJtIPIMWIhOlgiscz4QUk1VYQBEGYYcxMoQzvQ1luKMqjsHDhl7njjr0x\nz//LXzqjUmq1VNzh4TYKC704HMV0d78clbprJP1+nqrodLm8YX1H06WsbAWjo12MjnZFPWe3u9i8\n+W1OnPgpbW3P4/P1MTk5QWS0tLBwMQ888EnU8ammyk7171T9rZmm1kYSWSRKEOa+x6aW6luAKiTN\nHoOZ51+PKjqryS2Bnh1Iqm3mEU9BEARBmBOMkcm+vlOmpVAao2Iez0rDM0HOnTsW9/zGViWKYiMv\nbwElJdfQ3X2cS5fa6ek5Tnv7AaxWZ8zjVSw4naWk2jJFUWxcdVUdR48+yP7967HZ8pMfFPGn32Yr\nZMmS9ZSXf0Hvk2l8DiAQGKK19WEGBs4wOtoVEtjhotNqLeDrX38p5hXVXqkVOByJP6hqKbVadDhS\ndKaSspsIsyKnwnxC67E5V2vCGEW8mvTTWJOlv5oZpZztCKswF7z33nvk5eXxzW9+0/Rzi/AUcg7x\nughmI2u1e9m8AAAgAElEQVQqNzGmbw4Oqu02pptCaRQ0q1f/XBc9a9bsQVEc+n6lpZ9n9eptMQVQ\nRUU1AO+/n0cwOM7YWA+9vW8AasQQ1JYhbvdyLJZ44nMyFHFM1jJFFbb33/8+Y2MX9Hm4cOFE2Hhj\noSjhf/rHxy/R0XGE999v1Ptkqvs59HtSW530Y7Xaw46120vYsKGFwsLF3HvvKVwuL7FQe6Wep7Mz\nvFdq5DxqwtCYymwkVz2amZJb71HiA0wP7QurIqCX9AWiUViuInruY/tAp7em5lqkC7PB97//fW68\n8UbUUj7mYlofT0EQBEGYTYyRybVrn6K19eGUUygjU3M1QQPQ2vpwWB/KpUvv5sMPn8HhKOarX30e\np9Mdtn9LSwMOh5tAYJS8vEqCwT79WK3HpcXixOFw4PerIlEtCK+hkJ6fE0AVti+//H9H9MGM3avT\niCoup65ptRYwMTESYz8/Docbp9ODz9dLR8dhFMXOpz61FovFjsVip6ZmB06nmwce+CRhunM8b2Xk\nPCbrzTpdj6ZUs51NNCEEqoCajUqrM52uaiaRY20MbesHDqMKxHxUAXkW8ALFxL8vTVh6gAuA1h94\nOXDacP65SiUWcomdO3dSWlrKtddey/vvv2/6+SXiKeQcWk8kQTCLy3VNZZq2ONcYK
52eOPFTRkZ6\n9Cqoye4tMnKWSNCMjJwjGPTj8/XS2vowEC2ABgbO0NNznLGxLq65ZjLqej5fLxbLlNjUBGnoERZL\n4ihlOMY/3Qr5+RVYLE7Gx0dTOtpmK+aee97EYsnH4SiLm57rcJRSU7ODioobwsbd3/8OX/vai+Tn\nL+DgwTp9jo1zunPnNWFzH68qbbpCcrrVbXM9Uppb71HTr7Q6fXKp6M3zTI3128AjQE/oOa24UVto\nn3bgOInvS0t/tQCDhu1doWNiRymj15REqueahgaoqYH162FgGi9BpscPDg7y6KOP8i//8i8z5kUV\n4SkIwmVBQ3MzNfv2sX7/fgZ86RRumb9k84fxVNtvaOmYkfeiPf7v7eXcuPP/jXrdjYLHas3H7x/E\nas1DUay6eI21ryaOIgWQcR+HowQARbGiJRaVl69k06ZXyM+vDJ3VmMJkxWo1ir/o9Can04PDUUpe\n3gKuuOKPQtvK6e5+mfffbwwVI0qWnhu6mtVJUdGVLFhwI35/X8woqcNRyj33nMDpdFNb2xj2XHn5\nCiC+eFcjr+fD1lWkt1J7fScnA3i9dSkLyel6NKWa7WwyFz7AuRC706EBVRBq+JkSzYcBO+qcafej\nfVlVAjwWca7PAg6gAlW49kc8n+5c5JJ4n5+cOQPHjsGBA6qInO3jf/SjH/Hd736XRYsWzUiaLYjw\nFHKQ3PK6CNnCmYEBjnV1caC9nYaWlrDnLtc1lc0fxtMVxUbRMzY25UXss32ak75S/XXXBE8wGMDr\n3ciddx5iaKiNnp7jTEyMcf58a9Q1a2sbcbmWYrU6dVEaKYCMQrSi4j8oLFxMeXk1oHomh4ba2Lfv\n1lC7kMjU2gm9yq0qQKMLC/l8vfj9/YyN9dDd/dvQtgHGxnrCfJnxcDrLDec6T1PTt8KKIRlRFDsV\nFdfrAtrpdLNw4W2A6nHNy/Owb18N/f3vAFPrR5uDBQtu1rdbrfkxv0DQXt/OzsNYrfYZT301qw/o\nXJFb71Fz4QPMlaI3ZyIe24ktmrX7WRV6fBG4nfCIZBcQQH2POUb4e8oiEs9FA01NK0jFCyrMHgWh\nl6C6GrZN4yXI5Pg333yTI0eO8MMf/hBAIp6CIAiZUGBTI0/VHg/bVq+e49FkB9n8YTxSFCeLgKq+\nvQrGx4fp7DyMzVZIVVU9i69QP7hpr7smeDo6DmO1OsKilXZ7cdg1NZxON+Pjo3R3q1Vpn3zy01Hj\nMArRgoJKHnjgE/LyykL3UoTf38elS+309rYS6ee0Wl36ddUiRPGFpM1WBGipvKlFODdsaGFiIhCx\nVdFf/8g+osFggI6O8CJAd9yxl6qqejyelXz00XN0dR3D5+ulsHCxvn60OVi7do++roaG2vQvEHbv\nXq7P2Wx/6SHVbLMZM1I8s7nojfH+jN7uzwE7mBKZF4Ey1C+mqlAjnGWhfatRxaQxImk81xeAL4V+\nXgGsQU3bjTWnDaHrvhU617LQPrki3ucvjY1QXw+HDoF7Gi9BJscfO3aMjz76iCuvvJKFCxfyT//0\nTzz99NNUV1enP5AESB9PQRBynobmZs4MDFBgs9FYW4vbGV0xdMDno6GlhW2rV8d8XsguIvstGntr\nVlXVxyxCE6tXZOTrHmsf7Vo33fRYVIEirShNd/fxqMhiVVU9Doc7btEa7bw+X79emEf1dlpRRaON\ngoIrmJjw4/cPUFl5C11dL0f4P9V9S0quxe2+hkBAFdbxsNmKGB8fDtvmci1laOjDsG2LFq1h7do9\nUXOrEa9/ZuS+DkcZ99zzBi6XV5+roaGzFBZ6GR5uY3x8GL9/6oOv9tql0k9TCgLlItMp8lPDVDGi\nemanGNFsUsPU/S1AFZG/B5agFg2qQPV0vkT4l0mLgHdQo56LgNcAX+iYk6F9bkEVmk+EHmtFhOoM\n16xELTKkvRbG8WjMx3nPPrK5j+fo6ChDQ0OAG
u38x3/8Rz766CN+8YtfUF5eHrZvJn08RXgKgpDz\n1Ozbx7Eu1TdTX1XF7jVr5nhEgtnEEoyRpCJmUtnHSKTQsttdBAJD+jgOHqxLKIibmxvo6zvF4OBZ\nCgs/xYULrwNgsThYuPDLnDt3nMnJ5EWBvN6NrFu3F59vgJ07r8bn68VudxMIDGH8sLpkyXo++WR/\nxNFWYkVHi4qupKhoKRaLnXPnjhEMBrBa81m06Ha+8pUnosS3zVbA5GQgSvhaLE6++c2usLmIRaLX\nLhapfNkQOb54AtW4T35+BUNDbSJoZ4Qa0heR61Ejb9XkVrQtmcjWnn8FVTBqeFCLAPlDj50Rz2tY\nUFusjBCdBVEJ3IEqWGNdX5tTDeNrEfncCuBojPELZpPNwjOSv//7v+fs2bP853/+Z9RzmQhPSbUV\nco7c8roIs0GmabSyprKfVNKCW1sfCatsG4t0Uy61lNCyshV4vXVs3vx23KJCWsqocT0Zq90ODJwK\nbbUyOemno+NwSqLTYrHT3/8O27e72bnzaiorvxxKK76EUVDa7S5uvfVnRBcnMorOqeeGhz/WfZYO\nRwlWawF2exHd3b/l8OF6fQ6Nflu7vYiqqvowz+jkpI9du5brVXvt9pLQ/2rqcnn5St1Pm2zejSnV\n2vmSpeOm4gc27vPxxweytqhWPHLnPWo6PsFcTfE0FuOpRE2LXctUaqtWvdYoKq2ovTr9hm2xfz/V\nlPpBYqfedwFPEr8YUGNoTBD9WjQCG2lqugnYiIhOIRaPPvpoTNGZKSI8BUHIeRpra6mvquLQnXdK\nGu08JRXBmG5Boli+0chtmuAtL/8CPl8/LS0PhUVLkwliTZg6nR6Dl3IixjYj4cWFNm16jdHR8wQC\nF/H5emlrexaf73xESi4EAkO0tj4c8oFGoyhqlDUWPl8vExOjjI2dx+/vD/N4GsV1Tc121qzZzd13\nv47FMvW7NjbWpYvSzZvfCv1/kqqqeu666zesW7c3JbEfKXIjizrFIhW/qHGf8vIvJt1fmC7TEZHZ\n7M+MJJZf04YqLrU+nNp7T6woZnSrpaltVuBOwr2bidB+/2NVvHUDrtDYPkT1jxqf2wv8D9TU30Re\n0Jo4zwnC9JBUW0EQBGFekEo6LkylXfb1ncTvV1sQaKmc8dI7jdudzgoqKqqTpmka02xdLi/nz7cC\n4HCoVWLHxnrp7j6u719evpKioisZH79ER8dhLBY7mza9Rnn5F/jVryrw+XrDzq+l/Uam/2qpuEYs\nlnzuu+80DkcJO3deg893Puz5SG+oxeLA47kBh6OY1at/zvPP305BwSIcjmL9vn2+AXbtWs7YWJd+\n7dbWRzLyZUa+hslSmSH9FGsgrXRr4XLls6iRxTHUdNQy4HWmem6WoqbJjjDVP9OKWn12D6oAj+/H\nVlkGdAJDMZ6zMyUujdiIjoIuCe3rA64PXf8qpgTnYuCTGOeqIX5qdKLnhOmQS6m2iRCPpyAIOUMq\nhYCE7CJXiryk6t+M9G0aRdMHHzyF399PeflKyso+r3sBNW+jUaAl8h1GXic/v5LR0S69P6bL5dVF\nliY4a2p2hBU7Mt7H0FAbu3YtY3LSp+9/yy3/kxdeuJ28vAUMDbWxadMruFxehobaeO65W1AUK4HA\nCH5/H1dccQvFxZ9maKiN/v538Pl6sVjs3Hnnb3jnnX9jbKxf927a7SWUlHw2VIFXjcwGgxNRIj3W\nnCfzZSZbS5HnS/XLhFjkyroVspEG4P8jtcrRsTzUTtRIZjD0L955FFRBG91LN1pg5qOmMmuCNNYx\nGvWoKbS9ofFVAx2AF7U4keYJTeSvzVXvbfYiwlNSbYUcJHe8LkIsEvXTnCuycU01NDdTs28f6/fv\nZ8AXK2Vr9kg3hXWuSNW/Genb1CvgDpzRxVVR0ZVhrUD6+k7i9W7Ue1Rq/UJjpX9q68mY3llX9wpV\nVfVs2fIBL
pcXmErTjUxFjXUfJ078FIejBEWx43AU43CUcPTog/h8A5w/38rYWBetrQ8D4HJ5+cY3\nOnC5qvD7LwBBuruP09b2ot4GxWJxct9977Fw4a16CxSvdyNebx1bt36kt4IBi95DVLuXyFYzxrEm\nS3uNtZaM6c1A3P6o6QrHXFm3qZCN71HzD2Nq6SkSi04tHd5D7PRZH1M9NhOdJxjneIiOavpRxWYX\niUXnClRP52uokc5qoBVoB46jeULVNZUoNTpXvbdCNiPCUxCEWUX6aaZGNgn02e65ONNoYmbDhqOs\nW/dsTNFUU7NDfwwwNtaD1eoItSCZ6heaSNDU1jbici3l0qVPePrplfh8/WHPp1PoaGDgDGNjPQSD\nAc6dO8b77z9JV9exuIJQTfM9GXYOi2XKOzo56dOFqjaWdev2sm7ds7S2PkIgMIii2NE+FCuKjSVL\n1idNYfb7B8nLq2Tt2qcSel6N400kEDPpvznf1q0w0xiLBZ1Nsm9F6N9FIvvypk9/jG2xgkbJoq/F\nqKL5C6i+zYeAt5nqBVoS+t9YbCiRvzaXvLdCriCptoIgzCrSTzM11u/fz4H2dqo9njkvmpRuC5Jc\nITIVE6a8f62tj9DXd4re3t8xOekPS/VMJ/0zMq03WXpuvPRQ7ZqRlJWtwO/vZ3x8lMnJABUV11NQ\nsIiPPnqOQGCqoEh5+UruuONZdu1azuTkKIpix+NZhdNZFtVeJF5bFK2lS7wxx/LMRhJrLaXrzU01\ndXa+rlshVRK1O9GeO8tU+mkA1ZPpCe0T7pMOJ57/ci7ZiFo0qIZwb+Y21Pt9DHg49Fh+H+YCSbUV\n4SkIgpCViECfeeL5EZubGzh7drcu3AoLF7N589u6eElH0BgFY3n5Su666zcpC9W8vEruu++07vts\navo23d0vMzbWE+YLjRSKTqdHLy5kt5ewaNHt1NRsp7X1Ec6e3UUgMBh2TaezQi825HRWAMGo4kQA\nXm8d69Y9m3DMkH6/zul4c5MJeCPi9cxmkvXCzIQawgWY23CtQdS0UyN1qIKyM8ZzELuoT7bgQo1u\n/hR4CjWKWgTcjFpoSNZ8NiDCU1JthRxEvC5CLDLxRJq5pszyZrqdTnavWSOic4YwpqKWla0IS8Uc\nGDiji06HozRMdELy9E9tPWmpp07nApYsWZ9UdAIR6b1d7Nz5Gd37uG7ds9x337u4XEux2QqYmPBH\nHVNevhKPZ4Vh7G/p6cTqfamiU2vjYrMVhQlRn++87gEFtXKudt6amu0Jx2zs19na+khUq5p4pOvN\nTTd1dj54Pefv3z1jeqtZr43m1Xwn9FhLLTVe69XQcy7DPsWoFWtfi3FOK9kpOrXWK0Oo0cwzTKXu\nDhPe3iWc+bumhGxGhKcgCPOCbPFEZss4hMQYCwmNjHSGPacJHK0C7XQjZAMDZ+jpOY7P14PdXpjS\neWprG0PeShWf7wLt7QfYufMaXYAWFl5Jd/dxfXswGGDJkvV4vXXcdddvWLNmj17IaP/+dWzf7uZX\nv6pACX0P7XCUcvfdr+N0ehgfH2ZyMvwLEoejlPvuezfUi/NtvQBSvPFrntmyss/j8w1w5MhW+vtP\nZdxTNd510i00NJ+8nqnMU26hfWli9B1miiYwe1GL62jFcbRrFTGVJrsaNRp6LfBc6LhYXximUt12\nJlmAGnE14gZuC/2szZ92j8UR2wUhO5BUW0EQ5gXZ4onMlnEIiYn0TBYVLaWo6EpstgJWr/45ra0P\nZ+wNnG4rkBdfXEtHx2G9P2dkCxe/f5j29gMptXbZvt2tR28VxU5eXjl1da+EtXMxoig27r//fb3y\nbjoYU2Gt1nwmJkax24vZvPlk0vNNN402FeaT13Mm52luGECNyJnpO4zXBkS7Vj9qJND4fA1TabnZ\nSDmq8OwOPbYCbwBXEj5/2j2KnzMbyfZU25qaGlpbW7GFikAuXryY06dPR+0
nHk9BEC57ssUTmS3j\nEBLj8w2wa9dyxsa68HiqsVic9PSovi6zPtD7fAM888wqCgoWYbcXR/kLY3kPm5sb6O8/RW/vG5SU\nXMvISAelpcs4d+6YLmBBLYKk9d50Oj1YLFYmJvxUVFzPmjV7aG19hIGBM3R3v0wwGECtkhkMuz/j\nHIDqB928+a24IjFyvNo1tMdHjmzVhbaiWDl/vjXl+cykX+flhMyTRiJvaDIxG/l8A1O+yGxm6ndY\nZSmq8JwJf6wwE2S78Lz99tv55je/yZ/8yZ8k3E88nsJlhfgShFhk4onMZE1FejrFm5kbOJ1u7rvv\ntJ666XCoqWlmpGNq68npdIelxUamnMbyHqpi8TgTE6P09b3O2FgXfX2/Jz+/kuLiz3DwYB1Hjmxl\n9eptrF2rptS63csYHe3G7++no0Nt8aKdOxgMYLXmsXDhl6PuT5sDr3cjRUVeyso+R0vLQ3FTOCPH\nG/k4PBW2LK35zKRfpxGzUlGzqY8uTK0ps+YpOzH20Uz22kV6Q43HQuI2IMY2IQ2hn7NddFoJF502\n1F6eifyxiedTPksJsZhpYRyZMC4IgnDZ8Y9vvcXfDQ5SYLPRWFsbJRobmps5MzAQ8/nn29roGh0F\n4NtNTTy7bt2sjj1byYVKolpRG1A/0E8nHVO7z8HBs7hcXuz2Ymy27+nPJ/IXDg6qvQLt9mJuuumx\niN6bFmASq7UQn09tFt/RcUSvPtvS0oDD4ebcuRbGxqYq0JaXr2T16m0cObJVv64xShp5f1r/TmMK\nZ0tLQ8wIZeS97Nnz+bDxZzKfxmMj5zadNaSJ4UT3kQqaVxugoaWF3WvWTOs8ZhNrnsxhJqvLpoom\nJrXxJLrPSG9oXZJjY7VPaQSeR+3Fma0UovbfXAK0GrYXMSUmS4nt40xnPoWsINNfQxN+jf/qr/6K\nv/zLv2TZsmX89Kc/5bbbbkt+UBpIqq0gCFlJPLGXSATGe17bdnZwEK/LRbHdHnZszb59+ofM+qoq\ndq9ZE3aewUCA492qt6YyP5/T996rH1u2Ywf9frW66JVFRfgnJvBNTHC9x8OetWsv28inUcg4nRVU\nVFRnrQBNF6Mg+tfea/loLIgDP9/lf1PAaFhqqeYvtFrzw3plOp1unnvuVrq7p9J7R0Z6ovpn5uUt\nYGysB4+nGofDTWfnYTyeakpLr43q1VlQsIj6+nf09iuJhF+kqDOmyRqjacb9Ir2vkeM3QxAZr+f3\nD6ad/qylojqdHkpKluFwRKc4p8L892pHfkI1Crd65kakxPNmQvR4tW1aumyyY3cQ3XfTgxo1zObP\nqG7gj4C3UNu8aCxArcBbCpxAFdORaHPiAZYxJbZz/z04V0maaltDZr+GGR7/6quvct111+FwOHjy\nySf5sz/7M958802qqqrC9pNUW0EQ5h3xqsMat6965pmodLhYx2nb2kdGON7dHXXOgpCRvtrjYdvq\n1VHnOTs41W6ia3Q07NjrPWqz8UKrlUG/n67RUfr9fg53dl7WVW216JjNVoTPd35GWllkklaZybHG\nFNOPfXbeYxnv8Hn+i29ERTa1CNXQUFtUWq3dHp7eq82Z3V6ib9+06VU9tVJLrb3zzkMMDbWFic7y\n8pW66DReN57oPHt2d4I02aljjPfa2vpw2Dkjx28GxutpEeF0zq/dR0nJMnp6Yqc4p0JjbS31VVXz\nVHRCdKpq8uqyxt+Zo0cfnIHquo2on5YjhWOs8WrpsjeHfn4VVWjFEp27iRadCmrV27kUnZH3GOvz\n+gDqPY8ZtpWg3m898AGxRSdMzacVtS/pAeBb0x+uMPNkWuQ5w+NvvPFGCgsLsdvt/PEf/zFf+tKX\n2L9//zQGEh8RnkLOIb6E+UmkpyqWGIRwkbiooCBKZGrPF9ls9Pt8YefSKHU4ws75PZst6kOm8Tqv\n1NVRmZ8fczx71q7F43RyaWKCgVDkE2B
FWVnYfmbMyVwxnXFoAmDBgpuBmWllkUl/xkyONaacXll5\nAwCryor5G+8wd955iN/+9s2Ex2jzECn2tMebN79FX1U9P7/zEPe5vFSHxJ5RTE61fHGn3CPUeO/G\nPqVaBDOWUE2UKjwTfkPj9TZteiXt82v3kalnd7a92sm+CDH/717kJ9REok8l7AuXj/fPQG/UR1Cj\neFuZSiON15NTows1VfYCcDLG2OOl0mZDlDPydS6JeKwJ0ULDz6XA14AHUft0JkIT537DtilxK5+l\nspDkv4Yze/wsIMJTEISsIDJSGS/iYNxebFf7HRrFYGNtLR6nk+HxcQ53dOjnyrdaAbApCk0bNoSd\ns8jhiPqQabyO1+Xi9L33xhyP2+nkhooKAFaWl7OksJBypxNPSKiaNSfLd+9OKPpmUqROpzepJgCM\nUTqz02wz6c+YybFGwbX7jjupr6riyIZN1K2Ln9IZS6RFij3tscvl5ddrdnPY6Y5bNkQ735YtH/K1\nr704rb6Wxj6l8YSPcdw/aD0Ztsa08ba2PmJa9Cs/vwKnswKHw43DURI3apuMXCvCk8kXIdMj8hOq\nseBObIy/Mx7PCv1n875QioxqGrdF9uTU0HreFgAvhY5bCJQBawmPFEZ+5E2YETgHXIp4HDRsv4B6\n/xtQ5ydRUaFIrg/9vxLYnvkwhZkj+a/hjB1/8eJFDh48yNjYGOPj4zzxxBO0tLTw1a9+dZqDiY14\nPAVByAqm46mK17ok1rlu3buX4z09wJSPM1WS+UoHfD5WPf00iwoKODUwoHs+66uqcDsc+rEV+fm0\nDQ3FPU+8OdGIHHeYD9Xvj7q/ZONOlWz1u2XSn9Hs3o6ZFlOKPH5TSHTGcqxlSqx7T6U/ZCwvdKrH\npsr861OZGpm1SZmdwkDGdQOxi1VlRiyfZiLvJkAbcCuq6Pwp6qduY4RT80LagMnQv2zAguq97Elx\n/xLgI8K9uA7gBuJ7N7V1YUctRrQ9xj7CbJLN7VR6e3tZv349f/jDH7BarSxfvpwf//jH1NbWRu2b\nicdTqtoKgjCrxBNDjbW1afW/NJ4nkljnKnY4gOhU2VTGF6/CpXHfRQUFuvAzXqfu4EH9WKfFgm9S\n/eCTSgXcxtpalu/eTdfoaMxxG8dVmZcXdX9mVeZM97WZLTKp8JnKsemIyUyrqUYe/98cbm4bOMNy\nWwH5tY2Q5MN9OmONde+pRIDjpb9nEj2OxMxz5RLTraqsMjvVSyPXjXlfChgFUh3hAqmR+D05teM+\njyrMzhAuOhXgfOjncZPGahZfBl5JY/8bUe9fS5EuBa5B9W5C7NfduC48qCnMUlxIiI3H4+HVV1+d\n8etIqq2Qc4gvIbeJl7aZyFMVK4001nm0/bYeORIlkhIVCzGuqcjzNjQ3c7KvD4Ayh4NjnZ2U7djB\n2hde4FR/f1QBohVlZdR5vfp1jB/W8w0iOZXvPB9pbeXTxcVU5ufzVIwKuWE+1E2b4vpUPU4nncPD\n007DNb422eI7nQ1STX80tkEpL1/J5OQfJ9w3VlpqpOAaHThDadcxulJMvcw0VTOV1NR4v0NmprXm\nWoqsWSQqBgXJ/u5lWpFkrtEE0mFU8Wmcg8jcQWNvylPELpCkESQ7vJyxOA7Ee/+0od5fsWGbdm9a\nivQHqOnEMPW6R/bt1I4pQk1VDk/Nlc9SwlwgEU9BEGaMWNHDWFGTRC1QItuZLN+1i9P33ZewEi3A\noscfZ1VFhd465ZHWVnpGRth65EjCtNMwsXbpEofb2/XUWUVR6BlTPUOHOzv1gkMepxOvywWKwt51\n69SfQxijhfWHDnG4s5MVZWXsqKlJOn/GHqE/fPnlqAhpZCQyMhJrt1rZ6PXSOzqqR2Mz7Uk4nSiq\nWSm/s02q0beBgTP4/WoD+qKiK3E4ihLuGysyGhnxSjfyl2mkMJUIsHGNpXusmeMQIkkUFcwFEgln\nYxp
xBfAcU1HNyhjHLUctOFQNvBbjWjayI/oZWWW3HNXHaWyPshZVjK9AbQcDU0Icol/3yMi39nx/\n6Dy5+sWEMJ8Qj6cgCDNGLE9YLF9mrP2M2yrz83UBBqrQW+HxUGizsaOmRj9PpCdSo76qip6RkZj+\ntEi08XVeuqSLXVAFrtvh4HCn2kttRVkZe9et4/bnn+fC2BiD4+Nxz20UgpFjbmhu5vm2NrX3Z0UF\newxRX2OP0I1eL3sTpOYm8nsO+/2meTSn4/eM5w1Mh0w9lNMhVR9oOv68VPdN14Nqhmc1kzmei9fH\nbF/t5RRhnXuMgvLnwMPEFs41TImpCqZSZzWBZjzus8A51IJCdwH7yA6RCWqCoZUpwWlFFYKtwBdQ\nxxo5BwOk94WC5octQm0zsyd0XLrnEWaKbPZ4poP08RQEISuJFZWMlVIba7+odiYhD2ORzUavz8fh\njg4cVmtUOq22n1bxVotcvtPfrx+vtVmJhTY+7fhyp5NyhwO3w8Evb7uNOq+XjV4vRzds4KcnTtDn\n8ySAV+QAACAASURBVOmiM7JNi4YWJTSOWUtZfeqDD6Z6f3Z0cPXOnXoaq9YjNJUIaay+o9p8mtmT\ncDrniucNTIfZr/qZPP1RI5300FT3TfXaxv0dDjcHD9ZNu7rsXLWnmS7Ga+7ceU3a9z0XY85+ItM1\nZwpjBduHiV+K0xgN/WLoZ2NU0HhcFzCI2j7kWbJHdK5FjWYaP48XAi6migVF3gukX6K0EdXLOYwa\n4dTWdKalUgXBPCTVVsg5mpqaqEkhTVGYe2IVpdEic2eHhvAWFlLscFDicFDhdOIOFQAyHptvtfLg\n0aN8rqyMm+12vU1KpIjRzvu58nJustn4n7fcwsOtrWGRSwvox3/xqadY6nJRYLPxPZuNu+64I+bY\nO4eHOd7Tw+HOTh5ubQ1Ldz0zMMDFwFTK1BNf+UpMMZYsLVijMCSqNX/pnrVrUy7qY7zGU2vX8nBr\nK9tWr+aR1ta4RZimQ7x0y0SYUZwom4vORKaHJnqPmslU0kwLHM1Vexoj6UQhtWvabEX4fOd1AZnq\nfWfzmorErL97yed3dgoVpe5LNaaTamPKR43o+VBbhWiRPWNrlQDR6azTwYIqwl+I87xWNTeSItQ2\nKM2oVXcJjVvrqTmIKg4row+dNm7UKrdaFeDEa1o+SwlzgaTaCjmHvFnmNsa0Sw2P00lvKAIZmYoZ\nmaa5bfXqmCImXjqnMTX0/YsXGQgJxXKnkwuha942NETTX/wFn921i66REcYmJii22xkPBrEoChd8\nPpwWC5PBIEHgS5WV7L3jDrYeORKW2msFbq2sjPIyxkov1sa1oqyMRYWFOCyWMFGdbnQyXmsZM9Jc\ns4FEqaTZljI5V+9RqabxxpuveHOcyvymkuqbynnSaaeiXXNsrJ/OzsNptyIxu6XOTGLWmko+v8na\nl5hFvPTPVFrD1DAljkEttrObqdYqt4Yem9U6xYNanCcSK3AWeDBiPEa0scGUZ9MFDMXZJ5J0W+Wk\nnlYrn6VmH0m1FeEpCMIsowmuErudi4GA6p10OuMKLm3/IpuNm6+4gkUFBTF7YUbut2fNGh5pbeVU\nXx9nBwd5ZdMmvtvczOGODlaWl9M9MkLn6CjFdjsnN2/G63Lh3r49LIKpoQBWRWHc8F5W5/VS7HDw\nn++9p2+zMPVRR+vhqfs3PR72rF3LzXv30jUygs1i4aYFC8KipPHEoxnznaqYzcVCQJdr78dIjELq\nB0533I+r6c5XuvvHE5jJztPc3MAHHzyF399PeflK7rrrN7Pmb71cSP7lhFl+wOn2Fq1hSsTFE2Sa\nOAa1sutypnpZPkJ0L89MsKPeQ+T5lNC1J1E9mu8DHRH7lKJWn53ybDawijMsoIATNOLHnVTg15B8\nPoRcQYSneDwFQZhlNI/gW5s3617BPWvWxPUNNtbW4nE69Wjgk++/H
7MdS0V+PjZF0fdraGnhzMAA\nx3t66Bob4+HWVv06v7nrLpYWq6XqBwMBlu3aRdmOHYyMx/YEBSFMdAI0nzvHzrNnw7ZpolNLqT0z\nMDDl3+zspKGlha6RES4GAlwI+VS3Hjmi+01j+V8zbV+SriczXrubbCaXUiZnEqMv1Oigi3Qvpjtf\nQ0PqOrfbS7jppseS7h/PO5nsupHVgdPxt6bjh72cSe4xTtS+JB3PZ6IVqBHr3Mkq3NagptCuBzai\nOsaOh67zbcJ7edpRo4vTQUvbDRAtOhcBt6D6NvtR7zNWRHQQ+AxqJBbAzRmu5BitHMBPA4tJHlU2\no1XObPl2BSE5IjyFnEN6T+U2mrjyuly6yErUw9PtdHJDRYX+OBASgJq404TZ821tujjUivyE9dC0\nWqk7eJDhUJXYtiE11ckK+E6fpt/vJxAM4rRYuL68POl99Pn9+CfDU7lcdjvrlyzh2tJS6g4e1Asa\ngVogaNvq1dgt6tuuBfBPTnKgvZ2rd+5kyX/9F7c+91yUwMxUCCaa21iYUQhotsm23o+pvEfN9EfB\nRB9X052vwkIvAIHARVpbH066fzyBmey6xuNqanYkvc7lhFl/96JFerKVmIqAjEUqginWubU+lbEE\nmbHf5+9Q/ZLGL+N+DbwV+tkOrCLc95kMLVCzErgt9LM1Yp+VwDuE99gsI7yQkXbNCVRxugxtbgtC\n46jGwza8wFYSvwMkmg8jiV7H2K+hfJYS5gIRnoIgZDUNzc0MBgI4QoJtZXk5VxYW4rRY2HrkCPva\n2jjW1aW3HSl1ODhxzz24nU4q8vPxhITt2YsXdQG36umnGQztPxFxva8tWcKCUH/OVLEp6geWodA4\n24aGONbVRa/Px6KCAr0Krtvp5LW772ZxYSGrFy7Uj+31+WgfGeF4d/eUEH3iCW7du1cXr5p4ziT6\nmQpmVsCdLcyKeM1mXGC6H+dTJdHH1XTny+FQP2SnGiGNJzCTXTfbvkC4PEi2EqcbcYu3Ao2/ZZpA\nM547UQVWbSxFqG1VDgDG96gxpn5zi1FblfQBi1EjlLEwCkstq+VK1KimhymB+TnUCrS/CY2tMfR4\nI2qqr/aXpIQp0arhQ5vbRhqpp55DLMOtR2qvJv67TqoVaRO9jmZETYXLhZ07d7J8+XKKior4zGc+\nw0svvWTq+cXjKQhCVhHpMVy+e7few3NRQQHv1NdTd/CgXjDHrih6FNSCWjRoPBjkeo+H0YkJvaJt\nZV4eXWNjVHs8OC0Wvc+l2+HQxd/K8nJ+c9ddACzftYuusTF9XEU2G8MxUnEVpj6uACxwOvmCx8Ph\njg48TifLSkoodjioyM8P86YCNLS0cKi9nQG/Xz++0Grl0kS4HF5cWMjbmzeH3XesQkHZVmQn16hh\n9txUs1XCxQw0D6XVms/QUFvY+pI1l+skW4lm94CsYeq3rA5VfCY7dwPwPDCKKjSXh85RDTyFWuG2\nK3SuAKqYLEZNg60GrkUtAvQuagQyiCrGalArzx4L7T/I1DxobU5AFa5vJxijNodam5cS4AHgCKro\njDW3xp6bw6FtmbzrJHodpY9ntpDtHs9Dhw7xp3/6p+zevZsbb7yRc+fOEQwGWbRoUdh+4vEUBGHe\nEJla6jOIsPFQaqvWp7LYbufGBQv05yeB8z6f7qk09rOsWbRI7ek5MsKr59Um5FbgKwsXsqykhDyr\nFYfFwuefeoq7Dhzgc+XlrF+yhCWh6Gq8N8vIt94en49jnZ2sX7IEi6JwvKeHA+3tvBCKzGr3paXA\nVhvSiAHyQqmuJaE+otUeD29v3ozb6UyaBit9CTNjNuMCqSbQZQNapHJoqC1qfcmay3WSrcRMekAm\n83BuT/HcZ1CF5UXU1iWnUYXhIdT+l6dD97AqtP8EqujUPJRtqD7QXuBroe2ngBeBvaFjTxI+D8Zx\nJhKdMDWHH4TG4w6du4v4c6sdc
7PhOpm86yR6HaWPp5Aajz76KI8++ig33ngjAAsXLowSnZkiwlPI\nOcSXML+JFFfXG4RZz9gYDS0teF1qwYjBQID3Ll5kQV4eoApRjRVlZbxSV6enjZ4bGaHX56NzZESP\nkE4A+z7+mOMtLYxNTNB6/jztly6pfTs7Oii026lyufBNTjIYp/BQLALBIEc6OsI8oFq01ON00jk8\nrKfL7omIWg4FAtR5vWHFl7SUV2Ma7COtrVFpt5pHbo/zh/zDpftZv38/Dx49OuPpufMFs8RgKu9R\nufhRMJZ383Iq7NTc3MC+fTXs378eny+zZOx0zzVzf/fMWomxRKYx/XMVU4WBNpLeb1mkP7MHNbr5\nCLAQqEIVlu8a9lnJlGDUisAVAz8DPkEViDB1/17C5yGdd4N4c5hobrXn9qRxnemMIT7yWSr7aG5o\nYF9NDfvXr8c3kP57TCbHT0xM8Prrr9PT08PVV1/NkiVL+MEPfsCYIfPLDER4CoKQVUR6DPesWUNl\nyHOpidG24WF9/56xMW654grqq6o4uXkzdV6v7qnUChjdvHcvL4VSVItCwhbUiGdktVoNh8XCsc5O\nPVU35j5K/IwS3+QkYwax6p+cxGGxMD45qUdBtchnucFL6Z+cxG618tMTJ+gZGYlb9dYYGb5m507W\n79/P9at3UFVVj8+9mpbuXg60t3Pg449zrkrtXHGyuYFv7KvhZROERTLMFDGxMd+xGsuDORu+zJmf\nq9QwRnd37rw6o/HMv0hxLI+hMWqopbQeRjUopLNWGlHFqpbdokUH/ws1qtgPdDK1ziuZ8mLClMgc\nBD6L2ucz2e/FbH01lItfQQkzxcCZM3QdO0b7gQO0NKT/vpDJ8d3d3QQCAZ5++mleeukl3nzzTU6c\nOMFPfvKTtMeRCBGeQs4hDY/nB/HahERWYXU7nZy+994wMeotLNT3L3U42F5Tg9vh4Oa9ezl27hyX\nDL04G5qbef/iRb30Q5HdrotTlxYhXbYsanzjk5P0jI3pwjRSYlqBP6qsxB5HfHqcTr2Crba/f3KS\ngdDYtCq3AK/ffTfO0L42ReHQJ5+w++xZXTBeHRKWxnnSIsNFNhvnfT4OtLfzg9aTrFmzmyK7GgGu\n9nj4YqhC70xUqc201Uu2YZYYSOU9amDgDI91LeGH7ctZvftnMzB/5pcvilUcaDZamWSLSNOiuzZb\nET5fb0bjSTdSnP1/92IlqmtRw2tRi/xopOtxc6OmxL5LeHQwuueyyirChZyxAu0YpFTUZ/6T/Wvq\n8sNWEHpfqK5m9bb0M0gyOT4/9AX/D37wA6644grKy8v58z//c/bv35/2OBIhwlMQhDkhnTYhRjHa\n0NzMqVAKiRX4QkhYRfbM/OLTT1Ozbx9PffCBLjqtwCt1dTy7bh17162jwJCaG8lkxOMg6OLQqihM\nAMfOnWPt4sUsLiykZcMGFhcWcrfXi8fp5ILPx1Ao4mlTFL0qr8ZVLpcurr0uF13f/CaeUGGkgUCA\niwbx3BsSlvcePqxvq8jPp8Lp1M9rFJbGqPGetWtnrEptLvb8TMRspo3abAV0s4D3WMbvRj0zMH/Z\nVckyk6il8XWxWvPnLPqpRXevuOJmfTzTXSfzr4JvrNRULZrXxlS7kRJgxzSvERkd1ASlhfB+nY4Y\nY6s0XB/Uoj69qAJ0OdHiM52MAemTKZhDbWMjVfX13HnoEE53+u8LmRxfWlrK4sWL075mukhVWyHn\naGpqkm/q5gHr9+/nQHs71R5PWqKoZt8+vbKrxtL/n723D2/ivNNGb1lf/rZsy8QhBgU3hKYfCU7c\n0ha81tZOKSbUboKSJu1F0rO1djdtt/tuN+w53bNnu233fa/T9Lq63Z7Tbjh9NyRN/YKTNIEU3BQT\n/FGSOk1DIF+NuyTQGjDGIGHjD9mY3/lj5hk9Gs1IM9JIlsxzc+nCmo9nnueZkTT33L/f/SstRXh
u\nTimpAsS7zTptNmxZuRI9IyOYW1iAx+3G9SUl+N2FC8A772iqnjxYmZaHhoYQmp1F7+nTcX0PDgyg\n+/jxGNIIALVFRTg/O6vkljptNtx7ww0xLrcetxsrfvYzjExNAZBupdTk111QgNkvfSluHpjrrVli\nuXv3+zE9PYqCAifuvPMVlJX5lHVqd+FjQ1+Ncy9N9RzmKph7a1PTjrTIgJHvqEgkjKbuH+G3M15l\n/rTmOHXklpPl3r1+jI5KLqH19QG0thp37+TPy/PPd6TcjlWw6joxg/z+3VO7vvoSb24YJyGFzf4a\nwJcghfE2IDbMFpA+B29ByvV8HsB3IIXn9nLbqB1l/TDucW1m29xBfl9T+Ylcd7X953/+Z/T09GDf\nvn1wOBz4zGc+g09+8pP4l3/5l5jt0nG1dSRaKSAgIGA1GKFx2u1o9/mw0+83RViKOdfXi/Pzkro4\nOxtn/sN/JfJlWGZkl9zzkQguGAxvdNhs6ONyRsORCIKDg9jR1KSosMPhMI5duBBHOovtdvymowM3\n7NqlLFtWVIQ9J04o2960ezfevuce+EpKFOKpJp0A8PJnPxs3D+mQvunpUczPXwQA7N27AZ///J+U\ndUzNBKSyL1+YHlZu+AcHg2ht7UZXS0vMPKihJq+5TkxZ2Gi2jvWru78eM38spBSIznHqkNShXDkH\n6ajJ/HnJBTMjq66TIIIYxjCKUYwudMGTAw8IMoMuZOYhiA+SURAgmfSojxGEFHJ+DBLRBCTS2Q3p\nwcxNkHJEtaICzEQM5FZ0gYBAqvinf/onjI+P48Ybb0RhYSHuuece/OM//qOlxxCKp4CAQFbBK3Va\ntSjVUN84f+3FF9Hzxz/iA5WVqHS7cW52VjEAsgMokOt68ornNYWFIADnZmeVZWpFlEeVy4Wpy5cR\n4Vxp230+PLtxY1x/tg8NYec77yhqJoPDZsNlIqnkS02NUlP05qoqjE5PY0zlFBeor8dLZ88qxPOD\nHg9WV1Tg6zffjNv378dQR4cSVgwgjvxqzZWaZKjX79lVh0hkHHZ7Me6++60YxVOtZr548LMYGemB\n19toODzQ7LnOZaRD4NjtbzGkW3C9mdu/v830HCdDrpwDq1TCxVAbMwU//OiXlbIAAujOE6Usf+BH\nVIkE4mtcakUFsE+rE0AJpLDgZNdZutEFRr8hjG4nkKvIdcXTKITiKSAgkJPQullPVotSDbXyNjY9\njXORCPpHRxGor1dKqNhtNiwQYYEIdSUluMjlWJ7VsAP3ut04F4koyimP8NxcnOI4cOYM2vbvx8Tc\nHA6PjSn9+cXJk3GkE4i65U7Mz6P39GksKyyEr7QUZQ4H3lIprWwu7ujpUYjnDRUVeGbjRgDAzF/8\nRcz2/Lzy749duKCEGwcHB+NIhnou/+edr2Dv3g04cM0j+Omhoyh2vKmcJ7Wa2dLSZfqG3+y5zmWo\n584MgWM2P4B066hHL1KZ42TIlXNglUqYTVU60yiWlbJGNGLHoiplmSY0VrQf28bAwHYDYelMiVwL\n4HpIdUP57VjOKA/+0xow2FetdszA6DeE0e0EBHIXwlxIIO8gak/lD7TMZ9TlUpJBfeN8fGICgFSz\n8+F165T2/vzaa5XtXt+6NaaGpvqLrqG6Gr+9804E6utxdOtWOP7wB2WdQ8elNjQ3h56REYV0Mlfa\nuYUFze3VGJudxclLl3B4bEwhpaUOB9pWrFDmotzlUsZQ4XLpOsby83pTdzfeunAB/aOjCumskOdG\nDfXclZX58PnP/wknpi/HnSe1u3Aq7qVmz3UuwwyBU39HGQ3Ey4RD7FI6B0sNXehCAAEcwIGkYbaZ\n/d1L7ICcfjkbKxyWY9sw5nTMDI8OAXgGxkjkYoTNGj2mke2MGx2JeymBxYAgngICAhmD1s26mtAk\nA3Nv9cikzFcmuRdOzM/joaEhpb0nb78dq0pL4S4owH0HD+J
DVVVKG7x6aYNEwD6xZw/6T5/Gjbt3\n4zJHUi8TaeZXqnHswgXUPP44ktHOKpfaYVEKCQaAS5cvo/fUKVyUCWNXSwtWlZXBbbfjuZMnFTJ4\n689/HkNCi7lapKMzMwqhLLFLLV+U50YN9dwx6JEqM+VStLY1e65zGekQODOl6K1GqueAkY3v7m/D\n7ZFw3vh15krNTyPwwINudOdAbmdiQpN+ORsryFxsG8ZyfVOpkbkYn1ajxzSynfVllAQErITI8RQQ\nEMgYtPIQjUIrfDRQX49LsvKoZarD57PVFhVhdGYG5U4nJubnUepw4JLKgCgTYLmdDO0+H4bOnsWo\nHO5rB1DucsU48BYVFOCjy5bh+MQEJubnMcGF/jZUV6PY4VDyWGvcbtzi9eLY+fMYm51Fo9eLp26/\nXXLbjUTQe+oUGr1eXJybw9jMDJwFBXjlzjvhKyvTdaHVO09m8gNzJZdQwBrwLrSv1Afw/7V254Vf\nZzruuVcvEucopp97bIXDcmwbkcj9GBzsQVPTLXC7n0yj3aUG5iCszmcVyAWIHE+heAoICGQQ6She\nLJyUEbRGrxdFdjsm5uZQW1iIp26/Pa5dXrn7TUcHAvX1OLZ1KwL19fjYsmUx25Y6MpPi/ufLl6NI\nVh6dNhtGp6bwoepqtK1YAXdBARaAGNIJQKoJOjqKkenpGNIJSPMwJIf3ljgcOCeTy49fc42i8G7Y\nswcDZ87gt2NjWFZYiBvKy/HuxAQuzs9jPBLBhr17AeirdnrnyUx4KdvW63bj9NSUIZVUIHfBFKWQ\ntxFPNO3IG7/OXHC9XSykrvYmVgbN1xxVh3smat9oaGhsG273SbS2noPb3Quh7PFYzPgKAYHkEIqn\nQN5B1J66OsDUuYbqalyIRFBXUoK3QiGFtBXZ7bjV60W506kY4oQjEdz6859jeXExyp1O1BQVKbUy\nf9zUhBt37cKc/H10+3XXoffUKcnZ1kAdTyP4QEUFGpctw7PvvRdX3qXa7cbFubkYNRSQyKmWOREg\nhelqGR1Vu914v8cTMx88vG43xmXSZwdw/N57lTBbI1BK3hQUoNTpxKMGSt4w1fT01JSizl6tyudS\n+I5i7rE3N+3AV9weU1rVwEDQwnqk5pCK620+lDUxck3ljtrrR+ZrYAplL10she+pfINQPIWrrYCA\nQI6Cd1XteP55JYyTgZUnAYA1u3fjnXvuwfahIVyYncV7k5MApLDUczIB+8jPfx5D4EocDlQ4nQir\nFMZUUVtUhMMdHVj+xBNKrVAe5zn1rwBS3qmroADuggLM64QAT12+rJtvysYOQAknBqTQ3OrCQvSe\nOgWnzaaE2ZoB7+AaqK/H9qGhpKVEmGratn8/gFiV1EgpksUkKwLx4N1jzdIXa+uRmkMqrrfDGFbK\nmgQRzNuyJrmj9maqBibvbPtjAA/B+tqgySBKmggIpAMRaiuQdxBP6K4O8OGfLIyz2u1WnpbZuW3H\nZmcRHBzEcydPKqVRKpxO3CLXvSyVQ1SZ2thQXY1ylytKOtNUO20A3r77bmwfGtIknWowMrlw5YpS\n8oX1k2FtVRWucE8UPXLZGK/bjQV5+YcrK9Hu8+HY1q1o9/nQ4fPhhTvuwJOtrQjU12Ns27aY2p9G\noQ6x1XIn1oNWOK+R/dM3MEkPVprSsO+ofDK6sRKJCJBxz03rEEQQfvjRhjaENY6aO2VN9GHkd898\nSGymYCbc08y2vHHOhwD8CsBqACfT6axJLB3zHnEvJbAYEMRTQEAg58HIjMNmA6NpFTIRA4AyhwMP\nr1uHCEf67HJZlA6fT8nvLLHbsaywEM986lM4KauiPHgyW66TA1pss6FtxYqYZZUuF+7o6cFT775r\neEwFQIwj7u3XXYc3AgF0+Hxo9/lwaMsWlHB9KHO54HW7cZkIYTm8tnj6GIILP8C3XvkNwpGIMv7t\nQ0MYm57GfQcPKnmWZhx
q1eTRTK6nVr6okf0XW62xjPgGg4DfD7S1ITz+VrTNbwWzy7YWEYkI0GLc\ntjNFswc9CGoc1UxZk1xGJsrxpAYzbrJmtuXV0QIAFwGMA9iA7D3SyGa5lcV4TCMgkFmIHE+BvIPI\nS8gv8GGWfM6lXshlonb+8/e/V8ia02ZDicOhqJZetxsEKaSVd7AN1NdjR1MTan/6U0TksikdPh9e\nOXcOI9PTUmOqHM+bystxXWkpek+fjutHAYCm2lq8eu4cJg3W8DSClSUlmLtyBZGFBdxWU4PlxcXY\ne/IkwnNzuLmqCmUOh1JDFAAKMYfr8Qd4cBG/wcch6a5A24oVmJqfj3OY5V1na9xuNNbUxJyDROGw\n6bgTG90/ldw8K5G+c6cMvx99/f3wA9j/r7UY8Y7Ce74Rm799AO4Zj7k0tiWIxcjMa0MbetCDRjTm\nLbm8un739MJZeWfb1QDGMTBgRzjcCIdjGC0tIUhfL5n8kFnh0GsUfqSW/2oMV9c1lRvI5RzP0tJS\n2Lg65jMzM3jwwQfx7//+73HbihxPAQGBnMVzJ09idGYGAFDtcuG8rNYFBwcV4xkjOYDD4XCMQjhP\nhGmZXJY6HIqZTl1JCd5fUYHe06cVh9X7Dh7EHFers+/MmYR9/v3EBMpdLk3jnytAXL6pHZJ6yXI3\nU8HU5ctKHmjvqVOocbsVZfOtCxdQLtcDXVtVhT9NTeF8BPg9PogyzICRTgB47fx53CLXMOUVRqY6\nsrBjFvbKzgGf18kvB6IqZqowsn8quXlWoqWlyxriWywrIo2NaPnSUxg89hCa9u2QSGe+WMNaAD3q\n0IXs3bazPjixB+3oxE78W16SzqsPTBcHpLPIvhc83N+vANiAcPg6jI5KNYkHB4HW1kx/yPg+ZBrZ\nVFcFrnZcunRJ+Xtqagq1tbW4++67LT+OUDwFBAQsg5pAbh8ailEplxUWKrUn+RxAIzUgmcstj2q3\nGxtqa/Hbc+dwenoa5U4njm3digqXC7c+/TTOz87GucuqcXNVFd4OhXSdZY2gyuXCBQ13WQZXQUEM\n8dXcxmZTHHdtALR6U1dSgte3bsV9Bw+iZ2QEN7ou4rrq1Th0RlJCCwsK8Pt77kGFy6UojMwYyGm3\no8ThwNT8PHpPn445B+/fvRv/dfEiFgB8yOPBYHt7QmXTyIOCqxbhsBRuu2MH4JFJTjZFEg0shnGT\nH/FaTbZtWbT6IKCP3DH4Mq6LRyMVGrB580q43TsTbp8vGAgGER5+C47i42jp+g3cHt9id0nAAuSy\n4snjsccew7e//W3813/9l+Z6oXgKCAjkBNSq2dj0tEI6PS4XXv7sZ/HQ0FBcyKVWDqCa3DCXW75c\nx/lIBIdHRxXToIn5edy4ezeG77kHK0tL8Z78BM9hs8WVMWGoKynBssJCzbBaI3DYbLipshKHz55F\nid2OKY3wW5fNBp6Waimpc9x7rZ6udk3hmyVP4sWDP0OV629Q43ZjZfVN+ElzMx789a/x2vnzeLG9\nXXGw1VIyA/X12On3x4W9jnLn6cLcXFIiqT7PHpdLEFEGjwfoVlGcbIokGlgMl1ktrUZPx7IKauJU\nLBMnoRcZw2/Dz6FM/lwfHPwi2lqfWaSeGNfFLYtUyDGEh4cx2n8YADAYfAit6u8UgSWJdB/+WPXw\n6LHHHsO2bdtS2jcZhLmQQN6hr69vsbsgoAM1gWTvK10uvHbXXfCVlcUZzwCxZjbbh4bg37sXMEr3\nVgAAIABJREFUT737ruKEeuOuXbjv4EHsaGrCLz79aRTZJRsgO4DxSEQJSQWAuStXsGHv3phjr7/m\nGmV9md0ec2xXQQEK/vCHpGMrQNRZlsdlIrw6Pg4boEk6AeCSarndFv9AkPWKN00qtNlwXXExql0u\nFNFFjI0dxv8YqcYz7x3HuUgEvadP46GhIezbtAmnvvCFmLIpzEzozVAIQPScaJn/zMr9K
wDQs2lT\n0rlIx/X2akCufUcthnGTlldppgMH1QZR6j4EB4Lw7/WjbX8bwnnmMpyNa2rCIYX6v+cFnmhaTFXG\nuOHQ4hoqZc78xyGH7HsbG9G0IzOf2Vz7nhJI3+TOCpO8kydPYmBgAPfff39K+yeDIJ4CAgKWQe2G\nyt6/e++9CWtJ8mSIkZiQTCbVOYketxu3yiVCGJ1rqK6GUyZzxXY7fv2ZzyjHri4sxJHxcYk4ulyw\nq4hnKBLBb8+di+vT8uJiLCssVN5fAXRrfi5cuaKpUlbJeZnqZc6C+K/eBUjq69GtW9G2YgWK7Hbc\n4vVi+vJlnJ+bw7H55XgCX8AFx/swTRI5rXS5dF1i2TyORyKoKymJIfVqZ9u1ck7oFQDfOXJEsz0g\nSmbnr1xBh8+XkuutQPZRVFQDt9ub1ZtzLepgpnBGKlATbHUfhsPD6B/tR89ID4KLULJnMZCslAyP\n11puwyv1wMDmtfiRe2fGerR0nFqfQ9Sj+QFLW27p6kJ9IIDNBw7A7Vk6Sq5AYqT7kNCKh4w//elP\n0dTUBJ8vM+HdIsdTQEBgUcBCaY9PTsJXUoKTly7BV1aGd8JhjEciWFtVhevLynBJzklkxPHVu+7C\nXw8OomdkBCV2O0qcTrz82c8CADbs3YsN11yDM9PTStjn9V1dSm1PPahDcddWVeHQli0AgJt278bo\n7KyyzobYUigFAApU+zttNthtNly+cgV8hul1xcWYnJuLyTtlDrylDgc+ds01eFIm4HzeKwDcVl2J\n/7P0GXzl3Cacmp6Bw2bD7+68U7dOJ8uJ5XM59XJptbbVgt7+6breZgq5k7O2uNi716+E2tbXBxbV\nxCmTSOaM3La/DT0jPWj0NuLA5gPwXAXXgx9+9MsBzgEE0J0gwDmMMIIIYgd2ZNCEyY/sZ95mKru4\nCkBI/rsDwGKFJgvkC5LleKbr7m6FO/yNN96Ib3zjG3jggQd0t0knx1MQTwEBgZQQDALDw5KJZ1dX\n1Ecl6X4y4Tx24YKiaqrBTHQ8bjfCkQhqHn9cIXZs3epduxQnW54EqcnRk6ramg3V1Tg1NYUxjkzW\nFBbiHPfeV1qK60tLcXxyEtcVFWFofFxZV2a3JyyjoudsawdQ6nQqJNhhs+FTdXX40YYNuGX347h4\nRVIx7/TV4emNbQoZbKiuxsrSUuz0++Fxu7Fhzx4lx1XPiAnQJoN6BJPflpkRaeVrGiWouQKjhGup\nE1TLSsXkOcKRMIKDQexo2nFVkE4gF0vJpFtQJxUS6UdmyO7tAHoBNAB4wWBfBK5m5Lq50IsvvohP\nfepTOHv2LEpKSnS3E+ZCAlcVRO2p3MDwMNAv/5YHg/F+Knrgy6sAUk7jxfl5lDudmJifR6nDgfd7\nPPjaiy/iVyMjiCwsKMVCWBitx+3GR2pqFBLEh3eysE+v243TnD04Q3VhIZ751Kfw0WeewdjsLNZW\nVeHs0aPAihUAJHLI18EcmZqK2Z+RTlZChY2hwGZDaG5Ot5zKAqCQzgqnE0e3blXCj9/nGMOrc9fB\nh/cwef4U/Hsv4w8XL6La7Ua1262QTgAol3NA2bjVynG5y6UQRjUpZQZNjIxqudMmKqui3j/XYTTs\nyGrzHfYdlcj9NxnZtZIMLyUDlnS0K4/bg+48VXuN/e7Fz04XurKgYppBugV1UrGoylR28ZNYVLvq\nNCHupQTUePzxx3HXXXclJJ3pQuR4CggIpASuXCHMeB9EOLXQXVCAgc98BoH6ehzbuhVetxuXLl9G\n76lT6PnjHzE6M4PQ3BzmiVBot+Otu+/Gvx45ouQZ+kpL4S4owH0HDyo5i10tLVhVWoq5hQUcHhuL\nO37vqVN4aGgI79xzDwL19Ti0ZQvGOCJ8aX4eFxOURgGAQrsdf37ddQAkc6L3V1QoNUWdNptiFGTX\n2f/Ply+PyXn9B+9ruA2v4NtVAzi2UI/+0VGcnpnBe
dlAqObxx1H4k5/gY888g3kitHP5lYwojkxN\n4fDYWEKDH4/bDY/LhY7nn0fb/v1468KFOFOgRPmaamMilvOpzhnNFbS0dKG+PpBU5cuU+U4i06Vk\nJhBWmEQwLK4Bi7VgtKMH0i1/MqgzCpdShmE84mfHAw+60Z0C6czUTBk3DtJGKiQyU9nF6Y5FQCC3\n8B//8R947LHHMnoMoXgK5B3EE7rcQFdXfLlCPfDKz83V1eg/cwYAELlyBd85ckRR1XgV0+NyKSVO\nGqqr8cIdd8Qpcl63WyGXK372M9htNjgLCvC+8vJoKRVIamOFy4Xw3BwavV4U2e3oeP55hWTZ1qwB\n5LARp82GIocD8/PzUt1LjTqgQx0dWFlaiuDgIPpPn44JxeXLpGgF5DZ6vXhUdQ0/X/IVzLtfxRNF\ndyBy6ULcPpeJcJkIQ7IJUm1RkbKOjYEpx8kMfvj5q5XNk7xuNwZOn0bVzp24uboa7T5fjMpqpC2m\njuZSjU9GuJLBakWQfUclIvHJyO5iONFaiUxl1ZmlHWp9bAyZLemSKah/97QVcSuVvUwXv0n1CklF\nMV3kekY5CnEvJbAYEIqngMASwGIoT6xcoZHcTl758bhcCnFS35DzrrhP3n472n0+dPh8CukEYm/m\nXbJDrdNmw/Tly7g4P4/xSASvnT8PQFIjFyDVxQzPzaG2qAgHNm/GycnJGCXKKxMwO4Cbq6owkcSM\naMsvf4mburvROzKCC6r5ZuVQtNTO5cXFmrmRxydncCxSiV+dGoVLdry9uaoK7T4fHBqlV0ZnZhQF\njc3Z0a1bYxyF9a6J4xMTAKSQ3ec3b0agvh5rKipwdnYWobk59J85A5fdbogwahGrfCytkilFUO3y\nzCOZGmtUreWRS+VCzCqTRmFWu1JTsePy+woAD1vYr+xBUiLD4ac0FHErlb3MFb+RnHZ3og39CJt2\nhBUqo4BAPkMQT4G8g6g9FY9cv9nnCcpOvx9v33235g05H8rpcbvx7MaNeGbjxpht+Jv5VXK46jyR\nkltpB/DyZz+LQH09PlJTE1PmpMBmiyn/wfJAJ994A4CkUL4qk1YboKl2AsDpqSklDJiZHhXb7VhW\nWKiEDm+49lrpmNx+H6mp0SxpwvpT6nCg4Gwlqv/kw7KfbsHOdRtjapDyGJueRjgSwfahIYxNT+Ov\nVbmXz508qVwTD/T1KUT0kkyqJ+bn0fqLX+DS3ByKHNHgl4bqasMlUbSIlSitEv2O0qqZypCM7KZC\nhnOpXEimaItZ2qGmYj4AXxgI4i/2+vGz/W2I5Ek9z+jvnkTpHQ7JTTVWEbeSlGWu+M0whtGPee6h\nREIvEoEMQdxLCSwGBPEUEEiAYBDw+4G2NiCcw/cnuX6zryYoiW7Ik4HflxntMNgAvHrXXfh/3nwT\nY9PTeIc7aYUFBXixvT2mP2sqKnB4bCyGYJLqfy3wdJQplNMLCxibncV3jhzBsfPnldqh7Eu21OHA\nDz7xCc2HBF0tLUp+62jFGZw/a0fvXjeCQeDZjRtjQmsZ+kdHERwc1H3owCuxg2fO4Kl330X/6KhS\ni5Svj1rqdGqqy8mgdR4TqXwCEjIVoVAsh+c2ehuxY5HDczNds9Mo1FSsHMCy8DDWjPbDa0H+rFUY\nGAhi714/9uuQ4e/he3I9zjcRBtDSshb19R0ZdCnOnLJYLD+WkB5KfBjAo5YfwzyWdvavgECuQJRT\nERBIAL8/6twaCBh3bs02crWOYqYRjkRwU3c3RmdmUOly4chdd8FXVhZTUqW2qAgFNhtebG+PMfQB\nouVB1lZV4Y1QKKYWp91mw4LGdxdzs61wOrG+tha/PnNGqcvptNlw7w034Gd/+INmfqdDru8ZuXIF\ndgAbamvx7MaN2D40hKfefRehuTmUhasx+c074P3LIaxpCqO80IF3Ll7Eu5OTMW1VOJ04cd99uO/g\nQc0SJ5WPPqqQT
B7q+qj5UhplKUGvHmq6SKVciFamXabyM5P3JYhhDKMYxehCV0ZcWMMAfrS/Dd4c\nKy+TrPRPbD3OOnTjdeRruKlUL/SL2AGCBzuRG+PwI/v1RQWuNuR6ORWjEHU8BQQyhLY2oKdHcm49\ncMB4rUqB5EhmQmPUpMZMvcpE+wZ6e9F76hQKIJHOPRs34rO/+hUiV2ILpNx+3XXwuN3K8Woeewzj\nkQhsAG71evHuxIRufVItOGSCy74l3QUFKL1Qg4WaEMLzc8o2PCkuANB07bV49lOfkuZK46HD7b/4\nBXpPn0a5w4GJy5fj6oHqPawIDgzg5ZO/hX1hAh77AubLb0Wps9BSo6BUa8DmMsyYKuVSPVQ/4m+3\ntZZlpy88uQqgO0NHNlNkPVskPFmt1dyrx7nUkG590Vhk4yGKQP5BEE9BPAXyENmsPRUOG3duFTCH\nZKrPtT/9qVLvs8PnwzMbNxpum5GqIrsdJycnY8gATxBqiopwcnISM2+9he4vfxk37NqlEDxXQQH+\n7Npr4SoowMFTpxC5cgVlTide5+pvAsDJyUls2LsX1xUXK66zDOu83hjHW0AijXq1PrXQ6PXivclJ\nnJdDMvn6oYnUMjYHD69bh4eGhgyr4fx5KcUELqE86bHMIt1IArWj51eHji26ky4/b82Tk+j7+td1\nt82lCAWt221rb8GNIYggnsJTCCGEBjTgBbyQlZv1ZMTSj+yQ8GRk+Bd9v8Dj/sdzqB7nUgMrtmNN\nTc5sPURJB6KOZ/YhiKfI8RQQSAgzzq0C2tDLZ0uWl8rX+zT7Nc1yD9XutUCsEVPPH/+I/tFRvHzu\nHB4aGoKdc5Cdu3IFvadOocTpRGNNDQBgcn4eDw0NxRxr4/79mJybw6sywWyorka1ywUAGBofh1Nu\n0wbA43Jpkk69L2Lmgvu7O+9Esd2OMrtdIZ0Omw0Pr1uXdA58ZWWm8mkVoyNM4gqkcVS5XDg9NZVW\nTiJ/HTgrpDbM1oBlUNe4zAVzLf56/vsPfzjhtunkOFsNrVxMftn2LDlmD2MYIUiGOSuxMmvkKpn7\nrtUmSXqZhMnMpEpRmmI9TgFjsDanNZrH2ogdFrsCCwjkM9Imnjab7T9tNttZm832uhUdEhBIBvGE\nLreQzChFjxQkM6G5zesFIOUkVrhcMccwas6iRW75ZbdUV0t/r1+PHU1NWCu/Z/C4XNjR1KSYGGmR\n5NHpaVycn8c8EWwAqt1uNMh997rduLm6Gu6CArx21134+LJlAKIlVz7k8WB5cTE6fL64osoN1dV4\nMxCAx+2Gr6wMH6mpwSRHxi8TxZFgK9DV0oI7fXXwuRYwDanMzNTlyzh89ix6RkbwRZNOiOxcMXOj\nnpERlP7lIAKB1MPX1TUu2Tm9wXEeW2d/uChOpfz1fIccAp0LSGaZonW7zS/LFqnnb9R3YmfGjhN/\nXMjHjRJLfs5+DGtNklItM5Pq755UusSPNrQhLExzsoYudCGAQE6HRYt7KYHFgBWK56MAPm1BOwIC\nAnmIZDemespmMtXnydtvR6C+Hoe2bIlRLmsefxyPvvOO8n71rl26BFSL3Kprha4qLYW7oAAffuop\n/F5lXfyJa66R8jiLilDjdsMjK5k8nLKrbQEkZbb39GmUOp0I1NejwGbD78bHEblyBf/8yitKO2u9\nXrT7fBhsb8epL3wB5yMRxSm3wunUdJdl88jqelrlYKwm8R63G09vbMPKZR9SjsOXW0mmPqvbY9cH\ny3ttqK7Goy1NSiRBKg6v6hqXXS0tWO8+iS9f/jbCp/cuilNpLqmYPNKtp5kJx2yteqOJbtQz6Teq\nVnyDkEg3m7NGAJcsOA4bw5vye+urY2pDKl3Sjx70IGhpRVUjiD1zi0WCkzkGZwIeeIRCLZB3GBkZ\nwZYtW1BdXY1rr70WX/3qV7GwoGWVmDrSJp5ENAjI8TECAlmAqD2VW0h2Y5pqeQ3
+Rp4dowCS0sfy\nMO0AxuWSIFpKnBYZUNcKXVlaisODgxiZmsJFzgV2bVUVfvbJTwKQ8jjPRSLoPX06jly/cuedqCsp\nwTK55Em504lCux1j09MxJU2Ia6f/zBm47Pa42peVLhc2rViBUCSC+w4ejCFibB7/63Ofs7RcCV/v\nc83u3cox+fPWyKnPO5M8JecfRGzZ/d/htseuX1laGtNvvQcXiQipOizR43bjGzVHUIwZVV3DxUEu\nfUelGyqayuc3GVHUqjea6EY9XfKcCEzd3S73+SkAF+V1dgDjFh2XjWEcQB3iFdRkc5bqNbW4IZ+x\nZy4bJFiL3KpD8wUk5NL3lEBu4G/+5m/g9Xpx5swZvPbaa+jv78ePfvQjS48hcjwFBATSQrIbUyuU\nIHaMSlUbBVxOJq/E6ZEWreWM9LHw17VVVejw+XBoy5Y4YqhFrn1lZfjT5z+P95VLJjwT8/PoPXUK\n/aOjCkEusdsxdfmyoo6q22Hje/fee3FmelohYjdxRNBszqZ6rHpzwufSjs3OKuSPHW/70BBmLl9G\nbWEhnt24Melx2Vz58B7umn0Yf+3YhdrCQmXcjLiy/rwZCmnOidkQT7UKKiAh3XqaqXx+k+ZNquqN\napEutmwFgKPysrXInErI+syeolcCqJH/rgDwcJrt8w8AtAqhZIpcL27IZ+xjj2yQYC1yqw7NF+Ah\n6pcKRPHmm2/innvugcvlwjXXXINPf/rTePPNN5PvaALqtKKM4IEHHsD1118PAPB4PFi7dq0SW86e\nuIj34r2Z9wy50p9cfX/HD36AkUuXsLyhAV0tLXjtpZcycjzmdmpm/+DAAF4eHITbbsfz/+2/weN2\nJ9ze43Jh2cmTOH/xIrBmDRq9Xhz/7W+l2pcf/CB+8IlPKNsPT0xIDqPvvIOOt99WHEZfHhzE0QsX\ngDVrEBwcxIMOBxbeeQc1N98Me0EB6J13UHD+PB79u7+L6U9XSwuCg4O4+Prr8H/ve3Hz2VVQgLdC\nIeCdd/C+8nKsamxE76lTKHv3XUxdvoypG29E76lTWB8Oo9lux7P33x833u7WVvT19WHmrbeAqioA\nwOjRo+g4d07pv5n5HQ6H0S9bxwZdLoxNT8e8Z8dbdeYMQnJu6w1nzmCb/F3N2nv58GEclc2V7t+x\nA9+87TZ0FRRgOBzGzFtv4Z9uvVXJaezr68ODDgcmC0/hrtkfYGJ0BYqvvw9v33M7goOD2HblCl57\n6aW4/tXdeisObN4cc30WOxzAO+/gxooK7Lj//qTjdbs9cDgexEsvvZYznz+t998DcMnvRzGAB/v6\nUJqF43dnuP0uvx/DAGb6+vBPAIrl9Tf29WGbtEPs9i1dCA4Gse3KNrz20msY9vsl/8++PnQA6JPb\n62ff9/L+JX19+IKB+VP35w4D4ymWj38DgA/6/dgJoKmvD6MALvr9eEg+Xqrz1QWgo68Pfw/Ak+D4\nNwLYobHe7/encf67TffXmvcPApiG3/8sAA8e7HsQ05jGs/5n4YEnI8efwQzgl8jttr5t6EMfWlq6\nMDgYxJUr23L++yGb76VlL8PvPyr/3QHgmznTv6X6PhGCA0EMh4dR7ChGV0uX4XrMVu2/ceNGdHV1\nobm5GRcuXEBPTw++853vaG7b19eH1157DWE5RenEiROGjmFJORWbzXY9gOeIKM7KT5RTERBYPGSq\nUL0VMNs3fvu6khK8vnUr7vjlL3H47Nm4NvTqJGot59tl0OsPv+2q0lKsLC1FscOBifl5pR9Omw2f\nuOYaVLrdODczg8NjYwCkMNp37703qXIUjkRw0+7dGJ2djet/OrUi7zt4UHNOwpEIHujrgw3Ao35/\nXJusHa97Cmsq9qPc5cTE/Jdw+Oy47lwZqZOYrJalVsmR4MAAnjt5EpGFBdxWU4MnczCnMhn8yE55\njiCsqz/Jt1UD4KSqXT+iY6oF8BsADwEoUm2
7XadPiUq6MNgB/DmAGQCH5WXq+WP9PIaocml0jrWK\naWSzrIy1xTz0YOVVkZsIy7mkouyMUSxG8aSrF8nKqfj3+tE/KpfhqQ+gu9XcL0S6+1+4cAGtra14\n/fXXsbCwgAceeAD/+Z//GbedKKcicFXByFMjAQks7NE75cXp/7sJbW1SbdJcgFnTEn7717duhcft\n1nWbVYf/srDO+StX0OHzxRAdpqwlcq5lOD45CUAKy11WVKSEgh6fmFC2mSdC/+gonHY7jly4oCzf\nu3Ejtg8NJTXS8bjdePueezTDl82En6rnQC8k2uN249mNG/GMThgt229NxX4cHutFz0gPjk/8Xneu\n3r97N67pegb3ntqM0Tl7XHt6/dOaB3WI53A4jNGZGYTm5tB76tSilU5JhkTfUVaX59CDlaGbfFv/\nS6PdYm7bUUiksxsS6eS3fQJh5f1qXFEC+7qgXdKllmt3AUAvgOPyey+A04gNEFSHy5bDeIislruv\nVr9SgRFTnWTFPKz53ctktmxuQJj6GId0TVl1lQtYAXUaQjb3JyJs3LgRgUAA09PTGB8fx4ULF/AP\n//APpvuRCGkTT5vN9r8AvAjgRpvN9iebzfbF9LslICBgBRTSsH8zDve60dMDBDN0vxEMShFxK74x\ngA0/T+5S2tXSojjKqo109LZP5FDLq2I3dXejd2QEgQMHEI5EFAOd3tOnQUAMmelqaUHz8uVoW7Ei\nzrlWnRfpKykBAFycn8fL584BkBTO64qL4SqIfp2W2O0IRSKwc08E733hhRgjnwcS3Ejq5dWZIese\neSw3dXejaudOBA4cUNRDM06yrC/lLkbMG/Gbji/pkkZWXmY8EsGGvXtNjzERijl33YbqastcVrOJ\nbN3mpUtwg5CUzDYATm45s98qhUTwwogliV5I1KYKUi4j34c57pZjHAW4Sd5fi3RtB/A+1bFdkAio\nTT72YWgTYHaUCfnYibLX+HGqt7GqsuPiOsvyyNxjD2scaxOdDYHMwNr6pQLpoaulC4H6AA5sPmA6\nTDbd/cfHx/G73/0OX/nKV+B0OlFVVYUHHngA+/fvN92PRLAk1DbhAUSorYDAoqOtDejpARobU6+d\nmAx+P9DfD+Dv9gJrjIXQZiIUWB06G6ivR+/IiFLOo8PnwzMbNxrqi3rZpbk5qQ6lw4FLly/HtbG8\nuBiRhQWcl8mcDZLpUbHdjrfuvhsNTz+dtB+AfkitVvip2bnwuFzoPn5ccfA1Ou/hSBjBwSB2NO1I\n+INW89hjGI9ElDH7ysqStm0UycKC9TAwEEQ4PAyHoxgtLV15bT5kNFgy1dBNrXDVlQDOQCKdHkjl\nRdjVz0JZ2fFOIxoKC0gOrsxMx4tXcR63xhyvBhINqgHwKwARALchNqQWkEinG8Ckqr+VAKoBnIMU\njgsALM7ADomo8v1Uw4/Mhz63oQ096EEjGhe5rmPmAnr98KNfnskAAuhOaSb9yE4guoDA4iBZqO1i\ngohQV1eHr33ta/j617+OyclJfPGLX0RJSQmeeOKJmG1FqK2AgEBCdHUBgUB6pDOZSnb8EwPA3+2F\nfaW2S6kWMlEjkFfF1lZVYUdTE26TzXEaqqvxKGeskKwv6mVMYf3YNdco+5VxIbpvBgL4aE2Nso4g\nfcm+1NEBX1mZoX4A+iG1TMXseP75mPOgd2605mI4HFZIZ6XLZXjePW4Pulu7kz5FZeVlrCadUh8S\nhwXrYSmVUzAaLJmqjqEOV22E5CzLlE47oqSzElHdjB3vJNfWhxDr4Po7vA/FGIND9qAuhUQYewDs\nhxSmG0JsSG0DgHa5DTXp9AA4IrdxERLhnOL6toEbA+snr6ndD4lgA8Ycc1PV46xwlrWmFmXm1C1r\nHGuzFYguICCghs1mw89//nM899xz8Hq9WL16NdxuN77//e9behxBPAXyDiLH0zw8HqC7Oz2lM1l+\noa8xDKw
ZxUJRBHUlJYbq/tUUFcWFt6aLrpYWdPh8aOdKojzZ2opAfT1euOMOzT7d8YMfYGJ+HrVF\nRXjq9tt1Q3m3f9WNse+0Ajta0bbchw6fD5tXrIBXrgnK9mHlQwDgCoDvHDkCAEn7wZCIkGudB71z\nozUXfM3QI3fdZbk5DysvYzXpTAfZLqeQye+oTN+aHx8YAPbuRdn+/VgRicAN4B3umA3y35WQSJ+6\nFuUE9/4GROtjtgGoQAXKsQyXIT0QvyRv9yFIxI+hDMAnIKmg1QB2Ikp8AWAZAB8kFbQBwLS8vBjA\ny5C0sncBPItoWDNfp5PPV2UE+3qNsfghke4Ncv/fQmoZklbkHQ4Ovqz78MSaMNf0YE3ZFpFvmE2I\neykBNdatW4fBwUGEQiGcO3cOu3btQg33MN0KZKWcioCAQP4jmTpZXhhdb7TY/MnJSZyLRNB7+jSC\ng4OWhNp63O64EFaWT8iDD2f90+Qk3pBdaR8aGlK2Ve83PCyHE8ONVbcUYWVjGMcuXIgxu+lubcXb\n99wT40zL5otXLFkY7fahIQyHwzg+MQFfWRnKnU78uKkJDw0NaYbUJlJmSx0OhCIRhCMR6Vgac8FK\nw+iF65pxzs0XsHIKiVx28wVdsDZYUh266wuHMTI6ikkAhYODOCxf/3UAPgBJiWTOtT5VO92IEs9K\nAI8C6EA0ePJWSOqkGhcADEIiquchKZuD8ra98n6MpJZCIpf3c+0yPA/gZkQDNIMAxgDcB+B38t88\nGJllobor5DGVy+Ngob4j8v8sjzUR6TcSCp2Kt6zdLn0OtR6esBxSqe1gimGuZnoZv46R6/TAFFkB\nAYGlCpHjKSAgYAjJ8gvN5h8CyUtqWA1Gqo5PTmIiEsGEnKdZW1SE0ZmZpP3gc2Xd/8deHB6P5k/y\n+wYHBvBWKITjExP4jRxmy6DOGx2bnjZczgXQnudwJILVu3ZhXA6zTSdfVi/vNhiUiHfnNT18AAAg\nAElEQVRxsRS6nYk8YYHsw4/YrLpL3GfSs3kzet1updDCTZDCYQGJUD4D7ZxQJ4A/QCJxrFhDKaLk\nkUcxJCXxXwE8BmBOXs7yM9cCKEFsvqcbUiQBr4Kytj4CiRz75HZZn1i+NSAppXcPBLEsPIw5RzEe\na+nCpOqBRK081gpIYbyNkNTSh5CY9PuRPEvRyDZqJCpRlJkc0kS9TLRucRBEEMMYRjGK0YUu4Wor\nkJPI5RxPMxA5ngICArow42Cqub/sVnvfZ93Y0ajvQpqKS6le2ZNU+5oMLCR1ZGpKIZ2VLhd+09GR\nsLSHUo7lr/aj/d4IDhyIKrxrq6riSrQMh8M4fPYsRmdm8NDQUExbasWSvTdSzgXQnmeP242PyOEw\n6ebL6inbTO3NpDPyUkC++XKqQ3f5z+STbjcCkJROnnQCURKnzgmtBHAXJEWyDcCPIRFFLdIJSGZC\nHwOwG1HSCURNgV5HfGhWBPGks0DuYz8khfIw16dSxN7stAKoCw9jzWg/PjzSg8/JoavMnKgBkqIb\nAHAU0eBPH5JnSBoJhU4lXNrt9qC1tRtDQ9vjcj2tCXM108vcy8XMHedgAQGBRBDEUyDvIPISzMFM\n7UfN/TNIONQkKt2+JgMjVRUyybMDcNvtuOMHP8CluTnd/Vi/ekdH4PrfBuHxRG/QD23ZgmdUNTr/\nINf1rHA68fC6dTFtsf0+UFmJjuefxzwRfKWluMnjicsxNQOj5WkSkfvgwIBmrisgKZ2ApPbuyI17\nzZzEMID+vr6crJSoJsUsJ7MWkprnAbDd7cZYayvuk889MwziSacTwDhiS600QHK//QCkkFeWC7kG\nElHUw4Lc9kSC9YMAEj5Ch6SAHtFYboNEehmRXQvgZwA+Luf9vudtxM+bdsDBbbMSUZJphGzyMJKl\nmEomI/vd0zLKykztykS9zJ1cTJbf+ibeBKBtbpQLObC5CHEvJbAYEMRTQ
GCJI13n2GwSjuOTkm+l\nFmEzAz1yxUjf0a1b4XW7pZvemRm8EQolJLtac5iINEcWpFvYi/PzcYon2+/k5KREZk+dwtT8PIbO\nndNUSOPGJivQbW1AmLuH8rjdWFlaisNjYwnHwvdz9a5dMXOkp9QGg8DEBFBbCzz1lAiz5aG+1qys\nn2n1LbLaEXcYkjI4CimEVGsbIKpvARLN8CBaQ9MFYBWAU4iWUuHDW62IW7iCqMKqhwpVP/n+AlF3\n3EOQjIZ+2NKFN+oD+H83H8B5t0dx6m2U2/IjtXNgxDc2HW/Z7BllJepl7tR+ZErnOMZRhzpN1Tdf\n1VBBmAWWIgTxFMg7+BOUoRCIhzqc1fT+cimWD/z3AXQMZC4MFgB8JSUAtAmbGTz32yi5+uLB2HIk\n3a2t8JWVKaGpFU4nsGZNQmJuZA55ctpQXa38rdcmv/1arzfp9gyJFGi+zaKnmzQJKm9ENB6JxJDU\nRGG2hw8Do6PAQw9BgINape8CEPD7U9aCjJZLSQVqUqx+H0S0vEgVgAH5/2lI5kLV8rbjsvMt9u/H\n85EILkAy7lFXtk01k6k0hX0uIlpKhUcIQCEkYjwA4IOQwnp73R78sLVbye30QCKmByApvJk6B6mC\n/e61tHShvj6AzZsP5L1RFo9USRZfxuV1vK6p+lpT6iX9vppFpgmzuJcSWAwIcyEBgTxHtkxf9Exn\nrIQRsyEjrqtV/7wfoetGgPe8aD+5Gc926ZshPbxuna6DrBnwpj8Akhotmd2egTc4Utdl5dvs2OiW\nHXilBwfd3bHbhCIR9J46FTPXauMiNtdvHnVg/HgRSq+fxMdudeDJjUvD7dYKWG2Qxcx4mKGPlR9n\nFl7LzHHY+yJIZIs3CHIglkjy5jzYu1d6CgEA9fWAxd8FyxDvQJsMMf0zCVYahrn0ZvIcLDYGBoII\nh4fhcBSjpaUrZ8irH37FmTeAgGGH3DDCCCKIHdihG2psZJts9NUsMmMaJbCYEOZCgngK5CH6+vrE\nkzoOfj80CYbVyIYDrRFnXCME+Pb2CHqvGcTaN5twaJ87KRnPp2sqHJYeNuzYkfghQyKCCsTPtRah\n5+caBCXRrsPniyvTcrVC65pN53pSk0OjYKGzxyGZ9MwDuA3AckikMlHpDj/iS5PwKIAU7qpg/35g\nZATweoHNmwELvwtskGp4HtZZH9eXNFEHycCInxc1ITdT9sQKaBUyseo7au9eP0ZHpbNdXx9Aa+vi\nO9IC2SFZVjnfZosQahFmK9178+l3b6lAEE8RaisgkPfIVg5muiG7RmDEGddIzuqTj7kRCLcaIp35\nBo9HeriQbFwsRFqLdALGjJ3YXAOIcXex+mcz027GmUQqbs4J20Nq2XMsRHcEkloYglQDczeiYaMP\naOzHh9d+GJIDrRpxRK+lRVI6LSadgHRt6ZFOzb4YAH+jUw5JUQWksU4AWA2JYDKwc7BYIbeZDLfO\nXo6oOWTGmTcWRkJXjYTRZqOvgLZplJnwW5EjKpCLEIqngECew6gClktIJzw4lXqhAsmhpWiHIxHc\n1N2N0ZkZlDmdmJyfx9qqKhzassXSuc9GGPdSBVPH3oTkNFuOqENsFaTcR+bWympv8vAjqnZ2ILY0\nihFUAbhgttMyqgGcT3HfVOCEVOrlT5CU4SkAk/K6Onk5j8UKuc3kcRPVAzUKI6pbLtbVNKJUZiuM\nNlWYUVtzfSxXI3Jd8Xz77bfx5S9/Ga+++ipqamrw8MMPo6OjI247EWorICCQV8hUeLCR/M+rCWbm\nQ4/QW50Lq4VMhnFrhS0m3SdPrqMgJGXuovy+DsCvAfwtJOVwHFH10APgPcSPX01yApCUUjuihFUP\ntZAUSLP5mJmGOj+VwQ7JuIjNlwtSWHIxgLcQzfFk14wTQAmAnchunmeq4dbZghFCs5ikR4/0Gsn1\nzPW8SjP5qrk+lqsRuUw8L1++jA984
AN48MEH8bWvfQ19fX3YsmULjhw5gtWrV8dsK4inwFWFxcxL\nyJcb0lSQ7tjM7J8s/zBVpKKcBYPAyy/3YflyvyXmTGbmIdG26ZyP4MAAnjt5EudmZhTykMtKohbp\nteqz5kdU0QsAhm5/01Vgs/Ud5Ud0bJUA3kUsUWGkUm2ew4PPZ/wVogpkCRKXErHJLyvzLdOBHUAZ\nJDJ5AMBnEBs+q0YFgF8AuBcSWefnxg/9a4aRmuP4B/hwO8rhQBekEi1mH3CYQS7l4xkhNItJetIh\nvVYbES0G2DXqhBMlKMFO7NQcSy5dU1cLcpl4vvHGG/j4xz+OyclJZdnGjRuxbt06fOtb34rZVuR4\nCghkCVp5cEsF/Nhu/f6gZikOo/snm5tk+YepIpWapcPDwNGj2uVJUsFzJ08q83DL008nzF1MNGda\n64zmQg6HwxjlSGely5VSDddsQStP0qrPWip1NdOtfZstsLExYqn+KHVBIk7vAvhXaNem5PMZRyGZ\nEs0jef1KQmZIZ8I7FhXs3N8FkPo8BuD/AnB9kvYvAvh3AJsAfAxSyPB1ADYA+I28TTmAh1X7sxy7\nERThMBwxNVHVeZmsJusKud1M1GZdDBjJccxkHuTAQBB79/qxf38bIpH4GU2nfIpWXmW+gV2jveiF\nCy7dsXwP3xM5oDmGdP0OrPZLuHLlCt5444202+HhSL6JgEBuIZNP6JLlHubLDalR8ON1/lV0bO4n\nm5RQ2GDQWCismblhBjnpgil7kYUF3Ob14ifNzYbCQXk1zVnRAsBvmTlTZCEaoDg1P68oZ8HBwTjl\nLNGcaa1jZEyvPfW+AOBxuXDkrrvyTp3XmxuzSmgXzIctdrW0pJVHbPV3lDpcmKlrTki1J3dCe2yM\nVAJRYgQAtwJYKbdXA4l0vmlpj1PH5weCWBYexpyjGP+zpQszCfIQ+VBgngTbECXlgERK/wySey1T\ndB2QSOXHIBFuQMptPc3tNyGvfxvR+WWkphxOTCD6QOM+eT3/gIOf8xH5fxYebRaZ+N1LJQwdiJKz\ndLdJFeHwsOLMOzgYjHPm7UJXTqmWeqG/mcqDNUq8L/kvKcpwEEGRA5oDMPobn4n916xZg2XLluHh\nhx/G3/7t3+LQoUMYGBjAJz/5SVN9SAaheAoIcBgelnIP9dQvI86uwSBMqYVmt7cS/HhLdkXHVu6U\nxmaUjAWDwMT3W1B7qh5Pbcic660aTNkLzc2h9/RpPDQ0ZMhhlFfTSv9y0FL19baaGgBAQ3U1Gqqr\nAeiT8a6WFqwqK4Pbbsd9Bw/GPKFUX2vBIHDsdxIZa6hMTO67WlrQ7vOhw+fDe/feC19ZWfoDyzL0\nPmtmldBUXGKtdqpNB0FIxJJ3pmWEphdSaKne2Jji1gbgD/Iy5urK2ntc/nscUrhtJcypjkk7zzpg\n8LttWXgYa0b78eGRHnxhsNPwodgcVAM4B0m1vQ4SOW+CZKr0UW77ywAeAsBrAuxxTTm3bBSxzrJM\nyTuGDyGAqPkPU5d5MyBGfp1cu2oFdTFh1j03V1xSkznz5ppqqedEa8ah1gyMqs3pKMMCmUG64kY6\n+zudTjz77LPYt28frr32Wnz/+9/H3Xffjbq6OtP9SAgiyuhLOoSAgHU4dOhQxtretIkIIGpsJAqF\nUmujuVlqAyAKBKzf3gqsWUNUUUHkdMaOt7NT6k9rK1FHh/E5WIwxEBFt2reP8MgjhEceobVPPkmh\n2VlT+zU+/TSFZmctvaZCs7MUOHCAQrOzMX/roXnPHmUMgQMH9LdrJkLRLKHzALXfa2yci4nO/n5q\n3rOHNu3bZ/i8GIH63OUirLyemin2R7WDiDbJfzcSEfuIdsrbbuKW8fs6ub9dlPzHu8DANklfzdHv\nBQSM7fOVfZvokUdA//vTjVQ0GzJ8LJc8xnKd9W3yvNSq5q6Vm5/b5PVHNbZLBSEiChDROq4fqX49\nG
r2mtK4DPWhdR4nQTM0E+V8g5ZGkhk7qpFqqpUqqpE2zzbTvQAfNzsb3upM6qZmaaRNtolDKZ858\n3xIdcxNtIhCokRpj1ustzxaeO/QcBSiwKMe+WpGMExm5Z8jk/mp8/OMfpx07dsQt1xuHvDwxL0y2\nQbovQTwFzIARn02b9ElPJolnKCQRp1RJJ5F58mp2eyNzlAwVFdEbwsLCaDupEkgrCHsqCM3OUscv\nf0ntv/xl0i9angidmJiI+XLe/G//lhGSpHX8uieeoPXPPKMcyyiRWqw5ThXJCHWqxNTqH9ZMwMrv\nKEYOQEQfJokgMELDXwbN3HYBjX3NEMsGIjpBREXcstVEVJVgH82XfM2ikQghY/sUzYao80DAFOks\n0VhWqnrvlOfjBDd3nUS0jOIJK1uvnmMz6O/vpD17mmnfvk30GXks6ZBYo9dUMxknuUbG2N/ZSXua\nm2nfpk30mVBr1oiSmszxpDcR8V0McpzsmCEKaRI8veXZQibvpQS0keuc6NixYzQzM0NTU1P08MMP\nU319Pc3NzcVtlw7xFK62AjmFTJXZyCbM1tU0u70Vc1RTA4yPS7mdb70F+GRLx1TdZtOtJZpOXU+j\nSORUmo06kvwxGAL19djR1GQonzDf6rUmK5EiancaQxjAFyGZ+eyEflitVu3HMICbIIWLNgA4Bcl8\npxGSMc+QTltVANZBqs/JtukA8AqiuYqGO5+huiCrAcwCmIY0N6yWaDGkkik3IZpfyWMVovmtE4iW\nm2EwUzszUY7k3r1+JQ+xrj6Ana3dWSmPYnUN0L1+P0blH5y6QAd2djtjciczlaeodqa9hEvoQQ8A\noAENeAEv5IybrihbImAUuexqCwDbt2/HT37yE8zPz+PP/uzP8MMf/hD19fVx26XjaivMhQRyCsVy\nUoxVRi9WwwhBSmaco9WGGfJoxRy98gqwYQPw619HSScg9UeL3AwEgwgPD8NRXIyWri645ZV682GW\nSLJcU7ZvJh44mDXyydTxK5xOXJyfV47F8gmTwSpDJjNIp6RJMoOepWbUlQqMmLt4IOUnJgMzUSqC\nRBKZcdD75PXPQCohwnggb4YzD+Ao19YFSOSlVn7vALBvIIgr4WHAUQy0dAEq058KROtjxnQ+Q9fs\nJIA1iCeX0/LrEwC8kHJXSwFcgjRWN7dPLbffhwHUQyL3Rkuj6Bk2dSE2D7GlaQfazA8xJfOfVMy0\nEsEh/+B4GxvRsuNRtKlaZXmKUn9TM6jRIq9a+YcP4AHYYMOjeDSG3PH7/xg/xkN4KGvGQgMDQXwp\nPIENjlp8qeUpeFSfi1SJeaL9MkX2BQS++93v4rvf/W5mD5JMEk33hRyXlQVyC0ZCXQ3nuqQZkqre\nv7MzNkQ11VxGo+Gsev1PNkdWhOKqsae5mR4B6BGADnCd5seyalX0uOvXm5unbISRbnvhBfLu3Emt\nv/hFXJjmc88/n/HwTRYiqg7zzWUYzT9NBfkQMpsqMhEWaRR8m16N9jtJCqG1ycvXUzTPkX9VkhSW\nWsOW7WkmPALpdSCQ2RsHAy87Nwb1y8uNq4Niw2v5vMYT8vp2Sh62rEanPEcgKTR5vWqf2dkQHTgQ\n0MxD5NtoJv18TL4fzYsUFjkbCtGBQIBmdb6YrchT1ApVNROGyu9fS7VZDV3ds6eZHnkE9MgjoP9x\nYFVcrmeyMFy9/NBE+1kVTixCbbOPpcKJ9MYBA6G2QvEUyClYqeqkq6Kp9x8bAy7Kj/QrK1NXG5Mp\nlkwtPHYMCIXi+59sjsyM26iixT/1buI6zY/F7Y4et7Y28RjV0FNarcTJyUmMRyLoPXUqzma81OVC\nd4YLafPKZr6ElWZSlTSq9OYjvnf0KL45MZH0c5WsxqhRxYvfjjmoNsrb96rafwLADLfvYQBc0IOC\nmyGpmI2Q1E/ICh68jYCGk2i2saCxzAWgFZLyykJonQBehhRiex+
AH0Nys2WKoJaabKT26zCk8iuA\npHTOqfZxuz1xZT602mCKabTMSvRsFmMPACcaAfx9wpYyB7fHg9YEPyJWlC7RUjfNlGMpVs4YMIpR\n3IpbsRIrs6II8sr2k03uOPVXzzmWqZYv4SXMyVfPA3gAz+LZmDFpOc7mixutUGYFNJGMmab7whJh\n9wL5h3RVNPX+7H1lJdGJE6n3S0ux5FVKXi1Mpf9mxm1U0dJ76s2PhT/uiRPpmzRZjXxwQ801LGVV\nMpMw+rlKZu7STMYUUX67Dq5NdfudFP8jXUX6TrBs33YiqpwNSUqnCdMfvZfX5PZOInJTcqfdFfJc\nuIkI/Z2SSrtvU0yfjehDRkx31I6wqZgR8W1sI6Z+vkQhqiAiUIjuT8vgKF9gVN3UUgc7qZPW03py\nkUtRXtfTeksUQSPglW0t9VdvbGqzJBCogzqU9Wy/bbQtbsyLbUpkFEaU2cVwIV5MLBVOpDcOCFdb\ngaWIRKGk/Lp0yY+aIOqFuFoR2sqHrNbWSv83NBC1t5tv04wzr5VkzApH4EziaiNRmQi5FjAGs58r\nPYdfo+Uu1NutIaIKkgge/4ysmWJ/oGsonnR6KEoO11M0DJQd40OUfqmVExRbYiTRS8uxFiSF2fLr\nKik23JUPDXbIocGJ5tFMGRKi9F1v1W00c30P0C6d3prtZXaRaRKhRWT4ZXVURyHSJoCZ6iPf3gk6\noUsI1cdlfSyjMgKBGqhBcz9+fF7y5hVBMxKGfbWR06XCiQTxFFhSSHbDfMsth3TzB9X5k9m4+bai\nhuViqYWMjG37q1nNedKbv6VGapZirsti1Va1ApmqAZotJMoZ1qy3qaOQbiOJALZSYpqhJkGSXia9\n3BRV09RKo149z+UUS+K88rIquS/JSKNe7iXkNowS1zqK5p8yglxBUk1Ovn9OksgsI8d2IknpfARU\n8XQjHZ0NJSWJzVx7i/FxiT48mKcQ3U/q3krfUc20uL1MjEyVMmHEw0veOCJjRmXMRB+NtqfejvUx\nEVnlx1dKpaYJWjLCls7vnhEyaESZtYqc5guWCicSxFNgSSHZDfNHP3pIN5RUHWaq15aVxKmuTmq/\noiL1ENxU1EIrSaHePJldnq/IBvHMNplKJ9Q8o301INpk0tQoG0h0PTVTPHXQU0i1ttWCekrVBJN/\nz0hhMRHVkvYPdztFiZC6HiYS7Gflq5QkMrmN/oZq6AVqpm3UQRHlkmH9YyZIJI9dMUOaDdHyAwEK\nceY+iS49I+pyJvXGZAqqdE0Z1cAlZFspStVoKFk/tVRNBrNhp6yPXvLSelqf9twYHXOqc8PG10rJ\na6iqCRr/3k1uqqRKaqVWZX/+e8rstVJLtUrbfIiwWVhFTvMFS4UTCeIpsGTQ2SnlULJQU60b5lBI\nclBdvz654yu7+fZ6Y7dXh7amQz7NOrjyY02H/FpJCvVIitnlVoLNT12d9rnOZWidW6vJVDJyyH8W\nzBLJjBK/ZkrKpqwKAefHrafqZxta1EEvDNwozeCJYDtJRMxNRI90Er3STHRwE1FFSFILB0lSEk+Q\nKjRVfn2AYnMW1Y635RR1ia3Q2N/Kl5eIKuklAlUoN9OM/LUSkY+IlpFEPpkqzBNmtVLczLXNu/yy\n9tqTzLPW/gzZCYI1F+CbbaUo1dxDvX4mUjrT7WO6eaCsb63USh3Uodsvre06qZNqqTaOCKr30cvr\n1COJalLN5o1XS/XGq3UOEpHRSqpUtm+ndtPzZwb5ktNqBJDKDi+Jl974SBBPgXwCT5raE3yXGSVX\n7OZbTQ4ZcbJCtUuVhKWrGlpJCvUUV7PL04GarPHzk+ghQS6G/WqdW6vNjcyQQ7NEku/rthdesEb9\nZHfmTH5LwKasysflx13zjQM5odKboQ5Gt2VlPUCkaA8hInqjObpiVyCeMG2i+B/t5Rp94NcvI+lU\nGlU97RQlgmZuHopj3u9SSAc
3pDhll82VVhkZfrz8pdess60WEj0I4NtZRbmRiWm1UpQpBVWvn4mU\nTjN91iJ56c5NqiG26mVa+ydrW289I2jLaJmyfjktV9RSEGgtrY0ZbyJyn6gfrM0SKtEkzwJXJwTx\nFMg7GCFNhw4dMk2u1NuHQlETn3RVu1RJGOtTaSlRa6t1JkK5bvKjBzVZY/NTXp74IYEVYb9Wh9pq\nXZ9Wmht19vdT5aOPEh55hNY++WTSNs2SXr6vlqmfzRT9ZaijrNyR8+Nu/cxsxlV6hmznDC8naVqd\nJOVfKoRHZkpHGiXFk6mVRBJ5XE8SkXRSlOydoHj1jpFHkJRf2UzxP/YOjWXLKaqOnlCts9EsNdM2\naqMILdPYl4XMNtA8tdP9tI1m455b8GosPzY9gqhF5M0Er4ZIIpW86ZJWO+qanlYglWvKaqUoEwoq\nc6WtpVo6EWOFZX2NUL7f6c5Nsr5pETrmUMuW8USQJ/VaYbXJ1vNQq5HbaBtVUzUto2Uxc3zo0KGE\n5D7RGEMUihnHKoqvYZoqlpKZ0NUGQTwF8g4x4YE6StahQ4dMkyut7a3Mq0wFoZAUAsyTJr79bdsy\nq+Rl+lhac5Vo/rQeDgQCUt5soocEVoT9Wk0UMkn+OzuJKr4VJYMdv/xl8v6kQXotU2rNpafpwkzY\nMD/ubD6QySTx1Arp1AqZDRApTGtjKJ4INXPbekgKzT2qsS7AvS8hiey1EsWUKymYDSnmP4ykrqX4\n0xxLTs8RaB+10/0UIkmpdXLr2yiWJPJ9Ys8tQnK/2XIWJJOuqpzoxpfvRy23H9+O3qXe2d9JzXua\nadO+TTG5p0aQCwZomVBQK+Qwai0yawVxZn0G6TvHpoJkfdMidPyy5bSc2qldU11cSSuphmpilER+\nfTu1Jzw2I6aM1Oo9MDh06FBScmnE+MjqEjZLyUzoaoMgngJ5DSuULKthdZ/UxkR8+2pSajX4Yzkc\n1h9La64SzV8iYpDquqWI5mYifEUig5XfzXxNUsuUWjNsIAHy3XgoHXRSbF6lYk4kv2fr1IRHTYQ6\nKRqey5ckYURKnSd5gmKdaJ1EMeVKquRyJSBJqewg7dMcDc+9rGzfQRFlPVMwtUirHpnTCjNOF4lu\nfNXhyTz5ZNC71Gu5Oes4kCM/aiaQSQW1kipTbncNraEKqiA3uWkdrYvLjWyn9pg8zGwoalqETs/Y\nqJM6FZWygRpiSFwN1VAd1ZGHPIbIs5aCbIRcbqNtVERFZCc7VVN1nPqsFbLMXw9WPpRYSmZCVxsE\n8RTIa2TDwMYsrO6TXu5pY6MUfssfy+pcRj7Ul/WhslK77TVrJHLs9Rp37tWaq1w8p/mGTZuIUDRL\nldsP0Imz+VdqJF1YnSubVZhwoNEsu0LRH9dKbjkLAV1HEhFSf0TVRIjPz1TnSbL9a7hlAYoNtwWR\nUq6k8ulGWj4bilmnNu5hY1Arsw00HzMNfD/V++qRueX0JoGICmiKmmnO8G2qun3+fSttTXCjHp/f\napRCVspzhqcbqd2k4rkUwQhGJVXS5+hzKZNBXjXlCZtWW2qVdRWtSmj0YwTJzIDYNowQrqN1MQ82\n1GqmVgkVfr3WMdl7PszWTFixOiS5juoSrs+EOp2JtgSyC0E8BfIaekqWkZAjK0iaVhtWq2t64aXq\nv4ni1dB0CShzB/Z4KEZ15cHmgFdE6+o0m9Ns34rwZiuRKHx7sZAwdFSDfSz2HC42rMyVzRQObT6k\nTTCbyTBb0dpUq4SI2aY7KT5nU4tIsWN5SSKMcbU5Z0PkPBCgdbMh8nDLeUKs7lOd/HeZfNxEl7DR\n8Syj/QSKxGxrhN+r2+ffd1Ak4Y0vTz55FTnZMVtnQ4QDAVo7a/6WOhdCba0AT5j4GpbphFeyXMMC\nKogjbImMeyqpMkZdNHJsLZKpV1qE35Y/Dtte7T7LHnSoS6gwoszWa4Uoq4lhIzXSalpNFVRBX
vIq\nCibfp+cOPaf0lQ9JLqIi3XxbPoTX7DwZhcjxzF8I4imwqMiE22hnJ9EttxxK2GZnp0Si1CGd6v4k\n6x/LKwSIOkzGcBkdu1ESwZeZKSmJH1uq4Mms1hj59QBRcXHqtUpzAXqhvmkV0k6z5mXC0NFmMi+r\n5BhSmZ9s1zxNB1p9PXTLIe3zZiLPVbPsCulHKxttupmiXfNQVE1UEyl2LK380ZGHiMoAACAASURB\nVOUk5VOq19nl9tnx1X0yYrpjwvyYiIgq6Sg3ngUKkf7HJlbVjG3fbAqy+lzoHTPRPmawqA/HEhAB\nsyRBTTD1XFXNtHuCTlAd1dFROhpD2LQUa15lPUEnEuaA8n1gxkBaiqLazIft5yKXsryQChUSyfrJ\nk1E3uWPIHq+Qsu218j0d5IgZRwM1KLmjPDllCmYM8T4UDW8OUYjaqI2W0/I40snWd1AHraSVhuqf\npvMgQeR45i8E8RRYVGQiR9NIm+rcRUaU1PtqtcUTRp68Jirtkmo/9aBZA5Jrb9kySilcVasupjqc\nVw2myN58M9Hy5eZIZy6WOclEqG+6OYcJQ0ctMuRZTKQyP2b3yU4NRW1o9tWMraoOkm1qNBRVDS3V\nlDncaoXpMpWSKZ5a7rFriaiaoj/8Xnk/deivVq4pPwaiWALnovhanGq00pxCOvWOw8Aru2rzonRI\nYaJj5iPU5yURETBLEtT5e1omPKm0yyNRqCaf09hMUn3NNmrTrMXJ96GGapS/Wf9ZG1VURUwJ3Ebb\nNEN/WY4mPx6e9IKiYb8ucpGNbMpyJzljyGAxFcfsx8ZaSqVUTuVKrquTnAQCFVOxEsrMO9GCQD7y\npfXggEj74YNenqaRBwp8qLEo1ZJfEMRTwDDSJQla+6dyk5+sH0ba1KvRqS5fokW6tAje2rXax0rk\nCsv306xjrBZp5dv73OeIamrMl2BRq5eMUCdSXNMJ68zEg4d0kYkw1XRzDhOGjqZ7N5wDSGV+zO7T\nTNEfHe1LLQE1TZO1avY1C+etmZKNWRtaXUvUlpbi2UHxZJU3JFJvz9pMpBJqGRsZGZ/WeEJEVEpn\nqZyOkZdephMUJqJYIyKTzxKTIh8/qnqXfjPFzn0isxezRjBqUqi3v5F2UwnJTJQLqQbfB94p1kc+\nWk/rY9oopuK4ZexfBVVoqrAhCilht2pnWPU/plh2UqcSUsz+HZX9qBnR5P8xJZUnjDypraZq5W+9\nkijJSrlokVE98q+neKvzY3mCLFTP/IEgngKGkS5J0NrfTBgpI2Zqsx01QiGi5uZDScNXtcpvhEJE\nbne0/ba2+NItzEm2sVFS99T95/vKiClAVF0d22+WP7l+fTRE1ujcataA5OaSn+tVq4yTWtYuU3L1\nyLtVSmU+GQmlE8aWDzmHhpEB6TCV+TG7T3K1qZl0aUyCVUawbXaWag4coNbZWeXY/PVk1ZSq27FS\nYePb2qY6DlM8+Vc7xU8bI16tqm35nE+94yZqJ9XxVdARpd06OkxEiV1zGbKlnps9jjWhtrFHbSbt\nS199bRlREFNVpfT2N2uIo2cmxKBFOJMRW74PvFKqVjS1SCMjdw5y0FE6SttoG3nJG6fgrabV5CAH\nVVN1TK6o+l8zNccpxOyfi1xxKij710ZtRBRLolkb7zv0vhgiqVcShe9XG7XFnRczDx8SKd78MYWz\nbX5CEE8Bw0iXJKSzP0+kEtVrZND7AeYJkxZpJIolgcuWRUknH1ZbV6d/bC3VUC/8Vb0tv05PLd22\nTSKrtbXaYa18rmdDQzxRT0Qa+bqYeg8E9PJj9eY5lfzVXAzBXSrGHWmjmdIiYYuF5GpTApqWJoNr\npvgp468nrfWpQN1OojEnIzVsfR1JqmUrSWQypHEcXvF8pJPot81Ec5uItoa0yeoJig1p1TJCYghR\nfG4pwzaSnHWThdrqwUsvE4iomN5QFE8jqmQzZecjYPY41
nxHxR7VgojwjCKZoqnl/qqnjqkJG58L\naaYP6vzKNmqjEIWojuoIBCqjMmqjthjn2lW0Ks4MiLVrJ7uynFcs+fxQfj92HPZPrX6q/7Gc0/W0\nngqpkNbROmqlVmqndnru0HMxhFhLzeykTnKQI649fk7MPHwwqngLZ9v8hCCeAoaRbghiOvvzpJWR\nIrPhqUTGVFsWXquX66lXTkTdV74EidNJtG5dPFlk27rdRHa7pIqy9bxxEa+WJqvdyfe1vT2e8GvN\ngVZupxFizeZCTRQzoY4L5AgskNE6+zupeU8zbdq3iUI5UzIiwa10mnfZyaYs1SlNR+FspsSkhl+v\n3k59HPa+gYhe53aMBKLTpj5eiIgc3LI6jfEw6E1/sjEkwwkKUx0dVkinUWjNs7UqqNTaJpkYZzcn\nNHZ0uUIw9ZAsz1Pt/qquj8mDN99hobJGQnTVfVDnZTrIQV7y0m10WwyBZCGsaiXRTnZqpVbNsFpG\nQhuoQXH8VZNBfj8b2RQFlyew7F8Jlegei82nlprJclfVbrwVVJFQpUwFIQrRKlpF62k91VGd4fMi\nkJsQxFPAFLKlRKmJUGur5Kj6/7P39tFtnfed55cEQIgvIgG+GaYp03QiK87YLhmxcRLGBVpT9ZB2\nQ9QTbhRvDtOzO+DO+GS3ezqxN+2cnHZ3JzOd05w5090507VmWuXNTCNbtWVFVhwqAWlVSezaieg0\nTc02Cd3IDi1LASVLFqm33/7x4Ln3dx889w24AEHpfnFwSAD3Pm/3Eryf+3vjffqBE9VNtrvbHrCm\npwUoSoshj8eMREQ7dq61vMRJX5/YJxol2rlTP1a5bXu7+bksRcItrxxg5batraIPdR5yrHKOY2MC\nQCWoc1dhO8ur05rK9pNJfVKmwUFz7F5iX8uN0w21QarkSrR4dZ7+XFrUKnwMNDV37d9ZcFsyr0u6\ng4g6SCTmWSZ/Fk5VXmG4XbOd2o/ltabhHJmxk8PFt3NkgmezzXwqnYNfGMxRjlL0DCVpkcYc6n3q\n1pmPfZD+xndcoVWitQJ10BQdq/Hldb2jplVO7pa6six2rqJEVgsaB6cttMUWdnKUM8Cui7polEap\nj/oMCyCPlYxTvATu+qiPClSgVmot+ayXeg04VD/roi4jk67MbCuTC6ngK/tZpMWShETy92ZqJgnJ\ncj0lXPJ9pFsuXx8JprzWqpxrO7VrM+C6ycmKHBTQhtoYheAZypdqZYnSgZBal9IJTqTLkQQcDnES\nZnXzUN1IJyfFe6OjJoyqlkL+Pi83wvuQ2wwNEW3fLl5HoybEShjkpUhUy6vbU42b5fGl2ax1TVVX\nYTXZkpNVV2e55seCz7urSw+XbueRrg+nRE1uCuKGSehqWyrfJU3SRASi8U+NEx4DjewfKbF4Vlom\npT6tqaUq53zqIPMfZz85g5cbdLnhxTQJwE2TSBTk2bKnaTjNxj2peW+i+F6l5UqsylGaxXB6+Xcl\nLm7zYp+FHHX7OI94wqMOepFQdJss7+K4PBt4kN9R1aiTWI02ndwtdfGdTmVUuHTwJsFMxmCqtTJ5\nEh75kG6ujdRIR+loSYxmL/XSNE1r3WFvpBstLqx8DLo+4xSnJmqidmov2UeCqlyTIRqiPuoz4JBb\nY2Xm4DSlCXlrO5PFv2AO/Ha1Vvm4kpT0lH3WLrGT7E/OLcxmu3kVgmcoX1KtadWyfKpJbrjbqkyW\no7OCSt1/f74EODmkcndYbjXk0NTQIPrnYOlmKZQxoXwO0aj5+cSEFWwleO3eTdTUZLWmTk+XwmUk\nYl0Xaf3UwTefu87lVo13la693JLpVRwUda7GKlw6jcWLi29PjzO4Ou1b7g2TEDxL5VbSpAQii9fT\nhQ8VaOrQlPaCvtLSM+kD6U1hTS3nfOomAUQNB9KUPjROy2sFW/BKk/lPtpv8u4Dy/acc3ucgqiYd\nktJhlO69YG1taRqnQ
8U+/sGjFXicUNyn9cCvOZ5HulI1PcQvbv6ygqQn/lciRzn6lfyvBAZ1Xlwl\n/VqUy3W/LBdYdfGdWcqWuIryWEVuIdVlgOXj5/ORYMXhaIRGLEAnrZLSkikfavkS+eD9N1ADdVAH\nbaEtFkiVbekAVffopE5KUYp2027LPvL3IRoy1qOf+i3geQfdYXymAr9aa5UDorpuXs8RuYbcqrtI\ni2E2202uEDxDeZIEA+m26ZZZtlKpSW54WRPed0+PsN7dcIMAJlk+RAXCoSErpHIrI39K+JKApz77\n+pwthdzKJ8eeSJifxWJWILzrLrJk2AWIBgZKrbT8GY8TLS7qkwBxF2UJpXfcYXUB1kFzLCZeT06W\ntuX35oLsx67+p7Qg83hXL2DIYdWttqjTvqHrbnByK2lSApEerqcrLT0zfsjemrrZtUxETQyse+am\ntBf93LW1lfQA6SY7m5v6fpq1H7PpS3fYvaCVDmy8w8540VX1m1TwGMNZoAJN0icpS+s05nIepal0\nrmas6yWapE/W1BrjDRQFHHiJk/OSMTRNzueWCozlZiH1A6y8z920m3qox6ih6VbeQ3Ufla9VeBqm\nYct8JHgu0iIN0iDdTXcbkKmrwzlKo0ZioBEasWSb3UpbCSRcX50y2Eq42027tS68bg872OU1O3ny\nI7kOHdRB3dRNy7RsWWvuwtxCLcYa8DWV6+YkHmcrEzvZxdCGVs/NqRA8Q3mSCga1uJC3y0Db3+8M\nh3x8w8MmTKkgpSsdooIuf27daoISB/GJiVKrKAcoO5fZjg4TlDlk8kRCdk/pdjw9LQBOQjd3r5XP\naNRaz1ONd9WNWXfM/Wp6WvTR12e9MaC7aeHlfOLg7DdRVTVqc4Yimv72v6KeL/wBjR38iPbivByI\nrLT0TGGtQFNzemvqtSAJ1m37RwhrBeIX/RLKeC3KXtIDpJq11mtCH/m+tG52s77k064vvwCZZm3K\nOaYWcoQDacKhcZp0PMaV2U/dzqPqW2z9yRsomhfwbiBX6sJaesRM9+K/ozH6qOF+qoMR2ZbqFuvF\nmsnnprNU2s2Rw5WsoamDYNmmCmO91EtZytIyLVOWsrSNtlEXddEYjRlWOL59IzVa3FwlfPI6nFto\ni+Xz7bS95Jg0UAMdpaOONTtBoAQlSkq/RClqicm0e/BtZNKhDuqwwCa3uLZRm8XS2kiNlmRFco7d\n1G1Zg17qpQmaoCxlPQGi7hxRz+0CFaiHelzPYT83Wpz2DxMZBasQPEN5kgoGlV7I+3Wt5ODDwYW7\nmwIi4c7YGNFXv5ovGZ/anlPpkELB6iKrjkNtSyYS4k/pNlsoEDU22kMkB92hIevvW7aY20nQ5i6s\n3OVUQqZTP+rYec1SmUjJ7pjbaccOAdHd3VYXXdXqLJ929VPrHQyr4mpbq4KAVZKbW+umqF/q9RgE\nfKzKPZ8kEI0VoZODT5pKAXCZ3DPCqnDnRXz/ePHnMJklV/hy8XIrU5r97frVwV2SnXPZDXSl3kjI\n1Gmapqkj3+FoAaosTi5N6hET7sXfJh7PqloNdTDsBKc62SX90W3PIcWp/iQvEaJmgVVBTq4Rt0Dq\n4jl1D2nhbKZmCyzJ9VHrfcpHH/UZc0lSsiRuUyYDUh8TNKGN8XR6SIuwtt186fbqGsmkQj3Uo3VP\n5sfJDuacIM8LjOrPWO83Wtz2D116g1MInqE8iYNBEIla/LpW6oCoq0s802lhdeSWwnQ6b2yfywnY\nkVCmZlq1m48EwK1bS8fB4xjHxkSpFCfLJM9qC5ggOjJC9O53C3huahIutHKtp6etUN3dLdyFJeTy\nDLfqUwXdri4zjlXOq61NzHvbNvH52Jg1aY9TLU8utb6pepz4c3jYe7v1pqqAZ5rKu+rfQPG/l7ED\nG+fWWmkSIkNp8nYMvG7nUZWeTzrwkaA2RNaEQDrJbTuoFO68iEOhDm7TZC6Xrg6nl/Q
5qnV1nIjS\nRYvv8DXoSl2J0pQ2IMEN4JZpuYw4Of0RUwHALulMyVjJhC83gLCOwhk4OKTw39X9dGNQXWr5Y4AG\ntO9voS22FsYRGimJ53QCOG5BnKAJCxyrbrcS8Pg+7dRO0zTtyeIpH13UZbS1lbZaYlJBKAHPOMUt\na5egBO2m3bYArbrX2sGcX3dqNZOvDlzlMZdj8+viXa5reChnheB5naoSeKzUBZPI2ZrG3Vh1yYMk\nmHHL5+CgADcJXdLaqI4XEJDqZT6yn927hWWRu6uqMaLSisctjq2t5u+qW/CuXWLMo6PWz+Jxsw8e\n98nHp1p8nZ6plFhDvk82ax27zuXW6diq544uIy+RtSxNY6NYQ79Ji+pVgUFPADUx7RTYGBXxv5fJ\nj2+cW2ulSYgMeT0GVTxWfuRkePVjhZPb6qDRi9z6cgPTAhENkrCG2rn7SqXJvGCYvMZdqcuV34tk\n/xfV+iOuWqOcsszq+raDU/tRuLevQkgHdVAjNRourHZjkBZS3cMteY+M2ZSPPurTutHaPSZowoCv\nO+nOEjjWWVjjFLdAZoYyWiufXZIk1TUYhBLA1Y1TxEJP0gANUC/1auuDykeEIkZMqLruXiyYO2iH\nJa6UyD0+d4qmLHC6SIu+zjE/51oo/wrB8zpVJfDoN75TB7mFggleKvx6HZtdCQ91X9XyFo1a3ULt\n5qMrxdLTY45XxprGYsKimUoJi2U2K+JKeQIcaTWV26sJhXTjdsvIq85Hvnay0Kpt8EQ9/Kkrp6Jb\nD+mq3N9fCpU6V9tqluCpVY1ZogChp4r+euWO0WuN1f3dRJecaKHKqjQJkSGHY2CB9zfX6sK3Mk3m\nP896NpJ7ObXTVHpBoJtTnTB/XcvvRfJGXlTbZUStVkZeDkZbaIt2DPI9p0y2TnCluuumKKUtkWIH\ngKq1NUYxo80RGjFKnzg9uHuu20Pn+ttIjTRKozRBE7SNtlGCEhSnuJHwCCQAWMZMqvGlTo9+6rdd\nd50FU4pbUmUbOkhV3+MAnaUshaofheB5naqS5EB+4/HsQFJ9X016Yzc2nUVUhbFbbskbbqNjY9ZY\nRvlsahJWOZ5hlccrqu6zlpIun12g9v/zAOFThwjNayVgytu99VZzv8ZGAae5nD45UiRC9OCDYtzS\ngtvWJqy0HNp57Ke0Yk5Oip/Ly6VQrx4zNVEPh+CODr1lUgVJt5I68pg4lXwJUkFY4p3EXSMDg54q\nynGMDmYzt3WUSaOOJ8gXAQVtga1F/GhgNxg08utqKw9ZNxE9liN6KU10kR+/HST8ZruJyqjXbtuf\n032FSsNeJVC2kzNYVvH+zDUlr+dUPSRN8Rvnqe7j1aJaoILFKigtnnaSVs8EJShWfIBEmREeCykB\nUwU3O/AqcWH18YhQRBu32U3dBlRL2L2b7naF50ZqpJ2009aKa4nVzcOSEMnro4EajLE1U7MFKFUr\nppObrXQJb6EWow27mwb8PQ7Fk0b1YH/nUajqKATP61S1TOYiLYPt7VagUeGXX/D299uPTXdhXChY\nYzwTibzFCiohTn3ybLRqoh4VVmXG2JERotH95gUpZuZKwJRbILn7bSoloFOXBddprKmUdT343HTW\nSb5GsZjVTVin6Wmxfr299u6w8ngNDYmSLzy+VNZWVa3adsmbpIK0UlY70zK/qNsMSXMcx5gmW2h0\nW0d5bh2S+3s0Q1UT4qqlat5g8AueaTIP2Xf4ix4SBNfO3uvXNlF2f3Yo4GUbJ1Xq7hvKKq/nlO5C\nv9YX417jPMsBVBVCOPQN0mDJPHn2U1kGhGd3lRlxJQQ5xYKqjxjFaJEWPVsivTyaqIkiFHF0//UT\n58kfUYoSj4m9LX9bSVkVdXu1LzU77gRZ45tUK6bOgimPSZrS1Ed9JZZQN8kbCLwuqe7c8xLfHIJq\nsArBM1SJOAzwZDPlXsyrsZh2yWu8goPcTrW4qVZ
PCXdDQ86gJy2R6ns33mju19oqxj0wIPqMf1pc\nkOIP9hsWTwmYXV1m7c7hYaLOTvG7jIHUuaByi6f8XE0cxK1Pcq5NTcIyK9fAa6kU9XjzBEHcasuP\nPb9ZwefQ1GQdqx9rY5BWys2QGddOtXQTJiJH30XtOjLT1keLrtmZIaJ1tww2vMtNYCVWFeQNhkou\nXnhdziEqWjpBRG1k/idtKv5sIcPiWYlF0ot767M5onya6K/HiVY34d/d9SrdhX6tM3h6jfO0SwIk\nS5o4/U3JvzkJjLrstmofur4SlLB8Zgd1W2mr4b56F91l1KEkElmHvbjx3k63G5ZT7iLcRm10E92k\nrdnpFGPpNOZO6ix5f5EWiciE92ma1pZsAYFaqVW7bk41W3OUM/aXllCdBdPufPT6PerkSu43vrnW\nfxvXukLwvMYUdMZZDjBBJBLS1XCU4+Yur06ySy40OmpNMCQ/y2ZNEFSf0WhpLCWHQPW9hobi781r\nhNycxc1WPrnltbdXuNbyGEhdtlf5HB01gXx5WV96ZMcOMwsuz5Y7NWU9dk6lUqRLcTxutcjyOXML\nsHrs5RySSatLMre+ejkXg7JS1hzcAlY5AF7RnP36LqbJ+MZezxLNDfqP79wMVuJqyrx4eYy66W99\nwWCazH+YWSLz+I2RSYeLJCydy/r9/H59ezlFLlXSQagNU7nlKao9Bp34uHbTbouVz62WIweGOMVp\nmZapn/oNWEtT2gJJEgxjFKOdtNN3vOdO2kljNEZt1Ebt1G6bEMfp0U/9xto8SA9aPnMaj5OFM0KR\nkqyzcYqXWDKTlDSATgKeXRxnX/Eh2weZGWydYjb5OqiWULvjbgekTtZrJzD1G98cZrcNViF4XmMK\nwoKkSzwjy4b4gQopbkFRy5DoMs+qQGpnfbUDWgGfeQMsl5f1CXTk0y7Jj/rkrrR2z85OPeRGIgJI\nl5fF2FU3XgNoYU1gpFqfcjnrfrIdCW7crXlx0Yz7VI8Rt3Cq41ePPYdCp/jaZNK+jqfduejXSml3\n3lU7vpOoSuVUiioHwGsxZ0Oq+StNIXD4lLx4aaOXxNLl856Xztb66EKH1UrKIy2pL1Y7608AtVOD\nLpWbW8hR+kCaxg+NVy2zbrnW8Uq+o+o1g6ddDc8kJS11Op0sWxxOucVTwouEJLs4TJ1lz+sjTnEL\n3PJEPeqjkRrpFrrFiH90cnH1+miiJtt27GC1mZrNmNK8eE+1qk7SpGUtpTtyP/Vb4lHVGwJeIY5b\nXPnfAt/fzXodlHWyXv82NqtC8LzGFIQFSU08wy1fEorsLJde2tZZ0uzGzS+u1f14vUtptRwelsCU\nN7aVgCQBk1tDYzEzGQ+3/KnPeNw6ltZWe0up01PWueT7NjSYbXO4jcfFdmNjRNu3C1jkgAoQ3XST\nsEr39YljwqGXx4WqwKZLtgSIJEb82KtQaBdfq8tQXI2YSzvYqnZ8J1F1wbMcN+FazNmQCjjXcppR\nN1Ipk2TkxcsYXSQQ0W35vOfdp0nkDBrz16Utl1YKY2kSh7+jQHSsmsGZsqMKbnAE0IS1vQNpwmMg\nPAaamqvOXZdyL56r+R1ViYKKkZPQIWFqjMYoS9mSNmV/YzRm1NFU64xKoORutNK9VloH26iNeqlX\na62MUMSSTMjJ4ihrcd5MN9PddHdJ6RW7h86t1u8jRjHP/ekerfnWklIuHdRhWctGatTGmyYpabFE\npihFHdRBvdTrOWZT/Vtwqs3Kz5GNtE7KucqbIyGwWhWC5zWmasS5cSul1apoXvT6sYCqF8xObrY6\n66uM7ezvFz85xE1OijZ5TOfNN5tWuslJqyvs6GhpzCJA1NxcCqLqvioE8ufWraVxlg0NZkZbbnHs\n7RXuqlu2mLGSvAao3bOx0d6FmMOZ2t/UlNU9VkKo6o7r5dhJ2QFpENZML/1v5vjOcrWhc/brqltN\nFSlq4fdzdGB
/mg4dGqe1MixREsYW0+RMKm6fu6icpauwS9/tuYFpze47BNCR1ya8wvj4oXHCY6CR\n/SNVs3jWw8VzkAoqmYuEDrckQ7y/ARowwG+apmmURqmXektAkceaLtOyxY13kiZLMtruol1G7GiE\nInSUjlIzNTtCHG/TS6mVXbSrpOSJ7M8NNu0+S1DCyFIr20lS0tbau4t2WWC9gzos2WVV4JTQnqSk\nBS5VeFePm90xd/pb8JLddiPkNtfrXSF4hnKVvMBV3VV55lmvbn86yFT3zeXE5zJpjcy0qovt5E8O\nI3KsQ0PW7WWtTZ45trvbhMTOTgGt6bR1XFu3ijHoINzumc2WWhYnJ52TC3mBWgmd/LVdPOrOnVYw\nlzGYHOCcss7anQvqtkFY33TngQqi1yNghnJRmohAdOD30vTYY6DHHgPNlWGJKjZDh9xIxQPJ6CCm\n2ol+gmwvTc5gWrP7DkpH5bi5eh1rmrzBfWGtQFNzU1WDTqL6uHgmKt9SqR4nGVfZTu2eLF1uoCph\nJE7xklhK/rnqjilBUX1Id1g+bxnbKOMWVbfdSZo0YkaXaZlylNOWPOHxjzImsp3ataDHYbid2mmU\nRn0nE2qmZlqkRUsdS/mIUpSWaZluoBuM9/qoT5tAiEPsIi1SlrJGsiR+XkhraDM10wRNWBJF8WzB\nPMZ0iIZKXGjtjjn/W9gs2WX5uSLPn1CmQvAM5VncXXVkxBpzqVoj7axWOriQYDQ0pLc+qjAr4xgl\nnG3daq1zSUR08GC+JK6Uw58OIKUFlYMjt3BKd9JUyjpGaaWMxcz95fqoVtOJCefkQl6fst22Nqul\n1OnJYzCle+wNN5juvF7Knehg0E9iKCc5ldepegxjUU61Ju+/P1+7BEZBB6ZdyypS1KF/O06PPQba\nv3+kLIunhLFMgWjdiVQ8kEyaSiFGfc+PW2TQoOfWXr16UlfTzbVe5+xHQbvaluvyqx4nr2VQpCSo\ncusaV4EKNEiDFqthP/VbXGylCy6HUB4Tyl1sG6mRuqhLmwhI1oAsUMFw2+2kThqlUQsAuSUPmqRJ\nC/DJtviji7pogiboZrpZC7FOjwZqoDjFLVlpB2jAso1M5sMhM0tZ57HnRdvSZTRHOQtETtCEAd9S\nTomJ+qhPC5perPzViN+shgpUoEma1LqBhwrBM5RP2ZXU6OwU4MFdOHWw4AQX2ayAGLWOZTQqXEol\nHOksnmpf/B/w9LR1295eot27rRldOzsFhMnX0u1UQm4kYoW7m282614++KAA7rEx03o4Pa1P4HPz\nzdbsu8mktV272Evd0642qe7Z11cKS2pSJd3xUuFPB4O8nWw2mHNLPVeCKOvjRbpakxK229rytQPh\nNJWSy/WqHFHu0wuU/twBGj9QekNAUtTamwWam5sqCzpZM74uE+wscDqIHj3k7QAAIABJREFUUd+r\n13g8ovrypOby4+bq995Nvc7Zj4I+p8p1+VWPk992dKCqWrs4hEQoQsu0bHlPlvUoUIHaqM2oNxml\nKKUpTYu0WBL72ERNNE7jtkmLnFx9dXU6pWVStsNBbIImaJImSwC0h3psrY8gZzdaPm8islg9pbWT\niCwW06N0VDt2Dp58rmqCJ102WTWBkwRVuQ47aIcxhjvoDuM4qVZ+bjHldVT9nI+bxUp6PSkEz1Bl\nS2c11JX/4HKCC25RtXtyWJTupmqGXFU6CypPVARYLZuAsIoS6SFXzaLL40mlFVdXN7S93bqfdFWW\n1uLhYQGuXuAzEjH3c4sLvfNO/dpwF9xEQr+Nenx0LrUcgCfss6P7lt1NjmpCn67WpHr+1CSZz7Vg\nfglKaaL075XeEKgHqZYdCaJjh8Zpcq1Aa4x+VgtWsAmN2v7lx801TaX3bsKLUH+yc3N0q5+pHiev\n7pK6ups6i+IgDRpwFqUoLdKixT1WhUK1lIgENOn6CrK6uU7SpKOrswS1buo2M
rn2UE8JFMYoRgM0\nYFhH5RyGadhYwz7qc6yLyduyi/lUH73UWwK6EmrHabwkvvVBerAkVvNd9C7L6wQlLC65shyNnAfv\nSwJvK7VSL/XSIi1a1pMfjz7qsz3/dJZYWW7GqzaLlfR6UgieoRzllPBFjf30Gy8o2+AZUe3KfKiA\nq3uqQCLHLuM3pWtuR4cVJoaHhUVQvr7rLnP80uKpWg7V9zmQ2MVwdnaaUBmJiO102WNVS2ZLS2lb\nN91kurcuLpbG4N5xh4BAmWxJJ7l9ImG6yMr4Wul+qx5PXYwlT3DELZ5uyYL8fK4r7eKkcmtc6mpN\nStgeGtKXpqmKrgXzi07l0NY40finijcE9u33Vwe0ynSnWnZKXEHTVEo/RTl8VKvhbwqpoOI5CRCV\n3rtxvwgNV9xOfO14rKTfi3m7Y6C6cKqAYRe3mRWVbUsghceT2sV28mytchsJk17qQKqWPd2Dx2hO\n0IS2NIx8SCBspEZby6Yue+xtdFvJ9iKD9pjxuo3aXMfKH1nKGvsnKFFiUZYPtV+ZpImveYpSlpsV\nMlFTC7U4xvzKY65aTP3oWkvUdS0oBM9QjvJiaao04Qvvo7dXD209Pc5JeWS9Tql8Pm/ZXiYq4hbN\nyUmigQERI8nb2rLFBGHuOsz7UPeRGWbHxqwQK2HXzhoZi5mlUCQkqZlqJyas69LWZh3X4CAZWXud\nYFOFMbdyKV6ti9xia9eWHCMHQbdzS3XD9nOOBWkhlet08GC+soY2mcqFd0elyZ22VBWICh9fo6lD\nc/6gs9z+fEi17JS4gjpYrt+fz9NjOaKX0kQXbTinysPfFFJBJU3e1kR378b9ItRr6/Upe1fbyoHa\nLlYyKBDgbqEJSpS067WMBgcsCbbLtEx91Ee7aJelvAqfx27aTXGKW9xQdVDNb4RIiynfp5VaDRhT\nY0kbqIESlKCx4oNDle7RT/1a0OSPCEVojMYsfycyoQ2PNZT9eQHQERoxMgAn82Z2WhUE+WOYhi3J\nh1RrKwfRbbSNGqiBOqjDsdyIPOYyYZGbpd2pjRA660cheIZyVC1qBaoZVgcG9Flao1EBoNJNVk3c\nE4uZLrf33583XEnV7Vpbze2cYFYHwm1tzlZZoNRtV93faV9dW6OjYrzcNZaXs+HuuzrAk+JuzNKV\nWEq1DutA0k5eMt3yMcpasBLQ29v1SYn8nnuVWEid2pL7u8VPVQXUypRTkiSvqop7cz+Jb/12Io9l\n3CpTNV2WlWv53EKORp8apdQXU7R8tjg5B8v1wXye/jZNjpxT9vADNNw5NVUL11UVMio5pO4XoZvb\nx93+OypNlQB1jnI0SqOUohQt03JFF/N2+3JQSVLSk8VRF3/pBsV8X/67as2TtSpV8e04FHLLX5ay\nRrvc6sgf0vq5TMvaDLQRilCi+ODv30V3GRDHXXNl1lme0Ib/fUqwlmP+Z/TPjBjXrbSVQKA76U4D\nHo155k0An6Zp6qEeSlHKaOcOuoPaqM2SXVhdyxEaMSC9gzrobrrb8rkb4OvcrYN2mw3d8GunEDxD\nOapa5Sv4RbrqzukGg5OTArDuvlufYEdNgmP3bG52r4M5MWGN19QBMf/8rrtM6NHFecZiRNu2lcKw\n7iktofK1Gv8qY0TtAC+Vsh43vladnfbWx74+/y6lbqVP5Bj5vDlI68BGdcN2Go9aq3RyMjgrvFfo\n0u1TVRh1IAJdkiS/qspNp1Fyvv5V51QpQFXgsszhfXptrXQYaSICUe4TOUr/cZqSe5P+M666cE7Z\nwy+OrUzO8NxU0BeCumRNKmRU1wv9WvVxrwyoaxEnJwGNw5TXWo9c8nxRrWNu+6oJdiZoQruPLhFP\nkpKOCYl0YCmz5eYoZ8l2207tJdbCFmqhJCWpl3ot4M8trmlKl8xL5xrLQVW1KHJglWNoozbDKqlr\nL0tZC3T3UZ+xRsM0TDfTzTRKoxYrKV+PI
RpyBXw1gZO0yAYJimEsaO0UgmcoV1Vy8Wy3r5P1TS03\nwoFJJhLigMVrXnZ0WEHHCRL5Mx4XsZLc4ifb0sVY2j1lzVEny6bdGPizrc1aN3RkRGTilfvyhEo6\nwNNBkw6ypQVX164fOYEaL7fC4VBak2UJHlnOxo87rq5/Wau0EpUDXbp9qmI1lEqTLRHokiT5VVVu\nOvktIKm+rqE4vPfMzZUOoziX9B+mDeD0mnHV0DQR9RDRGAXLOgEa7pyaCjp+qprlUjazKrfGVAbU\ntYiTU2FKV0rFCQ6cst6q2VgHabBkPXm5FAlDuv4KVLBYOhuowYDBQRrUxocWqGCJJ+XWVBXmeqnX\nYiUdpmHbcjRqjKm6JnbZanlyI905pQNMXvJElnqR5wNfD2n1tLMkt1EbpSltZPX1k2DKzkIdBCgG\ndY6HllN3heAZylWVXDzb7cutXTIhjYTUsTErnMm4Re7CKsGVgyJAdPSoaOs3fzNvAGlbG9GuXaIN\nHhspP+eJfiQ8yJqXuZyZPdfrs6fHCsPlPmVdTHnxr8v4K7Pocuux3E6FSDUL7siINe6Vj1l3nNXE\nQxxInECNnwMSNmUG36kp5/I4XgFQPW6VSgddbq62un2q6qruQAS6JEl1IbfrX+mK20HCFXcDPR/H\n/2MR3v94P33kzbXSYRTnMn5AxHUOPzlMk9+Y9Ayd+Xy+emAdoOHOqamg46f8lEvxKruSN26f1ZO8\nXmRXq0RPLePknGp+6uBAVzNSVxfSLjkR70Odp6wnyhMVEZnJiiIUMepmEjkfJ9l/kpKWtnRw2Eu9\nNEET2lqk3HrL4yZlXCfXNE1b2o1SlCZowhXcLLGcebNfuT67aTf1UI+tJbSbug3wkm0N0ZAFvu3O\nY96WUwbboG+GBHWOh5ZTd4XgGcpVlVw82+2rS0ijA5TOTtMKpsueq0JLf79oK5nMW96XcMsBZedO\n676yFidPzuPkshuJlJY+0Vk6t261utfq3HXtnhzK1f10WXQl+HAglxAnwYjDrNyupcVshx8rDrXq\nWnM4dbKOqTG8dnDGgdgui66dBb1aLuFc5VzUyXGtTVNgMXdm47T5vAJzRJQioiQR9ZFwvR0nyn2z\n6Nb62UNUaF4zQczrHKuQjLQwtkZTuTkqNK/RlRTRJws28LVWoMHHB2n0qVFfAJPP5zd7SGHg8lMu\nxaucrKibxcLq9SK7nmvDepXdXNU4UyldPKEav0lkBQuv62kHqMu0TP3Ub4zDzkrHrV+qO6uUTACk\n1vPk/cnYSgl63FU1RSlLXCcXX5sYxSzrxqF6N+22WOm4C246X+rCy/uXVkv5Hk9eJMcst/Gy7l6P\nTb0mDQqz6LorBM9Qrqrkot5uX/n+9LQ+IYwOLHt7S2MPuWtpczPRrbfau7K2t4u+BgZEuxzOeNZZ\nHhtp57KrezY2Wi2IgJkJ160+KX9yy6N0Q+Zw2d5uhWM5RlnjNBo1LcpuNwuWlwWsLy/rjxXvl89h\naKi03XKhUAfEdqqq62o1lSbzG28zjTsoSTBMkva/QPrfspjU3Jx/EEuT4/qWZdmSUGjXLoPd9P4y\nAaZGNw9yCzlKfSlFyb1JGjs4VtfWvaDlZEWthoWVKyi3u6AusjeDG6DdXDnsyBIqRMxiuADCAVD7\noXYaW9NnSpXz5zDkJAlnOrdfLg54TdSktQS6Wb84vKnQorbDrbsyVlQnbmXdTbspRSkjHpUn+OG1\nQb1Y6Xj/smaodDWWyZB02YW9nMf1CpRetdnHXwuF4HkdqxqJT3Rt2vWjJoTRlcxQwU9CIXfL3bVL\nJMRZXnbPOCthUP4uE+nwGpgc+NRapX7cbrkLL3ct5gApLbtyTCMjJlwNDQkwT6XMz3nNTSk5RhV6\nm5rKi9fkUq2V2awJvepx1UGhn/PB73g2OnOsL13vlq00lX7zbyVjTcY/W3Rr/cx+KrxrrXSN3Cya\n6voq2
5dl2SqQsM7aHTc2p/HPVRdgvMgJrvn8a2LdCzKrboXusE5WVDcLayV95xZy1HGgg3AIhLX6\ncLvbrG6AOcpZ4gg5bBkxhgfM8xtz+vnp5u8E405uv1x2pVzsXGT7qd82FlRN8qOzpMo42K20tcRa\nyeWUtZdDrt/yOGqmXrk2vA9etiaEsFBcIXhex+Kg4FSGo9w2JXzYWan4+3YJYXSxjWrWWt6macXM\na2GQu8LyupyFgtVS2tVlXYvpafdMtPLz4WFrQqQtW0wglv0nkyJZkEy6s7hoQje3BqsgzRMxqQDH\n3X6bm/Xr41fcWukGml6T61RitayFS62dKnJj24xusQ4yMr7+x0NUGFtzBwwJhkNEtI2IukiAyaTY\nr/CRolvrhzTQSaS3aHK4WSaiQTJcd9XsuY6WLe7+y5P85IrtpEhf+kXOqY2oMF6gqUP+XEQt51MA\noOYE13L+eAw09MRQ9eE4TaXHq9ymNtAdtpK++b7JuWRNLr5131EcrCqpv1lLOSUK0pU56aZuAfiP\ngbAfNLSmz5Sqy4qqxobabe/FSqeurwqSdkl7dHNWt+fxjhyI3ayVunjQLuqiu+luT+Vx8vl8ydjs\nrLN8vexci0OFCsHzOpZdGQ5dVlIvUJrLmZY9HrtpZ6Xi8Za7d9v3weFzZEQAmewnEhFWQGnZW14W\nVsydO/M0OSmArq9PWEWzWSuQybnK+cnkRRxOJZDrLJa6pyxxYrd9U5MA3HTaWiNUTbCki6lU3VtV\ngJP1TQETouWaB2HddgNNr8l1amG1rIY1//7787Wt01lSJ7Ly2pxBjSe9X3GNdQOMaSLqJgF2HApT\nJECrQFZwVNdXZzFOs3bUDLiKpdLRssX34/NQ21dVIJGRVreNB5C0QIJbXzopfTjBdWGtQNlvZH0l\nP6pIQWbVrUbCIY8up5X0LfdN7k/S8tpyTRIZ6cCTw8skTXqGgY10y1Utk3aJeaSWaZn61vpo19wu\nmlyzd6F1sgB2U3eJFVIHZE7r4uZmaUna4wGA5fZ8bNM0bWw7TMMW2JVtcYsqh9Q+6qMsZT1bconE\nOaUeDx5vyy2uEjaDLnUS6tpSCJ7XsXidRGkpdMtK6rWkBbfM2dVjtLPs2dV0lFDD+1EhkqgUOvjr\nrVutcKa2199vjTXVZVyVTw56gABgOUfdGNXEQ9yCqovllJAcjQpwVt2UJdxKIFVhV0Kwn2PoJC+g\n6XTcnN7zIj8wGcR8a9Gmc4dkgZEganP6FV/zSwwYxz9nZnwtNK+5A0ba3NeAQj+gp7MYq3DDXy9r\nti+ZXLHPbtZvJ5nwO6a0r5MdYDnNxU87TlL68J2YpwoJmQwFaOGvSsIhjy6n5SaOkvvycfuxngYF\nfQu5HH0unaRPjYM+VNBbAe3k1y21XDnVyrSzHPppy06yj67iQ8YmSpfZDuowSoNwleOuLMfVR33U\nRV2UprSREEgFYF35EA6KquWSx6vqLKrywbPe+k2Ao27P++HjaaZmGqVRRytyqFAheF7n4hfTjY2i\n3Ih6Ye+3pIUfeFXjPLnLLb/o1SUh4jGN3Bqo9sVfSxfYSERYQ/m4ZfkRnuSmv9+Ev2TSatFdXrbC\nI3evVcu/qJ8DJuzzsfM15GCbNXMplMxxYMBqsQUEYPNYULdj6AXq/ABjNSyOfsCvGlbVmseXKjBi\nV5uzGmstxdf8ZQmMbUSF8TWaOjRHhTfXvAGGCoW62Ek3+FJBSYUbJ9jRQVYzmf+FeologIja2XsD\nZFpp7eZn16dfkCy2Y2T39WLVrtSqmCbPcFxNRt0I+bnwDsrV14/1tBy40elAOk2PAfQYQIemsu47\n8PFq1iiocXHp2iw3QYuf8emgTs5X1qmULq5cXhMO2Y2LAxt3fx6mYduER9zyKQG5kRpL5qrW2eQP\nNS7Wz/qq2/NzQ433tINoVZsh0VWo6igEz+tcdllbufVwdFRY33RQyuW
1pIadu2gsZoUl/hmHsMlJ\n0c/u3QK2GhoEaHV3i/cEHOaNUizcmru4aGZx5ePm7XOo0Vk8uSVRth2JmCDc2ioAVk1Y1Nlp/t7R\noc/iyteQWzClRVRCBp8THyOPU/Va7kRda/XGQDlQE3R7RP7ArxqxoAcP5stus6x5K1BjV5uzmpZY\nvuary2R1LZVusl7E56LGTkqqGSOirEObHBQnfE6EW1nl9VeEvddHVhBLUkmcqC95sPhp3SL9WLUr\ntSr6ANc0lb8U9Si7JC66i+CgXH2dLLcq2JdbkkE9pw6Nj9NjAO0fGaE1n19cOjipRqmIctq0O17l\ntCX34eAnrXgt1FIClxxUB2nQm8u24mLLkxDp3J91MZU6SAYJ92PVQrqbdlOMYsY2uhqfXqX7nuLn\nBo/3lMDrBNFSXm4ShHB6baom4AngnwP4ewD/AOD/0Hxek8mGKhWPn5SWR7vkMJVc3NqBgLywjUbJ\nyACrfjYyYnV/dRqbaVXMWyyAHBZ1cotD5TUmZabZZFJAX1+fgHJ1LBMT1qy1sg6nhE439fUR4RML\n1PjoAUrvP0TT/2rNYh2Wc3JbJy/ycmPAz3EPuj2i6sCkH1WSXKhWcOhpbXyYr0rW3K3EiBellf3V\n13aKsu36HLbTzY+XcZGGn67i6xYSACznllReczDjbVdYm1V3PtlZtasiH+B6rSdldroIroarb2n/\n1j+Bci1+6jm1VijQ3NSUb+hUxcuQ2NWM5NtxUHCDh3Lmane8ymlLdxPibrqb4hSnRVos2Z7DrddY\nSdmHjIF0S/LES8d0UqexdqpF0y7mla9PH/VVBG1e/u+p8yvHfVenaljY60HXO1BXHTwBRAD8I4Bb\nAMQAHAdwu7JNjaYbSienOoryolYHparKseo4WRv5Ra9T4hqd+6pfCLODGt3aqMDLE+2o4KnOT+c2\na6fRUSL8nmkB6fmDOQtgy3jS5WUzhnZsrLTWqU7qsXK7MeAXZCttb8cOcc51d3uD9JqpTJ/Darrp\n+gbyNOlBz8vcCmRaD9vI2Q3VTk6xmbwtOZ5+EtZHCZ7NpM8yK/fpoNL5yXjN4WIfO0iUc2kgoqNs\nblNkAuUYGVl3DaVZ29z6202B+KHaWbX9qBoXNZUaV+tdG130vd7B3isA6LarBjx4OV7l/h24jZeD\narnnjRsg8xhJPhavgFeOO3Ct5eUmwUb/XVZL1ypQe1UtwPODAL7BXn8GwGeUbWoy2VD+5QSlqoK2\njnkdm3Q1veMO6ziDiEnUvc8hU8ZSTk9by5lw91jd9p7X5VPCAtL1+f3UceOacROAW1jVONaentK4\n2HITRvEEUEHEEXo9Jqplt26UJj20uWijrbUWVZoQp0DeXW51MMspxqlkCR+PfG5h2+na5vskbfok\nssKpen7xNtR14GsnYbbNYXsnVSlwkl/UDC4MBp5JNcjsrG5tldNXOcBRroUxCOUWcjR6IE2pQ+O0\nvEE1YN1kV4/Si6trNeDBy/Eq9+Lez3irdd5Ii+hW2lrW2vnJWstVb5a4jfy7rKauVaD2qlqA50cB\n/Df2+hMA/l9lm5pMNlT1ZFdKxYt0F+V+QFC3v3QP8dqOHYjp3i8UrPGa3d2lGWUjEWuNUO72K8HQ\nixV28uNrlD00R723rFksqSqs8wRJvB87uNTBvpPF2glUq5HcRlquW1ocQL3GGU/y+Xz9mya8yM58\n5WZ55Ovs1eU2zbbpodJjxT8fJGs9TQl2EhKdQHmw+FpmqG0iors1/UnJ7aSbrZd1ILKunfxdzX7r\n8bzM/0q+PGB1kcUV8MCokRhn8PFgIDTIuppubZXT12axJsiL/OSBZGDrWVGtYQepAODH1XWj4KFa\n1shaqBzXVa5y5647rtU6p65n1cM5tpGqBXj+ixA8rz05gRsvpVKu/ICgTvLL0ms7bjGeaj1MCUZq\niRTVBVeulQRTvr1d+Red1ERDKmz
L19y9WP4us/W6lTThayLrl8oxlZOxuBItLwtLp1N913Ktj+WC\ncj6fv7Z9DnmtTTvAky6lu4koVnyvtXQfucYvSsCzswr2F99rJwGK/L9HlES22UVyB2WeCKi/uJ98\nLV3bORAuFrfT3dTwe4zV7dM2c1WUf3++KjcxLK6ALDHO6FOjtoDjx7IYZF1Nt7bK6WuzWBOMi/xD\nCGw97//P91e9VijR5ljjal3clxPHWkvxGpt+3Wx1xzUEz1BBywt4RlGZXgewjb3eBuCEutHv/M7v\n4JZbbgEAJBIJDA0NIZPJAADm5+cBIHxdR69ffBFYXBSvs9l5XLgAABmMjAD/8l/OY37euv3nPw+c\nO5dBSwvw8MPzaGsTn8/MAC++OI94HHjuuQwSCbE9b296WrQ3O5vBK68AwDze9S5gzx7n8c7MwNj+\n3e+2bq+2DwBtbRns2QMcP262Nzsr5vfpTwOJRAZLS8DCgvi8vz+D97wHOHJEtL+6msGpU6X9vfji\nPAoF0d/amv7zxUXx+cyMWB91PoODQKGQwdCQWN/jx4F9+6zz3bcvg9VVc7wf/nAG27cDp07N48gR\n4PbbM/jxj835qfu3tIjXt902j6YmYGHBPL6f/rR+fQDgwgXxemQkg+ZmYGio9Hjqjo/b65//PINM\nxlzvmZkM9u1j2xfHO3/bPDANZOCtfS/r7fj64Xng+Mb8/dn9vQBAZjYDLAHzP5oHUkBmWwaYBeaP\nzwOfBzLnMkBLcfz/n/K6Dci8lgFOAfNH5oEskJkv9l88vpm24ueH54EOIHOp+Pn5eeAIkLktA4wA\n81fmcf+3gP9wJYMLAA5F59HaUDw+I8D89DwwX5zfADB/Yh44C2S+X2wPxf4uZ4CTwPz/Ng/8EZB5\nVJlfKgNkgfn/eR74v1n7fzgPfJydD9+ZB4aAzD9lgEKx/XeAzM9t1vv4PPAwkEl4PD7q9nK9RjLA\nHof9n8sAM8X1CPB8Oj5/HA/jYSQyCczeO4vsf8ni07d8Gv/18n8FANy2chumb5mG1Pz8PF489iIW\nexYBANn/ksUf7fwj2/Yfjj6Md95+B09/8mkk4omKxsvHl4gnfH+uHd/8w3gH7+DpzNNIIIEH/vQB\nnDh3An3DfZi9dxbHv3u8ovUN6nVLpgUA8K7ou5B6O4Wvf/LrFa/nucFzWFhYAADMNM1g39i+qoz/\nYTyMv8/8PeKI4775+/BZfBYPZB6oyno9MP8ATuAE+jJ9mMUsjs97P377YD//2cwslrCEC/MXfI3/\nxfkXsYhFIAPMYAYPzz+MF/EiFjPFv5/5LP4I9n8/1X7Nx/cIHsHD8w973n8Ws8jOZ/FpfBqJjPh7\nk9tsxPHbLK8/j8/jXOYcWtCCh+cfRhva6mp8G/36+PHjWF1dBQAsLy/Dk9zI1OkJIArgJxDJhZoQ\nJheqa3m1BqkWsELBTHDjx1XT7n03i5xM0OPVPVS1wpYbc8fnPT0t5ptKCQtdoSD6UZP76NxgeXkU\nLy7K09PCdVa1XHodr594TjcLp9N+QVs/ZR3V9naNy22Z1sea1+MMUI7rm6bSb+ApzWd2mWSl9bGD\nrJZAnUup6gbbyNrrptJxgIjiZO/Wyi2iTez3rWwfp/mp54Ic3xBZraHlWBj9unRXySpeqWe5U3bW\nIK2YGyl1jXILOer4i47AXFmDVDUscrU8jrVyafbaj1+rY5AxoPVkAa6nsRBtHtf3SnQ9zDFIodqu\ntqIPjAN4FSK77e9rPq/JZEO5y2/SGa+lMry6sjpJt61dn/l83ti+u1sAYn8/0Q03CNDzC3C6eftd\nK7eSME4uyuUCHS+X4we01ONb7g2JSsVrlNrN26/rbLk3HerB5chxfSXE6WIinTLJyiyucj8OdFwc\nqKRbboqs9TBBlrInV9X/BmoiomkSQNpAJmguklnqhO/jND+nscr9hsi5Tqid0uS8LnZyIcWS88ll\
ne2MYCwvUfeAAjR86FFjJlWqUDAkyCZFXpUm5v8JiRKN7orR8Vr3zUXtVc10OPnew6qVfpGoFOF77\n8XvxH2QM6DRNUzd10xiNbQjsceguNy7UTpX+36s3EK6Groc5BqmagKdrByF41o0qAQenfe0son4g\nwKmkitpnPp8vyXprF4NZrvyulZ/xV9qXW79+VckNiUrkZd7ViDHVqR7A03F9JWwtU6nFTbXC8ddp\nsn4jDyv75sia9Ee3j58nLz2ia2eSSpMXDZKZ/dYu5tNOfK7lmA3LTSiVJkdgLTmfXLY3jMMHzBJL\nU3NzPgZUW7klBqoGgJXcXylaAIO2eFYy9iCTM6kK4jvKq+WwVglSvPbj9+I/yPFvtMWrmv1Xek5d\nD4l0roc5BqkQPENZVAk4uO0rLW/cVdar7KxaXsar1iJ1c2v1YkHL5axutuXK63pvdDmOoC2ZXq2U\nfo6vtGwHmV3XrxznVY30v5UqRwIo+TdykpivIlktoSBhjZTutPIz1erZSPpv+1b2+6Cmb5CwSr6b\nvW4hyv1POUr/XprGPzVOhY9r1q7Ex5L0gJlm7Xq9PlOh3av8AqvL9nIYY4dEiaWR/fsDs3hWQxL6\n2v68jca+PlYCaNUAsJL7K2sFSn0pZet+Wi5AljN22Vf3F7rr2q2gjvvyAAAgAElEQVR5oyHKTm5A\nvJEX/xtt8apV/0ElUaqnZEyhaq8QPEP5VrnXz2pmVrUdv+U8/MLL7t2irElvrwmLuja8WND8lBfR\n9VGPDGKnHTtEjGVTE9HiYjBtBmml1Fm21ay8VRWDnPudXIMtaYPnaloKRjdWV8ulGguqPmVWWB7/\nKZ8xm33k+2omWv7cWfpe+vfYhf4fTJWuGR+nXQwrkb+SMZXKL7B63L6wtkZTc3PeoXODvmwKawUD\nsvAYqPsL3RbAq1U8opMbMQfI1FzK80Wwl7GrF9e8r/6v9FdtzpVakmsNUV7HW69ATLTxFq9a9R/U\nMajnYxmq+grBM5RvlQsNMsZxaEgfI+k3RtRpe517iG573Xt2JVT4dZuf8iJe+3XSRoIqtxT393vb\nx6l+aipFFI1az4UgxI+Jl9hQv7J1OUqT8W224BRPaxngJT0YBakdJCyS3STKn6TJamGcIhPEtpIV\nDGMkypvE2fYgUfYEJJIB9RHRDcU202zbDjKhspUsMZ8EEvGcaSqFVYfn+P9avND/zAgVmgti7BwW\n1VqadoBpB3dpqv7xUFQz1+1a+aJrxK2eqoXQa1ypDkyCctM1XHH3g7Dm7SI4t5Cj0adHKfWllGPM\nqHpxXQvQzufzWmus03qpgFxriPJqPd5oq6IfXUsWPf49FdQx2EzHMlTwCsEzlG+V63apuk2q7bjF\niPqJj9Rd1Om2172n9qW7bnNyAfUyLz9rmMtZ4a/G145GzdKWFu9uxXZu1Xwty3G5dpJbVt5K4d0W\nFMaJcp9YoPQfHqCx/Ydo8uNrNjGYfICkB6NKxWFTwqSEPf6tK/uVILZc/KnW0lSfSSJKlL6f+0SO\n0v+m6ArbXCiFTd1ThVqQ6aIrf24RPwvNBZrKTYm2ZQxqmu2XJfsYVjdxd2M1vrWKqhl4bmAaZwmX\nY18fc3S7dZIOTPSwkqPcQorSB5I0fshbH4W1AqXmUoQ194tgCW/JvcmyQKkaCZxU5fN5LeA6wZ0K\nyLVOCuUVyDfaquhH1bLobQTQ8u+poI7BZjqWoYJXCJ6hfCuoeEO1Hb/tBrG9nxjCcpMIeenXq5tx\nMul9vkFZSZeXhaXTTywrd6vu7S0Fbrc420qlW/OqGX4KROk/9pnwxa8bpk7c4icz03Lgk7DZQtY4\nzRgJC+E02397cRs7F9lGTbvsaXGFzU25f+tr2qBeInpQmUNv8WcrCVDtJDPBUVDwnmb93czW5Fq5\nJtroAHEqdbv1E9OpAxM9rKQpfcBfIqHcQo5GD4xS6lCKltecv
+A4vEnX4dGnRm0BTXdxXQuo0wGu\nE9ypgFzN5Edex7vZVS2LXuiiGupaUAieoTa1JFz191cvsUwtrtvsoIjX+ezo8Ad/G+hhZ7hV6yzF\nulqntZCvGwg+Y/7Giwlf2v79fhr7iI3FM2ilyfwWVWtnthDRUTLjMKUraqvDPk5POyAtPsc/pbjC\nSlj1Yvm0eyaLY9eNc5Lc4d3rMeS1RLk1N7yuC1TluprqwEQPK+M0fgjFPoY99SETD+ExUPYbzu4X\ncvxDTwxR9htZGn1q1Deg1RrqpJzgTgXka6Wm60aqWha90EU11LWgEDxDbapEN6pU100JOUG6sflZ\nn3LX0g6K+PyyWX/tO4FWtY95oVBe/VA3VTJuXzcQ0lQCIE7nVGFtjXr+YI7QvFZ90JdAJYFshEyw\nvItE7KV6g0JC2phmnwDA0+IKK9/XWTXVZ5fN+xI6mRvsFfb5+i4P65Rm7dkdjxyJOFW1/6BdoDVy\n+47aiDqY1ZQKP8HPr0CFtUmamst6bo+7zU5+Y1I7rtxCjlJfSlHHX3RQ7xd7jbhOP4DmlNE2yHUo\n9/+epQ7k2vI1Z4GsZ/lxn90IF9V6KCMW6tpSCJ6hNtQy5iY30JBw1d5uhZxKvyx5v06JatTxlbuW\ndlCkwqOf9p1AqxbHvFJLsVvG4VSqijdKNG6cJeeUYlHzHUrnN5Oq3J4nCOov7jtNRD0koHPUoU1u\nJSyQqItZ9W94h2cXETVr3lsujjdtvn+RbfNCn4f18uKKy9o3ni3kvIYBye07aqOsYxWDkMe7Q3x+\nasbbcsfhd5+xg2OGFdMuHlJ1sfWbHEltU81oG+RxLvf/XujCuXGq97UPwTNU0ArBM9RG5p5wlRsg\nSbhZXg7WHVYFHC8ZbFMp08U0qLV0S8hUrur5mEs5ZRyu+o0SLzGYaTK/xabKAG1lf1cQ5durQKV+\n5tRmjgTsRYioSbOf16edRdOre61drU+QAGIex9lGtFps9++aiVYlmNrNL03Copu1WUsp2b583knW\nMi/VOL883nDYKJdHJxCyAzvL+2PmnbrcZwdtQdAp463bOMoZu3UiRJQmKnykQFOH9PGQ3V/opt4v\n9lJ0T9Roc/hJby68qhxjLFl/o0/bx4zaTiUAi+lmceGsZXKdEst3FfrOUY6SlCQQaJiG63rtQ4UK\nSiF4hqqH3BO26u8nw6LpJ76xUnEwc4JaFYSy2equZbUSO9Wj7DIOB+XCW7G7caXJbdT90+QMPHL7\nISoFKhWgkpo2e0hYSNupgm9r0ma1tTy3FPuy+zzio68kWeB4rYFoldeS1a2Z2zpyFUjEi06QGTda\nrYzDUh7Ht1FJV5wgyQ7sLLGSf9hr/IGm99vHQaoZb9X+dONwgyxP9TUXcpT+v1gGZuUYFNYKNPj4\nILX/ebvF0tn35b6yj4VjjKWmPz+WzyAsppsly2gtrYMllu8q9M3bnKTJQNoMFareFYLnJtBmjsGs\nVOXWY6zUPcQrmFUrlnHTKsCT1e4YBAXNft2NS84pL1ZRJ+uWun8ReH7aTXS/LlGWU38FMl1Wo0S0\nWOy7Eoum3dMtdjPoPtX++LHSQaJXcLQ7Nl6OayUqji9/Wz64Pvy6bTtIgpAuY6sd2FliJQ9OGH+g\nfPvpb09rodEOynTvu0GWE+DpyqGk/lOKCm+WQi1PHITHQJ17Ox2tkeVaHXVjSu5N+or/5Gt88LmD\nnvv2q3qoTVlLy6x6rlej71pbmss5hqGrbaigFYLnJlA9x2BWW+W6hNbyy3IzWA+rpRLO3ICTNeiE\nTka7yj/pss6pNOmBSaci8By+gSgPokMgyjuV98iRcElNkojt5HU7p5S+y4VI3fZbfe7j9PRi/eSu\nu3eQOyR6BUe+Pj1UuxIqxfHlD+aDazNN3s8zL83ZAJ4d2OliJdXty3HhVVWO+7EO7nQxm3x8qS8K\nC27HX3TQxLMTNPq0CaKpL
6U8W4LtxiLnqcaPJvcmjeRFXtvla1zN/3v1EItYS8useq5Xo+9aW5rL\nOYYheIYKWl7As0FsVz01NDRQtfvYzJqYAA4fBkZGgLk5IJHY6BHVTqurwMwMsGePOe+ZGWBpCWhp\nAWZn62M96nFMQclpbpkMsLAgfp+aAvadq/3JWjKGfd72051blnaRwQJEw1OYwj54bJhrAsBhACMA\n5gB4WI5XOoG7CuL3q11A4+niB1PF/ZcAtAA4C+CYTSMdAKIATtt8Xom6fLTbDOCC5v1WAE0ACprP\nOgH80qa9LIA8gHMAGgH8VnEsLQBm4Wl9Dclj01ZsDxBrXMZh3nCVcZ7pNPP8DJZWl/Cjwo9wav0U\nRrpHMHf/HBJx5wZX11cxc3QGe+7ZY9lWttcSbcEluoQjrx+xtDmDGSxhCa888woKK+JkmLp1Comm\nhLHf7L2zRpt2/fD+Dr52EL9c/yVaIi24ePUi1q6s4SquGtsMdw3j9fOv4+TaSWMsj77wKJ786ZMo\nXCxgqHMIT9/3NB554RGjn4lnJ3D4xGGjjalbp7BvzDxR5Odu65V5JoOFFfGd0hPvAYFwav0U2qJt\naIm24MXffhEDWwd8t+tX/Ljw9XXSBCZwGIcxghHMYQ6Jck+yUBum8BiGqgc1NDSAiBoctwnBc2Pl\ndoF8valc0Kim6nFMQclpbiU3RVD7kzWQGzMzMIGuCDCB/JNeLba9B95gYAa4+gTQuApcvhOI3gDg\nCARQvBfAAQBnitumAKwo+0cBXGavGwA4fbW2A0gC6AbwsofxyT6uuLQLCDD8VQAv2HweA3CpuN1V\nm224hgF8G2KsV4rv8flxaHwPxNqsARiCgFkVTOWxKcBc4wqgbUPl9zyzEQej/tZ+/PCjP6wIdnh7\nkwOTaIo0Yc89e/DoC49iaXUJr0RfQeHeAvAtACeAtmgbPnDDB3Dh0gUcOynuqkjI8wJLN375Rqxc\nUP8ohKINUdzUehP6W/rRHG1GW6wNezN78egLj2LfT/bhzCXxh5UdyOKp+56y7Lu6vorb992OlQsr\nWgjkQPzoC4/i4GsHsX5lHTt7duKJsSeMbbc9vg0nzp9ABBFcKZ7ETY1NuHj1omWuunaDgk7AelzU\nPu20ilXMYAZ7sCcElk2q8BiGqgd5Ac9orQYTSq9E4toCmUrV0iJ+jowIvtFpfn4emUymrsa0WeU0\nt9lZlTNrf7KWjsGjOGxy6+EMgH3ALGYt/6TLOqcS8GdBWxLQCQDRdwHYC+B9AOIADsKEziSA7wF4\nP4CTEBBHsEKnnbWR63xxv9d8jPGy+yYABEy+aPNZFAI6ATEXaUHdCuBtzfYpCOhMQIDqFQjo7IGY\nfweACIAMxPH8BcQxBUzwLR5XQ/LY6KBNcyMiaAX6HeXzPLODuJao+GMPysLG2/tC5gtGe0urSwb4\n4CjQ2dyJsw1nce7yORx5/QhSW1LGfnvu2VOyz7bHtyHSEEGsMYaXHnzJsBKuX1m39M8BrzXailRz\nygK0ibiwrEroTDYlsTezt2QeiXgCP/4ffoyZozNojjQj+1wWLdEW9DT34LW3X7Os49LqkgG/R14/\ngtSXU2iJtmBn907c1HwTTpw/YYxppHsEiaYEjrxxxDJXqUdfeBQn3zmJh771kCfLpO6c0h1rflzU\nPu2UQKI8r49QdaNyjmGtr6VChQLEv/lQoepGs7PC8lZPbsf1OKag5DQ3eVNkQ+Y8MwNkMkg8NIF9\ne1b9j2EJwAKEi+JPiu+NQAAIzH/SNbkzPAMBTT9i49gL4FEIt9NjMN1SkwB+AGAAwKsQlr5mlAKh\nA3Q+jxk8gwyevTKB9bdXncfW5XkWpeJWUf6fhI/1fRAutJMAfojSW52tAO5gr18CsAXAcQDbi++d\ngbCayeO5pvTJjqtFM8W+zynv83NjRrOf2szzM8g8k8HEsxNYXXdZzzqQhLjDJw5j5qg5wdl
7ZzF1\n61Rgbp127UnwaY22one9F9vPbsdlEidFsimJ7/3290r247DUiEacuXQGp9ZPYcfXdhhrvrN7JwCg\nPdaOba3bMNQ1ZPR55tIZHD993GhDAtdP3hZ//A1owK1tt+Khbz2kPYaJeAL7xvbhmye+aazdV//x\nq8bvt++7Havrq8Y4AWHBXb+6jsLFAo68cQSvnRN3eIY6h5AdyGLu/jk8sesJ2zW3O06A93NO10bQ\nxzlUqFChglToahuqItVr/ONGjKte12JTqlL/Zh4X9ySAR1Cxq6Krpczu8wxQDCcF+iEALKG83wRh\nERwG8ITSdg+AU96H+QwyWCk2fCumMOZ0F1x139VJ59LLXWgjxedFzb6TAJ4u/i6tkmcg5neO9Z0C\n8GNY582PYQKmy+zNAJ6CWK8tEJbXAZQqA3N9uauuz5jJclwXN1J+YwdlLGYLWjCL2YpvxqyuryL1\n5RTWrwoLZe+WXpxcO4lkUxL39d+HX7zzC8f4zu1/uR2n1s0TXq453yb7XNa0qgJoibTgu9nv4t/9\n4N/h+KnjOHnhJNaurCHWGMO5y9Y7D3ZxpjPPz2Dvq3sNSFY1desUmiPNOPTaIUQaI7g9eTsWfiHG\noIsddZN6nKSLcku0BWcvncWxN63uyDpJ996OWAcWP7poiSENFSpUqFordLUNVXUtLZl8MDNTP27D\nGzGuel2LcrThEO3Vv9luoLOwulgGcSykpQwode10+lwaSVTQke8nAdwG4TZ6RNP2SwA+DOAd2Cfm\nkeoComdbgEtAN0Zwj9YUyOTFtTai2Y7HbV4BZj4+g6XeJbRcbMHsn88icSEhrJl/yrbj7sRnlTZW\nYM5bAnwMwmIpvSPl8cxCgPDZ4vN3YcItl1zfNgiL8irE2qvnhovKcV3cSM3eO+srdnAJS0airRnM\n+HbXs3P3XL8owPPq1avIDmSxN7PXAowzR2e0APjSgy9hx9d2YP3qumXNpVUSsFoyCYR4JI6Opg7s\nG9uHxN6E4V4r4TfaEMVluoxGNOKZ5WfQ1NCEt68Iv+/eL/Ui3ZfGhcsXLNB5V+ddWHlnBSfXTqIt\n2obCWgFvXHkDpy8K3/Erp6+gd0svRnpG8PhvPI5HX3gUR39xFLd+9daS+M+SNcMMzt57FqmjKTx5\nz5OGG69cm+ZIMwCgI9aBP7n7T2zXfqB1ACfOn8CZS2fwyAuP1P1NkVChQoUKXW1DVaSNiH+cn593\n3WYjxnUtxYJKiD58WLBdzeXVv9luoBI2PQKzl3PKApC641v8/MdtwFRBJA4DIEBnClbonIGAphSE\na21n8X0OSVL3QcRGcuiM2YzxNHBvxyxu7ZzC/ZhDPAhXYg9wunTDEhZ2LODwnYcx84nicTgPYWmW\n4uNXEw51AXgDwhr5dxAAfwRinglYj2eLsq/dvdVZiGRF52ACPeD73CjHddHT+RSAnp+ZwTOZDJ6d\nmMB68YSTgOZlrDPPz+CVZ14BngWG1oewx+1GhUY6d0/pFgsApy6eQiwS08Yf6vb93A8+h46mDsQa\nYmiNtRrtvOdr70FibwI9X+zBDVtuAABQ0RRfuFjAh5/5MAAg1mj944g1xvDygy8j2hDFVVzF+tV1\nAzoBGBl5v3/q+wBE7Oium3Zh4bcW8OrHXkVPvEfEp75xBD85K4C3LdqG0xdP4+TaSbTGWi3xn4WL\nBRx5/Qhmjs7YuswuYQnH4sew0rSCkedGMPHsBGKRmLE2dyXvAgADKAH9OdXe1G7s0xxp3lQu4aE2\nXrX6ngoViisEz1AVqZ7iH4thgZiYAP7sz2o/rmqsBZ/Tag2vJTYcohMJzCT2IZNNOM+9lgPVAaTy\neb4H+OA54MkjjIN1oLMEEdu5AgFnKiTdBgFhq8VtzsCqXcWx3Fd8zeArfiqBsXP7vEGnCm3N7ruU\nKAa0bCkCxekR7PlK8Ti0ohSipSKa947BNibXolkAvcX
fh2FaRFUlIDLvOrVlJxmXOwEkLngHuVpr\ndWkJKwsLOHH4MI6WcYdoaXVJlDo5Adxy9Jay3GxVmJx5fgYXrlxAU0OT5X1AQPzg1kHEI3E89K2H\nDEhUEw2dXDuJS3QJC79YMIB05Z0VI/bzbwt/CwCINIgTqSXSgr/+yF8DAF568CXEG+MARFbZ93W+\nD5954TPoaOqwnUNXvAvRBuEAdgVX8N03v4tbZm9B6sspIyvtUOcQvpcV8akS+BrRiJMXTmJ1fdWw\nwgJAW6wNf3L3n1jAevtfbjegsKV496RttQ2nVk7h8InDaI22Gjc4Ord0lqyLTvymyGtvv2YbMxoq\nVKhQ9aIwxjPUNaNrsezJRs2pHsr8eJp7PQyUqaT8y6PQx32qcYYfgACuyxDAdr64XQrCUsjjJ+8A\ncBSlcaKq+iGAVZdJlisGkTm2ubitHeQPF+dxDNa4zwZgdcsqZj4xgz3P7UHijoRwG5bZbFMAfhPA\n4zBLpXQW57gOsQZvFJ/txbn9Ozi7wrqVGOHuum0QcOrn9MhAHx9aZ3p2YgInDh9G98gI7p+bQ9zn\n30AQtSTVsiBOZVtmnp8pKW8Si8QsbsG8ruZQ5xDyv5XHoy88asRfNjc2Y3zbOI6uHMVtidvws7d/\nhu9MfscS3yjH9Ma5N4xMtxPbJnDk9SMGSBprsG0CL731Ek6unQQg3FuJCGcvn7Vs1xXvwvt73o/Z\ne2fxwDceMGIwARGHyfuS73135bs48c4JNKLRqDea2pLC9z72PTwSfwTHnj2GN068gY7uDizev4iB\n+IB2Tb1IdyzLqekZKlSoUOXKS4xnaPEMdc1ow610VdBGzWlDM9oW5WnuNRqoV8tzidVbzaAqLWmX\nIBLixAE8BJHBVrq08uviFQhw4vp7CBhahel2OgTTXRcQcaM/hD7hj6pLEMmLfg576ERxLj+E+K/B\n/3NQ0Sr43/Yh8U/F2M73K3P4GkzoBARM//PiPN4LM/PsWQjodHOFdXOXlevO3XX9yM2tuk507+ws\nbp2aKgs6Z56fwdmLZ5HaksKTu54sG0pU115uAVVrherKm6jW5J7mHnTFu9C7pRdP3/e04cYq4y9/\n/aZfx+n103hr/S0ce/MYzl48i1958lfQ80VR/gQwS5W8euZVAMI19uLVi/i1G3/NMvYHtj2A85fO\n45frph/4aGoUTZGmknmeXj+NwycO47a/vA2vrr5qvD/cNYw99+wxrKD8PQnDV5lv+craCn59/6/j\n5DMncf7qeWAAOHP/GTwSFy61M8/PIPtcFucuqumYnaVzCXfKnBsqVKhQG6EQPENtOtnFJdST229Q\nuhbn5FW1nLtbrIvXmNcSDlYB5iBMIHodpnspF0FkuZX7vU/5/HJx/9sB/BkEvOVhlhkBgGcgYKsd\nztK5vOqULG4rkwJdcdhuD4R1N1V8bwQi+yxXG4TFcw9EnVFpXIoCsMulwtxfHQEZqBwc3dyqXVSr\n2Kl4IoGxfft8QycgoOTYyWNYWVvBB576gOe4QKdSHyrMPvrCo5ZtJZQmm5L4wb/4QQnsvudr78Hj\n//A4Tq+L+EkZ3yj3645348z6GfyoIGoTjXSPYO3ymuGC+6EDHwIAfOUfvoKFlQWcWj+FKKJGDdFX\nTr2CRJPZ5wsnX8DCyoIBta2RVly8ehHfeuBb2rk3ohFvrb+FU+un0NTQhIltE/j2A99GIp7A7L2z\nmByYRHYga7zXHms32o01xIw2fn7+51hYWcCZN84AJ4Gt2Io/KZ74drDodk7pYnvLSYy12coHhSpf\nYYxnqI1QCJ6hrhnVg5UuaF2Lc/Kqepp72ZbnWQCDMC2bvP6mtHB2wATEhuL7FyHgKV58fwJmXKPU\nCoB3AXgOouYl/zZPQ2TCLcBZV2CfnEeqCSLm1KF2qDH2H8BMBvRjmPAmYy3vhEgkJGNZ+wAssjYu\nw5qQCDCB80l4r79
ZITj6TUBUkfwAdYDiNSlX1lYMyJHgse0r2/DhAx8uTYyjAaOZ52dw45dvxF/8\n/V8YMPvIC49Ytn3fX70PZy+dRXOkGdGGKIb3D2PX13dZ2l55ZwVXinc1Yo0xS2zo1K1T2NGxA8dO\nHsOp9VPob+3H3P1zlvM30hDBjV++EReumCfrFXaX5OS6KLMCCJfa9ybfC0CAIQCcv3IeR14/gt9/\n8feNGFUubrm8SBfx49UfI/tcFhPPTgAAnr7vaTx131MG/M3eO4vueDfOXzmPS3TJaMNSsuUC8PbX\n3sbvrv+u5bi4waIXQCwnMVZoJQ0VKlQ1FYJnqE2nTCaz0UMI5UMblSDJj9zOKd/WVwkTD0HAlbRs\nxjXbnoGAxH4A0hNwBCKm8hgEoLVCuON2KvtegbAWnoIZFwoIq+QxuGekbURpjU6md1reAW0hEbN5\nyaWt4zDrac5AWGSPQABgd/F5A8S8pC7AClvDELGmGZggJt1mJUR7sWJK+M2i5kAH+PyOUt2xayBp\nmWxqLE0AJMHjxDsncOzNYyUAwsGoOdJsAOfKBRMak01J7Llnj2XbvpY+HHvzGC5cuYC31t8S2V/f\nOILbv3a7AU4y2VAEEbz02y8ZcYrS9bQ5JrJfNaIRFy5fwL8++q/REhF9vLfjvbh49SJWLqxY5krK\nCX71qoDHM5fO4Kdv/xTRhijOXzlv2eb7p76P4e5hy3vSeik11DmEvpY+R0hLxBP41Z5fLXm/OdKM\nni095htrQMNRQdCz985isG0Q8UaRgEmujTynJHA++dMnXQHRT4Zjqc1WPihU+QqvpUJthELwDBUq\nVFW14aVZApCr9VW1WnGY4Flaf0Oz7wgElL0LZu3KOZhWUAlaCQB3F98zKjAXL6ob1oBDZ0yw9epC\nKw04sr1GGBaky42XEb0YRcPZhtI22TUzUBzrv4EJeEsQFtkCBHwegYDjIxButnblYG6GcL3lICYN\nc8MAJuHdirkBQFeWNiCeVLrZXrx60bAcqjGajcXLg6HOIQuA8My0B187aAFOAEg0JQw3Wm5xk+Cm\nAtzK2gre/dV3Y+LZCXzrgW+hv7UfP/n4T3BX111GMiIJWPNvzAMQVsPT66fxV8t/ZSQB2p7YjsK6\ns4k/2hDFFTLH+sb5N6zWx6Le1/0+dMbFXZ7hrmFMDkzilY++gp64eeL/ePXHeOHkC8Y2KqRJQLx0\n9RJ6twh3hc6mTsQaYnh/7/vxN7/9N8b7w93D2HuPSM+ciCdwc9vNOHayFPoB88ZA4aKYa9CAWI6V\nNFSoUKG8KgTPUJtOYVzC5tJmSPpU8TnFIWc7gB8V3x8B8D2Ybp+/YPu0wwQpCVs8GY7OXbQHAlJH\nUYTFIhTSL4G9OQFugD4GU01SJNUB4OViX6cB/BK4HL2M6NUomi4X3Q2TMGEzBtFPb/F9QFhdea1M\nXmtzqPiU67EXwhUYEBl6e9lnX0ApiMl1+DaAp+Hd/XUDEwT5Op8qdQv2INUtk1u1fqPvNwx30dX1\nVcM9VLqV3rL1FguAJOIJ3Nx6M469ecyAH0AA5cS2Cfzs4z8zkupIi9ujLzyKl0+9jFhDDHcm78S2\n1m2W8Z2+KBL33HfoPvzwoz/EwNaBkgy4ACyQ2BJpMeCwPdaOP/3QnxrWTztdpssGJDei0QLMHTFR\nbiWCCL6z8h384NQPEG+M42dnf4bzl8+jo6kD8YjpsrB+dd0Yz9LqEm6ZvQVb/vsWfOCpD2Db49vw\ntX/8GhZWFnDkjSP44A0fxNStU7g9ebtRJmb7X27H7cnbRUzo/d92jc2U55T8TAJxJYCoc9ctx0oa\nanMqvJYKtREKwTNUqFBV1XWRIEle77ZBWPZOQbjOzkG4n15tJGsAACAASURBVMp4QbldEsI6+gKA\nWyGyxQJWSNLFGX6z2PYCGFyeA+4olpQ5aTO+LRAlWwABmlH2WSuAu4p9PQogC0QuC
/MmgYQl96cQ\nFllAuNy+DNEXNzANsbHPinYwUGwfMC25CQBPQMDWUQgXYg5eKoiVG29ZA6ALRDWIJ1Xj9pzqP3L3\n0JHuEezNlBZK5fAzsW0C2YEsXnvoNRwaP2QBFu4WKmtzHjt5DG9eeFM7zpW1Fdy+T7je8gy4XM2R\nZqSaU/jIwEcMt9qzl84i93zOErfqpq54l+W17OsKrmD10ipW1lawfnUdq5dWceT1I+j5Uk+JGy8g\ngDXSEMGZS2ewfnUdL7z1Ak6cP2FYYiOI4Pyl89hzzx68du41Yz8JoAAMl2IJgX92z59Z6p3yGE55\n7L79wLeNrL/lKoznDBUqVK0V1vEMFSpUIJqZEW61LS0CNq8pyJR1IdV6nFKyrqR0LZX1OdXttgNY\nhrB2bocAT6l+CAB1WrdOWGEvfhW491PA4/9eLHgPBJhyNUMk+umAWftye3G7SHGsncV53Q7hIsvV\nCGFl/SVEzOhWiHqfjTBddbdAWHPVsWdgrYcpS8z8BAJKXyv+bId+XUMFIqd6nbrP3OpIrq6v4n37\n34e+lj60N7Xb1ojkNT25Yg0xI9mOTk0NTfhg6oMGmAFAAxoQa4zhgW0P4PT6abzyy1csFtcGNKCj\nqQOrF70F9EYQwVVcLYkB9aNIQwTff/D7uPfr9+LUuvmHF22I4jJdNn4C+lqfANC7pRevfuxVZJ/L\nGms1desUTr5z0ngdb4yjJdqCnd078cSuJyx1USup0xlEHddQoUKFkvJSxzMEz1ChQgWiTEbEcgLC\nwrlv34YOJ1hlYAUou7lJAJUxmaoSEMmEABGPuV78fQiiJIrbdd8uCLCVygJ4qvj7DIBXIJL8HAHw\nH4q/fwfA5wB8BSKZTxTC4roKAdJyPFPF/dwy4U5AWDG/DeGaG4EA18sAdkJYMxNs28MwQTwLcx1V\nOa1rHer5mRmsLi0h2tKCe2dnyyppUitpQbJ4p2i1I4aZ/6UNe+7d6ws8bvzyjYYFMDuQxVP3PVWy\njQSboc4hvP7O63hr7S0Mdw2jPdaOhZUFNKDBFvzijXG0xdpwev205X0Oc3aKNkSRfyCPe79+Ly6S\nl2K25Wvq1im8/NbL+OnbPy2ZT7IpicLFAhrRiGQ8iVhjTGs1HWwbxOrFVRQuFtAWbcMHej8ANABH\nXj+Ctmgbzl0+Z+lv35j4Q+Fgz9/3KrcbDKH0msEMlrCEFrRgFrNIhHfMQoUC4A08Q1fbUJtOYVxC\nfWozxHLayfWc8hov6OY2KZPqtAB4EQLEbv4B0PoA8JBN2l+euOi/w6yJuRXAf2bbLUFYUNcB/D8A\nDkHUCh0ofiYrTFwG8BaEy6yETjmvncXXt8NMVMRzwQxBWD9PQsRn9kFA51swrb3cY091d5Xr2K78\n9BKHuUFlR+y0urSElYUFnDh8GEeVrFn19h2ljdsrZv1KPHME+74Q04KHU8mO9Svrxu928CjdQvO/\nlcfSx5YsLqLd8W7LfhElI9b61XX8cv2Xlvca0egKnYCI5dz17C7c2XknGlzrBZWvtmgbCmsF/NO5\nfwJQug7SIiuTIZ1eMyFajqs10orCesHY9tzlczjyhqg5mh3I4gM3FH3kXxWJnpojzcYxiUXEF0q5\nCYbCeM7ytIQlLGABh3EYM3WducxZ9fY9Fer6UAieoUKFCkTXdCxnUPGCL0G41P4dRFzlUwAungCO\nfRo4/Cngd/730n144qJHYMLh28XXEsh4QiP1GtQu9K0NAuTU2MvvAPgYTPiMQ9TzXAbwTHE8CxCu\nttxjskPpWwVxuY6vABh8Cbj9Y0DqeeDJM+7rapeldoOANFq809I9MoJ7NtudFsDTnSK1DieH0J3d\n4kRsjbTi/OXzFjCVwPrQtx4yrGmJeAKJpgSyz2Xx0LcewlC3yDiVaEpgYtsEIg1W8GyLtpWAXKOP\nS5a1K2t4+fTLIBCaI83oineVwK1Ue6wdu2/d7
bntCCKINkQNSOSZcrk6Yh1GPdBGNOL9ve8HYJ3b\n+SvnsXpp1RiH1Mn1k4hFYnhi7AlkB7IYvWEU+d/K45snvmkck6bGJl8ZaL3U/gzlrpbiF+oIRrCn\n1pnLQoXa5ApdbUOFClXfulaDR2Xc6LGzwOXiBWf2ItDTZI2DfBUiHlO6q94J4ASEtfAVAJ+E6b7a\nBFELlLu7AgLI/kcA34ewiHJ3WuniqsaxOrnFcm0BsAbhcvt9CKB2mq/Rfsafb7bqtivnl4E3N+iA\ntb66iqMzM7hnz566crP1HPe3uir+tvbssfxN8f0v0SUcef0IRrpHEI/EcexNEZ+Yak7he9nvYeSv\nRozYxsG2QVy4cgHr/z97bx8U13nne377HZoGGmhkhJBakkvWSyIZJBzJsRS1IyleEyd0XshcM3cs\nu2rdU8luJffurrh3tu7O3Jqb3Joqp27NTO2uK9pkxEzingQpkWLZZhRhCSThGFu2XhxJMQ6KiRBC\nvIgWIKBpoPePp5/T55w+p885/QIN+n1UlOjz8pznvAD97d/Ldy6MKKJCraVaami9tx5XR64KdaKd\ndzsxFmENeRxmB3Y9sktS4+k0OzE5P8nWmxwIR+MRVy2SpeceWHUALftbJDWWRpBfG47NZMN2z3a8\nN/SeIDJXOVcJ12ckPIIiWxHGImMoc5QhGo1ia9lW3Bi9gcHpQaHusqmrSXI/1//reiE66vf6UZ5f\nLlkv3158/9NNzSUYIYQQQACHcZjSbAlCBNV4EgSx9FmuxaM+SIVdzSxwxqos+KrAPEB7AVxBPLLX\nAGACTJC5Yt/z5fLLxIXfNcQbENWA1Wq6AaxEvLGQH0ygtsZe84ZCcmrBBCdvkpRM+InPtwHARB3Q\n6gdcO4Bd24CjtuRRT7X6WTVB+pCSrriQi0O7xY7Dew6j8e1GtPa1Cts1rG/AxMwEWvtaYYYZZlNi\nGqxcPF0bvYbh8DBcVheK7EWYmJkQur+uyFuBwel4W+byvHJE5iJCNJCLx+rSajgsDnQNdUGJUnsp\n7s3cU1wnF6Al9hLcfP4m3A43Vr+2Gn0P+gxdKwB43P04yp3luDBwAdPz03BZXfjCyi/g/sz9BDFq\nN9sxM89qTqsKqnDhqxdwqOuQpOmQ+Jq7HW7J/eDpySPhEaEWdGp2SthX3pTI4/DgifInBAFKzYQI\ngsgmVONJLEuoLkGFQICJtDqVWsGlygIUjy7YMyVOC+X1ntVgQu+MlYmmnthy/qu7CMAFxL0++a2V\n+1zuki2Xw1NVh8FqM/2Ii04g3ugIAKKIRz3rwbrt1iPuucnnfBqsxlN8XLXU1wR/ziDgeQqY2AG0\n2aBZKqVWP7tQtikGUnoX/HeUaG5CGqBK3Z9WuqXYQ7LZ1yzUAAb3BVGRXyEZO7gvCKvJinnMSwTd\nttJtEo9JnrI7HB6G3WzHxOwE+if7BdFZbCvGFyu/KJnH0PQQ8iysoNlldQnj90/249bELdVLoSQ6\nnWYnVuStSPD5HJ0ZxSP/8gieb3seY+Ex1TGTcSV0BW39bZienwbAajQvDl1EvjU/YdsnVzwJgF0/\n7lfasr8FRfYiYbn4mgPx+5H3hzwMh4cxEh4RrmFbfxt6xnqEfQ/vOSxs77K6MBwelliliG10SHQS\n9F6KWAxIeBLEciHWLAStrUyELheUikeXqsgW1ym6AKwD6xDLRV8AAH//yxNFxsBqOfl75howESj3\nueT1mWoCTCz8roHVl4q347Wj1QCaY+uOAzgBlvJ7AnHPzbOi/eXCT3yOm8FSgX1gtaCSebuBJz4T\nn5PaZwpagq8JrNlRo8p6AyQVZWo1ppkYO11Ecwv+PLm40PJuVBMnbocbN751Q7KO120CgNPixIFV\nB+D3+tHxlQ6Jx6RYzH5h5RcSjllkL0LrrVbJspqyGvgqfXCYHZianRKWD04PYnpuWtdlMcMME0yY\nnJ/E4PSgY
ofbmegMfnHzF4IIlrPKuUry2mlOLJiWNzAanB5Ed6g7YS4wAWsK1sBhZv6cB88ehPMn\nTvym7zewmWz4yd6foKmrSfKc8PuxpWQLgNg1rPiC8P27X3tXck/49rwpkfgDCLVmQlT7SRDEQkGp\ntgSxXKirY6KztnaZdvgRsVTTb3laqAssQjkFgGfjNYAJKJ6OagXrQMtTSL8X2/dxJNZwqiGuq3wV\nTMCqWb1oWcHohZ8jx4N4aq88FVfPMX1IXsOptd4ASdNU00zpzWp9nYG58XRLj8ODje6NKLIp+3Dq\nrRXtHe/F7td348JXL8Bb6FUcw2axocBagGJ7MXrGevDe4HuCj6fVZMUOzw5J6mx5Xjne/9r72HVi\nl6L9iBVWzEK7u61RtKxaHGYHyvLK0D/Zn3ScbaXbUGgtTPDs5GOE59knTfLzqHRWYkPRBsXnRGx9\nAkDTBsWIVQrVfhIEkQn0pNpaF2oyBEFkmWBQsVnIsmSpercEATwGZj/SBqAitpxH/Bpjr0sAtAP4\nPuKirFe0XwDKDYHkt51HwgAmOpO9n3RrrBeT7LhBsEjnQOy83LE5K0U19RxTy8pGr9WNDsSRuYQ0\n1SDSEuZJx04XA3ML7gsicD6A/gf9Qg1i4HxAEBvcn/S3X7qK33lGE9bL8RZ6cevP46mvYsE6FhkT\njtGwvgG9470JdY+z0Vl8OPwhAMBismAuOoeh6SEc6joksWyR7JNh0WmGOSFdWIlCWyEGJhOFsJx1\nheswM5cYXXVZXZLorfw8qsuqcXXkKgDW4faVna8I65q6mjA4OYjGtxvRebcTDyIPcOKPJ3Dx6xex\nrSyxoxePbuohlWdTdxMrgiAIEZRqSyw5qC5BBbebRf6Wu+gEMu7dsmDPlBtMICH2/7uQpqnytNWb\nYN1hxTWNSgJLK/1TTZSla0GS7LhuADcQPy+tFGAttGo4y2NfGXjsk9bAqdWYKqD0PKVaXxc4dw6+\n119H3VtvIRRW6eRqYG5ckHDrDrnY4P6k0Tujiuu1ONl7Ukjl/eT+J5IxuMApc5RJ9olEI6gqqMLT\nK5+WbM8tW7LNPOZ1bTccHtbc1mayodnXjOC+IErtpZJ1E7MTmEPceoVbrQBAHvIwMz+DVQUstXcs\nMoZDXYeE9e+df0+4rmORMcxhDpFoBDtP7ExIlRW/Pnj2oGYabSrPplbKNpFZAgjABx/qUIdQhnyj\n6L0UsRhQxJMgiKUHF9nZJADgJFj95Q7oT2/VQh6dEp9GsgigUlSLC0sPgH4wISmOQKpFwsSRUB49\n1WITmCCeRbzxUQ2Uo4zy80jnVmlFRZUiwRyDVjy6okRaUeZ0xlagOxRCxwCLsgXOn0fL/v2Gx1CC\nRz7lqZjcn/Q/fViNE8+uxU/2HTEklMVRytryWhTYCnB4z2E0dTVhLDKGivwKeF1ejAyNCNtZYMFE\nZAKRaAT13no0+5rhdrhRWVAJj8ODueicYCHisrowMTuRcFwxWimz2cICCy5+/aLQxddsSvxs3wIL\niu3FsJqsmJqbwswsi4xOYxptt9sSGjhxHBaHsPzKyBVEohGYYEKXvwvf7fyukCq74ecbJNer3FGO\nofAQAPXIdSrPZlYj+EQC3ehGR+yXdgABtCyUbxRBZBiKeBJLDp/Pt9hTIBaTdKN1Cig+U91g6aKj\niIuaTGAgOqW5H48GbgSrFZVHINWOlUp66gBYg6AoIAR9/gRpUx899ybT9y/ZuWSj4ZaOJkOZ/B3l\ntLLPh2s9Hhzes8fw/mqNY9QazewLBrG+oQHfevMsfll33HAK5Y5yFqWsKavBa198DS37W9DU1YSW\nnhZ03u3EwNQArt5j6aRWkxXFtmLMYQ6hmZDg28mP2Tvei+HwMEZnRuEwO7C6YDXyLdJusfJmP2ZI\nbV1K7aUosZdItrEqfOZugSWhSVDCNiYLDqw6gJ3lOxPWmWBCz/M92Fa2TdL
FVz7mPOZxb+YeBsPx\nJknm2FuxWk8t3vW/KzQT8p/yC/dt085N8Dg8cDvcOPPcGeRZ8nD5G5exrWxbQidbLjprPbV4vOxx\n4ftMCkS9UVJqXJQZhG7VqMXhdGsKYtB7KWIxoOZCBEEsLXzIWDOZpIib5Ij9LnMRo41vUmkkVI54\nkyBA2TfUB+17o2cbI8jORVJ79qMI3K+3Zbbh1gL7hobCYQTOn8fhPXvgdjgM778QjWPE1/zVPa/i\nUNch5Fvy0Tvey2o9Z8aERjsl9hI8VvQYuoZZM6GK/ApJA6EyRxk+V/45BPcFE7xDxdE7Tt3qOpzu\nOy00K+LbiCOjJpgQRfx9yFv/01v4Hx/9D7TdbjN8rmL/Uo4JJkEEBs4FcOzmMYzOjMICiyS11hz7\nx2s7rbDCbDLj7efexj9e+0dJ9HnlT1cK16XeW49QOKR6H3kjodHwKNput6G6tBprC9fiiO8Iuz86\nmwxlA2pclBlCCCGAAA7jMNw5+4eIeNghH09iWUJ1CQ85GWwmw1F8poJg9h9+5LboBIx7WaYSdb0I\n5v95AOyaKPmGiu9NPvT5eRpBKVoqOxdJ7dlfujJaCwxA17XO5O8ot8OBlv37UxKdANAzznwei23F\nkmY1mSJwLoCWnhbhmh/qOoSW/S3oHe8VlnGvyRJ7CS594xJK81jtI4/wrchjBrEuqwsj4RG09rVi\n+6+2Y2xmDHazXdiWR+84BZYCXB65LLx22Vxoe65NYicCQCI6ASbEju4/mlBrqoXL6sJoeBSv7nkV\nDesbsKN0hzD+9y99HwB7/njEUZ5qO495mEzx92SzmMVMdAY/vPrDhOizOGX5VN8pXOy8CIBFkuWR\nSx69Prr/KBrWN+DsV87i+DPHBcsbpcj2QkEpuZnBDTda0JJR0UnvpYjFgGo8CYJYWqTZXVQ3bjDv\nyoXEaP1gKvWGKdYowgvgtui1UtRUfG/8UK4jTef+6ahNlbzR3XcEqMvwQ5Ks5pRf2ykAp5CR55N3\nmbU6ndgXDMJhUEB7C7zoe9CH+5H7gijUQq1jqdLy7lA37kfuA2DpqqPTowiFQ4LYLLIV4dSXT+H7\nl74vRN3k9aUf/9nH2P7L7bgXvgcAqC6tRoGtQOiAazfbcX30OiwmC2wmG56qeApXR67i3sw9PJh8\nIMx7IjKBr576KsJzYdybvqd6fo8WPYrvvfM9zEf1NRUqsBQgYolgYmYCbbfbsPZf16Iiv0LoUFtT\nVoPLw5fhPuLG5OwkAPb89T3ow8DUgBBxLbIVYWvp1oTOvkopvjvKdwgR2em5afAGuLcf3E7YlpNq\nHXE2UaslJgji4YRSbQmCIFIlVRGnhg/G0lCNbp/qPqmQjZRUHWMa8S/MOD6kdW2VhN3rPh8GYp61\n6xsasN9gUy3u21nrqcWWki1C+msyCwy19Eil5Xx8cUOfhvUNEruWdYXrsKZgTdLjisf2e/0Iz4XR\n2teq2EyoqqAKW0u2orWvFcW2YkH4AqxT7Ew00cpEjlLabqrUe+vR3t8umcfeir2YnpuW+JMC7Nyi\niOKdu+9gaHoIDrMDDosD4bkwqsuqUeooRXBfEACw+RebMTA9AJvJJqQSA5SyShBEbkKptgRBENlE\nR6MZQxhNQ00lbTULqcoJBACMgfmUHkPmItM60lwXNbUwzWurZFHBu8x6amuxJwXPWnETGHH6azIL\nDLX0SKXlfHzfSp9kndiupdJZqXlc8dhHfEeEcXetYCmzVhNL0HJanNj9yG6hQ+6+yn1Cs6CtJVuF\ncZJRYi9JSNtNlc+6P4tmXzNsZptkecdAh+BPajOxdTazDXce3MHM3Aze/9r7aFjfAIfFgbHIGMLz\nYXQNdQnXyO1w48af3UDD+gZs92yXzJ1SVgmCWKqQ8CSWHFSXQGSalJ+pTIs4o7WaRrfX2idTHWe7\nwbrsDgA4pLGtEZRqU7PQ5ThlYte2/W/
aUxLbSsKOd5n98unThtNsAakQ11tvp9axNLgvCJfVhe5Q\nNzb8fAN6x3vj9YUHjkr2EY+h5Bkq9yWUH1M+7gdf/wBVBVW4/q3ruDN5R+iQe37gvNCsZ33RetSu\nYEa5ltg/JbaVbcOP9/5YELNizAbfFl0PXcf6f12Pje6NcJildbjcn3R7GROOkfkIuoa7JLWwvIZV\n3NmWXyN+DW4/uA18zMR3+1fa0/5QhTrNEgC9lyIWB0q1JZYc7e3t1Aac0I8OL8eUn6lUusPmMj4k\npIp+5Qeb8Mf5AeTBhjf/8iIeWeHVHmchO7/6sDCpwwZI9XnKdppwJsZ3H3ELKaVVBVW49ee3Ujqu\nDz50nOsAQkCFtQI39t2QzClZbas4fdhtd6Otvw21nlqc/vJpAMBjP39MkkZbZCvCWGQMFpMFc1HW\nZdbj8OD+zH1JCmuRtQhOm1PSZVeMFVZB5Crh9/rxzt13MDg9CIDVqj6YfYCbYzcl3W1L7CW4+fxN\nNHU14cQfT2A4PIzPrfgc7jy4g9UFq1FkL5KkJO/+9W50nusENmYmzZY6zRIAvZciMo+eVFsSngRB\nLG98PublCLAOpwZr5B4qFATjZ/+bG9ceYULjC3er0PF/aQuNjAvyZLW0C2xv8rBT/s/lGA4Pw2lx\n4vq3rsNb6FVtRpSM1edWo6+nj3nDIlEA8drWn/57YOyzHqza+oQwtljIAol2IVyY1pTVYI1rDfIt\n+Wi52YL5mAGt0+LE5BxrAmSCCSX2EhTYCrDGtQbXRq8hNJMYBXRanHhixRPouNORYM8CANtKt6Hj\nK+z3zOrXVmNqdgpuhxszczMYnx0XtjPBhO2l27HCuQJjkTFJoyFx3an4eoiFtpZvph4yPR5BEARA\nwpMgCAKoqwNaWzPr5bhcURCMtf+tHB88MoxHB53oDFzXF/HMND6oRzWXWNQ53S61mSQVwdg73ovd\nr+/Gha9egLeQPQvyCJrb7lYdlx/zyr0rgsDjEUDxdm/V1aGvtRX/8DcuXK+cEMbWE52TR1jF8xMj\nblwkbo6khAUWFNmL8OQjT6JrsAsj4RGYYRbEbL23HivyV6A71I3Ou53CWEoilSP2MK0pq0GZo0wS\nvXU73AicC+D6vevoGevBu197V7jm6bCoDbgIgli2UHMhYllCdQnLiIWozwsGNb0cl/UzZeQaK9RQ\nvvmXF/GFu1UZFZ2Ga8yS1dKm4kmaZZI9T6Hubgx0dKCvtRXnA5noSJU6Ss2MtPAWenHrz29JBJC8\ndjTZuCd7T6JjoEMiOi9941KCAOK1rVXbWXMh7qEpfl7UniN5gymlhkNOixMWE6sBLbAWCEKx2FYM\nv9ePYluxZPs5zGF0ZhRX710V6k0dlnhN5/DUMF7vfR0dAx3CWE6LE+e+ck6o4xRTXVqNd/3vot5b\nD7/XjzPPnUmokwXYPeoc7MTAlQEc6ooXTBv5GZJvu9jenkRusKz/7hE5C/l4EgSxeIh9GbdfBNb8\nH0lrMVPC7V6c9FodtaULgg7vy2SprI+s8OpLrzUypZgwAViapGYUa6G8WxeAdLvUZhK9zYa0kHs1\nJhs3PBcWvq90VuJawzVFAeRwu9Hyv7rxYLQfNpMNE7PMQ1P8vIifo+2/2o6p2SmE58LY4dmByoJK\nwTrm1T2v4onjT2BomqWxVpdWo8BagM5BluZaYC3Ag9kHggj2Fnpx4M0Dgo+mmP4H/dj4i42oLqvG\nnQd3hOWdg50SP06H2SGkIu+r3IfWvlbJOKMzozh49mBCVLhlf4skEm2zsI64RbYi9E/0o+6tOgT3\nBQ39DOnZNpXoN0EQhFEo1ZYgiMVDXJ/neA7ofJMtXw61mLlSW6qnBtKHBW3Q8zDXmIVDIZwPBLDn\n8GHNNFuxGPhfTpZj7kYvrE4nfvkfy9Ezpe3HqTXmq3texaGuQxlPueSpnPmW/ATfUC7oaspqcOa5\nM0m
PK0+RlT8v4ufIYXFI6iXlvqKH9xzGS+0vIYoomn3N2Hp0K/om+1BkK8L5r57H9y99XzLfV/e8\nisd+/pguT1CA2arcenBLaLzk9/px/JnjwvXgnpwAS6t1WpyC8F3nWoc1rrjPqf+UXzjvem897BY7\n+if6he0b1jdgYmZC8WdISUDq+XmjhkMEQaQL1XgSBJHbiOvzGpdZLebq1UBfH1BUBFy9CnhFaarJ\nmuVkGn6N8wH0qhwz0w16NM7vYasxSzWaJBYDT/WW44X/ziJ2/8/feXC1ZBiAMZEQOBdAS0+LII6y\nLTCUxIyRey9vEtTsa5bsIx6r8e1GIapYYCnAg7kHAJTrR4FYp9i7cSHXsr8lYb6v7HwFW45uQWQu\nIul+y9lctBmjkVFYTVZ4XV78/v7vMRIeURTVoXAIL7a/CBNMOOI7Isy31lMLh9mhKSpX/2y1IJSv\nfvMqiu3FitfRyDUXP5eRaARtt9seyg+DCILIDFTjSSxLqC5hGSGuz9NRi5ktsvJMcaE5NgYckplZ\n8vTXVjCRlk34Ne5NcsxU/ECToXF+y73GTP48adVSqtXriVNWv/u7xwGwFN2KzdXCciMpst2hbkF0\nlthLDO2bivejUsptsnsvPwb39txauhWhcAiNbzeq1nIG9wXh9/pR761HsZ3VZybzveTeouLaUfl8\nvYVePOF5QlF0AsDGko248xd38GjRo+gc7MRIeARVBVWKkVy3w40Tz5zA8WeOY9eJXegc6ITdbMdP\n9v4ERXapz6mSj6r7U/b/WGQMh7oOqV5HI9dc/FwWWAsUvVuJ5Qu9lyIWAxKeBEHkBrwWc6lHOjlF\n7M0kamsBeS1fsmY52WIhG/QsxvnlMFq1lGrCVCxA6v/5KNY3NODLp0/jF88kNqExMg+1hj7JSKUR\nkZKAMnIMLph6x3s1j+12uHH8meM48cwJrCtaBwCYjc7i+5e+rzo3j8MjqR1Vmq9SYyKA1Yke8R2R\nbFPrqcVH3/xI81wHJgcwNjuGmfkZfPnfvpxwXCWhqLce18g1F4/Z7GvW/DAolQ8fCIIgxFCqLUEs\ndXKliQ0hJRRi9+bw4cR7YtQCJBP3eCFtR5aYxUm2EOlmbQAAIABJREFU0UovTaXmNZX03XRSnNOp\nyw0ggG50wwknggjCrfJQqB3D6LH1bq9nu1A4hJfaX8KD2Qf46N5H2Fq6FU6rU5L2K76uTV1NmlYy\n79x9B5FoROKFqoXeYxjB6PNAdaAEQSSDajwJ4mEgV5rY5DpLWaDTPRbIJR/MTJGKIFxoEaCnTlBN\nBPngQ0ese1UDGtCi0r1K7RjJmhUZGUc+32w0V0p2X8Tr8ix5+P23fp+SL+diCcCHuSkYQRDaUI0n\nsSyhugQZMXsGxZROIk53NxNvra1MhIrI+WdqOd9jg16uRn0wzwUCeN3nw1t1dQiHFiY90OjzlErN\na6asUPSip05QLQ3WGcu9rkUtDifJvVY7hpGU22TjyOd7qOtQwnbidNKDZw9mpK5Vad2df3/HkOgU\nP1MLfe85RlOnidwm5//uEcsS8vEkiKVOMKie0vmwI45y2pgfnm7xZrTzbDYjqnrvsWwOgSZ37gd5\n9fiMijDqg8mFKgCcDwSwfwGjxdn0RpR7Zy4kSj6TyURQEEEEEMBhHFZNs00WyebHuzZ6TfNYWuit\ntwWAckc5hsKsk7Auv1kkvy+ZumeLde+5oCcIgkgVSrUlCCKz5FJKqzhF1e9n4lOvQPfBmLdlLqTD\nyubgG2xZ9ClpYtDKxYgPJgC8VVeHvtZWeGpr8eXTpxc0NTeXa+LSEcXi8+I+k+mKoNd9PuEDgvUN\nDZIPCMTHqyqo0tXARw0j9bZuuxtt/Zm3GMnmBxIEQRCLhZ5UW4p4EgSRWXhKK8BE6GKqHXGK6pEj\nxkSw0c6suZAOK5uDs1FlSgEAJwGEAewAcBRAExbOW1RMEIYaETncb
kNRy33BoCGhyslELelipUTq\nQRzZ0xvN48i7oaoJJyMCK1kkW3w8w42NzgVwsvckwnNh7PDswNEDR5OeqziaCCArkcV0rj1BEMRS\nhiKexJKjvb0dPp9vsadBqFFXx+ooa2sXxZNTQrLOsiIUnymjnVl1HiuryOagOiUf4tFcgEV0B2Es\nwrvMSRaB04I/T+l0kVUjU9GydBrF6D0vIxHfZJHsdK6jeA565pEJ+D3qGe+Bt8CLInsRyveVo9fR\nCyeciLwVQVtfGzwODza6N6LIVqR5L+nvHpFp6JkiMg1FPAmCWHhyqeaUe4OmtC+Mia90jpUpZHNQ\nnZLYmrAaTFzHoqPkvckwWkuqRDZq4lKNlsktTdKpE9R7XkoRX7VIcrJIdjrXUezDWV1avSCRZ/E9\n6nvQBwAoP1+Oof2sXrR+Xz0azjeg/0E/Ou92AqDIJ0EQDwcU8SQIgnjYCAF4CUAUQDOYyF6i3pvZ\nslcxWkuaCfScS6qRSr2WJplEKVKZTiQ51Tm81P4SoogmTQtOF3EkOhKNoO12G2xmGyLzERTbilH9\nzWp0FHagFrU4jdNww032JARBLCvIx5MgiMyRS02DlhpGO+QSulloIZNN9JxLqmmndahDK1olwidb\nJEsHXsxmT3pINZVZqeHSn8b+hK7hLgCAf70ftv02SWffbKRiEwRBLBbk40ksS8h7apFI4oO5JAkE\nWBfYujq0v/FGWvtDyx+SW4a0gonQ5YxBX850yURKbDLEvo56vRyN/o7ix/jbfdcwmZ/8XJJ5VCbz\nLA0iiAY0ZF10Asm9PfcFg1jf0JCTohPQ50uqhLzhUsv+FpTmlQrLjuw5gha0SK69Ef9W+rtHZBp6\npojFgIQnQRD6yIWurZlELKR/+MP09tcS4kY75C4gRvSzLhZYZGdbyKQqRFI5xgePDOP4X1WlfC7c\ns7SvtRXnZc+kG+4E4ZMtknXz5bWciyk6AwjABx/qUIeQ7NORVDsRB/cF0bC+QZIyq7SMIAjiYYaE\nJ7HkoC5si0QwyMwgF7tTbaYQCWnfiRNp7a8pxINgnWJ1+FQuNBkPZC+wyM62kElFiBj9HSU+xq+b\nPkr5XLId/dVLrguubnSjAx1oRSsCsk9HUp17U1cTBicH0fh2oxAZNxLR1IL+7hGZhp4pYjGgGk+C\nIB5O0rU/yaB9SiYa5KRagptx9xuNJkXZagaUjHSOuRB1eJk6hpGGSJmyZVmKZKPe1Yh1DEEQxHKE\najyJZQnVJRC6SZZH6nazL78f7Tt3Gs8z5V4lGRBOqimSBvJgU41cZjyQzW1oVMZKlg6aLdI5ZipR\nK6O/ozIVGTMS/V2IFOJcJRv1rqmm6OqF/u4RmYaeKWIxIOFJEMTyYNM5wH0ZKH8f6L3PlmmpMb7+\nvfcWtWGSaoqkATWZagluBvWzLvSmg6bS1CfdYz5MZFso5TJ6612NPIO5nl5MEASRC1CqLUEQywP3\nZeB+Nfu+6h3g1ue180i11i+QhYxqiqSBPNgMZv5mFb3poJlMXVwMT85ch6w8tKH0WYIgCP2QjydB\nEA8P5e8Dw08AzmvA9SrAW6ytxrTW+3ws4giwfFS3e2G9TJeKmswCdW/VobWvFbWeWooiEYsCPYME\nQRD6oRpPYllCdQmEIhcfY5HOr/7fwMF6Fi0EkueRxvJM2y9fVl7P81c9HqC/Hzh2bGG9TBc6DzaH\nWMqpi/Q7anmQS88gPVNEpqFnilgMUhaeJpOpwWQyXTOZTHMmk2l7JidFEMRDRKaMJL3FLL32zg1t\ncaj3mLzzzsaNQGcnMDrKli8XL9McJp2GO+cCAbzu8+GtujqEM2JOSjyMZNIOhSAIgkgj1dZkMm0C\nMA/gRwD+92g0+qHKdpRqSxBEHHndpN8fT2etqABu3EgvwqenLlKeQtuiUbvFx6ypAdasAZqbtee4\nQPWhRBxum3Lv6lXMxD4kWN/Qg
P1a93eRWAxrmUUnAKAbzO81iJzztSUIgiBSQ0+qrTXVwaPR6O/5\nQQiCWMIstEDinVr5sXk6KwAMDLBl6QiFYFC7LtJoC1i1MZNdO/l55qj4WYqoCTZum8LJ9S624vme\nDwQUBfKy89vsBsBvUQDMeocgCIJ4KKAaT2LJQXUJKqSaspqqAWSy4x88qD4XuegLBlmkU7wsHfTU\nRQaDwLp1gMMBNDai/Y03Uhsz2bVL1d+E0ETNl5PbppRWV8Pr9+PLp08vShRR7+8oPTYvy85vk3/O\nVAuAfix0Q3/3iExDzxSxGCSNeJpMptMAKhRW/Z/RaPSk3oO8+OKLWLt2LQDA7XajuroaPp8PQPzB\np9f0Wu/ry5cv59R8cuZ1dzfaY9ETXyzCpmv/qSn4AKC2Fu0vvAC0t6d2/JMn0T4wwF6XlQEjI2gH\nAL8fvth27e3twHe+A5/LBRw+LDT18X3pS0BrK9rn54ELF+B77rnsX681a4TrhclJ4LnnjI83NcVe\nx8Rle3s78MMfwjcxAdhsaH/kEWB6Gr7GRiAYjJ9vLjwvS/g1F2wDjz2GtS+8AI71O9/B+OQkDp44\nAYfbveDz+4fnnsNEXx8sDgeimzbhnStXYHE48B9PnVKcj575Tl2fAkqZ3+YL8y+gPdWfz1x5/R3A\n5/IBh4H2yzkwH3pNrx/S15fp7xG9TvP15cuXEYoFFz799FPoIW07FZPJdBZU40kQi48Bz0cJmbLs\nKC2NN99ZsQIYHIzPpakpeTqvz2es5jITpHq9xChdO/G5eDzA8DD7fqHO6yEgV305X/f5hNRZR3k5\nwkNDANKrMyW/TYIgCGIpsJB2KlToSRCLDe/AalREpWrZEQgAK1cywXngALBtG1teXQ289550Llrp\nvIuRlprq9RLDr11TU/xa/O53bF1tLbsW/HtKt9WNVldah9uN/S0tOSU6AWnqbNnjjwvfp1NnSp1V\nCYIgiOVCOl1tvwbgHwF4ANwHcCkajT6rsB1FPImM0i5KNSMWGHEznbExZjHC8fsBm005cqoVXcxU\n1DVF0nqmeOOg+/fjy6qqgI8+iq9fpPNaqogjh7nclVYOj8TOv/AC9u7enZNRWWJpQn/3iExDzxSR\nabLd1fY4gOOp7k8QxBJg0ybg5k0gGgWeegqYnY2LzQpR+XdNDXDkiLq40uo0yyOHegkEgJMngXAY\n2LEDOHpUeVx511mtlF+dh5YM0d0tFZ01NcCZM/Gxl4hoyiUSmu4EkLYFx0JYl/BIbHt7u/D9YqN1\n3g+lpQtBEASxKKRd46l5AIp4EsTSxe2WiiqbDYhEWArppk3Ab34DWK0stdbrTdxfrNLKy4He3tRF\nX7Joq1r9pLx2dHBQu5ZUw14moRx1IhbNdbuBz38eeO21hYtuZkCQsXGk53yuqWlRxUhCDacPcQuO\nBqRkwZHrUdRkAvBcIIDekycxFw7Ds2MHDhw9qvueaJ13rl8XgiAIYmmQ1YgnQRDLlE2bmJ+mzQaY\nRWXgBQXAgwfs+7VrgTt3gHv32Otdu4AbN9TtRuRs3qy8vRrydFZ5tFVWQye8ib92DfsAOHiNZWMj\n20Ct5lJ+HAX/zcRyVI1objbJlCeizHM0NDio6S+pRTqRNIfbDbvbjVN+P9vfFoQD7rQsOPRYlywm\nSp6e/Breu3oVM7HGXf1tbYbuidZ5q61fdv6hBEEQxKKTqeZCBLFg8JbORJYYGGDCa3iY+VxWVgKr\nV7PIJhBPq+UKjO+TrGmQ0jGMeIaK01lLSoB33wXq61ldqTitNYbg8zg8jPMOB3DsGNtGpaGQ8EzJ\nj6PwRj1hiFSbM2WCTHkiytR0JkSamtdmSvu7AizSeRopR3X3BYNY39CwIN6een5HyRsoKV1zfg24\n6ASYR6mRe6J13mrrl51/6BKH/u4RmYaeKWIxoIgnQTwsJEshFa/jAtPpZALP65Xml65ZExdxmzc
z\nEcnDf/JjBIPAI48AMzNsX7MZmJ833uWVC6OSEvb1+OMsInvxoqLgE97EA9gTDgOHDsXFoVKk6Ic/\nBP7rfwWuXYsf59IlxbGNlqNK0EjjNUwQLNJ5GMqCTG8qrqwGd18wmHZjnHTFq2T/I4dTTyOOsRg1\nl8mivvIIp9I159egrKYGzpUrYbbZ4GtuNhw9TnbeauudVnbsWk8tDu/JvQgxQRAEsQSJRqNZ/WKH\nIIglzssvR6N790ajzz4bjY6OLvz+mWDv3miUtQmKRhsapOsqKuLrDhyIRquqotFPP42vf/ZZtq62\nVjr/0VE21ugoO8fi4sRjfPppNFpZGY3W1bHv+fZKbNzIxvB4pMfnx3nhhWjUYokfo6pKcZjp0dHo\n6YqK6LTSnJWOI742VVXZu0fJ7kFWjheN/zZegMOJmR4djZ5uaIhOp3gt090/F/j13r3RHwHRHwHR\n07L7/eazz0Z/BER/WVureo4LfQ06Xn45+uu9e6NvPvtsdODup9GG0w3R0emle/0JgiCIhSOm+ZLq\nQmouRBB6SOgoYzByku7+mSCZpUlpKcDT+errgRMnpPvqsTsRn2NJCeuGqxWZkUcA166Np7pWVQG3\nbqkfw2IBenpYRFYpksjnnP9ToNchjfqJmyZVVQFbt8avzZYt6k2Q9EQsk22jZSuTaeoAtIKl4hpN\nU00zOqsW7XuYuqi+VVeHvtZWeGprE1JZExoo5QDUaIggCIJIFT3NhajGk1hyLEpdQmJHmYXd3wiB\nABNodXVMfHFU6hsBMEsSgHWrLS5O3F9cx6g2fk9P/PstW/TN6+RJJiRbW4GXXmLpswC7XhcuJO4j\nPkZBQfx73hyntTVeO8rn3OtgDXhawVJPgYTjtH/nO/Fr09ubOFay48hJtk2ye5ANgki9NlLPuSZB\nrcYz3drPdJDXVWYL/jtKrX7yXCCAU34/ZiYmMjrPdM9PnN5syc9fkGtF6IPq8YhMQ88UsRhQjSdB\n6EHLhzLb+2uhZjUi7sra1MTsRBobEyNYR4/GooP5wK9/HY8GirvP8mNcvRqPjm7YADzxBBvP6wX6\n+tjyzk7gsceY0ObHOnmS1YMCwIsvsqhqOByfwzvvAG+/DXz5y8Du3axT7vAw8w7l5yI+xtgY2+7W\nreTCXqkBz8WLwO7dOLd7N0IHD+L61BSePHWKiYOkY+n4ACHZNmkViKaAG6l3uk3zwxK1Gs/F7C6r\n1Dk2E8ijuBy1+kmteYjX/3zDBpQ/8QTyy8sx3tsriRTLj5vu+YnrTE/5/Vm5VgRBEMTDC6XaEsRy\nQJyCWlERb/gjjqzJ033d7sRUSvE2HL6t2GZETkMD8NvfxkWh1RoXjGVlTNDydQBw4ABLq5WP6fEw\nISv36eSpu1u3xsfJywN+/3smRg8eZJG5xx9nIlosqkNgkc5LTwBDn8SbEnm9yqmFydKKldbJU1L5\nssWwV8kketKrk6CWSprNFFOtNN5kqa/pYDRFVWsefL3V5cJsLCrq8HgQHh6WHEN+3JmJiYydX7au\nFUEQBLE8oVRbglguqKW3csTRqXffVU7nFG+Tn89EnzyVkm/DO9vyaNfJk3GBaLFIj81tR7ze+DIu\nOgFgZEQqOgHWPVZsXQKwjrfDw2w+4pRaiwVob2fnIj7GF78Yf93bCwwNAW1tiWmhPOo39EncJmb3\nbnaaPPrmcmHP6Ci7tsnsUZTWyVNSNexVFirdMxm65pCmTQyP9skFi9ryTKCVxptfXg6zw4GRK1cQ\nXLcObxw4IJx/OvdFLYrLx3xt9Wqc2L1bGFuvxckju3YJ43qqqxOOwY/r8Hgw0d+P+UgEXr8/I0JR\nj/1MLjzLBEEQxNKBhCex5Hgo6xK06u3EtYNer7JgEG/T2yv1q8zPB1auZFHLFSuADz4A1q1jPp6N\njUw8cubm4t8XF8dtR3p79Z1Lfj5Lq+Uit6aGRUXn59lru52
dAxe/c3PA/v1MdOfns2W1tcBrr8XH\nFAvm06dZRFX+Rnh6Ov691wtwAVBeDtfEBBxKolUPBlNSF7PGMZfmkA200njHe3sxHw4jGokgEgqh\nv61NOP+k1yQAwAfWrElBX8lFGv8dxcd80NeHwc5OYWwuvruamhSFG1+//+hRYVzx91wI8uMWb9yI\nwc5O9Le1wWKzZUTU6/mAYLk+R7nIQ/l3j8gq9EwRiwEJT4JYCmiJGz3RKfE2fDy7HZicBP7lX1h6\nbijE6kD/6q+YX2dnJxO78nR5sxlwudjy2lomOsXRSCXsdpYGvHIlS4k9c4bNpayMiU++jcMBdHXF\no6ZmM4tmtrYykVtRARw7Jj3XYJCl6c7OsnNQEpGxiBEAdl5cANTWwp7s2mphsGHQYtY4pjqHpRLZ\n0orS8fPm2AoLsfOVVyTrFK9JNxIbVIlQE2l8TFtxseLYWsJNPK7SMRxuN+xuN0LXrwNgfp/yuWfz\n3uXCs0wQBEEsHajGkyAyRZr2E0nHtNlYF9fmZmlt4cmTrEHPjh3x2kaleciXfe97wC9+wYSaOILJ\nsdlYNHN4mAm6SESaFnvlCvCFL0iXlZTEmw6p0dDAmgpFItLlK1YATz7Jjieu7RRjscTnqmRJw61K\nACYyz55VtjKRr1erZczG/URu2GgYncNSsNlIVt/J11lsNpjtdtz97W8xE3tW8yoq8Gc3bgCA+jVJ\n0ZaGX+edr7yCrkOHEsbORB2l+N546+vxjMgK6VwggJ6WFkRiP6da986o1U0uPMsEQRBEbqCnxpOE\nJ0Fkimx4dSYbU94IaN06FqUUd53l+8jHOX8+3mE2GZWVbFwuBk0m4PJlYNs21txH3JVWiVWrWAQ1\nEmFRzTNngPJyaQ0op6EBmJhg4pA3JyoqYo2GLBb2/eiougdmKMQsWaJRJtB37WLnyJsJFRdL12u9\nUc4F79UcIZcbzXCxdO/qVUFMygWWWhMejqaY5g2qDsO4LU0SMiHckt0b8XnDYkHl00/jwNGjqsda\nCh8wEARBELkJNRciliU5W5eQDa9OPdYeABN1lZVMKHHR6fEA/f0s0sd9K/k4WoKR87nPMcHHx/v8\n54H//J+ZyBOnrirhcrHj8OjmypVM7D31FHv9mc+wSKd4XsEgE7o7drCU2vPnmVCdm2PnVVWlntLq\ndgPHj7OIqtvNRKe4mZB8PSA0bWrfuTOxJnQhvVdzHD2NZhYLnq7KRadS2qfcnzIyNgaT3a66fQK8\nQZXo1M8FAvjpypVoLi3Fm6ImRYD+31GZaLSU7N5IUovn5iQ1rUrkYursUknzzjY5+3ePWLLQM0Us\nBiQ8CSJTGKz1S3vMYBCorwf8fhZJ5AKxpoYt37gxXqPpcknH2bFD/ZguV3wcHnHMz2ciko+3eTNQ\nWJh87hMT0qZEnBMn2FwuXAA+/pgJzT/9CVi/nonRkRFW4zkwwIRvzEICRUVsH73Xlottp5PtpwRv\n2vTee4k1odm4n6mi1dU4y8d0AFnrRJsuXCyVVlerdnQVi7Px3l7c7exEdGYGBVVVKYvpUHc3pgYG\nMDM6itttbfjl9u2CQJqJWaAsBGLxKhdp+4JBOMrLhW3tJSVJBWUufsBADYwIgiCWD5RqSxALQZbq\nBYVxe3pYWuuVK6xxT2kpizS2tbGI3ZYtrAGQ08kiiP/2b6xhj/xnc/VqlhobDrOmPkC826wSNhtb\nz2svS0uBe/fY99XVzHtzbIy9NplYumttLYvO8vnIPTs54ppOjpGU195eFum8cIE1PlK6B7zuUy19\nN1dYjLTfJZJqLE9XPRcIoPfkScyFw/Ds2JGQWpqptGE+DsCa+licTgzGnuNkaao8NXispweFXi9s\nRUXYFwyiq6nJUH2lEkqpsnye9pISfOPSJRRqNQHLMXI5zZsgCIKIQzWeBJErZPpNPBdR4npOJXh9\n43e/Gz++xxOPIsrhtZV
GsNuBmRkm2jZuZOI3P599TUzEhaeY8nImfAGWUiuvNzWZWPMicQ1raSmL\ntBYVGRPvSteK3wO1xkK5xmII5KUiymVI6hqRKAL11lUqNdoRL9vz6qt453vfA0wm+I4cwduNjboE\nknx+fI6Tg4OK9ZVGGv6IRVrJli0Y7+2FxWaDtaAAvubmJSnatO6X0YZIBEEQRHYg4UksS9rb2+Hz\n+RZ7GsaimJl+Ey9vLKQUHeRUVbH/+/qYaKupie9rVGh+5jMsPVa8j9vN0m7v31cWmXK2bWPCt7+f\nRUDPnQP+5m+AN99kUVqLBfjwQ9Yo6cUX2TKbTdrx1oh4l18rhXvQ/txz8E1MZD4inSkWQyAvsihP\nVVCII5Gl1dX4ytmzhsRIsmZFyZrvcIFkyc/HO1euoKayUlGwRiMR3G5rg62oCJGxMUGoqglXpWOq\nXRuxSDvl9z8UjYIeloZIOfN3j1g20DNFZBo9wtO6UJMhiGUHrw8E2Bv0ZG94gkFjb+LVRC1ffu0a\ne22N/Qi7XEwoOJ1MqPGGPvn5LNV00yb2emyMRSi9XuDWLePRzU8+SdwnFFKuOzSZElN5AVbTWVjI\nhOf9+8AzzwA3brDvt2wBtm5lDYzKy+Pn1NwMNDay/fU0+xFfP17rWV0NrF0LHDmSeA/6+liklu+b\n7TevRlOvuQfrQqJxzGxHmnhtH8BsTvQKin3BINpj3YvVonzJ5i4+LgDYioo0vT7F6b2IRnEvFELf\nlSv45fbtcK1ZIxGx3vp6rG9okFisdDU1ITI2hvyKChw4dkwiVkdjP+viY6pdG17vmWyuelhKUcRc\nbIhEEARBqBCNRrP6xQ5BEMuQZ5+NRoFotLY2Gh0dTVz/8svR6N69bDul9cnYu5eNDUSjDQ3xsUpK\n4svlX3Z7/PuKimi0sjIa/fRTNp7JFF9nNkejFov6OJn4qqqKRgsLE5d7PNHoU09Fow6HdHlDQ+J5\nl5dL14+Oxv/Xus7icerrlfczci8zjfz+LkF+vXdv9EdA9EdA9HQWzuHNZ5+N/giI/rK2Njpt8J50\nvPxy9F8qKqJHSkqiJ/fvT9g/2dz5cX9kNidsMz06Gj3d0JB0PP71E5cr+k/FxZJlaueiNB/xsp9V\nVUn203Nt1Oaqh2zf20ySznkSBEEQmSOm+ZLqQop4EkSqaEUx5RFRt1t/lKunh/1vsbBmP/39yg14\nOG43iwTyZkI8lXTTJlY/KY48JmsWlAm4X+eGDcD4OFu2cyezU/ntbxPPw2pl5+nzxSO5tbVs/vx8\n+DUWR72Uajd5tFJshaLHs9NoRDpdloFVS7YjTfuCwZRrMXnHWQCChYg4Ypps7vuCQfx8wwaEY3XQ\nJosF06OjCIdCQkRRfswx/vPKMZsxK+psW1ZTA9eaNaoRWKX5iJd9+fRpSfOhPa++KkRL1a6NOPpp\nlGSR3VQjofJ9M9FMCUjvPAmCIIgFRkuZpvsFingSGebs2bOLOwG9kUweReNRPnG0ct06NkZVFVsn\nH+upp5SjmTU1iVFEkyka3bEjGt2/n0X3xOPYbOlFLs1m9XVWq/Jyj4fN4dNPpVHYhgb1iK04ullV\nxfZXi3ByxFFDebRydJRdY6Vrq0DCM5VOtFoPWue2BMiFSFPHyy9Looo8OidELYHo0erqhDlqzV3Y\n32JRjPyJI4L/XFER/dXOncLr/89uj/5vJpPw+qeVlZrXSGk+8mXZikJ2vPxy9Nd790bffPZZ4Vh6\nIrtKc1AaS23fpRRVzQUW/e8eseygZ4rINNAR8SQfT4IwCo9ktrYmej+K4T6Q3E+TR+W4nUhHB6st\n5N6Y4rG4JydnZoY1CTpzJl7XyYlGgQ8+YNHBq1eZr+fq1cxKhNd6pkqy6KhafejwMIt2fvvbrDMt\nEI/sKfmHFhay2k6+3Ucfsagj//L7lf0redSwupptI24Y1NTEbF2Urq0WPGqq5x6nCo/eL
nLtnNz3\n0Qhi/8jFItTdjcj9+wCkHpX7gkF4/X546+sTmgudCwRwyu9P6rXJ/SxXPf00gMTI37gowjk9MICJ\n3l5hDmU1NZIMg+INGzTPw+F2w+5245TfL9wL+fXNVoRZySdT7d5qzSGZ56Z8X6rNJAiCeAjRUqbp\nfoEinsRyQ289II+aiaOGxcUsMrl/P3vNay1raqLRF16IR9k+/ZRFL/Py4tvt3cu2KS6W7iv+2rEj\nvQhnJr7E8yovj0b9/vh1euGFaLSsLDFa6vcrRwArKuLb1NdL1yWLGoqjoSUlxiKL6ey7hFCLFi4l\neGTySElJdIzXM2sgjrQ1ezyK0TmOOPInjuZCKrKxAAAgAElEQVSJI5z82Hw7cbRVHBXVinpqRQCz\nFWE2UkurN1KsNJZ831yImBMEQRCZAzoinmSnQhBG0WszIbfxEOP3s26z3E+zvp6NK/f6fOQRVuPJ\nsdniUUwlKxTuqcntVZLZrKSLy8W65g4Nsbns3s3sUTo6pNFJsfWJ0jXZto1FLXt7E+tfS0vjkWK/\nHzh+XN/cuH1NSQlw6RLr4quXdPZdQohtKOwlJXj+5s2Uo5eZ6IJqdIxzgQBGr1/HWE8P/O++i0KN\n+yTuEhseHobV5RLqMPMrKvCtGzeSHlN8vfIrKjA1MAB7SQmqnnkGk3fuwOp0Ir+8HPd7ejD0/vuI\nzsxI9tey+hB7cCbzAc0Ecj9SrXrRhRqLIAiCWLrosVOhVFtiydHe3r64E9CbJslTQS0W6fKaGmbp\n8cQT7DVvgCNuOJOfz0TavXvSfbnoNJmUU13n59k6LjazJTpNJpY2+/77TFgODbH02lAIMIt+rRQU\nMOHIhai8CQvA7FV6e5VTW/Pz2f+FhcDf/33ivoEAu07yVFye5nzzpi7hKDxTgQCznKmoWNaiE4in\nPtpLSvCNS5fSEgrJUiy14Om+N48dSxgjWSpwqLsbdzs7MTUwgK5Dh3TPMTw8jIKqKjyya5ewbmpg\nQHPe/HpZXS64N26Et74ez9+8ick7d4R5/+Ff/xWDnZ34/cwMnJWVMDscAKSWLGrw9F7eSCjVFGgl\n5NdRfL9+vmEDpvmHOykgHqvr0KFFT79eriz63z1i2UHPFLEYkPAkiGzBxc+HH7JIJGfNGiZa+Xpe\nmyh+zYWYWh2lWhbB7Kz6OjW4f6URolE2v8ceA155Jd6xt6ODiWWnkwnuBw9Y7WkgEBd1YqxW4B/+\nQb3LKxfO4+PA976XOA+1etvYhwPnjL6B7+5mdaEDA4AOMbOU4ULn+Zs3NaOFWqRTr8eFC/e5FI/R\ne/KkIGraX3oprWOKt//mRx9h/9GjyK+oUBxDSfDuCwbh8HgwOzGBOx0dGHjnHbzd2Agz94kFEI19\nMFT82GNouHYN5bW1AIDI2JimOBbXVeoR8mqiXGm5fDx+LficeeffVKBaTYIgCEIvJDyJJYfP51vs\nKeiDR0a3bQP27WPLeHRTvJ5HB8Sv+RvDmhqWbqqF1crScI1gsQD/7t8BzzxjbD8xMzMsxTYQYFYp\nAItObt0aF40lJSxy2dKSKDxnZ5nAk4tw8fgck0L2hoYtid5InPBMLQObE72k2hxITZTxaJ3R8bhw\nKa2uhtfvl4wxFw7HN5R9oKJ1TPk85ds73G5868YNxTHUGu5Y8/KEbcJDQ+hrbYXN5YJJ9LPnrKzE\nf+rqgsPtxnis6ZCtqAiwWJJ+CMLn+7PVqzES+zCotLpaVcypPdtKy+XicF8wKIhureMoXUsx6dx7\nQj9L5u8esWSgZ4pYDEh4EsRCoCasxIjTRouLgfJyoKwM2L6drd+4Mb5tYaF03y99KS6a9GC1srTX\nO3dYdM8oXAQ6nUzw/tM/xUXi+DiL2ALxOsneXiDWfVQ4PpDo0Sm/NrwLLk9PlqNxXQ1HY/Tcp4cc\nI11QAe3OuVy4fOXsWTxz/LhkDE/s/pdWV8NeXCwZR
0s4y+fZ1dSEycFBvN3YKMzDSPfWc4EAwvIP\nTiwWRCYmUPH5zwNgfp0N164J4/FIcmRsDLfb2pJ+CMLnO9nXh0hsfoVr10rm9otNm3DE7cY/l5eD\nfwwjf7aV5q4mutU6/2pdSzG50N2YIAiCWBpQcyFiydHe3r60P6kLBFhKp7yRzsqVcRFYVgaMjLDv\n6+uZTUplJYscfvIJS2HljYkA1njnvfeA/n59c9i5k0VSOzqAyUlj83e7gS9+EXjjDeDJJ5mwFL8h\nt1qZcB4bAz7/eeDECaCxkaXDihsiVVXFrVPU0NvISYVwKITzgYBms5OMPFNq93UByUSTHy2MNsER\nN+XRarAjR3z/Tvn9muOIz38+lkLK56lnf6Xj8vMTn4ccl9eLyIMHsNjtcK1bh99HIti5aRN6T57E\nzOgoSqurke/x4LZoPvLrxq+rragIkbExxe2OuN2CfYyzshIVTz2FPYcPo6upKasNfhay8RGhzJL/\nu0fkHPRMEZlGT3Mha7KVBEGkiZIY4XWJAItmrlnD1k9Px/fjzT6sVuBv/xb47nfj+4g72wIsZfbU\nKUCclqhFV1d8fD3w7rglJezr+PF4nac8xXd2Ni6aOzqAF19k5x4IsPNqa2ORTj1RRR4JTREejVkQ\nxPeVe4EuMDwyBQDnA4G0z11JyO4LBnWJeY44AmfJz8frPp9uYSy+f3qi1+Lz9/r9WN/QIMxTvr/8\n3MTibV8wmHDtrCoZBVaXCzP372MmFqWc7O/HEIBP3ntP2KZw7Vr4jhwRrpv8WOLruvOVVwTh2NXU\nhN6TJzEXDqN8xw6YYo3KLE4n6t95R4iois/7V088IdSWZgqj95wgCIIglKCIJ0Gkgt7oltg+pKGB\nbXfsGBNgSnYoAEujjUYBbnBvtwNFRdIIZyawWlkHWpntAwAgL48dl0cyy8pYLWdzM7B2rTRt9sAB\ndo5K4wCsM+zatSy1d9Uqlnb77rvGO8bmQEQxKdyGRa+ozgKZjkylE63kGI1aJhvnl9u3w1lZCXtR\nkaJwVTp/LjDvf/IJ5sNhWBwOFK5bh9Hr14WGRo7yckRnZ4XXJquV+Y2Zzfj6xYso27YN4VAIv9i8\nGdOxrAT3Zz+LqTt3EOYfsqhhsaDy6adRUFmJ8d5eWJ1ODH/wAaZjNkkurxeutWsFOxa+zb5gUHK9\nAGB1XR3uXb2Kr164IGkIxc9bbBGT6v0iCIIgiFTQE/Ek4UkQRuHRLC6+xD6VcrgY8XhY1HB4WJ/F\nicnExCf/P1uYzcyCRUxBAUuhjUSknpvl5Uz4bdgQF8EWC/PztFji1i9btjB7laGh+DqxUAWSXzM1\n5CI+195Up5kWrIaR9NlwKIRfxcSZTUWcGUGPkFWbn9Jy8XglW7ZIRJaeeWoJYaUU2Z84nZibmlId\nUyzWABYRHf7wQ+HnwpKXhw1/8RcIdXfDbLPBYrfDbLPB19yMtxsb0dfaitLqajy4dStBhJosFkRj\nP++O8nKEh4bYcptN6IDrKCsT9hNv4ygvR2RsDPOxTIbSbdvwlY4OxevEz3t6dFSSXkzRSYIgCGKh\nIB9PYlmyKN5TgQCrwSwtlYpOi0XqUynfh3tCPvoocPductHpcsW/56JzxYr4cZKxebOx8+FjykWn\n2Ry3QCkokK4bGmLndPEiqze129n53L/PRGdFBas17exkArW8nEVt+bUqLmb/p9oxNosdZzPyTOn1\ndzWIWmMXpaY9DrcbBWvW4G5np2YnX62mP4C+jqVGuquKxxvv7ZWs1zMftXRbvu/bjY0J6aBzski8\nragIAJjHpsWC2ViKO++qW7Jli+TnwrNjB+5dv46Bjg70t7XBVlCAZ06cENJjC9etg62gQNJ1+Q9O\nJ1bX1WHl008L8y17/HHh+0dizYhKq6vhqalJ2MbqciE8NCSITgAoXLdOiOAq3ff9LS0oiHmH3v/D\nH3C6oUFYr+fay
klln6XCUjw38lwkMg09U8RiQMKTIPTQ3c0a/4yOSkXn3Fzcp1JpH+4JKar3SsBu\nZ+mqzz0nXR6NxlNxuWC125VF6I0bxs7HYmHpu0C8ztPlkgrRU6ek+xQVMcHn9QK3bycK0127WO2n\n282+HA62vLCQRX6vXEmvY+xD2nFWTWypCT69nXz1WM3o6Viqdryxnh4ATOjtfOWVhPHk++mZj5oQ\n1uq6CgDmvDysrqvDN69exfqGBiY85+aA2VlY8vKErrrcAoVzt7MTY598IpkrFy7Htm7F1MgI7nZ2\nIjw8DJPdjrwVK+D7yU9QsGoVZqemkFdRgQPHjuHA0aMoXLcOD27dwr0rV5C3YgXcmzYhIttmfUMD\nVuzaJZmDvaQEvuZmnAsE8HFzs6q36XhvL+bDYURCIfS3taFl82aEQyHdtkJiUtlnqbCcz40gCCKX\nIeFJLDkWpQubuLHI1q2s02wsmpEQgeO2KNeuxZclS5edmWEi7s4d6fKaGvYlJhLRl6qrhLgJ0Nxc\nvIGRy8UaBonSDYVtOEVFTDz6/ez/UChudQIwr1K53QmvQRsfZ+fn9TLBuHkzixwfOBBPT+U2Msmi\nD1mKKAK57WemJrbUBJ9eX0XDVjMa8yvZsgUtmzejubQUbxw4gIJVqwAwK5GuQ4c0z0vPfMTCVRy1\nssSebaV9v/7BByioqsKf/f73ePbNN1Ho9WJ/SwssdjsAlg5b+vjjgs2KUhOhsscfl8yVC5cHfX2Y\nFXV0js7MYHpwEJaf/xyh7m4MdnZiemAAXYcOCdHoqbt3MRMKYXpwELfffluyDbd8MQFwxLIdzHY7\nih97DG83NmL0+nUhRZcdUPp7RT73qYEBnA8EUrrXmXo+cpGleG65/DuKWJrQM0UsBlTjSRB6CIWA\nl15ib/Sam5n4UavpE9cickwmZi3yySdArKmIhKoqZoUijjiaTCwyqdSAKBWS1Ys6HKwrrrzhkdnM\nROLFiyyiye1e/H4mNF98kY175EiiIFRqtiO/NuXl7HhcBOdi7WaOotcqJp39jdSXyu1G8isqMDUw\noLvekM/Hkp+vWvspns/M2BgGOzsBAN76eljs9qT7yml7/nl8+qtfwZKfL1iU8C645wMB/OnUKUFU\neuvrkb9iBULd3Rjv6cHM+LiwjxJevx/DFy/iQV8fYLFg5e7d+NKJE0JNKMDSaedmZhCdmYGtuBjf\nvHIFZw8elHTlvd3WJqk/zVuxQmhK5P7sZ1F//rzkHMOhENpfegl333kH04ODcHg8KN64Edb8fNhc\nLviOHNH9rKT7fOUyy/ncCIIgFgtqLkQsS3Lee4oLLpcrMYro97N1cusTLvz0UlwMrF4N/O536c/X\nbGbNhPr6WK3m+HjiNuvWAX/8Y/y11cpE5NGj6hFIJWHOrw3AoqAPHsS3V+sGuwDdbHP+mVokjHS1\n5Y2DAFa7+MyJEyn5SSY7pnhdXkUFpmXCVu98zwUC6GlpkYhHuUB+48AB9Le1wVJQAEdxMaYGBxHV\n8SFQ6bZtKPrBDzD5d38nCGM+nz2HD6P9xRcxcOFCQiOi9Q0NmJmYkDRzCq5dK5nj6ro6mO12IBqF\nr7k5QZT3njyJ8L17sOTnwxzr3jscs06iLrdLG/odRWQaeqaITEM+ngRhlEyIHLlnJaekhKWsyhv6\nFBayqKER4enzAR98YHxuQLyTLY+Azs+zWtSyMmXRWVLCmgmJhefsLDu3DRuAJ55QvlZKHpzBYDxy\nzJsYVVczuxWlqCmQE/6YDytGUhL3BYOs5lAkivQKHXEk05wkbVY8nwPHjqHr0CFY8vNxyu9nkcjY\nBz1WlwufvvEGjhQXw2y34+sXL+LSD34giZZyQWcrLkbl008L0UA+F4vNBntpKWbu3cOk+AOSGLai\nIkTGxmCy2WArKGAdb/PyMDkwgIvPP49NsVReACirqREE+DMnTkhEOgDAYkF4dBR
f+PGPcfLpp2F2\nOPB2YyPMIp9dW3Exwvfvw15UhPzycpzy+yWR3d6TJzEVy0iYjzVUMpnNqteSIAiCIBYaingShBgt\nyw49wpRvY7MBV6+y1NqSEuDSJVbfqGTtoGRrokZNDXDmDGtGJIqoaKJlzbJiBZurON3W7QYuXwa+\n/e14pJIjjlimkiKr134kB/wxH1YynY6rtq0kkrliBR558smEiB4AnD14EH9qbUXZ44/jwNGjCVFO\nNQqqqjA9MiLYqpgdDsyHwzBZrfj6Bx+gbNs2YdufrlwpCDie2spFJsAEoMlqRelnPwtHSQmmh4Zw\nN/ZzaLJaJVFRS34+rE4nympqhPny69qyeTOmBgYklivrGxowOTgonI/JaoXJbMbKvXsRmZwUIqgO\njwfhmKURj2Q2l5YKPqQAizq7N23C7bY2eKqrsV90fIIgCILINGSnQhBG0bLs4NG31lblTrYAcPIk\n26atjY2zbh3ztvz2t1kjoXT53e9YZ13elZajZbnCRSfvNiumpoZ13m1oAP7wB9Y8ye9nUU6vl4ls\nbu1iszEhnZfHXqdqb6K3WZBWN1u9zYkIw+jpamukQ6hWJ14ArDmP3a54zL7f/AbhoSH0t7Wh/cUX\nAQDjse65ptjzb5P/XAB40Ncn8fLkNiXR2Vlc/Ou/lmwb5n60iDcVWl1XB0dZGfJWrEDxpk2YGRnB\nQEcHbre14e4777CNzWaJ6LQVFqJ02zaER0bQ39YmOV+H241v3biB9Q0NEsuVPYcPS65FdHYW8zMz\ncLjdsMfOy1NbC091tWQfACiPNfsy2Wywud3I93iYt+jwMG7Ljp8NlqJFCUEQBLGwkPAklhwZ8Z5S\nEyvl5eyLv+mVb6fHS1KcMtvWBty6xSKTra0sssnhvp1ud/JIpJxIBHjsMfZ/XR378npZ859kIq6m\nhglKufCsq2Odeg8eZDWpxcXAiRNxaxQ+x48/ZgLwc59jacQjI6wpUrajkFoCVc+HARqQn1nqGEnH\nTdaJ15KfD4BFFGGx4KcrV6K5tBQ/W7UKJ3bvxlt1dYLnJgDBN7Mg1j05OjeHgqoqwS7FKvbFTcLA\nhQv42erV+PXu3Xht9WohTRUAzDYb9re0YPLOHYRHRjA9OIiJmN2Kp7YWc+Fw/GdXlLHwMYDI+Dju\nXbkiLLv1m9/gzQMHEA6F8ItNm/AvK1bgj8ePY25qCt76eqG+dF8wiLyKivg1KyhAeHQUe159Veis\nuz9muyKuSXVWVsJRXg6r04lIKITbbW2CpY3V5UJ4dFRTEKYjHsmiJLvQ7ygi09AzRSwGJDyJhxM1\nsdLbCwwNxb055dvp8ZLkNiMFBSzCyaMgJSWsO2xVFbBzZ7zx0NSUMeFpNrNx29rYMVatYqK4s5P9\nz43sucjlgvPMGSYoRbVnOH8eePNNdt5a4o0LQB5Rqq0FPvoo+6mvWhFNPR8GEFlDr31Lsm0dbjfK\ntm8HAETu38fttjZMDQxgZnQUk/39GOzsRF9rq2CBUlZTA3tREV73+TD4298K40zevYtjjz+O6dFR\nWJQi+3LMZoRHRjDZ14e7nZ2sC62IW7/5DX62apUkqln86KOCUNT6uXV/5jPC9/y82l98EZMDA4hG\nIojOzuJuZ6ckwutwu7H6S1+CKXausw8e4HZbG7oOHYLd7cYpvx9vNzYK6c9cLPaePInw0JBQu2p1\nueDeuBGOsjLMTkzoinqmIx6XokUJQRAEsbBQjSfxcKJWNyhf3thovL6Q1y52djKLFIClwX74IfO7\nlB/nD38wliJaUsIijnxOmzfHbU4A4K232PHffBP4/vcTayh7e4Hdu4ELF4Af/ICJ62vXgOFhfZ1l\nX30VOHRIuzbTKGr1s1p1t3prRYkFwUjNpxjecMdTWwuH243b4sZcgGA5wjvl8hpJNRxlZZidnITZ\nbsfc1JQkkmkpLMScqJGWvDZTC0teHsp27ED
o+nVWV2mxKPrrmmw2qe8mgILVqzF5545wPJPVCmtB\nAeYjEZisVljsdhQ9+iiGYt1oAcBeUoLnb97EKb8/oWuvvMbV5nLBZLMJ9Z6Ttgo4IwMoqanFV88k\n/3BAfA/0fJAghixKCIIgHm7IToUg1FASK4EAcP060NMDvPsuE2Xi17GUPmFbuUjiy3p62La8FpPj\n9bLurVy8Pf00a85z7x6Liqq8eVWkspKJRbebpc6Ka0erqlh6rx7Eoq6qSj2CqSX+MoHaMai5kCp6\nRF6qQjDVfY1YsIgRCxcAaH/pJfS+8YaQMbC6rg7PvvmmsL28mY4SjrKyuG2JqGkW9xi1FBRgTqFj\nrRgtUWp1OmEpKEB4aEjzHIF4Y6PkB403AjPZbPBs3w5HaSnmIxH0t7UJwrCrqQk3jx1LuA7cambE\nVYsfThzDN3AIE/WHETyhz0uVxCNBEARhFGouRCxLMlKXoFQ32N3NopQDAyyiJ38tRilVly/r62P7\nyQ3mx8fj+xw6BKxZw7rI8je1zzyj3PhHic99Lj73WG2cgOjNuSbiNNVkabPZSmcVp9HGbDQSjqEn\nvTlNlmqti57UyHTSJ1PZVy3lUqt+UNzIyOF245njx7Eq5jFXVlODL772mmR7TyylvWjzZpjz82Er\nKYGJP0MxTOKGW7GfM09tLfzvvov1DQ2oePJJzfOp2L0bXr9f+VwLC1GydWuC6PwYTLACLOXVHEub\nNVmtmJdFQBWJRuGsrIS1oADRSARDXV1CqvH6hgaUbNmCU36/ougsq6nB12Ln9+6u07gHLy7VtuD/\nbU7ebfh1nw9vNzYK9jTUJCi3WKq/o4jchZ4pYjEgH09i+WLUk1Murhobpa/FY167xl57PCyddvXq\neM2mEtXVbFve6VY8Pl//2mvA+vXafp5mM0u1fewxJlwnJ6Xr//qvWS2nEvJrwj1HtdJU9W5nBO7J\nyQV6fT0TmPJjKPmBLkNSiS7qqatLp/YulX33BYOKUTMuYgHgfCCgGAnl12CspweFXi+s+fnw1tcr\nWqscOHoUv9q+HfmlpZjo6UGEC7BYtNBRVobCdesQDoUQnZlBWU0NXGvWwNfcjK6mJtw5fx6zU1OK\n6bBiBi5cQP6KFYp2RLPj47h39ariftHZWZjtdsyKfi8ki5yu9Plw97e/xXw4LEQ0g2vXSra529UF\na34+ImNjgr0LwDrorti1C9aCAsGP1O5242C/HzsrnPifjwXhFnmUyp8x8b0RW7Wo3SeCIAiCSAWK\neBJLDl8sCqIJtzVpbQVi1gtJkUfWlCJtPKo5PMxSUzduZNHNvj7lOk2Xi0Xztm1jTYQqKoBjx+Lj\n+/1McJ09y5bxxkQAi2TyRkDV1ay2E2DdMzs6WG3o/fusu60YU5IsB3mkVq+lid7tdHIuEMDrLS14\n6/59hAF2bs3NGT2GEczB4KJbQaQSXdTT2MdI8x+1fXmETc/14aJHvr0eEcuvAW/2c7utTdVaxeF2\no2DNGtzt7JTUbyIaRUFVFdybNmGoqwvRmRlYnU5YnU7MxbYLdXdjamAAkfv3EY1EYLLZhAilnOjs\nLCb7+1UbCc2Ju+Da7XCUl2MjWKSTd9a1FRezDVQsjyxOJ8xWK/7s44+Fe9XV1CQRl6b8fMzEGiEJ\ny2PjRcbHhSixWEwOd3bAM9CKq4cCkuurZmejZtVCLD66/+4RhE7omSIWAxKexPJFHDlMJsY4bjf7\n8vuZWOTL+Gu5ncpHH8U7vPL/a2rYtqWl7PXEBOs829ubmLbrdjPLkhUr4sf48Y/Z93Y7E6ozM6ye\n8+xZZpciPh/xG+Gysvjxi4rUu8DmSAfYUHc3Bu7fRx+A8zYbcOnSotZu5oIVRCrRRT0+m3q20dp3\nvLfX0PVRup77gkEUrlsHi8OBtxsbFQUsvwbci9Ph8aC/owPNpaV4I2ZFwjkXCMQ72ooEXWl1Nb75\n0UfCGJ7
aWpTV1OBurDPuzyorMXL5srC9yWoVOsymhKgue35mBiaTSegkO3PvHqxOJ9ybNiG/ogJ5\n/OdUPsTkJG63teGd//AfsL+lBV1NTehpaZH8jDsKCyXXxl5SgpW7d7Pr5nJhWmaXovQ8cXsVW1ER\ndr7yirCt+MMJJasWgiAIgsgEJDyJJYfuugRe+1hYCPz93+vbRx4R1LJT4a+vXmX/nznDaiy5wCsu\nBl55Jf7a5WJpsuI33eJjHDrExGhBQXw9r+cMBuMNjsSi0+Vix+XHF1ujbN8uFaELUC+pB+FNcUkJ\n9nzyibRxUwYw6kd4fWqKzWcRozzpRCazjVFRrLS9OEKpJmD5NeBenCazGdODg5gZHUW/zA4k1N0d\nj3TOzcFst2N1XR2+cvas4IfpWrcOZocDoY8/Fvabm5oSLEdgMglzNYJadBQApgcHcZU3NAIwGw5j\nqKsLUwMDmB4cTD5G7Oc61N0dnyOYsCzZvBkurxfuzZuRX1GBb1y6hC+dOAFHeTlmJyYSro/S81QY\n+zmLjI2hS1S3Lq+vTfWDCiJ7UD0ekWnomSIWAxKexPJl3Tr2//h4YnMgNd5/n/1vtQL/5b9II4T/\nf3v3HhzVeeZ5/PdKfdENqYUkLMsYGceY4AQb2fgaKGvWJo4xDp148SSe3eCdyqomrtp1qiZ4s5PL\nTtXEtalJpWaSmirXpioLGSfEBmKIMSYuZK7GNg4bcBJDjA22bAxCCCSEuLRuZ/84fY5Ot7p1aZ1W\nq8X3U0WZVp8+5+3Tr4Ueve/zPMXF9mqkN5fzqafsPMtFi+xcz8ceswM8J5A6d86+9tq1Uk2N/drm\nZmnOnNSrqM4P9c6W24YGafVq+++RiF0VN1l3t902xdmm6g1yz57NbGttlrk/FB87prDPQac09hXM\nW7/3vZwHfZP5B/7kIGakwD5dED1SAOvcg2n19bp/3bqEQjzBioqE1yQHjAM9PTr9hz8knKts1iy1\n7d3r5iwOYVmD21a9uyIKhv+ncUyro2kqVYeSPufK+fPVuGaNJM/Kb0WFVFCgvu5undy1S93Hj7tB\n7Kb4DoiahQsl2avDF06ccD+T5Pm0u6lJHYcOSbILELGNFgAw0WingsllrAWBhpNJG46KCsn5QdRp\nL+IU1YlGB9t91NZKhw8nfs2xYoUdDCZf2xlPWdlg8BoMSvfcYz+/Zs3gGJPbvXiLGrW326+74w57\n+27y++vstAsPeSttLlwo3XSTvRrqx72d5MbTjxAj86Nlymg+k5eWLNGJ5mbJGLdCbe3nPqfPx4tn\nPTd3rmKeVUTJbj9SXFOjstmz1b5//8itS5IU19YqMneuTib/f+2nggLNuPtuheO5nwWhkFsUSJJ2\nrFypo88/r8LiYvUOs2I/bfZsldTVqevoUQ309bkBdn00qgc2bkw49tmrr3b7nia3pgEAYLxop4L8\nk6pNSaa820qfeip93qOXU8ynpER67bXEFULvCktrq11YyGnf4OR4OquWqba0Ol/z5mr29trvNxRK\nXcnVCTrXrUssatTWJr30Uupts5GIPffB4sIAACAASURBVA7JXjFdvtw+xrsFN/neetuaTIEWCpls\nWx3r9tx8Nt736qzIhaur1e1ZZRvJWFd1S+vqFK6pkQoKZPX1yerr08ldu7SnqUnhSERfefddlc2a\nlbBt1ert1cUTJ9S2d2/KoNMEg27Rn1Rm3HmnHdiOJi98rJyV1IEBte3dq/Y//EHn3ntPJ3bs0HNz\n5uh8S4sk6XxLiwZisZRBp/NeqxcuVO+FCzq1d68utbaqx3PsqddfV6yzM+Fz7otvJ5fktncZyZX0\n/wQAIPtY8cTkMopVyp07d469Gltj4+DK5IoV6dtztLTY22Zfe21o3mFnp10IyFtFNhCwCwlt22Zv\nd03VbiR5FVeS5s2zg1fJDg63b098nfc1XV32yqZkV389diz9sc5KZvKKq
TT8vR3t/Zmidu7cqa5/\n/MeMVvHy0XArlqNp6+KsXHbHA7zk84ylNUy6Y3c3NenounUJuY6SvSW1uqFB51taFCgpUW9Xl045\n/384Cgrs6s/Ofx2FhTIFBWnbpwQjEVV+5jPqbmnRxZMn026TTSdQXq6+ri69K2muJBMKSZalUHm5\nZtx5p/p7euwVXA8TCLhbdwMlJaq+/XZ1vPPO0O3BhYUyxmjGnXeqqLpajWvW6NfXX+/28fS2QZHs\n1dDLZ8+6969oxgxdbmtTVUODlm3fPqrgP9OVbfgvo3/3gGEwp+A3VjyRf7JV/Ga01Vzr66WPPx4a\ndDY12dtq45UlXX199uqjN8cyWaoWJocP2yuR0ejQoDP5NfFKlKqsTF39NdUqcXIuZ1OTHcB627lk\ncn+msPH0u8w3w73X0eTHOiuXIU/lWO95xpJjm+5Yb4GdYHm5CouLFaqsVMlVV+nc0aPua5xKrdMX\nLNC1S5cqVFXlBptOFdnpN99sf72/P2XQaYJBFc2YoYJgUG179+ri8eNu0BmKRNxKsl4F4fCQr/Wd\nP5/w2OrpkdXbq9iZMwqWlmrJ+vUKJ1W2teLXKSwpUaC0VK27dtkBZHzFtaCkxF7l7O+X1denU3v3\nui1mquO54NMXLNCX9+9XcW2tJPvzKKmrc+9fqLJSX3rrLV2/YsWog04p9TxhFRQAkCkCT0wuoyh+\nk9Fv6JyA9qabEtujjJYT3J09a2+vdbbYSnaPzeEClVRBXSRir552dAwWJHI0NdlVciV7NbW+3g4Y\nDxxIXf11NEHjkSND27l4TZJqt7nS2Ng4qavK+m249zqWADzTIkKjud75eEBpAgF9cc8e1dxxh3o6\nOvRJc7O7yjp9wQJF33xT169YoYd37NCDW7YoGK9mHayo0EPNzfZzu3YpEP+6kyvqLSBk9fbqclub\nYt686Pi1p99yi6obGoaMO5hqu258d8/cFO+z4bvfVTgSUc0ddwx5TWFRkR49dEgD3qJF8XMNXLyY\nUMzIWxhoSbz1ycM7dmhafb0ePXzY/Ty649t2TSCgh3fudAs2jWVup/p8J0ProSsRK1PwG3MKuUDg\niSuDE9AOl+c4HG+l2N5e+09dnb1quWPH8MFauqAuXT7rkSN2QCrZqx779qUNGHc3NenFri69XFur\nWKqVzOTxpwtOJ0m121yazFVl/Zbuve5ualJvV5eKa2u1ZMOGEe9FuvOk69mZarUsXfBaGv8li9XX\npwM/+EHKticXT5xQqKJCoUhEr0SjennpUhVfc40kqffcOR34wQ/c8V08ccI+X3+/CouKFHT6YsYD\nSG/epyksVKiyUlZfn1p37VIoEhmywhnztEwZjd899JB7fwuKitxczekLFug/nTypA08/LSctxRlL\nMF58SIWFUiCggnBYJhRy72ny/Q9HIgpFIlo3b54uOO83fv9SGWn1MtXneyXtDAAA+Ct9MzJgkso4\nL8G7ktjQMLYtpWvX2q/v6LDboYylUq4T1HnH4VSolYYGg94gMRIZvF6K8XYeOaLW+OrPnlWr0udg\nOeNPlYMKcl3iOo8ccfMl9w03n1JIztUsnTXLzQ/c09Sk+9etc1fLvF9zgptkqbbxPj9vni47udGy\ne2b+e02NpMEWJ0We7aaLf/Yzd1zeXM/+y5fVf/myJNnVZSMRxeKrqZIdnDqBZvXChWpcs0a/W7Zs\naC5pGk6Op1fJNdeo49ChIeeYdt11Ckci9tbiePAXKC7WNffdp3t+8hO9sHChm7s50Nen9n37Eu6f\n974X19Tow9/+NiEvNlRZOSRAdF5z9o9/dHNEnfOl4r3G4mee0b5Vq9zKxGPJ50Xm+B4FvzGnkAsE\nnrhyeFcSZ80aWwDmBI+pivZIY2sD46x0SnaF2uQA1hskOudOEzAG4tsRqysqtPhHPxp5/JOFn21z\n4JvxrGYlB5WpzjXS+Xc3Nall82b1x
2Kquvlm1Uejaly9WvueekqdR46o6rOfVcGtt+r0/v263Nbm\nVrt1FRaq4lOf0lV33qlQRYVeiUbV9sYbGujpGfY9hyIRdZ84oYJQSAM9PQpXV2tafb2MpPIbbtAr\n0ag633039QmSCxilcXrfPhV6tvta/f12UBvv0+td0b18+rQ+2rJFH//udxrwFDgKlJWpr7tbgbIy\nxTo6FOvsTLjv4ZqahKAzWFGhRw4cGBIMel8jjfx5e49P/oVEql8mAACQClVtceXIpK/naHmrwobD\ndkB1223S+vVDr+PjOGKLFmnP3r1aLCk8nmq0Ex0IXuFVdCcrb59NJ9gb7UpWcu9USQk9O3c3Nanj\n0CF1HT2q6JtvalpSvnKqKrZOJdXk6qrtBw+q6/333TzI5ODv+hUrdLGtLSG4SiUYiWggFlO/p9VI\n6cyZKquvd1cmvVVnTTA4WJyooEChigrJGPWcPatQZaWqbr45ff/PggIFy8rUG+8TXDpzpv7jn/7k\n3tdYZ6fWzZvn9tpMJVRZqd7ubncM4epq9Z4/b7eNKSxUqLxcPR0dCkUiuuqee/QffvWrlJ+b81lV\nNTSobNYsNa5ZM+znm/zZeufGQG+vTjQ30zMXAK5wVLXFlWe4fpRr10qzZ9uBYXJBn/FyVisCASkW\nG9ySmyqP1MdCPuHyct0vKTzaarTp7o+f/VNHgyq6WTHeiqPenL6xFpFJztVMzg90tvFeam3VvhT5\nyt4qtlJiEZ3kvqFdx44NBp2SwtOnu3+fvmCBCouLddbZVl+Q+p+5wqIiTf/MZxKCTkn64muvuf00\nvSuqocpK1d177+CBAwPq6ehQz9mzCpSUKHLTTTLBYPoemQMDbtAZLC/XF197LSFIC0cievTwYYWr\nq1O+vKCkRD0dHW7QGSgrU6y9fbBXaX+/ejo6VFJXp69+8IEe3LLFDfjT5dUu275dD2zaNGKwmPzZ\neudGsKzsiinKBQAYHwJP5J2dO3emf3K4ACoSsbfY7t3rT4DlDeKeecYOJr2VLisqsl/IZ6xBbLr7\nM9GB4CSrojvsnMojflYcHeu225GKM410Puf5YEWFrl26NKHtR9f778sEAurp7LQr2nq2n1bcdJO+\nvH+/yurrFaqqUlF1tbqOHh3sb+kJSh2FJSV69C9/Sdkm5fUnnxxcjY2vooYqK/XIgQO6f/16t2WJ\niVe2DpaXq3L+fLXt3asTzc0a6O1Vmk25dpEgSb1dXUOC791NTVo3b5560vzCIOQUQSotlQoK1BcP\nmANJ1XVrbr894TNINSfGUkhrd1OTXolG1dPd7X7N+1k2rl59xRTlyqWp8j0KkwdzCrlA4ImpJVUA\n5Q0QnTYofgRYmzcPBnFPPmkHkwsX2s8VFtptVrJtrEFsugBzogNBquhmhZ8VR/1uLzPS+ZznH/vw\nQ3e1znGprU1WX5+7+ljozYc8dUp7vvENlcycqZ4zZ3SiuVltb70labC/pVNwqHL+fJXU1enRQ4c0\nrb7eLoI0c2biQIxxA9LC0lIVzZihRw4c0LT6endV8voVK1R9662S7CDSWSFNDgK9XwtFIiqOF0IK\nVlRIhYXuSuSOlSt1dN06XWptdd9jcW2tTHy11gQCWvKb3yhcXa2+CxfsgDgefAfLyhSeMcN9v41r\n1iRef5xzIlXgeiW1HgIA+IccT0wNTo5iMCiVlkpr1gwGNd58wuXLpVDIn+qu06cPFiuKRqWNG+3t\nq3PmSPEqlJMufzFdcSRMCd4czYkOCLJZ3fQXNTWKtbersKREdY2NGujp0SfNzW6xHUl26yHLSsj3\nrI9G9cDGjdqxcqU+2rpVVbfcoiXr1yeMzZs/agIBfeX997X/+9/Xe88+627nrV++XA9s2pTwPjve\neUex9nZ7a6wxQ3qASlLRjBn60ltvuVVgty5b5vYg9eaOhmtqEl5f1dCgZdu361f19erz5IRWzp+v\n4
1u3uq8tLClR/Re/qAsff5wyd3Z3U5POxvNqvxR/bqyfU3J+J4EmACAVcjxx5XC2kDY324Gl94cj\n7yrfmjX+rbTddpv934YGKV6ZUpGIdPvtg9cb6wrDcDmqfmClcUrLZS9Sv7b5pspJ/PL+/SqdOVOP\nHjqkB7ds0f3r16ts9myZ+NbVwtLSwZzPeNBZvXChQuXlerGxUe/98peKnT6tE83N+vWcOQn5r95q\nslZfn15/8kl7BdPzC9OBeF6lUwCpddcuxdrbVRAKyerrSxl0StJVd9+tA08/rYttbXr1scd05sCB\nhGtJ9kpo1S23SBq6zbgwni9aWFKiqxYtUk9Xl4pqa7Vsxw73flw8edLNnX3h9tsT7lvnkSNq27tX\nlz15tePN3QUAIFMEnsg7KfMShstRzNY20vXr7fNu3z60HUqm15voIj+QRK7LcEZbsCiTLZ3ec+9Y\nuVIvNjbq2IYNQwKjafX1+puPP3ZX7F6JRtXT2ekWIwqWlrrnrJw/X/XRqB7atk3nW1rs1UxPxdue\n9nY9W1en3y5apJeXLtXiZ56xV0vjBnp7E4JRSTr7xz+6Y3MLIBUWaqCnJyEnM1hRoaIZM/SupOC0\nabrnJz9JCPRStXW56p57VFpXZ/cNNUb9nmO8AffFkyfdIPKdn/7UvR/OWANlZYqdPq3jW7dq3bx5\ninV2ZtTSJlkuf5mBQXyPgt+YU8gF+nhiavD2vkz+ASlbPSzTnXc816PaKyaZ0fZpvG/t2jFv803o\nQVldrZizRV3pA6NUPSiXbNig1598UjJGjatXu9d3giynb6Zj4NIlt13KvlWrFKqocAPIglBIjatX\n6xc1NVJ8VfLC8eO6cPy4+3pv65SqhgZdbm9X78WLqm5oUO/581Jbm3rPn9e+VasSAr3zH3yg2Jkz\ng9uCJX28dav794FYTCeam/X83Ln663ffdQNuSeqK9+wNlpfrTk/PXue+X+7o0InmZknSpdZW7Wlq\nSvmZZPI5AQDgB3I8gVxK7p/pfM3vHMyJ7tOJKSObOX7ec4cjEX3S3Dykt2RyTuKG+fN14fhxBadN\nU+3ixWl7VUqDOa8N3/2utj74oPp6etTjCW5DlZX66rFjal6xwr32su3bte+pp3Rs/fohFWZDkYiu\nvvdet4CPE8C9Eo26wXBxba0utbYqXF0tU1CggZ4eFYRC+lK84NGLixbpC1u26KX77ksItJM5PUwd\nv120yA2Wk59z3qvTB9Tbb7Nl82b1x2Kqvu22IfmtAAD4ZTQ5ngSeQC55Cx9lsxDRRF0HEy6bRX2k\n7BYs8p5bUsrreIv/XL9ihbpPnHAL9JTNnq2yWbNG/d5jnZ16ft48XW5ttStPO/82FRTomr/6K3dL\nqfeajlAkokcOHkwo3iPZ9//Yhg3q6eiQCQQUKClRYVGRps2erdP79rnHeYNF72tchYVupVonAPa+\nn9H8AiD5s0p+H6kCVgAA/EBxIUxJ485LyHYBn7EYaWutX2NlC++w8jnXxc/enamMJ8dvpPxQ77nT\nXSc5JzEUb3VSvXChSurqRvXedzc16dmrr9avr79elXPnKlxVZQd5AwP2n74+ffLqq0OuWdXQoGuX\nLlV9NKqvfvCBDjz99JD303nkiBtAWn196u3q0tttbeqOt1iR7DYn3m3D3tdI0jVLluirR4+qfvly\n1UejQ4JOyd4iO232bBWGw3r1sccU6+wccn+T76E3V7WwtFSxjo5h83QxeeXz9yhMTswp5AKBJ648\nk6mAz0iFiPwa60T36byCjLb4Trb42bvTT94KsOMJipOrqnofe4PQ4d5755EjutTaqp6ODp3ctUsF\nTj9fr4GBIX0ql23frge3bNEDGzcqHIkkBPnP3XijXl661D2X0/tTkspvuEHRN99UWX29QlVVKqqu\ndu/Ji42Nat2zJ+HS4UhE0+rr9cCmTe61vMe/vHSpJKl01iyd2rv
XvZ8j/dLhvrVrVR+NKlxVpf4L\nF/RJc3NWfjkBAMBosNUWV56lS+1AbuHCkQOxXOdGjmWsyInkraATvZUxl707h+O9L04uZfL4xrtN\neLTv3dmmKtmrmJ/fuFH7Vq3S+Y8+GtwOW1CgqxcvVll9vY6tX6/+S5dkAgFd9bnP6YFNmxSORNzz\nePuHmkBA4enT9dC2bdr//e8nFDjy3oPi2lpZlqXLp04ljM0Eg/paW1vK8SfPrZ7ubvf6M+66S5J0\norl5xPxbenECALKNHE8glc7O0RfwyXVu5FjGipzgh/rUnPsSqqzUIwcODMmNlMYftI82cI11dmrn\n448PqXob6+zUr2+4QT1nzrjHhquq7MqzHsW1tXr08GFJGlJB1nvMzM9/XudbWhQoKVFxTY1aNm9O\n2FJrgkFZ8Z6gjof37NHVixalHHfy3JKk52680e0bWh+NqjAYHDHwnqy/nAAATB3keGJKGndegtPu\nZDQ/gOU6N3IsY0XGxjOnkreCwlZcU6NwTY2qb7tNoYqKlMckbxMe67bl0ea3hiMRFc+YobY339Sz\nV12l1ZGIXlqyRJI04447Eo41hYVDXn+ptVXPz5snSbp/3TotWb9exbW1Q475aOtWte7apVe3btVH\nW7cmBJ3B8nIVTZ8ev8jgv8vv/PSnacedPLfCkYhqFi6UZN+zxtWrR5V/Sy/O/Ec+HvzGnEIuZBx4\nGmN+ZIw5bIx52xjzgjEm9U8WQD4jNxIj4If61M63tCh2+rRODJNXmBxYjbVQ0ljyW508z4GeHvWe\nO+eO6761a1VQVGSfb9o0PbRtm0qvvVYmGJQCg62uL8d7Y0r2Z/7o4cOqX75cRTNmuGOouuUWSVLF\njTeq0MkjLbD/me3t6tKltjb7a/FdQN5xpwq6U80tftEBAMhXGW+1NcYskfSqZVkDxpgfSpJlWd9O\ncRxbbQHgCpPJFuSxvmbHypX66OWXVb1ggUrq6txtrqm23XrzPCUpNH26KufNU7C8XKd//3u3p2Z9\nNKpYR4e7BbggHNZALJZ2TOlawmxdtsxt+5JKSV2dVrzzjnu+Z6++WpdaWyXZ231r7rgjK+1xAADI\nhtFstQ0M9+RwLMva5nm4T9IjmZ4LADC13Ld27ajyCr15moufeUb7Vq1yXzNSDuf5lhbF2tv1SXOz\nwjU1bu7jnqYm3b9u3ZBzv/7Nb2qgp0cFwaAut7frlBMYera+DvT0JKyklt9wg33+NO/BWZV0OH93\nKu4Wlpaq/8KFhNdMX7BAVTffrFeiUfe99cdi7vOxM2fcVV/6bgIApgq/cjz/VtLLPp0LGNao8hIm\nU69OTHrkuvhvtFuQvdtr961alfCakbbenj96VJIUrKjQ9JtukpS4fbVl82b39a9/85t6YONGuz3K\npk1u+5NwdXVC4FkQDCZsZ7148qQb3I62FcnOnTvdc9TefXfCc6UzZ+rhHTt0vqUl4b1V33abJCkw\nTIuYXLfuSWeyjmsq4XsU/MacQi4Mu+JpjNkmqTbFU/9gWdbm+DHfkdRjWdbadOd5/PHHdd1110mS\nIpGIFixYoMbGRkmDE5/HPB7t44MHD458fLz/5U5JikbVGP/6ZBg/jyffY8dkGc+V9PjQpUuaLjvQ\nGvja17Rz5073+UOXLum0pM/Fg7Dk138Qiajj+HHNPXdOoUhE5++9V9d961tu4PpOd7d6Jc2VdHL3\nbv3wzjt16/e+p88vW6b71q7Vv0WjajtzRjPi22yPlpbquq9/3Q2aveMLlJXp90ePauCll1T04ovq\nPHJEhy5dcs/nfX+SHXgHnnhCA93dKvrzn3W5tVWtN96ou378Y/u5khK9KzsfdGU8wPy3aFTz/u7v\nFHrhBS3+2c/0xsGDCe93z1tv6ezbb2uu7FXdwBNP5Pzzk6Su+C8I3pV0OBrV3/P9lsc8nvSPDyZ9\nf8n1eHicf48PHjyozvgvGz/
88EONxrjaqRhjHpf0XyXdZ1nW5TTHkOOJiUf/y7HJdb9SXLGGa/Ux\nUhuQkXJCX1qyRCeamxO2u16/YoVC8UJGgZISDfT26kRzs0KVlZr5wAO6ePJkwtbeWGennpszx80B\nLZs9WxeOH3fbotQvX64HNm3S85/+tC62tqogGNSX9+9PaB+T6n1k0uJksrbumazjAgBMnKz28TTG\nfEHSjyXda1lW+zDHEXhi4tH/cmwaG3PbrxTwGEt/zuGCN+f5WEeHPmluVqCsTDPuukv9ly65+Z3e\nXpivRKMp+4p6A6uCcDihaFB9NKoHNm7U6khEvefOSbK30/7Nxx/7ek9G835zZbKOCwAwcbIdeL4n\nKSTpbPxLb1iW9USK4wg84audnq148MkVvkLMnJpcXmxsTBkAjiRdwBrr7NRzN97oFh8qrq3VpdbW\nISt06VbuvIHVq4895lbHnX7zzXp41y6FIxH9oqZGsfZ2FZaU6Oqf/1xLv/KVMY9zrMeM5/zIL3yP\ngt+YU/DbaALPgkxPblnWHMuy6i3Laoj/GRJ0AsgT9CvFJDKW/pxe6YoRhSMR1Sxc6J4z+uabKXth\npuuR6S2UdN/atapfvlz10agbdO5uatK0T31KBeGwom+8oZLaVKURRh7nWI8Zz/kBAJho48rxHNUF\nWPEEAIxBpls3h8s1zOZ20LGu0CaPc99TTw1ZoRxP3iQ5lwCAiZbVFU8AALJhtK1YkqVbsRzPOVNJ\nbh8y1hXa5HGmWqEc7r2MpLimRuHqagJOAMCkQuCJvOOUdAb8wpyaGvwMLoeTHCgmB4kjzafkcaYK\nXMfzXs63tIy59ygmN75HwW/MKeQCgScAAGOQHCiON+Adz+rmaMYHAMBkQI4nAABjMNnbh0z28QEA\npp6stlMZwyAIPAEAAABgiqK4EKYk8hLgN+YU/MR8gt+YU/Abcwq5QOAJAAAAAMgqttoCAAAAADLG\nVlsAAAAAQM4ReCLvkJcAvzGn4CfmE/zGnILfmFPIBQJPAAAAAEBWkeMJAAAAAMgYOZ4AAAAAgJwj\n8ETeIS8BfmNOwU/MJ/iNOQW/MaeQCwSeAAAAAICsIscTAAAAAJAxcjwBAAAAADlH4Im8Q14C/Mac\ngp+YT/Abcwp+Y04hFwg8AQAAAABZRY4nAAAAACBj5HgCAAAAAHKOwBN5h7wE+I05BT8xn+A35hT8\nxpxCLhB4AgAAAACyihxPAAAAAEDGyPEEAAAAAOQcgSfyDnkJ8BtzCn5iPsFvzCn4jTmFXCDwBAAA\nAABkFTmeAAAAAICMkeMJAAAAAMg5Ak/kHfIS4DfmFPzEfILfmFPwG3MKuUDgCQAAAADIKnI8AQAA\nAAAZI8cTAAAAAJBzBJ7IO+QlwG/MKfiJ+QS/MafgN+YUcoHAEwAAAACQVeR4AgAAAAAyRo4nAAAA\nACDnCDyRd8hLgN+YU/AT8wl+Y07Bb8wp5AKBJwAAAAAgq8jxBAAAAABkjBxPAAAAAEDOEXgi75CX\nAL8xp+An5hP8xpyC35hTyAUCTwAAAABAVpHjCQAAAADIGDmeAAAAAICcI/BE3iEvAX5jTsFPzCf4\njTkFvzGnkAsEngAAAACArCLHEwAAAACQMXI8AQAAAAA5R+CJvENeAvzGnIKfmE/wG3MKfmNOIRcI\nPAEAAAAAWUWOJwAAAAAgY+R4AgAAAAByjsATeYe8BPiNOQU/MZ/gN+YU/MacQi4QeAIAAAAAsooc\nTwAAAABAxsjxBAAAAADkHIEn8g55CfAbcwp+Yj7Bb8wp+I05hVwg8AQAAAAAZBU5ngAAAACAjJHj\nCQAAAADIOQJP5B3yEuA35hT8xHyC35hT8BtzCrlA4AkAAAAAyCpyPAEAAAAAGSPHEwAAAACQcwSe\nyDvkJcBvzCn4ifkEvzGn4DfmFHKBwBMAAAAAkFXkeAIAAAAAMkaOJwAAAAAg5wg8kXfIS4Dfm
FPw\nE/MJfmNOwW/MKeQCgScAAAAAIKvI8QQAAAAAZIwcTwAAAABAzmUceBpj/skY87Yx5qAx5lVjzLV+\nDgxIh7wE+I05BT8xn+A35hT8xpxCLoxnxfOfLcu6xbKsBZI2SfpfPo0JGNbBgwdzPQRMMcwp+In5\nBL8xp+A35hRyIePA07Ks856HZZLaxz8cYGSdnZ25HgKmGOYU/MR8gt+YU/Abcwq5EBjPi40xT0v6\nz5IuSrrLlxEBAAAAAKaUYVc8jTHbjDF/SvHnYUmyLOs7lmXNkrRG0r9MwHgBffjhh7keAqYY5hT8\nxHyC35hT8BtzCrngSzsVY8wsSS9blvXZFM/RSwUAAAAAprCR2qlkvNXWGDPHsqz34g+XSzqQyQAA\nAAAAAFNbxiuexpgNkuZK6pd0VNI3LMtq83FsAAAAAIApwJettgAAAAAApDOePp6jZoz5J2PM28aY\ng8aYV40x107EdTE1GWN+ZIw5HJ9TLxhjKnI9JuQ3Y8wKY8w7xph+Y8ytuR4P8pcx5gvGmL8YY94z\nxvyPXI8H+c0Y83+NMaeMMX/K9VgwNRhjrjXG7Ij/m/dnY8x/z/WYkL+MMUXGmH3xGO+QMeZ/D3v8\nRKx4GmOmOX0/jTH/TdItlmV9PesXxpRkjFki6VXLsgaMMT+UJMuyvp3jYSGPGWM+LWlA0v+R9PeW\nZf0hx0NCHjLGFEp6V9L9kj6R9HtJX7Us63BOB4a8ZYxZLKlb0r9bljU/1+NB/jPG1EqqtSzroDGm\nTNL/kxTl+xQyZYwpsSzrojEm47bo4wAAAphJREFUIOk1Sd+yLOu1VMdOyIqnE3TGlUlqn4jrYmqy\nLGubZVkD8Yf7JM3M5XiQ/yzL+otlWUdyPQ7kvTskvW9Z1oeWZfVKek528T0gI5Zl7ZHUketxYOqw\nLKvVsqyD8b93SzosqS63o0I+syzrYvyvIUmFks6mO3ZCAk9JMsY8bYz5SNJKST+cqOtiyvtbSS/n\nehAAIOkaSR97Hh+Pfw0AJh1jzHWSGmT/Eh/IiDGmwBhzUNIpSTssyzqU7tiM26mkuOg2SbUpnvoH\ny7I2W5b1HUnfMcZ8W9K/SPovfl0bU89I8yl+zHck9ViWtXZCB4e8NJo5BYwT1foA5IX4NtsNkp6M\nr3wCGYnvQlwQr7nyijGm0bKsnamO9S3wtCxrySgPXStWqDCCkeaTMeZxSUsl3TchA0LeG8P3KCBT\nn0jyFs+7VvaqJwBMGsaYoKTfSPqlZVmbcj0eTA2WZZ0zxmyRtFDSzlTHTFRV2zmeh8slHZiI62Jq\nMsZ8QdIqScsty7qc6/FgyjG5HgDy1n5Jc4wx1xljQpL+WtKLOR4TALiMMUbSzyUdsizrX3M9HuQ3\nY0y1MSYS/3uxpCUaJs6bqKq2GyTNldQv6aikb1iW1Zb1C2NKMsa8JzuB2UlefsOyrCdyOCTkOWPM\nlyT9VFK1pHOSDliW9WBuR4V8ZIx5UNK/yi6w8HPLsoYtLQ8Mxxjza0n3SqqS1Cbp+5Zlrc7tqJDP\njDGLJO2W9EcNpgf8T8uyfpe7USFfGWPmS/qF7MXMAknPWpb1o7THT0TgCQAAAAC4ck1YVVsAAAAA\nwJWJwBMAAAAAkFUEngAAAACArCLwBAAAAABkFYEnAAAAACCrCDwBAAAAAFlF4AkAAAAAyCoCTwAA\nAABAVv1/lzHCzGUnjVoAAAAASUVORK5CYII=\n", - "text": [ - "" - ] - } - ], - "prompt_number": 5 - } - ], - "metadata": {} - } - ] -} \ No newline at end of file diff --git a/examples/triplet/readme.md b/examples/triplet/readme.md index efcca277d7a..7e2310900b1 100644 --- 
a/examples/triplet/readme.md +++ b/examples/triplet/readme.md @@ -1,6 +1,6 @@ --- title: Triplet Network Tutorial -description: Train and test a triplet network on MNIST data. +description: Train and test a triplet network on data generated by 3D model. category: example include_in_docs: true layout: default @@ -21,41 +21,41 @@ root caffe directory* ## Prepare Datasets -You will first need to download and convert the data from the MNIST -website. To do this, simply run the following commands: +You will first need to convert the data from the some .ply models using +opencv_contrib cnn_3donj module. After construcing the binary files including images and labels and put them in ./data/linemod folder, just run: - ./data/mnist/get_mnist.sh - ./examples/triplet/create_mnist_triplet.sh + ./examples/triplet/create_3d_triplet.sh After running the script there should be two datasets, -`./examples/triplet/mnist_triplet_train_leveldb`, and -`./examples/triplet/mnist_triplet_test_leveldb`. +`./examples/triplet/3d_triplet_train_leveldb`, and +`./examples/triplet/3d_triplet_test_leveldb`. ## The Model First, we will define the model that we want to train using the triplet network. We will use the convolutional net defined in -`./examples/triplet/mnist_triplet.prototxt`. This model is almost -exactly the same as the [LeNet model](mnist.html), the only difference is that -we have replaced the top layers that produced probabilities over the 10 digit -classes with a linear "feature" layer that produces a 2 dimensional vector. - - layers { - name: "feat" - type: INNER_PRODUCT - bottom: "ip2" - top: "feat" - blobs_lr: 1 - blobs_lr: 2 - inner_product_param { - num_output: 2 - } - } +`./examples/triplet/3d_triplet.prototxt`. 
+ +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip1" + top: "feat" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 4 + } +} ## Define the triplet Network In this section we will define the triplet network used for training. The resulting network is defined in -`./examples/triplet/mnist_triplet_train_test.prototxt`. +`./examples/triplet/3d_triplet_train_test.prototxt`. ### Reading in the Triplet Data @@ -69,9 +69,9 @@ images (`triplet_data`) and the label (`sim`) is not nessesary in our method. top: "triplet_data" top: "sim" data_param { - source: "examples/triplet/mnist-triplet-train-leveldb" + source: "examples/triplet/3d-triplet-train-leveldb" scale: 0.00390625 - batch_size: 64 + batch_size: 69 } include: { phase: TRAIN } } @@ -80,27 +80,31 @@ In order to pack a triplet of images into the same blob in the database we pack image per channel. We want to be able to work with these three images separately, so we add a slice layer after the data layer. This takes the `triplet_data` and slices it along the channel dimension so that we have a single image in `data` -and its positive image in `data_pos.` & its negative image in `data_neg.` - - layers { - name: "slice_triplet" - type: SLICE - bottom: "triplet_data" - top: "data" - top: "data_pos" - top: "data_neg" - slice_param { - slice_dim: 1 - slice_point: 1 - slice_point: 2 +and its positive image in `data_pos.` & its negative image in `data_neg.`, as described in paper for 3D object classification and pose estimation, a pair wise term is also need alone with the triplet part. + +layer { + name: "slice_triplet" + type: "Slice" + bottom: "triplet_data" + top: "data" + top: "data_true" + top: "data_false" + top: "data_p1" + top: "data_p2" + slice_param { + slice_dim: 1 + slice_point: 1 + slice_point: 2 + slice_point: 3 + slice_point: 4 } - } +} ### Building the First part of the triplet Net Now we can specify the first side of the triplet net. 
This side operates on `data` and produces `feat`. Starting from the net in -`./examples/triplet/mnist_triplet.prototxt` we add default weight fillers. Then +`./examples/triplet/3d_triplet.prototxt` we add default weight fillers. Then we name the parameters of the convolutional and inner product layers. Naming the parameters allows Caffe to share the parameters between layers on three channels of the triplet net. In the definition this looks like: @@ -138,24 +142,27 @@ paste it. Then we change the name of each layer, input, and output by appending To train the network we will optimize a triplet loss function proposed in: This cost function is implemented with the `TRIPLET_LOSS` layer: - layers { - name: "loss" - type: TRIPLET_LOSS - triplet_loss_param { - margin: 0.2 - } - bottom: "feat" - bottom: "feat_pos" - bottom: "feat_neg" - bottom: "sim" - top: "loss" - } +layer { + name: "loss" + type: "TripletLoss" + bottom: "feat" + bottom: "feat_true" + bottom: "feat_false" + bottom: "feat_p1" + bottom: "feat_p2" + bottom: "sim" + top: "loss" + triplet_loss_param { + margin: 1 + losstype: 1 + } +} ## Define the Solver Nothing special needs to be done to the solver besides pointing it at the correct model file. The solver is defined in -`./examples/triplet/mnist_triplet_solver.prototxt`. +`./examples/triplet/3d_triplet_solver.prototxt`. ## Training and Testing the Model @@ -163,7 +170,7 @@ Training the model is simple after you have written the network definition protobuf and solver protobuf files. 
Simply run `./examples/triplet/train_mnist_triplet.sh`: - ./examples/triplet/train_mnist_triplet.sh + ./examples/triplet/train_3d_triplet.sh # Plotting the results @@ -171,15 +178,9 @@ First, we can draw the model and triplet networks by running the following commands that draw the DAGs defined in the .prototxt files: ./python/draw_net.py \ - ./examples/triplet/mnist_triplet.prototxt \ - ./examples/triplet/mnist_triplet.png + ./examples/triplet/3d_triplet.prototxt \ + ./examples/triplet/3d_triplet.png ./python/draw_net.py \ - ./examples/triplet/mnist_triplet_train_test.prototxt \ - ./examples/triplet/mnist_triplet_train_test.png - -Second, we can load the learned model and plot the features using the iPython -notebook: - - ipython notebook ./examples/triplet/mnist_triplet.ipynb - + ./examples/triplet/3d_triplet_train_test.prototxt \ + ./examples/triplet/3d_triplet_train_test.png \ No newline at end of file diff --git a/src/caffe/layers/triplet_loss_layer.cpp b/src/caffe/layers/triplet_loss_layer.cpp index 16082620a01..a4e6402c76a 100644 --- a/src/caffe/layers/triplet_loss_layer.cpp +++ b/src/caffe/layers/triplet_loss_layer.cpp @@ -67,8 +67,9 @@ void TripletLossLayer::Forward_cpu( diff_par.mutable_cpu_data()); // d_i-e_i: pair wise const int channels = bottom[0]->channels(); Dtype margin = this->layer_param_.triplet_loss_param().margin(); + Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); Dtype loss(0.0); - + if (losstype == 0) { for (int i = 0; i < bottom[0]->num(); ++i) { // Triplet loss accumulation // Loss component calculated from a and b @@ -93,12 +94,43 @@ void TripletLossLayer::Forward_cpu( } loss = loss / static_cast(bottom[0]->num()) / Dtype(2); top[0]->mutable_cpu_data()[0] = loss; + } else { + for (int i = 0; i < bottom[0]->num(); ++i) { + // softTriplet loss accumulation + // Loss component calculated from a and b + dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(channels, + diff_pos.cpu_data() + (i*channels), diff_pos.cpu_data() + 
(i*channels)); + // a b is a similar pair for triplet + dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i]; + dist_sq_.mutable_cpu_data()[i] += margin; + // Loss component calculated from a and c + dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(channels, + diff_neg.cpu_data() + (i*channels), diff_neg.cpu_data() + (i*channels)); + // a c is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[i] = 1 - \ +dist_sq_neg.cpu_data()[i] / dist_sq_.mutable_cpu_data()[i]; + // loss accumulated accumulated by the triplet part + loss += std::max(dist_sq_.cpu_data()[i], Dtype(0.0)); + // Pair wise loss accumulation + // Loss component calculated from d and e + dist_sq_par.mutable_cpu_data()[i] = caffe_cpu_dot(channels, + diff_par.cpu_data() + (i*channels), diff_par.cpu_data() + (i*channels)); + // d e is a similar pair for pair wise + // loss accumulated by the pair wise part + loss += dist_sq_par.cpu_data()[i]; + } + loss = loss / static_cast(bottom[0]->num()) / Dtype(2); + top[0]->mutable_cpu_data()[0] = loss; + } } template void TripletLossLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { Dtype margin = this->layer_param_.triplet_loss_param().margin(); + Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); + if (losstype == 0) { + // BP for feat1 if (propagate_down[0]) { const Dtype sign = 1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / @@ -127,6 +159,7 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, } } } + // BP for feat2 and feat3 for (int i = 1; i < 3; ++i) { if (propagate_down[i]) { const Dtype sign = (i == 1) ? 
-1 : 1; @@ -160,6 +193,80 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, } } } + } else { + // BP for data1(feat1) + if (propagate_down[0]) { + const Dtype alpha = top[0]->cpu_diff()[0] / + static_cast(bottom[0]->num()); + int num = bottom[0]->num(); + int channels = bottom[0]->channels(); + for (int j = 0; j < num; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + caffe_cpu_axpby( + channels, + alpha*dist_sq_neg.mutable_cpu_data()[j]\ +/((dist_sq_pos.mutable_cpu_data()[j]+margin)\ +*(dist_sq_pos.mutable_cpu_data()[j]+margin)), + diff_pos.cpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + caffe_cpu_axpby( + channels, + -alpha*(dist_sq_pos.mutable_cpu_data()[j] + margin)\ +/((dist_sq_pos.mutable_cpu_data()[j] + margin)\ +*(dist_sq_pos.mutable_cpu_data()[j] + margin)), + diff_neg.cpu_data() + (j*channels), + Dtype(1.0), + bout + (j*channels)); + } else { + caffe_set(channels, Dtype(0), bout + (j*channels)); + } + } + } + // BP for positive data(feat2) + if (propagate_down[1]) { + const Dtype alpha = top[0]->cpu_diff()[0] / + static_cast(bottom[1]->num()); + int num = bottom[1]->num(); + int channels = bottom[1]->channels(); + for (int j = 0; j < num; ++j) { + Dtype* bout = bottom[1]->mutable_cpu_diff(); + if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + caffe_cpu_axpby( + channels, + -alpha*dist_sq_neg.mutable_cpu_data()[j]\ +/((dist_sq_pos.mutable_cpu_data()[j] + margin)\ +*(dist_sq_pos.mutable_cpu_data()[j] + margin)), + diff_pos.cpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + } else { + caffe_set(channels, Dtype(0), bout + (j*channels)); + } + } + } + // BP for negative data(feat3) + if (propagate_down[2]) { + const Dtype alpha = top[0]->cpu_diff()[0] / + static_cast(bottom[2]->num()); + int num = bottom[2]->num(); + int channels = bottom[2]->channels(); + for (int j = 0; j < num; ++j) { + Dtype* bout = bottom[2]->mutable_cpu_diff(); + if ((dist_sq_.cpu_data()[j]) > 
Dtype(0.0)) { + caffe_cpu_axpby( + channels, + alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), + diff_neg.cpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + } else { + caffe_set(channels, Dtype(0), bout + (j*channels)); + } + } + } + } // pair wise back for (int i = 3; i < 5; ++i) { if (propagate_down[i]) { diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index 8a8196eb21c..8b90554b689 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -438,6 +438,7 @@ message ContrastiveLossParameter { message TripletLossParameter { //margin for negative triplet optional float margin = 1 [default = 1.0]; + optional uint32 losstype = 2 [default = 1]; } message ConvolutionParameter { From 8dcf4a4e960687433f11a082aad6976488bb8cdc Mon Sep 17 00:00:00 2001 From: Wangyida Date: Tue, 4 Aug 2015 23:46:13 +0800 Subject: [PATCH 35/82] Caffe gtest on triplet, GPU codes, modification on training data creatorfor review --- examples/triplet/convert_3d_triplet_data.cpp | 39 ++-- examples/triplet/create_3d_triplet.sh | 6 +- src/caffe/layers/triplet_loss_layer.cu | 166 +++++++++++++++++- ...loss_layer => test_triplet_loss_layer.cpp} | 97 +++++----- 4 files changed, 236 insertions(+), 72 deletions(-) rename src/caffe/test/{test_triplet_loss_layer => test_triplet_loss_layer.cpp} (65%) diff --git a/examples/triplet/convert_3d_triplet_data.cpp b/examples/triplet/convert_3d_triplet_data.cpp index 280755fddab..1c9806c0e2a 100644 --- a/examples/triplet/convert_3d_triplet_data.cpp +++ b/examples/triplet/convert_3d_triplet_data.cpp @@ -3,6 +3,7 @@ // The MNIST dataset could be downloaded at // http://yann.lecun.com/exdb/mnist/ #include // NOLINT(readability/streams) +#include #include #include "caffe/proto/caffe.pb.h" #include "caffe/util/math_functions.hpp" @@ -28,7 +29,8 @@ void read_image(std::ifstream* image_file, std::ifstream* label_file, } void convert_dataset(const char* image_filename, const char* label_filename, - const char* 
db_filename) { + const char* db_filename, const char* class_number) { + int class_num = atoi(class_number); // Open files std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); @@ -77,16 +79,19 @@ void convert_dataset(const char* image_filename, const char* label_filename, const int kMaxKeyLength = 10; char key[kMaxKeyLength]; std::string value; - caffe::Datum datum; datum.set_channels(5); // one channel for each image in the triplet and pair datum.set_height(rows); datum.set_width(cols); LOG(INFO) << "A total of " << num_items << " items."; LOG(INFO) << "Rows: " << rows << " Cols: " << cols; - for (unsigned int itemid = 0; itemid < 10 * num_items; ++itemid) {\ - int i = caffe::caffe_rng_rand() % num_items; // pick triplet groups - int j = caffe::caffe_rng_rand() % num_items; + // iteration in the samples of all class + for (unsigned int itemid = 0; itemid < 5*num_items/class_num; ++itemid) { + // iteration in the samples in one class + for (unsigned int class_ind = 0; class_ind < class_num; ++class_ind) { + // use reference sample one by one at each iteration + int i = itemid % num_items + class_ind*num_items/class_num; + int j = caffe::caffe_rng_rand() % num_items; // pick triplet groups int k = caffe::caffe_rng_rand() % num_items; int l = caffe::caffe_rng_rand() % num_items; // pick pair wise groups int m = caffe::caffe_rng_rand() % num_items; @@ -113,6 +118,9 @@ void convert_dataset(const char* image_filename, const char* label_filename, int ik_diff_x = static_cast(*(label_i+1)-*(label_k+1)); int ik_diff_y = static_cast(*(label_i+2)-*(label_k+2)); int ik_diff_z = static_cast(*(label_i+3)-*(label_k+3)); + int lm_diff_x = static_cast(*(label_l+1)-*(label_m+1)); + int lm_diff_y = static_cast(*(label_l+2)-*(label_m+2)); + int lm_diff_z = static_cast(*(label_l+3)-*(label_m+3)); int ij_x = ij_diff_x*ij_diff_x; int ij_y = ij_diff_y*ij_diff_y; @@ -120,27 +128,32 @@ void 
convert_dataset(const char* image_filename, const char* label_filename, int ik_x = ik_diff_x*ik_diff_x; int ik_y = ik_diff_y*ik_diff_y; int ik_z = ik_diff_z*ik_diff_z; + int lm_x = lm_diff_x*lm_diff_x; + int lm_y = lm_diff_y*lm_diff_y; + int lm_z = lm_diff_z*lm_diff_z; - int dist_ij = ij_x + ij_y + ij_z; - int dist_ik = ik_x + ik_y + ik_z; + float dist_ij = std::sqrt(ij_x + ij_y + ij_z); + float dist_ik = std::sqrt(ik_x + ik_y + ik_z); + float dist_lm = std::sqrt(lm_x + lm_y + lm_z); if ((*label_i == *label_j) && (*label_i == *label_k)) triplet_class_same = true; - if ((dist_ij < dist_ik) && (triplet_class_same)) + if ((dist_ij < 100 && dist_ik > 100*sqrt(2)) && (triplet_class_same)) triplet_pose_pass = true; if ((*label_i == *label_j) && (*label_i != *label_k)) triplet_class_pass = true; - if (*label_l == *label_m) + if (*label_l == *label_m && dist_lm < 100/2) pair_class_pass = true; if ((triplet_class_pass || triplet_pose_pass) && pair_class_pass) { datum.set_label(1); datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", itemid); + snprintf(key, kMaxKeyLength, "%08d", itemid*class_num+class_ind); db->Put(leveldb::WriteOptions(), std::string(key), value); } else { - itemid--; + class_ind--; datum.set_label(0); } - } + } // iteration in the samples of all class + } // iteration in the samples in one class delete db; delete pixels; } @@ -157,7 +170,7 @@ int main(int argc, char** argv) { "You should gunzip them after downloading.\n"); } else { google::InitGoogleLogging(argv[0]); - convert_dataset(argv[1], argv[2], argv[3]); + convert_dataset(argv[1], argv[2], argv[3], argv[4]); } return 0; } diff --git a/examples/triplet/create_3d_triplet.sh b/examples/triplet/create_3d_triplet.sh index 3cd8ee469ce..f29adbf6a8f 100755 --- a/examples/triplet/create_3d_triplet.sh +++ b/examples/triplet/create_3d_triplet.sh @@ -12,10 +12,12 @@ rm -rf ./examples/triplet/3d_triplet_test_leveldb $EXAMPLES/convert_3d_triplet_data.bin \ $DATA/binary_image_train \ 
$DATA/binary_label_train \ - ./examples/triplet/3d_triplet_train_leveldb + ./examples/triplet/3d_triplet_train_leveldb \ + 4 $EXAMPLES/convert_3d_triplet_data.bin \ $DATA/binary_image_test \ $DATA/binary_label_test \ - ./examples/triplet/3d_triplet_test_leveldb + ./examples/triplet/3d_triplet_test_leveldb \ + 4 echo "Done." diff --git a/src/caffe/layers/triplet_loss_layer.cu b/src/caffe/layers/triplet_loss_layer.cu index fa1259d0160..4f7391af787 100644 --- a/src/caffe/layers/triplet_loss_layer.cu +++ b/src/caffe/layers/triplet_loss_layer.cu @@ -23,6 +23,11 @@ void TripletLossLayer::Forward_gpu( bottom[0]->gpu_data(), // a bottom[2]->gpu_data(), // c diff_neg.mutable_gpu_data()); // a_i-c_i + caffe_gpu_sub( + count, + bottom[3]->gpu_data(), // d + bottom[4]->gpu_data(), // e + diff_par.mutable_gpu_data()); // d_i-e_i caffe_gpu_powx( count, diff_pos.mutable_gpu_data(), // a_i-b_i @@ -33,31 +38,89 @@ void TripletLossLayer::Forward_gpu( diff_neg.mutable_gpu_data(), // a_i-c_i Dtype(2), diff_sq_neg.mutable_gpu_data()); // (a_i-c_i)^2 + caffe_gpu_powx( + count, + diff_par.mutable_gpu_data(), // d_i-e_i + Dtype(2), + diff_sq_par.mutable_gpu_data()); // (d_i-e_i)^2 + caffe_gpu_gemv( + CblasNoTrans, + bottom[0]->num(), + bottom[0]->channels(), + Dtype(1.0), + diff_sq_pos.gpu_data(), // (a_i-b_i)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_pos.mutable_gpu_data()); // \Sum (a_i-b_i)^2 + caffe_gpu_gemv( + CblasNoTrans, + bottom[0]->num(), + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (a_i-c_i)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data()); // \Sum (a_i-c_i)^2 + caffe_gpu_gemv( + CblasNoTrans, + bottom[0]->num(), + bottom[0]->channels(), + Dtype(1.0), + diff_sq_par.gpu_data(), // (a_i-c_i)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_par.mutable_gpu_data()); // \Sum (a_i-c_i)^2 const int channels = bottom[0]->channels(); Dtype margin = this->layer_param_.triplet_loss_param().margin(); + Dtype losstype = 
this->layer_param_.triplet_loss_param().losstype(); Dtype loss(0.0); - // Loss component calculated from ab + + if (losstype == 0) { for (int i = 0; i < bottom[0]->num(); ++i) { - /*dist_sq_pos.mutable_gpu_data()[i] = caffe_gpu_dot(channels, - diff_pos.gpu_data() + (i*channels), diff_pos.gpu_data() + (i*channels));*/ + // Loss component calculated from ab // ab is a similar pair dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.gpu_data()[i]; // Loss component calculated from ac - /*dist_sq_neg.mutable_gpu_data()[i] = caffe_gpu_dot(channels, - diff_neg.gpu_data() + (i*channels), diff_neg.gpu_data() + (i*channels));*/ // ac is a dissimilar pair dist_sq_.mutable_gpu_data()[i] -= dist_sq_neg.gpu_data()[i]; loss += std::max(margin + dist_sq_.gpu_data()[i], Dtype(0.0)); + // Pair wise loss accumulation + // d e is a similar pair for pair wise + // loss accumulated by the pair wise part + loss += dist_sq_par.gpu_data()[i]; } loss = loss / static_cast(bottom[0]->num()) / Dtype(2); top[0]->mutable_gpu_data()[0] = loss; + } else { + for (int i = 0; i < bottom[0]->num(); ++i) { + // softTriplet loss accumulation + // Loss component calculated from a and b + // a b is a similar pair for triplet + dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.gpu_data()[i]; + dist_sq_.mutable_gpu_data()[i] += margin; + // Loss component calculated from a and c + // a c is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[i] = 1 - \ +dist_sq_neg.gpu_data()[i] / dist_sq_.mutable_gpu_data()[i]; + // loss accumulated accumulated by the triplet part + loss += std::max(dist_sq_.gpu_data()[i], Dtype(0.0)); + // Pair wise loss accumulation + // d e is a similar pair for pair wise + // loss accumulated by the pair wise part + loss += dist_sq_par.gpu_data()[i]; + } + loss = loss / static_cast(bottom[0]->num()) / Dtype(2); + top[0]->mutable_gpu_data()[0] = loss; + } } template void TripletLossLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { Dtype margin 
= this->layer_param_.triplet_loss_param().margin(); + Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); // there must be further check to ensure the gradient calc + if (losstype == 0) { if (propagate_down[0]) { const Dtype sign = 1; const Dtype alpha = sign * top[0]->gpu_diff()[0] / @@ -119,6 +182,99 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, } } } + } + } else { + // BP for data1(feat1) + if (propagate_down[0]) { + const Dtype alpha = top[0]->gpu_diff()[0] / + static_cast(bottom[0]->num()); + int num = bottom[0]->num(); + int channels = bottom[0]->channels(); + for (int j = 0; j < num; ++j) { + Dtype* bout = bottom[0]->mutable_gpu_diff(); + if ((dist_sq_.gpu_data()[j]) > Dtype(0.0)) { + caffe_gpu_axpby( + channels, + alpha*dist_sq_neg.mutable_gpu_data()[j]\ +/((dist_sq_pos.mutable_gpu_data()[j]+margin)\ +*(dist_sq_pos.mutable_gpu_data()[j]+margin)), + diff_pos.gpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + caffe_gpu_axpby( + channels, + -alpha*(dist_sq_pos.mutable_gpu_data()[j] + margin)\ +/((dist_sq_pos.mutable_gpu_data()[j] + margin)\ +*(dist_sq_pos.mutable_gpu_data()[j] + margin)), + diff_neg.gpu_data() + (j*channels), + Dtype(1.0), + bout + (j*channels)); + } else { + caffe_set(channels, Dtype(0), bout + (j*channels)); + } + } + } + // BP for positive data(feat2) + if (propagate_down[1]) { + const Dtype alpha = top[0]->gpu_diff()[0] / + static_cast(bottom[1]->num()); + int num = bottom[1]->num(); + int channels = bottom[1]->channels(); + for (int j = 0; j < num; ++j) { + Dtype* bout = bottom[1]->mutable_gpu_diff(); + if ((dist_sq_.gpu_data()[j]) > Dtype(0.0)) { + caffe_gpu_axpby( + channels, + -alpha*dist_sq_neg.mutable_gpu_data()[j]\ +/((dist_sq_pos.mutable_gpu_data()[j] + margin)\ +*(dist_sq_pos.mutable_gpu_data()[j] + margin)), + diff_pos.gpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + } else { + caffe_set(channels, Dtype(0), bout + (j*channels)); + } + } + } + // BP for negative 
data(feat3) + if (propagate_down[2]) { + const Dtype alpha = top[0]->gpu_diff()[0] / + static_cast(bottom[2]->num()); + int num = bottom[2]->num(); + int channels = bottom[2]->channels(); + for (int j = 0; j < num; ++j) { + Dtype* bout = bottom[2]->mutable_gpu_diff(); + if ((dist_sq_.gpu_data()[j]) > Dtype(0.0)) { + caffe_gpu_axpby( + channels, + alpha/(dist_sq_pos.mutable_gpu_data()[j] + margin), + diff_neg.gpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + } else { + caffe_set(channels, Dtype(0), bout + (j*channels)); + } + } + } + } + // pair wise back + for (int i = 3; i < 5; ++i) { + if (propagate_down[i]) { + const Dtype sign = (i == 3) ? 1 : -1; + const Dtype alpha = sign * top[0]->gpu_diff()[0] / + static_cast(bottom[i]->num()); + int num = bottom[i]->num(); + int channels = bottom[i]->channels(); + for (int j = 0; j < num; ++j) { + Dtype* bout = bottom[i]->mutable_gpu_diff(); // similar pairs + caffe_gpu_axpby( + channels, + alpha, + diff_par.gpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + } + } } } diff --git a/src/caffe/test/test_triplet_loss_layer b/src/caffe/test/test_triplet_loss_layer.cpp similarity index 65% rename from src/caffe/test/test_triplet_loss_layer rename to src/caffe/test/test_triplet_loss_layer.cpp index 4050a35d80b..ccbefbd059f 100644 --- a/src/caffe/test/test_triplet_loss_layer +++ b/src/caffe/test/test_triplet_loss_layer.cpp @@ -22,9 +22,11 @@ class TripletLossLayerTest : public MultiDeviceTest { protected: TripletLossLayerTest() - : blob_bottom_data_i_(new Blob(512, 2, 1, 1)), - blob_bottom_data_j_(new Blob(512, 2, 1, 1)), - blob_bottom_data_k_(new Blob(512, 2, 1, 1)), + : blob_bottom_data_i_(new Blob(512, 1, 1, 1)), + blob_bottom_data_j_(new Blob(512, 1, 1, 1)), + blob_bottom_data_k_(new Blob(512, 1, 1, 1)), + blob_bottom_data_l_(new Blob(512, 1, 1, 1)), + blob_bottom_data_m_(new Blob(512, 1, 1, 1)), blob_bottom_y_(new Blob(512, 1, 1, 1)), blob_top_loss_(new Blob()) { // fill the values @@ -38,6 
+40,10 @@ class TripletLossLayerTest : public MultiDeviceTest { blob_bottom_vec_.push_back(blob_bottom_data_j_); filler.Fill(this->blob_bottom_data_k_); blob_bottom_vec_.push_back(blob_bottom_data_k_); + filler.Fill(this->blob_bottom_data_l_); + blob_bottom_vec_.push_back(blob_bottom_data_l_); + filler.Fill(this->blob_bottom_data_m_); + blob_bottom_vec_.push_back(blob_bottom_data_m_); for (int i = 0; i < blob_bottom_y_->count(); ++i) { blob_bottom_y_->mutable_cpu_data()[i] = caffe_rng_rand() % 2; // 0 or 1 } @@ -48,6 +54,8 @@ class TripletLossLayerTest : public MultiDeviceTest { delete blob_bottom_data_i_; delete blob_bottom_data_j_; delete blob_bottom_data_k_; + delete blob_bottom_data_l_; + delete blob_bottom_data_m_; delete blob_bottom_y_; delete blob_top_loss_; } @@ -55,6 +63,8 @@ class TripletLossLayerTest : public MultiDeviceTest { Blob* const blob_bottom_data_i_; Blob* const blob_bottom_data_j_; Blob* const blob_bottom_data_k_; + Blob* const blob_bottom_data_l_; + Blob* const blob_bottom_data_m_; Blob* const blob_bottom_y_; Blob* const blob_top_loss_; vector*> blob_bottom_vec_; @@ -71,85 +81,68 @@ TYPED_TEST(TripletLossLayerTest, TestForward) { layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // manually compute to compare const Dtype margin = layer_param.triplet_loss_param().margin(); + const Dtype losstype = layer_param.triplet_loss_param().losstype(); const int num = this->blob_bottom_data_i_->num(); const int channels = this->blob_bottom_data_i_->channels(); Dtype loss(0); + if (losstype == 0) { for (int i = 0; i < num; ++i) { Dtype dist_sq(0); + Dtype dist_par(0); for (int j = 0; j < channels; ++j) { Dtype diff_pos = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - this->blob_bottom_data_j_->cpu_data()[i*channels+j]; dist_sq += diff_pos*diff_pos; Dtype diff_neg = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - - this->blob_bottom_data_j_->cpu_data()[i*channels+j]; + this->blob_bottom_data_k_->cpu_data()[i*channels+j]; dist_sq -= 
diff_neg*diff_neg; + Dtype diff_par = this->blob_bottom_data_l_->cpu_data()[i*channels+j] - + this->blob_bottom_data_m_->cpu_data()[i*channels+j]; + dist_par = diff_par*diff_par; } - loss += std::max(margin + dist_sq, 0.0); - /*if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs - loss += dist_sq; - } else { - Dtype dist = std::max(margin - sqrt(dist_sq), 0.0); - loss += dist*dist; - }*/ + loss += std::max(margin + dist_sq, Dtype(0.0)); + loss += dist_par; } - loss /= static_cast(num) * Dtype(2); - EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); -} - -TYPED_TEST(TripletLossLayerTest, TestGradient) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - TripletLossLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - GradientChecker checker(1e-2, 1e-2, 1701); - // check the gradient for the first two bottom layers - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_, 0); - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_, 1); -} - -/*TYPED_TEST(TripletLossLayerTest, TestForwardLegacy) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - layer_param.mutable_triplet_loss_param()->set_legacy_version(true); - TripletLossLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - // manually compute to compare - const Dtype margin = layer_param.triplet_loss_param().margin(); - const int num = this->blob_bottom_data_i_->num(); - const int channels = this->blob_bottom_data_i_->channels(); - Dtype loss(0); + } else { for (int i = 0; i < num; ++i) { Dtype dist_sq(0); + Dtype dist_par(0); for (int j = 0; j < channels; ++j) { - Dtype diff = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - + Dtype diff_pos = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - this->blob_bottom_data_j_->cpu_data()[i*channels+j]; - 
dist_sq += diff*diff; - } - if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs - loss += dist_sq; - } else { - loss += std::max(margin - dist_sq, Dtype(0.0)); + dist_sq += diff_pos*diff_pos; + dist_sq += margin; + Dtype diff_neg = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - + this->blob_bottom_data_k_->cpu_data()[i*channels+j]; + dist_sq = 1 - diff_neg*diff_neg/dist_sq; + Dtype diff_par = this->blob_bottom_data_l_->cpu_data()[i*channels+j] - + this->blob_bottom_data_m_->cpu_data()[i*channels+j]; + dist_par = diff_par*diff_par; } + loss += std::max(dist_sq, Dtype(0.0)); + loss += dist_par; + } } loss /= static_cast(num) * Dtype(2); EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); } -TYPED_TEST(TripletLossLayerTest, TestGradientLegacy) { +TYPED_TEST(TripletLossLayerTest, TestGradient) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; - layer_param.mutable_triplet_loss_param()->set_legacy_version(true); TripletLossLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); GradientChecker checker(1e-2, 1e-2, 1701); - // check the gradient for the first two bottom layers + // check the gradient for the first 5 bottom layers checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, this->blob_top_vec_, 0); checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, this->blob_top_vec_, 1); -}*/ - + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 2); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 4); +} } // namespace caffe From 14add46370ae06ef066ce62209c880799edde4c8 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Sun, 9 Aug 2015 15:11:32 +0800 Subject: [PATCH 36/82] No sclice layer version which could forward a set of triplets together with 1 pair wise --- examples/siamese/convert_lfw_siamese_data.cpp | 121 --- 
examples/siamese/create_lfw_siamese.sh | 21 - examples/siamese/lfw_siamese.prototxt | 113 --- examples/siamese/lfw_siamese_solver.prototxt | 25 - .../siamese/lfw_siamese_train_test.prototxt | 349 -------- examples/siamese/train_lfw_siamese.sh | 5 - examples/triplet/3d_triplet_solver.prototxt | 2 +- .../triplet/3d_triplet_train_test.prototxt | 580 +------------ examples/triplet/convert_3d_triplet_data.cpp | 184 ++-- examples/triplet/convert_lfw_triplet_data.cpp | 126 --- .../triplet/convert_mnist_triplet_data.cpp | 140 --- examples/triplet/create_lfw_triplet.sh | 21 - examples/triplet/create_mnist_triplet.sh | 21 - examples/triplet/lfw_triplet.prototxt | 113 --- examples/triplet/lfw_triplet_solver.prototxt | 25 - .../triplet/lfw_triplet_train_test.prototxt | 500 ----------- examples/triplet/mnist_triplet.prototxt | 113 --- .../triplet/mnist_triplet_solver.prototxt | 25 - .../triplet/mnist_triplet_train_test.prototxt | 800 ------------------ examples/triplet/train_lfw_triplet.sh | 5 - examples/triplet/train_mnist_triplet.sh | 5 - include/caffe/loss_layers.hpp | 7 +- src/caffe/layers/triplet_loss_layer.cpp | 501 ++++++----- src/caffe/proto/caffe.proto | 1 + src/caffe/test/test_triplet_loss_layer.cpp | 79 +- 25 files changed, 443 insertions(+), 3439 deletions(-) delete mode 100644 examples/siamese/convert_lfw_siamese_data.cpp delete mode 100755 examples/siamese/create_lfw_siamese.sh delete mode 100644 examples/siamese/lfw_siamese.prototxt delete mode 100644 examples/siamese/lfw_siamese_solver.prototxt delete mode 100644 examples/siamese/lfw_siamese_train_test.prototxt delete mode 100755 examples/siamese/train_lfw_siamese.sh delete mode 100644 examples/triplet/convert_lfw_triplet_data.cpp delete mode 100644 examples/triplet/convert_mnist_triplet_data.cpp delete mode 100755 examples/triplet/create_lfw_triplet.sh delete mode 100755 examples/triplet/create_mnist_triplet.sh delete mode 100644 examples/triplet/lfw_triplet.prototxt delete mode 100644 
examples/triplet/lfw_triplet_solver.prototxt delete mode 100644 examples/triplet/lfw_triplet_train_test.prototxt delete mode 100644 examples/triplet/mnist_triplet.prototxt delete mode 100644 examples/triplet/mnist_triplet_solver.prototxt delete mode 100644 examples/triplet/mnist_triplet_train_test.prototxt delete mode 100755 examples/triplet/train_lfw_triplet.sh delete mode 100755 examples/triplet/train_mnist_triplet.sh diff --git a/examples/siamese/convert_lfw_siamese_data.cpp b/examples/siamese/convert_lfw_siamese_data.cpp deleted file mode 100644 index fe134ca9b4e..00000000000 --- a/examples/siamese/convert_lfw_siamese_data.cpp +++ /dev/null @@ -1,121 +0,0 @@ -// -// This script converts the MNIST dataset to the leveldb format used -// by caffe to train siamese network. -// Usage: -// convert_mnist_data input_image_file input_label_file output_db_file -// The MNIST dataset could be downloaded at -// http://yann.lecun.com/exdb/mnist/ -#include // NOLINT(readability/streams) -#include - -#include "glog/logging.h" -#include "google/protobuf/text_format.h" -#include "leveldb/db.h" -#include "stdint.h" - -#include "caffe/proto/caffe.pb.h" -#include "caffe/util/math_functions.hpp" - -uint32_t swap_endian(uint32_t val) { - val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); - return (val << 16) | (val >> 16); -} - -void read_image(std::ifstream* image_file, std::ifstream* label_file, - uint32_t index, uint32_t rows, uint32_t cols, - char* pixels, char* label) { - image_file->seekg(index * rows * cols + 16); - image_file->read(pixels, rows * cols); - label_file->seekg(index + 8); - label_file->read(label, 1); -} - -void convert_dataset(const char* image_filename, const char* label_filename, - const char* db_filename) { - // Open files - std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); - std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); - CHECK(image_file) << "Unable to open file " << image_filename; - 
CHECK(label_file) << "Unable to open file " << label_filename; - // Read the magic and the meta data - uint32_t magic; - uint32_t num_items; - uint32_t num_labels; - uint32_t rows; - uint32_t cols; - - image_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - label_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - image_file.read(reinterpret_cast(&num_items), 4); - num_items = swap_endian(num_items); - label_file.read(reinterpret_cast(&num_labels), 4); - num_labels = swap_endian(num_labels); - CHECK_EQ(num_items, num_labels); - image_file.read(reinterpret_cast(&rows), 4); - rows = swap_endian(rows); - image_file.read(reinterpret_cast(&cols), 4); - cols = swap_endian(cols); - - // Open leveldb - leveldb::DB* db; - leveldb::Options options; - options.create_if_missing = true; - options.error_if_exists = true; - leveldb::Status status = leveldb::DB::Open( - options, db_filename, &db); - CHECK(status.ok()) << "Failed to open leveldb " << db_filename - << ". 
Is it already existing?"; - - char label_i; - char label_j; - char* pixels = new char[2 * rows * cols]; - const int kMaxKeyLength = 10; - char key[kMaxKeyLength]; - std::string value; - - caffe::Datum datum; - datum.set_channels(2); // one channel for each image in the pair - datum.set_height(rows); - datum.set_width(cols); - LOG(INFO) << "A total of " << num_items << " items."; - LOG(INFO) << "Rows: " << rows << " Cols: " << cols; - for (int itemid = 0; itemid < num_items; ++itemid) { - int i = caffe::caffe_rng_rand() % num_items; // pick a random pair - int j = caffe::caffe_rng_rand() % num_items; - read_image(&image_file, &label_file, i, rows, cols, - pixels, &label_i); - read_image(&image_file, &label_file, j, rows, cols, - pixels + (rows * cols), &label_j); - datum.set_data(pixels, 2*rows*cols); - if (label_i == label_j) { - datum.set_label(1); - } else { - datum.set_label(0); - } - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", itemid); - db->Put(leveldb::WriteOptions(), std::string(key), value); - } - - delete db; - delete pixels; -} - -int main(int argc, char** argv) { - if (argc != 4) { - printf("This script converts the MNIST dataset to the leveldb format used\n" - "by caffe to train a siamese network.\n" - "Usage:\n" - " convert_mnist_data input_image_file input_label_file " - "output_db_file\n" - "The MNIST dataset could be downloaded at\n" - " http://yann.lecun.com/exdb/mnist/\n" - "You should gunzip them after downloading.\n"); - } else { - google::InitGoogleLogging(argv[0]); - convert_dataset(argv[1], argv[2], argv[3]); - } - return 0; -} diff --git a/examples/siamese/create_lfw_siamese.sh b/examples/siamese/create_lfw_siamese.sh deleted file mode 100755 index 3790b9eb2a0..00000000000 --- a/examples/siamese/create_lfw_siamese.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env sh -# This script converts the mnist data into leveldb format. - -EXAMPLES=./build/examples/siamese -DATA=./data/lfw - -echo "Creating leveldb..." 
- -rm -rf ./examples/siamese/lfw_siamese_train_leveldb -rm -rf ./examples/siamese/lfw_siamese_test_leveldb - -$EXAMPLES/convert_lfw_siamese_data.bin \ - $DATA/traindata \ - $DATA/trainlabel \ - ./examples/siamese/lfw_siamese_train_leveldb -$EXAMPLES/convert_mnist_siamese_data.bin \ - $DATA/testdata \ - $DATA/testlabel \ - ./examples/siamese/lfw_siamese_test_leveldb - -echo "Done." diff --git a/examples/siamese/lfw_siamese.prototxt b/examples/siamese/lfw_siamese.prototxt deleted file mode 100644 index 106d9aa76f4..00000000000 --- a/examples/siamese/lfw_siamese.prototxt +++ /dev/null @@ -1,113 +0,0 @@ -name: "lfw_siamese" -input: "data" -input_dim: 10000 -input_dim: 1 -input_dim: 150 -input_dim: 80 -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 500 - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "ip2" - type: "InnerProduct" - bottom: "ip1" - top: "ip2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 10 - } -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip2" - top: "feat" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - 
num_output: 2 - } -} diff --git a/examples/siamese/lfw_siamese_solver.prototxt b/examples/siamese/lfw_siamese_solver.prototxt deleted file mode 100644 index 2aaafb63c1f..00000000000 --- a/examples/siamese/lfw_siamese_solver.prototxt +++ /dev/null @@ -1,25 +0,0 @@ -# The train/test net protocol buffer definition -net: "examples/siamese/lfw_siamese_train_test.prototxt" -# test_iter specifies how many forward passes the test should carry out. -# In the case of MNIST, we have test batch size 100 and 100 test iterations, -# covering the full 10,000 testing images. -test_iter: 100 -# Carry out testing every 500 training iterations. -test_interval: 500 -# The base learning rate, momentum and the weight decay of the network. -base_lr: 0.01 -momentum: 0.9 -weight_decay: 0.0000 -# The learning rate policy -lr_policy: "inv" -gamma: 0.0001 -power: 0.75 -# Display every 100 iterations -display: 100 -# The maximum number of iterations -max_iter: 50000 -# snapshot intermediate results -snapshot: 5000 -snapshot_prefix: "examples/siamese/lfw_siamese" -# solver mode: CPU or GPU -solver_mode: CPU diff --git a/examples/siamese/lfw_siamese_train_test.prototxt b/examples/siamese/lfw_siamese_train_test.prototxt deleted file mode 100644 index 049187bf3d4..00000000000 --- a/examples/siamese/lfw_siamese_train_test.prototxt +++ /dev/null @@ -1,349 +0,0 @@ -name: "lfw_siamese_train_test" -layer { - name: "pair_data" - type: "Data" - top: "pair_data" - top: "sim" - include { - phase: TRAIN - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/siamese/lfw_siamese_train_leveldb" - batch_size: 64 - } -} -layer { - name: "pair_data" - type: "Data" - top: "pair_data" - top: "sim" - include { - phase: TEST - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/siamese/lfw_siamese_test_leveldb" - batch_size: 100 - } -} -layer { - name: "slice_pair" - type: "Slice" - bottom: "pair_data" - top: "data" - top: "data_p" - slice_param { - slice_dim: 
1 - slice_point: 1 - } -} -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "ip2" - type: "InnerProduct" - bottom: "ip1" - top: "ip2" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip2" - top: "feat" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "conv1_p" - type: "Convolution" - bottom: "data_p" - top: "conv1_p" - param { - name: 
"conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1_p" - type: "Pooling" - bottom: "conv1_p" - top: "pool1_p" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2_p" - type: "Convolution" - bottom: "pool1_p" - top: "conv2_p" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2_p" - type: "Pooling" - bottom: "conv2_p" - top: "pool2_p" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1_p" - type: "InnerProduct" - bottom: "pool2_p" - top: "ip1_p" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1_p" - type: "ReLU" - bottom: "ip1_p" - top: "ip1_p" -} -layer { - name: "ip2_p" - type: "InnerProduct" - bottom: "ip1_p" - top: "ip2_p" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat_p" - type: "InnerProduct" - bottom: "ip2_p" - top: "feat_p" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "loss" - type: "ContrastiveLoss" - bottom: "feat" - bottom: "feat_p" - bottom: "sim" - top: "loss" - contrastive_loss_param { - margin: 1 - } -} diff --git 
a/examples/siamese/train_lfw_siamese.sh b/examples/siamese/train_lfw_siamese.sh deleted file mode 100755 index 0a879a65419..00000000000 --- a/examples/siamese/train_lfw_siamese.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env sh - -TOOLS=./build/tools - -$TOOLS/caffe train --solver=examples/siamese/lfw_siamese_solver.prototxt diff --git a/examples/triplet/3d_triplet_solver.prototxt b/examples/triplet/3d_triplet_solver.prototxt index f1dba141539..5008af22df9 100644 --- a/examples/triplet/3d_triplet_solver.prototxt +++ b/examples/triplet/3d_triplet_solver.prototxt @@ -7,7 +7,7 @@ test_iter: 100 # Carry out testing every 500 training iterations. test_interval: 100 # The base learning rate, momentum and the weight decay of the network. -base_lr: 0.01 +base_lr: 0.001 momentum: 0.9 weight_decay: 0.0000 # The learning rate policy diff --git a/examples/triplet/3d_triplet_train_test.prototxt b/examples/triplet/3d_triplet_train_test.prototxt index db9b786e110..1ac185aa2cc 100644 --- a/examples/triplet/3d_triplet_train_test.prototxt +++ b/examples/triplet/3d_triplet_train_test.prototxt @@ -1,8 +1,8 @@ name: "3d_triplet_train_test" layer { - name: "triplet_data" + name: "data" type: "Data" - top: "triplet_data" + top: "data" top: "sim" include { phase: TRAIN @@ -12,13 +12,13 @@ layer { } data_param { source: "examples/triplet/3d_triplet_train_leveldb" - batch_size: 69 + batch_size: 250 } } layer { - name: "triplet_data" + name: "data" type: "Data" - top: "triplet_data" + top: "data" top: "sim" include { phase: TEST @@ -28,24 +28,7 @@ layer { } data_param { source: "examples/triplet/3d_triplet_test_leveldb" - batch_size: 69 - } -} -layer { - name: "slice_triplet" - type: "Slice" - bottom: "triplet_data" - top: "data" - top: "data_true" - top: "data_false" - top: "data_p1" - top: "data_p2" - slice_param { - slice_dim: 1 - slice_point: 1 - slice_point: 2 - slice_point: 3 - slice_point: 4 + batch_size: 250 } } layer { @@ -184,562 +167,15 @@ layer { } } } -layer { - name: 
"conv1_true" - type: "Convolution" - bottom: "data_true" - top: "conv1_true" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 16 - kernel_size: 8 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1_true" - type: "Pooling" - bottom: "conv1_true" - top: "pool1_true" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "relu1_true" - type: "ReLU" - bottom: "pool1_true" - top: "pool1_true" -} -layer { - name: "conv2_true" - type: "Convolution" - bottom: "pool1_true" - top: "conv2_true" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 7 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2_true" - type: "Pooling" - bottom: "conv2_true" - top: "pool2_true" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "relu2_true" - type: "ReLU" - bottom: "pool2_true" - top: "pool2_true" -} -layer { - name: "ip1_true" - type: "InnerProduct" - bottom: "pool2_true" - top: "ip1_true" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 256 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu3_true" - type: "ReLU" - bottom: "ip1_true" - top: "ip1_true" -} -layer { - name: "feat_true" - type: "InnerProduct" - bottom: "ip1_true" - top: "feat_true" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 4 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "conv1_false" - type: "Convolution" - bottom: "data_false" - top: "conv1_false" - param { - name: "conv1_w" - lr_mult: 1 - } - param 
{ - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 16 - kernel_size: 8 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1_false" - type: "Pooling" - bottom: "conv1_false" - top: "pool1_false" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "relu1_false" - type: "ReLU" - bottom: "pool1_false" - top: "pool1_false" -} -layer { - name: "conv2_false" - type: "Convolution" - bottom: "pool1_false" - top: "conv2_false" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 7 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2_false" - type: "Pooling" - bottom: "conv2_false" - top: "pool2_false" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "relu2_false" - type: "ReLU" - bottom: "pool2_false" - top: "pool2_false" -} -layer { - name: "ip1_false" - type: "InnerProduct" - bottom: "pool2_false" - top: "ip1_false" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 256 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu3_false" - type: "ReLU" - bottom: "ip1_false" - top: "ip1_false" -} -layer { - name: "feat_false" - type: "InnerProduct" - bottom: "ip1_false" - top: "feat_false" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 4 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "conv1_p1" - type: "Convolution" - bottom: "data_p1" - top: "conv1_p1" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 16 - kernel_size: 8 - stride: 1 - 
weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1_p1" - type: "Pooling" - bottom: "conv1_p1" - top: "pool1_p1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "relu1_p1" - type: "ReLU" - bottom: "pool1_p1" - top: "pool1_p1" -} -layer { - name: "conv2_p1" - type: "Convolution" - bottom: "pool1_p1" - top: "conv2_p1" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 7 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2_p1" - type: "Pooling" - bottom: "conv2_p1" - top: "pool2_p1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "relu2_p1" - type: "ReLU" - bottom: "pool2_p1" - top: "pool2_p1" -} -layer { - name: "ip1_p1" - type: "InnerProduct" - bottom: "pool2_p1" - top: "ip1_p1" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 256 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu3_p1" - type: "ReLU" - bottom: "ip1_p1" - top: "ip1_p1" -} -layer { - name: "feat_p1" - type: "InnerProduct" - bottom: "ip1_p1" - top: "feat_p1" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 4 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "conv1_p2" - type: "Convolution" - bottom: "data_p2" - top: "conv1_p2" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 16 - kernel_size: 8 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1_p2" - type: "Pooling" - bottom: "conv1_p2" - top: "pool1_p2" - pooling_param { 
- pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "relu1_p2" - type: "ReLU" - bottom: "pool1_p2" - top: "pool1_p2" -} -layer { - name: "conv2_p2" - type: "Convolution" - bottom: "pool1_p2" - top: "conv2_p2" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 7 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2_p2" - type: "Pooling" - bottom: "conv2_p2" - top: "pool2_p2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "relu2_p2" - type: "ReLU" - bottom: "pool2_p2" - top: "pool2_p2" -} -layer { - name: "ip1_p2" - type: "InnerProduct" - bottom: "pool2_p2" - top: "ip1_p2" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 256 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu3_p2" - type: "ReLU" - bottom: "ip1_p2" - top: "ip1_p2" -} -layer { - name: "feat_p2" - type: "InnerProduct" - bottom: "ip1_p2" - top: "feat_p2" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 4 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} layer { name: "loss" type: "TripletLoss" bottom: "feat" - bottom: "feat_true" - bottom: "feat_false" - bottom: "feat_p1" - bottom: "feat_p2" bottom: "sim" top: "loss" triplet_loss_param { margin: 1 - losstype: 1 + losstype: 0 + num_triplets: 3 } } diff --git a/examples/triplet/convert_3d_triplet_data.cpp b/examples/triplet/convert_3d_triplet_data.cpp index 1c9806c0e2a..ce1981d90da 100644 --- a/examples/triplet/convert_3d_triplet_data.cpp +++ b/examples/triplet/convert_3d_triplet_data.cpp @@ -1,7 +1,5 @@ // Usage: -// convert_mnist_data input_image_file input_label_file output_db_file -// The MNIST dataset could 
be downloaded at -// http://yann.lecun.com/exdb/mnist/ +// convert_3d_data input_image_file input_label_file output_db_file #include // NOLINT(readability/streams) #include #include @@ -75,99 +73,127 @@ void convert_dataset(const char* image_filename, const char* label_filename, signed char* label_k = new signed char[4]; signed char* label_l = new signed char[4]; // label for pair wise signed char* label_m = new signed char[4]; - char* pixels = new char[5 * rows * cols]; + char* pixels1 = new char[rows * cols]; + char* pixels2 = new char[rows * cols]; + char* pixels3 = new char[rows * cols]; + char* pixels4 = new char[rows * cols]; + char* pixels5 = new char[rows * cols]; const int kMaxKeyLength = 10; char key[kMaxKeyLength]; std::string value; caffe::Datum datum; - datum.set_channels(5); // one channel for each image in the triplet and pair + datum.set_channels(1); datum.set_height(rows); datum.set_width(cols); LOG(INFO) << "A total of " << num_items << " items."; LOG(INFO) << "Rows: " << rows << " Cols: " << cols; - // iteration in the samples of all class - for (unsigned int itemid = 0; itemid < 5*num_items/class_num; ++itemid) { - // iteration in the samples in one class - for (unsigned int class_ind = 0; class_ind < class_num; ++class_ind) { - // use reference sample one by one at each iteration - int i = itemid % num_items + class_ind*num_items/class_num; - int j = caffe::caffe_rng_rand() % num_items; // pick triplet groups - int k = caffe::caffe_rng_rand() % num_items; - int l = caffe::caffe_rng_rand() % num_items; // pick pair wise groups - int m = caffe::caffe_rng_rand() % num_items; - read_image(&image_file, &label_file, i, rows, cols, // read triplet - pixels, label_temp, label_i); - read_image(&image_file, &label_file, j, rows, cols, - pixels + (rows * cols), label_temp, label_j); - read_image(&image_file, &label_file, k, rows, cols, - pixels + (2 * rows * cols), label_temp, label_k); - read_image(&image_file, &label_file, l, rows, cols, // read pair 
wise - pixels + (3 * rows * cols), label_temp, label_l); - read_image(&image_file, &label_file, m, rows, cols, - pixels + (4 * rows * cols), label_temp, label_m); + int counter = 0; + for (unsigned int times = 0; times < 5; ++times) { + // iteration in the samples of all class + for (unsigned int itemid = 0; itemid < num_items/class_num; ++itemid) { + // iteration in the samples in one class + for (unsigned int class_ind = 0; class_ind < class_num; ++class_ind) { + // use reference sample one by one at each iteration + int i = itemid % num_items + class_ind*num_items/class_num; + int j = caffe::caffe_rng_rand() % num_items; // pick triplet groups + int k = caffe::caffe_rng_rand() % num_items; + int l = caffe::caffe_rng_rand() % num_items; // pick pair wise groups + int m = caffe::caffe_rng_rand() % num_items; + read_image(&image_file, &label_file, i, rows, cols, // read triplet + pixels1, label_temp, label_i); + read_image(&image_file, &label_file, j, rows, cols, + pixels2, label_temp, label_j); + read_image(&image_file, &label_file, k, rows, cols, + pixels3, label_temp, label_k); + read_image(&image_file, &label_file, l, rows, cols, // read pair wise + pixels4, label_temp, label_l); + read_image(&image_file, &label_file, m, rows, cols, + pixels5, label_temp, label_m); - datum.set_data(pixels, 5*rows*cols); // set data - bool triplet_class_pass = false; - bool triplet_class_same = false; - bool triplet_pose_pass = false; - bool pair_class_pass = false; + bool pair_pass = false; + bool triplet1_pass = false; + bool triplet2_pass = false; + bool triplet3_class_same = false; + bool triplet3_pass = false; - int ij_diff_x = static_cast(*(label_i+1)-*(label_j+1)); - int ij_diff_y = static_cast(*(label_i+2)-*(label_j+2)); - int ij_diff_z = static_cast(*(label_i+3)-*(label_j+3)); - int ik_diff_x = static_cast(*(label_i+1)-*(label_k+1)); - int ik_diff_y = static_cast(*(label_i+2)-*(label_k+2)); - int ik_diff_z = static_cast(*(label_i+3)-*(label_k+3)); - int lm_diff_x = 
static_cast(*(label_l+1)-*(label_m+1)); - int lm_diff_y = static_cast(*(label_l+2)-*(label_m+2)); - int lm_diff_z = static_cast(*(label_l+3)-*(label_m+3)); + int ij_diff_x = static_cast(*(label_i+1)-*(label_j+1)); + int ij_diff_y = static_cast(*(label_i+2)-*(label_j+2)); + int ij_diff_z = static_cast(*(label_i+3)-*(label_j+3)); + int im_diff_x = static_cast(*(label_i+1)-*(label_m+1)); + int im_diff_y = static_cast(*(label_i+2)-*(label_m+2)); + int im_diff_z = static_cast(*(label_i+3)-*(label_m+3)); - int ij_x = ij_diff_x*ij_diff_x; - int ij_y = ij_diff_y*ij_diff_y; - int ij_z = ij_diff_z*ij_diff_z; - int ik_x = ik_diff_x*ik_diff_x; - int ik_y = ik_diff_y*ik_diff_y; - int ik_z = ik_diff_z*ik_diff_z; - int lm_x = lm_diff_x*lm_diff_x; - int lm_y = lm_diff_y*lm_diff_y; - int lm_z = lm_diff_z*lm_diff_z; + int ij_x = ij_diff_x*ij_diff_x; + int ij_y = ij_diff_y*ij_diff_y; + int ij_z = ij_diff_z*ij_diff_z; + int im_x = im_diff_x*im_diff_x; + int im_y = im_diff_y*im_diff_y; + int im_z = im_diff_z*im_diff_z; - float dist_ij = std::sqrt(ij_x + ij_y + ij_z); - float dist_ik = std::sqrt(ik_x + ik_y + ik_z); - float dist_lm = std::sqrt(lm_x + lm_y + lm_z); - if ((*label_i == *label_j) && (*label_i == *label_k)) - triplet_class_same = true; - if ((dist_ij < 100 && dist_ik > 100*sqrt(2)) && (triplet_class_same)) - triplet_pose_pass = true; - if ((*label_i == *label_j) && (*label_i != *label_k)) - triplet_class_pass = true; - if (*label_l == *label_m && dist_lm < 100/2) - pair_class_pass = true; - if ((triplet_class_pass || triplet_pose_pass) && pair_class_pass) { - datum.set_label(1); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", itemid*class_num+class_ind); - db->Put(leveldb::WriteOptions(), std::string(key), value); - } else { - class_ind--; - datum.set_label(0); - } - } // iteration in the samples of all class - } // iteration in the samples in one class + float dist_ij = std::sqrt(ij_x + ij_y + ij_z); + float dist_im = std::sqrt(im_x + im_y + im_z); 
+ if (*label_i == *label_j && dist_ij < 100/2) + pair_pass = true; + if (pair_pass && (*label_i != *label_k)) + triplet1_pass = true; + if (pair_pass && (*label_i != *label_l)) + triplet2_pass = true; + if (pair_pass && (*label_i == *label_m)) + triplet3_class_same = true; + if (triplet3_class_same && dist_im > 100*sqrt(2)) + triplet3_pass = true; + if (pair_pass && triplet1_pass && triplet2_pass && triplet3_pass) { + datum.set_data(pixels1, rows*cols); // set data + datum.set_label(int(*label_i)); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", counter); + db->Put(leveldb::WriteOptions(), std::string(key), value); + counter++; + datum.set_data(pixels2, rows*cols); // set data + datum.set_label(int(*label_j)); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", counter); + db->Put(leveldb::WriteOptions(), std::string(key), value); + counter++; + datum.set_data(pixels3, rows*cols); // set data + datum.set_label(int(*label_k)); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", counter); + db->Put(leveldb::WriteOptions(), std::string(key), value); + counter++; + datum.set_data(pixels4, rows*cols); // set data + datum.set_label(int(*label_l)); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", counter); + db->Put(leveldb::WriteOptions(), std::string(key), value); + counter++; + datum.set_data(pixels5, rows*cols); // set data + datum.set_label(int(*label_m)); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", counter); + db->Put(leveldb::WriteOptions(), std::string(key), value); + counter++; + } else { + class_ind--; + } + } // iteration in the samples of all class + } // iteration in the samples in one class + } // iteration in times delete db; - delete pixels; + delete pixels1; + delete pixels2; + delete pixels3; + delete pixels4; + delete pixels5; } int main(int argc, char** argv) { - if (argc != 4) { - printf("This script converts the MNIST dataset 
to the leveldb format used\n" - "by caffe to train a siamese network.\n" + if (argc != 5) { + printf("This script converts the images dataset to the leveldb format used\n" + "by caffe to train a triplet network.\n" "Usage:\n" - " convert_mnist_data input_image_file input_label_file " - "output_db_file\n" - "The MNIST dataset could be downloaded at\n" - " http://yann.lecun.com/exdb/mnist/\n" - "You should gunzip them after downloading.\n"); + " convert_3d_data input_image_file input_label_file " + "output_db_file class_number\n"); } else { google::InitGoogleLogging(argv[0]); convert_dataset(argv[1], argv[2], argv[3], argv[4]); diff --git a/examples/triplet/convert_lfw_triplet_data.cpp b/examples/triplet/convert_lfw_triplet_data.cpp deleted file mode 100644 index 9f65fab76b4..00000000000 --- a/examples/triplet/convert_lfw_triplet_data.cpp +++ /dev/null @@ -1,126 +0,0 @@ -// This script converts the MNIST dataset to the leveldb format used -// by caffe to train siamese network. -// Usage: -// convert_mnist_data input_image_file input_label_file output_db_file -// The MNIST dataset could be downloaded at -// http://yann.lecun.com/exdb/mnist/ -#include // NOLINT(readability/streams) -#include - -#include "glog/logging.h" -#include "google/protobuf/text_format.h" -#include "leveldb/db.h" -#include "stdint.h" - -#include "caffe/proto/caffe.pb.h" -#include "caffe/util/math_functions.hpp" - -uint32_t swap_endian(uint32_t val) { - val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); - return (val << 16) | (val >> 16); -} - -void read_image(std::ifstream* image_file, std::ifstream* label_file, - uint32_t index, uint32_t rows, uint32_t cols, - char* pixels, char* label) { - image_file->seekg(index * rows * cols + 16); - image_file->read(pixels, rows * cols); - label_file->seekg(index + 8); - label_file->read(label, 1); -} - -void convert_dataset(const char* image_filename, const char* label_filename, - const char* db_filename) { - // Open files - std::ifstream 
image_file(image_filename, std::ios::in | std::ios::binary); - std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); - CHECK(image_file) << "Unable to open file " << image_filename; - CHECK(label_file) << "Unable to open file " << label_filename; - // Read the magic and the meta data - uint32_t magic; - uint32_t num_items; - uint32_t num_labels; - uint32_t rows; - uint32_t cols; - - image_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - label_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - image_file.read(reinterpret_cast(&num_items), 4); - num_items = swap_endian(num_items); - label_file.read(reinterpret_cast(&num_labels), 4); - num_labels = swap_endian(num_labels); - CHECK_EQ(num_items, num_labels); - image_file.read(reinterpret_cast(&rows), 4); - rows = swap_endian(rows); - image_file.read(reinterpret_cast(&cols), 4); - cols = swap_endian(cols); - - // Open leveldb - leveldb::DB* db; - leveldb::Options options; - options.create_if_missing = true; - options.error_if_exists = true; - leveldb::Status status = leveldb::DB::Open( - options, db_filename, &db); - CHECK(status.ok()) << "Failed to open leveldb " << db_filename - << ". 
Is it already existing?"; - - char label_i; - char label_j; - char label_k; - char* pixels = new char[3 * rows * cols]; - const int kMaxKeyLength = 10; - char key[kMaxKeyLength]; - std::string value; - - caffe::Datum datum; - datum.set_channels(3); // one channel for each image in the pair - datum.set_height(rows); - datum.set_width(cols); - LOG(INFO) << "A total of " << num_items << " items."; - LOG(INFO) << "Rows: " << rows << " Cols: " << cols; - for (int itemid = 0; itemid < num_items; ++itemid) { - int i = caffe::caffe_rng_rand() % num_items; // pick triplet groups - int j = caffe::caffe_rng_rand() % num_items; - int k = caffe::caffe_rng_rand() % num_items; - read_image(&image_file, &label_file, i, rows, cols, - pixels, &label_i); - read_image(&image_file, &label_file, j, rows, cols, - pixels + (rows * cols), &label_j); - read_image(&image_file, &label_file, k, rows, cols, - pixels + (2 * rows * cols), &label_k); - - datum.set_data(pixels, 3*rows*cols); - if (label_i == label_j && label_i != label_k) { - datum.set_label(1); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", itemid); - db->Put(leveldb::WriteOptions(), std::string(key), value); - } else { - itemid--; - datum.set_label(0); - } - } - - delete db; - delete pixels; -} - -int main(int argc, char** argv) { - if (argc != 4) { - printf("This script converts the MNIST dataset to the leveldb format used\n" - "by caffe to train a siamese network.\n" - "Usage:\n" - " convert_mnist_data input_image_file input_label_file " - "output_db_file\n" - "The MNIST dataset could be downloaded at\n" - " http://yann.lecun.com/exdb/mnist/\n" - "You should gunzip them after downloading.\n"); - } else { - google::InitGoogleLogging(argv[0]); - convert_dataset(argv[1], argv[2], argv[3]); - } - return 0; -} diff --git a/examples/triplet/convert_mnist_triplet_data.cpp b/examples/triplet/convert_mnist_triplet_data.cpp deleted file mode 100644 index c59a75efe01..00000000000 --- 
a/examples/triplet/convert_mnist_triplet_data.cpp +++ /dev/null @@ -1,140 +0,0 @@ -// This script converts the MNIST dataset to the leveldb format used -// by caffe to train siamese network. -// Usage: -// convert_mnist_data input_image_file input_label_file output_db_file -// The MNIST dataset could be downloaded at -// http://yann.lecun.com/exdb/mnist/ -#include // NOLINT(readability/streams) -#include - -#include "glog/logging.h" -#include "google/protobuf/text_format.h" -#include "leveldb/db.h" -#include "stdint.h" - -#include "caffe/proto/caffe.pb.h" -#include "caffe/util/math_functions.hpp" - -uint32_t swap_endian(uint32_t val) { - val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); - return (val << 16) | (val >> 16); -} - -void read_image(std::ifstream* image_file, std::ifstream* label_file, - uint32_t index, uint32_t rows, uint32_t cols, - char* pixels, char* label) { - image_file->seekg(index * rows * cols + 16); - image_file->read(pixels, rows * cols); - label_file->seekg(index + 8); - label_file->read(label, 1); -} - -void convert_dataset(const char* image_filename, const char* label_filename, - const char* db_filename) { - // Open files - std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); - std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); - CHECK(image_file) << "Unable to open file " << image_filename; - CHECK(label_file) << "Unable to open file " << label_filename; - // Read the magic and the meta data - uint32_t magic; - uint32_t num_items; - uint32_t num_labels; - uint32_t rows; - uint32_t cols; - - image_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - CHECK_EQ(magic, 2051) << "Incorrect image file magic."; - label_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - CHECK_EQ(magic, 2049) << "Incorrect label file magic."; - image_file.read(reinterpret_cast(&num_items), 4); - num_items = swap_endian(num_items); - 
label_file.read(reinterpret_cast(&num_labels), 4); - num_labels = swap_endian(num_labels); - CHECK_EQ(num_items, num_labels); - image_file.read(reinterpret_cast(&rows), 4); - rows = swap_endian(rows); - image_file.read(reinterpret_cast(&cols), 4); - cols = swap_endian(cols); - - // Open leveldb - leveldb::DB* db; - leveldb::Options options; - options.create_if_missing = true; - options.error_if_exists = true; - leveldb::Status status = leveldb::DB::Open( - options, db_filename, &db); - CHECK(status.ok()) << "Failed to open leveldb " << db_filename - << ". Is it already existing?"; - - char label_i; // label for triplet - char label_j; - char label_k; - char label_l; // label for pair wise - char label_m; - char* pixels = new char[5 * rows * cols]; - const int kMaxKeyLength = 10; - char key[kMaxKeyLength]; - std::string value; - - caffe::Datum datum; - datum.set_channels(5); // one channel for each image in the triplet and pair - datum.set_height(rows); - datum.set_width(cols); - LOG(INFO) << "A total of " << num_items << " items."; - LOG(INFO) << "Rows: " << rows << " Cols: " << cols; - for (int itemid = 0; itemid < num_items; ++itemid) { - // pick triplet groups - int i = caffe::caffe_rng_rand() % num_items; - int j = caffe::caffe_rng_rand() % num_items; - int k = caffe::caffe_rng_rand() % num_items; - // pick pair wise groups - int l = caffe::caffe_rng_rand() % num_items; - int m = caffe::caffe_rng_rand() % num_items; - // read triplet groups - read_image(&image_file, &label_file, i, rows, cols, - pixels, &label_i); - read_image(&image_file, &label_file, j, rows, cols, - pixels + (rows * cols), &label_j); - read_image(&image_file, &label_file, k, rows, cols, - pixels + (2 * rows * cols), &label_k); - // pair wise groups - read_image(&image_file, &label_file, l, rows, cols, - pixels + (3 * rows * cols), &label_l); - read_image(&image_file, &label_file, m, rows, cols, - pixels + (4 * rows * cols), &label_m); - - datum.set_data(pixels, 5*rows*cols); // set data - if 
((label_i == label_j && label_i != label_k) && (label_l == label_m)) { - datum.set_label(1); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", itemid); - db->Put(leveldb::WriteOptions(), std::string(key), value); - } else { - itemid--; - datum.set_label(0); - } - } - - delete db; - delete pixels; -} - -int main(int argc, char** argv) { - if (argc != 4) { - printf("This script converts the MNIST dataset to the leveldb format used\n" - "by caffe to train a siamese network.\n" - "Usage:\n" - " convert_mnist_data input_image_file input_label_file " - "output_db_file\n" - "The MNIST dataset could be downloaded at\n" - " http://yann.lecun.com/exdb/mnist/\n" - "You should gunzip them after downloading.\n"); - } else { - google::InitGoogleLogging(argv[0]); - convert_dataset(argv[1], argv[2], argv[3]); - } - return 0; -} diff --git a/examples/triplet/create_lfw_triplet.sh b/examples/triplet/create_lfw_triplet.sh deleted file mode 100755 index 382a9021f10..00000000000 --- a/examples/triplet/create_lfw_triplet.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env sh -# This script converts the lfw data into leveldb format. - -EXAMPLES=./build/examples/triplet -DATA=./data/lfw - -echo "Creating leveldb..." - -rm -rf ./examples/triplet/lfw_triplet_train_leveldb -rm -rf ./examples/triplet/lfw_triplet_test_leveldb - -$EXAMPLES/convert_lfw_triplet_data.bin \ - $DATA/traindata \ - $DATA/trainlabel \ - ./examples/triplet/lfw_triplet_train_leveldb -$EXAMPLES/convert_lfw_triplet_data.bin \ - $DATA/testdata \ - $DATA/testlabel \ - ./examples/triplet/lfw_triplet_test_leveldb - -echo "Done." diff --git a/examples/triplet/create_mnist_triplet.sh b/examples/triplet/create_mnist_triplet.sh deleted file mode 100755 index f404f2aa255..00000000000 --- a/examples/triplet/create_mnist_triplet.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env sh -# This script converts the mnist data into leveldb format. 
- -EXAMPLES=./build/examples/triplet -DATA=./data/mnist - -echo "Creating leveldb..." - -rm -rf ./examples/triplet/mnist_siamese_train_leveldb -rm -rf ./examples/triplet/mnist_siamese_test_leveldb - -$EXAMPLES/convert_mnist_triplet_data.bin \ - $DATA/train-images-idx3-ubyte \ - $DATA/train-labels-idx1-ubyte \ - ./examples/triplet/mnist_triplet_train_leveldb -$EXAMPLES/convert_mnist_triplet_data.bin \ - $DATA/t10k-images-idx3-ubyte \ - $DATA/t10k-labels-idx1-ubyte \ - ./examples/triplet/mnist_triplet_test_leveldb - -echo "Done." diff --git a/examples/triplet/lfw_triplet.prototxt b/examples/triplet/lfw_triplet.prototxt deleted file mode 100644 index 9537d1feb8b..00000000000 --- a/examples/triplet/lfw_triplet.prototxt +++ /dev/null @@ -1,113 +0,0 @@ -name: "lfw_siamese" -input: "data" -input_dim: 10000 -input_dim: 1 -input_dim: 150 -input_dim: 130 -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 500 - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "ip2" - type: "InnerProduct" - bottom: "ip1" - top: "ip2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 10 - } -} -layer { - 
name: "feat" - type: "InnerProduct" - bottom: "ip2" - top: "feat" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 2 - } -} diff --git a/examples/triplet/lfw_triplet_solver.prototxt b/examples/triplet/lfw_triplet_solver.prototxt deleted file mode 100644 index eb4c2c369e9..00000000000 --- a/examples/triplet/lfw_triplet_solver.prototxt +++ /dev/null @@ -1,25 +0,0 @@ -# The train/test net protocol buffer definition -net: "examples/triplet/lfw_triplet_train_test.prototxt" -# test_iter specifies how many forward passes the test should carry out. -# In the case of lfw, we have test batch size 100 and 100 test iterations, -# covering the full 10,000 testing images. -test_iter: 100 -# Carry out testing every 500 training iterations. -test_interval: 500 -# The base learning rate, momentum and the weight decay of the network. -base_lr: 0.01 -momentum: 0.9 -weight_decay: 0.0000 -# The learning rate policy -lr_policy: "inv" -gamma: 0.0001 -power: 0.75 -# Display every 100 iterations -display: 100 -# The maximum number of iterations -max_iter: 50000 -# snapshot intermediate results -snapshot: 5000 -snapshot_prefix: "examples/triplet/lfw_triplet" -# solver mode: CPU or GPU -solver_mode: CPU diff --git a/examples/triplet/lfw_triplet_train_test.prototxt b/examples/triplet/lfw_triplet_train_test.prototxt deleted file mode 100644 index 59ef26e90a4..00000000000 --- a/examples/triplet/lfw_triplet_train_test.prototxt +++ /dev/null @@ -1,500 +0,0 @@ -name: "lfw_triplet_train_test" -layer { - name: "triplet_data" - type: "Data" - top: "triplet_data" - top: "sim" - include { - phase: TRAIN - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/triplet/lfw_triplet_train_leveldb" - batch_size: 64 - } -} -layer { - name: "triplet_data" - type: "Data" - top: "triplet_data" - top: "sim" - include { - phase: TEST - } - transform_param { - scale: 0.00390625 - } - data_param { - source: 
"examples/triplet/lfw_triplet_test_leveldb" - batch_size: 100 - } -} -layer { - name: "slice_triplet" - type: "Slice" - bottom: "triplet_data" - top: "data" - top: "data_true" - top: "data_false" - slice_param { - slice_dim: 1 - slice_point: 1 - slice_point: 2 - } -} -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "ip2" - type: "InnerProduct" - bottom: "ip1" - top: "ip2" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip2" - top: "feat" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - 
lr_mult: 2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "conv1_true" - type: "Convolution" - bottom: "data_true" - top: "conv1_true" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1_true" - type: "Pooling" - bottom: "conv1_true" - top: "pool1_true" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2_true" - type: "Convolution" - bottom: "pool1_true" - top: "conv2_true" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2_true" - type: "Pooling" - bottom: "conv2_true" - top: "pool2_true" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1_true" - type: "InnerProduct" - bottom: "pool2_true" - top: "ip1_true" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1_true" - type: "ReLU" - bottom: "ip1_true" - top: "ip1_true" -} -layer { - name: "ip2_true" - type: "InnerProduct" - bottom: "ip1_true" - top: "ip2_true" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat_true" - type: "InnerProduct" - bottom: "ip2_true" - top: "feat_true" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 
- } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "conv1_false" - type: "Convolution" - bottom: "data_false" - top: "conv1_false" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1_false" - type: "Pooling" - bottom: "conv1_false" - top: "pool1_false" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2_false" - type: "Convolution" - bottom: "pool1_false" - top: "conv2_false" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2_false" - type: "Pooling" - bottom: "conv2_false" - top: "pool2_false" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1_false" - type: "InnerProduct" - bottom: "pool2_false" - top: "ip1_false" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1_false" - type: "ReLU" - bottom: "ip1_false" - top: "ip1_false" -} -layer { - name: "ip2_false" - type: "InnerProduct" - bottom: "ip1_false" - top: "ip2_false" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat_false" - type: "InnerProduct" - bottom: "ip2_false" - top: "feat_false" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" 
- lr_mult: 2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "loss" - type: "TripletLoss" - bottom: "feat" - bottom: "feat_true" - bottom: "feat_false" - bottom: "sim" - top: "loss" - triplet_loss_param { - margin: 0.2 - } -} - diff --git a/examples/triplet/mnist_triplet.prototxt b/examples/triplet/mnist_triplet.prototxt deleted file mode 100644 index 0e903f85909..00000000000 --- a/examples/triplet/mnist_triplet.prototxt +++ /dev/null @@ -1,113 +0,0 @@ -name: "mnist_siamese" -input: "data" -input_dim: 10000 -input_dim: 1 -input_dim: 28 -input_dim: 28 -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 500 - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "ip2" - type: "InnerProduct" - bottom: "ip1" - top: "ip2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 10 - } -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip2" - top: "feat" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 2 - } -} diff --git 
a/examples/triplet/mnist_triplet_solver.prototxt b/examples/triplet/mnist_triplet_solver.prototxt deleted file mode 100644 index edd8e1e0338..00000000000 --- a/examples/triplet/mnist_triplet_solver.prototxt +++ /dev/null @@ -1,25 +0,0 @@ -# The train/test net protocol buffer definition -net: "examples/triplet/mnist_triplet_train_test.prototxt" -# test_iter specifies how many forward passes the test should carry out. -# In the case of MNIST, we have test batch size 100 and 100 test iterations, -# covering the full 10,000 testing images. -test_iter: 100 -# Carry out testing every 500 training iterations. -test_interval: 500 -# The base learning rate, momentum and the weight decay of the network. -base_lr: 0.001 -momentum: 0.9 -weight_decay: 0.0000 -# The learning rate policy -lr_policy: "inv" -gamma: 0.0001 -power: 0.75 -# Display every 100 iterations -display: 100 -# The maximum number of iterations -max_iter: 50000 -# snapshot intermediate results -snapshot: 5000 -snapshot_prefix: "examples/triplet/mnist_triplet" -# solver mode: CPU or GPU -solver_mode: CPU diff --git a/examples/triplet/mnist_triplet_train_test.prototxt b/examples/triplet/mnist_triplet_train_test.prototxt deleted file mode 100644 index c86ef933045..00000000000 --- a/examples/triplet/mnist_triplet_train_test.prototxt +++ /dev/null @@ -1,800 +0,0 @@ -name: "mnist_triplet_train_test" -layer { - name: "triplet_data" - type: "Data" - top: "triplet_data" - top: "sim" - include { - phase: TRAIN - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/triplet/mnist_triplet_train_leveldb" - batch_size: 64 - } -} -layer { - name: "triplet_data" - type: "Data" - top: "triplet_data" - top: "sim" - include { - phase: TEST - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/triplet/mnist_triplet_test_leveldb" - batch_size: 100 - } -} -layer { - name: "slice_triplet" - type: "Slice" - bottom: "triplet_data" - top: "data" - top: "data_true" - top: 
"data_false" - top: "data_p1" - top: "data_p2" - slice_param { - slice_dim: 1 - slice_point: 1 - slice_point: 2 - slice_point: 3 - slice_point: 4 - } -} -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "ip2" - type: "InnerProduct" - bottom: "ip1" - top: "ip2" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip2" - top: "feat" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: 
"constant" - } - } -} -layer { - name: "conv1_true" - type: "Convolution" - bottom: "data_true" - top: "conv1_true" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1_true" - type: "Pooling" - bottom: "conv1_true" - top: "pool1_true" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2_true" - type: "Convolution" - bottom: "pool1_true" - top: "conv2_true" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2_true" - type: "Pooling" - bottom: "conv2_true" - top: "pool2_true" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1_true" - type: "InnerProduct" - bottom: "pool2_true" - top: "ip1_true" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1_true" - type: "ReLU" - bottom: "ip1_true" - top: "ip1_true" -} -layer { - name: "ip2_true" - type: "InnerProduct" - bottom: "ip1_true" - top: "ip2_true" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat_true" - type: "InnerProduct" - bottom: "ip2_true" - top: "feat_true" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" 
- } - } -} -layer { - name: "conv1_false" - type: "Convolution" - bottom: "data_false" - top: "conv1_false" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1_false" - type: "Pooling" - bottom: "conv1_false" - top: "pool1_false" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2_false" - type: "Convolution" - bottom: "pool1_false" - top: "conv2_false" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2_false" - type: "Pooling" - bottom: "conv2_false" - top: "pool2_false" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1_false" - type: "InnerProduct" - bottom: "pool2_false" - top: "ip1_false" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1_false" - type: "ReLU" - bottom: "ip1_false" - top: "ip1_false" -} -layer { - name: "ip2_false" - type: "InnerProduct" - bottom: "ip1_false" - top: "ip2_false" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat_false" - type: "InnerProduct" - bottom: "ip2_false" - top: "feat_false" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - 
type: "constant" - } - } -} -layer { - name: "conv1_p1" - type: "Convolution" - bottom: "data_p1" - top: "conv1_p1" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1_p1" - type: "Pooling" - bottom: "conv1_p1" - top: "pool1_p1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2_p1" - type: "Convolution" - bottom: "pool1_p1" - top: "conv2_p1" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2_p1" - type: "Pooling" - bottom: "conv2_p1" - top: "pool2_p1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1_p1" - type: "InnerProduct" - bottom: "pool2_p1" - top: "ip1_p1" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1_p1" - type: "ReLU" - bottom: "ip1_p1" - top: "ip1_p1" -} -layer { - name: "ip2_p1" - type: "InnerProduct" - bottom: "ip1_p1" - top: "ip2_p1" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat_p1" - type: "InnerProduct" - bottom: "ip2_p1" - top: "feat_p1" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "conv1_p2" - 
type: "Convolution" - bottom: "data_p2" - top: "conv1_p2" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1_p2" - type: "Pooling" - bottom: "conv1_p2" - top: "pool1_p2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2_p2" - type: "Convolution" - bottom: "pool1_p2" - top: "conv2_p2" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2_p2" - type: "Pooling" - bottom: "conv2_p2" - top: "pool2_p2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1_p2" - type: "InnerProduct" - bottom: "pool2_p2" - top: "ip1_p2" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1_p2" - type: "ReLU" - bottom: "ip1_p2" - top: "ip1_p2" -} -layer { - name: "ip2_p2" - type: "InnerProduct" - bottom: "ip1_p2" - top: "ip2_p2" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat_p2" - type: "InnerProduct" - bottom: "ip2_p2" - top: "feat_p2" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "loss" - type: "TripletLoss" - bottom: "feat" - bottom: "feat_true" - 
bottom: "feat_false" - bottom: "feat_p1" - bottom: "feat_p2" - bottom: "sim" - top: "loss" - triplet_loss_param { - margin: 0.2 - } -} - diff --git a/examples/triplet/train_lfw_triplet.sh b/examples/triplet/train_lfw_triplet.sh deleted file mode 100755 index 076738a5e63..00000000000 --- a/examples/triplet/train_lfw_triplet.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env sh - -TOOLS=./build/tools - -$TOOLS/caffe train --solver=examples/triplet/lfw_triplet_solver.prototxt diff --git a/examples/triplet/train_mnist_triplet.sh b/examples/triplet/train_mnist_triplet.sh deleted file mode 100755 index e005970824a..00000000000 --- a/examples/triplet/train_mnist_triplet.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env sh - -TOOLS=./build/tools - -$TOOLS/caffe train --solver=examples/triplet/mnist_triplet_solver.prototxt diff --git a/include/caffe/loss_layers.hpp b/include/caffe/loss_layers.hpp index 3e163790af8..925af825a1d 100644 --- a/include/caffe/loss_layers.hpp +++ b/include/caffe/loss_layers.hpp @@ -218,14 +218,14 @@ class TripletLossLayer : public LossLayer { virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); - virtual inline int ExactNumBottomBlobs() const { return 6; } + virtual inline int ExactNumBottomBlobs() const { return 2; } virtual inline const char* type() const { return "TripletLoss"; } /** * Unlike most loss layers, in the TripletLossLayer we can backpropagate * to the first three inputs. 
*/ virtual inline bool AllowForceBackward(const int bottom_index) const { - return bottom_index != 5; + return bottom_index != 1; } protected: @@ -268,15 +268,12 @@ class TripletLossLayer : public LossLayer { Blob diff_; // cached for backward pass Blob diff_pos; Blob diff_neg; - Blob diff_par; Blob dist_sq_; // cached for backward pass Blob dist_sq_pos; Blob dist_sq_neg; - Blob dist_sq_par; Blob diff_sq_; // tmp storage for gpu forward pass Blob diff_sq_pos; Blob diff_sq_neg; - Blob diff_sq_par; Blob summer_vec_; // tmp storage for gpu forward pass }; diff --git a/src/caffe/layers/triplet_loss_layer.cpp b/src/caffe/layers/triplet_loss_layer.cpp index a4e6402c76a..613fa703676 100644 --- a/src/caffe/layers/triplet_loss_layer.cpp +++ b/src/caffe/layers/triplet_loss_layer.cpp @@ -12,33 +12,25 @@ template void TripletLossLayer::LayerSetUp( const vector*>& bottom, const vector*>& top) { LossLayer::LayerSetUp(bottom, top); - CHECK_EQ(bottom[0]->channels(), bottom[1]->channels()); - CHECK_EQ(bottom[0]->channels(), bottom[2]->channels()); - CHECK_EQ(bottom[0]->channels(), bottom[3]->channels()); - CHECK_EQ(bottom[0]->channels(), bottom[4]->channels()); + // number of triplet in a batch + int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); + // dimension of each descriptor + int dim = bottom[0]->count()/bottom[0]->num(); + CHECK_EQ(bottom[0]->channels(), dim); CHECK_EQ(bottom[0]->height(), 1); CHECK_EQ(bottom[0]->width(), 1); + CHECK_EQ(bottom[1]->channels(), 1); CHECK_EQ(bottom[1]->height(), 1); CHECK_EQ(bottom[1]->width(), 1); - CHECK_EQ(bottom[2]->height(), 1); - CHECK_EQ(bottom[2]->width(), 1); - CHECK_EQ(bottom[3]->height(), 1); - CHECK_EQ(bottom[3]->width(), 1); - CHECK_EQ(bottom[4]->height(), 1); - CHECK_EQ(bottom[4]->width(), 1); - CHECK_EQ(bottom[5]->channels(), 1); - CHECK_EQ(bottom[5]->height(), 1); - CHECK_EQ(bottom[5]->width(), 1); - diff_pos.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); - diff_neg.Reshape(bottom[0]->num(), 
bottom[0]->channels(), 1, 1); - diff_par.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); - diff_sq_pos.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); - diff_sq_neg.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); - diff_sq_par.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); - dist_sq_.Reshape(bottom[0]->num(), 1, 1, 1); - dist_sq_pos.Reshape(bottom[0]->num(), 1, 1, 1); - dist_sq_neg.Reshape(bottom[0]->num(), 1, 1, 1); - dist_sq_par.Reshape(bottom[0]->num(), 1, 1, 1); + // In each set, we have: + // the descriptor of reference sample, closest sample, and negative samples + // number of sets in the whole batch + int num_set = bottom[0]->num()/(2 + num_triplets); + dist_sq_.Reshape(num_set, 1, 1, 1); + diff_pos.Reshape(num_set, dim, 1, 1); + dist_sq_pos.Reshape(num_set, 1, 1, 1); + diff_neg.Reshape(num_set, dim, 1, 1); + dist_sq_neg.Reshape(num_set, 1, 1, 1); // vector of ones used to sum along channels summer_vec_.Reshape(bottom[0]->channels(), 1, 1, 1); for (int i = 0; i < bottom[0]->channels(); ++i) @@ -49,77 +41,78 @@ template void TripletLossLayer::Forward_cpu( const vector*>& bottom, const vector*>& top) { - int count = bottom[0]->count(); - caffe_sub( - count, - bottom[0]->cpu_data(), // a - bottom[1]->cpu_data(), // b - diff_pos.mutable_cpu_data()); // a_i-b_i: positive - caffe_sub( - count, - bottom[0]->cpu_data(), // a - bottom[2]->cpu_data(), // c - diff_neg.mutable_cpu_data()); // a_i-c_i: negative - caffe_sub( - count, - bottom[3]->cpu_data(), // d - bottom[4]->cpu_data(), // e - diff_par.mutable_cpu_data()); // d_i-e_i: pair wise - const int channels = bottom[0]->channels(); Dtype margin = this->layer_param_.triplet_loss_param().margin(); Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); + int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); + CHECK_EQ(bottom[0]->num()%(2 + num_triplets), 0); Dtype loss(0.0); + int dim = bottom[0]->count()/bottom[0]->num(); + int num_set = 
bottom[0]->num()/(2 + num_triplets); if (losstype == 0) { - for (int i = 0; i < bottom[0]->num(); ++i) { - // Triplet loss accumulation - // Loss component calculated from a and b - dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(channels, - diff_pos.cpu_data() + (i*channels), diff_pos.cpu_data() + (i*channels)); - // a b is a similar pair for triplet - dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i]; - // Loss component calculated from a and c - dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(channels, - diff_neg.cpu_data() + (i*channels), diff_neg.cpu_data() + (i*channels)); - // a c is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg.cpu_data()[i]; - // loss accumulated accumulated by the triplet part - loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); - // Pair wise loss accumulation - // Loss component calculated from d and e - dist_sq_par.mutable_cpu_data()[i] = caffe_cpu_dot(channels, - diff_par.cpu_data() + (i*channels), diff_par.cpu_data() + (i*channels)); - // d e is a similar pair for pair wise + for (int i = 0; i < num_set; ++i) { + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive + diff_pos.mutable_cpu_data() + i*dim); // reference-pose_close + // Loss component calculated from reference and close one + dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(dim, + diff_pos.cpu_data() + i*dim, diff_pos.cpu_data() + i*dim); + // a b is a similar pair for pair wise // loss accumulated by the pair wise part - loss += dist_sq_par.cpu_data()[i]; + loss += dist_sq_pos.cpu_data()[i]; + for (int triplet = 0; triplet < num_triplets; ++triplet) { + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i]; + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + 
num_triplets)*i*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + i*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + i*dim, diff_neg.cpu_data() + i*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg.cpu_data()[i]; + // loss accumulated accumulated by the triplet part + loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); + } } - loss = loss / static_cast(bottom[0]->num()) / Dtype(2); + loss = loss / static_cast(num_set) / Dtype(2); top[0]->mutable_cpu_data()[0] = loss; } else { - for (int i = 0; i < bottom[0]->num(); ++i) { - // softTriplet loss accumulation - // Loss component calculated from a and b - dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(channels, - diff_pos.cpu_data() + (i*channels), diff_pos.cpu_data() + (i*channels)); - // a b is a similar pair for triplet - dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i]; - dist_sq_.mutable_cpu_data()[i] += margin; - // Loss component calculated from a and c - dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(channels, - diff_neg.cpu_data() + (i*channels), diff_neg.cpu_data() + (i*channels)); - // a c is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[i] = 1 - \ -dist_sq_neg.cpu_data()[i] / dist_sq_.mutable_cpu_data()[i]; - // loss accumulated accumulated by the triplet part - loss += std::max(dist_sq_.cpu_data()[i], Dtype(0.0)); - // Pair wise loss accumulation - // Loss component calculated from d and e - dist_sq_par.mutable_cpu_data()[i] = caffe_cpu_dot(channels, - diff_par.cpu_data() + (i*channels), diff_par.cpu_data() + (i*channels)); - // d e is a similar pair for pair wise + for (int i = 0; i < num_set; ++i) { + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive + 
diff_pos.mutable_cpu_data() + i*dim); // reference-pose_close + // Loss component calculated from reference and close one + dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(dim, + diff_pos.cpu_data() + i*dim, diff_pos.cpu_data() + i*dim); + // a b is a similar pair for pair wise // loss accumulated by the pair wise part - loss += dist_sq_par.cpu_data()[i]; + loss += dist_sq_pos.cpu_data()[i]; + dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; + dist_sq_.mutable_cpu_data()[i] += margin; + for (int triplet = 0; triplet < num_triplets; ++triplet) { + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + i*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + i*dim, diff_neg.cpu_data() + i*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[i] = 1 - \ + dist_sq_neg.cpu_data()[i] / dist_sq_.mutable_cpu_data()[i]; + // loss accumulated accumulated by the triplet part + loss += std::max(dist_sq_.cpu_data()[i], Dtype(0.0)); + } } - loss = loss / static_cast(bottom[0]->num()) / Dtype(2); + loss = loss / static_cast(num_set) / Dtype(2); top[0]->mutable_cpu_data()[0] = loss; } } @@ -129,162 +122,268 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { Dtype margin = this->layer_param_.triplet_loss_param().margin(); Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); + int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); + int dim = bottom[0]->count()/bottom[0]->num(); + int num_set = bottom[0]->num()/(2 + num_triplets); if (losstype == 0) { - // BP for feat1 + // BP for feat1(extracted from reference) + for (int i = 0; i < 1; ++i) { if (propagate_down[0]) { const Dtype sign = 1; 
const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(bottom[0]->num()); - int num = bottom[0]->num(); - int channels = bottom[0]->channels(); - for (int j = 0; j < num; ++j) { + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { Dtype* bout = bottom[0]->mutable_cpu_diff(); - if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - // similar pairs - caffe_cpu_axpby( - channels, - alpha, - diff_pos.cpu_data() + (j*channels), - Dtype(0.0), - bout + (j*channels)); - // dissimilar pairs - caffe_cpu_axpby( - channels, - -alpha, - diff_neg.cpu_data() + (j*channels), - Dtype(1.0), - bout + (j*channels)); - } else { - caffe_set(channels, Dtype(0), bout + (j*channels)); + // the pair part + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; + // Loss component calculated from negative part + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // similar pair in triplet + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(1.0), + bout + (2 + num_triplets)*j*dim); + // dissimilar pair in triplet + caffe_cpu_axpby( + dim, + -alpha, + diff_neg.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + // BP for feat2(extracted 
from the closest sample) + for (int i = 1; i < 2; ++i) { + if (propagate_down[0]) { + const Dtype sign = -1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + // the pair part + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // similar pair in triplet + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } } } } - // BP for feat2 and feat3 - for (int i = 1; i < 3; ++i) { - if (propagate_down[i]) { - const Dtype sign = (i == 1) ? 
-1 : 1; + } + // BP for negative feature used in the num_triplets triplet part + for (int i = 2; i < 2 + num_triplets; ++i) { + if (propagate_down[0]) { + const Dtype sign = 1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(bottom[i]->num()); - int num = bottom[i]->num(); - int channels = bottom[i]->channels(); - for (int j = 0; j < num; ++j) { - Dtype* bout = bottom[i]->mutable_cpu_diff(); + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + i)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - if (i == 1) { - // similar pairs + // dissimilar pairs caffe_cpu_axpby( - channels, + dim, alpha, - diff_pos.cpu_data() + (j*channels), + diff_neg.cpu_data() + (j*dim), Dtype(0.0), - bout + (j*channels)); - } else { - // dissimilar pairs - caffe_cpu_axpby( - channels, - alpha, - diff_neg.cpu_data() + (j*channels), - Dtype(0.0), - bout + (j*channels)); - } + bout + ((2 + num_triplets)*j + i)*dim); } else { - caffe_set(channels, Dtype(0), bout + (j*channels)); + caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); } } } } } else { + for (int i = 0; i < 1; ++i) { // BP for data1(feat1) if (propagate_down[0]) { - const Dtype alpha = top[0]->cpu_diff()[0] / - static_cast(bottom[0]->num()); - int num = bottom[0]->num(); - int channels = bottom[0]->channels(); - for (int j = 0; j < num; ++j) { + const 
Dtype sign = 1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { Dtype* bout = bottom[0]->mutable_cpu_diff(); - if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - caffe_cpu_axpby( - channels, - alpha*dist_sq_neg.mutable_cpu_data()[j]\ -/((dist_sq_pos.mutable_cpu_data()[j]+margin)\ -*(dist_sq_pos.mutable_cpu_data()[j]+margin)), - diff_pos.cpu_data() + (j*channels), - Dtype(0.0), - bout + (j*channels)); - caffe_cpu_axpby( - channels, - -alpha*(dist_sq_pos.mutable_cpu_data()[j] + margin)\ -/((dist_sq_pos.mutable_cpu_data()[j] + margin)\ -*(dist_sq_pos.mutable_cpu_data()[j] + margin)), - diff_neg.cpu_data() + (j*channels), - Dtype(1.0), - bout + (j*channels)); - } else { - caffe_set(channels, Dtype(0), bout + (j*channels)); + // the pair part + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] = 1 - \ + dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + caffe_cpu_axpby( + dim, + alpha*dist_sq_neg.mutable_cpu_data()[j]\ + /((dist_sq_pos.mutable_cpu_data()[j]+margin)\ + *(dist_sq_pos.mutable_cpu_data()[j]+margin)), + diff_pos.cpu_data() + (j*dim), + 
Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + caffe_cpu_axpby( + dim, + -alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), + diff_neg.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } } } } + } + for (int i = 1; i < 2; ++i) { // BP for positive data(feat2) - if (propagate_down[1]) { - const Dtype alpha = top[0]->cpu_diff()[0] / - static_cast(bottom[1]->num()); - int num = bottom[1]->num(); - int channels = bottom[1]->channels(); - for (int j = 0; j < num; ++j) { - Dtype* bout = bottom[1]->mutable_cpu_diff(); - if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - caffe_cpu_axpby( - channels, - -alpha*dist_sq_neg.mutable_cpu_data()[j]\ -/((dist_sq_pos.mutable_cpu_data()[j] + margin)\ -*(dist_sq_pos.mutable_cpu_data()[j] + margin)), - diff_pos.cpu_data() + (j*channels), - Dtype(0.0), - bout + (j*channels)); - } else { - caffe_set(channels, Dtype(0), bout + (j*channels)); + if (propagate_down[0]) { + const Dtype sign = -1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + // the pair part + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] = 1 - \ + dist_sq_neg.cpu_data()[j] 
/ dist_sq_.mutable_cpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + caffe_cpu_axpby( + dim, + alpha*dist_sq_neg.mutable_cpu_data()[j]\ + /((dist_sq_pos.mutable_cpu_data()[j]+margin)\ + *(dist_sq_pos.mutable_cpu_data()[j]+margin)), + diff_pos.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } } } } + } + for (int i = 2; i < 2 + num_triplets; ++i) { // BP for negative data(feat3) - if (propagate_down[2]) { - const Dtype alpha = top[0]->cpu_diff()[0] / - static_cast(bottom[2]->num()); - int num = bottom[2]->num(); - int channels = bottom[2]->channels(); - for (int j = 0; j < num; ++j) { - Dtype* bout = bottom[2]->mutable_cpu_diff(); + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + i)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] = 1 - \ + dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; + // loss accumulated accumulated by the triplet part if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { caffe_cpu_axpby( - channels, + dim, alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), - diff_neg.cpu_data() + (j*channels), + diff_neg.cpu_data() + (j*dim), Dtype(0.0), - bout + (j*channels)); + bout + ((2 + num_triplets)*j + i)*dim); } else { - caffe_set(channels, Dtype(0), bout + 
(j*channels)); + caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); } } } } - // pair wise back - for (int i = 3; i < 5; ++i) { - if (propagate_down[i]) { - const Dtype sign = (i == 3) ? 1 : -1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(bottom[i]->num()); - int num = bottom[i]->num(); - int channels = bottom[i]->channels(); - for (int j = 0; j < num; ++j) { - Dtype* bout = bottom[i]->mutable_cpu_diff(); // similar pairs - caffe_cpu_axpby( - channels, - alpha, - diff_par.cpu_data() + (j*channels), - Dtype(0.0), - bout + (j*channels)); - } - } } } diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index 8b90554b689..37480f6de8c 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -439,6 +439,7 @@ message TripletLossParameter { //margin for negative triplet optional float margin = 1 [default = 1.0]; optional uint32 losstype = 2 [default = 1]; + optional uint32 num_triplets = 3 [default = 3]; } message ConvolutionParameter { diff --git a/src/caffe/test/test_triplet_loss_layer.cpp b/src/caffe/test/test_triplet_loss_layer.cpp index ccbefbd059f..fa44fb5e62f 100644 --- a/src/caffe/test/test_triplet_loss_layer.cpp +++ b/src/caffe/test/test_triplet_loss_layer.cpp @@ -22,28 +22,16 @@ class TripletLossLayerTest : public MultiDeviceTest { protected: TripletLossLayerTest() - : blob_bottom_data_i_(new Blob(512, 1, 1, 1)), - blob_bottom_data_j_(new Blob(512, 1, 1, 1)), - blob_bottom_data_k_(new Blob(512, 1, 1, 1)), - blob_bottom_data_l_(new Blob(512, 1, 1, 1)), - blob_bottom_data_m_(new Blob(512, 1, 1, 1)), - blob_bottom_y_(new Blob(512, 1, 1, 1)), + : blob_bottom_data_(new Blob(250, 1, 1, 1)), + blob_bottom_y_(new Blob(250, 1, 1, 1)), blob_top_loss_(new Blob()) { // fill the values FillerParameter filler_param; filler_param.set_min(-1.0); filler_param.set_max(1.0); // distances~=1.0 to test both sides of margin UniformFiller filler(filler_param); - filler.Fill(this->blob_bottom_data_i_); - 
blob_bottom_vec_.push_back(blob_bottom_data_i_); - filler.Fill(this->blob_bottom_data_j_); - blob_bottom_vec_.push_back(blob_bottom_data_j_); - filler.Fill(this->blob_bottom_data_k_); - blob_bottom_vec_.push_back(blob_bottom_data_k_); - filler.Fill(this->blob_bottom_data_l_); - blob_bottom_vec_.push_back(blob_bottom_data_l_); - filler.Fill(this->blob_bottom_data_m_); - blob_bottom_vec_.push_back(blob_bottom_data_m_); + filler.Fill(this->blob_bottom_data_); + blob_bottom_vec_.push_back(blob_bottom_data_); for (int i = 0; i < blob_bottom_y_->count(); ++i) { blob_bottom_y_->mutable_cpu_data()[i] = caffe_rng_rand() % 2; // 0 or 1 } @@ -51,20 +39,12 @@ class TripletLossLayerTest : public MultiDeviceTest { blob_top_vec_.push_back(blob_top_loss_); } virtual ~TripletLossLayerTest() { - delete blob_bottom_data_i_; - delete blob_bottom_data_j_; - delete blob_bottom_data_k_; - delete blob_bottom_data_l_; - delete blob_bottom_data_m_; + delete blob_bottom_data_; delete blob_bottom_y_; delete blob_top_loss_; } - Blob* const blob_bottom_data_i_; - Blob* const blob_bottom_data_j_; - Blob* const blob_bottom_data_k_; - Blob* const blob_bottom_data_l_; - Blob* const blob_bottom_data_m_; + Blob* const blob_bottom_data_; Blob* const blob_bottom_y_; Blob* const blob_top_loss_; vector*> blob_bottom_vec_; @@ -82,28 +62,29 @@ TYPED_TEST(TripletLossLayerTest, TestForward) { // manually compute to compare const Dtype margin = layer_param.triplet_loss_param().margin(); const Dtype losstype = layer_param.triplet_loss_param().losstype(); - const int num = this->blob_bottom_data_i_->num(); - const int channels = this->blob_bottom_data_i_->channels(); + const int num_triplets = 3; + const int num_set = this->blob_bottom_data_->num()/(2 + num_triplets); + const int channels = this->blob_bottom_data_->channels(); Dtype loss(0); if (losstype == 0) { - for (int i = 0; i < num; ++i) { - Dtype dist_sq(0); + for (int i = 0; i < num_set; ++i) { Dtype dist_par(0); - for (int j = 0; j < channels; ++j) { - 
Dtype diff_pos = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - - this->blob_bottom_data_j_->cpu_data()[i*channels+j]; - dist_sq += diff_pos*diff_pos; - Dtype diff_neg = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - - this->blob_bottom_data_k_->cpu_data()[i*channels+j]; - dist_sq -= diff_neg*diff_neg; - Dtype diff_par = this->blob_bottom_data_l_->cpu_data()[i*channels+j] - - this->blob_bottom_data_m_->cpu_data()[i*channels+j]; - dist_par = diff_par*diff_par; + for (int triplet = 0; triplet < num_triplets; ++triplet) { + Dtype dist_sq(0); + for (int j = 0; j < channels; ++j) { + Dtype diff_pos = this->blob_bottom_data_->cpu_data()[(2+num_triplets)*i*channels+j] - + this->blob_bottom_data_->cpu_data()[((2+num_triplets)*i+1)*channels+j]; + dist_par = diff_pos*diff_pos; + loss += dist_par; + dist_sq += diff_pos*diff_pos; + Dtype diff_neg = this->blob_bottom_data_->cpu_data()[(2+num_triplets)*i*channels+j] - + this->blob_bottom_data_->cpu_data()[((2+num_triplets)*i+2+triplet)*channels+j]; + dist_sq -= diff_neg*diff_neg; + } + loss += std::max(margin + dist_sq, Dtype(0.0)); } - loss += std::max(margin + dist_sq, Dtype(0.0)); - loss += dist_par; } - } else { + } /*else { for (int i = 0; i < num; ++i) { Dtype dist_sq(0); Dtype dist_par(0); @@ -122,8 +103,8 @@ TYPED_TEST(TripletLossLayerTest, TestForward) { loss += std::max(dist_sq, Dtype(0.0)); loss += dist_par; } - } - loss /= static_cast(num) * Dtype(2); + }*/ + loss /= static_cast(num_set) * Dtype(2); EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); } @@ -136,13 +117,5 @@ TYPED_TEST(TripletLossLayerTest, TestGradient) { // check the gradient for the first 5 bottom layers checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, this->blob_top_vec_, 0); - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_, 1); - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_, 2); - checker.CheckGradientExhaustive(&layer, 
this->blob_bottom_vec_, - this->blob_top_vec_, 3); - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_, 4); } } // namespace caffe From c3f89b832b5a516d1e63a8d41bf498c66eaf2731 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Mon, 10 Aug 2015 13:28:11 +0800 Subject: [PATCH 37/82] delete file --- ...plet_loss_layer.cpp => test_triplet_loss_layer} | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) rename src/caffe/test/{test_triplet_loss_layer.cpp => test_triplet_loss_layer} (89%) diff --git a/src/caffe/test/test_triplet_loss_layer.cpp b/src/caffe/test/test_triplet_loss_layer similarity index 89% rename from src/caffe/test/test_triplet_loss_layer.cpp rename to src/caffe/test/test_triplet_loss_layer index fa44fb5e62f..6c25ce9bd4b 100644 --- a/src/caffe/test/test_triplet_loss_layer.cpp +++ b/src/caffe/test/test_triplet_loss_layer @@ -22,8 +22,8 @@ class TripletLossLayerTest : public MultiDeviceTest { protected: TripletLossLayerTest() - : blob_bottom_data_(new Blob(250, 1, 1, 1)), - blob_bottom_y_(new Blob(250, 1, 1, 1)), + : blob_bottom_data_(new Blob(50, 1, 1, 1)), + blob_bottom_y_(new Blob(50, 1, 1, 1)), blob_top_loss_(new Blob()) { // fill the values FillerParameter filler_param; @@ -61,7 +61,7 @@ TYPED_TEST(TripletLossLayerTest, TestForward) { layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // manually compute to compare const Dtype margin = layer_param.triplet_loss_param().margin(); - const Dtype losstype = layer_param.triplet_loss_param().losstype(); + const Dtype losstype = 0;//layer_param.triplet_loss_param().losstype(); const int num_triplets = 3; const int num_set = this->blob_bottom_data_->num()/(2 + num_triplets); const int channels = this->blob_bottom_data_->channels(); @@ -69,13 +69,17 @@ TYPED_TEST(TripletLossLayerTest, TestForward) { if (losstype == 0) { for (int i = 0; i < num_set; ++i) { Dtype dist_par(0); + for (int j = 0; j < channels; ++j) { + Dtype diff_pos = 
this->blob_bottom_data_->cpu_data()[(2+num_triplets)*i*channels+j] - + this->blob_bottom_data_->cpu_data()[((2+num_triplets)*i+1)*channels+j]; + dist_par = diff_pos*diff_pos; + loss += dist_par; + } for (int triplet = 0; triplet < num_triplets; ++triplet) { Dtype dist_sq(0); for (int j = 0; j < channels; ++j) { Dtype diff_pos = this->blob_bottom_data_->cpu_data()[(2+num_triplets)*i*channels+j] - this->blob_bottom_data_->cpu_data()[((2+num_triplets)*i+1)*channels+j]; - dist_par = diff_pos*diff_pos; - loss += dist_par; dist_sq += diff_pos*diff_pos; Dtype diff_neg = this->blob_bottom_data_->cpu_data()[(2+num_triplets)*i*channels+j] - this->blob_bottom_data_->cpu_data()[((2+num_triplets)*i+2+triplet)*channels+j]; From d78cf528b65f43d4c21bf644cd19a189cdd945f0 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Mon, 10 Aug 2015 14:32:42 +0800 Subject: [PATCH 38/82] add gpu codes for triplet loss --- examples/triplet/convert_3d_triplet_data.cpp | 20 +- src/caffe/layers/triplet_loss_layer.cu | 628 +++++++++++++------ 2 files changed, 431 insertions(+), 217 deletions(-) diff --git a/examples/triplet/convert_3d_triplet_data.cpp b/examples/triplet/convert_3d_triplet_data.cpp index ce1981d90da..f610f9c4de6 100644 --- a/examples/triplet/convert_3d_triplet_data.cpp +++ b/examples/triplet/convert_3d_triplet_data.cpp @@ -1,13 +1,13 @@ // Usage: // convert_3d_data input_image_file input_label_file output_db_file #include // NOLINT(readability/streams) -#include #include #include "caffe/proto/caffe.pb.h" #include "caffe/util/math_functions.hpp" #include "glog/logging.h" #include "google/protobuf/text_format.h" #include "leveldb/db.h" +#include "math.h" #include "stdint.h" uint32_t swap_endian(uint32_t val) { @@ -144,31 +144,31 @@ void convert_dataset(const char* image_filename, const char* label_filename, triplet3_pass = true; if (pair_pass && triplet1_pass && triplet2_pass && triplet3_pass) { datum.set_data(pixels1, rows*cols); // set data - datum.set_label(int(*label_i)); + 
datum.set_label(static_cast(*label_i)); datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", counter); db->Put(leveldb::WriteOptions(), std::string(key), value); counter++; datum.set_data(pixels2, rows*cols); // set data - datum.set_label(int(*label_j)); + datum.set_label(static_cast(*label_j)); datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", counter); db->Put(leveldb::WriteOptions(), std::string(key), value); counter++; datum.set_data(pixels3, rows*cols); // set data - datum.set_label(int(*label_k)); + datum.set_label(static_cast(*label_k)); datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", counter); db->Put(leveldb::WriteOptions(), std::string(key), value); counter++; datum.set_data(pixels4, rows*cols); // set data - datum.set_label(int(*label_l)); + datum.set_label(static_cast(*label_l)); datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", counter); db->Put(leveldb::WriteOptions(), std::string(key), value); counter++; datum.set_data(pixels5, rows*cols); // set data - datum.set_label(int(*label_m)); + datum.set_label(static_cast(*label_m)); datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", counter); db->Put(leveldb::WriteOptions(), std::string(key), value); @@ -176,9 +176,9 @@ void convert_dataset(const char* image_filename, const char* label_filename, } else { class_ind--; } - } // iteration in the samples of all class - } // iteration in the samples in one class - } // iteration in times + } // iteration in the samples of all class + } // iteration in the samples in one class + } // iteration in times delete db; delete pixels1; delete pixels2; @@ -189,7 +189,7 @@ void convert_dataset(const char* image_filename, const char* label_filename, int main(int argc, char** argv) { if (argc != 5) { - printf("This script converts the images dataset to the leveldb format used\n" + printf("This script converts the dataset to the leveldb format used\n" "by caffe to train a 
triplet network.\n" "Usage:\n" " convert_3d_data input_image_file input_label_file " diff --git a/src/caffe/layers/triplet_loss_layer.cu b/src/caffe/layers/triplet_loss_layer.cu index 4f7391af787..691b98055f8 100644 --- a/src/caffe/layers/triplet_loss_layer.cu +++ b/src/caffe/layers/triplet_loss_layer.cu @@ -12,104 +12,129 @@ template void TripletLossLayer::Forward_gpu( const vector*>& bottom, const vector*>& top) { - int count = bottom[0]->count(); - caffe_gpu_sub( - count, - bottom[0]->gpu_data(), // a - bottom[1]->gpu_data(), // b - diff_pos.mutable_gpu_data()); // a_i-b_i - caffe_gpu_sub( - count, - bottom[0]->gpu_data(), // a - bottom[2]->gpu_data(), // c - diff_neg.mutable_gpu_data()); // a_i-c_i - caffe_gpu_sub( - count, - bottom[3]->gpu_data(), // d - bottom[4]->gpu_data(), // e - diff_par.mutable_gpu_data()); // d_i-e_i - caffe_gpu_powx( - count, - diff_pos.mutable_gpu_data(), // a_i-b_i - Dtype(2), - diff_sq_pos.mutable_gpu_data()); // (a_i-b_i)^2 - caffe_gpu_powx( - count, - diff_neg.mutable_gpu_data(), // a_i-c_i - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (a_i-c_i)^2 - caffe_gpu_powx( - count, - diff_par.mutable_gpu_data(), // d_i-e_i - Dtype(2), - diff_sq_par.mutable_gpu_data()); // (d_i-e_i)^2 - caffe_gpu_gemv( - CblasNoTrans, - bottom[0]->num(), - bottom[0]->channels(), - Dtype(1.0), - diff_sq_pos.gpu_data(), // (a_i-b_i)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_pos.mutable_gpu_data()); // \Sum (a_i-b_i)^2 - caffe_gpu_gemv( - CblasNoTrans, - bottom[0]->num(), - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (a_i-c_i)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data()); // \Sum (a_i-c_i)^2 - caffe_gpu_gemv( - CblasNoTrans, - bottom[0]->num(), - bottom[0]->channels(), - Dtype(1.0), - diff_sq_par.gpu_data(), // (a_i-c_i)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_par.mutable_gpu_data()); // \Sum (a_i-c_i)^2 - const int channels = bottom[0]->channels(); Dtype margin = 
this->layer_param_.triplet_loss_param().margin(); Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); + int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); + CHECK_EQ(bottom[0]->num()%(2 + num_triplets), 0); Dtype loss(0.0); - + int dim = bottom[0]->count()/bottom[0]->num(); + int num_set = bottom[0]->num()/(2 + num_triplets); if (losstype == 0) { - for (int i = 0; i < bottom[0]->num(); ++i) { - // Loss component calculated from ab - // ab is a similar pair - dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.gpu_data()[i]; - // Loss component calculated from ac - // ac is a dissimilar pair - dist_sq_.mutable_gpu_data()[i] -= dist_sq_neg.gpu_data()[i]; - loss += std::max(margin + dist_sq_.gpu_data()[i], Dtype(0.0)); - // Pair wise loss accumulation - // d e is a similar pair for pair wise + for (int i = 0; i < num_set; ++i) { + caffe_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive + diff_pos.mutable_gpu_data() + i*dim); // reference-pose_close + caffe_gpu_powx( + dim, + diff_pos.mutable_gpu_data() + i*dim, // reference-pose_close + Dtype(2), + diff_sq_pos.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_pos.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_pos.mutable_gpu_data() + i); // \Sum (reference-pose_close)^2 + // a b is a similar pair for pair wise // loss accumulated by the pair wise part - loss += dist_sq_par.gpu_data()[i]; + loss += dist_sq_pos.gpu_data()[i]; + for (int triplet = 0; triplet < num_triplets; ++triplet) { + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.gpu_data()[i]; + // Loss component calculated from negative part + caffe_sub( + 
dim, + bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, + diff_neg.mutable_gpu_data() + i*dim); // reference-negative + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + i*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + i); // \Sum (reference-pose_close)^2 + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[i] -= dist_sq_neg.gpu_data()[i]; + // loss accumulated accumulated by the triplet part + loss += std::max(margin + dist_sq_.gpu_data()[i], Dtype(0.0)); + } } - loss = loss / static_cast(bottom[0]->num()) / Dtype(2); + loss = loss / static_cast(num_set) / Dtype(2); top[0]->mutable_gpu_data()[0] = loss; } else { - for (int i = 0; i < bottom[0]->num(); ++i) { - // softTriplet loss accumulation - // Loss component calculated from a and b - // a b is a similar pair for triplet - dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.gpu_data()[i]; - dist_sq_.mutable_gpu_data()[i] += margin; - // Loss component calculated from a and c - // a c is a dissimilar pair for triplet - dist_sq_.mutable_gpu_data()[i] = 1 - \ -dist_sq_neg.gpu_data()[i] / dist_sq_.mutable_gpu_data()[i]; - // loss accumulated accumulated by the triplet part - loss += std::max(dist_sq_.gpu_data()[i], Dtype(0.0)); - // Pair wise loss accumulation - // d e is a similar pair for pair wise + for (int i = 0; i < num_set; ++i) { + caffe_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive + diff_pos.mutable_gpu_data() + i*dim); // reference-pose_close + // Loss component 
calculated from reference and close one + caffe_gpu_powx( + dim, + diff_pos.mutable_gpu_data() + i*dim, // reference-pose_close + Dtype(2), + diff_sq_pos.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_pos.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_pos.mutable_gpu_data() + i); // \Sum (reference-pose_close)^2 + // a b is a similar pair for pair wise // loss accumulated by the pair wise part - loss += dist_sq_par.gpu_data()[i]; + loss += dist_sq_pos.gpu_data()[i]; + dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.mutable_gpu_data()[i]; + dist_sq_.mutable_gpu_data()[i] += margin; + for (int triplet = 0; triplet < num_triplets; ++triplet) { + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, + diff_neg.mutable_gpu_data() + i*dim); // reference-negative + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + i*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + i); // \Sum (reference-pose_close)^2 + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[i] = 1 - \ + dist_sq_neg.gpu_data()[i] / dist_sq_.mutable_gpu_data()[i]; + // loss accumulated accumulated by the triplet part + loss += std::max(dist_sq_.gpu_data()[i], Dtype(0.0)); + } } - loss = loss / static_cast(bottom[0]->num()) / Dtype(2); + loss = loss / static_cast(num_set) / Dtype(2); 
top[0]->mutable_gpu_data()[0] = loss; } } @@ -119,165 +144,354 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { Dtype margin = this->layer_param_.triplet_loss_param().margin(); Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); -// there must be further check to ensure the gradient calc + int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); + int dim = bottom[0]->count()/bottom[0]->num(); + int num_set = bottom[0]->num()/(2 + num_triplets); if (losstype == 0) { + // BP for feat1(extracted from reference) + for (int i = 0; i < 1; ++i) { if (propagate_down[0]) { const Dtype sign = 1; const Dtype alpha = sign * top[0]->gpu_diff()[0] / - static_cast(bottom[0]->num()); - int num = bottom[0]->num(); - int channels = bottom[0]->channels(); - for (int j = 0; j < num; ++j) { + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { Dtype* bout = bottom[0]->mutable_gpu_diff(); - if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { - // similar pairs - caffe_gpu_axpby( - channels, - alpha, - diff_pos.gpu_data() + (j*channels), - Dtype(0.0), - bout + (j*channels)); - // dissimilar pairs - caffe_gpu_axpby( - channels, - -alpha, - diff_neg.gpu_data() + (j*channels), - Dtype(1.0), - bout + (j*channels)); - } else { - caffe_set(channels, Dtype(0), bout + (j*channels)); + // the pair part + caffe_gpu_axpby( + dim, + alpha, + diff_pos.gpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + caffe_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_gpu_data() + j*dim); // reference-negative + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // 
(reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; + // Loss component calculated from negative part + if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { + // similar pair in triplet + caffe_gpu_axpby( + dim, + alpha, + diff_pos.gpu_data() + (j*dim), + Dtype(1.0), + bout + (2 + num_triplets)*j*dim); + // dissimilar pair in triplet + caffe_gpu_axpby( + dim, + -alpha, + diff_neg.gpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } } } } - for (int i = 1; i < 3; ++i) { -// there must be further check to ensure the gradient calc - if (propagate_down[i]) { - const Dtype sign = (i == 1) ? 
-1 : 1; + } + // BP for feat2(extracted from the closest sample) + for (int i = 1; i < 2; ++i) { + if (propagate_down[0]) { + const Dtype sign = -1; const Dtype alpha = sign * top[0]->gpu_diff()[0] / - static_cast(bottom[i]->num()); - int num = bottom[i]->num(); - int channels = bottom[i]->channels(); - for (int j = 0; j < num; ++j) { - Dtype* bout = bottom[i]->mutable_gpu_diff(); + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_gpu_diff(); + // the pair part + caffe_gpu_axpby( + dim, + alpha, + diff_pos.gpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + caffe_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_gpu_data() + j*dim); // reference-negative + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; + if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { + // similar pair in triplet + caffe_gpu_axpby( + dim, + alpha, + diff_pos.gpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + // BP for negative feature used in the num_triplets triplet part + for 
(int i = 2; i < 2 + num_triplets; ++i) { + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->gpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_gpu_diff(); + caffe_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*j + i)*dim, + diff_neg.mutable_gpu_data() + j*dim); // reference-negative + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { - if (i == 1) { - // similar pairs - caffe_gpu_axpby( - channels, - alpha, - diff_pos.gpu_data() + (j*channels), - Dtype(0.0), - bout + (j*channels)); - } else { - // dissimilar pairs + // dissimilar pairs caffe_gpu_axpby( - channels, + dim, alpha, - diff_neg.gpu_data() + (j*channels), + diff_neg.gpu_data() + (j*dim), Dtype(0.0), - bout + (j*channels)); - } + bout + ((2 + num_triplets)*j + i)*dim); } else { - caffe_set(channels, Dtype(0), bout + (j*channels)); + caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); } } } - } + } } else { + for (int i = 0; i < 1; ++i) { // BP for data1(feat1) if (propagate_down[0]) { - const Dtype alpha = top[0]->gpu_diff()[0] / - static_cast(bottom[0]->num()); - int num = 
bottom[0]->num(); - int channels = bottom[0]->channels(); - for (int j = 0; j < num; ++j) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->gpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { Dtype* bout = bottom[0]->mutable_gpu_diff(); - if ((dist_sq_.gpu_data()[j]) > Dtype(0.0)) { - caffe_gpu_axpby( - channels, - alpha*dist_sq_neg.mutable_gpu_data()[j]\ -/((dist_sq_pos.mutable_gpu_data()[j]+margin)\ -*(dist_sq_pos.mutable_gpu_data()[j]+margin)), - diff_pos.gpu_data() + (j*channels), - Dtype(0.0), - bout + (j*channels)); - caffe_gpu_axpby( - channels, - -alpha*(dist_sq_pos.mutable_gpu_data()[j] + margin)\ -/((dist_sq_pos.mutable_gpu_data()[j] + margin)\ -*(dist_sq_pos.mutable_gpu_data()[j] + margin)), - diff_neg.gpu_data() + (j*channels), - Dtype(1.0), - bout + (j*channels)); - } else { - caffe_set(channels, Dtype(0), bout + (j*channels)); + // the pair part + caffe_gpu_axpby( + dim, + alpha, + diff_pos.gpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; + dist_sq_.mutable_gpu_data()[j] += margin; + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_gpu_data() + j*dim); // reference-negative + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 + // a and 
negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[j] = 1 - \ + dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.gpu_data()[j]) > Dtype(0.0)) { + caffe_gpu_axpby( + dim, + alpha*dist_sq_neg.mutable_gpu_data()[j]\ + /((dist_sq_pos.mutable_gpu_data()[j]+margin)\ + *(dist_sq_pos.mutable_gpu_data()[j]+margin)), + diff_pos.gpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + caffe_gpu_axpby( + dim, + -alpha/(dist_sq_pos.mutable_gpu_data()[j] + margin), + diff_neg.gpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } } } } + } + for (int i = 1; i < 2; ++i) { // BP for positive data(feat2) - if (propagate_down[1]) { - const Dtype alpha = top[0]->gpu_diff()[0] / - static_cast(bottom[1]->num()); - int num = bottom[1]->num(); - int channels = bottom[1]->channels(); - for (int j = 0; j < num; ++j) { - Dtype* bout = bottom[1]->mutable_gpu_diff(); - if ((dist_sq_.gpu_data()[j]) > Dtype(0.0)) { - caffe_gpu_axpby( - channels, - -alpha*dist_sq_neg.mutable_gpu_data()[j]\ -/((dist_sq_pos.mutable_gpu_data()[j] + margin)\ -*(dist_sq_pos.mutable_gpu_data()[j] + margin)), - diff_pos.gpu_data() + (j*channels), - Dtype(0.0), - bout + (j*channels)); - } else { - caffe_set(channels, Dtype(0), bout + (j*channels)); + if (propagate_down[0]) { + const Dtype sign = -1; + const Dtype alpha = sign * top[0]->gpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_gpu_diff(); + // the pair part + caffe_gpu_axpby( + dim, + alpha, + diff_pos.gpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; + dist_sq_.mutable_gpu_data()[j] += margin; + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + // Loss component calculated from negative part + 
caffe_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_gpu_data() + j*dim); // reference-negative + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[j] = 1 - \ + dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.gpu_data()[j]) > Dtype(0.0)) { + caffe_gpu_axpby( + dim, + alpha*dist_sq_neg.mutable_gpu_data()[j]\ + /((dist_sq_pos.mutable_gpu_data()[j]+margin)\ + *(dist_sq_pos.mutable_gpu_data()[j]+margin)), + diff_pos.gpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } } } } + } + for (int i = 2; i < 2 + num_triplets; ++i) { // BP for negative data(feat3) - if (propagate_down[2]) { - const Dtype alpha = top[0]->gpu_diff()[0] / - static_cast(bottom[2]->num()); - int num = bottom[2]->num(); - int channels = bottom[2]->channels(); - for (int j = 0; j < num; ++j) { - Dtype* bout = bottom[2]->mutable_gpu_diff(); + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->gpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_gpu_diff(); + dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; + dist_sq_.mutable_gpu_data()[j] += margin; + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->gpu_data() + (2 + 
num_triplets)*j*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*j + i)*dim, + diff_neg.mutable_gpu_data() + j*dim); // reference-negative + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[j] = 1 - \ + dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; + // loss accumulated accumulated by the triplet part if ((dist_sq_.gpu_data()[j]) > Dtype(0.0)) { caffe_gpu_axpby( - channels, + dim, alpha/(dist_sq_pos.mutable_gpu_data()[j] + margin), - diff_neg.gpu_data() + (j*channels), + diff_neg.gpu_data() + (j*dim), Dtype(0.0), - bout + (j*channels)); + bout + ((2 + num_triplets)*j + i)*dim); } else { - caffe_set(channels, Dtype(0), bout + (j*channels)); + caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); } } } } - // pair wise back - for (int i = 3; i < 5; ++i) { - if (propagate_down[i]) { - const Dtype sign = (i == 3) ? 
1 : -1; - const Dtype alpha = sign * top[0]->gpu_diff()[0] / - static_cast(bottom[i]->num()); - int num = bottom[i]->num(); - int channels = bottom[i]->channels(); - for (int j = 0; j < num; ++j) { - Dtype* bout = bottom[i]->mutable_gpu_diff(); // similar pairs - caffe_gpu_axpby( - channels, - alpha, - diff_par.gpu_data() + (j*channels), - Dtype(0.0), - bout + (j*channels)); - } - } } } -INSTANTIATE_LAYER_GPU_FUNCS(TripletLossLayer); +#ifdef CPU_ONLY +STUB_GPU(TripletLossLayer); +#endif + +INSTANTIATE_CLASS(TripletLossLayer); +REGISTER_LAYER_CLASS(TripletLoss); } // namespace caffe From 05549e4a9f9dff65f36f92562671a8bcfe8c501c Mon Sep 17 00:00:00 2001 From: Wangyida Date: Mon, 10 Aug 2015 19:54:16 +0800 Subject: [PATCH 39/82] add initiate class name of triplet loss layer --- src/caffe/layers/triplet_loss_layer.cu | 247 ++++++++++++------------- 1 file changed, 121 insertions(+), 126 deletions(-) diff --git a/src/caffe/layers/triplet_loss_layer.cu b/src/caffe/layers/triplet_loss_layer.cu index 691b98055f8..4e2b9a444be 100644 --- a/src/caffe/layers/triplet_loss_layer.cu +++ b/src/caffe/layers/triplet_loss_layer.cu @@ -54,21 +54,21 @@ void TripletLossLayer::Forward_gpu( bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + i*dim); // reference-negative - caffe_gpu_powx( - dim, - diff_neg.mutable_gpu_data() + i*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + i); // \Sum (reference-pose_close)^2 + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + i*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // 
(reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + i); // \Sum (reference-pose_close)^2 // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[i] -= dist_sq_neg.gpu_data()[i]; // loss accumulated accumulated by the triplet part @@ -112,21 +112,21 @@ void TripletLossLayer::Forward_gpu( bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + i*dim); // reference-negative - caffe_gpu_powx( - dim, - diff_neg.mutable_gpu_data() + i*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + i); // \Sum (reference-pose_close)^2 + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + i*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + i); // \Sum (reference-pose_close)^2 // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[i] = 1 - \ dist_sq_neg.gpu_data()[i] / dist_sq_.mutable_gpu_data()[i]; @@ -170,21 +170,21 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // 
reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + j*dim); // reference-negative - caffe_gpu_powx( - dim, - diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 // Triplet loss accumulation // a and negative[triplet] is a similar pair for triplet dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; @@ -236,21 +236,21 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, // Triplet loss accumulation // a and negative[triplet] is a similar pair for triplet dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; - caffe_gpu_powx( - dim, - diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + j*dim, // 
reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { @@ -282,21 +282,21 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, // Triplet loss accumulation // a and negative[triplet] is a similar pair for triplet dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; - caffe_gpu_powx( - dim, - diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { @@ -339,21 +339,21 @@ void TripletLossLayer::Backward_gpu(const 
vector*>& top, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + j*dim); // reference-negative - caffe_gpu_powx( - dim, - diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] = 1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; @@ -403,21 +403,21 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + j*dim); // reference-negative - caffe_gpu_powx( - dim, - diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - 
dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] = 1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; @@ -452,21 +452,21 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + i)*dim, diff_neg.mutable_gpu_data() + j*dim); // reference-negative - caffe_gpu_powx( - dim, - diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] = 
1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; @@ -487,11 +487,6 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, } } -#ifdef CPU_ONLY -STUB_GPU(TripletLossLayer); -#endif - -INSTANTIATE_CLASS(TripletLossLayer); -REGISTER_LAYER_CLASS(TripletLoss); +INSTANTIATE_LAYER_GPU_FUNCS(TripletLossLayer); } // namespace caffe From 7c13cdac04cf05c510ddc57c15a69c9f1a0a20b2 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Mon, 10 Aug 2015 20:05:44 +0800 Subject: [PATCH 40/82] add initiate class name of triplet loss layer --- src/caffe/layers/triplet_loss_layer.cu | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/caffe/layers/triplet_loss_layer.cu b/src/caffe/layers/triplet_loss_layer.cu index 4e2b9a444be..09243ee1dc4 100644 --- a/src/caffe/layers/triplet_loss_layer.cu +++ b/src/caffe/layers/triplet_loss_layer.cu @@ -68,7 +68,7 @@ void TripletLossLayer::Forward_gpu( diff_sq_neg.gpu_data(), // (reference-pose_close)^2 summer_vec_.gpu_data(), Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + i); // \Sum (reference-pose_close)^2 + dist_sq_neg.mutable_gpu_data() + i); // \Sum(reference-pose_close)^2 // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[i] -= dist_sq_neg.gpu_data()[i]; // loss accumulated accumulated by the triplet part @@ -126,7 +126,7 @@ void TripletLossLayer::Forward_gpu( diff_sq_neg.gpu_data(), // (reference-pose_close)^2 summer_vec_.gpu_data(), Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + i); // \Sum (reference-pose_close)^2 + dist_sq_neg.mutable_gpu_data() + i); // \Sum(reference-pose_close)^2 // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[i] = 1 - \ dist_sq_neg.gpu_data()[i] / dist_sq_.mutable_gpu_data()[i]; @@ -184,7 +184,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, diff_sq_neg.gpu_data(), // (reference-pose_close)^2 summer_vec_.gpu_data(), Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum 
(reference-pose_close)^2 + dist_sq_neg.mutable_gpu_data() + j); // \Sum(ref-close)^2 // Triplet loss accumulation // a and negative[triplet] is a similar pair for triplet dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; @@ -250,7 +250,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, diff_sq_neg.gpu_data(), // (reference-pose_close)^2 summer_vec_.gpu_data(), Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 + dist_sq_neg.mutable_gpu_data() + j); // \Sum (ref-close)^2 // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { @@ -296,7 +296,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, diff_sq_neg.gpu_data(), // (reference-pose_close)^2 summer_vec_.gpu_data(), Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 + dist_sq_neg.mutable_gpu_data() + j); // \Sum (ref-close)^2 // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { @@ -353,7 +353,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, diff_sq_neg.gpu_data(), // (reference-pose_close)^2 summer_vec_.gpu_data(), Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 + dist_sq_neg.mutable_gpu_data() + j); // \Sum (ref-close)^2 // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] = 1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; @@ -417,7 +417,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, diff_sq_neg.gpu_data(), // (reference-pose_close)^2 summer_vec_.gpu_data(), Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 + dist_sq_neg.mutable_gpu_data() + j); // \Sum (ref-close)^2 // a and negative[triplet] is a dissimilar pair for triplet 
dist_sq_.mutable_gpu_data()[j] = 1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; @@ -466,7 +466,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, diff_sq_neg.gpu_data(), // (reference-pose_close)^2 summer_vec_.gpu_data(), Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 + dist_sq_neg.mutable_gpu_data() + j); // \Sum (ref-close)^2 // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] = 1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; From f6d25b586a4fbfb5ac20c8b81d5e5092060f97b6 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Wed, 12 Aug 2015 12:50:56 +0800 Subject: [PATCH 41/82] debug GPU triplet loss codes for loss type 0 --- src/caffe/layers/triplet_loss_layer.cu | 200 +++++++------------------ 1 file changed, 50 insertions(+), 150 deletions(-) diff --git a/src/caffe/layers/triplet_loss_layer.cu b/src/caffe/layers/triplet_loss_layer.cu index 09243ee1dc4..87c14a83df0 100644 --- a/src/caffe/layers/triplet_loss_layer.cu +++ b/src/caffe/layers/triplet_loss_layer.cu @@ -21,26 +21,16 @@ void TripletLossLayer::Forward_gpu( int num_set = bottom[0]->num()/(2 + num_triplets); if (losstype == 0) { for (int i = 0; i < num_set; ++i) { - caffe_sub( + caffe_gpu_sub( dim, bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive diff_pos.mutable_gpu_data() + i*dim); // reference-pose_close - caffe_gpu_powx( + caffe_gpu_dot( dim, - diff_pos.mutable_gpu_data() + i*dim, // reference-pose_close - Dtype(2), - diff_sq_pos.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_pos.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_pos.mutable_gpu_data() + i); // \Sum (reference-pose_close)^2 + diff_pos.gpu_data() + 
i*dim, + diff_pos.gpu_data() + i*dim, + dist_sq_pos.mutable_gpu_data() + i); // a b is a similar pair for pair wise // loss accumulated by the pair wise part loss += dist_sq_pos.gpu_data()[i]; @@ -49,26 +39,16 @@ void TripletLossLayer::Forward_gpu( // a and negative[triplet] is a similar pair for triplet dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.gpu_data()[i]; // Loss component calculated from negative part - caffe_sub( + caffe_gpu_sub( dim, bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + i*dim); // reference-negative - caffe_gpu_powx( + caffe_gpu_dot( dim, - diff_neg.mutable_gpu_data() + i*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + i); // \Sum(reference-pose_close)^2 + diff_neg.gpu_data() + i*dim, + diff_neg.gpu_data() + i*dim, + dist_sq_neg.mutable_gpu_data() + i); // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[i] -= dist_sq_neg.gpu_data()[i]; // loss accumulated accumulated by the triplet part @@ -79,27 +59,17 @@ void TripletLossLayer::Forward_gpu( top[0]->mutable_gpu_data()[0] = loss; } else { for (int i = 0; i < num_set; ++i) { - caffe_sub( + caffe_gpu_sub( dim, bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive diff_pos.mutable_gpu_data() + i*dim); // reference-pose_close // Loss component calculated from reference and close one - caffe_gpu_powx( + caffe_gpu_dot( dim, - diff_pos.mutable_gpu_data() + i*dim, // reference-pose_close - Dtype(2), - diff_sq_pos.mutable_gpu_data()); // 
(reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_pos.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_pos.mutable_gpu_data() + i); // \Sum (reference-pose_close)^2 + diff_pos.gpu_data() + i*dim, + diff_pos.gpu_data() + i*dim, + dist_sq_pos.mutable_gpu_data() + i); // a b is a similar pair for pair wise // loss accumulated by the pair wise part loss += dist_sq_pos.gpu_data()[i]; @@ -107,26 +77,16 @@ void TripletLossLayer::Forward_gpu( dist_sq_.mutable_gpu_data()[i] += margin; for (int triplet = 0; triplet < num_triplets; ++triplet) { // Loss component calculated from negative part - caffe_sub( + caffe_gpu_sub( dim, bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + i*dim); // reference-negative - caffe_gpu_powx( + caffe_gpu_dot( dim, - diff_neg.mutable_gpu_data() + i*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + i); // \Sum(reference-pose_close)^2 + diff_neg.gpu_data() + i*dim, + diff_neg.gpu_data() + i*dim, + dist_sq_neg.mutable_gpu_data() + i); // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[i] = 1 - \ dist_sq_neg.gpu_data()[i] / dist_sq_.mutable_gpu_data()[i]; @@ -165,26 +125,16 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, bout + ((2 + num_triplets)*j + i)*dim); // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { - caffe_sub( + caffe_gpu_sub( dim, bottom[0]->gpu_data() + 
(2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + j*dim); // reference-negative - caffe_gpu_powx( + caffe_gpu_dot( dim, - diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum(ref-close)^2 + diff_neg.gpu_data() + j*dim, + diff_neg.gpu_data() + j*dim, + dist_sq_neg.mutable_gpu_data() + j); // Triplet loss accumulation // a and negative[triplet] is a similar pair for triplet dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; @@ -228,7 +178,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, bout + ((2 + num_triplets)*j + i)*dim); // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { - caffe_sub( + caffe_gpu_sub( dim, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, @@ -236,21 +186,11 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, // Triplet loss accumulation // a and negative[triplet] is a similar pair for triplet dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; - caffe_gpu_powx( + caffe_gpu_dot( dim, - diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum (ref-close)^2 + diff_neg.gpu_data() + j*dim, + 
diff_neg.gpu_data() + j*dim, + dist_sq_neg.mutable_gpu_data() + j); // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { @@ -274,7 +214,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, static_cast(num_set); for (int j = 0; j < num_set; ++j) { Dtype* bout = bottom[0]->mutable_gpu_diff(); - caffe_sub( + caffe_gpu_sub( dim, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + i)*dim, @@ -282,21 +222,11 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, // Triplet loss accumulation // a and negative[triplet] is a similar pair for triplet dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; - caffe_gpu_powx( + caffe_gpu_dot( dim, - diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum (ref-close)^2 + diff_neg.gpu_data() + j*dim, + diff_neg.gpu_data() + j*dim, + dist_sq_neg.mutable_gpu_data() + j); // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { @@ -334,26 +264,16 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { // Loss component calculated from negative part - caffe_sub( + caffe_gpu_sub( dim, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + j*dim); // reference-negative - 
caffe_gpu_powx( + caffe_gpu_dot( dim, - diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum (ref-close)^2 + diff_neg.gpu_data() + j*dim, + diff_neg.gpu_data() + j*dim, + dist_sq_neg.mutable_gpu_data() + j); // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] = 1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; @@ -398,26 +318,16 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { // Loss component calculated from negative part - caffe_sub( + caffe_gpu_sub( dim, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + j*dim); // reference-negative - caffe_gpu_powx( + caffe_gpu_dot( dim, - diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum (ref-close)^2 + diff_neg.gpu_data() + j*dim, + diff_neg.gpu_data() + j*dim, + dist_sq_neg.mutable_gpu_data() + j); // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] = 1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; @@ -447,26 +357,16 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, 
dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; dist_sq_.mutable_gpu_data()[j] += margin; // Loss component calculated from negative part - caffe_sub( + caffe_gpu_sub( dim, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + i)*dim, diff_neg.mutable_gpu_data() + j*dim); // reference-negative - caffe_gpu_powx( + caffe_gpu_dot( dim, - diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum (ref-close)^2 + diff_neg.gpu_data() + j*dim, + diff_neg.gpu_data() + j*dim, + dist_sq_neg.mutable_gpu_data() + j); // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] = 1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; From 2799398dac8ca721d3b129d3eba78c393d9e4baa Mon Sep 17 00:00:00 2001 From: Wangyida Date: Thu, 13 Aug 2015 17:52:23 +0800 Subject: [PATCH 42/82] closest sample fix --- examples/triplet/convert_3d_triplet_data.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/triplet/convert_3d_triplet_data.cpp b/examples/triplet/convert_3d_triplet_data.cpp index f610f9c4de6..fd63634907e 100644 --- a/examples/triplet/convert_3d_triplet_data.cpp +++ b/examples/triplet/convert_3d_triplet_data.cpp @@ -132,7 +132,7 @@ void convert_dataset(const char* image_filename, const char* label_filename, float dist_ij = std::sqrt(ij_x + ij_y + ij_z); float dist_im = std::sqrt(im_x + im_y + im_z); - if (*label_i == *label_j && dist_ij < 100/2) + if (*label_i == *label_j && dist_ij < 100/2 && dist_ij != 0) pair_pass = true; if (pair_pass && (*label_i != *label_k)) triplet1_pass = 
true; From 0ace83d618ce019b7bbc6d27746f1b20c3b663c4 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Sat, 15 Aug 2015 20:05:02 +0800 Subject: [PATCH 43/82] bug fixes for the newest triplet loss, now it could be used by setting in prototxt --- examples/triplet/3d_triplet_train_test.prototxt | 4 ++-- src/caffe/layers/triplet_loss_layer.cpp | 12 ++++++------ src/caffe/layers/triplet_loss_layer.cu | 12 ++++++------ 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/examples/triplet/3d_triplet_train_test.prototxt b/examples/triplet/3d_triplet_train_test.prototxt index 1ac185aa2cc..4c766ebe0e4 100644 --- a/examples/triplet/3d_triplet_train_test.prototxt +++ b/examples/triplet/3d_triplet_train_test.prototxt @@ -174,8 +174,8 @@ layer { bottom: "sim" top: "loss" triplet_loss_param { - margin: 1 - losstype: 0 + margin: 0.02 + losstype: 1 num_triplets: 3 } } diff --git a/src/caffe/layers/triplet_loss_layer.cpp b/src/caffe/layers/triplet_loss_layer.cpp index 613fa703676..7d11ff59ce4 100644 --- a/src/caffe/layers/triplet_loss_layer.cpp +++ b/src/caffe/layers/triplet_loss_layer.cpp @@ -94,9 +94,9 @@ void TripletLossLayer::Forward_cpu( // a b is a similar pair for pair wise // loss accumulated by the pair wise part loss += dist_sq_pos.cpu_data()[i]; - dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; - dist_sq_.mutable_cpu_data()[i] += margin; for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; + dist_sq_.mutable_cpu_data()[i] += margin; // Loss component calculated from negative part caffe_sub( dim, @@ -268,10 +268,10 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, diff_pos.cpu_data() + (j*dim), Dtype(0.0), bout + ((2 + num_triplets)*j + i)*dim); - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { + 
dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; // Loss component calculated from negative part caffe_sub( dim, @@ -319,10 +319,10 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, diff_pos.cpu_data() + (j*dim), Dtype(0.0), bout + ((2 + num_triplets)*j + i)*dim); - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; // Loss component calculated from negative part caffe_sub( dim, diff --git a/src/caffe/layers/triplet_loss_layer.cu b/src/caffe/layers/triplet_loss_layer.cu index 87c14a83df0..fa526205e27 100644 --- a/src/caffe/layers/triplet_loss_layer.cu +++ b/src/caffe/layers/triplet_loss_layer.cu @@ -73,9 +73,9 @@ void TripletLossLayer::Forward_gpu( // a b is a similar pair for pair wise // loss accumulated by the pair wise part loss += dist_sq_pos.gpu_data()[i]; - dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.mutable_gpu_data()[i]; - dist_sq_.mutable_gpu_data()[i] += margin; for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.mutable_gpu_data()[i]; + dist_sq_.mutable_gpu_data()[i] += margin; // Loss component calculated from negative part caffe_gpu_sub( dim, @@ -259,10 +259,10 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, diff_pos.gpu_data() + (j*dim), Dtype(0.0), bout + ((2 + num_triplets)*j + i)*dim); - dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; - dist_sq_.mutable_gpu_data()[j] += margin; // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; + dist_sq_.mutable_gpu_data()[j] += margin; // Loss component calculated from negative part 
caffe_gpu_sub( dim, @@ -313,10 +313,10 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, diff_pos.gpu_data() + (j*dim), Dtype(0.0), bout + ((2 + num_triplets)*j + i)*dim); - dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; - dist_sq_.mutable_gpu_data()[j] += margin; // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; + dist_sq_.mutable_gpu_data()[j] += margin; // Loss component calculated from negative part caffe_gpu_sub( dim, From 0f4cead46061714e17834b5cb94917769a9cae69 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Sun, 16 Aug 2015 21:43:45 +0800 Subject: [PATCH 44/82] add RGB data training as an option in triplet training --- examples/triplet/convert_3d_triplet_data.cpp | 60 +++++++++++++------- examples/triplet/create_3d_triplet.sh | 6 +- 2 files changed, 43 insertions(+), 23 deletions(-) diff --git a/examples/triplet/convert_3d_triplet_data.cpp b/examples/triplet/convert_3d_triplet_data.cpp index fd63634907e..80a8906288d 100644 --- a/examples/triplet/convert_3d_triplet_data.cpp +++ b/examples/triplet/convert_3d_triplet_data.cpp @@ -17,17 +17,30 @@ uint32_t swap_endian(uint32_t val) { void read_image(std::ifstream* image_file, std::ifstream* label_file, uint32_t index, uint32_t rows, uint32_t cols, - char* pixels, char* label_temp, signed char* label) { - image_file->seekg(index * rows * cols + 16); - image_file->read(pixels, rows * cols); - label_file->seekg(index * 4 + 8); - label_file->read(label_temp, 4); - for (int i = 0; i < 4; i++) - *(label+i) = (signed char)*(label_temp+i); + char* pixels, char* label_temp, signed char* label, int rgb_use) { + if (rgb_use == 0) + { + image_file->seekg(index * rows * cols + 16); + image_file->read(pixels, rows * cols); + label_file->seekg(index * 4 + 8); + label_file->read(label_temp, 4); + for (int i = 0; i < 4; i++) + *(label+i) = (signed char)*(label_temp+i); + } + else + { + 
image_file->seekg(3 * index * rows * cols + 16); + image_file->read(pixels, 3 * rows * cols); + label_file->seekg(index * 4 + 8); + label_file->read(label_temp, 4); + for (int i = 0; i < 4; i++) + *(label+i) = (signed char)*(label_temp+i); + } } void convert_dataset(const char* image_filename, const char* label_filename, - const char* db_filename, const char* class_number) { + const char* db_filename, const char* class_number, const char* rgb_use) { + int rgb_use1 = atoi(rgb_use); int class_num = atoi(class_number); // Open files std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); @@ -73,11 +86,16 @@ void convert_dataset(const char* image_filename, const char* label_filename, signed char* label_k = new signed char[4]; signed char* label_l = new signed char[4]; // label for pair wise signed char* label_m = new signed char[4]; - char* pixels1 = new char[rows * cols]; - char* pixels2 = new char[rows * cols]; - char* pixels3 = new char[rows * cols]; - char* pixels4 = new char[rows * cols]; - char* pixels5 = new char[rows * cols]; + int db_size; + if (rgb_use1 == 0) + db_size = rows * cols; + else + db_size = 3 * rows * cols; + char* pixels1 = new char[db_size]; + char* pixels2 = new char[db_size]; + char* pixels3 = new char[db_size]; + char* pixels4 = new char[db_size]; + char* pixels5 = new char[db_size]; const int kMaxKeyLength = 10; char key[kMaxKeyLength]; std::string value; @@ -100,15 +118,15 @@ void convert_dataset(const char* image_filename, const char* label_filename, int l = caffe::caffe_rng_rand() % num_items; // pick pair wise groups int m = caffe::caffe_rng_rand() % num_items; read_image(&image_file, &label_file, i, rows, cols, // read triplet - pixels1, label_temp, label_i); + pixels1, label_temp, label_i, rgb_use1); read_image(&image_file, &label_file, j, rows, cols, - pixels2, label_temp, label_j); + pixels2, label_temp, label_j, rgb_use1); read_image(&image_file, &label_file, k, rows, cols, - pixels3, label_temp, label_k); + 
pixels3, label_temp, label_k, rgb_use1); read_image(&image_file, &label_file, l, rows, cols, // read pair wise - pixels4, label_temp, label_l); + pixels4, label_temp, label_l, rgb_use1); read_image(&image_file, &label_file, m, rows, cols, - pixels5, label_temp, label_m); + pixels5, label_temp, label_m, rgb_use1); bool pair_pass = false; bool triplet1_pass = false; @@ -188,15 +206,15 @@ void convert_dataset(const char* image_filename, const char* label_filename, } int main(int argc, char** argv) { - if (argc != 5) { + if (argc != 6) { printf("This script converts the dataset to the leveldb format used\n" "by caffe to train a triplet network.\n" "Usage:\n" " convert_3d_data input_image_file input_label_file " - "output_db_file class_number\n"); + "output_db_file class_number rgb_use \n"); } else { google::InitGoogleLogging(argv[0]); - convert_dataset(argv[1], argv[2], argv[3], argv[4]); + convert_dataset(argv[1], argv[2], argv[3], argv[4], argv[5]); } return 0; } diff --git a/examples/triplet/create_3d_triplet.sh b/examples/triplet/create_3d_triplet.sh index f29adbf6a8f..7f1c9798772 100755 --- a/examples/triplet/create_3d_triplet.sh +++ b/examples/triplet/create_3d_triplet.sh @@ -13,11 +13,13 @@ $EXAMPLES/convert_3d_triplet_data.bin \ $DATA/binary_image_train \ $DATA/binary_label_train \ ./examples/triplet/3d_triplet_train_leveldb \ - 4 + 4 \ + 0 $EXAMPLES/convert_3d_triplet_data.bin \ $DATA/binary_image_test \ $DATA/binary_label_test \ ./examples/triplet/3d_triplet_test_leveldb \ - 4 + 4 \ + 0 echo "Done." 
From 053c7849ba583dee3568ed4c96981696dfb5132a Mon Sep 17 00:00:00 2001 From: Wangyida Date: Tue, 18 Aug 2015 11:25:11 +0800 Subject: [PATCH 45/82] format fix --- examples/triplet/convert_3d_triplet_data.cpp | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/examples/triplet/convert_3d_triplet_data.cpp b/examples/triplet/convert_3d_triplet_data.cpp index 80a8906288d..7dd2219d8af 100644 --- a/examples/triplet/convert_3d_triplet_data.cpp +++ b/examples/triplet/convert_3d_triplet_data.cpp @@ -18,17 +18,14 @@ uint32_t swap_endian(uint32_t val) { void read_image(std::ifstream* image_file, std::ifstream* label_file, uint32_t index, uint32_t rows, uint32_t cols, char* pixels, char* label_temp, signed char* label, int rgb_use) { - if (rgb_use == 0) - { + if (rgb_use == 0) { image_file->seekg(index * rows * cols + 16); image_file->read(pixels, rows * cols); label_file->seekg(index * 4 + 8); label_file->read(label_temp, 4); for (int i = 0; i < 4; i++) *(label+i) = (signed char)*(label_temp+i); - } - else - { + } else { image_file->seekg(3 * index * rows * cols + 16); image_file->read(pixels, 3 * rows * cols); label_file->seekg(index * 4 + 8); @@ -39,7 +36,8 @@ void read_image(std::ifstream* image_file, std::ifstream* label_file, } void convert_dataset(const char* image_filename, const char* label_filename, - const char* db_filename, const char* class_number, const char* rgb_use) { + const char* db_filename, const char* class_number, + const char* rgb_use) { int rgb_use1 = atoi(rgb_use); int class_num = atoi(class_number); // Open files From 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Sat, 12 Sep 2015 16:32:37 +0800 Subject: [PATCH 46/82] standard master --- examples/triplet/3d_triplet.prototxt | 110 --------- examples/triplet/3d_triplet_solver.prototxt | 25 -- .../triplet/3d_triplet_train_test.prototxt | 181 --------------- examples/triplet/convert_3d_triplet_data.cpp | 218 ------------------ 
examples/triplet/create_3d_triplet.sh | 25 -- examples/triplet/readme.md | 186 --------------- examples/triplet/train_3d_triplet.sh | 5 - src/caffe/layers/triplet_loss_layer.cu | 24 +- src/caffe/proto/caffe.proto | 2 +- 9 files changed, 13 insertions(+), 763 deletions(-) delete mode 100644 examples/triplet/3d_triplet.prototxt delete mode 100644 examples/triplet/3d_triplet_solver.prototxt delete mode 100644 examples/triplet/3d_triplet_train_test.prototxt delete mode 100644 examples/triplet/convert_3d_triplet_data.cpp delete mode 100755 examples/triplet/create_3d_triplet.sh delete mode 100644 examples/triplet/readme.md delete mode 100755 examples/triplet/train_3d_triplet.sh diff --git a/examples/triplet/3d_triplet.prototxt b/examples/triplet/3d_triplet.prototxt deleted file mode 100644 index 2732b9a0e82..00000000000 --- a/examples/triplet/3d_triplet.prototxt +++ /dev/null @@ -1,110 +0,0 @@ -name: "3d_triplet" -input: "data" -input_dim: 2760 -input_dim: 1 -input_dim: 64 -input_dim: 64 -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 16 - kernel_size: 8 - stride: 1 - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "pool1" - top: "pool1" -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 7 - kernel_size: 5 - stride: 1 - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "relu2" - type: "ReLU" - bottom: "pool2" - top: "pool2" -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - 
inner_product_param { - num_output: 256 - } -} -layer { - name: "relu3" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip1" - top: "feat" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 4 - } -} diff --git a/examples/triplet/3d_triplet_solver.prototxt b/examples/triplet/3d_triplet_solver.prototxt deleted file mode 100644 index 5008af22df9..00000000000 --- a/examples/triplet/3d_triplet_solver.prototxt +++ /dev/null @@ -1,25 +0,0 @@ -# The train/test net protocol buffer definition -net: "examples/triplet/3d_triplet_train_test.prototxt" -# test_iter specifies how many forward passes the test should carry out. -# In the case of 3d database, we have test batch size 23 and 23 test iterations, -# covering the full 2760 testing images. -test_iter: 100 -# Carry out testing every 500 training iterations. -test_interval: 100 -# The base learning rate, momentum and the weight decay of the network. -base_lr: 0.001 -momentum: 0.9 -weight_decay: 0.0000 -# The learning rate policy -lr_policy: "inv" -gamma: 0.0001 -power: 0.75 -# Display every 100 iterations -display: 100 -# The maximum number of iterations -max_iter: 20000 -# snapshot intermediate results -snapshot: 2000 -snapshot_prefix: "examples/triplet/3d_triplet" -# solver mode: CPU or GPU -solver_mode: CPU diff --git a/examples/triplet/3d_triplet_train_test.prototxt b/examples/triplet/3d_triplet_train_test.prototxt deleted file mode 100644 index 4c766ebe0e4..00000000000 --- a/examples/triplet/3d_triplet_train_test.prototxt +++ /dev/null @@ -1,181 +0,0 @@ -name: "3d_triplet_train_test" -layer { - name: "data" - type: "Data" - top: "data" - top: "sim" - include { - phase: TRAIN - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/triplet/3d_triplet_train_leveldb" - batch_size: 250 - } -} -layer { - name: "data" - type: "Data" - top: "data" - top: "sim" - include { - phase: TEST - } - 
transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/triplet/3d_triplet_test_leveldb" - batch_size: 250 - } -} -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 16 - kernel_size: 8 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "pool1" - top: "pool1" -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 7 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "relu2" - type: "ReLU" - bottom: "pool2" - top: "pool2" -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 256 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu3" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip1" - top: "feat" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 4 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "loss" - type: "TripletLoss" - bottom: "feat" - bottom: "sim" - top: "loss" - triplet_loss_param { - margin: 0.02 - 
losstype: 1 - num_triplets: 3 - } -} diff --git a/examples/triplet/convert_3d_triplet_data.cpp b/examples/triplet/convert_3d_triplet_data.cpp deleted file mode 100644 index 7dd2219d8af..00000000000 --- a/examples/triplet/convert_3d_triplet_data.cpp +++ /dev/null @@ -1,218 +0,0 @@ -// Usage: -// convert_3d_data input_image_file input_label_file output_db_file -#include // NOLINT(readability/streams) -#include -#include "caffe/proto/caffe.pb.h" -#include "caffe/util/math_functions.hpp" -#include "glog/logging.h" -#include "google/protobuf/text_format.h" -#include "leveldb/db.h" -#include "math.h" -#include "stdint.h" - -uint32_t swap_endian(uint32_t val) { - val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); - return (val << 16) | (val >> 16); -} - -void read_image(std::ifstream* image_file, std::ifstream* label_file, - uint32_t index, uint32_t rows, uint32_t cols, - char* pixels, char* label_temp, signed char* label, int rgb_use) { - if (rgb_use == 0) { - image_file->seekg(index * rows * cols + 16); - image_file->read(pixels, rows * cols); - label_file->seekg(index * 4 + 8); - label_file->read(label_temp, 4); - for (int i = 0; i < 4; i++) - *(label+i) = (signed char)*(label_temp+i); - } else { - image_file->seekg(3 * index * rows * cols + 16); - image_file->read(pixels, 3 * rows * cols); - label_file->seekg(index * 4 + 8); - label_file->read(label_temp, 4); - for (int i = 0; i < 4; i++) - *(label+i) = (signed char)*(label_temp+i); - } -} - -void convert_dataset(const char* image_filename, const char* label_filename, - const char* db_filename, const char* class_number, - const char* rgb_use) { - int rgb_use1 = atoi(rgb_use); - int class_num = atoi(class_number); - // Open files - std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); - std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); - CHECK(image_file) << "Unable to open file " << image_filename; - CHECK(label_file) << "Unable to open file " << label_filename; - 
// Read the magic and the meta data - uint32_t magic; - uint32_t num_items; - uint32_t num_labels; - uint32_t rows; - uint32_t cols; - - image_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - CHECK_EQ(magic, 2051) << "Incorrect image file magic."; - label_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - CHECK_EQ(magic, 2050) << "Incorrect label file magic."; - image_file.read(reinterpret_cast(&num_items), 4); - num_items = swap_endian(num_items); - label_file.read(reinterpret_cast(&num_labels), 4); - num_labels = swap_endian(num_labels); - CHECK_EQ(num_items, num_labels); - image_file.read(reinterpret_cast(&rows), 4); - rows = swap_endian(rows); - image_file.read(reinterpret_cast(&cols), 4); - cols = swap_endian(cols); - - // Open leveldb - leveldb::DB* db; - leveldb::Options options; - options.create_if_missing = true; - options.error_if_exists = true; - leveldb::Status status = leveldb::DB::Open( - options, db_filename, &db); - CHECK(status.ok()) << "Failed to open leveldb " << db_filename - << ". 
Is it already existing?"; - - char* label_temp = new char[4]; // label for unsigned char* - signed char* label_i = new signed char[4]; // label for triplet - signed char* label_j = new signed char[4]; - signed char* label_k = new signed char[4]; - signed char* label_l = new signed char[4]; // label for pair wise - signed char* label_m = new signed char[4]; - int db_size; - if (rgb_use1 == 0) - db_size = rows * cols; - else - db_size = 3 * rows * cols; - char* pixels1 = new char[db_size]; - char* pixels2 = new char[db_size]; - char* pixels3 = new char[db_size]; - char* pixels4 = new char[db_size]; - char* pixels5 = new char[db_size]; - const int kMaxKeyLength = 10; - char key[kMaxKeyLength]; - std::string value; - caffe::Datum datum; - datum.set_channels(1); - datum.set_height(rows); - datum.set_width(cols); - LOG(INFO) << "A total of " << num_items << " items."; - LOG(INFO) << "Rows: " << rows << " Cols: " << cols; - int counter = 0; - for (unsigned int times = 0; times < 5; ++times) { - // iteration in the samples of all class - for (unsigned int itemid = 0; itemid < num_items/class_num; ++itemid) { - // iteration in the samples in one class - for (unsigned int class_ind = 0; class_ind < class_num; ++class_ind) { - // use reference sample one by one at each iteration - int i = itemid % num_items + class_ind*num_items/class_num; - int j = caffe::caffe_rng_rand() % num_items; // pick triplet groups - int k = caffe::caffe_rng_rand() % num_items; - int l = caffe::caffe_rng_rand() % num_items; // pick pair wise groups - int m = caffe::caffe_rng_rand() % num_items; - read_image(&image_file, &label_file, i, rows, cols, // read triplet - pixels1, label_temp, label_i, rgb_use1); - read_image(&image_file, &label_file, j, rows, cols, - pixels2, label_temp, label_j, rgb_use1); - read_image(&image_file, &label_file, k, rows, cols, - pixels3, label_temp, label_k, rgb_use1); - read_image(&image_file, &label_file, l, rows, cols, // read pair wise - pixels4, label_temp, label_l, 
rgb_use1); - read_image(&image_file, &label_file, m, rows, cols, - pixels5, label_temp, label_m, rgb_use1); - - bool pair_pass = false; - bool triplet1_pass = false; - bool triplet2_pass = false; - bool triplet3_class_same = false; - bool triplet3_pass = false; - - int ij_diff_x = static_cast(*(label_i+1)-*(label_j+1)); - int ij_diff_y = static_cast(*(label_i+2)-*(label_j+2)); - int ij_diff_z = static_cast(*(label_i+3)-*(label_j+3)); - int im_diff_x = static_cast(*(label_i+1)-*(label_m+1)); - int im_diff_y = static_cast(*(label_i+2)-*(label_m+2)); - int im_diff_z = static_cast(*(label_i+3)-*(label_m+3)); - - int ij_x = ij_diff_x*ij_diff_x; - int ij_y = ij_diff_y*ij_diff_y; - int ij_z = ij_diff_z*ij_diff_z; - int im_x = im_diff_x*im_diff_x; - int im_y = im_diff_y*im_diff_y; - int im_z = im_diff_z*im_diff_z; - - float dist_ij = std::sqrt(ij_x + ij_y + ij_z); - float dist_im = std::sqrt(im_x + im_y + im_z); - if (*label_i == *label_j && dist_ij < 100/2 && dist_ij != 0) - pair_pass = true; - if (pair_pass && (*label_i != *label_k)) - triplet1_pass = true; - if (pair_pass && (*label_i != *label_l)) - triplet2_pass = true; - if (pair_pass && (*label_i == *label_m)) - triplet3_class_same = true; - if (triplet3_class_same && dist_im > 100*sqrt(2)) - triplet3_pass = true; - if (pair_pass && triplet1_pass && triplet2_pass && triplet3_pass) { - datum.set_data(pixels1, rows*cols); // set data - datum.set_label(static_cast(*label_i)); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", counter); - db->Put(leveldb::WriteOptions(), std::string(key), value); - counter++; - datum.set_data(pixels2, rows*cols); // set data - datum.set_label(static_cast(*label_j)); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", counter); - db->Put(leveldb::WriteOptions(), std::string(key), value); - counter++; - datum.set_data(pixels3, rows*cols); // set data - datum.set_label(static_cast(*label_k)); - datum.SerializeToString(&value); - snprintf(key, 
kMaxKeyLength, "%08d", counter); - db->Put(leveldb::WriteOptions(), std::string(key), value); - counter++; - datum.set_data(pixels4, rows*cols); // set data - datum.set_label(static_cast(*label_l)); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", counter); - db->Put(leveldb::WriteOptions(), std::string(key), value); - counter++; - datum.set_data(pixels5, rows*cols); // set data - datum.set_label(static_cast(*label_m)); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", counter); - db->Put(leveldb::WriteOptions(), std::string(key), value); - counter++; - } else { - class_ind--; - } - } // iteration in the samples of all class - } // iteration in the samples in one class - } // iteration in times - delete db; - delete pixels1; - delete pixels2; - delete pixels3; - delete pixels4; - delete pixels5; -} - -int main(int argc, char** argv) { - if (argc != 6) { - printf("This script converts the dataset to the leveldb format used\n" - "by caffe to train a triplet network.\n" - "Usage:\n" - " convert_3d_data input_image_file input_label_file " - "output_db_file class_number rgb_use \n"); - } else { - google::InitGoogleLogging(argv[0]); - convert_dataset(argv[1], argv[2], argv[3], argv[4], argv[5]); - } - return 0; -} diff --git a/examples/triplet/create_3d_triplet.sh b/examples/triplet/create_3d_triplet.sh deleted file mode 100755 index 7f1c9798772..00000000000 --- a/examples/triplet/create_3d_triplet.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env sh -# This script converts the mnist data into leveldb format. - -EXAMPLES=./build/examples/triplet -DATA=./data/linemod - -echo "Creating leveldb..." 
- -rm -rf ./examples/triplet/3d_triplet_train_leveldb -rm -rf ./examples/triplet/3d_triplet_test_leveldb - -$EXAMPLES/convert_3d_triplet_data.bin \ - $DATA/binary_image_train \ - $DATA/binary_label_train \ - ./examples/triplet/3d_triplet_train_leveldb \ - 4 \ - 0 -$EXAMPLES/convert_3d_triplet_data.bin \ - $DATA/binary_image_test \ - $DATA/binary_label_test \ - ./examples/triplet/3d_triplet_test_leveldb \ - 4 \ - 0 - -echo "Done." diff --git a/examples/triplet/readme.md b/examples/triplet/readme.md deleted file mode 100644 index 7e2310900b1..00000000000 --- a/examples/triplet/readme.md +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: Triplet Network Tutorial -description: Train and test a triplet network on data generated by 3D model. -category: example -include_in_docs: true -layout: default -priority: 100 ---- - -# Triplet Network Training with Caffe -This example shows how you can use weight sharing and a contrastive loss -function to learn a model using a triplet network in Caffe. - -We will assume that you have caffe successfully compiled. If not, please refer -to the [Installation page](../../installation.html). This example builds on the -[MNIST tutorial](mnist.html) so it would be a good idea to read that before -continuing. - -*The guide specifies all paths and assumes all commands are executed from the -root caffe directory* - -## Prepare Datasets - -You will first need to convert the data from the some .ply models using -opencv_contrib cnn_3donj module. After construcing the binary files including images and labels and put them in ./data/linemod folder, just run: - - ./examples/triplet/create_3d_triplet.sh - -After running the script there should be two datasets, -`./examples/triplet/3d_triplet_train_leveldb`, and -`./examples/triplet/3d_triplet_test_leveldb`. - -## The Model -First, we will define the model that we want to train using the triplet network. -We will use the convolutional net defined in -`./examples/triplet/3d_triplet.prototxt`. 
- -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip1" - top: "feat" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 4 - } -} - -## Define the triplet Network - -In this section we will define the triplet network used for training. The -resulting network is defined in -`./examples/triplet/3d_triplet_train_test.prototxt`. - -### Reading in the Triplet Data - -We start with a data layer that reads from the LevelDB database we created -earlier. Each entry in this database contains the image data for a triplet of -images (`triplet_data`) and the label (`sim`) is not nessesary in our method. - - layers { - name: "triplet_data" - type: DATA - top: "triplet_data" - top: "sim" - data_param { - source: "examples/triplet/3d-triplet-train-leveldb" - scale: 0.00390625 - batch_size: 69 - } - include: { phase: TRAIN } - } - -In order to pack a triplet of images into the same blob in the database we pack one -image per channel. We want to be able to work with these three images separately, -so we add a slice layer after the data layer. This takes the `triplet_data` and -slices it along the channel dimension so that we have a single image in `data` -and its positive image in `data_pos.` & its negative image in `data_neg.`, as described in paper for 3D object classification and pose estimation, a pair wise term is also need alone with the triplet part. - -layer { - name: "slice_triplet" - type: "Slice" - bottom: "triplet_data" - top: "data" - top: "data_true" - top: "data_false" - top: "data_p1" - top: "data_p2" - slice_param { - slice_dim: 1 - slice_point: 1 - slice_point: 2 - slice_point: 3 - slice_point: 4 - } -} - -### Building the First part of the triplet Net - -Now we can specify the first side of the triplet net. This side operates on -`data` and produces `feat`. Starting from the net in -`./examples/triplet/3d_triplet.prototxt` we add default weight fillers. 
Then -we name the parameters of the convolutional and inner product layers. Naming the -parameters allows Caffe to share the parameters between layers on three channels of -the triplet net. In the definition this looks like: - - ... - param: "conv1_w" - param: "conv1_b" - ... - param: "conv2_w" - param: "conv2_b" - ... - param: "ip1_w" - param: "ip1_b" - ... - param: "ip2_w" - param: "ip2_b" - ... - -### Building the Second Side of the triplet Net - -Now we need to create the second path that operates on `data_pos` and produces -`feat_pos`. This path is exactly the same as the first. So we can just copy and -paste it. Then we change the name of each layer, input, and output by appending -`_pos` to differentiate the "paired" layers from the originals. - -### Building the Third Side of the triplet Net - -Now we need to create the second path that operates on `data_neg` and produces -`feat_neg`. This path is exactly the same as the first. So we can just copy and -paste it. Then we change the name of each layer, input, and output by appending -`_neg` to differentiate the "paired" layers from the originals. - -### Adding the Triplet Loss Function - -To train the network we will optimize a triplet loss function proposed in: -This cost function is implemented with the `TRIPLET_LOSS` layer: - -layer { - name: "loss" - type: "TripletLoss" - bottom: "feat" - bottom: "feat_true" - bottom: "feat_false" - bottom: "feat_p1" - bottom: "feat_p2" - bottom: "sim" - top: "loss" - triplet_loss_param { - margin: 1 - losstype: 1 - } -} - -## Define the Solver - -Nothing special needs to be done to the solver besides pointing it at the -correct model file. The solver is defined in -`./examples/triplet/3d_triplet_solver.prototxt`. - -## Training and Testing the Model - -Training the model is simple after you have written the network definition -protobuf and solver protobuf files. 
Simply run -`./examples/triplet/train_mnist_triplet.sh`: - - ./examples/triplet/train_3d_triplet.sh - -# Plotting the results - -First, we can draw the model and triplet networks by running the following -commands that draw the DAGs defined in the .prototxt files: - - ./python/draw_net.py \ - ./examples/triplet/3d_triplet.prototxt \ - ./examples/triplet/3d_triplet.png - - ./python/draw_net.py \ - ./examples/triplet/3d_triplet_train_test.prototxt \ - ./examples/triplet/3d_triplet_train_test.png \ No newline at end of file diff --git a/examples/triplet/train_3d_triplet.sh b/examples/triplet/train_3d_triplet.sh deleted file mode 100755 index e421af54493..00000000000 --- a/examples/triplet/train_3d_triplet.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env sh - -TOOLS=./build/tools - -$TOOLS/caffe train --solver=examples/triplet/3d_triplet_solver.prototxt diff --git a/src/caffe/layers/triplet_loss_layer.cu b/src/caffe/layers/triplet_loss_layer.cu index fa526205e27..cd1fbb1201b 100644 --- a/src/caffe/layers/triplet_loss_layer.cu +++ b/src/caffe/layers/triplet_loss_layer.cu @@ -30,7 +30,7 @@ void TripletLossLayer::Forward_gpu( dim, diff_pos.gpu_data() + i*dim, diff_pos.gpu_data() + i*dim, - dist_sq_pos.mutable_gpu_data() + i); + dist_sq_pos.mutable_cpu_data() + i); // a b is a similar pair for pair wise // loss accumulated by the pair wise part loss += dist_sq_pos.gpu_data()[i]; @@ -48,7 +48,7 @@ void TripletLossLayer::Forward_gpu( dim, diff_neg.gpu_data() + i*dim, diff_neg.gpu_data() + i*dim, - dist_sq_neg.mutable_gpu_data() + i); + dist_sq_neg.mutable_cpu_data() + i); // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[i] -= dist_sq_neg.gpu_data()[i]; // loss accumulated accumulated by the triplet part @@ -69,7 +69,7 @@ void TripletLossLayer::Forward_gpu( dim, diff_pos.gpu_data() + i*dim, diff_pos.gpu_data() + i*dim, - dist_sq_pos.mutable_gpu_data() + i); + dist_sq_pos.mutable_cpu_data() + i); // a b is a similar pair for pair wise // 
loss accumulated by the pair wise part loss += dist_sq_pos.gpu_data()[i]; @@ -86,7 +86,7 @@ void TripletLossLayer::Forward_gpu( dim, diff_neg.gpu_data() + i*dim, diff_neg.gpu_data() + i*dim, - dist_sq_neg.mutable_gpu_data() + i); + dist_sq_neg.mutable_cpu_data() + i); // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[i] = 1 - \ dist_sq_neg.gpu_data()[i] / dist_sq_.mutable_gpu_data()[i]; @@ -134,7 +134,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, - dist_sq_neg.mutable_gpu_data() + j); + dist_sq_neg.mutable_cpu_data() + j); // Triplet loss accumulation // a and negative[triplet] is a similar pair for triplet dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; @@ -190,7 +190,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, - dist_sq_neg.mutable_gpu_data() + j); + dist_sq_neg.mutable_cpu_data() + j); // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { @@ -226,7 +226,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, - dist_sq_neg.mutable_gpu_data() + j); + dist_sq_neg.mutable_cpu_data() + j); // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { @@ -238,7 +238,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, Dtype(0.0), bout + ((2 + num_triplets)*j + i)*dim); } else { - caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); + caffe_gpu_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); } } } @@ -273,7 +273,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, 
- dist_sq_neg.mutable_gpu_data() + j); + dist_sq_neg.mutable_cpu_data() + j); // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] = 1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; @@ -327,7 +327,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, - dist_sq_neg.mutable_gpu_data() + j); + dist_sq_neg.mutable_cpu_data() + j); // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] = 1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; @@ -366,7 +366,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, - dist_sq_neg.mutable_gpu_data() + j); + dist_sq_neg.mutable_cpu_data() + j); // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] = 1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; @@ -379,7 +379,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, Dtype(0.0), bout + ((2 + num_triplets)*j + i)*dim); } else { - caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); + caffe_gpu_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); } } } diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index 7a6079a37ad..331d31ce8f8 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -385,7 +385,7 @@ message LayerParameter { optional ThresholdParameter threshold_param = 128; optional TileParameter tile_param = 138; optional WindowDataParameter window_data_param = 129; - optional TripletLossParameter triplet_loss_param = 137; + optional TripletLossParameter triplet_loss_param = 139; } // Message that stores parameters used to apply transformation From eab220ebceacc8021fd297ebdad3a417c3c91307 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Thu, 17 Sep 2015 11:47:59 +0800 Subject: [PATCH 47/82] local master update --- 
docs/multigpu.md | 26 --- examples/triplet/3d_triplet.prototxt | 110 +++++++++ examples/triplet/3d_triplet_solver.prototxt | 25 ++ .../triplet/3d_triplet_train_test.prototxt | 181 ++++++++++++++ examples/triplet/convert_3d_triplet_data.cpp | 221 ++++++++++++++++++ examples/triplet/create_3d_triplet.sh | 24 ++ examples/triplet/readme.md | 186 +++++++++++++++ examples/triplet/train_3d_triplet.sh | 5 + include/caffe/common_layers.hpp | 4 +- include/caffe/solver.hpp | 2 - python/caffe/__init__.py | 2 +- python/caffe/_caffe.cpp | 9 - python/caffe/net_spec.py | 10 +- python/caffe/pycaffe.py | 3 +- python/caffe/test/test_net_spec.py | 3 +- src/caffe/layers/concat_layer.cpp | 6 - src/caffe/layers/concat_layer.cu | 2 - src/caffe/layers/slice_layer.cpp | 7 +- src/caffe/layers/slice_layer.cu | 3 +- src/caffe/net.cpp | 11 +- src/caffe/solver.cpp | 21 +- src/caffe/test/test_accuracy_layer.cpp | 12 +- src/caffe/test/test_concat_layer.cpp | 23 -- src/caffe/test/test_eltwise_layer.cpp | 4 +- src/caffe/test/test_slice_layer.cpp | 27 --- tools/extract_features.cpp | 2 +- 26 files changed, 778 insertions(+), 151 deletions(-) delete mode 100644 docs/multigpu.md create mode 100644 examples/triplet/3d_triplet.prototxt create mode 100644 examples/triplet/3d_triplet_solver.prototxt create mode 100644 examples/triplet/3d_triplet_train_test.prototxt create mode 100644 examples/triplet/convert_3d_triplet_data.cpp create mode 100755 examples/triplet/create_3d_triplet.sh create mode 100644 examples/triplet/readme.md create mode 100755 examples/triplet/train_3d_triplet.sh diff --git a/docs/multigpu.md b/docs/multigpu.md deleted file mode 100644 index 01cfb8938b5..00000000000 --- a/docs/multigpu.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: Multi-GPU Usage, Hardware Configuration Assumptions, and Performance ---- - -# Multi-GPU Usage - -Currently Multi-GPU is only supported via the C/C++ paths and only for training. 
- -The GPUs to be used for training can be set with the "-gpu" flag on the command line to the 'caffe' tool. e.g. "build/tools/caffe train --solver=models/bvlc_alexnet/solver.prototxt --gpu=0,1" will train on GPUs 0 and 1. - -**NOTE**: each GPU runs the batchsize specified in your train_val.prototxt. So if you go from 1 GPU to 2 GPU, your effective batchsize will double. e.g. if your train_val.prototxt specified a batchsize of 256, if you run 2 GPUs your effective batch size is now 512. So you need to adjust the batchsize when running multiple GPUs and/or adjust your solver params, specifically learning rate. - -# Hardware Configuration Assumptions - -The current implementation uses a tree reduction strategy. e.g. if there are 4 GPUs in the system, 0:1, 2:3 will exchange gradients, then 0:2 (top of the tree) will exchange gradients, 0 will calculate -updated model, 0\-\>2, and then 0\-\>1, 2\-\>3. - -For best performance, P2P DMA access between devices is needed. Without P2P access, for example crossing PCIe root complex, data is copied through host and effective exchange bandwidth is greatly reduced. - -Current implementation has a "soft" assumption that the devices being used are homogeneous. In practice, any devices of the same general class should work together, but performance and total size is limited by the smallest device being used. e.g. if you combine a TitanX and a GTX980, peformance will be limited by the 980. Mixing vastly different levels of boards, e.g. Kepler and Fermi, is not supported. - -"nvidia-smi topo -m" will show you the connectivity matrix. You can do P2P through PCIe bridges, but not across socket level links at this time, e.g. across CPU sockets on a multi-socket motherboard. - -# Scaling Performance - -Performance is **heavily** dependent on the PCIe topology of the system, the configuration of the neural network you are training, and the speed of each of the layers. 
Systems like the DIGITS DevBox have an optimized PCIe topology (X99-E WS chipset). In general, scaling on 2 GPUs tends to be ~1.8X on average for networks like AlexNet, CaffeNet, VGG, GoogleNet. 4 GPUs begins to have falloff in scaling. Generally with "weak scaling" where the batchsize increases with the number of GPUs you will see 3.5x scaling or so. With "strong scaling", the system can become communication bound, especially with layer performance optimizations like those in [cuDNNv3](http://nvidia.com/cudnn), and you will likely see closer to mid 2.x scaling in performance. Networks that have heavy computation compared to the number of parameters tend to have the best scaling performance. \ No newline at end of file diff --git a/examples/triplet/3d_triplet.prototxt b/examples/triplet/3d_triplet.prototxt new file mode 100644 index 00000000000..076e0be5040 --- /dev/null +++ b/examples/triplet/3d_triplet.prototxt @@ -0,0 +1,110 @@ +name: "3d_triplet" +input: "data" +input_dim: 9720 +input_dim: 1 +input_dim: 64 +input_dim: 64 +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 16 + kernel_size: 8 + stride: 1 + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "pool1" + top: "pool1" +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 7 + kernel_size: 5 + stride: 1 + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu2" + type: "ReLU" + bottom: "pool2" + top: "pool2" +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + lr_mult: 1 + } + 
param { + lr_mult: 2 + } + inner_product_param { + num_output: 256 + } +} +layer { + name: "relu3" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip1" + top: "feat" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 3 + } +} diff --git a/examples/triplet/3d_triplet_solver.prototxt b/examples/triplet/3d_triplet_solver.prototxt new file mode 100644 index 00000000000..eea97da7603 --- /dev/null +++ b/examples/triplet/3d_triplet_solver.prototxt @@ -0,0 +1,25 @@ +# The train/test net protocol buffer definition +net: "examples/triplet/3d_triplet_train_test.prototxt" +# test_iter specifies how many forward passes the test should carry out. +# In the case of 3d database, we have test batch size 250 and 250 test iterations: 50*(2+3)=250, +# covering the full 9720 testing images:162*6*10=9720. +test_iter: 100 +# Carry out testing every 500 training iterations. +test_interval: 100 +# The base learning rate, momentum and the weight decay of the network. 
+base_lr: 0.001 +momentum: 0.9 +weight_decay: 0.0000 +# The learning rate policy +lr_policy: "inv" +gamma: 0.0001 +power: 0.75 +# Display every 100 iterations +display: 100 +# The maximum number of iterations +max_iter: 80000 +# snapshot intermediate results +snapshot: 5000 +snapshot_prefix: "examples/triplet/3d_triplet" +# solver mode: CPU or GPU +solver_mode: CPU diff --git a/examples/triplet/3d_triplet_train_test.prototxt b/examples/triplet/3d_triplet_train_test.prototxt new file mode 100644 index 00000000000..60637b1a66b --- /dev/null +++ b/examples/triplet/3d_triplet_train_test.prototxt @@ -0,0 +1,181 @@ +name: "3d_triplet_train_test" +layer { + name: "data" + type: "Data" + top: "data" + top: "sim" + include { + phase: TRAIN + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/triplet/3d_triplet_train_leveldb" + batch_size: 250 + } +} +layer { + name: "data" + type: "Data" + top: "data" + top: "sim" + include { + phase: TEST + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/triplet/3d_triplet_test_leveldb" + batch_size: 250 + } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 16 + kernel_size: 8 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "pool1" + top: "pool1" +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 7 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: 
"pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu2" + type: "ReLU" + bottom: "pool2" + top: "pool2" +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 256 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu3" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip1" + top: "feat" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "loss" + type: "TripletLoss" + bottom: "feat" + bottom: "sim" + top: "loss" + triplet_loss_param { + margin: 1 + losstype: 0 + num_triplets: 3 + } +} diff --git a/examples/triplet/convert_3d_triplet_data.cpp b/examples/triplet/convert_3d_triplet_data.cpp new file mode 100644 index 00000000000..943efd9965f --- /dev/null +++ b/examples/triplet/convert_3d_triplet_data.cpp @@ -0,0 +1,221 @@ +// Usage: +// convert_3d_data input_image_file input_label_file output_db_file +#include // NOLINT(readability/streams) +#include +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/math_functions.hpp" +#include "glog/logging.h" +#include "google/protobuf/text_format.h" +#include "leveldb/db.h" +#include "math.h" +#include "stdint.h" + +uint32_t swap_endian(uint32_t val) { + val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); + return (val << 16) | (val >> 16); +} + +void read_image(std::ifstream* image_file, std::ifstream* label_file, + uint32_t index, uint32_t rows, uint32_t cols, + char* pixels, char* label_temp, signed char* label, int rgb_use) { + if (rgb_use == 0) { + image_file->seekg(index * rows * cols + 
16); + image_file->read(pixels, rows * cols); + label_file->seekg(index * 4 + 8); + label_file->read(label_temp, 4); + for (int i = 0; i < 4; i++) + *(label+i) = (signed char)*(label_temp+i); + } else { + image_file->seekg(3 * index * rows * cols + 16); + image_file->read(pixels, 3 * rows * cols); + label_file->seekg(index * 4 + 8); + label_file->read(label_temp, 4); + for (int i = 0; i < 4; i++) + *(label+i) = (signed char)*(label_temp+i); + } +} + +void convert_dataset(const char* image_filename, const char* label_filename, + const char* db_filename, + const char* class_number, const char* rgb_use) { + int rgb_use1 = atoi(rgb_use); + int class_num = atoi(class_number); + // Open files + std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); + std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); + CHECK(image_file) << "Unable to open file " << image_filename; + CHECK(label_file) << "Unable to open file " << label_filename; + // Read the magic and the meta data + uint32_t magic; + uint32_t num_items; + uint32_t num_labels; + uint32_t rows; + uint32_t cols; + + image_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + CHECK_EQ(magic, 2051) << "Incorrect image file magic."; + label_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + CHECK_EQ(magic, 2050) << "Incorrect label file magic."; + image_file.read(reinterpret_cast(&num_items), 4); + num_items = swap_endian(num_items); + label_file.read(reinterpret_cast(&num_labels), 4); + num_labels = swap_endian(num_labels); + CHECK_EQ(num_items, num_labels); + image_file.read(reinterpret_cast(&rows), 4); + rows = swap_endian(rows); + image_file.read(reinterpret_cast(&cols), 4); + cols = swap_endian(cols); + + // Open leveldb + leveldb::DB* db; + leveldb::Options options; + options.create_if_missing = true; + options.error_if_exists = true; + leveldb::Status status = leveldb::DB::Open( + options, db_filename, &db); + CHECK(status.ok()) << 
"Failed to open leveldb " << db_filename + << ". Is it already existing?"; + + char* label_temp = new char[4]; // label for unsigned char* + signed char* label_i = new signed char[4]; // label for triplet + signed char* label_j = new signed char[4]; + signed char* label_k = new signed char[4]; + signed char* label_l = new signed char[4]; // label for pair wise + signed char* label_m = new signed char[4]; + int db_size; + if (rgb_use1 == 0) + db_size = rows * cols; + else + db_size = 3 * rows * cols; + char* pixels1 = new char[db_size]; + char* pixels2 = new char[db_size]; + char* pixels3 = new char[db_size]; + char* pixels4 = new char[db_size]; + char* pixels5 = new char[db_size]; + const int kMaxKeyLength = 10; + char key[kMaxKeyLength]; + std::string value; + caffe::Datum datum; + if (rgb_use1 == 0) + datum.set_channels(1); + else + datum.set_channels(3); + datum.set_height(rows); + datum.set_width(cols); + LOG(INFO) << "A total of " << num_items << " items."; + LOG(INFO) << "Rows: " << rows << " Cols: " << cols; + int counter = 0; + for (unsigned int times = 0; times < 10; ++times) { + // iteration in the samples of all class + for (unsigned int itemid = 0; itemid < num_items/class_num; ++itemid) { + // iteration in the samples in one class + for (unsigned int class_ind = 0; class_ind < class_num; ++class_ind) { + // use reference sample one by one at each iteration + int i = itemid % num_items + class_ind*num_items/class_num; + int j = caffe::caffe_rng_rand() % num_items; // pick triplet groups + int k = caffe::caffe_rng_rand() % num_items; + int l = caffe::caffe_rng_rand() % num_items; // pick pair wise groups + int m = caffe::caffe_rng_rand() % num_items; + read_image(&image_file, &label_file, i, rows, cols, // read triplet + pixels1, label_temp, label_i, rgb_use1); + read_image(&image_file, &label_file, j, rows, cols, + pixels2, label_temp, label_j, rgb_use1); + read_image(&image_file, &label_file, k, rows, cols, + pixels3, label_temp, label_k, rgb_use1); + 
read_image(&image_file, &label_file, l, rows, cols, // read pair wise + pixels4, label_temp, label_l, rgb_use1); + read_image(&image_file, &label_file, m, rows, cols, + pixels5, label_temp, label_m, rgb_use1); + + bool pair_pass = false; + bool triplet1_pass = false; + bool triplet2_pass = false; + bool triplet3_class_same = false; + bool triplet3_pass = false; + + int ij_diff_x = static_cast(*(label_i+1)-*(label_j+1)); + int ij_diff_y = static_cast(*(label_i+2)-*(label_j+2)); + int ij_diff_z = static_cast(*(label_i+3)-*(label_j+3)); + int im_diff_x = static_cast(*(label_i+1)-*(label_m+1)); + int im_diff_y = static_cast(*(label_i+2)-*(label_m+2)); + int im_diff_z = static_cast(*(label_i+3)-*(label_m+3)); + + int ij_x = ij_diff_x*ij_diff_x; + int ij_y = ij_diff_y*ij_diff_y; + int ij_z = ij_diff_z*ij_diff_z; + int im_x = im_diff_x*im_diff_x; + int im_y = im_diff_y*im_diff_y; + int im_z = im_diff_z*im_diff_z; + + float dist_ij = std::sqrt(ij_x + ij_y + ij_z); + float dist_im = std::sqrt(im_x + im_y + im_z); + if (*label_i == *label_j && dist_ij < 100/3 && dist_ij != 0) + pair_pass = true; + if (pair_pass && (*label_i != *label_k)) + triplet1_pass = true; + if (pair_pass && (*label_i != *label_l)) + triplet2_pass = true; + if (pair_pass && (*label_i == *label_m)) + triplet3_class_same = true; + if (triplet3_class_same && dist_im > 100/3) + triplet3_pass = true; + if (pair_pass && triplet1_pass && triplet2_pass && triplet3_pass) { + datum.set_data(pixels1, db_size); // set data + datum.set_label(static_cast(*label_i)); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", counter); + db->Put(leveldb::WriteOptions(), std::string(key), value); + counter++; + datum.set_data(pixels2, db_size); // set data + datum.set_label(static_cast(*label_j)); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", counter); + db->Put(leveldb::WriteOptions(), std::string(key), value); + counter++; + datum.set_data(pixels3, db_size); // set data + 
datum.set_label(static_cast(*label_k)); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", counter); + db->Put(leveldb::WriteOptions(), std::string(key), value); + counter++; + datum.set_data(pixels4, db_size); // set data + datum.set_label(static_cast(*label_l)); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", counter); + db->Put(leveldb::WriteOptions(), std::string(key), value); + counter++; + datum.set_data(pixels5, db_size); // set data + datum.set_label(static_cast(*label_m)); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", counter); + db->Put(leveldb::WriteOptions(), std::string(key), value); + counter++; + } else { + class_ind--; + } + } // iteration in the samples of all class + } // iteration in the samples in one class + } // iteration in times + delete db; + delete pixels1; + delete pixels2; + delete pixels3; + delete pixels4; + delete pixels5; +} + +int main(int argc, char** argv) { + if (argc != 6) { + printf("This script converts the dataset to the leveldb format used\n" + "by caffe to train a triplet network.\n" + "Usage:\n" + " convert_3d_data input_image_file input_label_file " + "output_db_file class_number rgb_use \n"); + } else { + google::InitGoogleLogging(argv[0]); + convert_dataset(argv[1], argv[2], argv[3], argv[4], argv[5]); + } + return 0; +} diff --git a/examples/triplet/create_3d_triplet.sh b/examples/triplet/create_3d_triplet.sh new file mode 100755 index 00000000000..0fadd9b7e09 --- /dev/null +++ b/examples/triplet/create_3d_triplet.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env sh +# This script converts the mnist data into leveldb format. + +EXAMPLES=./build/examples/triplet +DATA=./data/linemod + +echo "Creating leveldb..." 
+ +rm -rf ./examples/triplet/3d_triplet_train_leveldb +rm -rf ./examples/triplet/3d_triplet_test_leveldb + +$EXAMPLES/convert_3d_triplet_data.bin \ + $DATA/binary_image_train \ + $DATA/binary_label_train \ + ./examples/triplet/3d_triplet_train_leveldb \ + 6 \ + 0 +$EXAMPLES/convert_3d_triplet_data.bin \ + $DATA/binary_image_test \ + $DATA/binary_label_test \ + ./examples/triplet/3d_triplet_test_leveldb \ + 6 \ + 0 +echo "Done." diff --git a/examples/triplet/readme.md b/examples/triplet/readme.md new file mode 100644 index 00000000000..6636808691a --- /dev/null +++ b/examples/triplet/readme.md @@ -0,0 +1,186 @@ +--- +title: Triplet Network Tutorial +description: Train and test a triplet network on data generated by 3D model. +category: example +include_in_docs: true +layout: default +priority: 100 +--- + +# Triplet Network Training with Caffe +This example shows how you can use weight sharing and a contrastive loss +function to learn a model using a triplet network in Caffe. + +We will assume that you have caffe successfully compiled. If not, please refer +to the [Installation page](../../installation.html). This example builds on the +[MNIST tutorial](mnist.html) so it would be a good idea to read that before +continuing. + +*The guide specifies all paths and assumes all commands are executed from the +root caffe directory* + +## Prepare Datasets + +You will first need to convert the data from some .ply models using the +opencv_contrib cnn_3donj module. After constructing the binary files including images and labels and putting them in the ./data/linemod folder, just run: + + ./examples/triplet/create_3d_triplet.sh + +After running the script there should be two datasets, +`./examples/triplet/3d_triplet_train_leveldb`, and +`./examples/triplet/3d_triplet_test_leveldb`. + +## The Model +First, we will define the model that we want to train using the triplet network. +We will use the convolutional net defined in +`./examples/triplet/3d_triplet.prototxt`. 
+ +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip1" + top: "feat" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 4 + } +} + +## Define the triplet Network + +In this section we will define the triplet network used for training. The +resulting network is defined in +`./examples/triplet/3d_triplet_train_test.prototxt`. + +### Reading in the Triplet Data + +We start with a data layer that reads from the LevelDB database we created +earlier. Each entry in this database contains the image data for a triplet of +images (`triplet_data`) and the label (`sim`) is not necessary in our method. + + layers { + name: "triplet_data" + type: DATA + top: "triplet_data" + top: "sim" + data_param { + source: "examples/triplet/3d-triplet-train-leveldb" + scale: 0.00390625 + batch_size: 250 + } + include: { phase: TRAIN } + } + +In order to pack a triplet of images into the same blob in the database we pack one +image per channel. We want to be able to work with these three images separately, +so we add a slice layer after the data layer. This takes the `triplet_data` and +slices it along the channel dimension so that we have a single image in `data`, +its positive image in `data_pos`, and its negative image in `data_neg`. As described in the paper for 3D object classification and pose estimation, a pair-wise term is also needed along with the triplet part. + +layer { + name: "slice_triplet" + type: "Slice" + bottom: "triplet_data" + top: "data" + top: "data_true" + top: "data_false" + top: "data_p1" + top: "data_p2" + slice_param { + slice_dim: 1 + slice_point: 1 + slice_point: 2 + slice_point: 3 + slice_point: 4 + } +} + +### Building the First part of the triplet Net + +Now we can specify the first side of the triplet net. This side operates on +`data` and produces `feat`. Starting from the net in +`./examples/triplet/3d_triplet.prototxt` we add default weight fillers. 
Then +we name the parameters of the convolutional and inner product layers. Naming the +parameters allows Caffe to share the parameters between layers on three channels of +the triplet net. In the definition this looks like: + + ... + param: "conv1_w" + param: "conv1_b" + ... + param: "conv2_w" + param: "conv2_b" + ... + param: "ip1_w" + param: "ip1_b" + ... + param: "ip2_w" + param: "ip2_b" + ... + +### Building the Second Side of the triplet Net + +Now we need to create the second path that operates on `data_pos` and produces +`feat_pos`. This path is exactly the same as the first. So we can just copy and +paste it. Then we change the name of each layer, input, and output by appending +`_pos` to differentiate the "paired" layers from the originals. + +### Building the Third Side of the triplet Net + +Now we need to create the third path that operates on `data_neg` and produces +`feat_neg`. This path is exactly the same as the first. So we can just copy and +paste it. Then we change the name of each layer, input, and output by appending +`_neg` to differentiate the "paired" layers from the originals. + +### Adding the Triplet Loss Function + +To train the network we will optimize a triplet loss function. +This cost function is implemented with the `TripletLoss` layer: + +layer { + name: "loss" + type: "TripletLoss" + bottom: "feat" + bottom: "feat_true" + bottom: "feat_false" + bottom: "feat_p1" + bottom: "feat_p2" + bottom: "sim" + top: "loss" + triplet_loss_param { + margin: 1 + losstype: 1 + } +} + +## Define the Solver + +Nothing special needs to be done to the solver besides pointing it at the +correct model file. The solver is defined in +`./examples/triplet/3d_triplet_solver.prototxt`. + +## Training and Testing the Model + +Training the model is simple after you have written the network definition +protobuf and solver protobuf files. 
Simply run +`./examples/triplet/train_3d_triplet.sh`: + + ./examples/triplet/train_3d_triplet.sh + +# Plotting the results + +First, we can draw the model and triplet networks by running the following +commands that draw the DAGs defined in the .prototxt files: + + ./python/draw_net.py \ + ./examples/triplet/3d_triplet.prototxt \ + ./examples/triplet/3d_triplet.png + + ./python/draw_net.py \ + ./examples/triplet/3d_triplet_train_test.prototxt \ + ./examples/triplet/3d_triplet_train_test.png \ No newline at end of file diff --git a/examples/triplet/train_3d_triplet.sh b/examples/triplet/train_3d_triplet.sh new file mode 100755 index 00000000000..e421af54493 --- /dev/null +++ b/examples/triplet/train_3d_triplet.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env sh + +TOOLS=./build/tools + +$TOOLS/caffe train --solver=examples/triplet/3d_triplet_solver.prototxt diff --git a/include/caffe/common_layers.hpp b/include/caffe/common_layers.hpp index 89bab8d6f3a..8e64b3e5dc5 100644 --- a/include/caffe/common_layers.hpp +++ b/include/caffe/common_layers.hpp @@ -85,7 +85,7 @@ class ConcatLayer : public Layer { const vector*>& top); virtual inline const char* type() const { return "Concat"; } - virtual inline int MinBottomBlobs() const { return 1; } + virtual inline int MinBottomBlobs() const { return 2; } virtual inline int ExactNumTopBlobs() const { return 1; } protected: @@ -625,7 +625,7 @@ class SliceLayer : public Layer { virtual inline const char* type() const { return "Slice"; } virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int MinTopBlobs() const { return 1; } + virtual inline int MinTopBlobs() const { return 2; } protected: virtual void Forward_cpu(const vector*>& bottom, diff --git a/include/caffe/solver.hpp b/include/caffe/solver.hpp index 8d52785ac6e..aba3e036004 100644 --- a/include/caffe/solver.hpp +++ b/include/caffe/solver.hpp @@ -82,8 +82,6 @@ class Solver { callbacks_.push_back(value); } - void CheckSnapshotWritePermissions(); - protected: 
// Make and apply the update value for the current iteration. virtual void ApplyUpdate() = 0; diff --git a/python/caffe/__init__.py b/python/caffe/__init__.py index ccda1bcae4f..6cc44e729f4 100644 --- a/python/caffe/__init__.py +++ b/python/caffe/__init__.py @@ -1,4 +1,4 @@ -from .pycaffe import Net, SGDSolver, NesterovSolver, AdaGradSolver, RMSPropSolver, AdaDeltaSolver, AdamSolver +from .pycaffe import Net, SGDSolver from ._caffe import set_mode_cpu, set_mode_gpu, set_device, Layer, get_solver, layer_type_list from .proto.caffe_pb2 import TRAIN, TEST from .classifier import Classifier diff --git a/python/caffe/_caffe.cpp b/python/caffe/_caffe.cpp index ccd5776ac40..cc49f60ab13 100644 --- a/python/caffe/_caffe.cpp +++ b/python/caffe/_caffe.cpp @@ -297,15 +297,6 @@ BOOST_PYTHON_MODULE(_caffe) { bp::class_, bp::bases >, shared_ptr >, boost::noncopyable>( "AdaGradSolver", bp::init()); - bp::class_, bp::bases >, - shared_ptr >, boost::noncopyable>( - "RMSPropSolver", bp::init()); - bp::class_, bp::bases >, - shared_ptr >, boost::noncopyable>( - "AdaDeltaSolver", bp::init()); - bp::class_, bp::bases >, - shared_ptr >, boost::noncopyable>( - "AdamSolver", bp::init()); bp::def("get_solver", &GetSolverFromFile, bp::return_value_policy()); diff --git a/python/caffe/net_spec.py b/python/caffe/net_spec.py index 93fc01927db..77a0e0070ae 100644 --- a/python/caffe/net_spec.py +++ b/python/caffe/net_spec.py @@ -56,14 +56,8 @@ def to_proto(*tops): def assign_proto(proto, name, val): """Assign a Python object to a protobuf message, based on the Python type (in recursive fashion). Lists become repeated fields/messages, dicts - become messages, and other types are assigned directly. 
For convenience, - repeated fields whose values are not lists are converted to single-element - lists; e.g., `my_repeated_int_field=3` is converted to - `my_repeated_int_field=[3]`.""" - - is_repeated_field = hasattr(getattr(proto, name), 'extend') - if is_repeated_field and not isinstance(val, list): - val = [val] + become messages, and other types are assigned directly.""" + if isinstance(val, list): if isinstance(val[0], dict): for item in val: diff --git a/python/caffe/pycaffe.py b/python/caffe/pycaffe.py index 8ea24da4fdd..4f980a92c38 100644 --- a/python/caffe/pycaffe.py +++ b/python/caffe/pycaffe.py @@ -10,8 +10,7 @@ from itertools import zip_longest as izip_longest import numpy as np -from ._caffe import Net, SGDSolver, NesterovSolver, AdaGradSolver, \ - RMSPropSolver, AdaDeltaSolver, AdamSolver +from ._caffe import Net, SGDSolver import caffe.io # We directly update methods from Net here (rather than using composition or diff --git a/python/caffe/test/test_net_spec.py b/python/caffe/test/test_net_spec.py index fee3c0aaebe..b4595e6531a 100644 --- a/python/caffe/test/test_net_spec.py +++ b/python/caffe/test/test_net_spec.py @@ -43,7 +43,8 @@ def anon_lenet(batch_size): def silent_net(): n = caffe.NetSpec() - n.data, n.data2 = L.DummyData(shape=dict(dim=3), ntop=2) + n.data, n.data2 = L.DummyData(shape=[dict(dim=[3]), dict(dim=[4, 2])], + ntop=2) n.silence_data = L.Silence(n.data, ntop=0) n.silence_data2 = L.Silence(n.data2, ntop=0) return n.to_proto() diff --git a/src/caffe/layers/concat_layer.cpp b/src/caffe/layers/concat_layer.cpp index 86b500de859..95fba105b9a 100644 --- a/src/caffe/layers/concat_layer.cpp +++ b/src/caffe/layers/concat_layer.cpp @@ -48,16 +48,11 @@ void ConcatLayer::Reshape(const vector*>& bottom, } top[0]->Reshape(top_shape); CHECK_EQ(bottom_count_sum, top[0]->count()); - if (bottom.size() == 1) { - top[0]->ShareData(*bottom[0]); - top[0]->ShareDiff(*bottom[0]); - } } template void ConcatLayer::Forward_cpu(const vector*>& bottom, const 
vector*>& top) { - if (bottom.size() == 1) { return; } Dtype* top_data = top[0]->mutable_cpu_data(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); @@ -77,7 +72,6 @@ void ConcatLayer::Forward_cpu(const vector*>& bottom, template void ConcatLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - if (bottom.size() == 1) { return; } const Dtype* top_diff = top[0]->cpu_diff(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); diff --git a/src/caffe/layers/concat_layer.cu b/src/caffe/layers/concat_layer.cu index 617701e2621..3c64c7ef224 100644 --- a/src/caffe/layers/concat_layer.cu +++ b/src/caffe/layers/concat_layer.cu @@ -28,7 +28,6 @@ __global__ void Concat(const int nthreads, const Dtype* in_data, template void ConcatLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { - if (bottom.size() == 1) { return; } Dtype* top_data = top[0]->mutable_gpu_data(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); @@ -49,7 +48,6 @@ void ConcatLayer::Forward_gpu(const vector*>& bottom, template void ConcatLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - if (bottom.size() == 1) { return; } const Dtype* top_diff = top[0]->gpu_diff(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); diff --git a/src/caffe/layers/slice_layer.cpp b/src/caffe/layers/slice_layer.cpp index 0a059ae88fe..e4418c9cf9c 100644 --- a/src/caffe/layers/slice_layer.cpp +++ b/src/caffe/layers/slice_layer.cpp @@ -67,16 +67,11 @@ void SliceLayer::Reshape(const vector*>& bottom, } } CHECK_EQ(count, bottom[0]->count()); - if (top.size() == 1) { - top[0]->ShareData(*bottom[0]); - top[0]->ShareDiff(*bottom[0]); - } } template void SliceLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { - if (top.size() == 1) { return; } int offset_slice_axis = 0; const Dtype* 
bottom_data = bottom[0]->cpu_data(); const int bottom_slice_axis = bottom[0]->shape(slice_axis_); @@ -97,7 +92,7 @@ void SliceLayer::Forward_cpu(const vector*>& bottom, template void SliceLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - if (!propagate_down[0] || top.size() == 1) { return; } + if (!propagate_down[0]) { return; } int offset_slice_axis = 0; Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); const int bottom_slice_axis = bottom[0]->shape(slice_axis_); diff --git a/src/caffe/layers/slice_layer.cu b/src/caffe/layers/slice_layer.cu index e8dc6cd98fc..796841d3f52 100644 --- a/src/caffe/layers/slice_layer.cu +++ b/src/caffe/layers/slice_layer.cu @@ -28,7 +28,6 @@ __global__ void Slice(const int nthreads, const Dtype* in_data, template void SliceLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { - if (top.size() == 1) { return; } int offset_slice_axis = 0; const Dtype* bottom_data = bottom[0]->gpu_data(); const int bottom_slice_axis = bottom[0]->shape(slice_axis_); @@ -49,7 +48,7 @@ void SliceLayer::Forward_gpu(const vector*>& bottom, template void SliceLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - if (!propagate_down[0] || top.size() == 1) { return; } + if (!propagate_down[0]) { return; } int offset_slice_axis = 0; Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int bottom_slice_axis = bottom[0]->shape(slice_axis_); diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp index ebb8b5d28c2..89d14013dc9 100644 --- a/src/caffe/net.cpp +++ b/src/caffe/net.cpp @@ -810,11 +810,12 @@ void Net::Backward() { BackwardFromTo(layers_.size() - 1, 0); if (debug_info_) { Dtype asum_data = 0, asum_diff = 0, sumsq_data = 0, sumsq_diff = 0; - for (int i = 0; i < learnable_params_.size(); ++i) { - asum_data += learnable_params_[i]->asum_data(); - asum_diff += learnable_params_[i]->asum_diff(); - sumsq_data += learnable_params_[i]->sumsq_data(); - 
sumsq_diff += learnable_params_[i]->sumsq_diff(); + for (int i = 0; i < params_.size(); ++i) { + if (param_owners_[i] >= 0) { continue; } + asum_data += params_[i]->asum_data(); + asum_diff += params_[i]->asum_diff(); + sumsq_data += params_[i]->sumsq_data(); + sumsq_diff += params_[i]->sumsq_diff(); } const Dtype l2norm_data = std::sqrt(sumsq_data); const Dtype l2norm_diff = std::sqrt(sumsq_diff); diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp index 3574ce75046..394ec3b3ad7 100644 --- a/src/caffe/solver.cpp +++ b/src/caffe/solver.cpp @@ -55,7 +55,6 @@ void Solver::Init(const SolverParameter& param) { << std::endl << param.DebugString(); param_ = param; CHECK_GE(param_.average_loss(), 1) << "average_loss should be non-negative."; - CheckSnapshotWritePermissions(); if (Caffe::root_solver() && param_.random_seed() >= 0) { Caffe::set_random_seed(param_.random_seed()); } @@ -435,24 +434,6 @@ void Solver::Snapshot() { SnapshotSolverState(model_filename); } -template -void Solver::CheckSnapshotWritePermissions() { - if (Caffe::root_solver() && param_.snapshot()) { - CHECK(param_.has_snapshot_prefix()) - << "In solver params, snapshot is specified but snapshot_prefix is not"; - string probe_filename = SnapshotFilename(".tempfile"); - std::ofstream probe_ofs(probe_filename.c_str()); - if (probe_ofs.good()) { - probe_ofs.close(); - std::remove(probe_filename.c_str()); - } else { - LOG(FATAL) << "Cannot write to snapshot prefix '" - << param_.snapshot_prefix() << "'. 
Make sure " - << "that the directory exists and is writeable."; - } - } -} - template string Solver::SnapshotFilename(const string extension) { string filename(param_.snapshot_prefix()); @@ -751,7 +732,7 @@ void SGDSolver::SnapshotSolverStateToBinaryProto( } string snapshot_filename = Solver::SnapshotFilename(".solverstate"); LOG(INFO) - << "Snapshotting solver state to binary proto file " << snapshot_filename; + << "Snapshotting solver state to binary proto file" << snapshot_filename; WriteProtoToBinaryFile(state, snapshot_filename.c_str()); } diff --git a/src/caffe/test/test_accuracy_layer.cpp b/src/caffe/test/test_accuracy_layer.cpp index ef0e57a37a1..94e529b5eee 100644 --- a/src/caffe/test/test_accuracy_layer.cpp +++ b/src/caffe/test/test_accuracy_layer.cpp @@ -250,6 +250,7 @@ TYPED_TEST(AccuracyLayerTest, TestForwardCPUTopK) { TYPED_TEST(AccuracyLayerTest, TestForwardCPUPerClass) { LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); AccuracyLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_per_class_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_per_class_vec_); @@ -278,16 +279,16 @@ TYPED_TEST(AccuracyLayerTest, TestForwardCPUPerClass) { EXPECT_NEAR(this->blob_top_->data_at(0, 0, 0, 0), num_correct_labels / 100.0, 1e-4); for (int i = 0; i < num_class; ++i) { - TypeParam accuracy_per_class = (num_per_class[i] > 0 ? 
- static_cast(correct_per_class[i]) / num_per_class[i] : 0); EXPECT_NEAR(this->blob_top_per_class_->data_at(i, 0, 0, 0), - accuracy_per_class, 1e-4); + static_cast(correct_per_class[i]) / num_per_class[i], + 1e-4); } } TYPED_TEST(AccuracyLayerTest, TestForwardCPUPerClassWithIgnoreLabel) { LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); const TypeParam kIgnoreLabelValue = -1; layer_param.mutable_accuracy_param()->set_ignore_label(kIgnoreLabelValue); AccuracyLayer layer(layer_param); @@ -328,10 +329,9 @@ TYPED_TEST(AccuracyLayerTest, TestForwardCPUPerClassWithIgnoreLabel) { EXPECT_NEAR(this->blob_top_->data_at(0, 0, 0, 0), num_correct_labels / TypeParam(count), 1e-4); for (int i = 0; i < 10; ++i) { - TypeParam accuracy_per_class = (num_per_class[i] > 0 ? - static_cast(correct_per_class[i]) / num_per_class[i] : 0); EXPECT_NEAR(this->blob_top_per_class_->data_at(i, 0, 0, 0), - accuracy_per_class, 1e-4); + TypeParam(correct_per_class[i]) / num_per_class[i], + 1e-4); } } diff --git a/src/caffe/test/test_concat_layer.cpp b/src/caffe/test/test_concat_layer.cpp index ccd97eb1d66..088e0a41685 100644 --- a/src/caffe/test/test_concat_layer.cpp +++ b/src/caffe/test/test_concat_layer.cpp @@ -99,19 +99,6 @@ TYPED_TEST(ConcatLayerTest, TestSetupChannelsNegativeIndexing) { EXPECT_EQ(this->blob_top_->width(), this->blob_bottom_0_->width()); } -TYPED_TEST(ConcatLayerTest, TestForwardTrivial) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - ConcatLayer layer(layer_param); - this->blob_bottom_vec_0_.resize(1); - layer.SetUp(this->blob_bottom_vec_0_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_0_, this->blob_top_vec_); - for (int i = 0; i < this->blob_bottom_0_->count(); ++i) { - EXPECT_EQ(this->blob_bottom_0_->cpu_data()[i], - this->blob_top_->cpu_data()[i]); - } -} - TYPED_TEST(ConcatLayerTest, TestForwardNum) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; @@ -167,16 +154,6 @@ TYPED_TEST(ConcatLayerTest, 
TestForwardChannels) { } } -TYPED_TEST(ConcatLayerTest, TestGradientTrivial) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - ConcatLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-2); - this->blob_bottom_vec_0_.resize(1); - checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_0_, - this->blob_top_vec_); -} - TYPED_TEST(ConcatLayerTest, TestGradientNum) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; diff --git a/src/caffe/test/test_eltwise_layer.cpp b/src/caffe/test/test_eltwise_layer.cpp index 8031f6e9022..be0c1347709 100644 --- a/src/caffe/test/test_eltwise_layer.cpp +++ b/src/caffe/test/test_eltwise_layer.cpp @@ -80,7 +80,7 @@ TYPED_TEST(EltwiseLayerTest, TestProd) { const Dtype* in_data_b = this->blob_bottom_b_->cpu_data(); const Dtype* in_data_c = this->blob_bottom_c_->cpu_data(); for (int i = 0; i < count; ++i) { - EXPECT_NEAR(data[i], in_data_a[i] * in_data_b[i] * in_data_c[i], 1e-4); + EXPECT_EQ(data[i], in_data_a[i] * in_data_b[i] * in_data_c[i]); } } @@ -99,7 +99,7 @@ TYPED_TEST(EltwiseLayerTest, TestSum) { const Dtype* in_data_b = this->blob_bottom_b_->cpu_data(); const Dtype* in_data_c = this->blob_bottom_c_->cpu_data(); for (int i = 0; i < count; ++i) { - EXPECT_NEAR(data[i], in_data_a[i] + in_data_b[i] + in_data_c[i], 1e-4); + EXPECT_EQ(data[i], in_data_a[i] + in_data_b[i] + in_data_c[i]); } } diff --git a/src/caffe/test/test_slice_layer.cpp b/src/caffe/test/test_slice_layer.cpp index 2d2d0fdc005..ccd03646d19 100644 --- a/src/caffe/test/test_slice_layer.cpp +++ b/src/caffe/test/test_slice_layer.cpp @@ -88,21 +88,6 @@ TYPED_TEST(SliceLayerTest, TestSetupChannels) { EXPECT_EQ(this->blob_bottom_->width(), this->blob_top_0_->width()); } -TYPED_TEST(SliceLayerTest, TestTrivialSlice) { - // Test the trivial (single output) "slice" operation -- - // should be the identity. 
- typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - SliceLayer layer(layer_param); - this->blob_top_vec_0_.resize(1); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_0_); - ASSERT_EQ(this->blob_bottom_->shape(), this->blob_top_0_->shape()); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { - EXPECT_EQ(this->blob_bottom_->cpu_data()[i], - this->blob_top_0_->cpu_data()[i]); - } -} - TYPED_TEST(SliceLayerTest, TestSliceAcrossNum) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; @@ -176,18 +161,6 @@ TYPED_TEST(SliceLayerTest, TestSliceAcrossChannels) { } } -TYPED_TEST(SliceLayerTest, TestGradientTrivial) { - // Test the trivial (single output) "slice" operation -- - // should be the identity. - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - SliceLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-3); - this->blob_top_vec_0_.resize(1); - checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, - this->blob_top_vec_0_); -} - TYPED_TEST(SliceLayerTest, TestGradientAcrossNum) { typedef typename TypeParam::Dtype Dtype; // Gradient checks are slow; reduce blob size. diff --git a/tools/extract_features.cpp b/tools/extract_features.cpp index 084c9bf88df..365dd495bbf 100644 --- a/tools/extract_features.cpp +++ b/tools/extract_features.cpp @@ -42,7 +42,7 @@ int feature_extraction_pipeline(int argc, char** argv) { " save_feature_dataset_name1[,name2,...] num_mini_batches db_type" " [CPU/GPU] [DEVICE_ID=0]\n" "Note: you can extract multiple features in one pass by specifying" - " multiple feature blob names and dataset names separated by ','." + " multiple feature blob names and dataset names seperated by ','." 
" The names cannot contain white space characters and the number of blobs" " and datasets must be equal."; return 1; From 3d24b43d44071126496fb01901aee2c5c38fa24f Mon Sep 17 00:00:00 2001 From: Wangyida Date: Fri, 18 Sep 2015 09:56:04 +0800 Subject: [PATCH 48/82] update master branch by clone upstream and push again --- Makefile | 7 +- Makefile.config.example | 8 +- docs/multigpu.md | 26 ++ examples/triplet/3d_triplet.prototxt | 110 ----- examples/triplet/3d_triplet_solver.prototxt | 25 -- .../triplet/3d_triplet_train_test.prototxt | 181 -------- examples/triplet/convert_3d_triplet_data.cpp | 221 ---------- examples/triplet/create_3d_triplet.sh | 24 -- examples/triplet/readme.md | 186 -------- examples/triplet/train_3d_triplet.sh | 5 - include/caffe/common_layers.hpp | 4 +- include/caffe/loss_layers.hpp | 67 --- include/caffe/solver.hpp | 2 + python/caffe/__init__.py | 2 +- python/caffe/_caffe.cpp | 9 + python/caffe/net_spec.py | 10 +- python/caffe/pycaffe.py | 3 +- python/caffe/test/test_net_spec.py | 3 +- src/caffe/layers/concat_layer.cpp | 6 + src/caffe/layers/concat_layer.cu | 2 + src/caffe/layers/slice_layer.cpp | 7 +- src/caffe/layers/slice_layer.cu | 3 +- src/caffe/layers/triplet_loss_layer.cpp | 397 ------------------ src/caffe/layers/triplet_loss_layer.cu | 392 ----------------- src/caffe/net.cpp | 11 +- src/caffe/proto/caffe.proto | 11 - src/caffe/solver.cpp | 21 +- src/caffe/test/test_accuracy_layer.cpp | 12 +- src/caffe/test/test_concat_layer.cpp | 23 + src/caffe/test/test_eltwise_layer.cpp | 4 +- src/caffe/test/test_slice_layer.cpp | 27 ++ src/caffe/test/test_triplet_loss_layer | 125 ------ src/caffe/util/upgrade_proto.cpp | 6 - tools/caffe.cpp | 1 + tools/extract_features.cpp | 2 +- 35 files changed, 162 insertions(+), 1781 deletions(-) create mode 100644 docs/multigpu.md delete mode 100644 examples/triplet/3d_triplet.prototxt delete mode 100644 examples/triplet/3d_triplet_solver.prototxt delete mode 100644 
examples/triplet/3d_triplet_train_test.prototxt delete mode 100644 examples/triplet/convert_3d_triplet_data.cpp delete mode 100755 examples/triplet/create_3d_triplet.sh delete mode 100644 examples/triplet/readme.md delete mode 100755 examples/triplet/train_3d_triplet.sh delete mode 100644 src/caffe/layers/triplet_loss_layer.cpp delete mode 100644 src/caffe/layers/triplet_loss_layer.cu delete mode 100644 src/caffe/test/test_triplet_loss_layer diff --git a/Makefile b/Makefile index ddaed59b0a9..a911133661f 100644 --- a/Makefile +++ b/Makefile @@ -172,6 +172,11 @@ endif LIBRARIES += glog gflags protobuf boost_system m hdf5_hl hdf5 +# handle IO dependencies +USE_LEVELDB ?= 1 +USE_LMDB ?= 1 +USE_OPENCV ?= 1 + ifeq ($(USE_LEVELDB), 1) LIBRARIES += leveldb snappy endif @@ -299,7 +304,7 @@ ifeq ($(USE_CUDNN), 1) COMMON_FLAGS += -DUSE_CUDNN endif -# i/o libraries configuration +# configure IO libraries ifeq ($(USE_OPENCV), 1) COMMON_FLAGS += -DUSE_OPENCV endif diff --git a/Makefile.config.example b/Makefile.config.example index 32e67ee493e..a20bad2f5ce 100644 --- a/Makefile.config.example +++ b/Makefile.config.example @@ -7,10 +7,10 @@ # CPU-only switch (uncomment to build without GPU support). # CPU_ONLY := 1 -# comment out to disable IO dependencies -USE_LEVELDB := 1 -USE_LMDB := 1 -USE_OPENCV := 1 +# uncomment to disable IO dependencies and corresponding data layers +# USE_LEVELDB := 0 +# USE_LMDB := 0 +# USE_OPENCV := 0 # To customize your choice of compiler, uncomment and set the following. # N.B. the default for Linux is g++ and the default for OSX is clang++ diff --git a/docs/multigpu.md b/docs/multigpu.md new file mode 100644 index 00000000000..01cfb8938b5 --- /dev/null +++ b/docs/multigpu.md @@ -0,0 +1,26 @@ +--- +title: Multi-GPU Usage, Hardware Configuration Assumptions, and Performance +--- + +# Multi-GPU Usage + +Currently Multi-GPU is only supported via the C/C++ paths and only for training. 
+ +The GPUs to be used for training can be set with the "-gpu" flag on the command line to the 'caffe' tool. e.g. "build/tools/caffe train --solver=models/bvlc_alexnet/solver.prototxt --gpu=0,1" will train on GPUs 0 and 1. + +**NOTE**: each GPU runs the batchsize specified in your train_val.prototxt. So if you go from 1 GPU to 2 GPU, your effective batchsize will double. e.g. if your train_val.prototxt specified a batchsize of 256, if you run 2 GPUs your effective batch size is now 512. So you need to adjust the batchsize when running multiple GPUs and/or adjust your solver params, specifically learning rate. + +# Hardware Configuration Assumptions + +The current implementation uses a tree reduction strategy. e.g. if there are 4 GPUs in the system, 0:1, 2:3 will exchange gradients, then 0:2 (top of the tree) will exchange gradients, 0 will calculate +updated model, 0\-\>2, and then 0\-\>1, 2\-\>3. + +For best performance, P2P DMA access between devices is needed. Without P2P access, for example crossing PCIe root complex, data is copied through host and effective exchange bandwidth is greatly reduced. + +Current implementation has a "soft" assumption that the devices being used are homogeneous. In practice, any devices of the same general class should work together, but performance and total size is limited by the smallest device being used. e.g. if you combine a TitanX and a GTX980, peformance will be limited by the 980. Mixing vastly different levels of boards, e.g. Kepler and Fermi, is not supported. + +"nvidia-smi topo -m" will show you the connectivity matrix. You can do P2P through PCIe bridges, but not across socket level links at this time, e.g. across CPU sockets on a multi-socket motherboard. + +# Scaling Performance + +Performance is **heavily** dependent on the PCIe topology of the system, the configuration of the neural network you are training, and the speed of each of the layers. 
Systems like the DIGITS DevBox have an optimized PCIe topology (X99-E WS chipset). In general, scaling on 2 GPUs tends to be ~1.8X on average for networks like AlexNet, CaffeNet, VGG, GoogleNet. 4 GPUs begins to have falloff in scaling. Generally with "weak scaling" where the batchsize increases with the number of GPUs you will see 3.5x scaling or so. With "strong scaling", the system can become communication bound, especially with layer performance optimizations like those in [cuDNNv3](http://nvidia.com/cudnn), and you will likely see closer to mid 2.x scaling in performance. Networks that have heavy computation compared to the number of parameters tend to have the best scaling performance. \ No newline at end of file diff --git a/examples/triplet/3d_triplet.prototxt b/examples/triplet/3d_triplet.prototxt deleted file mode 100644 index 076e0be5040..00000000000 --- a/examples/triplet/3d_triplet.prototxt +++ /dev/null @@ -1,110 +0,0 @@ -name: "3d_triplet" -input: "data" -input_dim: 9720 -input_dim: 1 -input_dim: 64 -input_dim: 64 -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 16 - kernel_size: 8 - stride: 1 - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "pool1" - top: "pool1" -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 7 - kernel_size: 5 - stride: 1 - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "relu2" - type: "ReLU" - bottom: "pool2" - top: "pool2" -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - lr_mult: 1 - 
} - param { - lr_mult: 2 - } - inner_product_param { - num_output: 256 - } -} -layer { - name: "relu3" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip1" - top: "feat" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 3 - } -} diff --git a/examples/triplet/3d_triplet_solver.prototxt b/examples/triplet/3d_triplet_solver.prototxt deleted file mode 100644 index eea97da7603..00000000000 --- a/examples/triplet/3d_triplet_solver.prototxt +++ /dev/null @@ -1,25 +0,0 @@ -# The train/test net protocol buffer definition -net: "examples/triplet/3d_triplet_train_test.prototxt" -# test_iter specifies how many forward passes the test should carry out. -# In the case of 3d database, we have test batch size 250 and 250 test iterations: 50*(2+3)=250, -# covering the full 9720 testing images:162*6*10=9720. -test_iter: 100 -# Carry out testing every 500 training iterations. -test_interval: 100 -# The base learning rate, momentum and the weight decay of the network. 
-base_lr: 0.001 -momentum: 0.9 -weight_decay: 0.0000 -# The learning rate policy -lr_policy: "inv" -gamma: 0.0001 -power: 0.75 -# Display every 100 iterations -display: 100 -# The maximum number of iterations -max_iter: 80000 -# snapshot intermediate results -snapshot: 5000 -snapshot_prefix: "examples/triplet/3d_triplet" -# solver mode: CPU or GPU -solver_mode: CPU diff --git a/examples/triplet/3d_triplet_train_test.prototxt b/examples/triplet/3d_triplet_train_test.prototxt deleted file mode 100644 index 60637b1a66b..00000000000 --- a/examples/triplet/3d_triplet_train_test.prototxt +++ /dev/null @@ -1,181 +0,0 @@ -name: "3d_triplet_train_test" -layer { - name: "data" - type: "Data" - top: "data" - top: "sim" - include { - phase: TRAIN - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/triplet/3d_triplet_train_leveldb" - batch_size: 250 - } -} -layer { - name: "data" - type: "Data" - top: "data" - top: "sim" - include { - phase: TEST - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/triplet/3d_triplet_test_leveldb" - batch_size: 250 - } -} -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 16 - kernel_size: 8 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "pool1" - top: "pool1" -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 7 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: 
"pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "relu2" - type: "ReLU" - bottom: "pool2" - top: "pool2" -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 256 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu3" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip1" - top: "feat" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 3 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "loss" - type: "TripletLoss" - bottom: "feat" - bottom: "sim" - top: "loss" - triplet_loss_param { - margin: 1 - losstype: 0 - num_triplets: 3 - } -} diff --git a/examples/triplet/convert_3d_triplet_data.cpp b/examples/triplet/convert_3d_triplet_data.cpp deleted file mode 100644 index 943efd9965f..00000000000 --- a/examples/triplet/convert_3d_triplet_data.cpp +++ /dev/null @@ -1,221 +0,0 @@ -// Usage: -// convert_3d_data input_image_file input_label_file output_db_file -#include // NOLINT(readability/streams) -#include -#include "caffe/proto/caffe.pb.h" -#include "caffe/util/math_functions.hpp" -#include "glog/logging.h" -#include "google/protobuf/text_format.h" -#include "leveldb/db.h" -#include "math.h" -#include "stdint.h" - -uint32_t swap_endian(uint32_t val) { - val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); - return (val << 16) | (val >> 16); -} - -void read_image(std::ifstream* image_file, std::ifstream* label_file, - uint32_t index, uint32_t rows, uint32_t cols, - char* pixels, char* label_temp, signed char* label, int rgb_use) { - if (rgb_use == 0) { - image_file->seekg(index * rows * cols + 
16); - image_file->read(pixels, rows * cols); - label_file->seekg(index * 4 + 8); - label_file->read(label_temp, 4); - for (int i = 0; i < 4; i++) - *(label+i) = (signed char)*(label_temp+i); - } else { - image_file->seekg(3 * index * rows * cols + 16); - image_file->read(pixels, 3 * rows * cols); - label_file->seekg(index * 4 + 8); - label_file->read(label_temp, 4); - for (int i = 0; i < 4; i++) - *(label+i) = (signed char)*(label_temp+i); - } -} - -void convert_dataset(const char* image_filename, const char* label_filename, - const char* db_filename, - const char* class_number, const char* rgb_use) { - int rgb_use1 = atoi(rgb_use); - int class_num = atoi(class_number); - // Open files - std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); - std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); - CHECK(image_file) << "Unable to open file " << image_filename; - CHECK(label_file) << "Unable to open file " << label_filename; - // Read the magic and the meta data - uint32_t magic; - uint32_t num_items; - uint32_t num_labels; - uint32_t rows; - uint32_t cols; - - image_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - CHECK_EQ(magic, 2051) << "Incorrect image file magic."; - label_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - CHECK_EQ(magic, 2050) << "Incorrect label file magic."; - image_file.read(reinterpret_cast(&num_items), 4); - num_items = swap_endian(num_items); - label_file.read(reinterpret_cast(&num_labels), 4); - num_labels = swap_endian(num_labels); - CHECK_EQ(num_items, num_labels); - image_file.read(reinterpret_cast(&rows), 4); - rows = swap_endian(rows); - image_file.read(reinterpret_cast(&cols), 4); - cols = swap_endian(cols); - - // Open leveldb - leveldb::DB* db; - leveldb::Options options; - options.create_if_missing = true; - options.error_if_exists = true; - leveldb::Status status = leveldb::DB::Open( - options, db_filename, &db); - CHECK(status.ok()) << 
"Failed to open leveldb " << db_filename - << ". Is it already existing?"; - - char* label_temp = new char[4]; // label for unsigned char* - signed char* label_i = new signed char[4]; // label for triplet - signed char* label_j = new signed char[4]; - signed char* label_k = new signed char[4]; - signed char* label_l = new signed char[4]; // label for pair wise - signed char* label_m = new signed char[4]; - int db_size; - if (rgb_use1 == 0) - db_size = rows * cols; - else - db_size = 3 * rows * cols; - char* pixels1 = new char[db_size]; - char* pixels2 = new char[db_size]; - char* pixels3 = new char[db_size]; - char* pixels4 = new char[db_size]; - char* pixels5 = new char[db_size]; - const int kMaxKeyLength = 10; - char key[kMaxKeyLength]; - std::string value; - caffe::Datum datum; - if (rgb_use1 == 0) - datum.set_channels(1); - else - datum.set_channels(3); - datum.set_height(rows); - datum.set_width(cols); - LOG(INFO) << "A total of " << num_items << " items."; - LOG(INFO) << "Rows: " << rows << " Cols: " << cols; - int counter = 0; - for (unsigned int times = 0; times < 10; ++times) { - // iteration in the samples of all class - for (unsigned int itemid = 0; itemid < num_items/class_num; ++itemid) { - // iteration in the samples in one class - for (unsigned int class_ind = 0; class_ind < class_num; ++class_ind) { - // use reference sample one by one at each iteration - int i = itemid % num_items + class_ind*num_items/class_num; - int j = caffe::caffe_rng_rand() % num_items; // pick triplet groups - int k = caffe::caffe_rng_rand() % num_items; - int l = caffe::caffe_rng_rand() % num_items; // pick pair wise groups - int m = caffe::caffe_rng_rand() % num_items; - read_image(&image_file, &label_file, i, rows, cols, // read triplet - pixels1, label_temp, label_i, rgb_use1); - read_image(&image_file, &label_file, j, rows, cols, - pixels2, label_temp, label_j, rgb_use1); - read_image(&image_file, &label_file, k, rows, cols, - pixels3, label_temp, label_k, rgb_use1); - 
read_image(&image_file, &label_file, l, rows, cols, // read pair wise - pixels4, label_temp, label_l, rgb_use1); - read_image(&image_file, &label_file, m, rows, cols, - pixels5, label_temp, label_m, rgb_use1); - - bool pair_pass = false; - bool triplet1_pass = false; - bool triplet2_pass = false; - bool triplet3_class_same = false; - bool triplet3_pass = false; - - int ij_diff_x = static_cast(*(label_i+1)-*(label_j+1)); - int ij_diff_y = static_cast(*(label_i+2)-*(label_j+2)); - int ij_diff_z = static_cast(*(label_i+3)-*(label_j+3)); - int im_diff_x = static_cast(*(label_i+1)-*(label_m+1)); - int im_diff_y = static_cast(*(label_i+2)-*(label_m+2)); - int im_diff_z = static_cast(*(label_i+3)-*(label_m+3)); - - int ij_x = ij_diff_x*ij_diff_x; - int ij_y = ij_diff_y*ij_diff_y; - int ij_z = ij_diff_z*ij_diff_z; - int im_x = im_diff_x*im_diff_x; - int im_y = im_diff_y*im_diff_y; - int im_z = im_diff_z*im_diff_z; - - float dist_ij = std::sqrt(ij_x + ij_y + ij_z); - float dist_im = std::sqrt(im_x + im_y + im_z); - if (*label_i == *label_j && dist_ij < 100/3 && dist_ij != 0) - pair_pass = true; - if (pair_pass && (*label_i != *label_k)) - triplet1_pass = true; - if (pair_pass && (*label_i != *label_l)) - triplet2_pass = true; - if (pair_pass && (*label_i == *label_m)) - triplet3_class_same = true; - if (triplet3_class_same && dist_im > 100/3) - triplet3_pass = true; - if (pair_pass && triplet1_pass && triplet2_pass && triplet3_pass) { - datum.set_data(pixels1, db_size); // set data - datum.set_label(static_cast(*label_i)); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", counter); - db->Put(leveldb::WriteOptions(), std::string(key), value); - counter++; - datum.set_data(pixels2, db_size); // set data - datum.set_label(static_cast(*label_j)); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", counter); - db->Put(leveldb::WriteOptions(), std::string(key), value); - counter++; - datum.set_data(pixels3, db_size); // set data - 
datum.set_label(static_cast(*label_k)); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", counter); - db->Put(leveldb::WriteOptions(), std::string(key), value); - counter++; - datum.set_data(pixels4, db_size); // set data - datum.set_label(static_cast(*label_l)); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", counter); - db->Put(leveldb::WriteOptions(), std::string(key), value); - counter++; - datum.set_data(pixels5, db_size); // set data - datum.set_label(static_cast(*label_m)); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", counter); - db->Put(leveldb::WriteOptions(), std::string(key), value); - counter++; - } else { - class_ind--; - } - } // iteration in the samples of all class - } // iteration in the samples in one class - } // iteration in times - delete db; - delete pixels1; - delete pixels2; - delete pixels3; - delete pixels4; - delete pixels5; -} - -int main(int argc, char** argv) { - if (argc != 6) { - printf("This script converts the dataset to the leveldb format used\n" - "by caffe to train a triplet network.\n" - "Usage:\n" - " convert_3d_data input_image_file input_label_file " - "output_db_file class_number rgb_use \n"); - } else { - google::InitGoogleLogging(argv[0]); - convert_dataset(argv[1], argv[2], argv[3], argv[4], argv[5]); - } - return 0; -} diff --git a/examples/triplet/create_3d_triplet.sh b/examples/triplet/create_3d_triplet.sh deleted file mode 100755 index 0fadd9b7e09..00000000000 --- a/examples/triplet/create_3d_triplet.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env sh -# This script converts the mnist data into leveldb format. - -EXAMPLES=./build/examples/triplet -DATA=./data/linemod - -echo "Creating leveldb..." 
- -rm -rf ./examples/triplet/3d_triplet_train_leveldb -rm -rf ./examples/triplet/3d_triplet_test_leveldb - -$EXAMPLES/convert_3d_triplet_data.bin \ - $DATA/binary_image_train \ - $DATA/binary_label_train \ - ./examples/triplet/3d_triplet_train_leveldb \ - 6 \ - 0 -$EXAMPLES/convert_3d_triplet_data.bin \ - $DATA/binary_image_test \ - $DATA/binary_label_test \ - ./examples/triplet/3d_triplet_test_leveldb \ - 6 \ - 0 -echo "Done." diff --git a/examples/triplet/readme.md b/examples/triplet/readme.md deleted file mode 100644 index 6636808691a..00000000000 --- a/examples/triplet/readme.md +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: Triplet Network Tutorial -description: Train and test a triplet network on data generated by 3D model. -category: example -include_in_docs: true -layout: default -priority: 100 ---- - -# Triplet Network Training with Caffe -This example shows how you can use weight sharing and a contrastive loss -function to learn a model using a triplet network in Caffe. - -We will assume that you have caffe successfully compiled. If not, please refer -to the [Installation page](../../installation.html). This example builds on the -[MNIST tutorial](mnist.html) so it would be a good idea to read that before -continuing. - -*The guide specifies all paths and assumes all commands are executed from the -root caffe directory* - -## Prepare Datasets - -You will first need to convert the data from the some .ply models using -opencv_contrib cnn_3donj module. After construcing the binary files including images and labels and put them in ./data/linemod folder, just run: - - ./examples/triplet/create_3d_triplet.sh - -After running the script there should be two datasets, -`./examples/triplet/3d_triplet_train_leveldb`, and -`./examples/triplet/3d_triplet_test_leveldb`. - -## The Model -First, we will define the model that we want to train using the triplet network. -We will use the convolutional net defined in -`./examples/triplet/3d_triplet.prototxt`. 
- -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip1" - top: "feat" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 4 - } -} - -## Define the triplet Network - -In this section we will define the triplet network used for training. The -resulting network is defined in -`./examples/triplet/3d_triplet_train_test.prototxt`. - -### Reading in the Triplet Data - -We start with a data layer that reads from the LevelDB database we created -earlier. Each entry in this database contains the image data for a triplet of -images (`triplet_data`) and the label (`sim`) is not nessesary in our method. - - layers { - name: "triplet_data" - type: DATA - top: "triplet_data" - top: "sim" - data_param { - source: "examples/triplet/3d-triplet-train-leveldb" - scale: 0.00390625 - batch_size: 250 - } - include: { phase: TRAIN } - } - -In order to pack a triplet of images into the same blob in the database we pack one -image per channel. We want to be able to work with these three images separately, -so we add a slice layer after the data layer. This takes the `triplet_data` and -slices it along the channel dimension so that we have a single image in `data` -and its positive image in `data_pos.` & its negative image in `data_neg.`, as described in paper for 3D object classification and pose estimation, a pair wise term is also need alone with the triplet part. - -layer { - name: "slice_triplet" - type: "Slice" - bottom: "triplet_data" - top: "data" - top: "data_true" - top: "data_false" - top: "data_p1" - top: "data_p2" - slice_param { - slice_dim: 1 - slice_point: 1 - slice_point: 2 - slice_point: 3 - slice_point: 4 - } -} - -### Building the First part of the triplet Net - -Now we can specify the first side of the triplet net. This side operates on -`data` and produces `feat`. Starting from the net in -`./examples/triplet/3d_triplet.prototxt` we add default weight fillers. 
Then -we name the parameters of the convolutional and inner product layers. Naming the -parameters allows Caffe to share the parameters between layers on three channels of -the triplet net. In the definition this looks like: - - ... - param: "conv1_w" - param: "conv1_b" - ... - param: "conv2_w" - param: "conv2_b" - ... - param: "ip1_w" - param: "ip1_b" - ... - param: "ip2_w" - param: "ip2_b" - ... - -### Building the Second Side of the triplet Net - -Now we need to create the second path that operates on `data_pos` and produces -`feat_pos`. This path is exactly the same as the first. So we can just copy and -paste it. Then we change the name of each layer, input, and output by appending -`_pos` to differentiate the "paired" layers from the originals. - -### Building the Third Side of the triplet Net - -Now we need to create the second path that operates on `data_neg` and produces -`feat_neg`. This path is exactly the same as the first. So we can just copy and -paste it. Then we change the name of each layer, input, and output by appending -`_neg` to differentiate the "paired" layers from the originals. - -### Adding the Triplet Loss Function - -To train the network we will optimize a triplet loss function proposed in: -This cost function is implemented with the `TRIPLET_LOSS` layer: - -layer { - name: "loss" - type: "TripletLoss" - bottom: "feat" - bottom: "feat_true" - bottom: "feat_false" - bottom: "feat_p1" - bottom: "feat_p2" - bottom: "sim" - top: "loss" - triplet_loss_param { - margin: 1 - losstype: 1 - } -} - -## Define the Solver - -Nothing special needs to be done to the solver besides pointing it at the -correct model file. The solver is defined in -`./examples/triplet/3d_triplet_solver.prototxt`. - -## Training and Testing the Model - -Training the model is simple after you have written the network definition -protobuf and solver protobuf files. 
Simply run -`./examples/triplet/train_mnist_triplet.sh`: - - ./examples/triplet/train_3d_triplet.sh - -# Plotting the results - -First, we can draw the model and triplet networks by running the following -commands that draw the DAGs defined in the .prototxt files: - - ./python/draw_net.py \ - ./examples/triplet/3d_triplet.prototxt \ - ./examples/triplet/3d_triplet.png - - ./python/draw_net.py \ - ./examples/triplet/3d_triplet_train_test.prototxt \ - ./examples/triplet/3d_triplet_train_test.png \ No newline at end of file diff --git a/examples/triplet/train_3d_triplet.sh b/examples/triplet/train_3d_triplet.sh deleted file mode 100755 index e421af54493..00000000000 --- a/examples/triplet/train_3d_triplet.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env sh - -TOOLS=./build/tools - -$TOOLS/caffe train --solver=examples/triplet/3d_triplet_solver.prototxt diff --git a/include/caffe/common_layers.hpp b/include/caffe/common_layers.hpp index 8e64b3e5dc5..89bab8d6f3a 100644 --- a/include/caffe/common_layers.hpp +++ b/include/caffe/common_layers.hpp @@ -85,7 +85,7 @@ class ConcatLayer : public Layer { const vector*>& top); virtual inline const char* type() const { return "Concat"; } - virtual inline int MinBottomBlobs() const { return 2; } + virtual inline int MinBottomBlobs() const { return 1; } virtual inline int ExactNumTopBlobs() const { return 1; } protected: @@ -625,7 +625,7 @@ class SliceLayer : public Layer { virtual inline const char* type() const { return "Slice"; } virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int MinTopBlobs() const { return 2; } + virtual inline int MinTopBlobs() const { return 1; } protected: virtual void Forward_cpu(const vector*>& bottom, diff --git a/include/caffe/loss_layers.hpp b/include/caffe/loss_layers.hpp index 0a513ae12a4..8d41af34e88 100644 --- a/include/caffe/loss_layers.hpp +++ b/include/caffe/loss_layers.hpp @@ -216,73 +216,6 @@ class ContrastiveLossLayer : public LossLayer { Blob summer_vec_; // tmp 
storage for gpu forward pass }; -template -class TripletLossLayer : public LossLayer { - public: - explicit TripletLossLayer(const LayerParameter& param) - : LossLayer(param), diff_() {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - - virtual inline int ExactNumBottomBlobs() const { return 2; } - virtual inline const char* type() const { return "TripletLoss"; } - /** - * Unlike most loss layers, in the TripletLossLayer we can backpropagate - * to the first three inputs. - */ - virtual inline bool AllowForceBackward(const int bottom_index) const { - return bottom_index != 1; - } - - protected: - /// @copydoc TripletLossLayer - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - - /** - * @brief Computes the Triplet error gradient w.r.t. the inputs. - * - * Computes the gradients with respect to the two input vectors (bottom[0] and - * bottom[1]), but not the similarity label (bottom[2]). - * - * @param top output Blob vector (length 1), providing the error gradient with - * respect to the outputs - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, - * as @f$ \lambda @f$ is the coefficient of this layer's output - * @f$\ell_i@f$ in the overall Net loss - * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence - * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. - * (*Assuming that this top Blob is not used as a bottom (input) by any - * other layer of the Net.) - * @param propagate_down see Layer::Backward. 
- * @param bottom input Blob vector (length 2) - * -# @f$ (N \times C \times 1 \times 1) @f$ - * the features @f$a@f$; Backward fills their diff with - * gradients if propagate_down[0] - * -# @f$ (N \times C \times 1 \times 1) @f$ - * the features @f$b@f$; Backward fills their diff with gradients if - * propagate_down[1] - */ - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - - Blob diff_; // cached for backward pass - Blob diff_pos; - Blob diff_neg; - Blob dist_sq_; // cached for backward pass - Blob dist_sq_pos; - Blob dist_sq_neg; - Blob diff_sq_; // tmp storage for gpu forward pass - Blob diff_sq_pos; - Blob diff_sq_neg; - Blob summer_vec_; // tmp storage for gpu forward pass -}; - /** * @brief Computes the Euclidean (L2) loss @f$ * E = \frac{1}{2N} \sum\limits_{n=1}^N \left| \left| \hat{y}_n - y_n diff --git a/include/caffe/solver.hpp b/include/caffe/solver.hpp index aba3e036004..8d52785ac6e 100644 --- a/include/caffe/solver.hpp +++ b/include/caffe/solver.hpp @@ -82,6 +82,8 @@ class Solver { callbacks_.push_back(value); } + void CheckSnapshotWritePermissions(); + protected: // Make and apply the update value for the current iteration. 
virtual void ApplyUpdate() = 0; diff --git a/python/caffe/__init__.py b/python/caffe/__init__.py index 6cc44e729f4..ccda1bcae4f 100644 --- a/python/caffe/__init__.py +++ b/python/caffe/__init__.py @@ -1,4 +1,4 @@ -from .pycaffe import Net, SGDSolver +from .pycaffe import Net, SGDSolver, NesterovSolver, AdaGradSolver, RMSPropSolver, AdaDeltaSolver, AdamSolver from ._caffe import set_mode_cpu, set_mode_gpu, set_device, Layer, get_solver, layer_type_list from .proto.caffe_pb2 import TRAIN, TEST from .classifier import Classifier diff --git a/python/caffe/_caffe.cpp b/python/caffe/_caffe.cpp index cc49f60ab13..ccd5776ac40 100644 --- a/python/caffe/_caffe.cpp +++ b/python/caffe/_caffe.cpp @@ -297,6 +297,15 @@ BOOST_PYTHON_MODULE(_caffe) { bp::class_, bp::bases >, shared_ptr >, boost::noncopyable>( "AdaGradSolver", bp::init()); + bp::class_, bp::bases >, + shared_ptr >, boost::noncopyable>( + "RMSPropSolver", bp::init()); + bp::class_, bp::bases >, + shared_ptr >, boost::noncopyable>( + "AdaDeltaSolver", bp::init()); + bp::class_, bp::bases >, + shared_ptr >, boost::noncopyable>( + "AdamSolver", bp::init()); bp::def("get_solver", &GetSolverFromFile, bp::return_value_policy()); diff --git a/python/caffe/net_spec.py b/python/caffe/net_spec.py index 77a0e0070ae..93fc01927db 100644 --- a/python/caffe/net_spec.py +++ b/python/caffe/net_spec.py @@ -56,8 +56,14 @@ def to_proto(*tops): def assign_proto(proto, name, val): """Assign a Python object to a protobuf message, based on the Python type (in recursive fashion). Lists become repeated fields/messages, dicts - become messages, and other types are assigned directly.""" - + become messages, and other types are assigned directly. 
For convenience, + repeated fields whose values are not lists are converted to single-element + lists; e.g., `my_repeated_int_field=3` is converted to + `my_repeated_int_field=[3]`.""" + + is_repeated_field = hasattr(getattr(proto, name), 'extend') + if is_repeated_field and not isinstance(val, list): + val = [val] if isinstance(val, list): if isinstance(val[0], dict): for item in val: diff --git a/python/caffe/pycaffe.py b/python/caffe/pycaffe.py index 4f980a92c38..8ea24da4fdd 100644 --- a/python/caffe/pycaffe.py +++ b/python/caffe/pycaffe.py @@ -10,7 +10,8 @@ from itertools import zip_longest as izip_longest import numpy as np -from ._caffe import Net, SGDSolver +from ._caffe import Net, SGDSolver, NesterovSolver, AdaGradSolver, \ + RMSPropSolver, AdaDeltaSolver, AdamSolver import caffe.io # We directly update methods from Net here (rather than using composition or diff --git a/python/caffe/test/test_net_spec.py b/python/caffe/test/test_net_spec.py index b4595e6531a..fee3c0aaebe 100644 --- a/python/caffe/test/test_net_spec.py +++ b/python/caffe/test/test_net_spec.py @@ -43,8 +43,7 @@ def anon_lenet(batch_size): def silent_net(): n = caffe.NetSpec() - n.data, n.data2 = L.DummyData(shape=[dict(dim=[3]), dict(dim=[4, 2])], - ntop=2) + n.data, n.data2 = L.DummyData(shape=dict(dim=3), ntop=2) n.silence_data = L.Silence(n.data, ntop=0) n.silence_data2 = L.Silence(n.data2, ntop=0) return n.to_proto() diff --git a/src/caffe/layers/concat_layer.cpp b/src/caffe/layers/concat_layer.cpp index 95fba105b9a..86b500de859 100644 --- a/src/caffe/layers/concat_layer.cpp +++ b/src/caffe/layers/concat_layer.cpp @@ -48,11 +48,16 @@ void ConcatLayer::Reshape(const vector*>& bottom, } top[0]->Reshape(top_shape); CHECK_EQ(bottom_count_sum, top[0]->count()); + if (bottom.size() == 1) { + top[0]->ShareData(*bottom[0]); + top[0]->ShareDiff(*bottom[0]); + } } template void ConcatLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { + if (bottom.size() == 1) { return; } Dtype* 
top_data = top[0]->mutable_cpu_data(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); @@ -72,6 +77,7 @@ void ConcatLayer::Forward_cpu(const vector*>& bottom, template void ConcatLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { + if (bottom.size() == 1) { return; } const Dtype* top_diff = top[0]->cpu_diff(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); diff --git a/src/caffe/layers/concat_layer.cu b/src/caffe/layers/concat_layer.cu index 3c64c7ef224..617701e2621 100644 --- a/src/caffe/layers/concat_layer.cu +++ b/src/caffe/layers/concat_layer.cu @@ -28,6 +28,7 @@ __global__ void Concat(const int nthreads, const Dtype* in_data, template void ConcatLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { + if (bottom.size() == 1) { return; } Dtype* top_data = top[0]->mutable_gpu_data(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); @@ -48,6 +49,7 @@ void ConcatLayer::Forward_gpu(const vector*>& bottom, template void ConcatLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { + if (bottom.size() == 1) { return; } const Dtype* top_diff = top[0]->gpu_diff(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); diff --git a/src/caffe/layers/slice_layer.cpp b/src/caffe/layers/slice_layer.cpp index e4418c9cf9c..0a059ae88fe 100644 --- a/src/caffe/layers/slice_layer.cpp +++ b/src/caffe/layers/slice_layer.cpp @@ -67,11 +67,16 @@ void SliceLayer::Reshape(const vector*>& bottom, } } CHECK_EQ(count, bottom[0]->count()); + if (top.size() == 1) { + top[0]->ShareData(*bottom[0]); + top[0]->ShareDiff(*bottom[0]); + } } template void SliceLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { + if (top.size() == 1) { return; } int offset_slice_axis = 0; const Dtype* bottom_data = bottom[0]->cpu_data(); const int bottom_slice_axis 
= bottom[0]->shape(slice_axis_); @@ -92,7 +97,7 @@ void SliceLayer::Forward_cpu(const vector*>& bottom, template void SliceLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - if (!propagate_down[0]) { return; } + if (!propagate_down[0] || top.size() == 1) { return; } int offset_slice_axis = 0; Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); const int bottom_slice_axis = bottom[0]->shape(slice_axis_); diff --git a/src/caffe/layers/slice_layer.cu b/src/caffe/layers/slice_layer.cu index 796841d3f52..e8dc6cd98fc 100644 --- a/src/caffe/layers/slice_layer.cu +++ b/src/caffe/layers/slice_layer.cu @@ -28,6 +28,7 @@ __global__ void Slice(const int nthreads, const Dtype* in_data, template void SliceLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { + if (top.size() == 1) { return; } int offset_slice_axis = 0; const Dtype* bottom_data = bottom[0]->gpu_data(); const int bottom_slice_axis = bottom[0]->shape(slice_axis_); @@ -48,7 +49,7 @@ void SliceLayer::Forward_gpu(const vector*>& bottom, template void SliceLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - if (!propagate_down[0]) { return; } + if (!propagate_down[0] || top.size() == 1) { return; } int offset_slice_axis = 0; Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int bottom_slice_axis = bottom[0]->shape(slice_axis_); diff --git a/src/caffe/layers/triplet_loss_layer.cpp b/src/caffe/layers/triplet_loss_layer.cpp deleted file mode 100644 index 7d11ff59ce4..00000000000 --- a/src/caffe/layers/triplet_loss_layer.cpp +++ /dev/null @@ -1,397 +0,0 @@ -#include -#include - -#include "caffe/layer.hpp" -#include "caffe/loss_layers.hpp" -#include "caffe/util/io.hpp" -#include "caffe/util/math_functions.hpp" - -namespace caffe { - -template -void TripletLossLayer::LayerSetUp( - const vector*>& bottom, const vector*>& top) { - LossLayer::LayerSetUp(bottom, top); - // number of triplet in a batch - int 
num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); - // dimension of each descriptor - int dim = bottom[0]->count()/bottom[0]->num(); - CHECK_EQ(bottom[0]->channels(), dim); - CHECK_EQ(bottom[0]->height(), 1); - CHECK_EQ(bottom[0]->width(), 1); - CHECK_EQ(bottom[1]->channels(), 1); - CHECK_EQ(bottom[1]->height(), 1); - CHECK_EQ(bottom[1]->width(), 1); - // In each set, we have: - // the descriptor of reference sample, closest sample, and negative samples - // number of sets in the whole batch - int num_set = bottom[0]->num()/(2 + num_triplets); - dist_sq_.Reshape(num_set, 1, 1, 1); - diff_pos.Reshape(num_set, dim, 1, 1); - dist_sq_pos.Reshape(num_set, 1, 1, 1); - diff_neg.Reshape(num_set, dim, 1, 1); - dist_sq_neg.Reshape(num_set, 1, 1, 1); - // vector of ones used to sum along channels - summer_vec_.Reshape(bottom[0]->channels(), 1, 1, 1); - for (int i = 0; i < bottom[0]->channels(); ++i) - summer_vec_.mutable_cpu_data()[i] = Dtype(1); -} - -template -void TripletLossLayer::Forward_cpu( - const vector*>& bottom, - const vector*>& top) { - Dtype margin = this->layer_param_.triplet_loss_param().margin(); - Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); - int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); - CHECK_EQ(bottom[0]->num()%(2 + num_triplets), 0); - Dtype loss(0.0); - int dim = bottom[0]->count()/bottom[0]->num(); - int num_set = bottom[0]->num()/(2 + num_triplets); - if (losstype == 0) { - for (int i = 0; i < num_set; ++i) { - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive - diff_pos.mutable_cpu_data() + i*dim); // reference-pose_close - // Loss component calculated from reference and close one - dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(dim, - diff_pos.cpu_data() + i*dim, diff_pos.cpu_data() + i*dim); - // a b is a similar pair for pair wise - // loss accumulated by the pair wise 
part - loss += dist_sq_pos.cpu_data()[i]; - for (int triplet = 0; triplet < num_triplets; ++triplet) { - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i]; - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + i*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + i*dim, diff_neg.cpu_data() + i*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg.cpu_data()[i]; - // loss accumulated accumulated by the triplet part - loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); - } - } - loss = loss / static_cast(num_set) / Dtype(2); - top[0]->mutable_cpu_data()[0] = loss; - } else { - for (int i = 0; i < num_set; ++i) { - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive - diff_pos.mutable_cpu_data() + i*dim); // reference-pose_close - // Loss component calculated from reference and close one - dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(dim, - diff_pos.cpu_data() + i*dim, diff_pos.cpu_data() + i*dim); - // a b is a similar pair for pair wise - // loss accumulated by the pair wise part - loss += dist_sq_pos.cpu_data()[i]; - for (int triplet = 0; triplet < num_triplets; ++triplet) { - dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; - dist_sq_.mutable_cpu_data()[i] += margin; - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + i*dim); // reference-negative - 
dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + i*dim, diff_neg.cpu_data() + i*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[i] = 1 - \ - dist_sq_neg.cpu_data()[i] / dist_sq_.mutable_cpu_data()[i]; - // loss accumulated accumulated by the triplet part - loss += std::max(dist_sq_.cpu_data()[i], Dtype(0.0)); - } - } - loss = loss / static_cast(num_set) / Dtype(2); - top[0]->mutable_cpu_data()[0] = loss; - } -} - -template -void TripletLossLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - Dtype margin = this->layer_param_.triplet_loss_param().margin(); - Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); - int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); - int dim = bottom[0]->count()/bottom[0]->num(); - int num_set = bottom[0]->num()/(2 + num_triplets); - if (losstype == 0) { - // BP for feat1(extracted from reference) - for (int i = 0; i < 1; ++i) { - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - // the pair part - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and 
negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; - // Loss component calculated from negative part - if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - // similar pair in triplet - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(1.0), - bout + (2 + num_triplets)*j*dim); - // dissimilar pair in triplet - caffe_cpu_axpby( - dim, - -alpha, - diff_neg.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - // BP for feat2(extracted from the closest sample) - for (int i = 1; i < 2; ++i) { - if (propagate_down[0]) { - const Dtype sign = -1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - // the pair part - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; - if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - // similar pair in triplet - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - // BP for negative feature used in the num_triplets triplet part - for (int 
i = 2; i < 2 + num_triplets; ++i) { - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + i)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; - if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - // dissimilar pairs - caffe_cpu_axpby( - dim, - alpha, - diff_neg.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - } else { - caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } else { - for (int i = 0; i < 1; ++i) { - // BP for data1(feat1) - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - // the pair part - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - 
diff_neg.mutable_cpu_data() + j*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] = 1 - \ - dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; - // loss accumulated accumulated by the triplet part - if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - caffe_cpu_axpby( - dim, - alpha*dist_sq_neg.mutable_cpu_data()[j]\ - /((dist_sq_pos.mutable_cpu_data()[j]+margin)\ - *(dist_sq_pos.mutable_cpu_data()[j]+margin)), - diff_pos.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - caffe_cpu_axpby( - dim, - -alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), - diff_neg.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - for (int i = 1; i < 2; ++i) { - // BP for positive data(feat2) - if (propagate_down[0]) { - const Dtype sign = -1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - // the pair part - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] = 
1 - \ - dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; - // loss accumulated accumulated by the triplet part - if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - caffe_cpu_axpby( - dim, - alpha*dist_sq_neg.mutable_cpu_data()[j]\ - /((dist_sq_pos.mutable_cpu_data()[j]+margin)\ - *(dist_sq_pos.mutable_cpu_data()[j]+margin)), - diff_pos.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - for (int i = 2; i < 2 + num_triplets; ++i) { - // BP for negative data(feat3) - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + i)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] = 1 - \ - dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; - // loss accumulated accumulated by the triplet part - if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - caffe_cpu_axpby( - dim, - alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), - diff_neg.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - } else { - caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } -} - -#ifdef CPU_ONLY -STUB_GPU(TripletLossLayer); -#endif - -INSTANTIATE_CLASS(TripletLossLayer); -REGISTER_LAYER_CLASS(TripletLoss); - -} // namespace caffe diff --git a/src/caffe/layers/triplet_loss_layer.cu 
b/src/caffe/layers/triplet_loss_layer.cu deleted file mode 100644 index cd1fbb1201b..00000000000 --- a/src/caffe/layers/triplet_loss_layer.cu +++ /dev/null @@ -1,392 +0,0 @@ -#include -#include - -#include "caffe/layer.hpp" -#include "caffe/util/io.hpp" -#include "caffe/util/math_functions.hpp" -#include "caffe/vision_layers.hpp" - -namespace caffe { - -template -void TripletLossLayer::Forward_gpu( - const vector*>& bottom, - const vector*>& top) { - Dtype margin = this->layer_param_.triplet_loss_param().margin(); - Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); - int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); - CHECK_EQ(bottom[0]->num()%(2 + num_triplets), 0); - Dtype loss(0.0); - int dim = bottom[0]->count()/bottom[0]->num(); - int num_set = bottom[0]->num()/(2 + num_triplets); - if (losstype == 0) { - for (int i = 0; i < num_set; ++i) { - caffe_gpu_sub( - dim, - bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->gpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive - diff_pos.mutable_gpu_data() + i*dim); // reference-pose_close - caffe_gpu_dot( - dim, - diff_pos.gpu_data() + i*dim, - diff_pos.gpu_data() + i*dim, - dist_sq_pos.mutable_cpu_data() + i); - // a b is a similar pair for pair wise - // loss accumulated by the pair wise part - loss += dist_sq_pos.gpu_data()[i]; - for (int triplet = 0; triplet < num_triplets; ++triplet) { - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.gpu_data()[i]; - // Loss component calculated from negative part - caffe_gpu_sub( - dim, - bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->gpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, - diff_neg.mutable_gpu_data() + i*dim); // reference-negative - caffe_gpu_dot( - dim, - diff_neg.gpu_data() + i*dim, - diff_neg.gpu_data() + i*dim, - dist_sq_neg.mutable_cpu_data() + i); - // a and 
negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_gpu_data()[i] -= dist_sq_neg.gpu_data()[i]; - // loss accumulated accumulated by the triplet part - loss += std::max(margin + dist_sq_.gpu_data()[i], Dtype(0.0)); - } - } - loss = loss / static_cast(num_set) / Dtype(2); - top[0]->mutable_gpu_data()[0] = loss; - } else { - for (int i = 0; i < num_set; ++i) { - caffe_gpu_sub( - dim, - bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->gpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive - diff_pos.mutable_gpu_data() + i*dim); // reference-pose_close - // Loss component calculated from reference and close one - caffe_gpu_dot( - dim, - diff_pos.gpu_data() + i*dim, - diff_pos.gpu_data() + i*dim, - dist_sq_pos.mutable_cpu_data() + i); - // a b is a similar pair for pair wise - // loss accumulated by the pair wise part - loss += dist_sq_pos.gpu_data()[i]; - for (int triplet = 0; triplet < num_triplets; ++triplet) { - dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.mutable_gpu_data()[i]; - dist_sq_.mutable_gpu_data()[i] += margin; - // Loss component calculated from negative part - caffe_gpu_sub( - dim, - bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->gpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, - diff_neg.mutable_gpu_data() + i*dim); // reference-negative - caffe_gpu_dot( - dim, - diff_neg.gpu_data() + i*dim, - diff_neg.gpu_data() + i*dim, - dist_sq_neg.mutable_cpu_data() + i); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_gpu_data()[i] = 1 - \ - dist_sq_neg.gpu_data()[i] / dist_sq_.mutable_gpu_data()[i]; - // loss accumulated accumulated by the triplet part - loss += std::max(dist_sq_.gpu_data()[i], Dtype(0.0)); - } - } - loss = loss / static_cast(num_set) / Dtype(2); - top[0]->mutable_gpu_data()[0] = loss; - } -} - -template -void TripletLossLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - Dtype margin 
= this->layer_param_.triplet_loss_param().margin(); - Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); - int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); - int dim = bottom[0]->count()/bottom[0]->num(); - int num_set = bottom[0]->num()/(2 + num_triplets); - if (losstype == 0) { - // BP for feat1(extracted from reference) - for (int i = 0; i < 1; ++i) { - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->gpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_gpu_diff(); - // the pair part - caffe_gpu_axpby( - dim, - alpha, - diff_pos.gpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - caffe_gpu_sub( - dim, - bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_gpu_data() + j*dim); // reference-negative - caffe_gpu_dot( - dim, - diff_neg.gpu_data() + j*dim, - diff_neg.gpu_data() + j*dim, - dist_sq_neg.mutable_cpu_data() + j); - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; - // Loss component calculated from negative part - if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { - // similar pair in triplet - caffe_gpu_axpby( - dim, - alpha, - diff_pos.gpu_data() + (j*dim), - Dtype(1.0), - bout + (2 + num_triplets)*j*dim); - // dissimilar pair in triplet - caffe_gpu_axpby( - dim, - -alpha, - diff_neg.gpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - // BP for feat2(extracted from the closest sample) - for (int i = 1; i < 2; ++i) { - if 
(propagate_down[0]) { - const Dtype sign = -1; - const Dtype alpha = sign * top[0]->gpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_gpu_diff(); - // the pair part - caffe_gpu_axpby( - dim, - alpha, - diff_pos.gpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - caffe_gpu_sub( - dim, - bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_gpu_data() + j*dim); // reference-negative - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; - caffe_gpu_dot( - dim, - diff_neg.gpu_data() + j*dim, - diff_neg.gpu_data() + j*dim, - dist_sq_neg.mutable_cpu_data() + j); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; - if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { - // similar pair in triplet - caffe_gpu_axpby( - dim, - alpha, - diff_pos.gpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - // BP for negative feature used in the num_triplets triplet part - for (int i = 2; i < 2 + num_triplets; ++i) { - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->gpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_gpu_diff(); - caffe_gpu_sub( - dim, - bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->gpu_data() + ((2 + num_triplets)*j + i)*dim, - diff_neg.mutable_gpu_data() + j*dim); // reference-negative - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; - 
caffe_gpu_dot( - dim, - diff_neg.gpu_data() + j*dim, - diff_neg.gpu_data() + j*dim, - dist_sq_neg.mutable_cpu_data() + j); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; - if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { - // dissimilar pairs - caffe_gpu_axpby( - dim, - alpha, - diff_neg.gpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - } else { - caffe_gpu_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } else { - for (int i = 0; i < 1; ++i) { - // BP for data1(feat1) - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->gpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_gpu_diff(); - // the pair part - caffe_gpu_axpby( - dim, - alpha, - diff_pos.gpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; - dist_sq_.mutable_gpu_data()[j] += margin; - // Loss component calculated from negative part - caffe_gpu_sub( - dim, - bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_gpu_data() + j*dim); // reference-negative - caffe_gpu_dot( - dim, - diff_neg.gpu_data() + j*dim, - diff_neg.gpu_data() + j*dim, - dist_sq_neg.mutable_cpu_data() + j); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_gpu_data()[j] = 1 - \ - dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; - // loss accumulated accumulated by the triplet part - if ((dist_sq_.gpu_data()[j]) > Dtype(0.0)) { - caffe_gpu_axpby( - dim, - alpha*dist_sq_neg.mutable_gpu_data()[j]\ - /((dist_sq_pos.mutable_gpu_data()[j]+margin)\ - 
*(dist_sq_pos.mutable_gpu_data()[j]+margin)), - diff_pos.gpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - caffe_gpu_axpby( - dim, - -alpha/(dist_sq_pos.mutable_gpu_data()[j] + margin), - diff_neg.gpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - for (int i = 1; i < 2; ++i) { - // BP for positive data(feat2) - if (propagate_down[0]) { - const Dtype sign = -1; - const Dtype alpha = sign * top[0]->gpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_gpu_diff(); - // the pair part - caffe_gpu_axpby( - dim, - alpha, - diff_pos.gpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; - dist_sq_.mutable_gpu_data()[j] += margin; - // Loss component calculated from negative part - caffe_gpu_sub( - dim, - bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_gpu_data() + j*dim); // reference-negative - caffe_gpu_dot( - dim, - diff_neg.gpu_data() + j*dim, - diff_neg.gpu_data() + j*dim, - dist_sq_neg.mutable_cpu_data() + j); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_gpu_data()[j] = 1 - \ - dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; - // loss accumulated accumulated by the triplet part - if ((dist_sq_.gpu_data()[j]) > Dtype(0.0)) { - caffe_gpu_axpby( - dim, - alpha*dist_sq_neg.mutable_gpu_data()[j]\ - /((dist_sq_pos.mutable_gpu_data()[j]+margin)\ - *(dist_sq_pos.mutable_gpu_data()[j]+margin)), - diff_pos.gpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - for (int i = 2; i < 2 + num_triplets; ++i) { - // BP for negative data(feat3) - if (propagate_down[0]) { - 
const Dtype sign = 1; - const Dtype alpha = sign * top[0]->gpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_gpu_diff(); - dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; - dist_sq_.mutable_gpu_data()[j] += margin; - // Loss component calculated from negative part - caffe_gpu_sub( - dim, - bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->gpu_data() + ((2 + num_triplets)*j + i)*dim, - diff_neg.mutable_gpu_data() + j*dim); // reference-negative - caffe_gpu_dot( - dim, - diff_neg.gpu_data() + j*dim, - diff_neg.gpu_data() + j*dim, - dist_sq_neg.mutable_cpu_data() + j); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_gpu_data()[j] = 1 - \ - dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; - // loss accumulated accumulated by the triplet part - if ((dist_sq_.gpu_data()[j]) > Dtype(0.0)) { - caffe_gpu_axpby( - dim, - alpha/(dist_sq_pos.mutable_gpu_data()[j] + margin), - diff_neg.gpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - } else { - caffe_gpu_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } -} - -INSTANTIATE_LAYER_GPU_FUNCS(TripletLossLayer); - -} // namespace caffe diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp index 89d14013dc9..ebb8b5d28c2 100644 --- a/src/caffe/net.cpp +++ b/src/caffe/net.cpp @@ -810,12 +810,11 @@ void Net::Backward() { BackwardFromTo(layers_.size() - 1, 0); if (debug_info_) { Dtype asum_data = 0, asum_diff = 0, sumsq_data = 0, sumsq_diff = 0; - for (int i = 0; i < params_.size(); ++i) { - if (param_owners_[i] >= 0) { continue; } - asum_data += params_[i]->asum_data(); - asum_diff += params_[i]->asum_diff(); - sumsq_data += params_[i]->sumsq_data(); - sumsq_diff += params_[i]->sumsq_diff(); + for (int i = 0; i < learnable_params_.size(); ++i) { + asum_data += learnable_params_[i]->asum_data(); + asum_diff += 
learnable_params_[i]->asum_diff(); + sumsq_data += learnable_params_[i]->sumsq_data(); + sumsq_diff += learnable_params_[i]->sumsq_diff(); } const Dtype l2norm_data = std::sqrt(sumsq_data); const Dtype l2norm_diff = std::sqrt(sumsq_diff); diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index 331d31ce8f8..aa299f8660b 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -385,7 +385,6 @@ message LayerParameter { optional ThresholdParameter threshold_param = 128; optional TileParameter tile_param = 138; optional WindowDataParameter window_data_param = 129; - optional TripletLossParameter triplet_loss_param = 139; } // Message that stores parameters used to apply transformation @@ -469,13 +468,6 @@ message ContrastiveLossParameter { optional bool legacy_version = 2 [default = false]; } -message TripletLossParameter { - //margin for negative triplet - optional float margin = 1 [default = 1.0]; - optional uint32 losstype = 2 [default = 1]; - optional uint32 num_triplets = 3 [default = 3]; -} - message ConvolutionParameter { optional uint32 num_output = 1; // The number of outputs for the layer optional bool bias_term = 2 [default = true]; // whether to have bias terms @@ -1037,10 +1029,8 @@ message V1LayerParameter { SPLIT = 22; SLICE = 33; TANH = 23; - TRIPLET_LOSS = 40; WINDOW_DATA = 24; THRESHOLD = 31; - } optional LayerType type = 5; repeated BlobProto blobs = 6; @@ -1084,7 +1074,6 @@ message V1LayerParameter { optional TransformationParameter transform_param = 36; optional LossParameter loss_param = 42; optional V0LayerParameter layer = 1; - optional TripletLossParameter triplet_loss_param = 43; } // DEPRECATED: V0LayerParameter is the old way of specifying layer parameters diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp index 394ec3b3ad7..3574ce75046 100644 --- a/src/caffe/solver.cpp +++ b/src/caffe/solver.cpp @@ -55,6 +55,7 @@ void Solver::Init(const SolverParameter& param) { << std::endl << 
param.DebugString(); param_ = param; CHECK_GE(param_.average_loss(), 1) << "average_loss should be non-negative."; + CheckSnapshotWritePermissions(); if (Caffe::root_solver() && param_.random_seed() >= 0) { Caffe::set_random_seed(param_.random_seed()); } @@ -434,6 +435,24 @@ void Solver::Snapshot() { SnapshotSolverState(model_filename); } +template +void Solver::CheckSnapshotWritePermissions() { + if (Caffe::root_solver() && param_.snapshot()) { + CHECK(param_.has_snapshot_prefix()) + << "In solver params, snapshot is specified but snapshot_prefix is not"; + string probe_filename = SnapshotFilename(".tempfile"); + std::ofstream probe_ofs(probe_filename.c_str()); + if (probe_ofs.good()) { + probe_ofs.close(); + std::remove(probe_filename.c_str()); + } else { + LOG(FATAL) << "Cannot write to snapshot prefix '" + << param_.snapshot_prefix() << "'. Make sure " + << "that the directory exists and is writeable."; + } + } +} + template string Solver::SnapshotFilename(const string extension) { string filename(param_.snapshot_prefix()); @@ -732,7 +751,7 @@ void SGDSolver::SnapshotSolverStateToBinaryProto( } string snapshot_filename = Solver::SnapshotFilename(".solverstate"); LOG(INFO) - << "Snapshotting solver state to binary proto file" << snapshot_filename; + << "Snapshotting solver state to binary proto file " << snapshot_filename; WriteProtoToBinaryFile(state, snapshot_filename.c_str()); } diff --git a/src/caffe/test/test_accuracy_layer.cpp b/src/caffe/test/test_accuracy_layer.cpp index 94e529b5eee..ef0e57a37a1 100644 --- a/src/caffe/test/test_accuracy_layer.cpp +++ b/src/caffe/test/test_accuracy_layer.cpp @@ -250,7 +250,6 @@ TYPED_TEST(AccuracyLayerTest, TestForwardCPUTopK) { TYPED_TEST(AccuracyLayerTest, TestForwardCPUPerClass) { LayerParameter layer_param; - Caffe::set_mode(Caffe::CPU); AccuracyLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_per_class_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_per_class_vec_); @@ -279,16 
+278,16 @@ TYPED_TEST(AccuracyLayerTest, TestForwardCPUPerClass) { EXPECT_NEAR(this->blob_top_->data_at(0, 0, 0, 0), num_correct_labels / 100.0, 1e-4); for (int i = 0; i < num_class; ++i) { + TypeParam accuracy_per_class = (num_per_class[i] > 0 ? + static_cast(correct_per_class[i]) / num_per_class[i] : 0); EXPECT_NEAR(this->blob_top_per_class_->data_at(i, 0, 0, 0), - static_cast(correct_per_class[i]) / num_per_class[i], - 1e-4); + accuracy_per_class, 1e-4); } } TYPED_TEST(AccuracyLayerTest, TestForwardCPUPerClassWithIgnoreLabel) { LayerParameter layer_param; - Caffe::set_mode(Caffe::CPU); const TypeParam kIgnoreLabelValue = -1; layer_param.mutable_accuracy_param()->set_ignore_label(kIgnoreLabelValue); AccuracyLayer layer(layer_param); @@ -329,9 +328,10 @@ TYPED_TEST(AccuracyLayerTest, TestForwardCPUPerClassWithIgnoreLabel) { EXPECT_NEAR(this->blob_top_->data_at(0, 0, 0, 0), num_correct_labels / TypeParam(count), 1e-4); for (int i = 0; i < 10; ++i) { + TypeParam accuracy_per_class = (num_per_class[i] > 0 ? 
+ static_cast(correct_per_class[i]) / num_per_class[i] : 0); EXPECT_NEAR(this->blob_top_per_class_->data_at(i, 0, 0, 0), - TypeParam(correct_per_class[i]) / num_per_class[i], - 1e-4); + accuracy_per_class, 1e-4); } } diff --git a/src/caffe/test/test_concat_layer.cpp b/src/caffe/test/test_concat_layer.cpp index 088e0a41685..ccd97eb1d66 100644 --- a/src/caffe/test/test_concat_layer.cpp +++ b/src/caffe/test/test_concat_layer.cpp @@ -99,6 +99,19 @@ TYPED_TEST(ConcatLayerTest, TestSetupChannelsNegativeIndexing) { EXPECT_EQ(this->blob_top_->width(), this->blob_bottom_0_->width()); } +TYPED_TEST(ConcatLayerTest, TestForwardTrivial) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConcatLayer layer(layer_param); + this->blob_bottom_vec_0_.resize(1); + layer.SetUp(this->blob_bottom_vec_0_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_0_, this->blob_top_vec_); + for (int i = 0; i < this->blob_bottom_0_->count(); ++i) { + EXPECT_EQ(this->blob_bottom_0_->cpu_data()[i], + this->blob_top_->cpu_data()[i]); + } +} + TYPED_TEST(ConcatLayerTest, TestForwardNum) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; @@ -154,6 +167,16 @@ TYPED_TEST(ConcatLayerTest, TestForwardChannels) { } } +TYPED_TEST(ConcatLayerTest, TestGradientTrivial) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConcatLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + this->blob_bottom_vec_0_.resize(1); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_0_, + this->blob_top_vec_); +} + TYPED_TEST(ConcatLayerTest, TestGradientNum) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; diff --git a/src/caffe/test/test_eltwise_layer.cpp b/src/caffe/test/test_eltwise_layer.cpp index be0c1347709..8031f6e9022 100644 --- a/src/caffe/test/test_eltwise_layer.cpp +++ b/src/caffe/test/test_eltwise_layer.cpp @@ -80,7 +80,7 @@ TYPED_TEST(EltwiseLayerTest, TestProd) { const Dtype* in_data_b = 
this->blob_bottom_b_->cpu_data(); const Dtype* in_data_c = this->blob_bottom_c_->cpu_data(); for (int i = 0; i < count; ++i) { - EXPECT_EQ(data[i], in_data_a[i] * in_data_b[i] * in_data_c[i]); + EXPECT_NEAR(data[i], in_data_a[i] * in_data_b[i] * in_data_c[i], 1e-4); } } @@ -99,7 +99,7 @@ TYPED_TEST(EltwiseLayerTest, TestSum) { const Dtype* in_data_b = this->blob_bottom_b_->cpu_data(); const Dtype* in_data_c = this->blob_bottom_c_->cpu_data(); for (int i = 0; i < count; ++i) { - EXPECT_EQ(data[i], in_data_a[i] + in_data_b[i] + in_data_c[i]); + EXPECT_NEAR(data[i], in_data_a[i] + in_data_b[i] + in_data_c[i], 1e-4); } } diff --git a/src/caffe/test/test_slice_layer.cpp b/src/caffe/test/test_slice_layer.cpp index ccd03646d19..2d2d0fdc005 100644 --- a/src/caffe/test/test_slice_layer.cpp +++ b/src/caffe/test/test_slice_layer.cpp @@ -88,6 +88,21 @@ TYPED_TEST(SliceLayerTest, TestSetupChannels) { EXPECT_EQ(this->blob_bottom_->width(), this->blob_top_0_->width()); } +TYPED_TEST(SliceLayerTest, TestTrivialSlice) { + // Test the trivial (single output) "slice" operation -- + // should be the identity. + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + SliceLayer layer(layer_param); + this->blob_top_vec_0_.resize(1); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_0_); + ASSERT_EQ(this->blob_bottom_->shape(), this->blob_top_0_->shape()); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_EQ(this->blob_bottom_->cpu_data()[i], + this->blob_top_0_->cpu_data()[i]); + } +} + TYPED_TEST(SliceLayerTest, TestSliceAcrossNum) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; @@ -161,6 +176,18 @@ TYPED_TEST(SliceLayerTest, TestSliceAcrossChannels) { } } +TYPED_TEST(SliceLayerTest, TestGradientTrivial) { + // Test the trivial (single output) "slice" operation -- + // should be the identity. 
+ typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + SliceLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + this->blob_top_vec_0_.resize(1); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_0_); +} + TYPED_TEST(SliceLayerTest, TestGradientAcrossNum) { typedef typename TypeParam::Dtype Dtype; // Gradient checks are slow; reduce blob size. diff --git a/src/caffe/test/test_triplet_loss_layer b/src/caffe/test/test_triplet_loss_layer deleted file mode 100644 index 6c25ce9bd4b..00000000000 --- a/src/caffe/test/test_triplet_loss_layer +++ /dev/null @@ -1,125 +0,0 @@ -#include -#include -#include -#include -#include - -#include "gtest/gtest.h" - -#include "caffe/blob.hpp" -#include "caffe/common.hpp" -#include "caffe/filler.hpp" -#include "caffe/vision_layers.hpp" - -#include "caffe/test/test_caffe_main.hpp" -#include "caffe/test/test_gradient_check_util.hpp" - -namespace caffe { - -template -class TripletLossLayerTest : public MultiDeviceTest { - typedef typename TypeParam::Dtype Dtype; - - protected: - TripletLossLayerTest() - : blob_bottom_data_(new Blob(50, 1, 1, 1)), - blob_bottom_y_(new Blob(50, 1, 1, 1)), - blob_top_loss_(new Blob()) { - // fill the values - FillerParameter filler_param; - filler_param.set_min(-1.0); - filler_param.set_max(1.0); // distances~=1.0 to test both sides of margin - UniformFiller filler(filler_param); - filler.Fill(this->blob_bottom_data_); - blob_bottom_vec_.push_back(blob_bottom_data_); - for (int i = 0; i < blob_bottom_y_->count(); ++i) { - blob_bottom_y_->mutable_cpu_data()[i] = caffe_rng_rand() % 2; // 0 or 1 - } - blob_bottom_vec_.push_back(blob_bottom_y_); - blob_top_vec_.push_back(blob_top_loss_); - } - virtual ~TripletLossLayerTest() { - delete blob_bottom_data_; - delete blob_bottom_y_; - delete blob_top_loss_; - } - - Blob* const blob_bottom_data_; - Blob* const blob_bottom_y_; - Blob* const blob_top_loss_; - vector*> blob_bottom_vec_; - vector*> 
blob_top_vec_; -}; - -TYPED_TEST_CASE(TripletLossLayerTest, TestDtypesAndDevices); - -TYPED_TEST(TripletLossLayerTest, TestForward) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - TripletLossLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - // manually compute to compare - const Dtype margin = layer_param.triplet_loss_param().margin(); - const Dtype losstype = 0;//layer_param.triplet_loss_param().losstype(); - const int num_triplets = 3; - const int num_set = this->blob_bottom_data_->num()/(2 + num_triplets); - const int channels = this->blob_bottom_data_->channels(); - Dtype loss(0); - if (losstype == 0) { - for (int i = 0; i < num_set; ++i) { - Dtype dist_par(0); - for (int j = 0; j < channels; ++j) { - Dtype diff_pos = this->blob_bottom_data_->cpu_data()[(2+num_triplets)*i*channels+j] - - this->blob_bottom_data_->cpu_data()[((2+num_triplets)*i+1)*channels+j]; - dist_par = diff_pos*diff_pos; - loss += dist_par; - } - for (int triplet = 0; triplet < num_triplets; ++triplet) { - Dtype dist_sq(0); - for (int j = 0; j < channels; ++j) { - Dtype diff_pos = this->blob_bottom_data_->cpu_data()[(2+num_triplets)*i*channels+j] - - this->blob_bottom_data_->cpu_data()[((2+num_triplets)*i+1)*channels+j]; - dist_sq += diff_pos*diff_pos; - Dtype diff_neg = this->blob_bottom_data_->cpu_data()[(2+num_triplets)*i*channels+j] - - this->blob_bottom_data_->cpu_data()[((2+num_triplets)*i+2+triplet)*channels+j]; - dist_sq -= diff_neg*diff_neg; - } - loss += std::max(margin + dist_sq, Dtype(0.0)); - } - } - } /*else { - for (int i = 0; i < num; ++i) { - Dtype dist_sq(0); - Dtype dist_par(0); - for (int j = 0; j < channels; ++j) { - Dtype diff_pos = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - - this->blob_bottom_data_j_->cpu_data()[i*channels+j]; - dist_sq += diff_pos*diff_pos; - dist_sq += margin; - Dtype diff_neg = 
this->blob_bottom_data_i_->cpu_data()[i*channels+j] - - this->blob_bottom_data_k_->cpu_data()[i*channels+j]; - dist_sq = 1 - diff_neg*diff_neg/dist_sq; - Dtype diff_par = this->blob_bottom_data_l_->cpu_data()[i*channels+j] - - this->blob_bottom_data_m_->cpu_data()[i*channels+j]; - dist_par = diff_par*diff_par; - } - loss += std::max(dist_sq, Dtype(0.0)); - loss += dist_par; - } - }*/ - loss /= static_cast(num_set) * Dtype(2); - EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); -} - -TYPED_TEST(TripletLossLayerTest, TestGradient) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - TripletLossLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - GradientChecker checker(1e-2, 1e-2, 1701); - // check the gradient for the first 5 bottom layers - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_, 0); -} -} // namespace caffe diff --git a/src/caffe/util/upgrade_proto.cpp b/src/caffe/util/upgrade_proto.cpp index 4703eb4c1b4..92e5cf55fa9 100644 --- a/src/caffe/util/upgrade_proto.cpp +++ b/src/caffe/util/upgrade_proto.cpp @@ -816,10 +816,6 @@ bool UpgradeV1LayerParameter(const V1LayerParameter& v1_layer_param, layer_param->mutable_threshold_param()->CopyFrom( v1_layer_param.threshold_param()); } - if (v1_layer_param.has_triplet_loss_param()) { - layer_param->mutable_triplet_loss_param()->CopyFrom( - v1_layer_param.triplet_loss_param()); - } if (v1_layer_param.has_window_data_param()) { layer_param->mutable_window_data_param()->CopyFrom( v1_layer_param.window_data_param()); @@ -917,8 +913,6 @@ const char* UpgradeV1LayerType(const V1LayerParameter_LayerType type) { return "Slice"; case V1LayerParameter_LayerType_TANH: return "TanH"; - case V1LayerParameter_LayerType_TRIPLET_LOSS: - return "TripletLoss"; case V1LayerParameter_LayerType_WINDOW_DATA: return "WindowData"; case V1LayerParameter_LayerType_THRESHOLD: diff --git a/tools/caffe.cpp b/tools/caffe.cpp index 
ff63860a3c1..e3f684b5ab3 100644 --- a/tools/caffe.cpp +++ b/tools/caffe.cpp @@ -174,6 +174,7 @@ int train() { vector gpus; get_gpus(&gpus); if (gpus.size() == 0) { + LOG(INFO) << "Use CPU."; Caffe::set_mode(Caffe::CPU); } else { ostringstream s; diff --git a/tools/extract_features.cpp b/tools/extract_features.cpp index 365dd495bbf..084c9bf88df 100644 --- a/tools/extract_features.cpp +++ b/tools/extract_features.cpp @@ -42,7 +42,7 @@ int feature_extraction_pipeline(int argc, char** argv) { " save_feature_dataset_name1[,name2,...] num_mini_batches db_type" " [CPU/GPU] [DEVICE_ID=0]\n" "Note: you can extract multiple features in one pass by specifying" - " multiple feature blob names and dataset names seperated by ','." + " multiple feature blob names and dataset names separated by ','." " The names cannot contain white space characters and the number of blobs" " and datasets must be equal."; return 1; From b907e95d385e6fb5dbf85cfc185a1aa06b3aabe0 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Thu, 4 Jun 2015 22:31:06 +0800 Subject: [PATCH 49/82] triplet data generation and network update --- examples/mnist/lenet_train_test.prototxt.orig | 168 ++ examples/siamese/mnist_siamese.prototxt.orig | 115 + .../mnist_siamese_solver.prototxt.orig | 25 + src/caffe/data_transformer.cpp.orig | 567 ++++ src/caffe/test/test_net.cpp | 14 + src/caffe/test/test_net.cpp.orig | 2375 +++++++++++++++++ 6 files changed, 3264 insertions(+) create mode 100644 examples/mnist/lenet_train_test.prototxt.orig create mode 100644 examples/siamese/mnist_siamese.prototxt.orig create mode 100644 examples/siamese/mnist_siamese_solver.prototxt.orig create mode 100644 src/caffe/data_transformer.cpp.orig create mode 100644 src/caffe/test/test_net.cpp.orig diff --git a/examples/mnist/lenet_train_test.prototxt.orig b/examples/mnist/lenet_train_test.prototxt.orig new file mode 100644 index 00000000000..b18fc26cfd8 --- /dev/null +++ b/examples/mnist/lenet_train_test.prototxt.orig @@ -0,0 +1,168 @@ +name: 
"LeNet" +layer { + name: "mnist" + type: "Data" + top: "data" + top: "label" + include { + phase: TRAIN + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/mnist/mnist_train_lmdb" + batch_size: 64 + backend: LMDB + } +} +layer { + name: "mnist" + type: "Data" + top: "data" + top: "label" + include { + phase: TEST + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/mnist/mnist_test_lmdb" + batch_size: 100 + backend: LMDB + } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "ip1" + top: "ip2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "accuracy" + type: "Accuracy" + bottom: "ip2" + 
bottom: "label" + top: "accuracy" + include { + phase: TEST + } +} +layer { + name: "loss" + type: "SoftmaxWithLoss" + bottom: "ip2" + bottom: "label" + top: "loss" +} diff --git a/examples/siamese/mnist_siamese.prototxt.orig b/examples/siamese/mnist_siamese.prototxt.orig new file mode 100644 index 00000000000..332731bd75f --- /dev/null +++ b/examples/siamese/mnist_siamese.prototxt.orig @@ -0,0 +1,115 @@ +name: "mnist_siamese" +input: "data" +input_shape { + dim: 10000 + dim: 1 + dim: 28 + dim: 28 +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 500 + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "ip1" + top: "ip2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 10 + } +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip2" + top: "feat" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 2 + } +} diff --git a/examples/siamese/mnist_siamese_solver.prototxt.orig b/examples/siamese/mnist_siamese_solver.prototxt.orig new file mode 100644 index 00000000000..d4d994d1389 --- /dev/null 
+++ b/examples/siamese/mnist_siamese_solver.prototxt.orig @@ -0,0 +1,25 @@ +# The train/test net protocol buffer definition +net: "examples/siamese/mnist_siamese_train_test.prototxt" +# test_iter specifies how many forward passes the test should carry out. +# In the case of MNIST, we have test batch size 100 and 100 test iterations, +# covering the full 10,000 testing images. +test_iter: 100 +# Carry out testing every 500 training iterations. +test_interval: 500 +# The base learning rate, momentum and the weight decay of the network. +base_lr: 0.01 +momentum: 0.9 +weight_decay: 0.0000 +# The learning rate policy +lr_policy: "inv" +gamma: 0.0001 +power: 0.75 +# Display every 100 iterations +display: 100 +# The maximum number of iterations +max_iter: 50000 +# snapshot intermediate results +snapshot: 5000 +snapshot_prefix: "examples/siamese/mnist_siamese" +# solver mode: CPU or GPU +solver_mode: GPU diff --git a/src/caffe/data_transformer.cpp.orig b/src/caffe/data_transformer.cpp.orig new file mode 100644 index 00000000000..d8ad1c1e5c6 --- /dev/null +++ b/src/caffe/data_transformer.cpp.orig @@ -0,0 +1,567 @@ +#ifdef USE_OPENCV +#include +#endif // USE_OPENCV + +#include +#include + +#include "caffe/data_transformer.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/util/rng.hpp" + +namespace caffe { + +template +DataTransformer::DataTransformer(const TransformationParameter& param, + Phase phase) + : param_(param), phase_(phase) { + // check if we want to use mean_file + if (param_.has_mean_file()) { + CHECK_EQ(param_.mean_value_size(), 0) << + "Cannot specify mean_file and mean_value at the same time"; + const string& mean_file = param.mean_file(); + if (Caffe::root_solver()) { + LOG(INFO) << "Loading mean file from: " << mean_file; + } + BlobProto blob_proto; + ReadProtoFromBinaryFileOrDie(mean_file.c_str(), &blob_proto); + data_mean_.FromProto(blob_proto); + } + // check if we want to use mean_value + if 
(param_.mean_value_size() > 0) { + CHECK(param_.has_mean_file() == false) << + "Cannot specify mean_file and mean_value at the same time"; + for (int c = 0; c < param_.mean_value_size(); ++c) { + mean_values_.push_back(param_.mean_value(c)); + } + } +} + +template +void DataTransformer::Transform(const Datum& datum, + Dtype* transformed_data) { + const string& data = datum.data(); + const int datum_channels = datum.channels(); + const int datum_height = datum.height(); + const int datum_width = datum.width(); + + const int crop_size = param_.crop_size(); + const Dtype scale = param_.scale(); + const bool do_mirror = param_.mirror() && Rand(2); + const bool has_mean_file = param_.has_mean_file(); + const bool has_uint8 = data.size() > 0; + const bool has_mean_values = mean_values_.size() > 0; + + CHECK_GT(datum_channels, 0); + CHECK_GE(datum_height, crop_size); + CHECK_GE(datum_width, crop_size); + + Dtype* mean = NULL; + if (has_mean_file) { + CHECK_EQ(datum_channels, data_mean_.channels()); + CHECK_EQ(datum_height, data_mean_.height()); + CHECK_EQ(datum_width, data_mean_.width()); + mean = data_mean_.mutable_cpu_data(); + } + if (has_mean_values) { + CHECK(mean_values_.size() == 1 || mean_values_.size() == datum_channels) << + "Specify either 1 mean_value or as many as channels: " << datum_channels; + if (datum_channels > 1 && mean_values_.size() == 1) { + // Replicate the mean_value for simplicity + for (int c = 1; c < datum_channels; ++c) { + mean_values_.push_back(mean_values_[0]); + } + } + } + + int height = datum_height; + int width = datum_width; + + int h_off = 0; + int w_off = 0; + if (crop_size) { + height = crop_size; + width = crop_size; + // We only do random crop when we do training. 
+ if (phase_ == TRAIN) { + h_off = Rand(datum_height - crop_size + 1); + w_off = Rand(datum_width - crop_size + 1); + } else { + h_off = (datum_height - crop_size) / 2; + w_off = (datum_width - crop_size) / 2; + } + } + + Dtype datum_element; + int top_index, data_index; + for (int c = 0; c < datum_channels; ++c) { + for (int h = 0; h < height; ++h) { + for (int w = 0; w < width; ++w) { + data_index = (c * datum_height + h_off + h) * datum_width + w_off + w; + if (do_mirror) { + top_index = (c * height + h) * width + (width - 1 - w); + } else { + top_index = (c * height + h) * width + w; + } + if (has_uint8) { + datum_element = + static_cast(static_cast(data[data_index])); + } else { + datum_element = datum.float_data(data_index); + } + if (has_mean_file) { + transformed_data[top_index] = + (datum_element - mean[data_index]) * scale; + } else { + if (has_mean_values) { + transformed_data[top_index] = + (datum_element - mean_values_[c]) * scale; + } else { + transformed_data[top_index] = datum_element * scale; + } + } + } + } + } +} + + +template +void DataTransformer::Transform(const Datum& datum, + Blob* transformed_blob) { + // If datum is encoded, decoded and transform the cv::image. + if (datum.encoded()) { +<<<<<<< be87d80db7eea1220e4347069f0295209f90cef6 +#ifdef USE_OPENCV +======= +<<<<<<< HEAD +<<<<<<< HEAD +>>>>>>> triplet data generation and network update + CHECK(!(param_.force_color() && param_.force_gray())) +======= + CHECK(!param_.force_color() && !param_.force_gray()) +>>>>>>> 011aef0... restore +======= + CHECK(!(param_.force_color() && param_.force_gray())) +>>>>>>> d2acfed... fixed _force_color check, fixes #2635 + << "cannot set both force_color and force_gray"; + cv::Mat cv_img; + if (param_.force_color() || param_.force_gray()) { + // If force_color then decode in color otherwise decode in gray. 
+ cv_img = DecodeDatumToCVMat(datum, param_.force_color()); + } else { + cv_img = DecodeDatumToCVMatNative(datum); + } + // Transform the cv::image into blob. + return Transform(cv_img, transformed_blob); +#else + LOG(FATAL) << "Encoded datum requires OpenCV; compile with USE_OPENCV."; +#endif // USE_OPENCV + } else { + if (param_.force_color() || param_.force_gray()) { + LOG(ERROR) << "force_color and force_gray only for encoded datum"; + } + } + + const int crop_size = param_.crop_size(); + const int datum_channels = datum.channels(); + const int datum_height = datum.height(); + const int datum_width = datum.width(); + + // Check dimensions. + const int channels = transformed_blob->channels(); + const int height = transformed_blob->height(); + const int width = transformed_blob->width(); + const int num = transformed_blob->num(); + + CHECK_EQ(channels, datum_channels); + CHECK_LE(height, datum_height); + CHECK_LE(width, datum_width); + CHECK_GE(num, 1); + + if (crop_size) { + CHECK_EQ(crop_size, height); + CHECK_EQ(crop_size, width); + } else { + CHECK_EQ(datum_height, height); + CHECK_EQ(datum_width, width); + } + + Dtype* transformed_data = transformed_blob->mutable_cpu_data(); + Transform(datum, transformed_data); +} + +template +void DataTransformer::Transform(const vector & datum_vector, + Blob* transformed_blob) { + const int datum_num = datum_vector.size(); + const int num = transformed_blob->num(); + const int channels = transformed_blob->channels(); + const int height = transformed_blob->height(); + const int width = transformed_blob->width(); + + CHECK_GT(datum_num, 0) << "There is no datum to add"; + CHECK_LE(datum_num, num) << + "The size of datum_vector must be no greater than transformed_blob->num()"; + Blob uni_blob(1, channels, height, width); + for (int item_id = 0; item_id < datum_num; ++item_id) { + int offset = transformed_blob->offset(item_id); + uni_blob.set_cpu_data(transformed_blob->mutable_cpu_data() + offset); + 
Transform(datum_vector[item_id], &uni_blob); + } +} + +#ifdef USE_OPENCV +template +void DataTransformer::Transform(const vector & mat_vector, + Blob* transformed_blob) { + const int mat_num = mat_vector.size(); + const int num = transformed_blob->num(); + const int channels = transformed_blob->channels(); + const int height = transformed_blob->height(); + const int width = transformed_blob->width(); + + CHECK_GT(mat_num, 0) << "There is no MAT to add"; + CHECK_EQ(mat_num, num) << + "The size of mat_vector must be equals to transformed_blob->num()"; + Blob uni_blob(1, channels, height, width); + for (int item_id = 0; item_id < mat_num; ++item_id) { + int offset = transformed_blob->offset(item_id); + uni_blob.set_cpu_data(transformed_blob->mutable_cpu_data() + offset); + Transform(mat_vector[item_id], &uni_blob); + } +} + +template +void DataTransformer::Transform(const cv::Mat& cv_img, + Blob* transformed_blob) { + const int crop_size = param_.crop_size(); + const int img_channels = cv_img.channels(); + const int img_height = cv_img.rows; + const int img_width = cv_img.cols; + + // Check dimensions. 
+ const int channels = transformed_blob->channels(); + const int height = transformed_blob->height(); + const int width = transformed_blob->width(); + const int num = transformed_blob->num(); + + CHECK_EQ(channels, img_channels); + CHECK_LE(height, img_height); + CHECK_LE(width, img_width); + CHECK_GE(num, 1); + + CHECK(cv_img.depth() == CV_8U) << "Image data type must be unsigned byte"; + + const Dtype scale = param_.scale(); + const bool do_mirror = param_.mirror() && Rand(2); + const bool has_mean_file = param_.has_mean_file(); + const bool has_mean_values = mean_values_.size() > 0; + + CHECK_GT(img_channels, 0); + CHECK_GE(img_height, crop_size); + CHECK_GE(img_width, crop_size); + + Dtype* mean = NULL; + if (has_mean_file) { + CHECK_EQ(img_channels, data_mean_.channels()); + CHECK_EQ(img_height, data_mean_.height()); + CHECK_EQ(img_width, data_mean_.width()); + mean = data_mean_.mutable_cpu_data(); + } + if (has_mean_values) { + CHECK(mean_values_.size() == 1 || mean_values_.size() == img_channels) << + "Specify either 1 mean_value or as many as channels: " << img_channels; + if (img_channels > 1 && mean_values_.size() == 1) { + // Replicate the mean_value for simplicity + for (int c = 1; c < img_channels; ++c) { + mean_values_.push_back(mean_values_[0]); + } + } + } + + int h_off = 0; + int w_off = 0; + cv::Mat cv_cropped_img = cv_img; + if (crop_size) { + CHECK_EQ(crop_size, height); + CHECK_EQ(crop_size, width); + // We only do random crop when we do training. 
+ if (phase_ == TRAIN) { + h_off = Rand(img_height - crop_size + 1); + w_off = Rand(img_width - crop_size + 1); + } else { + h_off = (img_height - crop_size) / 2; + w_off = (img_width - crop_size) / 2; + } + cv::Rect roi(w_off, h_off, crop_size, crop_size); + cv_cropped_img = cv_img(roi); + } else { + CHECK_EQ(img_height, height); + CHECK_EQ(img_width, width); + } + + CHECK(cv_cropped_img.data); + + Dtype* transformed_data = transformed_blob->mutable_cpu_data(); + int top_index; + for (int h = 0; h < height; ++h) { + const uchar* ptr = cv_cropped_img.ptr(h); + int img_index = 0; + for (int w = 0; w < width; ++w) { + for (int c = 0; c < img_channels; ++c) { + if (do_mirror) { + top_index = (c * height + h) * width + (width - 1 - w); + } else { + top_index = (c * height + h) * width + w; + } + // int top_index = (c * height + h) * width + w; + Dtype pixel = static_cast(ptr[img_index++]); + if (has_mean_file) { + int mean_index = (c * img_height + h_off + h) * img_width + w_off + w; + transformed_data[top_index] = + (pixel - mean[mean_index]) * scale; + } else { + if (has_mean_values) { + transformed_data[top_index] = + (pixel - mean_values_[c]) * scale; + } else { + transformed_data[top_index] = pixel * scale; + } + } + } + } + } +} +#endif // USE_OPENCV + +template +void DataTransformer::Transform(Blob* input_blob, + Blob* transformed_blob) { + const int crop_size = param_.crop_size(); + const int input_num = input_blob->num(); + const int input_channels = input_blob->channels(); + const int input_height = input_blob->height(); + const int input_width = input_blob->width(); + + if (transformed_blob->count() == 0) { + // Initialize transformed_blob with the right shape. 
+ if (crop_size) { + transformed_blob->Reshape(input_num, input_channels, + crop_size, crop_size); + } else { + transformed_blob->Reshape(input_num, input_channels, + input_height, input_width); + } + } + + const int num = transformed_blob->num(); + const int channels = transformed_blob->channels(); + const int height = transformed_blob->height(); + const int width = transformed_blob->width(); + const int size = transformed_blob->count(); + + CHECK_LE(input_num, num); + CHECK_EQ(input_channels, channels); + CHECK_GE(input_height, height); + CHECK_GE(input_width, width); + + + const Dtype scale = param_.scale(); + const bool do_mirror = param_.mirror() && Rand(2); + const bool has_mean_file = param_.has_mean_file(); + const bool has_mean_values = mean_values_.size() > 0; + + int h_off = 0; + int w_off = 0; + if (crop_size) { + CHECK_EQ(crop_size, height); + CHECK_EQ(crop_size, width); + // We only do random crop when we do training. + if (phase_ == TRAIN) { + h_off = Rand(input_height - crop_size + 1); + w_off = Rand(input_width - crop_size + 1); + } else { + h_off = (input_height - crop_size) / 2; + w_off = (input_width - crop_size) / 2; + } + } else { + CHECK_EQ(input_height, height); + CHECK_EQ(input_width, width); + } + + Dtype* input_data = input_blob->mutable_cpu_data(); + if (has_mean_file) { + CHECK_EQ(input_channels, data_mean_.channels()); + CHECK_EQ(input_height, data_mean_.height()); + CHECK_EQ(input_width, data_mean_.width()); + for (int n = 0; n < input_num; ++n) { + int offset = input_blob->offset(n); + caffe_sub(data_mean_.count(), input_data + offset, + data_mean_.cpu_data(), input_data + offset); + } + } + + if (has_mean_values) { + CHECK(mean_values_.size() == 1 || mean_values_.size() == input_channels) << + "Specify either 1 mean_value or as many as channels: " << input_channels; + if (mean_values_.size() == 1) { + caffe_add_scalar(input_blob->count(), -(mean_values_[0]), input_data); + } else { + for (int n = 0; n < input_num; ++n) { + for (int 
c = 0; c < input_channels; ++c) { + int offset = input_blob->offset(n, c); + caffe_add_scalar(input_height * input_width, -(mean_values_[c]), + input_data + offset); + } + } + } + } + + Dtype* transformed_data = transformed_blob->mutable_cpu_data(); + + for (int n = 0; n < input_num; ++n) { + int top_index_n = n * channels; + int data_index_n = n * channels; + for (int c = 0; c < channels; ++c) { + int top_index_c = (top_index_n + c) * height; + int data_index_c = (data_index_n + c) * input_height + h_off; + for (int h = 0; h < height; ++h) { + int top_index_h = (top_index_c + h) * width; + int data_index_h = (data_index_c + h) * input_width + w_off; + if (do_mirror) { + int top_index_w = top_index_h + width - 1; + for (int w = 0; w < width; ++w) { + transformed_data[top_index_w-w] = input_data[data_index_h + w]; + } + } else { + for (int w = 0; w < width; ++w) { + transformed_data[top_index_h + w] = input_data[data_index_h + w]; + } + } + } + } + } + if (scale != Dtype(1)) { + DLOG(INFO) << "Scale: " << scale; + caffe_scal(size, scale, transformed_data); + } +} + +template +vector DataTransformer::InferBlobShape(const Datum& datum) { + if (datum.encoded()) { +<<<<<<< be87d80db7eea1220e4347069f0295209f90cef6 +#ifdef USE_OPENCV +======= +<<<<<<< HEAD +<<<<<<< HEAD + CHECK(!(param_.force_color() && param_.force_gray())) +======= + CHECK(!param_.force_color() && !param_.force_gray()) +>>>>>>> 011aef0... restore +======= +>>>>>>> triplet data generation and network update + CHECK(!(param_.force_color() && param_.force_gray())) +>>>>>>> d2acfed... fixed _force_color check, fixes #2635 + << "cannot set both force_color and force_gray"; + cv::Mat cv_img; + if (param_.force_color() || param_.force_gray()) { + // If force_color then decode in color otherwise decode in gray. + cv_img = DecodeDatumToCVMat(datum, param_.force_color()); + } else { + cv_img = DecodeDatumToCVMatNative(datum); + } + // InferBlobShape using the cv::image. 
+ return InferBlobShape(cv_img); +#else + LOG(FATAL) << "Encoded datum requires OpenCV; compile with USE_OPENCV."; +#endif // USE_OPENCV + } + const int crop_size = param_.crop_size(); + const int datum_channels = datum.channels(); + const int datum_height = datum.height(); + const int datum_width = datum.width(); + // Check dimensions. + CHECK_GT(datum_channels, 0); + CHECK_GE(datum_height, crop_size); + CHECK_GE(datum_width, crop_size); + // Build BlobShape. + vector shape(4); + shape[0] = 1; + shape[1] = datum_channels; + shape[2] = (crop_size)? crop_size: datum_height; + shape[3] = (crop_size)? crop_size: datum_width; + return shape; +} + +template +vector DataTransformer::InferBlobShape( + const vector & datum_vector) { + const int num = datum_vector.size(); + CHECK_GT(num, 0) << "There is no datum to in the vector"; + // Use first datum in the vector to InferBlobShape. + vector shape = InferBlobShape(datum_vector[0]); + // Adjust num to the size of the vector. + shape[0] = num; + return shape; +} + +#ifdef USE_OPENCV +template +vector DataTransformer::InferBlobShape(const cv::Mat& cv_img) { + const int crop_size = param_.crop_size(); + const int img_channels = cv_img.channels(); + const int img_height = cv_img.rows; + const int img_width = cv_img.cols; + // Check dimensions. + CHECK_GT(img_channels, 0); + CHECK_GE(img_height, crop_size); + CHECK_GE(img_width, crop_size); + // Build BlobShape. + vector shape(4); + shape[0] = 1; + shape[1] = img_channels; + shape[2] = (crop_size)? crop_size: img_height; + shape[3] = (crop_size)? crop_size: img_width; + return shape; +} + +template +vector DataTransformer::InferBlobShape( + const vector & mat_vector) { + const int num = mat_vector.size(); + CHECK_GT(num, 0) << "There is no cv_img to in the vector"; + // Use first cv_img in the vector to InferBlobShape. + vector shape = InferBlobShape(mat_vector[0]); + // Adjust num to the size of the vector. 
+ shape[0] = num; + return shape; +} +#endif // USE_OPENCV + +template +void DataTransformer::InitRand() { + const bool needs_rand = param_.mirror() || + (phase_ == TRAIN && param_.crop_size()); + if (needs_rand) { + const unsigned int rng_seed = caffe_rng_rand(); + rng_.reset(new Caffe::RNG(rng_seed)); + } else { + rng_.reset(); + } +} + +template +int DataTransformer::Rand(int n) { + CHECK(rng_); + CHECK_GT(n, 0); + caffe::rng_t* rng = + static_cast(rng_->generator()); + return ((*rng)() % n); +} + +INSTANTIATE_CLASS(DataTransformer); + +} // namespace caffe diff --git a/src/caffe/test/test_net.cpp b/src/caffe/test/test_net.cpp index 12998d8912f..ea6729e74de 100644 --- a/src/caffe/test/test_net.cpp +++ b/src/caffe/test/test_net.cpp @@ -700,11 +700,25 @@ class NetTest : public MultiDeviceTest { " bottom: 'innerproduct' " " bottom: 'label_argmax' "; if (test_skip_true) +<<<<<<< HEAD +<<<<<<< HEAD proto += " propagate_down: true " " propagate_down: false "; else proto += " propagate_down: true " " propagate_down: true "; +======= + proto += " propagate_down: [true, false] "; + else + proto += " propagate_down: [true, true] "; +>>>>>>> 011aef0... restore +======= + proto += " propagate_down: true " + " propagate_down: false "; + else + proto += " propagate_down: true " + " propagate_down: true "; +>>>>>>> 98fb438... 
fixed two bugs with prototext format proto += " top: 'cross_entropy_loss' " " type: 'SigmoidCrossEntropyLoss' " diff --git a/src/caffe/test/test_net.cpp.orig b/src/caffe/test/test_net.cpp.orig new file mode 100644 index 00000000000..56959f4793b --- /dev/null +++ b/src/caffe/test/test_net.cpp.orig @@ -0,0 +1,2375 @@ +#include +#include +#include + +#include "google/protobuf/text_format.h" + +#include "gtest/gtest.h" + +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/net.hpp" +#include "caffe/util/math_functions.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class NetTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + NetTest() : seed_(1701) {} + + virtual void InitNetFromProtoString(const string& proto) { + NetParameter param; + CHECK(google::protobuf::TextFormat::ParseFromString(proto, ¶m)); + net_.reset(new Net(param)); + } + + virtual void CopyNetBlobs(const bool copy_diff, + vector > >* blobs_copy) { + CHECK(net_); + const vector > >& net_blobs = net_->blobs(); + blobs_copy->clear(); + blobs_copy->resize(net_blobs.size()); + const bool kReshape = true; + for (int i = 0; i < net_blobs.size(); ++i) { + (*blobs_copy)[i].reset(new Blob()); + (*blobs_copy)[i]->CopyFrom(*net_blobs[i], copy_diff, kReshape); + } + } + + virtual void CopyNetParams(const bool copy_diff, + vector > >* params_copy) { + CHECK(net_); + const vector > >& net_params = net_->params(); + params_copy->clear(); + params_copy->resize(net_params.size()); + const bool kReshape = true; + for (int i = 0; i < net_params.size(); ++i) { + (*params_copy)[i].reset(new Blob()); + (*params_copy)[i]->CopyFrom(*net_params[i], copy_diff, kReshape); + } + } + + virtual void InitTinyNet(const bool force_backward = false, + const bool accuracy_layer = false) { + string proto = + "name: 'TinyTestNetwork' " + "layer { " + " name: 'data' " + " type: 'DummyData' " + " 
dummy_data_param { " + " shape { " + " dim: 5 " + " dim: 2 " + " dim: 3 " + " dim: 4 " + " } " + " data_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " shape { " + " dim: 5 " + " } " + " data_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerproduct' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 1000 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'data' " + " top: 'innerproduct' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerproduct' " + " bottom: 'label' " + " top: 'top_loss' " + "} "; + if (accuracy_layer) { + proto += + "layer { " + " name: 'loss' " + " type: 'Accuracy' " + " bottom: 'innerproduct' " + " bottom: 'label' " + " top: 'accuracy' " + "} "; + } + if (force_backward) { + proto += "force_backward: true "; + } + InitNetFromProtoString(proto); + } + + virtual void InitTinyNetEuclidean(const bool force_backward = false) { + string proto = + "name: 'TinyTestEuclidLossNetwork' " + "layer { " + " name: 'data' " + " type: 'DummyData' " + " dummy_data_param { " + " num: 5 " + " channels: 2 " + " height: 3 " + " width: 4 " + " num: 5 " + " channels: 1 " + " height: 1 " + " width: 1 " + " data_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerproduct' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + 
" bottom: 'data' " + " top: 'innerproduct' " + "} " + "layer { " + " name: 'loss' " + " type: 'EuclideanLoss' " + " bottom: 'innerproduct' " + " bottom: 'label' " + "} "; + if (force_backward) { + proto += "force_backward: true "; + } + InitNetFromProtoString(proto); + } + + virtual void InitTrickyNet(Dtype* loss_weight = NULL) { + ostringstream loss_weight_stream; + if (loss_weight) { + loss_weight_stream << " loss_weight: " << *loss_weight << " "; + } + const string& proto = + "name: 'TrickyTestNetwork' " + "layer { " + " name: 'data' " + " type: 'DummyData' " + " dummy_data_param { " + " num: 5 " + " channels: 2 " + " height: 3 " + " width: 4 " + " num: 5 " + " channels: 1 " + " height: 1 " + " width: 1 " + " data_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerproduct' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 1000 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'data' " + " top: 'transformed_data' " + "} " + "layer { " + " name: 'innerproduct' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'label' " + " top: 'transformed_label' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + + loss_weight_stream.str() + + " bottom: 'transformed_data' " + " bottom: 'transformed_label' " + "} "; + InitNetFromProtoString(proto); + } + + // loss_weight is the loss weight for the 'EuclideanLoss' layer output. 
+ // midnet_loss_weight is the loss weight for the first 'InnerProduct' layer + // output. Should both default to 0.0 if unspecified (i.e., if NULL is + // passed to this function). + virtual void InitUnsharedWeightsNet(const Dtype* loss_weight = NULL, + const Dtype* midnet_loss_weight = NULL, + const bool force_backward = false, const bool bias_term = false, + const Dtype blobs_lr_w1 = 1, const Dtype blobs_lr_b1 = 2, + const Dtype blobs_lr_w2 = 1, const Dtype blobs_lr_b2 = 2) { + string bias_str = bias_term ? "true ":"false "; + ostringstream proto; + proto << "name: 'UnsharedWeightsNetwork' "; + if (force_backward) { + proto << "force_backward: true "; + } + proto << + "layer { " + " name: 'data' " + " type: 'DummyData' " + " dummy_data_param { " + " num: 5 " + " channels: 2 " + " height: 3 " + " width: 4 " + " data_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " } " + " top: 'data' " + "} " + "layer { " + " name: 'innerproduct1' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 10 " + " bias_term: " << bias_str << + " weight_filler { " + " type: 'gaussian' " + " std: 10 " + " } " + " } " + " param { " + " name: 'unsharedweights1' " + " lr_mult: " << blobs_lr_w1 << + " } "; + if (bias_term) { + proto << " param { lr_mult: " << blobs_lr_b1 << " } "; + } + proto << + " bottom: 'data' " + " top: 'innerproduct1' "; + if (midnet_loss_weight) { + proto << " loss_weight: " << *midnet_loss_weight << " "; + } + proto << + "} " + "layer { " + " name: 'innerproduct2' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 10 " + " bias_term: " << bias_str << + " weight_filler { " + " type: 'gaussian' " + " std: 10 " + " } " + " } " + " param { " + " name: 'unsharedweights2' " + " lr_mult: " << blobs_lr_w2 << + " } "; + if (bias_term) { + proto << " param { lr_mult: " << blobs_lr_b2 << " } "; + } + proto << + " bottom: 'data' " + " top: 'innerproduct2' " + "} " + "layer { " + " name: 'loss' " + " type: 'EuclideanLoss' "; 
+ if (loss_weight) { + proto << " loss_weight: " << *loss_weight << " "; + } + proto << + " bottom: 'innerproduct1' " + " bottom: 'innerproduct2' " + "} "; + InitNetFromProtoString(proto.str()); + } + + virtual void InitSharedWeightsNet() { + const string& proto = + "name: 'SharedWeightsNetwork' " + "layer { " + " name: 'data' " + " type: 'DummyData' " + " dummy_data_param { " + " num: 5 " + " channels: 2 " + " height: 3 " + " width: 4 " + " data_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " } " + " top: 'data' " + "} " + "layer { " + " name: 'innerproduct1' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 10 " + " bias_term: false " + " weight_filler { " + " type: 'gaussian' " + " std: 10 " + " } " + " } " + " param { name: 'sharedweights' } " + " bottom: 'data' " + " top: 'innerproduct1' " + "} " + "layer { " + " name: 'innerproduct2' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 10 " + " bias_term: false " + " weight_filler { " + " type: 'gaussian' " + " std: 10 " + " } " + " } " + " param { name: 'sharedweights' } " + " bottom: 'data' " + " top: 'innerproduct2' " + "} " + "layer { " + " name: 'loss' " + " type: 'EuclideanLoss' " + " bottom: 'innerproduct1' " + " bottom: 'innerproduct2' " + "} "; + InitNetFromProtoString(proto); + } + + virtual void InitDiffDataUnsharedWeightsNet() { + const string& proto = + "name: 'DiffDataUnsharedWeightsNetwork' " + "layer { " + " name: 'data' " + " type: 'DummyData' " + " dummy_data_param { " + " num: 10 " + " channels: 10 " + " height: 1 " + " width: 1 " + " num: 10 " + " channels: 10 " + " height: 1 " + " width: 1 " + " data_filler { " + " type: 'gaussian' " + " std: 10 " + " } " + " } " + " top: 'data1' " + " top: 'data2' " + "} " + "layer { " + " name: 'innerproduct1' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 10 " + " bias_term: false " + " weight_filler { " + " type: 'constant' " + " value: 0.5 " + " } " + " } " + " param { 
name: 'unsharedweights1' } " + " bottom: 'data1' " + " top: 'innerproduct1' " + "} " + "layer { " + " name: 'innerproduct2' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 10 " + " bias_term: false " + " weight_filler { " + " type: 'constant' " + " value: 0.5 " + " } " + " } " + " param { name: 'unsharedweights2' } " + " bottom: 'innerproduct1' " + " top: 'innerproduct2' " + "} " + "layer { " + " name: 'loss' " + " type: 'EuclideanLoss' " + " bottom: 'data2' " + " bottom: 'innerproduct2' " + "} "; + InitNetFromProtoString(proto); + } + + virtual void InitDiffDataSharedWeightsNet() { + const string& proto = + "name: 'DiffDataSharedWeightsNetwork' " + "layer { " + " name: 'data' " + " type: 'DummyData' " + " dummy_data_param { " + " num: 10 " + " channels: 10 " + " height: 1 " + " width: 1 " + " num: 10 " + " channels: 10 " + " height: 1 " + " width: 1 " + " data_filler { " + " type: 'gaussian' " + " std: 10 " + " } " + " } " + " top: 'data1' " + " top: 'data2' " + "} " + "layer { " + " name: 'innerproduct1' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 10 " + " bias_term: false " + " weight_filler { " + " type: 'constant' " + " value: 0.5 " + " } " + " } " + " param { name: 'sharedweights' } " + " bottom: 'data1' " + " top: 'innerproduct1' " + "} " + "layer { " + " name: 'innerproduct2' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 10 " + " bias_term: false " + " weight_filler { " + " type: 'constant' " + " value: 0.5 " + " } " + " } " + " param { name: 'sharedweights' } " + " bottom: 'innerproduct1' " + " top: 'innerproduct2' " + "} " + "layer { " + " name: 'loss' " + " type: 'EuclideanLoss' " + " bottom: 'data2' " + " bottom: 'innerproduct2' " + "} "; + InitNetFromProtoString(proto); + } + + virtual void InitReshapableNet() { + const string& proto = + "name: 'ReshapableNetwork' " + "input: 'data' " + "input_dim: 1 " + "input_dim: 3 " + "input_dim: 100 " + "input_dim: 100 " + "layer { " 
+ " name: 'conv1' " + " type: 'Convolution' " + " bottom: 'data' " + " top: 'conv1' " + " convolution_param { " + " num_output: 5 " + " kernel_size: 3 " + " stride: 2 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0.2 " + " } " + " } " + "} " + "layer { " + " name: 'relu1' " + " type: 'ReLU' " + " bottom: 'conv1' " + " top: 'conv1' " + "} " + "layer { " + " name: 'pool1' " + " type: 'Pooling' " + " bottom: 'conv1' " + " top: 'pool1' " + " pooling_param { " + " pool: MAX " + " kernel_size: 2 " + " stride: 2 " + " } " + "} " + "layer { " + " name: 'norm1' " + " type: 'LRN' " + " bottom: 'pool1' " + " top: 'norm1' " + " lrn_param { " + " local_size: 3 " + " } " + "} " + "layer { " + " name: 'softmax' " + " type: 'Softmax' " + " bottom: 'norm1' " + " top: 'softmax' " + "} "; + InitNetFromProtoString(proto); + } + + virtual void InitSkipPropNet(bool test_skip_true) { + string proto = + "name: 'SkipPropTestNetwork' " + "layer { " + " name: 'data' " + " type: 'DummyData' " + " dummy_data_param { " + " shape { " + " dim: 5 " + " dim: 2 " + " dim: 3 " + " dim: 4 " + " } " + " data_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " shape { " + " dim: 5 " + " } " + " data_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'silence' " + " bottom: 'label' " + " type: 'Silence' " + "} " + "layer { " + " name: 'innerproduct' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'data' " + " top: 'innerproduct' " + "} " + "layer { " + " name: 'ip_fake_labels' " + " type: 'InnerProduct' " + " inner_product_param { " + " 
num_output: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " bottom: 'data' " + " top: 'fake_labels' " + "} " + "layer { " + " name: 'argmax' " + " bottom: 'fake_labels' " + " top: 'label_argmax' " + " type: 'ArgMax' " + "} " + "layer { " + " name: 'loss' " + " bottom: 'innerproduct' " + " bottom: 'label_argmax' "; + if (test_skip_true) + proto += " propagate_down: true " + " propagate_down: false "; + else + proto += " propagate_down: true " + " propagate_down: true "; + proto += + " top: 'cross_entropy_loss' " + " type: 'SigmoidCrossEntropyLoss' " + " loss_weight: 0.1 " + "} "; + InitNetFromProtoString(proto); + } + + int seed_; + shared_ptr > net_; +}; + +TYPED_TEST_CASE(NetTest, TestDtypesAndDevices); + +TYPED_TEST(NetTest, TestHasBlob) { + this->InitTinyNet(); + EXPECT_TRUE(this->net_->has_blob("data")); + EXPECT_TRUE(this->net_->has_blob("label")); + EXPECT_TRUE(this->net_->has_blob("innerproduct")); + EXPECT_FALSE(this->net_->has_blob("loss")); + EXPECT_TRUE(this->net_->has_blob("top_loss")); +} + +TYPED_TEST(NetTest, TestGetBlob) { + this->InitTinyNet(); + EXPECT_EQ(this->net_->blob_by_name("data"), this->net_->blobs()[0]); + EXPECT_EQ(this->net_->blob_by_name("label"), this->net_->blobs()[1]); + EXPECT_EQ(this->net_->blob_by_name("innerproduct"), this->net_->blobs()[2]); + EXPECT_FALSE(this->net_->blob_by_name("loss")); + EXPECT_EQ(this->net_->blob_by_name("top_loss"), this->net_->blobs()[3]); +} + +TYPED_TEST(NetTest, TestHasLayer) { + this->InitTinyNet(); + EXPECT_TRUE(this->net_->has_layer("data")); + EXPECT_TRUE(this->net_->has_layer("innerproduct")); + EXPECT_TRUE(this->net_->has_layer("loss")); + EXPECT_FALSE(this->net_->has_layer("label")); +} + +TYPED_TEST(NetTest, TestGetLayerByName) { + this->InitTinyNet(); + EXPECT_EQ(this->net_->layer_by_name("data"), this->net_->layers()[0]); + EXPECT_EQ(this->net_->layer_by_name("innerproduct"), 
this->net_->layers()[1]); + EXPECT_EQ(this->net_->layer_by_name("loss"), this->net_->layers()[2]); + EXPECT_FALSE(this->net_->layer_by_name("label")); +} + +TYPED_TEST(NetTest, TestBottomNeedBackward) { + this->InitTinyNet(); + const vector >& bottom_need_backward = + this->net_->bottom_need_backward(); + EXPECT_EQ(3, bottom_need_backward.size()); + EXPECT_EQ(0, bottom_need_backward[0].size()); + EXPECT_EQ(1, bottom_need_backward[1].size()); + EXPECT_EQ(false, bottom_need_backward[1][0]); + EXPECT_EQ(2, bottom_need_backward[2].size()); + EXPECT_EQ(true, bottom_need_backward[2][0]); + EXPECT_EQ(false, bottom_need_backward[2][1]); +} + +TYPED_TEST(NetTest, TestBottomNeedBackwardForce) { + const bool force_backward = true; + this->InitTinyNet(force_backward); + const vector >& bottom_need_backward = + this->net_->bottom_need_backward(); + EXPECT_EQ(3, bottom_need_backward.size()); + EXPECT_EQ(0, bottom_need_backward[0].size()); + EXPECT_EQ(1, bottom_need_backward[1].size()); + EXPECT_EQ(true, bottom_need_backward[1][0]); + EXPECT_EQ(2, bottom_need_backward[2].size()); + EXPECT_EQ(true, bottom_need_backward[2][0]); + EXPECT_EQ(false, bottom_need_backward[2][1]); +} + +TYPED_TEST(NetTest, TestBottomNeedBackwardEuclideanForce) { + const bool force_backward = true; + this->InitTinyNetEuclidean(force_backward); + const vector >& bottom_need_backward = + this->net_->bottom_need_backward(); + EXPECT_EQ(3, bottom_need_backward.size()); + EXPECT_EQ(0, bottom_need_backward[0].size()); + EXPECT_EQ(1, bottom_need_backward[1].size()); + EXPECT_EQ(true, bottom_need_backward[1][0]); + EXPECT_EQ(2, bottom_need_backward[2].size()); + EXPECT_EQ(true, bottom_need_backward[2][0]); + EXPECT_EQ(true, bottom_need_backward[2][1]); +} + +TYPED_TEST(NetTest, TestBottomNeedBackwardTricky) { + this->InitTrickyNet(); + const vector >& bottom_need_backward = + this->net_->bottom_need_backward(); + EXPECT_EQ(4, bottom_need_backward.size()); + EXPECT_EQ(0, bottom_need_backward[0].size()); + 
EXPECT_EQ(1, bottom_need_backward[1].size()); + EXPECT_EQ(false, bottom_need_backward[1][0]); + EXPECT_EQ(1, bottom_need_backward[2].size()); + EXPECT_EQ(false, bottom_need_backward[2][0]); + EXPECT_EQ(2, bottom_need_backward[3].size()); + EXPECT_EQ(true, bottom_need_backward[3][0]); + // The label input to the SoftmaxLossLayer should say it "needs backward" + // since it has weights under it, even though we expect this to cause a crash + // at training/test time. + EXPECT_EQ(true, bottom_need_backward[3][1]); +} + +TYPED_TEST(NetTest, TestLossWeight) { + typedef typename TypeParam::Dtype Dtype; + // First, compute the loss and gradients with no loss_weight specified. + // In this case, the loss weight for the 'EuclideanLoss' layer should default + // to 1. + vector*> bottom; + Caffe::set_random_seed(this->seed_); + const bool kForceBackward = true; + this->InitUnsharedWeightsNet(NULL, NULL, kForceBackward); + const Dtype loss = this->net_->ForwardBackward(bottom); + const bool kCopyDiff = true; + vector > > blob_grads; + this->CopyNetBlobs(kCopyDiff, &blob_grads); + vector > > param_grads; + this->CopyNetParams(kCopyDiff, ¶m_grads); + // Check that the loss is non-trivial, otherwise the test doesn't prove much. 
+ const Dtype kMinLossAbsValue = 1e-2; + ASSERT_GE(fabs(loss), kMinLossAbsValue); + const Dtype kErrorMargin = 1e-4; + const int kNumLossWeights = 6; + Dtype kLossWeights[kNumLossWeights] = {2, 0, 1, -1, -2.5, 3.7}; + for (int i = 0; i < kNumLossWeights; ++i) { + Caffe::set_random_seed(this->seed_); + this->InitUnsharedWeightsNet(&kLossWeights[i], NULL, kForceBackward); + const Dtype weighted_loss = this->net_->ForwardBackward(bottom); + const Dtype error_margin = kErrorMargin * fabs(kLossWeights[i]); + EXPECT_NEAR(loss * kLossWeights[i], weighted_loss, error_margin) + << "loss weight = " << kLossWeights[i]; + const vector > >& weighted_blobs = + this->net_->blobs(); + ASSERT_EQ(blob_grads.size(), weighted_blobs.size()); + for (int j = 0; j < blob_grads.size(); ++j) { + ASSERT_EQ(blob_grads[j]->count(), weighted_blobs[j]->count()); + for (int k = 0; k < blob_grads[j]->count(); ++k) { + EXPECT_NEAR(blob_grads[j]->cpu_diff()[k] * kLossWeights[i], + weighted_blobs[j]->cpu_diff()[k], error_margin); + } + } + const vector > >& weighted_params = + this->net_->params(); + ASSERT_EQ(param_grads.size(), weighted_params.size()); + for (int j = 0; j < param_grads.size(); ++j) { + ASSERT_EQ(param_grads[j]->count(), weighted_params[j]->count()); + for (int k = 0; k < param_grads[j]->count(); ++k) { + EXPECT_NEAR(param_grads[j]->cpu_diff()[k] * kLossWeights[i], + weighted_params[j]->cpu_diff()[k], error_margin); + } + } + } +} + +TYPED_TEST(NetTest, TestLossWeightMidNet) { + typedef typename TypeParam::Dtype Dtype; + vector*> bottom; + Caffe::set_random_seed(this->seed_); + const bool kForceBackward = true; + Dtype loss_weight = 0; + Dtype midnet_loss_weight = 1; + this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, + kForceBackward); + const Dtype loss = this->net_->ForwardBackward(bottom); + const bool kCopyDiff = true; + const bool kReshape = true; + Blob data_grad; + data_grad.CopyFrom(*this->net_->blob_by_name("data"), kCopyDiff, kReshape); + // Check that the 
loss is non-trivial, otherwise the test doesn't prove much. + const Dtype kMinLossAbsValue = 1e-2; + ASSERT_GE(fabs(loss), kMinLossAbsValue); + const Dtype kErrorMargin = 1e-4; + const int kNumLossWeights = 6; + Dtype kLossWeights[kNumLossWeights] = {2, 0, 1, -1, -2.5, 3.7}; + for (int i = 0; i < kNumLossWeights; ++i) { + Caffe::set_random_seed(this->seed_); + this->InitUnsharedWeightsNet(&loss_weight, &kLossWeights[i], + kForceBackward); + const Dtype weighted_loss = this->net_->ForwardBackward(bottom); + const Dtype error_margin = kErrorMargin * fabs(kLossWeights[i]); + EXPECT_NEAR(loss * kLossWeights[i], weighted_loss, error_margin) + << "loss weight = " << kLossWeights[i]; + const shared_ptr >& weighted_blob = + this->net_->blob_by_name("data"); + ASSERT_EQ(data_grad.count(), weighted_blob->count()); + for (int j = 0; j < data_grad.count(); ++j) { + EXPECT_NEAR(data_grad.cpu_diff()[j] * kLossWeights[i], + weighted_blob->cpu_diff()[j], error_margin); + } + } +} + +TYPED_TEST(NetTest, TestComboLossWeight) { + typedef typename TypeParam::Dtype Dtype; + vector*> bottom; + Dtype loss_weight; + Dtype midnet_loss_weight; + const bool kForceBackward = true; + const Dtype kErrorMargin = 1e-4; + + // Get the loss and gradients with 'EuclideanLoss' weight 1, + // 'InnerProduct' weight 1. 
+ loss_weight = 1; + midnet_loss_weight = 1; + Caffe::set_random_seed(this->seed_); + this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, + kForceBackward); + const Dtype loss = this->net_->ForwardBackward(bottom); + const bool kCopyDiff = true; + vector > > blob_grads; + this->CopyNetBlobs(kCopyDiff, &blob_grads); + vector > > param_grads; + this->CopyNetParams(kCopyDiff, ¶m_grads); + + loss_weight = 2; + midnet_loss_weight = 1; + Caffe::set_random_seed(this->seed_); + this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, + kForceBackward); + const Dtype loss_main_2 = this->net_->ForwardBackward(bottom); + vector > > blob_grads_loss_2; + this->CopyNetBlobs(kCopyDiff, &blob_grads_loss_2); + vector > > param_grads_loss_2; + this->CopyNetParams(kCopyDiff, ¶m_grads_loss_2); + + loss_weight = 3; + midnet_loss_weight = 1; + Caffe::set_random_seed(this->seed_); + this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, + kForceBackward); + const Dtype loss_main_3 = this->net_->ForwardBackward(bottom); + const vector > >& blob_grads_loss_3 = + this->net_->blobs(); + ASSERT_EQ(blob_grads.size(), blob_grads_loss_3.size()); + ASSERT_EQ(blob_grads_loss_2.size(), blob_grads_loss_3.size()); + for (int j = 0; j < blob_grads.size(); ++j) { + const string& blob_name = this->net_->blob_names()[j]; + bool grad_should_change = true; + if (blob_name == "innerproduct1_innerproduct1_0_split_0") { + grad_should_change = false; + } + ASSERT_EQ(blob_grads[j]->count(), blob_grads_loss_3[j]->count()); + ASSERT_EQ(blob_grads_loss_2[j]->count(), blob_grads_loss_3[j]->count()); + for (int k = 0; k < blob_grads[j]->count(); ++k) { + const Dtype grad_diff_2 = blob_grads_loss_2[j]->cpu_diff()[k] - + blob_grads[j]->cpu_diff()[k]; + const Dtype grad_diff_3 = blob_grads_loss_3[j]->cpu_diff()[k] - + blob_grads[j]->cpu_diff()[k]; + if (grad_should_change) { + // Test non-triviality. 
+ const Dtype kMinGradDiffAbsValue = 1e-4; + EXPECT_GT(fabs(grad_diff_2), kMinGradDiffAbsValue) << blob_name; + EXPECT_NEAR(2 * grad_diff_2, grad_diff_3, kErrorMargin) << blob_name; + } else { + EXPECT_EQ(0, grad_diff_2) << blob_name; + EXPECT_EQ(0, grad_diff_3) << blob_name; + } + } + } + + loss_weight = 1; + midnet_loss_weight = 2; + Caffe::set_random_seed(this->seed_); + this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, + kForceBackward); + const Dtype loss_midnet_2 = this->net_->ForwardBackward(bottom); + this->CopyNetBlobs(kCopyDiff, &blob_grads_loss_2); + this->CopyNetParams(kCopyDiff, ¶m_grads_loss_2); + + loss_weight = 1; + midnet_loss_weight = 3; + Caffe::set_random_seed(this->seed_); + this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, + kForceBackward); + const Dtype loss_midnet_3 = this->net_->ForwardBackward(bottom); + const vector > >& blob_grads_midnet_loss_3 = + this->net_->blobs(); + ASSERT_EQ(blob_grads.size(), blob_grads_midnet_loss_3.size()); + ASSERT_EQ(blob_grads_loss_2.size(), blob_grads_midnet_loss_3.size()); + const vector& blob_names = this->net_->blob_names(); + for (int j = 0; j < blob_grads.size(); ++j) { + const string& blob_name = blob_names[j]; + bool grad_should_change = false; + if (blob_name == "innerproduct1" || + blob_name == "innerproduct1_innerproduct1_0_split_0" || + blob_name == "data_data_0_split_0" || blob_name == "data") { + grad_should_change = true; + } + ASSERT_EQ(blob_grads[j]->count(), blob_grads_midnet_loss_3[j]->count()); + ASSERT_EQ(blob_grads[j]->count(), blob_grads_loss_2[j]->count()); + for (int k = 0; k < blob_grads[j]->count(); ++k) { + const Dtype grad_diff_2 = blob_grads_loss_2[j]->cpu_diff()[k] - + blob_grads[j]->cpu_diff()[k]; + const Dtype grad_diff_3 = blob_grads_midnet_loss_3[j]->cpu_diff()[k] - + blob_grads[j]->cpu_diff()[k]; + if (grad_should_change) { + // Test non-triviality. 
+ const Dtype kMinGradDiffAbsValue = 1e-4; + EXPECT_GT(fabs(grad_diff_2), kMinGradDiffAbsValue) << blob_name; + EXPECT_NEAR(2 * grad_diff_2, grad_diff_3, kErrorMargin) << blob_name; + } else { + EXPECT_EQ(0, grad_diff_2) << blob_name; + EXPECT_EQ(0, grad_diff_3) << blob_name; + } + } + } + + const Dtype kMinLossDiffAbsValue = 1e-4; + + Dtype loss_diff_2 = loss_main_2 - loss; + // Test non-triviality. + EXPECT_GT(fabs(loss_diff_2), kMinLossDiffAbsValue); + Dtype loss_diff_3 = loss_main_3 - loss; + EXPECT_NEAR(2 * loss_diff_2, loss_diff_3, kErrorMargin); + + loss_diff_2 = loss_midnet_2 - loss; + // Test non-triviality. + EXPECT_GT(fabs(loss_diff_2), kMinLossDiffAbsValue); + loss_diff_3 = loss_midnet_3 - loss; + EXPECT_NEAR(2 * loss_diff_2, loss_diff_3, kErrorMargin); +} + +TYPED_TEST(NetTest, TestBackwardWithAccuracyLayer) { + typedef typename TypeParam::Dtype Dtype; + const bool kForceBackward = false; + const bool kAccuracyLayer = true; + this->InitTinyNet(kForceBackward, kAccuracyLayer); + EXPECT_TRUE(this->net_->has_blob("accuracy")); + vector*> bottom; + // Test that we can do Backward even though we have an 'Accuracy' layer. 
+ this->net_->ForwardBackward(bottom); +} + +TYPED_TEST(NetTest, TestUnsharedWeightsDataNet) { + typedef typename TypeParam::Dtype Dtype; + this->InitUnsharedWeightsNet(); + vector*> bottom; + Dtype loss; + this->net_->Forward(bottom, &loss); + EXPECT_GT(loss, 0); +} + +TYPED_TEST(NetTest, TestSharedWeightsDataNet) { + typedef typename TypeParam::Dtype Dtype; + this->InitSharedWeightsNet(); + vector*> bottom; + Dtype loss; + this->net_->Forward(bottom, &loss); + EXPECT_FLOAT_EQ(loss, 0); +} + +TYPED_TEST(NetTest, TestUnsharedWeightsDiffNet) { + typedef typename TypeParam::Dtype Dtype; + this->InitUnsharedWeightsNet(); + vector*> bottom; + Net* net = this->net_.get(); + net->Forward(bottom); + net->Backward(); + Layer* ip1_layer = net->layer_by_name("innerproduct1").get(); + Layer* ip2_layer = net->layer_by_name("innerproduct2").get(); + const int count = ip1_layer->blobs()[0]->count(); + const Dtype* grad1 = ip1_layer->blobs()[0]->cpu_diff(); + const Dtype* grad2 = ip2_layer->blobs()[0]->cpu_diff(); + for (int i = 0; i < count; ++i) { + EXPECT_GT(fabs(grad1[i]), 0); + EXPECT_FLOAT_EQ(-1 * grad1[i], grad2[i]); + } +} + +TYPED_TEST(NetTest, TestSharedWeightsDiffNet) { + typedef typename TypeParam::Dtype Dtype; + this->InitSharedWeightsNet(); + vector*> bottom; + Net* net = this->net_.get(); + Dtype loss; + net->Forward(bottom, &loss); + net->Backward(); + EXPECT_FLOAT_EQ(loss, 0); + Layer* ip1_layer = net->layer_by_name("innerproduct1").get(); + Layer* ip2_layer = net->layer_by_name("innerproduct2").get(); + const int count = ip1_layer->blobs()[0]->count(); + const Dtype* grad1 = ip1_layer->blobs()[0]->cpu_diff(); + const Dtype* grad2 = ip2_layer->blobs()[0]->cpu_diff(); + for (int i = 0; i < count; ++i) { + EXPECT_FLOAT_EQ(0, grad1[i]); + EXPECT_FLOAT_EQ(0, grad2[i]); + } +} + +TYPED_TEST(NetTest, TestSharedWeightsUpdate) { + typedef typename TypeParam::Dtype Dtype; + Caffe::set_random_seed(this->seed_); + this->InitDiffDataSharedWeightsNet(); + vector*> bottom; + 
EXPECT_EQ(this->net_->layer_names()[1], "innerproduct1"); + EXPECT_EQ(this->net_->layer_names()[2], "innerproduct2"); + Blob* ip1_weights = this->net_->layers()[1]->blobs()[0].get(); + Blob* ip2_weights = this->net_->layers()[2]->blobs()[0].get(); + // Check that data blobs of shared weights share the same location in memory. + EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); + // Check that diff blobs of shared weights are at different locations in + // memory. (The diffs should be accumulated at update time.) + EXPECT_NE(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); + this->net_->Forward(bottom); + this->net_->Backward(); + // Compute the expected update as the data minus the two diffs. + Blob shared_params; + const bool reshape = true; + const bool copy_diff = false; + shared_params.CopyFrom(*ip1_weights, copy_diff, reshape); + shared_params.CopyFrom(*ip1_weights, !copy_diff, reshape); + const int count = ip1_weights->count(); + // Make sure the diffs are non-trivial. + for (int i = 0; i < count; ++i) { + EXPECT_NE(0, ip1_weights->cpu_diff()[i]); + EXPECT_NE(0, ip2_weights->cpu_diff()[i]); + EXPECT_NE(ip1_weights->cpu_diff()[i], ip2_weights->cpu_diff()[i]); + } + caffe_axpy(count, Dtype(1), ip2_weights->cpu_diff(), + shared_params.mutable_cpu_diff()); + caffe_axpy(count, Dtype(-1), shared_params.cpu_diff(), + shared_params.mutable_cpu_data()); + const Dtype* expected_updated_params = shared_params.cpu_data(); + this->net_->Update(); + const Dtype* actual_updated_params = ip1_weights->cpu_data(); + for (int i = 0; i < count; ++i) { + EXPECT_EQ(expected_updated_params[i], actual_updated_params[i]); + } + // Check that data blobs of shared weights STILL point to the same memory + // location (because ... who knows). 
+ EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); + + Caffe::set_random_seed(this->seed_); + this->InitDiffDataUnsharedWeightsNet(); + EXPECT_EQ(this->net_->layer_names()[1], "innerproduct1"); + EXPECT_EQ(this->net_->layer_names()[2], "innerproduct2"); + ip1_weights = this->net_->layers()[1]->blobs()[0].get(); + ip2_weights = this->net_->layers()[2]->blobs()[0].get(); + // Check that data and diff blobs of unshared weights are at different + // locations in memory. + EXPECT_NE(ip1_weights->cpu_data(), ip2_weights->cpu_data()); + EXPECT_NE(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); + this->net_->Forward(bottom); + this->net_->Backward(); + // Compute the expected update. + Blob unshared_params1; + unshared_params1.CopyFrom(*ip1_weights, copy_diff, reshape); + unshared_params1.CopyFrom(*ip1_weights, !copy_diff, reshape); + Blob unshared_params2; + unshared_params2.CopyFrom(*ip2_weights, copy_diff, reshape); + unshared_params2.CopyFrom(*ip2_weights, !copy_diff, reshape); + // Make sure the diffs are non-trivial and sum to the diff in the shared net. 
+ for (int i = 0; i < count; ++i) { + EXPECT_NE(0, ip1_weights->cpu_diff()[i]); + EXPECT_NE(0, ip2_weights->cpu_diff()[i]); + EXPECT_NE(ip1_weights->cpu_diff()[i], ip2_weights->cpu_diff()[i]); + EXPECT_EQ(ip1_weights->cpu_diff()[i] + ip2_weights->cpu_diff()[i], + shared_params.cpu_diff()[i]); + } + caffe_axpy(count, Dtype(-1), ip1_weights->cpu_diff(), + unshared_params1.mutable_cpu_data()); + caffe_axpy(count, Dtype(-1), ip2_weights->cpu_diff(), + unshared_params2.mutable_cpu_data()); + const Dtype* expected_updated_params1 = unshared_params1.cpu_data(); + const Dtype* expected_updated_params2 = unshared_params2.cpu_data(); + this->net_->Update(); + const Dtype* actual_updated_params1 = ip1_weights->cpu_data(); + const Dtype* actual_updated_params2 = ip2_weights->cpu_data(); + for (int i = 0; i < count; ++i) { + EXPECT_EQ(expected_updated_params1[i], actual_updated_params1[i]); + EXPECT_EQ(expected_updated_params2[i], actual_updated_params2[i]); + EXPECT_NE(actual_updated_params1[i], actual_updated_params2[i]); + EXPECT_NE(expected_updated_params, expected_updated_params1); + } +} + +TYPED_TEST(NetTest, TestSharedWeightsResume) { + typedef typename TypeParam::Dtype Dtype; + + // Create a net with weight sharing; Update it once. + Caffe::set_random_seed(this->seed_); + this->InitDiffDataSharedWeightsNet(); + vector*> bottom; + EXPECT_EQ(this->net_->layer_names()[1], "innerproduct1"); + EXPECT_EQ(this->net_->layer_names()[2], "innerproduct2"); + Blob* ip1_weights = this->net_->layers()[1]->blobs()[0].get(); + Blob* ip2_weights = this->net_->layers()[2]->blobs()[0].get(); + // Check that data blobs of shared weights share the same location in memory. + EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); + // Check that diff blobs of shared weights are at different locations in + // memory. (The diffs should be accumulated at update time.) 
+ EXPECT_NE(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); + this->net_->ForwardBackward(bottom); + this->net_->Update(); + Blob shared_params; + const bool kReshape = true; + const bool kCopyDiff = false; + shared_params.CopyFrom(*ip1_weights, kCopyDiff, kReshape); + const int count = ip1_weights->count(); + + // Write the net to a NetParameter, as in Solver::Snapshot. + NetParameter net_param; + this->net_->ToProto(&net_param); + + // Reinitialize the net and copy parameters from net_param, as in + // Solver::Restore. + Caffe::set_random_seed(this->seed_); + this->InitDiffDataSharedWeightsNet(); + this->net_->CopyTrainedLayersFrom(net_param); + ip1_weights = this->net_->layers()[1]->blobs()[0].get(); + ip2_weights = this->net_->layers()[2]->blobs()[0].get(); + ASSERT_FALSE(NULL == ip1_weights); + ASSERT_FALSE(NULL == ip2_weights); + EXPECT_NE(ip1_weights, ip2_weights); + // Check that data blobs of shared weights share the same location in memory. + EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); + for (int i = 0; i < count; ++i) { + EXPECT_FLOAT_EQ(shared_params.cpu_data()[i], ip1_weights->cpu_data()[i]); + } + // Check that diff blobs of shared weights are at different locations in + // memory. (The diffs should be accumulated at update time.) + EXPECT_NE(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); +} + +TYPED_TEST(NetTest, TestParamPropagateDown) { + typedef typename TypeParam::Dtype Dtype; + vector*> bottom; + const bool kBiasTerm = true, kForceBackward = false; + const Dtype* kLossWeight1 = NULL; + const Dtype* kLossWeight2 = NULL; + + // Run the net with all params learned; check that gradients are non-zero. 
+ Caffe::set_random_seed(this->seed_); + Dtype blobs_lr_w1 = 1, blobs_lr_w2 = 1, blobs_lr_b1 = 2, blobs_lr_b2 = 2; + this->InitUnsharedWeightsNet(kLossWeight1, kLossWeight2, kForceBackward, + kBiasTerm, blobs_lr_w1, blobs_lr_w2, blobs_lr_b1, blobs_lr_b2); + this->net_->Forward(bottom); + this->net_->Backward(); + const vector > >& params = this->net_->params(); + const int num_params = params.size(); + ASSERT_EQ(4, num_params); + const Dtype kNonZeroTestMin = 1e-3; + vector param_asums(params.size()); + for (int i = 0; i < num_params; ++i) { + const Dtype param_asum = + caffe_cpu_asum(params[i]->count(), params[i]->cpu_diff()); + param_asums[i] = param_asum; + EXPECT_GT(param_asum, kNonZeroTestMin); + } + + // Change the learning rates to different non-zero values; should see same + // gradients. + Caffe::set_random_seed(this->seed_); + blobs_lr_w1 *= 2, blobs_lr_w2 *= 2, blobs_lr_b1 *= 2, blobs_lr_b2 *= 2; + this->InitUnsharedWeightsNet(kLossWeight1, kLossWeight2, kForceBackward, + kBiasTerm, blobs_lr_w1, blobs_lr_w2, blobs_lr_b1, blobs_lr_b2); + this->net_->Forward(bottom); + this->net_->Backward(); + const vector > >& params2 = this->net_->params(); + ASSERT_EQ(num_params, params2.size()); + for (int i = 0; i < num_params; ++i) { + const Dtype param_asum = + caffe_cpu_asum(params2[i]->count(), params2[i]->cpu_diff()); + EXPECT_FLOAT_EQ(param_asum, param_asums[i]); + } + + // Change a subset of the learning rates to zero; check that we see zero + // gradients for those. 
+ Caffe::set_random_seed(this->seed_); + blobs_lr_w1 = 1, blobs_lr_w2 = 0, blobs_lr_b1 = 0, blobs_lr_b2 = 1; + this->InitUnsharedWeightsNet(kLossWeight1, kLossWeight2, kForceBackward, + kBiasTerm, blobs_lr_w1, blobs_lr_w2, blobs_lr_b1, blobs_lr_b2); + this->net_->Forward(bottom); + this->net_->Backward(); + const vector > >& params3 = this->net_->params(); + ASSERT_EQ(num_params, params3.size()); + for (int i = 0; i < num_params; ++i) { + const Dtype param_asum = + caffe_cpu_asum(params3[i]->count(), params3[i]->cpu_diff()); + if (i == 1 || i == 2) { + EXPECT_FLOAT_EQ(0, param_asum); + } else { + EXPECT_FLOAT_EQ(param_asum, param_asums[i]); + } + } + + // Change the opposite subset of the learning rates to zero. + Caffe::set_random_seed(this->seed_); + blobs_lr_w1 = 0, blobs_lr_w2 = 1, blobs_lr_b1 = 1, blobs_lr_b2 = 0; + this->InitUnsharedWeightsNet(kLossWeight1, kLossWeight2, kForceBackward, + kBiasTerm, blobs_lr_w1, blobs_lr_w2, blobs_lr_b1, blobs_lr_b2); + this->net_->Forward(bottom); + this->net_->Backward(); + const vector > >& params4 = this->net_->params(); + ASSERT_EQ(num_params, params4.size()); + for (int i = 0; i < num_params; ++i) { + const Dtype param_asum = + caffe_cpu_asum(params4[i]->count(), params4[i]->cpu_diff()); + if (i == 0 || i == 3) { + EXPECT_FLOAT_EQ(0, param_asum); + } else { + EXPECT_FLOAT_EQ(param_asum, param_asums[i]); + } + } +} + +TYPED_TEST(NetTest, TestFromTo) { + typedef typename TypeParam::Dtype Dtype; + this->InitTinyNet(); + + // Run Forward and Backward, recording the data diff and loss. + Blob data; + data.ReshapeLike(*this->net_->blob_by_name("data")); + this->net_->ForwardPrefilled(); + this->net_->Backward(); + data.CopyFrom(*this->net_->blob_by_name("data"), true, true); + const Dtype *loss_ptr = this->net_->output_blobs()[0]->cpu_data(); + Dtype loss = *loss_ptr; + + // Check that combining partial Forwards gives the same loss. 
+ for (int i = 1; i < this->net_->layers().size(); ++i) { + // Note that we skip layer zero to keep the same data. + this->net_->ForwardFromTo(1, 1); + if (i < this->net_->layers().size() - 1) { + this->net_->ForwardFrom(i + 1); + } + EXPECT_EQ(loss, *loss_ptr); + } + + // Check that combining partial Backwards gives the same data diff. + for (int i = 1; i < this->net_->layers().size(); ++i) { + this->net_->BackwardTo(i); + this->net_->BackwardFrom(i - 1); + for (int j = 0; j < data.count(); ++j) { + EXPECT_EQ(data.cpu_diff()[j], + this->net_->blob_by_name("data")->cpu_diff()[j]); + } + } +} + +class FilterNetTest : public ::testing::Test { + protected: + void RunFilterNetTest( + const string& input_param_string, const string& filtered_param_string) { + NetParameter input_param; + CHECK(google::protobuf::TextFormat::ParseFromString( + input_param_string, &input_param)); + NetParameter expected_filtered_param; + CHECK(google::protobuf::TextFormat::ParseFromString( + filtered_param_string, &expected_filtered_param)); + NetParameter actual_filtered_param; + Net::FilterNet(input_param, &actual_filtered_param); + EXPECT_EQ(expected_filtered_param.DebugString(), + actual_filtered_param.DebugString()); + // Also test idempotence. 
+ NetParameter double_filtered_param; + Net::FilterNet(actual_filtered_param, &double_filtered_param); + EXPECT_EQ(actual_filtered_param.DebugString(), + double_filtered_param.DebugString()); + } +}; + +TEST_F(FilterNetTest, TestNoFilter) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, input_proto); +} + +TEST_F(FilterNetTest, TestFilterLeNetTrainTest) { + const string& input_proto = + "name: 'LeNet' " + "layer { " + " name: 'mnist' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + " data_param { " + " source: 'mnist-train-leveldb' " + " batch_size: 64 " + " } " + " transform_param { " + " scale: 0.00390625 " + " } " + " include: { phase: TRAIN } " + "} " + "layer { " + " name: 'mnist' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + " data_param { " + " source: 'mnist-test-leveldb' " + " batch_size: 100 " + " } " + " transform_param { " + " scale: 0.00390625 " + " } " + " include: { phase: TEST } " + "} " + "layer { " + " name: 'conv1' " + " type: 'Convolution' " + " bottom: 'data' " + " top: 'conv1' " + " param { " + " lr_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " } " + " convolution_param { " + " num_output: 20 " + " kernel_size: 5 " + " stride: 1 " + " weight_filler { " + " type: 'xavier' " + " } " + " bias_filler { " + " type: 'constant' " + " } " + " } " + "} " + "layer { " + " name: 'ip1' " + " type: 'InnerProduct' " + " bottom: 'conv1' " + " top: 'ip1' " + " param { " + " lr_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " } " + " inner_product_param { " + " num_output: 10 " + " weight_filler { " + " type: 'xavier' " + " } " + " bias_filler { " + " type: 
'constant' " + " } " + " } " + "} " + "layer { " + " name: 'accuracy' " + " type: 'Accuracy' " + " bottom: 'ip1' " + " bottom: 'label' " + " top: 'accuracy' " + " include: { phase: TEST } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'ip2' " + " bottom: 'label' " + " top: 'loss' " + "} "; + const string input_proto_train = "state: { phase: TRAIN } " + input_proto; + const string input_proto_test = "state: { phase: TEST } " + input_proto; + const string output_proto_train = + "name: 'LeNet' " + "layer { " + " name: 'mnist' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + " data_param { " + " source: 'mnist-train-leveldb' " + " batch_size: 64 " + " } " + " transform_param { " + " scale: 0.00390625 " + " } " + " include: { phase: TRAIN } " + "} " + "layer { " + " name: 'conv1' " + " type: 'Convolution' " + " bottom: 'data' " + " top: 'conv1' " + " param { " + " lr_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " } " + " convolution_param { " + " num_output: 20 " + " kernel_size: 5 " + " stride: 1 " + " weight_filler { " + " type: 'xavier' " + " } " + " bias_filler { " + " type: 'constant' " + " } " + " } " + "} " + "layer { " + " name: 'ip1' " + " type: 'InnerProduct' " + " bottom: 'conv1' " + " top: 'ip1' " + " param { " + " lr_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " } " + " inner_product_param { " + " num_output: 10 " + " weight_filler { " + " type: 'xavier' " + " } " + " bias_filler { " + " type: 'constant' " + " } " + " } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'ip2' " + " bottom: 'label' " + " top: 'loss' " + "} "; + const string& output_proto_test = + "name: 'LeNet' " + "layer { " + " name: 'mnist' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + " data_param { " + " source: 'mnist-test-leveldb' " + " batch_size: 100 " + " } " + " transform_param { " + " scale: 0.00390625 " + " } " + " include: { phase: TEST } " + "} " + "layer { " + " name: 
'conv1' " + " type: 'Convolution' " + " bottom: 'data' " + " top: 'conv1' " + " param { " + " lr_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " } " + " convolution_param { " + " num_output: 20 " + " kernel_size: 5 " + " stride: 1 " + " weight_filler { " + " type: 'xavier' " + " } " + " bias_filler { " + " type: 'constant' " + " } " + " } " + "} " + "layer { " + " name: 'ip1' " + " type: 'InnerProduct' " + " bottom: 'conv1' " + " top: 'ip1' " + " param { " + " lr_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " } " + " inner_product_param { " + " num_output: 10 " + " weight_filler { " + " type: 'xavier' " + " } " + " bias_filler { " + " type: 'constant' " + " } " + " } " + "} " + "layer { " + " name: 'accuracy' " + " type: 'Accuracy' " + " bottom: 'ip1' " + " bottom: 'label' " + " top: 'accuracy' " + " include: { phase: TEST } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'ip2' " + " bottom: 'label' " + " top: 'loss' " + "} "; + const string output_proto_train_explicit = + output_proto_train + " state: { phase: TRAIN } "; + const string output_proto_test_explicit = + output_proto_test + " state: { phase: TEST } "; + this->RunFilterNetTest(input_proto_train, output_proto_train_explicit); + this->RunFilterNetTest(input_proto_test, output_proto_test_explicit); +} + +TEST_F(FilterNetTest, TestFilterOutByStage) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + " include: { stage: 'mystage' } " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + const string& output_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + "} " + "layer { " + " name: 'loss' " + 
" type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, output_proto); +} + +TEST_F(FilterNetTest, TestFilterOutByStage2) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { stage: 'mystage' } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + const string& output_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, output_proto); +} + +TEST_F(FilterNetTest, TestFilterInByStage) { + const string& input_proto = + "state: { stage: 'mystage' } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { stage: 'mystage' } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, input_proto); +} + +TEST_F(FilterNetTest, TestFilterInByStage2) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " exclude: { stage: 'mystage' } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, 
input_proto); +} + +TEST_F(FilterNetTest, TestFilterOutByMultipleStage) { + const string& input_proto = + "state: { stage: 'mystage' } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { stage: 'mystage' stage: 'myotherstage' } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " include: { stage: 'mystage' } " + "} "; + const string& output_proto = + "state: { stage: 'mystage' } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " include: { stage: 'mystage' } " + "} "; + this->RunFilterNetTest(input_proto, output_proto); +} + +TEST_F(FilterNetTest, TestFilterInByMultipleStage) { + const string& input_proto = + "state: { stage: 'mystage' } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { stage: 'myotherstage' } " + " include: { stage: 'mystage' } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " include: { stage: 'mystage' } " + "} "; + this->RunFilterNetTest(input_proto, input_proto); +} + +TEST_F(FilterNetTest, TestFilterInByMultipleStage2) { + const string& input_proto = + "state: { stage: 'mystage' stage: 'myotherstage' } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " 
include: { stage: 'mystage' stage: 'myotherstage' } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " include: { stage: 'mystage' } " + "} "; + this->RunFilterNetTest(input_proto, input_proto); +} + +TEST_F(FilterNetTest, TestFilterInByNotStage) { + const string& input_proto = + "state: { stage: 'mystage' } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { not_stage: 'myotherstage' } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " include: { not_stage: 'myotherstage' } " + "} "; + this->RunFilterNetTest(input_proto, input_proto); +} + +TEST_F(FilterNetTest, TestFilterOutByNotStage) { + const string& input_proto = + "state: { stage: 'mystage' } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { not_stage: 'mystage' } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " include: { not_stage: 'mystage' } " + "} "; + const string& output_proto = + "state: { stage: 'mystage' } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} "; + this->RunFilterNetTest(input_proto, output_proto); +} + +TEST_F(FilterNetTest, TestFilterOutByMinLevel) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { 
min_level: 3 } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + const string& output_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, output_proto); +} + +TEST_F(FilterNetTest, TestFilterOutByMaxLevel) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { max_level: -3 } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + const string& output_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, output_proto); +} + +TEST_F(FilterNetTest, TestFilterInByMinLevel) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { min_level: 0 } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, input_proto); +} + +TEST_F(FilterNetTest, TestFilterInByMinLevel2) { + const string& input_proto = + "state: { level: 7 } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' 
" + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { min_level: 3 } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, input_proto); +} + +TEST_F(FilterNetTest, TestFilterInByMaxLevel) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { max_level: 0 } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, input_proto); +} + +TEST_F(FilterNetTest, TestFilterInByMaxLevel2) { + const string& input_proto = + "state: { level: -7 } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { max_level: -3 } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, input_proto); +} + +TEST_F(FilterNetTest, TestFilterInOutByIncludeMultiRule) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { min_level: 2 phase: TRAIN } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " include: { min_level: 2 phase: TEST } " + "} "; + const string& input_proto_train = + "state: { level: 4 phase: 
TRAIN } " + input_proto; + const string& input_proto_test = + "state: { level: 4 phase: TEST } " + input_proto; + const string& output_proto_train = + "state: { level: 4 phase: TRAIN } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { min_level: 2 phase: TRAIN } " + "} "; + const string& output_proto_test = + "state: { level: 4 phase: TEST } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " include: { min_level: 2 phase: TEST } " + "} "; + this->RunFilterNetTest(input_proto_train, output_proto_train); + this->RunFilterNetTest(input_proto_test, output_proto_test); +} + +TEST_F(FilterNetTest, TestFilterInByIncludeMultiRule) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { min_level: 2 phase: TRAIN } " + " include: { phase: TEST } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " include: { min_level: 2 phase: TEST } " + " include: { phase: TRAIN } " + "} "; + const string& input_proto_train = + "state: { level: 2 phase: TRAIN } " + input_proto; + const string& input_proto_test = + "state: { level: 2 phase: TEST } " + input_proto; + this->RunFilterNetTest(input_proto_train, input_proto_train); + this->RunFilterNetTest(input_proto_test, input_proto_test); +} + +TEST_F(FilterNetTest, TestFilterInOutByExcludeMultiRule) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' 
" + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " exclude: { min_level: 2 phase: TRAIN } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " exclude: { min_level: 2 phase: TEST } " + "} "; + const string& input_proto_train = + "state: { level: 4 phase: TRAIN } " + input_proto; + const string& input_proto_test = + "state: { level: 4 phase: TEST } " + input_proto; + const string& output_proto_train = + "state: { level: 4 phase: TRAIN } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " exclude: { min_level: 2 phase: TEST } " + "} "; + const string& output_proto_test = + "state: { level: 4 phase: TEST } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " exclude: { min_level: 2 phase: TRAIN } " + "} "; + this->RunFilterNetTest(input_proto_train, output_proto_train); + this->RunFilterNetTest(input_proto_test, output_proto_test); +} + +TYPED_TEST(NetTest, TestReshape) { + typedef typename TypeParam::Dtype Dtype; + // We set up bottom blobs of two different sizes, switch between + // them, and check that forward and backward both run and the results + // are the same. 
+ Caffe::set_random_seed(this->seed_); + Caffe::set_mode(Caffe::CPU); + FillerParameter filler_param; + filler_param.set_std(1); + GaussianFiller filler(filler_param); + Blob blob1(4, 3, 9, 11); + Blob blob2(2, 3, 12, 10); + filler.Fill(&blob1); + filler.Fill(&blob2); + + this->InitReshapableNet(); + Blob* input_blob = this->net_->input_blobs()[0]; + Blob* output_blob = this->net_->output_blobs()[0]; + input_blob->Reshape(blob1.num(), blob1.channels(), blob1.height(), + blob1.width()); + caffe_copy(blob1.count(), blob1.cpu_data(), input_blob->mutable_cpu_data()); + this->net_->ForwardPrefilled(); + // call backward just to make sure it runs + this->net_->Backward(); + Blob output1(output_blob->num(), output_blob->channels(), + output_blob->height(), output_blob->width()); + caffe_copy(output1.count(), output_blob->cpu_data(), + output1.mutable_cpu_data()); + + input_blob->Reshape(blob2.num(), blob2.channels(), blob2.height(), + blob2.width()); + caffe_copy(blob2.count(), blob2.cpu_data(), input_blob->mutable_cpu_data()); + this->net_->ForwardPrefilled(); + this->net_->Backward(); + Blob output2(output_blob->num(), output_blob->channels(), + output_blob->height(), output_blob->width()); + caffe_copy(output2.count(), output_blob->cpu_data(), + output2.mutable_cpu_data()); + + input_blob->Reshape(blob1.num(), blob1.channels(), blob1.height(), + blob1.width()); + caffe_copy(blob1.count(), blob1.cpu_data(), input_blob->mutable_cpu_data()); + this->net_->ForwardPrefilled(); + this->net_->Backward(); + for (int i = 0; i < output1.count(); ++i) { + CHECK_EQ(*(output1.cpu_data() + i), *(output_blob->cpu_data() + i)); + } + + input_blob->Reshape(blob2.num(), blob2.channels(), blob2.height(), + blob2.width()); + caffe_copy(blob2.count(), blob2.cpu_data(), input_blob->mutable_cpu_data()); + this->net_->ForwardPrefilled(); + this->net_->Backward(); + for (int i = 0; i < output2.count(); ++i) { + CHECK_EQ(*(output2.cpu_data() + i), *(output_blob->cpu_data() + i)); + } +} + 
+TYPED_TEST(NetTest, TestSkipPropagateDown) { + // check bottom_need_backward if propagate_down is true + this->InitSkipPropNet(false); + vector vec_layer_need_backward = this->net_->layer_need_backward(); + for (int layer_id = 0; layer_id < this->net_->layers().size(); ++layer_id) { + string layer_name = this->net_->layer_names()[layer_id]; + if (layer_name == "loss") { + // access to bottom_need_backward coresponding to label's blob + bool need_back = this->net_->bottom_need_backward()[layer_id][1]; + // if propagate_down is true, the loss layer will try to + // backpropagate on labels + EXPECT_TRUE(need_back) << "bottom_need_backward should be True"; + } + // layer_need_backward should be True except for data and silence layers + if (layer_name.find("data") != std::string::npos || + layer_name == "silence") { + EXPECT_FALSE(vec_layer_need_backward[layer_id]) + << "layer_need_backward for " << layer_name << " should be False"; + } else { + EXPECT_TRUE(vec_layer_need_backward[layer_id]) + << "layer_need_backward for " << layer_name << " should be True"; + } + } + // check bottom_need_backward if propagat_down is false + this->InitSkipPropNet(true); + vec_layer_need_backward.clear(); + vec_layer_need_backward = this->net_->layer_need_backward(); + for (int layer_id = 0; layer_id < this->net_->layers().size(); ++layer_id) { + string layer_name = this->net_->layer_names()[layer_id]; + if (layer_name == "loss") { + // access to bottom_need_backward coresponding to label's blob + bool need_back = this->net_->bottom_need_backward()[layer_id][1]; + // if propagate_down is false, the loss layer will not try to + // backpropagate on labels + EXPECT_FALSE(need_back) << "bottom_need_backward should be False"; + } + // layer_need_backward should be False except for innerproduct and + // loss layers + if (layer_name == "innerproduct" || layer_name == "loss") { + EXPECT_TRUE(vec_layer_need_backward[layer_id]) + << "layer_need_backward for " << layer_name << " should be True"; + 
} else { + EXPECT_FALSE(vec_layer_need_backward[layer_id]) + << "layer_need_backward for " << layer_name << " should be False"; + } + } +} + +} // namespace caffe From 1274161690981fb28cf846eda65f0555cf676e85 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Thu, 4 Jun 2015 22:31:06 +0800 Subject: [PATCH 50/82] triplet data generation and network update New triplet loss layer added(beta1 version-no test source files) macro define in upgrade_proto restore --- .../triplet/convert_mnist_triplet_data.cpp | 129 +++++ examples/triplet/create_mnist_triplet.sh | 21 + examples/triplet/mnist_siamese.ipynb | 154 ++++++ examples/triplet/mnist_triplet.prototxt | 113 ++++ .../triplet/mnist_triplet_solver.prototxt | 25 + .../triplet/mnist_triplet_train_test.prototxt | 500 ++++++++++++++++++ examples/triplet/readme.md | 187 +++++++ examples/triplet/train_mnist_triplet.sh | 5 + src/caffe/data_transformer.cpp | 51 ++ src/caffe/layers/triplet_loss_layer.cpp | 261 +++++++++ src/caffe/test/test_net.cpp | 15 + 11 files changed, 1461 insertions(+) create mode 100644 examples/triplet/convert_mnist_triplet_data.cpp create mode 100755 examples/triplet/create_mnist_triplet.sh create mode 100644 examples/triplet/mnist_siamese.ipynb create mode 100644 examples/triplet/mnist_triplet.prototxt create mode 100644 examples/triplet/mnist_triplet_solver.prototxt create mode 100644 examples/triplet/mnist_triplet_train_test.prototxt create mode 100644 examples/triplet/readme.md create mode 100755 examples/triplet/train_mnist_triplet.sh create mode 100644 src/caffe/layers/triplet_loss_layer.cpp diff --git a/examples/triplet/convert_mnist_triplet_data.cpp b/examples/triplet/convert_mnist_triplet_data.cpp new file mode 100644 index 00000000000..e35e7f4f3bf --- /dev/null +++ b/examples/triplet/convert_mnist_triplet_data.cpp @@ -0,0 +1,129 @@ +// This script converts the MNIST dataset to the leveldb format used +// by caffe to train siamese network. 
+// Usage: +// convert_mnist_data input_image_file input_label_file output_db_file +// The MNIST dataset could be downloaded at +// http://yann.lecun.com/exdb/mnist/ +#include // NOLINT(readability/streams) +#include + +#include "glog/logging.h" +#include "google/protobuf/text_format.h" +#include "leveldb/db.h" +#include "stdint.h" + +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/math_functions.hpp" + +uint32_t swap_endian(uint32_t val) { + val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); + return (val << 16) | (val >> 16); +} + +void read_image(std::ifstream* image_file, std::ifstream* label_file, + uint32_t index, uint32_t rows, uint32_t cols, + char* pixels, char* label) { + image_file->seekg(index * rows * cols + 16); + image_file->read(pixels, rows * cols); + label_file->seekg(index + 8); + label_file->read(label, 1); +} + +void convert_dataset(const char* image_filename, const char* label_filename, + const char* db_filename) { + // Open files + std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); + std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); + CHECK(image_file) << "Unable to open file " << image_filename; + CHECK(label_file) << "Unable to open file " << label_filename; + // Read the magic and the meta data + uint32_t magic; + uint32_t num_items; + uint32_t num_labels; + uint32_t rows; + uint32_t cols; + + image_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + CHECK_EQ(magic, 2051) << "Incorrect image file magic."; + label_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + CHECK_EQ(magic, 2049) << "Incorrect label file magic."; + image_file.read(reinterpret_cast(&num_items), 4); + num_items = swap_endian(num_items); + label_file.read(reinterpret_cast(&num_labels), 4); + num_labels = swap_endian(num_labels); + CHECK_EQ(num_items, num_labels); + image_file.read(reinterpret_cast(&rows), 4); + rows = swap_endian(rows); + 
image_file.read(reinterpret_cast(&cols), 4); + cols = swap_endian(cols); + + // Open leveldb + leveldb::DB* db; + leveldb::Options options; + options.create_if_missing = true; + options.error_if_exists = true; + leveldb::Status status = leveldb::DB::Open( + options, db_filename, &db); + CHECK(status.ok()) << "Failed to open leveldb " << db_filename + << ". Is it already existing?"; + + char label_i; + char label_j; + char label_k; + char* pixels = new char[3 * rows * cols]; + const int kMaxKeyLength = 10; + char key[kMaxKeyLength]; + std::string value; + + caffe::Datum datum; + datum.set_channels(3); // one channel for each image in the pair + datum.set_height(rows); + datum.set_width(cols); + LOG(INFO) << "A total of " << num_items << " items."; + LOG(INFO) << "Rows: " << rows << " Cols: " << cols; + for (int itemid = 0; itemid < num_items; ++itemid) { + int i = caffe::caffe_rng_rand() % num_items; // pick triplet groups + int j = caffe::caffe_rng_rand() % num_items; + int k = caffe::caffe_rng_rand() % num_items; + read_image(&image_file, &label_file, i, rows, cols, + pixels, &label_i); + read_image(&image_file, &label_file, j, rows, cols, + pixels + (rows * cols), &label_j); + read_image(&image_file, &label_file, k, rows, cols, + pixels + (2 * rows * cols), &label_k); + + datum.set_data(pixels, 3*rows*cols); + if (label_i == label_j && label_i != label_k) { + datum.set_label(1); + + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", itemid); + db->Put(leveldb::WriteOptions(), std::string(key), value); + } else { + itemid--; + datum.set_label(0); + } + } + + delete db; + delete pixels; +} + +int main(int argc, char** argv) { + if (argc != 4) { + printf("This script converts the MNIST dataset to the leveldb format used\n" + "by caffe to train a siamese network.\n" + "Usage:\n" + " convert_mnist_data input_image_file input_label_file " + "output_db_file\n" + "The MNIST dataset could be downloaded at\n" + " http://yann.lecun.com/exdb/mnist/\n" + 
"You should gunzip them after downloading.\n"); + } else { + google::InitGoogleLogging(argv[0]); + convert_dataset(argv[1], argv[2], argv[3]); + } + return 0; +} diff --git a/examples/triplet/create_mnist_triplet.sh b/examples/triplet/create_mnist_triplet.sh new file mode 100755 index 00000000000..f404f2aa255 --- /dev/null +++ b/examples/triplet/create_mnist_triplet.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env sh +# This script converts the mnist data into leveldb format. + +EXAMPLES=./build/examples/triplet +DATA=./data/mnist + +echo "Creating leveldb..." + +rm -rf ./examples/triplet/mnist_siamese_train_leveldb +rm -rf ./examples/triplet/mnist_siamese_test_leveldb + +$EXAMPLES/convert_mnist_triplet_data.bin \ + $DATA/train-images-idx3-ubyte \ + $DATA/train-labels-idx1-ubyte \ + ./examples/triplet/mnist_triplet_train_leveldb +$EXAMPLES/convert_mnist_triplet_data.bin \ + $DATA/t10k-images-idx3-ubyte \ + $DATA/t10k-labels-idx1-ubyte \ + ./examples/triplet/mnist_triplet_test_leveldb + +echo "Done." diff --git a/examples/triplet/mnist_siamese.ipynb b/examples/triplet/mnist_siamese.ipynb new file mode 100644 index 00000000000..8e076663ca6 --- /dev/null +++ b/examples/triplet/mnist_siamese.ipynb @@ -0,0 +1,154 @@ +{ + "metadata": { + "description": "Extracting features and plotting the Siamese network embedding.", + "example_name": "Siamese network embedding", + "include_in_docs": true, + "priority": 6, + "signature": "sha256:845bb18929f96543ba2611eb5eca744fd98939cbef876df6bc319c29f616fc64" + }, + "nbformat": 3, + "nbformat_minor": 0, + "worksheets": [ + { + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Setup\n", + "\n", + "Import Caffe and the usual modules." 
+ ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "%matplotlib inline\n", + "\n", + "# Make sure that caffe is on the python path:\n", + "caffe_root = '../../' # this file is expected to be in {caffe_root}/examples/siamese\n", + "import sys\n", + "sys.path.insert(0, caffe_root + 'python')\n", + "\n", + "import caffe" + ], + "language": "python", + "metadata": {}, + "outputs": [], + "prompt_number": 1 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Load the trained net\n", + "\n", + "Load the model definition and weights and set to CPU mode TEST phase computation with input scaling." + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "MODEL_FILE = 'mnist_siamese.prototxt'\n", + "# decrease if you want to preview during training\n", + "PRETRAINED_FILE = 'mnist_siamese_iter_50000.caffemodel' \n", + "caffe.set_mode_cpu()\n", + "net = caffe.Net(MODEL_FILE, PRETRAINED_FILE, caffe.TEST)" + ], + "language": "python", + "metadata": {}, + "outputs": [], + "prompt_number": 2 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Load some MNIST test data" + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "TEST_DATA_FILE = '../../data/mnist/t10k-images-idx3-ubyte'\n", + "TEST_LABEL_FILE = '../../data/mnist/t10k-labels-idx1-ubyte'\n", + "n = 10000\n", + "\n", + "with open(TEST_DATA_FILE, 'rb') as f:\n", + " f.read(16) # skip the header\n", + " raw_data = np.fromstring(f.read(n * 28*28), dtype=np.uint8)\n", + "\n", + "with open(TEST_LABEL_FILE, 'rb') as f:\n", + " f.read(8) # skip the header\n", + " labels = np.fromstring(f.read(n), dtype=np.uint8)" + ], + "language": "python", + "metadata": {}, + "outputs": [], + "prompt_number": 3 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Generate the Siamese features" + ] + }, + { + "cell_type": "code", + "collapsed": false, + 
"input": [ + "# reshape and preprocess\n", + "caffe_in = raw_data.reshape(n, 1, 28, 28) * 0.00390625 # manually scale data instead of using `caffe.io.Transformer`\n", + "out = net.forward_all(data=caffe_in)" + ], + "language": "python", + "metadata": {}, + "outputs": [], + "prompt_number": 4 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Visualize the learned Siamese embedding" + ] + }, + { + "cell_type": "code", + "collapsed": false, + "input": [ + "feat = out['feat']\n", + "f = plt.figure(figsize=(16,9))\n", + "c = ['#ff0000', '#ffff00', '#00ff00', '#00ffff', '#0000ff', \n", + " '#ff00ff', '#990000', '#999900', '#009900', '#009999']\n", + "for i in range(10):\n", + " plt.plot(feat[labels==i,0].flatten(), feat[labels==i,1].flatten(), '.', c=c[i])\n", + "plt.legend(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'])\n", + "plt.grid()\n", + "plt.show()" + ], + "language": "python", + "metadata": {}, + "outputs": [ + { + "metadata": {}, + "output_type": "display_data", + "png": 
"iVBORw0KGgoAAAANSUhEUgAAA54AAAIXCAYAAAD0R4FDAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvXtwXOWZr/usvurWUktqGdmxaawEHEMuthGXITiIyMaJ\nwbEMFmCTDMkkoyqSyTnZMwdqpmYyzEyS2ruKue2ZqSTHO/vYGQbhCxdjwI637ViWMEEEMJhgB4MB\ngSRLsizJkiypuyX1+WP1Wlp971YvSd3y+1S5rF69Lt/6+lOrf/2+v/dVgsEggiAIgiAIgiAIgjBT\nWOZ6AIIgCIIgCIIgCML8RoSnIAiCIAiCIAiCMKOI8BQEQRAEQRAEQRBmFBGegiAIgiAIgiAIwowi\nwlMQBEEQBEEQBEGYUUR4CoIgCIIgCIIgCDNKRsJTUZQ8RVFaFUV5U1GUU4qi/HezBiYIgiAIgiAI\ngiDMD5RM+3gqilIQDAZHFEWxAS8B/08wGHzJlNEJgiAIgiAIgiAIOU/GqbbBYHAk9KMDsAJ9mZ5T\nEARBEARBEARBmD9kLDwVRbEoivIm0A0cDQaDpzIfliAIgiAIgiAIgjBfMCPiORkMBlcAi4EvK4pS\nk/GoBEEQBEEQBEEQhHmDzawTBYPBi4qivAhUA03adkVRMjORCoIgCIIgCIIgCFlNMBhUEj2fkfBU\nFMUDjAeDwQFFUfKBtcDfxxhEJpcRhDC+9a1vsWPHjrkehjCPkDUlmImsJ8FsZE0JZiNrSjAbRUmo\nOYHMI54LgV8pimJBTdt9PBgMHsnwnIIgCIIgCIIgCMI8IiPhGQwG3wZWmTQWQUiJq666aq6HIMwz\nZE0JZiLrSTAbWVOC2ciaEuaCjIsLCcJsU1NTM9dDEOYZsqYEM5H1JJiNrCnBbGRNCXOBCE9BEARB\nEARBEARhRjGtqq0gCIIgCIIgCIIQTSrFd3KF6RaOVWa64qyiKEGpaisIgiAIgiAIwuWKoijzotNH\nvPsIbU+oriXVVhAEQRAEQRAEQZhRRHgKOUdTU9NcD0GYZ8iaEsxE1pNgNrKmBLORNSXMBSI8BUEQ\nBEEQBEEQhBlFPJ6CIAiCIAiCIAgziHg8JeIpCIIgCIIgCIJwWdPX18emTZsoKiriqquu4sknnzT9\nGiI8hZxDfAmC2ciaEsxE1pNgNrKmBLORNSVE8v3vf5+8vDx6enp44okneOihhzh16pSp1xDhKQiC\nIAiCIAiCcJly6dIlnnnmGX784x9TUFDAl770JTZu3Mjjjz9u6nXE4ykIgiAIgiAIgjCDJPV4NjTA\nmTNQUACNjeB2p3eBDI4/ceIEt956K5cuXdK3/fM//zNNTU3s27cvpfsQj6cgCIIgCIIgCEK2c+YM\nHDsGBw6oInIWjx8eHqa4uDhsm8vlYmhoKP1xJECEp5BziC9BMBtZU4KZyHoSzEbWlGA2sqaykIIC\n9f/qati2bVaPLyoqYnBwMGzbxYsXcblc6Y8jASI8BUEQBEEQBEEQ5pLGRqivh0OH0k+zzfD4a665\nhvHxcd5//31921tvvcXnPve59MeRAPF4CoIgCIIgCIIgzCDZ3sdzy5YtKIrCL3/5S9544w3uuusu\nfvvb37J8+fKw/cTjKQiCIAiCIAiCIEyLn/3sZ4yOjrJgwQK+8Y1v8Itf/CJKdGaKCE8h5xBfgmA2\nsqYEM5H1JJiNrCnBbGRNCZGUlpby7LPPMjw8zEcffcT9999v+jVEeAqCIAiCIAiCIAgzing8BUEQ\nBEEQBEEQZpBs93iming8BUEQBEEQBEEQhKxFhKeQc4gvQTAbWVOCmch6EsxG1pRgNrKmhLlAhKcg\nCIIgCIIgCIIwo4jHUxAEQRAEQRAEYQYRj6dEPAVBEARBEARBEIQZRoSnkHOIL0EwG1lTgpnIehLM\nRtaUYDaypoS5QISnIAiCIAiCIAiCMKOIx1MQBEEQBEEQBGEGy
WaP53/8x3+wY8cOfv/737Nlyxa2\nb98ed99MPJ62zIcqCIIgCIIgCIIg5CKf+tSn+NGPfsTBgwcZHR2dsetIqq2Qc4gvQTAbWVOCmch6\nEsxG1pRgNrKmBCObNm1i48aNlJeXz+h1RHgKgiAIgiAIgiDMKQ1ADbAeGJiD45nxVGDxeAqCIAiC\nIAiCIMwgyT2eNcCx0M/1wO40r5Dp8fCjH/2I9vZ28XgKgiAIgiBoNDc3MDBwBputgNraRpxO91wP\nSRAEIQMKQv9XA9vm4PiZj3hKqq2Qc4gvQTAbWVOCmch6mh0GBs7Q1XWM9vYDtLQ0zPVwZhRZU4LZ\nyJrKRhpRI5WHgOl8kZbp8WrUciaRiKcgCIIgCDmHzaZ+u+/xVLN69fS+3RcEQcge3EwnPdaM4ycm\nJggEAoyPjzMxMYHP58Nms2G1WjMYTzTi8RQEQRAEIefw+QZoaWlg9eptkmYrCELWk819PP/u7/6O\nf/iHf4ja9rd/+7dR+2bi8RThKQiCIAiCIAiCMINks/BMh0yEp3g8hZxDfAmC2ciaEswkcj01Nzew\nb18N+/evx+ebXon7TMmGMQjTR96jBLORNSXMBSI8BUEQBGEGyYYiONkwBkEQBOHyRlJtBUEQBGEG\n2b9/Pe3tB/B4qrnzzkNz4kfMhjEIgiBczkiqrQhPQRAEQZhRsqEITjaMQRAE4XJGhKek2go5iPgS\nBLORNSWYSeR6cjrdrFmze04FnxljEJ/o3CHvUYLZyJoS5gLp4ykIgiAIOUZzcwMDA2ew2QqorW2c\nFVGr+UQBWloaWLMmk35zU8zFvQiCIAizj6TaCoIgCEKOsW9fjS4Cq6rqTROBiUjHJ5qOmJyLexEE\nQZhtJNVWUm0FQRAEIeew2QoA8HiqWb16W9hzM5USW1vbSFVVfUrFidKpopvoXgRBEIT5gwhPIecQ\nX4JgNrKmBDOZjfWUSATOVOuUdHyi6YjJdATt5Yq8RwlmI2tKmAtEeAqCIAhCDmCMZAJxRWA2RBDz\n8ytwOj0pCclsKL4kCIJwOeP3+/nOd77DVVddRXFxMStXruTXv/616dcRj6cgCIJw2ZMLBW5S9UJm\nQ+sU8W0KgiCEk80ez5GRER577DG+/e1vc+WVV/Liiy+yZcsW3n77bbxeb9i+mXg8paqtIAiCcNkz\nUxVbzSTVSKYWQYyFWQI72XmyIeqqkQtfKgiCIMwlBQUFPProo/rjO++8k6VLl/LGG29ECc9MkFRb\nIecQX4JgNrKmBDOF0kytJzO8kGb5P5OdJ5t8mzPleZ1N5D1KMBtZU9lHAw3UUMN61jNA+oXhMj3e\nSHd3N2fOnOG6667L6DyRSMRTEARBuOyprW2c8/TUZLS2PsLISA9HjmxNO3KnRf36+98BUhPYiSKF\nyYR6oqhrJuza9VlGRrqwWOzcffdruFzJv4nPpuirIAhCPM5whmOomTcNNLCb9N5DMz1eIxAI8MAD\nD/Ctb32La665ZlrniId4PAVBEAQhy4gl+tLxTUYef/BgnX5sYeFiNm9+O6lwTXS9ufKRbt/uJhC4\nCKj38cADnyQ9Jhs8r4IgCMk8nutZzwEOUE01hziEm/TerzI9HmBycpKtW7cyPDzMc889h9VqTfk+\nxOMpCIIgCFlMvKhiLM9prMhdqscbj001/TVRpHCmIprJsFjsAFitBXz96y/F3U+bl6GhsxQWenE4\nimdriIIgCNOikUYaaGAb26YlGjM9PhgM8p3vfIfz58+zf//+mKIzU8TjKeQc4ksQzEbWlADh7Up8\nvun7Y9JZT/H8h4ODZ0M/WRkd7cHnG4jpm4x3fKRonI7nMpt8mhp33/0ahYWLuffeUwnTbLV5uXSp\nnZ6e4znt7wR5jxLMR9ZU9uHGzW52T0s0mnH8Qw89xB/+8Af27duH0+mc1jmSIcJTEARBuGxIJC7n\nogiNJjDt9mJuuukxffvkp
D/00wTnzh2jpaUhZr/LeFHJSNE4nV6Z2dhf0+Xy8sADnyT1dmrzYreX\nAOLvFARBSERbWxvbtm3jrbfeorKyEpfLhcvl4sknnzT1OuLxFARBEC4bEvkW9+9fT3v7gbTSUdNF\nSwEdHDxLMBhgdLQXmIgaz44dZfj9/YAqnrZu/SjmeDL1L2baaiRbW5Vo83LTTY/R2vqw+DsFQZhz\nsrmPZzpk4vEU4SkIgiBcNiQSl7NRhMYofI1YLA48nhtwOIqprW3k0KF6OjsPY7eXsGTJ1xgZOYfN\nVsDg4HuMjp5PqaprKqIwlhBvbm6gre15JiZ8eDzXs3btnrjzkU7BI0EQhMsZEZ6SaivkIOJLEMxG\n1tTlQyLfolmppYnW01QK6FSxG4fDTVnZyjAv4tq1e6iqqmfr1o84d65JTwEeHPyIQOAiPl8vu3Zd\nE9eP2tzcwNmzu5OmDsdK1R0YOMPoaBd+fz+dnYdpaWngf/0vB9u2KWzbZuHcuZcSHp8uZnlr5zPy\nHiWYjawpYS4Q4SkIgiBcNqQjLmdCEGnCd/Pmk3i9G/F669iy5UPy8soAVcBZrfns3r2c9vbDHDpU\nz8TEmOEM4/pPk5P+uKJyYOCM3nYELHz88UF+9asKhobawu7twoW3cDrLcTiifaMAZWUrWL16G8Fg\nILQlyPPP36afIxAYJD+/krVrn5q2YJ8Lb60gCIIw+0iqrSAIgpDVzJWPcDbTSI8efZCPP96Px7OC\nCxdOMjbWoz+nKHaD8FOxWBxMTvqx20vYvPktPeVWm6uenleYnPShKDYsljwmJoaBqd6XsVJ+tXv0\n+QZoavo2EKSmZgdOp5tt2yyA+rd8w4YWFi681bT5mQ1vrSAIwlwjqbbSx1MQBEHIcmL1tJwNUk0j\nzUQYa8f29Z3E7++no+MwimIP20cVnQqa8AO1yTdAIHCRZ56ppqLiBmprG8PmSj12XK+Qa7UWYLE4\n2L7dzfj4SNg1HI5SrNZ89u2riXkfGzY08/zzt7FhwzEWLrw1rflJRm1t44x7awVBEIS5RyKeQs7R\n1NRETU3NXA9DmEfImspuzI6IpSoUUy02ZIz8uVxL+eCDYlauXER+vprammpxH1BFnN1eyLlz0QWI\nNCyWPK644ibOnTuGzVbE+LgazayqqsfvH6a9/YC+r93uprj40/T3/55Nm15l374vG1JwVRTFxt13\nv87x4/+XPpaioqUUFV2ZcOyx5idbq9zmOvIeJZiNrKnZRyKe4vEUBEEQspxEBYESEc+jmaqnMFU/\nqDHyV1CwiL6+t2hvP8DHHx9IubhPWdkKvN46Skuvpb//dNR+ZWVf4FOfWktBwSKuuqqOyclx8vMr\nqai4PnSeIsbG+lm9+udhxwUCA1y48DqTkz5eeGGNIdJpwWp1AWpU9MUX12G1qpFWRbExPNyWdOxO\npxuHw83Bg3X6HItfUxAEQYiHRDwFQRCEeYkxmpifX8m9957G6XSbHkE1Rv6OHNmqn9vhcNPZeRib\nrYgrrriZNWvUtiS7dn2WkZEuLBY7d911hBMnfqJHDR9/fCGjo11h53c4SrHZ8pmcHGdyMkAgMGzw\nfNqwWCx6Oq3XW0db296oMRojo/HwejfS1fUyPt/5sGtv2fJBVERzaOgsExMBxsbC+5BqEdd05lai\npIIgXA5IxFOEpyAIgjBP0QSmhrF4jlmeQqNoys+v4OLFs1y48DplZV/Ebndx4cIJfL4LAOTlVeJw\nuBgcfE8/vrBwMUuWfE0/R0/Pb/H7p19B1+vdSFvb88Ckvq2gYBFu97V0dh4O29fpLMfvv0gwOE5Z\n2Qo2bDiqC2dQ27zcc8+bnDjxU318vb2vhxU+0tAEKpD23EovUEEQLgdEeIrwFHIQ8SUIZiNran7i\n8w2we/dyRke7oiJwZkXZjKLJ6fTg8/Xy7ruwbJn6fH5+ZVgEU1FsBINTLVEWLryNiYlxenq
OT/Mu\nw1Er4E6iRSGt1gIqK2/hy1/+Jbt3f5aJiTHsdheVlas5f/41XUTabIWhCrg2PJ5V2GyF9Pf/ntHR\n84yPXzKMObzIkXbNpUvvYWTk3LTmczaq2uZ6VFXeowSzkTU1+2S78PzGN77BkSNHuHTpEh6Ph+98\n5zv89V//ddR+4vEUBEEQhAicTjf33ns6pj/ULC+i0d/p8awIe87hKGXhwhq9Sm1Z2QocjpKwfc6d\nO0ZPz8sJrpDen2k1BVcVnYpiZ2JihI6Ow7S2Poz2OSEQGAJgcnKqRcv4+BiBwEV8vgt0dBzmllv+\nldHR8wQCF8OEcqTotNuLuf/+9xgZOTft+dQ8vKWl14b5Rc1EvKeCIAiJ+au/+is+/PBDBgcHOXDg\nAP/+7//Or3/9a1OvIcJTyDnkGzrBbGRNzV/iFQjKtBWIVrgoGAxQVOTFanUCasXZZcvUyOaGDU2M\njJzT/ZiXLn1CWdnnKChYRH5+ZehMViLF3BQKxpRZu70ktC05ijLVLc1ud3HTTY8xOTmmb+vtfQOP\nRy1MVFa2ImJ+guzbdysWiyqYrdYCnE4PAOXlK1myZD1LlqynqMhLWdnnaWl5SN83cj7jFXgyor1G\nQ0PJCxpNF7Nav8wV8h4lmI2sKSGS6667jry8PP2xzWZjwYIFpl5DhKcgCIJw2THdSrkaWgSto+Mw\ngcAluruP09FxGLu9EFArxf6f/1NHX99J/Rif7wLnzh2jouIGios/Hdo6keAqU4JUUewsWfI1Uv2z\nHQyO64I3EBjihRduD3ve41nF2rV7qKqqZ8OGo9x99+toolZRrIyP+5icDKAoDurqfsv9979HUdFS\nrNYC+vpO4vNdxO8fpLv7OO3tB7DbizKOLM+kOMz09RYEQZhpGoAaYD0wnZyPTI8H+N73vkdhYSHX\nXXcdf/M3f8OqVaumeabYiMdTyDnElyCYjawpIV2MvkSn001Hx2G9ku3Ro4f50peqsVqddHdHezeX\nLFnPhQsnGRlpT+laDoebxYu/yocfPm2oZps6dnsJZWWf08eiKHYqKm7k/PlXCAaDKIqVu+9+DYej\nhH37bqWg4FOcP9+qH+90VlBRUY3fPxjTi5rIm5mOfzPdok/NzQ20tT3PxIQPj+d61q7dM29FpbxH\nCWYja2r2SebxrAG0Ds71QLpl1jI9XiMYDHLs2DE2b97M/v37ufHGG8Oez8TjaUv0pCAIgiBc7kRW\nrh0aasNqteP1bqSmZgcwVckV4N1367jzzr0cObI15vk6OtQWK6mgKDbKy79IZ+dvpiU6QSEYnKSn\n51VA9Z0WF18TJiCDwUmefnolDz54gQce+IT9+9eHHe/znae9/QB5eZVEUlCwKGHRptraRp55ZhVW\nq5MjR7YmLOyjpdymysDAGb1wU2fnYVpaGnA43DldREgQhMuXgtD/1cB0cj4yPV5DURRqamqor6/n\nySefjBKemSART0EQBEEwoImnwcGzuFxe+vtP4ff3A1rVWFUAxmr9YRReq1f/nGefvZmxsa6oa4Rj\nBSawWBx6P04zyMtbwPj4GOPjg/p1Fiy4md7e15ic9EWPwppPefkqbLZ8rFYH3d2t+P1qKxiHo5R7\n7jnBM8/coPf5dDjcbNnyYZi4i9UaxbitqGgpRUVXRgnDVKrORu5jbP2itYM5eLBOWrMIgpCVJIt4\nDqCmy24DpvOVWabHR/Ld736XyspKfvKTn4Rtl6q2giAIgmASmi9xZKSd7u7juugEdNEZz4do9DS2\ntj7Mffed1gvzxGPDhiZcrqVYrXkJ90uXsbEeg+i0UF6+gp6e4zFFJ8DExCg9Pcfp7DxMd/crKIr6\n+cFicVBScg0tLQ9RXv5FQBOib6ZUtMm4raBgUUzPZype0Mh9amsb8Xrr8Ho3smHDUZxOd84XERLm\nK2a474T5jhs1PXa6ojGT48+fP8/OnTu5dOkSExMTHDx
4kD179rBx48ZpjiY2IjyFnKOpqWmuhyDM\nM2RNXd5EVl7VxIvdXhy1b1nZCrzeurh+RZutgHffnRI+TqebioobEl7/nXf+jcLCKwkEBhPulxmT\n9PefSnlvv78Pn68Xi8VJeflKzp9v1YsIuVxLKS29lpaWh6Iq1cYq4mPc5nCocxopDCMFo/aaPPHE\nEvbuvZX9+9dHVc51Ot2sW/cs69btjXmt+ZRmK+9Ruc4ZVPfdAVQROvfImhKMKIrCL37xCxYvXkx5\neTk/+tGPePzxx7nhhsR/v9JFPJ6CIAhCzpFKamaqaJE0QI+ktbQ0cNNNj7F793ImJkax211UVq7m\nK195Iupakem1p08/SGmpl4MH6/Rte/fezOhoFzZbEePjw/qximJncPBjhobOTnv8qRLej1NDTfMF\nsNkKGR+/hN3uMvT69DE01AaA0+lhdPQ8Pt8AQ0MfAvDMM6soLAxPnXU43Pq9a55YTVhqcxtZQChy\nu/E1uXRJLcLk9dZRVLQUiyW+XzRdn6ggzA5mue8EYWbweDyz8mVERh5PRVGWAP8JLECt+74tGAz+\nW8Q+4vEUBEEQTCWWl3C6JKq8unfvrXohHqfTQ0XFDdTWNtLa+oguNgOBQb1irOZh7Os7qafoVlXV\nY7Xm8/HHBwgEBuOmuloszrjPzSRWax6LF3+VW275V1pbH+ammx4LE8oVFdczOPghPt+AIXUXbLYi\nJiZ8evqx11vHunXPhr02TmeF7gnNy6vkvvtOh81dvC8NtNfEbi8hELiovzbi4RRyE7Pdd0Iukszj\nmSvMZVXbAPDfgsHgm4qiFAGvK4pyKBgMns7wvIIgCIIQFzO9fPn5FTidnpgCSEsNtdmK8Pl6dX/h\nyEiPLoDy8yv1sVgsTn07qGJ1eLiTgYFTYV7RWMy86JyKbhqpqLiJmprtYdHCe+89zeOPVzI+Psy5\nc8dwOsvDRCcQFrkF9MJIxtfG4XDT2XkYgLGxrqi5i1eJ1hh1bm19GKs1n927lzM21guoKc/i4RRy\nB819l000AM8DPuB6YA8iioWZJiOPZzAY7AoGg2+Gfh4GTgOLzBiYIMRDfAmC2ciayj3M9PINDbXh\n8/XS0XE4qrCNdp0rrrgZmBK6mrjS2qIoioOLF9/j4sU/8O67YLUWkpe3AL9/iJ6e44aquOr3vQ6H\nO2nRIQ2bzWV4pKAKyPQxFt8x0tV1TL9vzVt55MhWrNb8qasqqV/T+NqsXbtHb8MSOXfaY2PRoJ07\nr9HbuaxZsxuXy8uaNbsZGmpjdLRLTxd2ua6aVx7OZMh7lGA2TU2vAl1AP3CYbPGeCvMb0zyeiqJc\nBawEWhPvKQiCIAiZYaaXL1H0VLuOzzcQ5kHMz69AUWyMjw/rkb9AQI34KYqViYlLTExcirpWMDiO\nzVaA232d7pNMRFnZ9fT1nTCegVhRy1Tw+S7E3K4oNnp732T7dneowJGaQuVweEJjWMG6dXtpbX2Y\njo7f4PNdwGrNZ9Gi22lvP6Sn2p4//zt8voGo1+a++06HzV2kn9Mo4rWeoTt3XkNFRbUeATV6YMvK\nvkBNzfZpzYEgCBpOw88rEe+pMBuY0sczlGbbBPwkGAzujXgu+OCDD3LVVVcB4Ha7WbFiBTU1NcDU\nt3jyWB7LY3ksj+XxXDz+oz9aQUtLA5OTf4zDURT1vMXSyMDAGVpb3yE/v5JVq5YQCAzS3Kz6Opct\nA4B331X//+IXFzA21kNX1zWMjHRSVTWsP2+x5HH11T4gqO8febz2+IMPigkEBuM+b9bjlSsXMTLS\nzbvvToQ9399/EzZbHn/2Z2rV2KamJkZGuujvf5ivf/0lXn/9Q158cS1XXz2un2/Rotv4i79omtb8\n/+53Z+ntfYNlyyzAZNj59u69lZYWdb7vuGMj69btzZr1I4/ny+O7gHZqahYBjTQ1vZll44v1+B+p\nqRkGCmhq+h4Q/f4
V//ELwP+gpqYC2J4j95vbj2+//fZ54/E8evQob775JgMDamXzjz76iF/96ldJ\nPZ4ZC09FUezAC8CBYDD4rzGel+JCgiAIQk7S3NzA2bO7CQQuhm3Pz69kdLQLh6OU0tJr9eJCpaWf\nZ3z8UiiaacHjqaav74Tuf0wVq1Ut6mP0i84csb2fDkcpixevY2TkXNxCQGqU9GJofzdbtnw47RRY\nn2+AnTuv0YsRORylbNnyAU6nO2EBqHQwsxqyMF9oQG13chI17RSgnuzzZMaiBrVNC+TOmC9fpLhQ\nhh5PRe0u/b+BU7FEpyDMBNq3SIJgFrKmhHgMDJzRhZXmz/R4qqmre4Wqqnq2bPmAr371BbzeOgoL\nr2R0tIfXXvsALSW2t7cVu10VN1N9QRP+XQZgYmKYrq7mNEdr0ceYHrFTd/3+fj74YLfuv2xq+nbU\nPgsWqD3eHA4399zzZlwhF9krNRZq2q2W/qdQXFzFkSNb8fkGTPP0Gv2kkX7ebEbeo2YSrcemJjpz\nqeXJ9Nu0TG9NNaCK3fWolXoFIT0yEp7Al4BvALcrinIi9O+rJoxLEARBEOYEo0gaHHwPUEXnXXcd\n1cWPVvTG6XTjdLpZt+5ZJif9jI11R51vwYJqqqrq2bz5JFVV9TgcpWHPxxeL6XwzbmHLlg/Iz1+Q\nxjGxiBTFxsfh42lubiAQGAWsTE5O8Mwz1XrPz0hSFXyFhV79Wr29r+v7a77RTCOUZlZDFuYLmnhb\nAdQBh8id6q6NqJHO2RqzJtIPIMWIhOlgiscz4QUk1VYQBEGYYcxMoQzvQ1luKMqjsHDhl7njjr0x\nz//LXzqjUmq1VNzh4TYKC704HMV0d78clbprJP1+nqrodLm8YX1H06WsbAWjo12MjnZFPWe3u9i8\n+W1OnPgpbW3P4/P1MTk5QWS0tLBwMQ888EnU8ammyk7171T9rZmm1kYSWSRKEOa+x6aW6luAKiTN\nHoOZ51+PKjqryS2Bnh1Iqm3mEU9BEARBmBOMkcm+vlOmpVAao2Iez0rDM0HOnTsW9/zGViWKYiMv\nbwElJdfQ3X2cS5fa6ek5Tnv7AaxWZ8zjVSw4naWk2jJFUWxcdVUdR48+yP7967HZ8pMfFPGn32Yr\nZMmS9ZSXf0Hvk2l8DiAQGKK19WEGBs4wOtoVEtjhotNqLeDrX38p5hXVXqkVOByJP6hqKbVadDhS\ndKaSspsIsyKnwnxC67E5V2vCGEW8mvTTWJOlv5oZpZztCKswF7z33nvk5eXxzW9+0/Rzi/AUcg7x\nughmI2u1e9m8AAAgAElEQVQqNzGmbw4Oqu02pptCaRQ0q1f/XBc9a9bsQVEc+n6lpZ9n9eptMQVQ\nRUU1AO+/n0cwOM7YWA+9vW8AasQQ1JYhbvdyLJZ44nMyFHFM1jJFFbb33/8+Y2MX9Hm4cOFE2Hhj\noSjhf/rHxy/R0XGE999v1Ptkqvs59HtSW530Y7Xaw46120vYsKGFwsLF3HvvKVwuL7FQe6Wep7Mz\nvFdq5DxqwtCYymwkVz2amZJb71HiA0wP7QurIqCX9AWiUViuInruY/tAp7em5lqkC7PB97//fW68\n8UbUUj7mYlofT0EQBEGYTYyRybVrn6K19eGUUygjU3M1QQPQ2vpwWB/KpUvv5sMPn8HhKOarX30e\np9Mdtn9LSwMOh5tAYJS8vEqCwT79WK3HpcXixOFw4PerIlEtCK+hkJ6fE0AVti+//H9H9MGM3avT\niCoup65ptRYwMTESYz8/Docbp9ODz9dLR8dhFMXOpz61FovFjsVip6ZmB06nmwce+CRhunM8b2Xk\nPCbrzTpdj6ZUs51NNCEEqoCajUqrM52uaiaRY20MbesHDqMKxHxUAXkW8ALFxL8vTVh6gAuA1h94\nOXDacP65SiUWcomdO3dSWlrKtddey/vvv2/6+SXiKeQcWk8kQTCLy3VNZZq2ONcYK
52eOPFTRkZ6\n9Cqoye4tMnKWSNCMjJwjGPTj8/XS2vowEC2ABgbO0NNznLGxLq65ZjLqej5fLxbLlNjUBGnoERZL\n4ihlOMY/3Qr5+RVYLE7Gx0dTOtpmK+aee97EYsnH4SiLm57rcJRSU7ODioobwsbd3/8OX/vai+Tn\nL+DgwTp9jo1zunPnNWFzH68qbbpCcrrVbXM9Uppb71HTr7Q6fXKp6M3zTI3128AjQE/oOa24UVto\nn3bgOInvS0t/tQCDhu1doWNiRymj15REqueahgaoqYH162FgGi9BpscPDg7y6KOP8i//8i8z5kUV\n4SkIwmVBQ3MzNfv2sX7/fgZ86RRumb9k84fxVNtvaOmYkfeiPf7v7eXcuPP/jXrdjYLHas3H7x/E\nas1DUay6eI21ryaOIgWQcR+HowQARbGiJRaVl69k06ZXyM+vDJ3VmMJkxWo1ir/o9Can04PDUUpe\n3gKuuOKPQtvK6e5+mfffbwwVI0qWnhu6mtVJUdGVLFhwI35/X8woqcNRyj33nMDpdFNb2xj2XHn5\nCiC+eFcjr+fD1lWkt1J7fScnA3i9dSkLyel6NKWa7WwyFz7AuRC706EBVRBq+JkSzYcBO+qcafej\nfVlVAjwWca7PAg6gAlW49kc8n+5c5JJ4n5+cOQPHjsGBA6qInO3jf/SjH/Hd736XRYsWzUiaLYjw\nFHKQ3PK6CNnCmYEBjnV1caC9nYaWlrDnLtc1lc0fxtMVxUbRMzY25UXss32ak75S/XXXBE8wGMDr\n3ciddx5iaKiNnp7jTEyMcf58a9Q1a2sbcbmWYrU6dVEaKYCMQrSi4j8oLFxMeXk1oHomh4ba2Lfv\n1lC7kMjU2gm9yq0qQKMLC/l8vfj9/YyN9dDd/dvQtgHGxnrCfJnxcDrLDec6T1PTt8KKIRlRFDsV\nFdfrAtrpdLNw4W2A6nHNy/Owb18N/f3vAFPrR5uDBQtu1rdbrfkxv0DQXt/OzsNYrfYZT301qw/o\nXJFb71Fz4QPMlaI3ZyIe24ktmrX7WRV6fBG4nfCIZBcQQH2POUb4e8oiEs9FA01NK0jFCyrMHgWh\nl6C6GrZN4yXI5Pg333yTI0eO8MMf/hBAIp6CIAiZUGBTI0/VHg/bVq+e49FkB9n8YTxSFCeLgKq+\nvQrGx4fp7DyMzVZIVVU9i69QP7hpr7smeDo6DmO1OsKilXZ7cdg1NZxON+Pjo3R3q1Vpn3zy01Hj\nMArRgoJKHnjgE/LyykL3UoTf38elS+309rYS6ee0Wl36ddUiRPGFpM1WBGipvKlFODdsaGFiIhCx\nVdFf/8g+osFggI6O8CJAd9yxl6qqejyelXz00XN0dR3D5+ulsHCxvn60OVi7do++roaG2vQvEHbv\nXq7P2Wx/6SHVbLMZM1I8s7nojfH+jN7uzwE7mBKZF4Ey1C+mqlAjnGWhfatRxaQxImk81xeAL4V+\nXgGsQU3bjTWnDaHrvhU617LQPrki3ucvjY1QXw+HDoF7Gi9BJscfO3aMjz76iCuvvJKFCxfyT//0\nTzz99NNUV1enP5AESB9PQRBynobmZs4MDFBgs9FYW4vbGV0xdMDno6GlhW2rV8d8XsguIvstGntr\nVlXVxyxCE6tXZOTrHmsf7Vo33fRYVIEirShNd/fxqMhiVVU9Doc7btEa7bw+X79emEf1dlpRRaON\ngoIrmJjw4/cPUFl5C11dL0f4P9V9S0quxe2+hkBAFdbxsNmKGB8fDtvmci1laOjDsG2LFq1h7do9\nUXOrEa9/ZuS+DkcZ99zzBi6XV5+roaGzFBZ6GR5uY3x8GL9/6oOv9tql0k9TCgLlItMp8lPDVDGi\nemanGNFsUsPU/S1AFZG/B5agFg2qQPV0vkT4l0mLgHdQo56LgNcAX+iYk6F9bkEVmk+EHmtFhOoM\n16xELTKkvRbG8WjMx3nPPrK5j+fo6ChDQ0OAG
u38x3/8Rz766CN+8YtfUF5eHrZvJn08RXgKgpDz\n1Ozbx7Eu1TdTX1XF7jVr5nhEgtnEEoyRpCJmUtnHSKTQsttdBAJD+jgOHqxLKIibmxvo6zvF4OBZ\nCgs/xYULrwNgsThYuPDLnDt3nMnJ5EWBvN6NrFu3F59vgJ07r8bn68VudxMIDGH8sLpkyXo++WR/\nxNFWYkVHi4qupKhoKRaLnXPnjhEMBrBa81m06Ha+8pUnosS3zVbA5GQgSvhaLE6++c2usLmIRaLX\nLhapfNkQOb54AtW4T35+BUNDbSJoZ4Qa0heR61Ejb9XkVrQtmcjWnn8FVTBqeFCLAPlDj50Rz2tY\nUFusjBCdBVEJ3IEqWGNdX5tTDeNrEfncCuBojPELZpPNwjOSv//7v+fs2bP853/+Z9RzmQhPSbUV\nco7c8roIs0GmabSyprKfVNKCW1sfCatsG4t0Uy61lNCyshV4vXVs3vx23KJCWsqocT0Zq90ODJwK\nbbUyOemno+NwSqLTYrHT3/8O27e72bnzaiorvxxKK76EUVDa7S5uvfVnRBcnMorOqeeGhz/WfZYO\nRwlWawF2exHd3b/l8OF6fQ6Nflu7vYiqqvowz+jkpI9du5brVXvt9pLQ/2rqcnn5St1Pm2zejSnV\n2vmSpeOm4gc27vPxxweytqhWPHLnPWo6PsFcTfE0FuOpRE2LXctUaqtWvdYoKq2ovTr9hm2xfz/V\nlPpBYqfedwFPEr8YUGNoTBD9WjQCG2lqugnYiIhOIRaPPvpoTNGZKSI8BUHIeRpra6mvquLQnXdK\nGu08JRXBmG5Boli+0chtmuAtL/8CPl8/LS0PhUVLkwliTZg6nR6Dl3IixjYj4cWFNm16jdHR8wQC\nF/H5emlrexaf73xESi4EAkO0tj4c8oFGoyhqlDUWPl8vExOjjI2dx+/vD/N4GsV1Tc121qzZzd13\nv47FMvW7NjbWpYvSzZvfCv1/kqqqeu666zesW7c3JbEfKXIjizrFIhW/qHGf8vIvJt1fmC7TEZHZ\n7M+MJJZf04YqLrU+nNp7T6woZnSrpaltVuBOwr2bidB+/2NVvHUDrtDYPkT1jxqf2wv8D9TU30Re\n0Jo4zwnC9JBUW0EQBGFekEo6LkylXfb1ncTvV1sQaKmc8dI7jdudzgoqKqqTpmka02xdLi/nz7cC\n4HCoVWLHxnrp7j6u719evpKioisZH79ER8dhLBY7mza9Rnn5F/jVryrw+XrDzq+l/Uam/2qpuEYs\nlnzuu+80DkcJO3deg893Puz5SG+oxeLA47kBh6OY1at/zvPP305BwSIcjmL9vn2+AXbtWs7YWJd+\n7dbWRzLyZUa+hslSmSH9FGsgrXRr4XLls6iRxTHUdNQy4HWmem6WoqbJjjDVP9OKWn12D6oAj+/H\nVlkGdAJDMZ6zMyUujdiIjoIuCe3rA64PXf8qpgTnYuCTGOeqIX5qdKLnhOmQS6m2iRCPpyAIOUMq\nhYCE7CJXiryk6t+M9G0aRdMHHzyF399PeflKyso+r3sBNW+jUaAl8h1GXic/v5LR0S69P6bL5dVF\nliY4a2p2hBU7Mt7H0FAbu3YtY3LSp+9/yy3/kxdeuJ28vAUMDbWxadMruFxehobaeO65W1AUK4HA\nCH5/H1dccQvFxZ9maKiN/v538Pl6sVjs3Hnnb3jnnX9jbKxf927a7SWUlHw2VIFXjcwGgxNRIj3W\nnCfzZSZbS5HnS/XLhFjkyroVspEG4P8jtcrRsTzUTtRIZjD0L955FFRBG91LN1pg5qOmMmuCNNYx\nGvWoKbS9ofFVAx2AF7U4keYJTeSvzVXvbfYiwlNSbYUcJHe8LkIsEvXTnCuycU01NDdTs28f6/fv\nZ8AXK2Vr9kg3hXWuSNW/Genb1CvgDpzRxVVR0ZVhrUD6+k7i9W7Ue1Rq/UJjpX9q68mY3llX9wpV\nVfVs2fIBL
pcXmErTjUxFjXUfJ078FIejBEWx43AU43CUcPTog/h8A5w/38rYWBetrQ8D4HJ5+cY3\nOnC5qvD7LwBBuruP09b2ot4GxWJxct9977Fw4a16CxSvdyNebx1bt36kt4IBi95DVLuXyFYzxrEm\nS3uNtZaM6c1A3P6o6QrHXFm3qZCN71HzD2Nq6SkSi04tHd5D7PRZH1M9NhOdJxjneIiOavpRxWYX\niUXnClRP52uokc5qoBVoB46jeULVNZUoNTpXvbdCNiPCUxCEWUX6aaZGNgn02e65ONNoYmbDhqOs\nW/dsTNFUU7NDfwwwNtaD1eoItSCZ6heaSNDU1jbici3l0qVPePrplfh8/WHPp1PoaGDgDGNjPQSD\nAc6dO8b77z9JV9exuIJQTfM9GXYOi2XKOzo56dOFqjaWdev2sm7ds7S2PkIgMIii2NE+FCuKjSVL\n1idNYfb7B8nLq2Tt2qcSel6N400kEDPpvznf1q0w0xiLBZ1Nsm9F6N9FIvvypk9/jG2xgkbJoq/F\nqKL5C6i+zYeAt5nqBVoS+t9YbCiRvzaXvLdCriCptoIgzCrSTzM11u/fz4H2dqo9njkvmpRuC5Jc\nITIVE6a8f62tj9DXd4re3t8xOekPS/VMJ/0zMq03WXpuvPRQ7ZqRlJWtwO/vZ3x8lMnJABUV11NQ\nsIiPPnqOQGCqoEh5+UruuONZdu1azuTkKIpix+NZhdNZFtVeJF5bFK2lS7wxx/LMRhJrLaXrzU01\ndXa+rlshVRK1O9GeO8tU+mkA1ZPpCe0T7pMOJ57/ci7ZiFo0qIZwb+Y21Pt9DHg49Fh+H+YCSbUV\n4SkIgpCViECfeeL5EZubGzh7drcu3AoLF7N589u6eElH0BgFY3n5Su666zcpC9W8vEruu++07vts\navo23d0vMzbWE+YLjRSKTqdHLy5kt5ewaNHt1NRsp7X1Ec6e3UUgMBh2TaezQi825HRWAMGo4kQA\nXm8d69Y9m3DMkH6/zul4c5MJeCPi9cxmkvXCzIQawgWY23CtQdS0UyN1qIKyM8ZzELuoT7bgQo1u\n/hR4CjWKWgTcjFpoSNZ8NiDCU1JthRxEvC5CLDLxRJq5pszyZrqdTnavWSOic4YwpqKWla0IS8Uc\nGDiji06HozRMdELy9E9tPWmpp07nApYsWZ9UdAIR6b1d7Nz5Gd37uG7ds9x337u4XEux2QqYmPBH\nHVNevhKPZ4Vh7G/p6cTqfamiU2vjYrMVhQlRn++87gEFtXKudt6amu0Jx2zs19na+khUq5p4pOvN\nTTd1dj54Pefv3z1jeqtZr43m1Xwn9FhLLTVe69XQcy7DPsWoFWtfi3FOK9kpOrXWK0Oo0cwzTKXu\nDhPe3iWc+bumhGxGhKcgCPOCbPFEZss4hMQYCwmNjHSGPacJHK0C7XQjZAMDZ+jpOY7P14PdXpjS\neWprG0PeShWf7wLt7QfYufMaXYAWFl5Jd/dxfXswGGDJkvV4vXXcdddvWLNmj17IaP/+dWzf7uZX\nv6pACX0P7XCUcvfdr+N0ehgfH2ZyMvwLEoejlPvuezfUi/NtvQBSvPFrntmyss/j8w1w5MhW+vtP\nZdxTNd510i00NJ+8nqnMU26hfWli9B1miiYwe1GL62jFcbRrFTGVJrsaNRp6LfBc6LhYXximUt12\nJlmAGnE14gZuC/2szZ92j8UR2wUhO5BUW0EQ5gXZ4onMlnEIiYn0TBYVLaWo6EpstgJWr/45ra0P\nZ+wNnG4rkBdfXEtHx2G9P2dkCxe/f5j29gMptXbZvt2tR28VxU5eXjl1da+EtXMxoig27r//fb3y\nbjoYU2Gt1nwmJkax24vZvPlk0vNNN402FeaT13Mm52luGECNyJnpO4zXBkS7Vj9qJND4fA1TabnZ\nSDmq8OwOPbYCbwBXEj5/2j2KnzMbyfZU25qaGlpbW7GFikAuXryY06dPR+0
nHk9BEC57ssUTmS3j\nEBLj8w2wa9dyxsa68HiqsVic9PSovi6zPtD7fAM888wqCgoWYbcXR/kLY3kPm5sb6O8/RW/vG5SU\nXMvISAelpcs4d+6YLmBBLYKk9d50Oj1YLFYmJvxUVFzPmjV7aG19hIGBM3R3v0wwGECtkhkMuz/j\nHIDqB928+a24IjFyvNo1tMdHjmzVhbaiWDl/vjXl+cykX+flhMyTRiJvaDIxG/l8A1O+yGxm6ndY\nZSmq8JwJf6wwE2S78Lz99tv55je/yZ/8yZ8k3E88nsJlhfgShFhk4onMZE1FejrFm5kbOJ1u7rvv\ntJ666XCoqWlmpGNq68npdIelxUamnMbyHqpi8TgTE6P09b3O2FgXfX2/Jz+/kuLiz3DwYB1Hjmxl\n9eptrF2rptS63csYHe3G7++no0Nt8aKdOxgMYLXmsXDhl6PuT5sDr3cjRUVeyso+R0vLQ3FTOCPH\nG/k4PBW2LK35zKRfpxGzUlGzqY8uTK0ps+YpOzH20Uz22kV6Q43HQuI2IMY2IQ2hn7NddFoJF502\n1F6eifyxiedTPksJsZhpYRyZMC4IgnDZ8Y9vvcXfDQ5SYLPRWFsbJRobmps5MzAQ8/nn29roGh0F\n4NtNTTy7bt2sjj1byYVKolpRG1A/0E8nHVO7z8HBs7hcXuz2Ymy27+nPJ/IXDg6qvQLt9mJuuumx\niN6bFmASq7UQn09tFt/RcUSvPtvS0oDD4ebcuRbGxqYq0JaXr2T16m0cObJVv64xShp5f1r/TmMK\nZ0tLQ8wIZeS97Nnz+bDxZzKfxmMj5zadNaSJ4UT3kQqaVxugoaWF3WvWTOs8ZhNrnsxhJqvLpoom\nJrXxJLrPSG9oXZJjY7VPaQSeR+3Fma0UovbfXAK0GrYXMSUmS4nt40xnPoWsINNfQxN+jf/qr/6K\nv/zLv2TZsmX89Kc/5bbbbkt+UBpIqq0gCFlJPLGXSATGe17bdnZwEK/LRbHdHnZszb59+ofM+qoq\ndq9ZE3aewUCA492qt6YyP5/T996rH1u2Ywf9frW66JVFRfgnJvBNTHC9x8OetWsv28inUcg4nRVU\nVFRnrQBNF6Mg+tfea/loLIgDP9/lf1PAaFhqqeYvtFrzw3plOp1unnvuVrq7p9J7R0Z6ovpn5uUt\nYGysB4+nGofDTWfnYTyeakpLr43q1VlQsIj6+nf09iuJhF+kqDOmyRqjacb9Ir2vkeM3QxAZr+f3\nD6ad/qylojqdHkpKluFwRKc4p8L892pHfkI1Crd65kakxPNmQvR4tW1aumyyY3cQ3XfTgxo1zObP\nqG7gj4C3UNu8aCxArcBbCpxAFdORaHPiAZYxJbZz/z04V0maaltDZr+GGR7/6quvct111+FwOHjy\nySf5sz/7M958802qqqrC9pNUW0EQ5h3xqsMat6965pmodLhYx2nb2kdGON7dHXXOgpCRvtrjYdvq\n1VHnOTs41W6ia3Q07NjrPWqz8UKrlUG/n67RUfr9fg53dl7WVW216JjNVoTPd35GWllkklaZybHG\nFNOPfXbeYxnv8Hn+i29ERTa1CNXQUFtUWq3dHp7eq82Z3V6ib9+06VU9tVJLrb3zzkMMDbWFic7y\n8pW66DReN57oPHt2d4I02aljjPfa2vpw2Dkjx28GxutpEeF0zq/dR0nJMnp6Yqc4p0JjbS31VVXz\nVHRCdKpq8uqyxt+Zo0cfnIHquo2on5YjhWOs8WrpsjeHfn4VVWjFEp27iRadCmrV27kUnZH3GOvz\n+gDqPY8ZtpWg3m898AGxRSdMzacVtS/pAeBb0x+uMPNkWuQ5w+NvvPFGCgsLsdvt/PEf/zFf+tKX\n2L9//zQGEh8RnkLOIb6E+UmkpyqWGIRwkbiooCBKZGrPF9ls9Pt8YefSKHU4ws75PZst6kOm8Tqv\n1NVRmZ8fczx71q7F43RyaWKCgVDkE2B
FWVnYfmbMyVwxnXFoAmDBgpuBmWllkUl/xkyONaacXll5\nAwCryor5G+8wd955iN/+9s2Ex2jzECn2tMebN79FX1U9P7/zEPe5vFSHxJ5RTE61fHGn3CPUeO/G\nPqVaBDOWUE2UKjwTfkPj9TZteiXt82v3kalnd7a92sm+CDH/717kJ9REok8l7AuXj/fPQG/UR1Cj\neFuZSiON15NTows1VfYCcDLG2OOl0mZDlDPydS6JeKwJ0ULDz6XA14AHUft0JkIT537DtilxK5+l\nspDkv4Yze/wsIMJTEISsIDJSGS/iYNxebFf7HRrFYGNtLR6nk+HxcQ53dOjnyrdaAbApCk0bNoSd\ns8jhiPqQabyO1+Xi9L33xhyP2+nkhooKAFaWl7OksJBypxNPSKiaNSfLd+9OKPpmUqROpzepJgCM\nUTqz02wz6c+YybFGwbX7jjupr6riyIZN1K2Ln9IZS6RFij3tscvl5ddrdnPY6Y5bNkQ735YtH/K1\nr704rb6Wxj6l8YSPcdw/aD0Ztsa08ba2PmJa9Cs/vwKnswKHw43DURI3apuMXCvCk8kXIdMj8hOq\nseBObIy/Mx7PCv1n875QioxqGrdF9uTU0HreFgAvhY5bCJQBawmPFEZ+5E2YETgHXIp4HDRsv4B6\n/xtQ5ydRUaFIrg/9vxLYnvkwhZkj+a/hjB1/8eJFDh48yNjYGOPj4zzxxBO0tLTw1a9+dZqDiY14\nPAVByAqm46mK17ok1rlu3buX4z09wJSPM1WS+UoHfD5WPf00iwoKODUwoHs+66uqcDsc+rEV+fm0\nDQ3FPU+8OdGIHHeYD9Xvj7q/ZONOlWz1u2XSn9Hs3o6ZFlOKPH5TSHTGcqxlSqx7T6U/ZCwvdKrH\npsr861OZGpm1SZmdwkDGdQOxi1VlRiyfZiLvJkAbcCuq6Pwp6qduY4RT80LagMnQv2zAguq97Elx\n/xLgI8K9uA7gBuJ7N7V1YUctRrQ9xj7CbJLN7VR6e3tZv349f/jDH7BarSxfvpwf//jH1NbWRu2b\nicdTqtoKgjCrxBNDjbW1afW/NJ4nkljnKnY4gOhU2VTGF6/CpXHfRQUFuvAzXqfu4EH9WKfFgm9S\n/eCTSgXcxtpalu/eTdfoaMxxG8dVmZcXdX9mVeZM97WZLTKp8JnKsemIyUyrqUYe/98cbm4bOMNy\nWwH5tY2Q5MN9OmONde+pRIDjpb9nEj2OxMxz5RLTraqsMjvVSyPXjXlfChgFUh3hAqmR+D05teM+\njyrMzhAuOhXgfOjncZPGahZfBl5JY/8bUe9fS5EuBa5B9W5C7NfduC48qCnMUlxIiI3H4+HVV1+d\n8etIqq2Qc4gvIbeJl7aZyFMVK4001nm0/bYeORIlkhIVCzGuqcjzNjQ3c7KvD4Ayh4NjnZ2U7djB\n2hde4FR/f1QBohVlZdR5vfp1jB/W8w0iOZXvPB9pbeXTxcVU5ufzVIwKuWE+1E2b4vpUPU4nncPD\n007DNb422eI7nQ1STX80tkEpL1/J5OQfJ9w3VlpqpOAaHThDadcxulJMvcw0VTOV1NR4v0NmprXm\nWoqsWSQqBgXJ/u5lWpFkrtEE0mFU8Wmcg8jcQWNvylPELpCkESQ7vJyxOA7Ee/+0od5fsWGbdm9a\nivQHqOnEMPW6R/bt1I4pQk1VDk/Nlc9SwlwgEU9BEGaMWNHDWFGTRC1QItuZLN+1i9P33ZewEi3A\noscfZ1VFhd465ZHWVnpGRth65EjCtNMwsXbpEofb2/XUWUVR6BlTPUOHOzv1gkMepxOvywWKwt51\n69SfQxijhfWHDnG4s5MVZWXsqKlJOn/GHqE/fPnlqAhpZCQyMhJrt1rZ6PXSOzqqR2Mz7Uk4nSiq\nWSm/s02q0beBgTP4/WoD+qKiK3E4ihLuGysyGhnxSjfyl2mkMJUIsHGNpXusmeMQIkkUFcwFEgln\nYxp
xBfAcU1HNyhjHLUctOFQNvBbjWjayI/oZWWW3HNXHaWyPshZVjK9AbQcDU0Icol/3yMi39nx/\n6Dy5+sWEMJ8Qj6cgCDNGLE9YLF9mrP2M2yrz83UBBqrQW+HxUGizsaOmRj9PpCdSo76qip6RkZj+\ntEi08XVeuqSLXVAFrtvh4HCn2kttRVkZe9et4/bnn+fC2BiD4+Nxz20UgpFjbmhu5vm2NrX3Z0UF\newxRX2OP0I1eL3sTpOYm8nsO+/2meTSn4/eM5w1Mh0w9lNMhVR9oOv68VPdN14Nqhmc1kzmei9fH\nbF/t5RRhnXuMgvLnwMPEFs41TImpCqZSZzWBZjzus8A51IJCdwH7yA6RCWqCoZUpwWlFFYKtwBdQ\nxxo5BwOk94WC5octQm0zsyd0XLrnEWaKbPZ4poP08RQEISuJFZWMlVIba7+odiYhD2ORzUavz8fh\njg4cVmtUOq22n1bxVotcvtPfrx+vtVmJhTY+7fhyp5NyhwO3w8Evb7uNOq+XjV4vRzds4KcnTtDn\n8ySAV+QAACAASURBVOmiM7JNi4YWJTSOWUtZfeqDD6Z6f3Z0cPXOnXoaq9YjNJUIaay+o9p8mtmT\ncDrniucNTIfZr/qZPP1RI5300FT3TfXaxv0dDjcHD9ZNu7rsXLWnmS7Ga+7ceU3a9z0XY85+ItM1\nZwpjBduHiV+K0xgN/WLoZ2NU0HhcFzCI2j7kWbJHdK5FjWYaP48XAi6migVF3gukX6K0EdXLOYwa\n4dTWdKalUgXBPCTVVsg5mpqaqEkhTVGYe2IVpdEic2eHhvAWFlLscFDicFDhdOIOFQAyHptvtfLg\n0aN8rqyMm+12vU1KpIjRzvu58nJustn4n7fcwsOtrWGRSwvox3/xqadY6nJRYLPxPZuNu+64I+bY\nO4eHOd7Tw+HOTh5ubQ1Ldz0zMMDFwFTK1BNf+UpMMZYsLVijMCSqNX/pnrVrUy7qY7zGU2vX8nBr\nK9tWr+aR1ta4RZimQ7x0y0SYUZwom4vORKaHJnqPmslU0kwLHM1Vexoj6UQhtWvabEX4fOd1AZnq\nfWfzmorErL97yed3dgoVpe5LNaaTamPKR43o+VBbhWiRPWNrlQDR6azTwYIqwl+I87xWNTeSItQ2\nKM2oVXcJjVvrqTmIKg4row+dNm7UKrdaFeDEa1o+SwlzgaTaCjmHvFnmNsa0Sw2P00lvKAIZmYoZ\nmaa5bfXqmCImXjqnMTX0/YsXGQgJxXKnkwuha942NETTX/wFn921i66REcYmJii22xkPBrEoChd8\nPpwWC5PBIEHgS5WV7L3jDrYeORKW2msFbq2sjPIyxkov1sa1oqyMRYWFOCyWMFGdbnQyXmsZM9Jc\ns4FEqaTZljI5V+9RqabxxpuveHOcyvymkuqbynnSaaeiXXNsrJ/OzsNptyIxu6XOTGLWmko+v8na\nl5hFvPTPVFrD1DAljkEttrObqdYqt4Yem9U6xYNanCcSK3AWeDBiPEa0scGUZ9MFDMXZJ5J0W+Wk\nnlYrn6VmH0m1FeEpCMIsowmuErudi4GA6p10OuMKLm3/IpuNm6+4gkUFBTF7YUbut2fNGh5pbeVU\nXx9nBwd5ZdMmvtvczOGODlaWl9M9MkLn6CjFdjsnN2/G63Lh3r49LIKpoQBWRWHc8F5W5/VS7HDw\nn++9p2+zMPVRR+vhqfs3PR72rF3LzXv30jUygs1i4aYFC8KipPHEoxnznaqYzcVCQJdr78dIjELq\nB0533I+r6c5XuvvHE5jJztPc3MAHHzyF399PeflK7rrrN7Pmb71cSP7lhFl+wOn2Fq1hSsTFE2Sa\nOAa1sutypnpZPkJ0L89MsKPeQ+T5lNC1J1E9mu8DHRH7lKJWn53ybDawijMsoIATNOLHnVTg15B8\nPoRcQYSneDwFQZhlNI/gW5s3617BPWvWxPUNNtbW4nE69Wjgk++/H
7MdS0V+PjZF0fdraGnhzMAA\nx3t66Bob4+HWVv06v7nrLpYWq6XqBwMBlu3aRdmOHYyMx/YEBSFMdAI0nzvHzrNnw7ZpolNLqT0z\nMDDl3+zspKGlha6RES4GAlwI+VS3Hjmi+01j+V8zbV+SriczXrubbCaXUiZnEqMv1Oigi3Qvpjtf\nQ0PqOrfbS7jppseS7h/PO5nsupHVgdPxt6bjh72cSe4xTtS+JB3PZ6IVqBHr3Mkq3NagptCuBzai\nOsaOh67zbcJ7edpRo4vTQUvbDRAtOhcBt6D6NvtR7zNWRHQQ+AxqJBbAzRmu5BitHMBPA4tJHlU2\no1XObPl2BSE5IjyFnEN6T+U2mrjyuly6yErUw9PtdHJDRYX+OBASgJq404TZ821tujjUivyE9dC0\nWqk7eJDhUJXYtiE11ckK+E6fpt/vJxAM4rRYuL68POl99Pn9+CfDU7lcdjvrlyzh2tJS6g4e1Asa\ngVogaNvq1dgt6tuuBfBPTnKgvZ2rd+5kyX/9F7c+91yUwMxUCCaa21iYUQhotsm23o+pvEfN9EfB\nRB9X052vwkIvAIHARVpbH066fzyBmey6xuNqanYkvc7lhFl/96JFerKVmIqAjEUqginWubU+lbEE\nmbHf5+9Q/ZLGL+N+DbwV+tkOrCLc95kMLVCzErgt9LM1Yp+VwDuE99gsI7yQkXbNCVRxugxtbgtC\n46jGwza8wFYSvwMkmg8jiV7H2K+hfJYS5gIRnoIgZDUNzc0MBgI4QoJtZXk5VxYW4rRY2HrkCPva\n2jjW1aW3HSl1ODhxzz24nU4q8vPxhITt2YsXdQG36umnGQztPxFxva8tWcKCUH/OVLEp6geWodA4\n24aGONbVRa/Px6KCAr0Krtvp5LW772ZxYSGrFy7Uj+31+WgfGeF4d/eUEH3iCW7du1cXr5p4ziT6\nmQpmVsCdLcyKeM1mXGC6H+dTJdHH1XTny+FQP2SnGiGNJzCTXTfbvkC4PEi2EqcbcYu3Ao2/ZZpA\nM547UQVWbSxFqG1VDgDG96gxpn5zi1FblfQBi1EjlLEwCkstq+VK1KimhymB+TnUCrS/CY2tMfR4\nI2qqr/aXpIQp0arhQ5vbRhqpp55DLMOtR2qvJv67TqoVaRO9jmZETYXLhZ07d7J8+XKKior4zGc+\nw0svvWTq+cXjKQhCVhHpMVy+e7few3NRQQHv1NdTd/CgXjDHrih6FNSCWjRoPBjkeo+H0YkJvaJt\nZV4eXWNjVHs8OC0Wvc+l2+HQxd/K8nJ+c9ddACzftYuusTF9XEU2G8MxUnEVpj6uACxwOvmCx8Ph\njg48TifLSkoodjioyM8P86YCNLS0cKi9nQG/Xz++0Grl0kS4HF5cWMjbmzeH3XesQkHZVmQn16hh\n9txUs1XCxQw0D6XVms/QUFvY+pI1l+skW4lm94CsYeq3rA5VfCY7dwPwPDCKKjSXh85RDTyFWuG2\nK3SuAKqYLEZNg60GrkUtAvQuagQyiCrGalArzx4L7T/I1DxobU5AFa5vJxijNodam5cS4AHgCKro\njDW3xp6bw6FtmbzrJHodpY9ntpDtHs9Dhw7xp3/6p+zevZsbb7yRc+fOEQwGWbRoUdh+4vEUBGHe\nEJla6jOIsPFQaqvWp7LYbufGBQv05yeB8z6f7qk09rOsWbRI7ek5MsKr59Um5FbgKwsXsqykhDyr\nFYfFwuefeoq7Dhzgc+XlrF+yhCWh6Gq8N8vIt94en49jnZ2sX7IEi6JwvKeHA+3tvBCKzGr3paXA\nVhvSiAHyQqmuJaE+otUeD29v3ozb6UyaBit9CTNjNuMCqSbQZQNapHJoqC1qfcmay3WSrcRMekAm\n83BuT/HcZ1CF5UXU1iWnUYXhIdT+l6dD97AqtP8EqujUPJRtqD7QXuBroe2ngBeBvaFjTxI+D8Zx\nJhKdMDWHH4TG4w6du4v4c6sdc
7PhOpm86yR6HaWPp5Aajz76KI8++ig33ngjAAsXLowSnZkiwlPI\nOcSXML+JFFfXG4RZz9gYDS0teF1qwYjBQID3Ll5kQV4eoApRjRVlZbxSV6enjZ4bGaHX56NzZESP\nkE4A+z7+mOMtLYxNTNB6/jztly6pfTs7Oii026lyufBNTjIYp/BQLALBIEc6OsI8oFq01ON00jk8\nrKfL7omIWg4FAtR5vWHFl7SUV2Ma7COtrVFpt5pHbo/zh/zDpftZv38/Dx49OuPpufMFs8RgKu9R\nufhRMJZ383Iq7NTc3MC+fTXs378eny+zZOx0zzVzf/fMWomxRKYx/XMVU4WBNpLeb1mkP7MHNbr5\nCLAQqEIVlu8a9lnJlGDUisAVAz8DPkEViDB1/17C5yGdd4N4c5hobrXn9qRxnemMIT7yWSr7aG5o\nYF9NDfvXr8c3kP57TCbHT0xM8Prrr9PT08PVV1/NkiVL+MEPfsCYIfPLDER4CoKQVUR6DPesWUNl\nyHOpidG24WF9/56xMW654grqq6o4uXkzdV6v7qnUChjdvHcvL4VSVItCwhbUiGdktVoNh8XCsc5O\nPVU35j5K/IwS3+QkYwax6p+cxGGxMD45qUdBtchnucFL6Z+cxG618tMTJ+gZGYlb9dYYGb5m507W\n79/P9at3UFVVj8+9mpbuXg60t3Pg449zrkrtXHGyuYFv7KvhZROERTLMFDGxMd+xGsuDORu+zJmf\nq9QwRnd37rw6o/HMv0hxLI+hMWqopbQeRjUopLNWGlHFqpbdokUH/ws1qtgPdDK1ziuZ8mLClMgc\nBD6L2ucz2e/FbH01lItfQQkzxcCZM3QdO0b7gQO0NKT/vpDJ8d3d3QQCAZ5++mleeukl3nzzTU6c\nOMFPfvKTtMeRCBGeQs4hDY/nB/HahERWYXU7nZy+994wMeotLNT3L3U42F5Tg9vh4Oa9ezl27hyX\nDL04G5qbef/iRb30Q5HdrotTlxYhXbYsanzjk5P0jI3pwjRSYlqBP6qsxB5HfHqcTr2Crba/f3KS\ngdDYtCq3AK/ffTfO0L42ReHQJ5+w++xZXTBeHRKWxnnSIsNFNhvnfT4OtLfzg9aTrFmzmyK7GgGu\n9nj4YqhC70xUqc201Uu2YZYYSOU9amDgDI91LeGH7ctZvftnMzB/5pcvilUcaDZamWSLSNOiuzZb\nET5fb0bjSTdSnP1/92IlqmtRw2tRi/xopOtxc6OmxL5LeHQwuueyyirChZyxAu0YpFTUZ/6T/Wvq\n8sNWEHpfqK5m9bb0M0gyOT4/9AX/D37wA6644grKy8v58z//c/bv35/2OBIhwlMQhDkhnTYhRjHa\n0NzMqVAKiRX4QkhYRfbM/OLTT1Ozbx9PffCBLjqtwCt1dTy7bh17162jwJCaG8lkxOMg6OLQqihM\nAMfOnWPt4sUsLiykZcMGFhcWcrfXi8fp5ILPx1Ao4mlTFL0qr8ZVLpcurr0uF13f/CaeUGGkgUCA\niwbx3BsSlvcePqxvq8jPp8Lp1M9rFJbGqPGetWtnrEptLvb8TMRspo3abAV0s4D3WMbvRj0zMH/Z\nVckyk6il8XWxWvPnLPqpRXevuOJmfTzTXSfzr4JvrNRULZrXxlS7kRJgxzSvERkd1ASlhfB+nY4Y\nY6s0XB/Uoj69qAJ0OdHiM52MAemTKZhDbWMjVfX13HnoEE53+u8LmRxfWlrK4sWL075mukhVWyHn\naGpqkm/q5gHr9+/nQHs71R5PWqKoZt8+vbKrxtL/n723D2/ivNNGb1lf/rZsy8QhBgU3hKYfCU7c\n0ha81tZOKSbUboKSJu1F0rO1djdtt/tuN+w53bNnu233fa/T9Lq63Z7Tbjh9NyRN/YKTNIEU3BQT\n/FGSOk1DIF+NuyTQGjDGIGHjD9mY3/lj5hk9Gs1IM9JIlsxzc+nCmo9nnueZkTT33L/f/SstRXh
u\nTimpAsS7zTptNmxZuRI9IyOYW1iAx+3G9SUl+N2FC8A772iqnjxYmZaHhoYQmp1F7+nTcX0PDgyg\n+/jxGNIIALVFRTg/O6vkljptNtx7ww0xLrcetxsrfvYzjExNAZBupdTk111QgNkvfSluHpjrrVli\nuXv3+zE9PYqCAifuvPMVlJX5lHVqd+FjQ1+Ncy9N9RzmKph7a1PTjrTIgJHvqEgkjKbuH+G3M15l\n/rTmOHXklpPl3r1+jI5KLqH19QG0thp37+TPy/PPd6TcjlWw6joxg/z+3VO7vvoSb24YJyGFzf4a\nwJcghfE2IDbMFpA+B29ByvV8HsB3IIXn9nLbqB1l/TDucW1m29xBfl9T+Ylcd7X953/+Z/T09GDf\nvn1wOBz4zGc+g09+8pP4l3/5l5jt0nG1dSRaKSAgIGA1GKFx2u1o9/mw0+83RViKOdfXi/Pzkro4\nOxtn/sN/JfJlWGZkl9zzkQguGAxvdNhs6ONyRsORCIKDg9jR1KSosMPhMI5duBBHOovtdvymowM3\n7NqlLFtWVIQ9J04o2960ezfevuce+EpKFOKpJp0A8PJnPxs3D+mQvunpUczPXwQA7N27AZ///J+U\ndUzNBKSyL1+YHlZu+AcHg2ht7UZXS0vMPKihJq+5TkxZ2Gi2jvWru78eM38spBSIznHqkNShXDkH\n6ajJ/HnJBTMjq66TIIIYxjCKUYwudMGTAw8IMoMuZOYhiA+SURAgmfSojxGEFHJ+DBLRBCTS2Q3p\nwcxNkHJEtaICzEQM5FZ0gYBAqvinf/onjI+P48Ybb0RhYSHuuece/OM//qOlxxCKp4CAQFbBK3Va\ntSjVUN84f+3FF9Hzxz/iA5WVqHS7cW52VjEAsgMokOt68ornNYWFIADnZmeVZWpFlEeVy4Wpy5cR\n4Vxp230+PLtxY1x/tg8NYec77yhqJoPDZsNlIqnkS02NUlP05qoqjE5PY0zlFBeor8dLZ88qxPOD\nHg9WV1Tg6zffjNv378dQR4cSVgwgjvxqzZWaZKjX79lVh0hkHHZ7Me6++60YxVOtZr548LMYGemB\n19toODzQ7LnOZaRD4NjtbzGkW3C9mdu/v830HCdDrpwDq1TCxVAbMwU//OiXlbIAAujOE6Usf+BH\nVIkE4mtcakUFsE+rE0AJpLDgZNdZutEFRr8hjG4nkKvIdcXTKITiKSAgkJPQullPVotSDbXyNjY9\njXORCPpHRxGor1dKqNhtNiwQYYEIdSUluMjlWJ7VsAP3ut04F4koyimP8NxcnOI4cOYM2vbvx8Tc\nHA6PjSn9+cXJk3GkE4i65U7Mz6P39GksKyyEr7QUZQ4H3lIprWwu7ujpUYjnDRUVeGbjRgDAzF/8\nRcz2/Lzy749duKCEGwcHB+NIhnou/+edr2Dv3g04cM0j+Omhoyh2vKmcJ7Wa2dLSZfqG3+y5zmWo\n584MgWM2P4B066hHL1KZ42TIlXNglUqYTVU60yiWlbJGNGLHoiplmSY0VrQf28bAwHYDYelMiVwL\n4HpIdUP57VjOKA/+0xow2FetdszA6DeE0e0EBHIXwlxIIO8gak/lD7TMZ9TlUpJBfeN8fGICgFSz\n8+F165T2/vzaa5XtXt+6NaaGpvqLrqG6Gr+9804E6utxdOtWOP7wB2WdQ8elNjQ3h56REYV0Mlfa\nuYUFze3VGJudxclLl3B4bEwhpaUOB9pWrFDmotzlUsZQ4XLpOsby83pTdzfeunAB/aOjCumskOdG\nDfXclZX58PnP/wknpi/HnSe1u3Aq7qVmz3UuwwyBU39HGQ3Ey4RD7FI6B0sNXehCAAEcwIGkYbaZ\n/d1L7ICcfjkbKxyWY9sw5nTMDI8OAXgGxkjkYoTNGj2mke2MGx2JeymBxYAgngICAhmD1s26mtAk\nA3Nv9cikzFcmuRdOzM/joaEhpb0nb78dq0pL4S4owH0HD+J
DVVVKG7x6aYNEwD6xZw/6T5/Gjbt3\n4zJHUi8TaeZXqnHswgXUPP44ktHOKpfaYVEKCQaAS5cvo/fUKVyUCWNXSwtWlZXBbbfjuZMnFTJ4\n689/HkNCi7lapKMzMwqhLLFLLV+U50YN9dwx6JEqM+VStLY1e65zGekQODOl6K1GqueAkY3v7m/D\n7ZFw3vh15krNTyPwwINudOdAbmdiQpN+ORsryFxsG8ZyfVOpkbkYn1ajxzSynfVllAQErITI8RQQ\nEMgYtPIQjUIrfDRQX49LsvKoZarD57PVFhVhdGYG5U4nJubnUepw4JLKgCgTYLmdDO0+H4bOnsWo\nHO5rB1DucsU48BYVFOCjy5bh+MQEJubnMcGF/jZUV6PY4VDyWGvcbtzi9eLY+fMYm51Fo9eLp26/\nXXLbjUTQe+oUGr1eXJybw9jMDJwFBXjlzjvhKyvTdaHVO09m8gNzJZdQwBrwLrSv1Afw/7V254Vf\nZzruuVcvEucopp97bIXDcmwbkcj9GBzsQVPTLXC7n0yj3aUG5iCszmcVyAWIHE+heAoICGQQ6She\nLJyUEbRGrxdFdjsm5uZQW1iIp26/Pa5dXrn7TUcHAvX1OLZ1KwL19fjYsmUx25Y6MpPi/ufLl6NI\nVh6dNhtGp6bwoepqtK1YAXdBARaAGNIJQKoJOjqKkenpGNIJSPMwJIf3ljgcOCeTy49fc42i8G7Y\nswcDZ87gt2NjWFZYiBvKy/HuxAQuzs9jPBLBhr17AeirdnrnyUx4KdvW63bj9NSUIZVUIHfBFKWQ\ntxFPNO3IG7/OXHC9XSykrvYmVgbN1xxVh3smat9oaGhsG273SbS2noPb3Quh7PFYzPgKAYHkEIqn\nQN5B1J66OsDUuYbqalyIRFBXUoK3QiGFtBXZ7bjV60W506kY4oQjEdz6859jeXExyp1O1BQVKbUy\nf9zUhBt37cKc/H10+3XXoffUKcnZ1kAdTyP4QEUFGpctw7PvvRdX3qXa7cbFubkYNRSQyKmWOREg\nhelqGR1Vu914v8cTMx88vG43xmXSZwdw/N57lTBbI1BK3hQUoNTpxKMGSt4w1fT01JSizl6tyudS\n+I5i7rE3N+3AV9weU1rVwEDQwnqk5pCK620+lDUxck3ljtrrR+ZrYAplL10she+pfINQPIWrrYCA\nQI6Cd1XteP55JYyTgZUnAYA1u3fjnXvuwfahIVyYncV7k5MApLDUczIB+8jPfx5D4EocDlQ4nQir\nFMZUUVtUhMMdHVj+xBNKrVAe5zn1rwBS3qmroADuggLM64QAT12+rJtvysYOQAknBqTQ3OrCQvSe\nOgWnzaaE2ZoB7+AaqK/H9qGhpKVEmGratn8/gFiV1EgpksUkKwLx4N1jzdIXa+uRmkMqrrfDGFbK\nmgQRzNuyJrmj9maqBibvbPtjAA/B+tqgySBKmggIpAMRaiuQdxBP6K4O8OGfLIyz2u1WnpbZuW3H\nZmcRHBzEcydPKqVRKpxO3CLXvSyVQ1SZ2thQXY1ylytKOtNUO20A3r77bmwfGtIknWowMrlw5YpS\n8oX1k2FtVRWucE8UPXLZGK/bjQV5+YcrK9Hu8+HY1q1o9/nQ4fPhhTvuwJOtrQjU12Ns27aY2p9G\noQ6x1XIn1oNWOK+R/dM3MEkPVprSsO+ofDK6sRKJCJBxz03rEEQQfvjRhjaENY6aO2VN9GHkd898\nSGymYCbc08y2vHHOhwD8CsBqACfT6axJLB3zHnEvJbAYEMRTQEAg58HIjMNmA6NpFTIRA4AyhwMP\nr1uHCEf67HJZlA6fT8nvLLHbsaywEM986lM4KauiPHgyW66TA1pss6FtxYqYZZUuF+7o6cFT775r\neEwFQIwj7u3XXYc3AgF0+Hxo9/lwaMsWlHB9KHO54HW7cZkIYTm8tnj6GIILP8C3XvkNwpGIMv7t\nQ0MYm57GfQcPKnmWZhx
q1eTRTK6nVr6okf0XW62xjPgGg4DfD7S1ITz+VrTNbwWzy7YWEYkI0GLc\ntjNFswc9CGoc1UxZk1xGJsrxpAYzbrJmtuXV0QIAFwGMA9iA7D3SyGa5lcV4TCMgkFmIHE+BvIPI\nS8gv8GGWfM6lXshlonb+8/e/V8ia02ZDicOhqJZetxsEKaSVd7AN1NdjR1MTan/6U0TksikdPh9e\nOXcOI9PTUmOqHM+bystxXWkpek+fjutHAYCm2lq8eu4cJg3W8DSClSUlmLtyBZGFBdxWU4PlxcXY\ne/IkwnNzuLmqCmUOh1JDFAAKMYfr8Qd4cBG/wcch6a5A24oVmJqfj3OY5V1na9xuNNbUxJyDROGw\n6bgTG90/ldw8K5G+c6cMvx99/f3wA9j/r7UY8Y7Ce74Rm799AO4Zj7k0tiWIxcjMa0MbetCDRjTm\nLbm8un739MJZeWfb1QDGMTBgRzjcCIdjGC0tIUhfL5n8kFnh0GsUfqSW/2oMV9c1lRvI5RzP0tJS\n2Lg65jMzM3jwwQfx7//+73HbihxPAQGBnMVzJ09idGYGAFDtcuG8rNYFBwcV4xkjOYDD4XCMQjhP\nhGmZXJY6HIqZTl1JCd5fUYHe06cVh9X7Dh7EHFers+/MmYR9/v3EBMpdLk3jnytAXL6pHZJ6yXI3\nU8HU5ctKHmjvqVOocbsVZfOtCxdQLtcDXVtVhT9NTeF8BPg9PogyzICRTgB47fx53CLXMOUVRqY6\nsrBjFvbKzgGf18kvB6IqZqowsn8quXlWoqWlyxriWywrIo2NaPnSUxg89hCa9u2QSGe+WMNaAD3q\n0IXs3bazPjixB+3oxE78W16SzqsPTBcHpLPIvhc83N+vANiAcPg6jI5KNYkHB4HW1kx/yPg+ZBrZ\nVFcFrnZcunRJ+Xtqagq1tbW4++67LT+OUDwFBAQsg5pAbh8ailEplxUWKrUn+RxAIzUgmcstj2q3\nGxtqa/Hbc+dwenoa5U4njm3digqXC7c+/TTOz87GucuqcXNVFd4OhXSdZY2gyuXCBQ13WQZXQUEM\n8dXcxmZTHHdtALR6U1dSgte3bsV9Bw+iZ2QEN7ou4rrq1Th0RlJCCwsK8Pt77kGFy6UojMwYyGm3\no8ThwNT8PHpPn445B+/fvRv/dfEiFgB8yOPBYHt7QmXTyIOCqxbhsBRuu2MH4JFJTjZFEg0shnGT\nH/FaTbZtWbT6IKCP3DH4Mq6LRyMVGrB580q43TsTbp8vGAgGER5+C47i42jp+g3cHt9id0nAAuSy\n4snjsccew7e//W3813/9l+Z6oXgKCAjkBNSq2dj0tEI6PS4XXv7sZ/HQ0FBcyKVWDqCa3DCXW75c\nx/lIBIdHRxXToIn5edy4ezeG77kHK0tL8Z78BM9hs8WVMWGoKynBssJCzbBaI3DYbLipshKHz55F\nid2OKY3wW5fNBp6Waimpc9x7rZ6udk3hmyVP4sWDP0OV629Q43ZjZfVN+ElzMx789a/x2vnzeLG9\nXXGw1VIyA/X12On3x4W9jnLn6cLcXFIiqT7PHpdLEFEGjwfoVlGcbIokGlgMl1ktrUZPx7IKauJU\nLBMnoRcZw2/Dz6FM/lwfHPwi2lqfWaSeGNfFLYtUyDGEh4cx2n8YADAYfAit6u8UgSWJdB/+WPXw\n6LHHHsO2bdtS2jcZhLmQQN6hr69vsbsgoAM1gWTvK10uvHbXXfCVlcUZzwCxZjbbh4bg37sXMEr3\nVgAAIABJREFUT737ruKEeuOuXbjv4EHsaGrCLz79aRTZJRsgO4DxSEQJSQWAuStXsGHv3phjr7/m\nGmV9md0ec2xXQQEK/vCHpGMrQNRZlsdlIrw6Pg4boEk6AeCSarndFv9AkPWKN00qtNlwXXExql0u\nFNFFjI0dxv8YqcYz7x3HuUgEvadP46GhIezbtAmnvvCFmLIpzEzozVAIQPScaJn/zMr9K
wDQs2lT\n0rlIx/X2akCufUcthnGTlldppgMH1QZR6j4EB4Lw7/WjbX8bwnnmMpyNa2rCIYX6v+cFnmhaTFXG\nuOHQ4hoqZc78xyGH7HsbG9G0IzOf2Vz7nhJI3+TOCpO8kydPYmBgAPfff39K+yeDIJ4CAgKWQe2G\nyt6/e++9CWtJ8mSIkZiQTCbVOYketxu3yiVCGJ1rqK6GUyZzxXY7fv2ZzyjHri4sxJHxcYk4ulyw\nq4hnKBLBb8+di+vT8uJiLCssVN5fAXRrfi5cuaKpUlbJeZnqZc6C+K/eBUjq69GtW9G2YgWK7Hbc\n4vVi+vJlnJ+bw7H55XgCX8AFx/swTRI5rXS5dF1i2TyORyKoKymJIfVqZ9u1ck7oFQDfOXJEsz0g\nSmbnr1xBh8+XkuutQPZRVFQDt9ub1ZtzLepgpnBGKlATbHUfhsPD6B/tR89ID4KLULJnMZCslAyP\n11puwyv1wMDmtfiRe2fGerR0nFqfQ9Sj+QFLW27p6kJ9IIDNBw7A7Vk6Sq5AYqT7kNCKh4w//elP\n0dTUBJ8vM+HdIsdTQEBgUcBCaY9PTsJXUoKTly7BV1aGd8JhjEciWFtVhevLynBJzklkxPHVu+7C\nXw8OomdkBCV2O0qcTrz82c8CADbs3YsN11yDM9PTStjn9V1dSm1PPahDcddWVeHQli0AgJt278bo\n7KyyzobYUigFAApU+zttNthtNly+cgV8hul1xcWYnJuLyTtlDrylDgc+ds01eFIm4HzeKwDcVl2J\n/7P0GXzl3Cacmp6Bw2bD7+68U7dOJ8uJ5XM59XJptbbVgt7+6breZgq5k7O2uNi716+E2tbXBxbV\nxCmTSOaM3La/DT0jPWj0NuLA5gPwXAXXgx9+9MsBzgEE0J0gwDmMMIIIYgd2ZNCEyY/sZ95mKru4\nCkBI/rsDwGKFJgvkC5LleKbr7m6FO/yNN96Ib3zjG3jggQd0t0knx1MQTwEBgZQQDALDw5KJZ1dX\n1Ecl6X4y4Tx24YKiaqrBTHQ8bjfCkQhqHn9cIXZs3epduxQnW54EqcnRk6ramg3V1Tg1NYUxjkzW\nFBbiHPfeV1qK60tLcXxyEtcVFWFofFxZV2a3JyyjoudsawdQ6nQqJNhhs+FTdXX40YYNuGX347h4\nRVIx7/TV4emNbQoZbKiuxsrSUuz0++Fxu7Fhzx4lx1XPiAnQJoN6BJPflpkRaeVrGiWouQKjhGup\nE1TLSsXkOcKRMIKDQexo2nFVkE4gF0vJpFtQJxUS6UdmyO7tAHoBNAB4wWBfBK5m5Lq50IsvvohP\nfepTOHv2LEpKSnS3E+ZCAlcVRO2p3MDwMNAv/5YHg/F+Knrgy6sAUk7jxfl5lDudmJifR6nDgfd7\nPPjaiy/iVyMjiCwsKMVCWBitx+3GR2pqFBLEh3eysE+v243TnD04Q3VhIZ751Kfw0WeewdjsLNZW\nVeHs0aPAihUAJHLI18EcmZqK2Z+RTlZChY2hwGZDaG5Ot5zKAqCQzgqnE0e3blXCj9/nGMOrc9fB\nh/cwef4U/Hsv4w8XL6La7Ua1262QTgAol3NA2bjVynG5y6UQRjUpZQZNjIxqudMmKqui3j/XYTTs\nyGrzHfYdlcj9NxnZtZIMLyUDlnS0K4/bg+48VXuN/e7Fz04XurKgYppBugV1UrGoylR28ZNYVLvq\nNCHupQTUePzxx3HXXXclJJ3pQuR4CggIpASuXCHMeB9EOLXQXVCAgc98BoH6ehzbuhVetxuXLl9G\n76lT6PnjHzE6M4PQ3BzmiVBot+Otu+/Gvx45ouQZ+kpL4S4owH0HDyo5i10tLVhVWoq5hQUcHhuL\nO37vqVN4aGgI79xzDwL19Ti0ZQvGOCJ8aX4eFxOURgGAQrsdf37ddQAkc6L3V1QoNUWdNptiFGTX\n2f/Ply+PyXn9B+9ruA2v4NtVAzi2UI/+0VGcnpnBe
dlAqObxx1H4k5/gY888g3kitHP5lYwojkxN\n4fDYWEKDH4/bDY/LhY7nn0fb/v1468KFOFOgRPmaamMilvOpzhnNFbS0dKG+PpBU5cuU+U4i06Vk\nJhBWmEQwLK4Bi7VgtKMH0i1/MqgzCpdShmE84mfHAw+60Z0C6czUTBk3DtJGKiQyU9nF6Y5FQCC3\n8B//8R947LHHMnoMoXgK5B3EE7rcQFdXfLlCPfDKz83V1eg/cwYAELlyBd85ckRR1XgV0+NyKSVO\nGqqr8cIdd8Qpcl63WyGXK372M9htNjgLCvC+8vJoKRVIamOFy4Xw3BwavV4U2e3oeP55hWTZ1qwB\n5LARp82GIocD8/PzUt1LjTqgQx0dWFlaiuDgIPpPn44JxeXLpGgF5DZ6vXhUdQ0/X/IVzLtfxRNF\ndyBy6ULcPpeJcJkIQ7IJUm1RkbKOjYEpx8kMfvj5q5XNk7xuNwZOn0bVzp24uboa7T5fjMpqpC2m\njuZSjU9GuJLBakWQfUclIvHJyO5iONFaiUxl1ZmlHWp9bAyZLemSKah/97QVcSuVvUwXv0n1CklF\nMV3kekY5CnEvJbAYEIqngMASwGIoT6xcoZHcTl758bhcCnFS35DzrrhP3n472n0+dPh8CukEYm/m\nXbJDrdNmw/Tly7g4P4/xSASvnT8PQFIjFyDVxQzPzaG2qAgHNm/GycnJGCXKKxMwO4Cbq6owkcSM\naMsvf4mburvROzKCC6r5ZuVQtNTO5cXFmrmRxydncCxSiV+dGoVLdry9uaoK7T4fHBqlV0ZnZhQF\njc3Z0a1bYxyF9a6J4xMTAKSQ3ec3b0agvh5rKipwdnYWobk59J85A5fdbogwahGrfCytkilFUO3y\nzCOZGmtUreWRS+VCzCqTRmFWu1JTsePy+woAD1vYr+xBUiLD4ac0FHErlb3MFb+RnHZ3og39CJt2\nhBUqo4BAPkMQT4G8g6g9FY9cv9nnCcpOvx9v33235g05H8rpcbvx7MaNeGbjxpht+Jv5VXK46jyR\nkltpB/DyZz+LQH09PlJTE1PmpMBmiyn/wfJAJ994A4CkUL4qk1YboKl2AsDpqSklDJiZHhXb7VhW\nWKiEDm+49lrpmNx+H6mp0SxpwvpT6nCg4Gwlqv/kw7KfbsHOdRtjapDyGJueRjgSwfahIYxNT+Ov\nVbmXz508qVwTD/T1KUT0kkyqJ+bn0fqLX+DS3ByKHNHgl4bqasMlUbSIlSitEv2O0qqZypCM7KZC\nhnOpXEimaItZ2qGmYj4AXxgI4i/2+vGz/W2I5Ek9z+jvnkTpHQ7JTTVWEbeSlGWu+M0whtGPee6h\nREIvEoEMQdxLCSwGBPEUEEiAYBDw+4G2NiCcw/cnuX6zryYoiW7Ik4HflxntMNgAvHrXXfh/3nwT\nY9PTeIc7aYUFBXixvT2mP2sqKnB4bCyGYJLqfy3wdJQplNMLCxibncV3jhzBsfPnldqh7Eu21OHA\nDz7xCc2HBF0tLUp+62jFGZw/a0fvXjeCQeDZjRtjQmsZ+kdHERwc1H3owCuxg2fO4Kl330X/6KhS\ni5Svj1rqdGqqy8mgdR4TqXwCEjIVoVAsh+c2ehuxY5HDczNds9Mo1FSsHMCy8DDWjPbDa0H+rFUY\nGAhi714/9uuQ4e/he3I9zjcRBtDSshb19R0ZdCnOnLJYLD+WkB5KfBjAo5YfwzyWdvavgECuQJRT\nERBIAL8/6twaCBh3bs02crWOYqYRjkRwU3c3RmdmUOly4chdd8FXVhZTUqW2qAgFNhtebG+PMfQB\nouVB1lZV4Y1QKKYWp91mw4LGdxdzs61wOrG+tha/PnNGqcvptNlw7w034Gd/+INmfqdDru8ZuXIF\ndgAbamvx7MaN2D40hKfefRehuTmUhasx+c074P3LIaxpCqO80IF3Ll7Eu5OTMW1VOJ04cd99uO/g\nQc0SJ5WPPqqQT
B7q+qj5UhplKUGvHmq6SKVciFamXabyM5P3JYhhDKMYxehCV0ZcWMMAfrS/Dd4c\nKy+TrPRPbD3OOnTjdeRruKlUL/SL2AGCBzuRG+PwI/v1RQWuNuR6ORWjEHU8BQQyhLY2oKdHcm49\ncMB4rUqB5EhmQmPUpMZMvcpE+wZ6e9F76hQKIJHOPRs34rO/+hUiV2ILpNx+3XXwuN3K8Woeewzj\nkQhsAG71evHuxIRufVItOGSCy74l3QUFKL1Qg4WaEMLzc8o2PCkuANB07bV49lOfkuZK46HD7b/4\nBXpPn0a5w4GJy5fj6oHqPawIDgzg5ZO/hX1hAh77AubLb0Wps9BSo6BUa8DmMsyYKuVSPVQ/4m+3\ntZZlpy88uQqgO0NHNlNkPVskPFmt1dyrx7nUkG590Vhk4yGKQP5BEE9BPAXyENmsPRUOG3duFTCH\nZKrPtT/9qVLvs8PnwzMbNxpum5GqIrsdJycnY8gATxBqiopwcnISM2+9he4vfxk37NqlEDxXQQH+\n7Npr4SoowMFTpxC5cgVlTide5+pvAsDJyUls2LsX1xUXK66zDOu83hjHW0AijXq1PrXQ6PXivclJ\nnJdDMvn6oYnUMjYHD69bh4eGhgyr4fx5KcUELqE86bHMIt1IArWj51eHji26ky4/b82Tk+j7+td1\nt82lCAWt221rb8GNIYggnsJTCCGEBjTgBbyQlZv1ZMTSj+yQ8GRk+Bd9v8Dj/sdzqB7nUgMrtmNN\nTc5sPURJB6KOZ/YhiKfI8RQQSAgzzq0C2tDLZ0uWl8rX+zT7Nc1yD9XutUCsEVPPH/+I/tFRvHzu\nHB4aGoKdc5Cdu3IFvadOocTpRGNNDQBgcn4eDw0NxRxr4/79mJybw6sywWyorka1ywUAGBofh1Nu\n0wbA43Jpkk69L2Lmgvu7O+9Esd2OMrtdIZ0Omw0Pr1uXdA58ZWWm8mkVoyNM4gqkcVS5XDg9NZVW\nTiJ/HTgrpDbM1oBlUNe4zAVzLf56/vsPfzjhtunkOFsNrVxMftn2LDlmD2MYIUiGOSuxMmvkKpn7\nrtUmSXqZhMnMpEpRmmI9TgFjsDanNZrH2ogdFrsCCwjkM9Imnjab7T9tNttZm832uhUdEhBIBvGE\nLreQzChFjxQkM6G5zesFIOUkVrhcMccwas6iRW75ZbdUV0t/r1+PHU1NWCu/Z/C4XNjR1KSYGGmR\n5NHpaVycn8c8EWwAqt1uNMh997rduLm6Gu6CArx21134+LJlAKIlVz7k8WB5cTE6fL64osoN1dV4\nMxCAx+2Gr6wMH6mpwSRHxi8TxZFgK9DV0oI7fXXwuRYwDanMzNTlyzh89ix6RkbwRZNOiOxcMXOj\nnpERlP7lIAKB1MPX1TUu2Tm9wXEeW2d/uChOpfz1fIccAp0LSGaZonW7zS/LFqnnb9R3YmfGjhN/\nXMjHjRJLfs5+DGtNklItM5Pq755UusSPNrQhLExzsoYudCGAQE6HRYt7KYHFgBWK56MAPm1BOwIC\nAnmIZDemespmMtXnydtvR6C+Hoe2bIlRLmsefxyPvvOO8n71rl26BFSL3Kprha4qLYW7oAAffuop\n/F5lXfyJa66R8jiLilDjdsMjK5k8nLKrbQEkZbb39GmUOp0I1NejwGbD78bHEblyBf/8yitKO2u9\nXrT7fBhsb8epL3wB5yMRxSm3wunUdJdl88jqelrlYKwm8R63G09vbMPKZR9SjsOXW0mmPqvbY9cH\ny3ttqK7Goy1NSiRBKg6v6hqXXS0tWO8+iS9f/jbCp/cuilNpLqmYPNKtp5kJx2yteqOJbtQz6Teq\nVnyDkEg3m7NGAJcsOA4bw5vye+urY2pDKl3Sjx70IGhpRVUjiD1zi0WCkzkGZwIeeIRCLZB3GBkZ\nwZYtW1BdXY1rr70WX/3qV7GwoGWVmDrSJp5ENAjI8TECAlmAqD2VW0h2Y5pqeQ3
+Rp4dowCS0sfy\nMO0AxuWSIFpKnBYZUNcKXVlaisODgxiZmsJFzgV2bVUVfvbJTwKQ8jjPRSLoPX06jly/cuedqCsp\nwTK55Em504lCux1j09MxJU2Ia6f/zBm47Pa42peVLhc2rViBUCSC+w4ejCFibB7/63Ofs7RcCV/v\nc83u3cox+fPWyKnPO5M8JecfRGzZ/d/htseuX1laGtNvvQcXiQipOizR43bjGzVHUIwZVV3DxUEu\nfUelGyqayuc3GVHUqjea6EY9XfKcCEzd3S73+SkAF+V1dgDjFh2XjWEcQB3iFdRkc5bqNbW4IZ+x\nZy4bJFiL3KpD8wUk5NL3lEBu4G/+5m/g9Xpx5swZvPbaa+jv78ePfvQjS48hcjwFBATSQrIbUyuU\nIHaMSlUbBVxOJq/E6ZEWreWM9LHw17VVVejw+XBoy5Y4YqhFrn1lZfjT5z+P95VLJjwT8/PoPXUK\n/aOjCkEusdsxdfmyoo6q22Hje/fee3FmelohYjdxRNBszqZ6rHpzwufSjs3OKuSPHW/70BBmLl9G\nbWEhnt24Melx2Vz58B7umn0Yf+3YhdrCQmXcjLiy/rwZCmnOidkQT7UKKiAh3XqaqXx+k+ZNquqN\napEutmwFgKPysrXInErI+syeolcCqJH/rgDwcJrt8w8AtAqhZIpcL27IZ+xjj2yQYC1yqw7NF+Ah\n6pcKRPHmm2/innvugcvlwjXXXINPf/rTePPNN5PvaALqtKKM4IEHHsD1118PAPB4PFi7dq0SW86e\nuIj34r2Z9wy50p9cfX/HD36AkUuXsLyhAV0tLXjtpZcycjzmdmpm/+DAAF4eHITbbsfz/+2/weN2\nJ9ze43Jh2cmTOH/xIrBmDRq9Xhz/7W+l2pcf/CB+8IlPKNsPT0xIDqPvvIOOt99WHEZfHhzE0QsX\ngDVrEBwcxIMOBxbeeQc1N98Me0EB6J13UHD+PB79u7+L6U9XSwuCg4O4+Prr8H/ve3Hz2VVQgLdC\nIeCdd/C+8nKsamxE76lTKHv3XUxdvoypG29E76lTWB8Oo9lux7P33x833u7WVvT19WHmrbeAqioA\nwOjRo+g4d07pv5n5HQ6H0S9bxwZdLoxNT8e8Z8dbdeYMQnJu6w1nzmCb/F3N2nv58GEclc2V7t+x\nA9+87TZ0FRRgOBzGzFtv4Z9uvVXJaezr68ODDgcmC0/hrtkfYGJ0BYqvvw9v33M7goOD2HblCl57\n6aW4/tXdeisObN4cc30WOxzAO+/gxooK7Lj//qTjdbs9cDgexEsvvZYznz+t998DcMnvRzGAB/v6\nUJqF43dnuP0uvx/DAGb6+vBPAIrl9Tf29WGbtEPs9i1dCA4Gse3KNrz20msY9vsl/8++PnQA6JPb\n62ff9/L+JX19+IKB+VP35w4D4ymWj38DgA/6/dgJoKmvD6MALvr9eEg+Xqrz1QWgo68Pfw/Ak+D4\nNwLYobHe7/encf67TffXmvcPApiG3/8sAA8e7HsQ05jGs/5n4YEnI8efwQzgl8jttr5t6EMfWlq6\nMDgYxJUr23L++yGb76VlL8PvPyr/3QHgmznTv6X6PhGCA0EMh4dR7ChGV0uX4XrMVu2/ceNGdHV1\nobm5GRcuXEBPTw++853vaG7b19eH1157DWE5RenEiROGjmFJORWbzXY9gOeIKM7KT5RTERBYPGSq\nUL0VMNs3fvu6khK8vnUr7vjlL3H47Nm4NvTqJGot59tl0OsPv+2q0lKsLC1FscOBifl5pR9Omw2f\nuOYaVLrdODczg8NjYwCkMNp37703qXIUjkRw0+7dGJ2djet/OrUi7zt4UHNOwpEIHujrgw3Ao35/\nXJusHa97Cmsq9qPc5cTE/Jdw+Oy47lwZqZOYrJalVsmR4MAAnjt5EpGFBdxWU4MnczCnMhn8yE55\njiCsqz/Jt1UD4KSqXT+iY6oF8BsADwEoUm2
7XadPiUq6MNgB/DmAGQCH5WXq+WP9PIaocml0jrWK\naWSzrIy1xTz0YOVVkZsIy7mkouyMUSxG8aSrF8nKqfj3+tE/KpfhqQ+gu9XcL0S6+1+4cAGtra14\n/fXXsbCwgAceeAD/+Z//GbedKKcicFXByFMjAQks7NE75cXp/7sJbW1SbdJcgFnTEn7717duhcft\n1nWbVYf/srDO+StX0OHzxRAdpqwlcq5lOD45CUAKy11WVKSEgh6fmFC2mSdC/+gonHY7jly4oCzf\nu3Ejtg8NJTXS8bjdePueezTDl82En6rnQC8k2uN249mNG/GMThgt229NxX4cHutFz0gPjk/8Xneu\n3r97N67pegb3ntqM0Tl7XHt6/dOaB3WI53A4jNGZGYTm5tB76tSilU5JhkTfUVaX59CDlaGbfFv/\nS6PdYm7bUUiksxsS6eS3fQJh5f1qXFEC+7qgXdKllmt3AUAvgOPyey+A04gNEFSHy5bDeIislruv\nVr9SgRFTnWTFPKz53ctktmxuQJj6GId0TVl1lQtYAXUaQjb3JyJs3LgRgUAA09PTGB8fx4ULF/AP\n//APpvuRCGkTT5vN9r8AvAjgRpvN9iebzfbF9LslICBgBRTSsH8zDve60dMDBDN0vxEMShFxK74x\ngA0/T+5S2tXSojjKqo109LZP5FDLq2I3dXejd2QEgQMHEI5EFAOd3tOnQUAMmelqaUHz8uVoW7Ei\nzrlWnRfpKykBAFycn8fL584BkBTO64qL4SqIfp2W2O0IRSKwc08E733hhRgjnwcS3Ejq5dWZIese\neSw3dXejaudOBA4cUNRDM06yrC/lLkbMG/Gbji/pkkZWXmY8EsGGvXtNjzERijl33YbqastcVrOJ\nbN3mpUtwg5CUzDYATm45s98qhUTwwogliV5I1KYKUi4j34c57pZjHAW4Sd5fi3RtB/A+1bFdkAio\nTT72YWgTYHaUCfnYibLX+HGqt7GqsuPiOsvyyNxjD2scaxOdDYHMwNr6pQLpoaulC4H6AA5sPmA6\nTDbd/cfHx/G73/0OX/nKV+B0OlFVVYUHHngA+/fvN92PRLAk1DbhAUSorYDAoqOtDejpARobU6+d\nmAx+P9DfD+Dv9gJrjIXQZiIUWB06G6ivR+/IiFLOo8PnwzMbNxrqi3rZpbk5qQ6lw4FLly/HtbG8\nuBiRhQWcl8mcDZLpUbHdjrfuvhsNTz+dtB+AfkitVvip2bnwuFzoPn5ccfA1Ou/hSBjBwSB2NO1I\n+INW89hjGI9ElDH7ysqStm0UycKC9TAwEEQ4PAyHoxgtLV15bT5kNFgy1dBNrXDVlQDOQCKdHkjl\nRdjVz0JZ2fFOIxoKC0gOrsxMx4tXcR63xhyvBhINqgHwKwARALchNqQWkEinG8Ckqr+VAKoBnIMU\njgsALM7ADomo8v1Uw4/Mhz63oQ096EEjGhe5rmPmAnr98KNfnskAAuhOaSb9yE4guoDA4iBZqO1i\ngohQV1eHr33ta/j617+OyclJfPGLX0RJSQmeeOKJmG1FqK2AgEBCdHUBgUB6pDOZSnb8EwPA3+2F\nfaW2S6kWMlEjkFfF1lZVYUdTE26TzXEaqqvxKGeskKwv6mVMYf3YNdco+5VxIbpvBgL4aE2Nso4g\nfcm+1NEBX1mZoX4A+iG1TMXseP75mPOgd2605mI4HFZIZ6XLZXjePW4Pulu7kz5FZeVlrCadUh8S\nhwXrYSmVUzAaLJmqjqEOV22E5CzLlE47oqSzElHdjB3vJNfWhxDr4Po7vA/FGIND9qAuhUQYewDs\nhxSmG0JsSG0DgHa5DTXp9AA4IrdxERLhnOL6toEbA+snr6ndD4lgA8Ycc1PV46xwlrWmFmXm1C1r\nHGuzFYguICCghs1mw89//nM899xz8Hq9WL16NdxuN77//e9behxBPAXyDiLH0zw8HqC7Oz2lM1l+\noa8xDKw
ZxUJRBHUlJYbq/tUUFcWFt6aLrpYWdPh8aOdKojzZ2opAfT1euOMOzT7d8YMfYGJ+HrVF\nRXjq9tt1Q3m3f9WNse+0Ajta0bbchw6fD5tXrIBXrgnK9mHlQwDgCoDvHDkCAEn7wZCIkGudB71z\nozUXfM3QI3fdZbk5DysvYzXpTAfZLqeQye+oTN+aHx8YAPbuRdn+/VgRicAN4B3umA3y35WQSJ+6\nFuUE9/4GROtjtgGoQAXKsQyXIT0QvyRv9yFIxI+hDMAnIKmg1QB2Ikp8AWAZAB8kFbQBwLS8vBjA\ny5C0sncBPItoWDNfp5PPV2UE+3qNsfghke4Ncv/fQmoZklbkHQ4Ovqz78MSaMNf0YE3ZFpFvmE2I\neykBNdatW4fBwUGEQiGcO3cOu3btQg33MN0KZKWcioCAQP4jmTpZXhhdb7TY/MnJSZyLRNB7+jSC\ng4OWhNp63O64EFaWT8iDD2f90+Qk3pBdaR8aGlK2Ve83PCyHE8ONVbcUYWVjGMcuXIgxu+lubcXb\n99wT40zL5otXLFkY7fahIQyHwzg+MQFfWRnKnU78uKkJDw0NaYbUJlJmSx0OhCIRhCMR6Vgac8FK\nw+iF65pxzs0XsHIKiVx28wVdsDZYUh266wuHMTI6ikkAhYODOCxf/3UAPgBJiWTOtT5VO92IEs9K\nAI8C6EA0ePJWSOqkGhcADEIiquchKZuD8ra98n6MpJZCIpf3c+0yPA/gZkQDNIMAxgDcB+B38t88\nGJllobor5DGVy+Ngob4j8v8sjzUR6TcSCp2Kt6zdLn0OtR6esBxSqe1gimGuZnoZv46R6/TAFFkB\nAYGlCpHjKSAgYAjJ8gvN5h8CyUtqWA1Gqo5PTmIiEsGEnKdZW1SE0ZmZpP3gc2Xd/8deHB6P5k/y\n+wYHBvBWKITjExP4jRxmy6DOGx2bnjZczgXQnudwJILVu3ZhXA6zTSdfVi/vNhiUiHfnNT18AAAg\nAElEQVRxsRS6nYk8YYHsw4/YrLpL3GfSs3kzet1updDCTZDCYQGJUD4D7ZxQJ4A/QCJxrFhDKaLk\nkUcxJCXxXwE8BmBOXs7yM9cCKEFsvqcbUiQBr4Kytj4CiRz75HZZn1i+NSAppXcPBLEsPIw5RzEe\na+nCpOqBRK081gpIYbyNkNTSh5CY9PuRPEvRyDZqJCpRlJkc0kS9TLRucRBEEMMYRjGK0YUu4Wor\nkJPI5RxPMxA5ngICArow42Cqub/sVnvfZ93Y0ajvQpqKS6le2ZNU+5oMLCR1ZGpKIZ2VLhd+09GR\nsLSHUo7lr/aj/d4IDhyIKrxrq6riSrQMh8M4fPYsRmdm8NDQUExbasWSvTdSzgXQnmeP242PyOEw\n6ebL6inbTO3NpDPyUkC++XKqQ3f5z+STbjcCkJROnnQCURKnzgmtBHAXJEWyDcCPIRFFLdIJSGZC\nHwOwG1HSCURNgV5HfGhWBPGks0DuYz8khfIw16dSxN7stAKoCw9jzWg/PjzSg8/JoavMnKgBkqIb\nAHAU0eBPH5JnSBoJhU4lXNrt9qC1tRtDQ9vjcj2tCXM108vcy8XMHedgAQGBRBDEUyDvIPISzMFM\n7UfN/TNIONQkKt2+JgMjVRUyybMDcNvtuOMHP8CluTnd/Vi/ekdH4PrfBuHxRG/QD23ZgmdUNTr/\nINf1rHA68fC6dTFtsf0+UFmJjuefxzwRfKWluMnjicsxNQOj5WkSkfvgwIBmrisgKZ2ApPbuyI17\nzZzEMID+vr6crJSoJsUsJ7MWkprnAbDd7cZYayvuk889MwziSacTwDhiS600QHK//QCkkFeWC7kG\nElHUw4Lc9kSC9YMAEj5Ch6SAHtFYboNEehmRXQvgZwA+Luf9vudtxM+bdsDBbbMSUZJphGzyMJKl\nmEomI/vd0zLKykztykS9zJ1cTJbf+ibeBKBtbpQLObC5CHEvJbAYEMRTQ
GCJI13n2GwSjuOTkm+l\nFmEzAz1yxUjf0a1b4XW7pZvemRm8EQolJLtac5iINEcWpFvYi/PzcYon2+/k5KREZk+dwtT8PIbO\nndNUSOPGJivQbW1AmLuH8rjdWFlaisNjYwnHwvdz9a5dMXOkp9QGg8DEBFBbCzz1lAiz5aG+1qys\nn2n1LbLaEXcYkjI4CimEVGsbIKpvARLN8CBaQ9MFYBWAU4iWUuHDW62IW7iCqMKqhwpVP/n+AlF3\n3EOQjIZ+2NKFN+oD+H83H8B5t0dx6m2U2/IjtXNgxDc2HW/Z7BllJepl7tR+ZErnOMZRhzpN1Tdf\n1VBBmAWWIgTxFMg7+BOUoRCIhzqc1fT+cimWD/z3AXQMZC4MFgB8JSUAtAmbGTz32yi5+uLB2HIk\n3a2t8JWVKaGpFU4nsGZNQmJuZA55ctpQXa38rdcmv/1arzfp9gyJFGi+zaKnmzQJKm9ENB6JxJDU\nRGG2hw8Do6PAQw9BgINape8CEPD7U9aCjJZLSQVqUqx+H0S0vEgVgAH5/2lI5kLV8rbjsvMt9u/H\n85EILkAy7lFXtk01k6k0hX0uIlpKhUcIQCEkYjwA4IOQwnp73R78sLVbye30QCKmByApvJk6B6mC\n/e61tHShvj6AzZsP5L1RFo9USRZfxuV1vK6p+lpT6iX9vppFpgmzuJcSWAwIcyEBgTxHtkxf9Exn\nrIQRsyEjrqtV/7wfoetGgPe8aD+5Gc926ZshPbxuna6DrBnwpj8Akhotmd2egTc4Utdl5dvs2OiW\nHXilBwfd3bHbhCIR9J46FTPXauMiNtdvHnVg/HgRSq+fxMdudeDJjUvD7dYKWG2Qxcx4mKGPlR9n\nFl7LzHHY+yJIZIs3CHIglkjy5jzYu1d6CgEA9fWAxd8FyxDvQJsMMf0zCVYahrn0ZvIcLDYGBoII\nh4fhcBSjpaUrZ8irH37FmTeAgGGH3DDCCCKIHdihG2psZJts9NUsMmMaJbCYEOZCgngK5CH6+vrE\nkzoOfj80CYbVyIYDrRFnXCME+Pb2CHqvGcTaN5twaJ87KRnPp2sqHJYeNuzYkfghQyKCCsTPtRah\n5+caBCXRrsPniyvTcrVC65pN53pSk0OjYKGzxyGZ9MwDuA3AckikMlHpDj/iS5PwKIAU7qpg/35g\nZATweoHNmwELvwtskGp4HtZZH9eXNFEHycCInxc1ITdT9sQKaBUyseo7au9eP0ZHpbNdXx9Aa+vi\nO9IC2SFZVjnfZosQahFmK9178+l3b6lAEE8RaisgkPfIVg5muiG7RmDEGddIzuqTj7kRCLcaIp35\nBo9HeriQbFwsRFqLdALGjJ3YXAOIcXex+mcz027GmUQqbs4J20Nq2XMsRHcEkloYglQDczeiYaMP\naOzHh9d+GJIDrRpxRK+lRVI6LSadgHRt6ZFOzb4YAH+jUw5JUQWksU4AWA2JYDKwc7BYIbeZDLfO\nXo6oOWTGmTcWRkJXjYTRZqOvgLZplJnwW5EjKpCLEIqngECew6gClktIJzw4lXqhAsmhpWiHIxHc\n1N2N0ZkZlDmdmJyfx9qqKhzassXSuc9GGPdSBVPH3oTkNFuOqENsFaTcR+bWympv8vAjqnZ2ILY0\nihFUAbhgttMyqgGcT3HfVOCEVOrlT5CU4SkAk/K6Onk5j8UKuc3kcRPVAzUKI6pbLtbVNKJUZiuM\nNlWYUVtzfSxXI3Jd8Xz77bfx5S9/Ga+++ipqamrw8MMPo6OjI247EWorICCQV8hUeLCR/M+rCWbm\nQ4/QW50Lq4VMhnFrhS0m3SdPrqMgJGXuovy+DsCvAfwtJOVwHFH10APgPcSPX01yApCUUjuihFUP\ntZAUSLP5mJmGOj+VwQ7JuIjNlwtSWHIxgLcQzfFk14wTQAmAnchunmeq4dbZghFCs5ikR4/0Gsn1\nzPW8SjP5qrk+lqsRuUw8L1++jA984
AN48MEH8bWvfQ19fX3YsmULjhw5gtWrV8dsK4inwFWFxcxL\nyJcb0lSQ7tjM7J8s/zBVpKKcBYPAyy/3YflyvyXmTGbmIdG26ZyP4MAAnjt5EudmZhTykMtKohbp\nteqz5kdU0QsAhm5/01Vgs/Ud5Ud0bJUA3kUsUWGkUm2ew4PPZ/wVogpkCRKXErHJLyvzLdOBHUAZ\nJDJ5AMBnEBs+q0YFgF8AuBcSWefnxg/9a4aRmuP4B/hwO8rhQBekEi1mH3CYQS7l4xkhNItJetIh\nvVYbES0G2DXqhBMlKMFO7NQcSy5dU1cLcpl4vvHGG/j4xz+OyclJZdnGjRuxbt06fOtb34rZVuR4\nCghkCVp5cEsF/Nhu/f6gZikOo/snm5tk+YepIpWapcPDwNGj2uVJUsFzJ08q83DL008nzF1MNGda\n64zmQg6HwxjlSGely5VSDddsQStP0qrPWip1NdOtfZstsLExYqn+KHVBIk7vAvhXaNem5PMZRyGZ\nEs0jef1KQmZIZ8I7FhXs3N8FkPo8BuD/AnB9kvYvAvh3AJsAfAxSyPB1ADYA+I28TTmAh1X7sxy7\nERThMBwxNVHVeZmsJusKud1M1GZdDBjJccxkHuTAQBB79/qxf38bIpH4GU2nfIpWXmW+gV2jveiF\nCy7dsXwP3xM5oDmGdP0OrPZLuHLlCt5444202+HhSL6JgEBuIZNP6JLlHubLDalR8ON1/lV0bO4n\nm5RQ2GDQWCismblhBjnpgil7kYUF3Ob14ifNzYbCQXk1zVnRAsBvmTlTZCEaoDg1P68oZ8HBwTjl\nLNGcaa1jZEyvPfW+AOBxuXDkrrvyTp3XmxuzSmgXzIctdrW0pJVHbPV3lDpcmKlrTki1J3dCe2yM\nVAJRYgQAtwJYKbdXA4l0vmlpj1PH5weCWBYexpyjGP+zpQszCfIQ+VBgngTbECXlgERK/wySey1T\ndB2QSOXHIBFuQMptPc3tNyGvfxvR+WWkphxOTCD6QOM+eT3/gIOf8xH5fxYebRaZ+N1LJQwdiJKz\ndLdJFeHwsOLMOzgYjHPm7UJXTqmWeqG/mcqDNUq8L/kvKcpwEEGRA5oDMPobn4n916xZg2XLluHh\nhx/G3/7t3+LQoUMYGBjAJz/5SVN9SAaheAoIcBgelnIP9dQvI86uwSBMqYVmt7cS/HhLdkXHVu6U\nxmaUjAWDwMT3W1B7qh5Pbcic660aTNkLzc2h9/RpPDQ0ZMhhlFfTSv9y0FL19baaGgBAQ3U1Gqqr\nAeiT8a6WFqwqK4Pbbsd9Bw/GPKFUX2vBIHDsdxIZa6hMTO67WlrQ7vOhw+fDe/feC19ZWfoDyzL0\nPmtmldBUXGKtdqpNB0FIxJJ3pmWEphdSaKne2Jji1gbgD/Iy5urK2ntc/nscUrhtJcypjkk7zzpg\n8LttWXgYa0b78eGRHnxhsNPwodgcVAM4B0m1vQ4SOW+CZKr0UW77ywAeAsBrAuxxTTm3bBSxzrJM\nyTuGDyGAqPkPU5d5MyBGfp1cu2oFdTFh1j03V1xSkznz5ppqqedEa8ah1gyMqs3pKMMCmUG64kY6\n+zudTjz77LPYt28frr32Wnz/+9/H3Xffjbq6OtP9SAgiyuhLOoSAgHU4dOhQxtretIkIIGpsJAqF\nUmujuVlqAyAKBKzf3gqsWUNUUUHkdMaOt7NT6k9rK1FHh/E5WIwxEBFt2reP8MgjhEceobVPPkmh\n2VlT+zU+/TSFZmctvaZCs7MUOHCAQrOzMX/roXnPHmUMgQMH9LdrJkLRLKHzALXfa2yci4nO/n5q\n3rOHNu3bZ/i8GIH63OUirLyemin2R7WDiDbJfzcSEfuIdsrbbuKW8fs6ub9dlPzHu8DANklfzdHv\nBQSM7fOVfZvokUdA//vTjVQ0GzJ8LJc8xnKd9W3yvNSq5q6Vm5/b5PVHNbZLBSEiChDROq4fqX49\nG
r2mtK4DPWhdR4nQTM0E+V8g5ZGkhk7qpFqqpUqqpE2zzbTvQAfNzsb3upM6qZmaaRNtolDKZ858\n3xIdcxNtIhCokRpj1ustzxaeO/QcBSiwKMe+WpGMExm5Z8jk/mp8/OMfpx07dsQt1xuHvDwxL0y2\nQbovQTwFzIARn02b9ElPJolnKCQRp1RJJ5F58mp2eyNzlAwVFdEbwsLCaDupEkgrCHsqCM3OUscv\nf0ntv/xl0i9angidmJiI+XLe/G//lhGSpHX8uieeoPXPPKMcyyiRWqw5ThXJCHWqxNTqH9ZMwMrv\nKEYOQEQfJokgMELDXwbN3HYBjX3NEMsGIjpBREXcstVEVJVgH82XfM2ikQghY/sUzYao80DAFOks\n0VhWqnrvlOfjBDd3nUS0jOIJK1uvnmMz6O/vpD17mmnfvk30GXks6ZBYo9dUMxknuUbG2N/ZSXua\nm2nfpk30mVBr1oiSmszxpDcR8V0McpzsmCEKaRI8veXZQibvpQS0keuc6NixYzQzM0NTU1P08MMP\nU319Pc3NzcVtlw7xFK62AjmFTJXZyCbM1tU0u70Vc1RTA4yPS7mdb70F+GRLx1TdZtOtJZpOXU+j\nSORUmo06kvwxGAL19djR1GQonzDf6rUmK5EiancaQxjAFyGZ+eyEflitVu3HMICbIIWLNgA4Bcl8\npxGSMc+QTltVANZBqs/JtukA8AqiuYqGO5+huiCrAcwCmIY0N6yWaDGkkik3IZpfyWMVovmtE4iW\nm2EwUzszUY7k3r1+JQ+xrj6Ana3dWSmPYnUN0L1+P0blH5y6QAd2djtjciczlaeodqa9hEvoQQ8A\noAENeAEv5IybrihbImAUuexqCwDbt2/HT37yE8zPz+PP/uzP8MMf/hD19fVx26XjaivMhQRyCsVy\nUoxVRi9WwwhBSmaco9WGGfJoxRy98gqwYQPw619HSScg9UeL3AwEgwgPD8NRXIyWri645ZV682GW\nSLJcU7ZvJh44mDXyydTxK5xOXJyfV47F8gmTwSpDJjNIp6RJMoOepWbUlQqMmLt4IOUnJgMzUSqC\nRBKZcdD75PXPQCohwnggb4YzD+Ao19YFSOSlVn7vALBvIIgr4WHAUQy0dAEq058KROtjxnQ+Q9fs\nJIA1iCeX0/LrEwC8kHJXSwFcgjRWN7dPLbffhwHUQyL3Rkuj6Bk2dSE2D7GlaQfazA8xJfOfVMy0\nEsEh/+B4GxvRsuNRtKlaZXmKUn9TM6jRIq9a+YcP4AHYYMOjeDSG3PH7/xg/xkN4KGvGQgMDQXwp\nPIENjlp8qeUpeFSfi1SJeaL9MkX2BQS++93v4rvf/W5mD5JMEk33hRyXlQVyC0ZCXQ3nuqQZkqre\nv7MzNkQ11VxGo+Gsev1PNkdWhOKqsae5mR4B6BGADnCd5seyalX0uOvXm5unbISRbnvhBfLu3Emt\nv/hFXJjmc88/n/HwTRYiqg7zzWUYzT9NBfkQMpsqMhEWaRR8m16N9jtJCqG1ycvXUzTPkX9VkhSW\nWsOW7WkmPALpdSCQ2RsHAy87Nwb1y8uNq4Niw2v5vMYT8vp2Sh62rEanPEcgKTR5vWqf2dkQHTgQ\n0MxD5NtoJv18TL4fzYsUFjkbCtGBQIBmdb6YrchT1ApVNROGyu9fS7VZDV3ds6eZHnkE9MgjoP9x\nYFVcrmeyMFy9/NBE+1kVTixCbbOPpcKJ9MYBA6G2QvEUyClYqeqkq6Kp9x8bAy7Kj/QrK1NXG5Mp\nlkwtPHYMCIXi+59sjsyM26iixT/1buI6zY/F7Y4et7Y28RjV0FNarcTJyUmMRyLoPXUqzma81OVC\nd4YLafPKZr6ElWZSlTSq9OYjvnf0KL45MZH0c5WsxqhRxYvfjjmoNsrb96rafwLADLfvYQBc0IOC\nmyGpmI2Q1E/ICh68jYCGk2i2saCxzAWgFZLyykJonQBehhRiex+
AH0Nys2WKoJaabKT26zCk8iuA\npHTOqfZxuz1xZT602mCKabTMSvRsFmMPACcaAfx9wpYyB7fHg9YEPyJWlC7RUjfNlGMpVs4YMIpR\n3IpbsRIrs6II8sr2k03uOPVXzzmWqZYv4SXMyVfPA3gAz+LZmDFpOc7mixutUGYFNJGMmab7whJh\n9wL5h3RVNPX+7H1lJdGJE6n3S0ux5FVKXi1Mpf9mxm1U0dJ76s2PhT/uiRPpmzRZjXxwQ801LGVV\nMpMw+rlKZu7STMYUUX67Dq5NdfudFP8jXUX6TrBs33YiqpwNSUqnCdMfvZfX5PZOInJTcqfdFfJc\nuIkI/Z2SSrtvU0yfjehDRkx31I6wqZgR8W1sI6Z+vkQhqiAiUIjuT8vgKF9gVN3UUgc7qZPW03py\nkUtRXtfTeksUQSPglW0t9VdvbGqzJBCogzqU9Wy/bbQtbsyLbUpkFEaU2cVwIV5MLBVOpDcOCFdb\ngaWIRKGk/Lp0yY+aIOqFuFoR2sqHrNbWSv83NBC1t5tv04wzr5VkzApH4EziaiNRmQi5FjAGs58r\nPYdfo+Uu1NutIaIKkgge/4ysmWJ/oGsonnR6KEoO11M0DJQd40OUfqmVExRbYiTRS8uxFiSF2fLr\nKik23JUPDXbIocGJ5tFMGRKi9F1v1W00c30P0C6d3prtZXaRaRKhRWT4ZXVURyHSJoCZ6iPf3gk6\noUsI1cdlfSyjMgKBGqhBcz9+fF7y5hVBMxKGfbWR06XCiQTxFFhSSHbDfMsth3TzB9X5k9m4+bai\nhuViqYWMjG37q1nNedKbv6VGapZirsti1Va1ApmqAZotJMoZ1qy3qaOQbiOJALZSYpqhJkGSXia9\n3BRV09RKo149z+UUS+K88rIquS/JSKNe7iXkNowS1zqK5p8yglxBUk1Ovn9OksgsI8d2IknpfARU\n8XQjHZ0NJSWJzVx7i/FxiT48mKcQ3U/q3krfUc20uL1MjEyVMmHEw0veOCJjRmXMRB+NtqfejvUx\nEVnlx1dKpaYJWjLCls7vnhEyaESZtYqc5guWCicSxFNgSSHZDfNHP3pIN5RUHWaq15aVxKmuTmq/\noiL1ENxU1EIrSaHePJldnq/IBvHMNplKJ9Q8o301INpk0tQoG0h0PTVTPHXQU0i1ttWCekrVBJN/\nz0hhMRHVkvYPdztFiZC6HiYS7Gflq5QkMrmN/oZq6AVqpm3UQRHlkmH9YyZIJI9dMUOaDdHyAwEK\nceY+iS49I+pyJvXGZAqqdE0Z1cAlZFspStVoKFk/tVRNBrNhp6yPXvLSelqf9twYHXOqc8PG10rJ\na6iqCRr/3k1uqqRKaqVWZX/+e8rstVJLtUrbfIiwWVhFTvMFS4UTCeIpsGTQ2SnlULJQU60b5lBI\nclBdvz654yu7+fZ6Y7dXh7amQz7NOrjyY02H/FpJCvVIitnlVoLNT12d9rnOZWidW6vJVDJyyH8W\nzBLJjBK/ZkrKpqwKAefHrafqZxta1EEvDNwozeCJYDtJRMxNRI90Er3STHRwE1FFSFILB0lSEk+Q\nKjRVfn2AYnMW1Y635RR1ia3Q2N/Kl5eIKuklAlUoN9OM/LUSkY+IlpFEPpkqzBNmtVLczLXNu/yy\n9tqTzLPW/gzZCYI1F+CbbaUo1dxDvX4mUjrT7WO6eaCsb63USh3Uodsvre06qZNqqTaOCKr30cvr\n1COJalLN5o1XS/XGq3UOEpHRSqpUtm+ndtPzZwb5ktNqBJDKDi+Jl974SBBPgXwCT5raE3yXGSVX\n7OZbTQ4ZcbJCtUuVhKWrGlpJCvUUV7PL04GarPHzk+ghQS6G/WqdW6vNjcyQQ7NEku/rthdesEb9\nZHfmTH5LwKasysflx13zjQM5odKboQ5Gt2VlPUCkaA8hInqjObpiVyCeMG2i+B/t5Rp94NcvI+lU\nGlU97RQlgmZuHopj3u9SSAc
3pDhll82VVhkZfrz8pdess60WEj0I4NtZRbmRiWm1UpQpBVWvn4mU\nTjN91iJ56c5NqiG26mVa+ydrW289I2jLaJmyfjktV9RSEGgtrY0ZbyJyn6gfrM0SKtEkzwJXJwTx\nFMg7GCFNhw4dMk2u1NuHQlETn3RVu1RJGOtTaSlRa6t1JkK5bvKjBzVZY/NTXp74IYEVYb9Wh9pq\nXZ9Wmht19vdT5aOPEh55hNY++WTSNs2SXr6vlqmfzRT9ZaijrNyR8+Nu/cxsxlV6hmznDC8naVqd\nJOVfKoRHZkpHGiXFk6mVRBJ5XE8SkXRSlOydoHj1jpFHkJRf2UzxP/YOjWXLKaqOnlCts9EsNdM2\naqMILdPYl4XMNtA8tdP9tI1m455b8GosPzY9gqhF5M0Er4ZIIpW86ZJWO+qanlYglWvKaqUoEwoq\nc6WtpVo6EWOFZX2NUL7f6c5Nsr5pETrmUMuW8USQJ/VaYbXJ1vNQq5HbaBtVUzUto2Uxc3zo0KGE\n5D7RGEMUihnHKoqvYZoqlpKZ0NUGQTwF8g4x4YE6StahQ4dMkyut7a3Mq0wFoZAUAsyTJr79bdsy\nq+Rl+lhac5Vo/rQeDgQCUt5soocEVoT9Wk0UMkn+OzuJKr4VJYMdv/xl8v6kQXotU2rNpafpwkzY\nMD/ubD6QySTx1Arp1AqZDRApTGtjKJ4INXPbekgKzT2qsS7AvS8hiey1EsWUKymYDSnmP4ykrqX4\n0xxLTs8RaB+10/0UIkmpdXLr2yiWJPJ9Ys8tQnK/2XIWJJOuqpzoxpfvRy23H9+O3qXe2d9JzXua\nadO+TTG5p0aQCwZomVBQK+Qwai0yawVxZn0G6TvHpoJkfdMidPyy5bSc2qldU11cSSuphmpilER+\nfTu1Jzw2I6aM1Oo9MDh06FBScmnE+MjqEjZLyUzoaoMgngJ5DSuULKthdZ/UxkR8+2pSajX4Yzkc\n1h9La64SzV8iYpDquqWI5mYifEUig5XfzXxNUsuUWjNsIAHy3XgoHXRSbF6lYk4kv2fr1IRHTYQ6\nKRqey5ckYURKnSd5gmKdaJ1EMeVKquRyJSBJqewg7dMcDc+9rGzfQRFlPVMwtUirHpnTCjNOF4lu\nfNXhyTz5ZNC71Gu5Oes4kCM/aiaQSQW1kipTbncNraEKqiA3uWkdrYvLjWyn9pg8zGwoalqETs/Y\nqJM6FZWygRpiSFwN1VAd1ZGHPIbIs5aCbIRcbqNtVERFZCc7VVN1nPqsFbLMXw9WPpRYSmZCVxsE\n8RTIa2TDwMYsrO6TXu5pY6MUfssfy+pcRj7Ul/WhslK77TVrJHLs9Rp37tWaq1w8p/mGTZuIUDRL\nldsP0Imz+VdqJF1YnSubVZhwoNEsu0LRH9dKbjkLAV1HEhFSf0TVRIjPz1TnSbL9a7hlAYoNtwWR\nUq6k8ulGWj4bilmnNu5hY1Arsw00HzMNfD/V++qRueX0JoGICmiKmmnO8G2qun3+fSttTXCjHp/f\napRCVspzhqcbqd2k4rkUwQhGJVXS5+hzKZNBXjXlCZtWW2qVdRWtSmj0YwTJzIDYNowQrqN1MQ82\n1GqmVgkVfr3WMdl7PszWTFixOiS5juoSrs+EOp2JtgSyC0E8BfIaekqWkZAjK0iaVhtWq2t64aXq\nv4ni1dB0CShzB/Z4KEZ15cHmgFdE6+o0m9Ns34rwZiuRKHx7sZAwdFSDfSz2HC42rMyVzRQObT6k\nTTCbyTBb0dpUq4SI2aY7KT5nU4tIsWN5SSKMcbU5Z0PkPBCgdbMh8nDLeUKs7lOd/HeZfNxEl7DR\n8Syj/QSKxGxrhN+r2+ffd1Ak4Y0vTz55FTnZMVtnQ4QDAVo7a/6WOhdCba0AT5j4GpbphFeyXMMC\nKogjbImMeyqpMkZdNHJsLZKpV1qE35Y/Dtte7T7LHnSoS6gwoszWa4Uoq4lhIzXSalpNFVRBX
vIq\nCibfp+cOPaf0lQ9JLqIi3XxbPoTX7DwZhcjxzF8I4imwqMiE22hnJ9EttxxK2GZnp0Si1CGd6v4k\n6x/LKwSIOkzGcBkdu1ESwZeZKSmJH1uq4Mms1hj59QBRcXHqtUpzAXqhvmkV0k6z5mXC0NFmMi+r\n5BhSmZ9s1zxNB1p9PXTLIe3zZiLPVbPsCulHKxttupmiXfNQVE1UEyl2LK380ZGHiMoAACAASURB\nVOUk5VOq19nl9tnx1X0yYrpjwvyYiIgq6Sg3ngUKkf7HJlbVjG3fbAqy+lzoHTPRPmawqA/HEhAB\nsyRBTTD1XFXNtHuCTlAd1dFROhpD2LQUa15lPUEnEuaA8n1gxkBaiqLazIft5yKXsryQChUSyfrJ\nk1E3uWPIHq+Qsu218j0d5IgZRwM1KLmjPDllCmYM8T4UDW8OUYjaqI2W0/I40snWd1AHraSVhuqf\npvMgQeR45i8E8RRYVGQiR9NIm+rcRUaU1PtqtcUTRp68Jirtkmo/9aBZA5Jrb9kySilcVasupjqc\nVw2myN58M9Hy5eZIZy6WOclEqG+6OYcJQ0ctMuRZTKQyP2b3yU4NRW1o9tWMraoOkm1qNBRVDS3V\nlDncaoXpMpWSKZ5a7rFriaiaoj/8Xnk/deivVq4pPwaiWALnovhanGq00pxCOvWOw8Aru2rzonRI\nYaJj5iPU5yURETBLEtT5e1omPKm0yyNRqCaf09hMUn3NNmrTrMXJ96GGapS/Wf9ZG1VURUwJ3Ebb\nNEN/WY4mPx6e9IKiYb8ucpGNbMpyJzljyGAxFcfsx8ZaSqVUTuVKrquTnAQCFVOxEsrMO9GCQD7y\npfXggEj74YNenqaRBwp8qLEo1ZJfEMRTwDDSJQla+6dyk5+sH0ba1KvRqS5fokW6tAje2rXax0rk\nCsv306xjrBZp5dv73OeIamrMl2BRq5eMUCdSXNMJ68zEg4d0kYkw1XRzDhOGjqZ7N5wDSGV+zO7T\nTNEfHe1LLQE1TZO1avY1C+etmZKNWRtaXUvUlpbi2UHxZJU3JFJvz9pMpBJqGRsZGZ/WeEJEVEpn\nqZyOkZdephMUJqJYIyKTzxKTIh8/qnqXfjPFzn0isxezRjBqUqi3v5F2UwnJTJQLqQbfB94p1kc+\nWk/rY9oopuK4ZexfBVVoqrAhCilht2pnWPU/plh2UqcSUsz+HZX9qBnR5P8xJZUnjDypraZq5W+9\nkijJSrlokVE98q+neKvzY3mCLFTP/IEgngKGkS5J0NrfTBgpI2Zqsx01QiGi5uZDScNXtcpvhEJE\nbne0/ba2+NItzEm2sVFS99T95/vKiClAVF0d22+WP7l+fTRE1ujcataA5OaSn+tVq4yTWtYuU3L1\nyLtVSmU+GQmlE8aWDzmHhpEB6TCV+TG7T3K1qZl0aUyCVUawbXaWag4coNbZWeXY/PVk1ZSq27FS\nYePb2qY6DlM8+Vc7xU8bI16tqm35nE+94yZqJ9XxVdARpd06OkxEiV1zGbKlnps9jjWhtrFHbSbt\nS199bRlREFNVpfT2N2uIo2cmxKBFOJMRW74PvFKqVjS1SCMjdw5y0FE6SttoG3nJG6fgrabV5CAH\nVVN1TK6o+l8zNccpxOyfi1xxKij710ZtRBRLolkb7zv0vhgiqVcShe9XG7XFnRczDx8SKd78MYWz\nbX5CEE8Bw0iXJKSzP0+kEtVrZND7AeYJkxZpJIolgcuWRUknH1ZbV6d/bC3VUC/8Vb0tv05PLd22\nTSKrtbXaYa18rmdDQzxRT0Qa+bqYeg8E9PJj9eY5lfzVXAzBXSrGHWmjmdIiYYuF5GpTApqWJoNr\npvgp468nrfWpQN1OojEnIzVsfR1JqmUrSWQypHEcXvF8pJPot81Ec5uItoa0yeoJig1p1TJCYghR\nfG4pwzaSnHWThdrqwUsvE4iomN5QFE8jqmQzZecjYPY41
nxHxR7VgojwjCKZoqnl/qqnjqkJG58L\naaYP6vzKNmqjEIWojuoIBCqjMmqjthjn2lW0Ks4MiLVrJ7uynFcs+fxQfj92HPZPrX6q/7Gc0/W0\nngqpkNbROmqlVmqndnru0HMxhFhLzeykTnKQI649fk7MPHwwqngLZ9v8hCCeAoaRbghiOvvzpJWR\nIrPhqUTGVFsWXquX66lXTkTdV74EidNJtG5dPFlk27rdRHa7pIqy9bxxEa+WJqvdyfe1vT2e8GvN\ngVZupxFizeZCTRQzoY4L5AgskNE6+zupeU8zbdq3iUI5UzIiwa10mnfZyaYs1SlNR+FspsSkhl+v\n3k59HPa+gYhe53aMBKLTpj5eiIgc3LI6jfEw6E1/sjEkwwkKUx0dVkinUWjNs7UqqNTaJpkYZzcn\nNHZ0uUIw9ZAsz1Pt/qquj8mDN99hobJGQnTVfVDnZTrIQV7y0m10WwyBZCGsaiXRTnZqpVbNsFpG\nQhuoQXH8VZNBfj8b2RQFlyew7F8Jlegei82nlprJclfVbrwVVJFQpUwFIQrRKlpF62k91VGd4fMi\nkJsQxFPAFLKlRKmJUGur5Kj6/7P39tFtnfed55cEQIgvIgG+GaYp03QiK87YLhmxcRLGBVpT9ZB2\nQ9QTbhRvDtOzO+DO+GS3ezqxN+2cnHZ3JzOd05w5090507VmWuXNTCNbtWVFVhwqAWlVSezaieg0\nTc02Cd3IDi1LASVLFqm33/7x4Ln3dx889w24AEHpfnFwSAD3Pm/3Eryf+3vjffqBE9VNtrvbHrCm\npwUoSoshj8eMREQ7dq61vMRJX5/YJxol2rlTP1a5bXu7+bksRcItrxxg5batraIPdR5yrHKOY2MC\nQCWoc1dhO8ur05rK9pNJfVKmwUFz7F5iX8uN0w21QarkSrR4dZ7+XFrUKnwMNDV37d9ZcFsyr0u6\ng4g6SCTmWSZ/Fk5VXmG4XbOd2o/ltabhHJmxk8PFt3NkgmezzXwqnYNfGMxRjlL0DCVpkcYc6n3q\n1pmPfZD+xndcoVWitQJ10BQdq/Hldb2jplVO7pa6six2rqJEVgsaB6cttMUWdnKUM8Cui7polEap\nj/oMCyCPlYxTvATu+qiPClSgVmot+ayXeg04VD/roi4jk67MbCuTC6ngK/tZpMWShETy92ZqJgnJ\ncj0lXPJ9pFsuXx8JprzWqpxrO7VrM+C6ycmKHBTQhtoYheAZypdqZYnSgZBal9IJTqTLkQQcDnES\nZnXzUN1IJyfFe6OjJoyqlkL+Pi83wvuQ2wwNEW3fLl5HoybEShjkpUhUy6vbU42b5fGl2ax1TVVX\nYTXZkpNVV2e55seCz7urSw+XbueRrg+nRE1uCuKGSehqWyrfJU3SRASi8U+NEx4DjewfKbF4Vlom\npT6tqaUq53zqIPMfZz85g5cbdLnhxTQJwE2TSBTk2bKnaTjNxj2peW+i+F6l5UqsylGaxXB6+Xcl\nLm7zYp+FHHX7OI94wqMOepFQdJss7+K4PBt4kN9R1aiTWI02ndwtdfGdTmVUuHTwJsFMxmCqtTJ5\nEh75kG6ujdRIR+loSYxmL/XSNE1r3WFvpBstLqx8DLo+4xSnJmqidmov2UeCqlyTIRqiPuoz4JBb\nY2Xm4DSlCXlrO5PFv2AO/Ha1Vvm4kpT0lH3WLrGT7E/OLcxmu3kVgmcoX1KtadWyfKpJbrjbqkyW\no7OCSt1/f74EODmkcndYbjXk0NTQIPrnYOlmKZQxoXwO0aj5+cSEFWwleO3eTdTUZLWmTk+XwmUk\nYl0Xaf3UwTefu87lVo13la693JLpVRwUda7GKlw6jcWLi29PjzO4Ou1b7g2TEDxL5VbSpAQii9fT\nhQ8VaOrQlPaCvtLSM+kD6U1hTS3nfOomAUQNB9KUPjROy2sFW/BKk/lPtpv8u4Dy/acc3ucgqiYd\nktJhlO69YG1taRqnQ
8U+/sGjFXicUNyn9cCvOZ5HulI1PcQvbv6ygqQn/lciRzn6lfyvBAZ1Xlwl\n/VqUy3W/LBdYdfGdWcqWuIryWEVuIdVlgOXj5/ORYMXhaIRGLEAnrZLSkikfavkS+eD9N1ADdVAH\nbaEtFkiVbekAVffopE5KUYp2027LPvL3IRoy1qOf+i3geQfdYXymAr9aa5UDorpuXs8RuYbcqrtI\ni2E2202uEDxDeZIEA+m26ZZZtlKpSW54WRPed0+PsN7dcIMAJlk+RAXCoSErpHIrI39K+JKApz77\n+pwthdzKJ8eeSJifxWJWILzrLrJk2AWIBgZKrbT8GY8TLS7qkwBxF2UJpXfcYXUB1kFzLCZeT06W\ntuX35oLsx67+p7Qg83hXL2DIYdWttqjTvqHrbnByK2lSApEerqcrLT0zfsjemrrZtUxETQyse+am\ntBf93LW1lfQA6SY7m5v6fpq1H7PpS3fYvaCVDmy8w8540VX1m1TwGMNZoAJN0icpS+s05nIepal0\nrmas6yWapE/W1BrjDRQFHHiJk/OSMTRNzueWCozlZiH1A6y8z920m3qox6ih6VbeQ3Ufla9VeBqm\nYct8JHgu0iIN0iDdTXcbkKmrwzlKo0ZioBEasWSb3UpbCSRcX50y2Eq42027tS68bg872OU1O3ny\nI7kOHdRB3dRNy7RsWWvuwtxCLcYa8DWV6+YkHmcrEzvZxdCGVs/NqRA8Q3mSCga1uJC3y0Db3+8M\nh3x8w8MmTKkgpSsdooIuf27daoISB/GJiVKrKAcoO5fZjg4TlDlk8kRCdk/pdjw9LQBOQjd3r5XP\naNRaz1ONd9WNWXfM/Wp6WvTR12e9MaC7aeHlfOLg7DdRVTVqc4Yimv72v6KeL/wBjR38iPbivByI\nrLT0TGGtQFNzemvqtSAJ1m37RwhrBeIX/RLKeC3KXtIDpJq11mtCH/m+tG52s77k064vvwCZZm3K\nOaYWcoQDacKhcZp0PMaV2U/dzqPqW2z9yRsomhfwbiBX6sJaesRM9+K/ozH6qOF+qoMR2ZbqFuvF\nmsnnprNU2s2Rw5WsoamDYNmmCmO91EtZytIyLVOWsrSNtlEXddEYjRlWOL59IzVa3FwlfPI6nFto\ni+Xz7bS95Jg0UAMdpaOONTtBoAQlSkq/RClqicm0e/BtZNKhDuqwwCa3uLZRm8XS2kiNlmRFco7d\n1G1Zg17qpQmaoCxlPQGi7hxRz+0CFaiHelzPYT83Wpz2DxMZBasQPEN5kgoGlV7I+3Wt5ODDwYW7\nmwIi4c7YGNFXv5ovGZ/anlPpkELB6iKrjkNtSyYS4k/pNlsoEDU22kMkB92hIevvW7aY20nQ5i6s\n3OVUQqZTP+rYec1SmUjJ7pjbaccOAdHd3VYXXdXqLJ929VPrHQyr4mpbq4KAVZKbW+umqF/q9RgE\nfKzKPZ8kEI0VoZODT5pKAXCZ3DPCqnDnRXz/ePHnMJklV/hy8XIrU5r97frVwV2SnXPZDXSl3kjI\n1Gmapqkj3+FoAaosTi5N6hET7sXfJh7PqloNdTDsBKc62SX90W3PIcWp/iQvEaJmgVVBTq4Rt0Dq\n4jl1D2nhbKZmCyzJ9VHrfcpHH/UZc0lSsiRuUyYDUh8TNKGN8XR6SIuwtt186fbqGsmkQj3Uo3VP\n5sfJDuacIM8LjOrPWO83Wtz2D116g1MInqE8iYNBEIla/LpW6oCoq0s802lhdeSWwnQ6b2yfywnY\nkVCmZlq1m48EwK1bS8fB4xjHxkSpFCfLJM9qC5ggOjJC9O53C3huahIutHKtp6etUN3dLdyFJeTy\nDLfqUwXdri4zjlXOq61NzHvbNvH52Jg1aY9TLU8utb6pepz4c3jYe7v1pqqAZ5rKu+rfQPG/l7ED\nG+fWWmkSIkNp8nYMvG7nUZWeTzrwkaA2RNaEQDrJbTuoFO68iEOhDm7TZC6Xrg6nl/Q
5qnV1nIjS\nRYvv8DXoSl2J0pQ2IMEN4JZpuYw4Of0RUwHALulMyVjJhC83gLCOwhk4OKTw39X9dGNQXWr5Y4AG\ntO9voS22FsYRGimJ53QCOG5BnKAJCxyrbrcS8Pg+7dRO0zTtyeIpH13UZbS1lbZaYlJBKAHPOMUt\na5egBO2m3bYArbrX2sGcX3dqNZOvDlzlMZdj8+viXa5reChnheB5naoSeKzUBZPI2ZrG3Vh1yYMk\nmHHL5+CgADcJXdLaqI4XEJDqZT6yn927hWWRu6uqMaLSisctjq2t5u+qW/CuXWLMo6PWz+Jxsw8e\n98nHp1p8nZ6plFhDvk82ax27zuXW6diq544uIy+RtSxNY6NYQ79Ji+pVgUFPADUx7RTYGBXxv5fJ\nj2+cW2ulSYgMeT0GVTxWfuRkePVjhZPb6qDRi9z6cgPTAhENkrCG2rn7SqXJvGCYvMZdqcuV34tk\n/xfV+iOuWqOcsszq+raDU/tRuLevQkgHdVAjNRourHZjkBZS3cMteY+M2ZSPPurTutHaPSZowoCv\nO+nOEjjWWVjjFLdAZoYyWiufXZIk1TUYhBLA1Y1TxEJP0gANUC/1auuDykeEIkZMqLruXiyYO2iH\nJa6UyD0+d4qmLHC6SIu+zjE/51oo/wrB8zpVJfDoN75TB7mFggleKvx6HZtdCQ91X9XyFo1a3ULt\n5qMrxdLTY45XxprGYsKimUoJi2U2K+JKeQIcaTWV26sJhXTjdsvIq85Hvnay0Kpt8EQ9/Kkrp6Jb\nD+mq3N9fCpU6V9tqluCpVY1ZogChp4r+euWO0WuN1f3dRJecaKHKqjQJkSGHY2CB9zfX6sK3Mk3m\nP896NpJ7ObXTVHpBoJtTnTB/XcvvRfJGXlTbZUStVkZeDkZbaIt2DPI9p0y2TnCluuumKKUtkWIH\ngKq1NUYxo80RGjFKnzg9uHuu20Pn+ttIjTRKozRBE7SNtlGCEhSnuJHwCCQAWMZMqvGlTo9+6rdd\nd50FU4pbUmUbOkhV3+MAnaUshaofheB5naqS5EB+4/HsQFJ9X016Yzc2nUVUhbFbbskbbqNjY9ZY\nRvlsahJWOZ5hlccrqu6zlpIun12g9v/zAOFThwjNayVgytu99VZzv8ZGAae5nD45UiRC9OCDYtzS\ngtvWJqy0HNp57Ke0Yk5Oip/Ly6VQrx4zNVEPh+CODr1lUgVJt5I68pg4lXwJUkFY4p3EXSMDg54q\nynGMDmYzt3WUSaOOJ8gXAQVtga1F/GhgNxg08utqKw9ZNxE9liN6KU10kR+/HST8ZruJyqjXbtuf\n032FSsNeJVC2kzNYVvH+zDUlr+dUPSRN8Rvnqe7j1aJaoILFKigtnnaSVs8EJShWfIBEmREeCykB\nUwU3O/AqcWH18YhQRBu32U3dBlRL2L2b7naF50ZqpJ2009aKa4nVzcOSEMnro4EajLE1U7MFKFUr\nppObrXQJb6EWow27mwb8PQ7Fk0b1YH/nUajqKATP61S1TOYiLYPt7VagUeGXX/D299uPTXdhXChY\nYzwTibzFCiohTn3ybLRqoh4VVmXG2JERotH95gUpZuZKwJRbILn7bSoloFOXBddprKmUdT343HTW\nSb5GsZjVTVin6Wmxfr299u6w8ngNDYmSLzy+VNZWVa3adsmbpIK0UlY70zK/qNsMSXMcx5gmW2h0\nW0d5bh2S+3s0Q1UT4qqlat5g8AueaTIP2Xf4ix4SBNfO3uvXNlF2f3Yo4GUbJ1Xq7hvKKq/nlO5C\nv9YX417jPMsBVBVCOPQN0mDJPHn2U1kGhGd3lRlxJQQ5xYKqjxjFaJEWPVsivTyaqIkiFHF0//UT\n58kfUYoSj4m9LX9bSVkVdXu1LzU77gRZ45tUK6bOgimPSZrS1Ed9JZZQN8kbCLwuqe7c8xLfHIJq\nsArBM1SJOAzwZDPlXsyrsZh2yWu8goPcTrW4qVZ
PCXdDQ86gJy2R6ns33mju19oqxj0wIPqMf1pc\nkOIP9hsWTwmYXV1m7c7hYaLOTvG7jIHUuaByi6f8XE0cxK1Pcq5NTcIyK9fAa6kU9XjzBEHcasuP\nPb9ZwefQ1GQdqx9rY5BWys2QGddOtXQTJiJH30XtOjLT1keLrtmZIaJ1tww2vMtNYCVWFeQNhkou\nXnhdziEqWjpBRG1k/idtKv5sIcPiWYlF0ot767M5onya6K/HiVY34d/d9SrdhX6tM3h6jfO0SwIk\nS5o4/U3JvzkJjLrstmofur4SlLB8Zgd1W2mr4b56F91l1KEkElmHvbjx3k63G5ZT7iLcRm10E92k\nrdnpFGPpNOZO6ix5f5EWiciE92ma1pZsAYFaqVW7bk41W3OUM/aXllCdBdPufPT6PerkSu43vrnW\nfxvXukLwvMYUdMZZDjBBJBLS1XCU4+Yur06ySy40OmpNMCQ/y2ZNEFSf0WhpLCWHQPW9hobi781r\nhNycxc1WPrnltbdXuNbyGEhdtlf5HB01gXx5WV96ZMcOMwsuz5Y7NWU9dk6lUqRLcTxutcjyOXML\nsHrs5RySSatLMre+ejkXg7JS1hzcAlY5AF7RnP36LqbJ+MZezxLNDfqP79wMVuJqyrx4eYy66W99\nwWCazH+YWSLz+I2RSYeLJCydy/r9/H59ezlFLlXSQagNU7nlKao9Bp34uHbTbouVz62WIweGOMVp\nmZapn/oNWEtT2gJJEgxjFKOdtNN3vOdO2kljNEZt1Ebt1G6bEMfp0U/9xto8SA9aPnMaj5OFM0KR\nkqyzcYqXWDKTlDSATgKeXRxnX/Eh2weZGWydYjb5OqiWULvjbgekTtZrJzD1G98cZrcNViF4XmMK\nwoKkSzwjy4b4gQopbkFRy5DoMs+qQGpnfbUDWgGfeQMsl5f1CXTk0y7Jj/rkrrR2z85OPeRGIgJI\nl5fF2FU3XgNoYU1gpFqfcjnrfrIdCW7crXlx0Yz7VI8Rt3Cq41ePPYdCp/jaZNK+jqfduejXSml3\n3lU7vpOoSuVUiioHwGsxZ0Oq+StNIXD4lLx4aaOXxNLl856Xztb66EKH1UrKIy2pL1Y7608AtVOD\nLpWbW8hR+kCaxg+NVy2zbrnW8Uq+o+o1g6ddDc8kJS11Op0sWxxOucVTwouEJLs4TJ1lz+sjTnEL\n3PJEPeqjkRrpFrrFiH90cnH1+miiJtt27GC1mZrNmNK8eE+1qk7SpGUtpTtyP/Vb4lHVGwJeIY5b\nXPnfAt/fzXodlHWyXv82NqtC8LzGFIQFSU08wy1fEorsLJde2tZZ0uzGzS+u1f14vUtptRwelsCU\nN7aVgCQBk1tDYzEzGQ+3/KnPeNw6ltZWe0up01PWueT7NjSYbXO4jcfFdmNjRNu3C1jkgAoQ3XST\nsEr39YljwqGXx4WqwKZLtgSIJEb82KtQaBdfq8tQXI2YSzvYqnZ8J1F1wbMcN+FazNmQCjjXcppR\nN1Ipk2TkxcsYXSQQ0W35vOfdp0nkDBrz16Utl1YKY2kSh7+jQHSsmsGZsqMKbnAE0IS1vQNpwmMg\nPAaamqvOXZdyL56r+R1ViYKKkZPQIWFqjMYoS9mSNmV/YzRm1NFU64xKoORutNK9VloH26iNeqlX\na62MUMSSTMjJ4ihrcd5MN9PddHdJ6RW7h86t1u8jRjHP/ekerfnWklIuHdRhWctGatTGmyYpabFE\npihFHdRBvdTrOWZT/Vtwqs3Kz5GNtE7KucqbIyGwWhWC5zWmasS5cSul1apoXvT6sYCqF8xObrY6\n66uM7ezvFz85xE1OijZ5TOfNN5tWuslJqyvs6GhpzCJA1NxcCqLqvioE8ufWraVxlg0NZkZbbnHs\n7RXuqlu2mLGSvAao3bOx0d6FmMOZ2t/UlNU9VkKo6o7r5dhJ2QFpENZML/1v5vjOcrWhc/brqltN\nFSlq4fdzdGB
/mg4dGqe1MixREsYW0+RMKm6fu6icpauwS9/tuYFpze47BNCR1ya8wvj4oXHCY6CR\n/SNVs3jWw8VzkAoqmYuEDrckQ7y/ARowwG+apmmURqmXektAkceaLtOyxY13kiZLMtruol1G7GiE\nInSUjlIzNTtCHG/TS6mVXbSrpOSJ7M8NNu0+S1DCyFIr20lS0tbau4t2WWC9gzos2WVV4JTQnqSk\nBS5VeFePm90xd/pb8JLddiPkNtfrXSF4hnKVvMBV3VV55lmvbn86yFT3zeXE5zJpjcy0qovt5E8O\nI3KsQ0PW7WWtTZ45trvbhMTOTgGt6bR1XFu3ijHoINzumc2WWhYnJ52TC3mBWgmd/LVdPOrOnVYw\nlzGYHOCcss7anQvqtkFY33TngQqi1yNghnJRmohAdOD30vTYY6DHHgPNlWGJKjZDh9xIxQPJ6CCm\n2ol+gmwvTc5gWrP7DkpH5bi5eh1rmrzBfWGtQFNzU1WDTqL6uHgmKt9SqR4nGVfZTu2eLF1uoCph\nJE7xklhK/rnqjilBUX1Id1g+bxnbKOMWVbfdSZo0YkaXaZlylNOWPOHxjzImsp3ataDHYbid2mmU\nRn0nE2qmZlqkRUsdS/mIUpSWaZluoBuM9/qoT5tAiEPsIi1SlrJGsiR+XkhraDM10wRNWBJF8WzB\nPMZ0iIZKXGjtjjn/W9gs2WX5uSLPn1CmQvAM5VncXXVkxBpzqVoj7axWOriQYDQ0pLc+qjAr4xgl\nnG3daq1zSUR08GC+JK6Uw58OIKUFlYMjt3BKd9JUyjpGaaWMxcz95fqoVtOJCefkQl6fst22Nqul\n1OnJYzCle+wNN5juvF7Knehg0E9iKCc5ldepegxjUU61Ju+/P1+7BEZBB6ZdyypS1KF/O06PPQba\nv3+kLIunhLFMgWjdiVQ8kEyaSiFGfc+PW2TQoOfWXr16UlfTzbVe5+xHQbvaluvyqx4nr2VQpCSo\ncusaV4EKNEiDFqthP/VbXGylCy6HUB4Tyl1sG6mRuqhLmwhI1oAsUMFw2+2kThqlUQsAuSUPmqRJ\nC/DJtviji7pogiboZrpZC7FOjwZqoDjFLVlpB2jAso1M5sMhM0tZ57HnRdvSZTRHOQtETtCEAd9S\nTomJ+qhPC5perPzViN+shgpUoEma1LqBhwrBM5RP2ZXU6OwU4MFdOHWw4AQX2ayAGLWOZTQqXEol\nHOksnmpf/B/w9LR1295eot27rRldOzsFhMnX0u1UQm4kYoW7m282614++KAA7rEx03o4Pa1P4HPz\nzdbsu8mktV272Evd0642qe7Z11cKS2pSJd3xUuFPB4O8nWw2mHNLPVeCKOvjRbpakxK229rytQPh\nNJWSy/WqHFHu0wuU/twBGj9QekNAUtTamwWam5sqCzpZM74uE+wscDqIHj3k7QAAIABJREFUUd+r\n13g8ovrypOby4+bq995Nvc7Zj4I+p8p1+VWPk992dKCqWrs4hEQoQsu0bHlPlvUoUIHaqM2oNxml\nKKUpTYu0WBL72ERNNE7jtkmLnFx9dXU6pWVStsNBbIImaJImSwC0h3psrY8gZzdaPm8islg9pbWT\niCwW06N0VDt2Dp58rmqCJ102WTWBkwRVuQ47aIcxhjvoDuM4qVZ+bjHldVT9nI+bxUp6PSkEz1Bl\nS2c11JX/4HKCC25RtXtyWJTupmqGXFU6CypPVARYLZuAsIoS6SFXzaLL40mlFVdXN7S93bqfdFWW\n1uLhYQGuXuAzEjH3c4sLvfNO/dpwF9xEQr+Nenx0LrUcgCfss6P7lt1NjmpCn67WpHr+1CSZz7Vg\nfglKaaL075XeEKgHqZYdCaJjh8Zpcq1Aa4x+VgtWsAmN2v7lx801TaX3bsKLUH+yc3N0q5+pHiev\n7pK6ups6i+IgDRpwFqUoLdKixT1WhUK1lIgENOn6CrK6uU7SpKOrswS1buo2M
rn2UE8JFMYoRgM0\nYFhH5RyGadhYwz7qc6yLyduyi/lUH73UWwK6EmrHabwkvvVBerAkVvNd9C7L6wQlLC65shyNnAfv\nSwJvK7VSL/XSIi1a1pMfjz7qsz3/dJZYWW7GqzaLlfR6UgieoRzllPBFjf30Gy8o2+AZUe3KfKiA\nq3uqQCLHLuM3pWtuR4cVJoaHhUVQvr7rLnP80uKpWg7V9zmQ2MVwdnaaUBmJiO102WNVS2ZLS2lb\nN91kurcuLpbG4N5xh4BAmWxJJ7l9ImG6yMr4Wul+qx5PXYwlT3DELZ5uyYL8fK4r7eKkcmtc6mpN\nStgeGtKXpqmKrgXzi07l0NY40finijcE9u33Vwe0ynSnWnZKXEHTVEo/RTl8VKvhbwqpoOI5CRCV\n3rtxvwgNV9xOfO14rKTfi3m7Y6C6cKqAYRe3mRWVbUsghceT2sV28mytchsJk17qQKqWPd2Dx2hO\n0IS2NIx8SCBspEZby6Yue+xtdFvJ9iKD9pjxuo3aXMfKH1nKGvsnKFFiUZYPtV+ZpImveYpSlpsV\nMlFTC7U4xvzKY65aTP3oWkvUdS0oBM9QjvJiaao04Qvvo7dXD209Pc5JeWS9Tql8Pm/ZXiYq4hbN\nyUmigQERI8nb2rLFBGHuOsz7UPeRGWbHxqwQK2HXzhoZi5mlUCQkqZlqJyas69LWZh3X4CAZWXud\nYFOFMbdyKV6ti9xia9eWHCMHQbdzS3XD9nOOBWkhlet08GC+soY2mcqFd0elyZ22VBWICh9fo6lD\nc/6gs9z+fEi17JS4gjpYrt+fz9NjOaKX0kQXbTinysPfFFJBJU3e1kR378b9ItRr6/Upe1fbyoHa\nLlYyKBDgbqEJSpS067WMBgcsCbbLtEx91Ee7aJelvAqfx27aTXGKW9xQdVDNb4RIiynfp5VaDRhT\nY0kbqIESlKCx4oNDle7RT/1a0OSPCEVojMYsfycyoQ2PNZT9eQHQERoxMgAn82Z2WhUE+WOYhi3J\nh1RrKwfRbbSNGqiBOqjDsdyIPOYyYZGbpd2pjRA660cheIZyVC1qBaoZVgcG9Flao1EBoNJNVk3c\nE4uZLrf33583XEnV7Vpbze2cYFYHwm1tzlZZoNRtV93faV9dW6OjYrzcNZaXs+HuuzrAk+JuzNKV\nWEq1DutA0k5eMt3yMcpasBLQ29v1SYn8nnuVWEid2pL7u8VPVQXUypRTkiSvqop7cz+Jb/12Io9l\n3CpTNV2WlWv53EKORp8apdQXU7R8tjg5B8v1wXye/jZNjpxT9vADNNw5NVUL11UVMio5pO4XoZvb\nx93+OypNlQB1jnI0SqOUohQt03JFF/N2+3JQSVLSk8VRF3/pBsV8X/67as2TtSpV8e04FHLLX5ay\nRrvc6sgf0vq5TMvaDLQRilCi+ODv30V3GRDHXXNl1lme0Ib/fUqwlmP+Z/TPjBjXrbSVQKA76U4D\nHo155k0An6Zp6qEeSlHKaOcOuoPaqM2SXVhdyxEaMSC9gzrobrrb8rkb4OvcrYN2mw3d8GunEDxD\nOapa5Sv4RbrqzukGg5OTArDuvlufYEdNgmP3bG52r4M5MWGN19QBMf/8rrtM6NHFecZiRNu2lcKw\n7iktofK1Gv8qY0TtAC+Vsh43vladnfbWx74+/y6lbqVP5Bj5vDlI68BGdcN2Go9aq3RyMjgrvFfo\n0u1TVRh1IAJdkiS/qspNp1Fyvv5V51QpQFXgsszhfXptrXQYaSICUe4TOUr/cZqSe5P+M666cE7Z\nwy+OrUzO8NxU0BeCumRNKmRU1wv9WvVxrwyoaxEnJwGNw5TXWo9c8nxRrWNu+6oJdiZoQruPLhFP\nkpKOCYl0YCmz5eYoZ8l2207tJdbCFmqhJCWpl3ot4M8trmlKl8xL5xrLQVW1KHJglWNoozbDKqlr\nL0tZC3T3UZ+xRsM0TDfTzTRKoxYrKV+PI
RpyBXw1gZO0yAYJimEsaO0UgmcoV1Vy8Wy3r5P1TS03\nwoFJJhLigMVrXnZ0WEHHCRL5Mx4XsZLc4ifb0sVY2j1lzVEny6bdGPizrc1aN3RkRGTilfvyhEo6\nwNNBkw6ypQVX164fOYEaL7fC4VBak2UJHlnOxo87rq5/Wau0EpUDXbp9qmI1lEqTLRHokiT5VVVu\nOvktIKm+rqE4vPfMzZUOoziX9B+mDeD0mnHV0DQR9RDRGAXLOgEa7pyaCjp+qprlUjazKrfGVAbU\ntYiTU2FKV0rFCQ6cst6q2VgHabBkPXm5FAlDuv4KVLBYOhuowYDBQRrUxocWqGCJJ+XWVBXmeqnX\nYiUdpmHbcjRqjKm6JnbZanlyI905pQNMXvJElnqR5wNfD2n1tLMkt1EbpSltZPX1k2DKzkIdBCgG\ndY6HllN3heAZylWVXDzb7cutXTIhjYTUsTErnMm4Re7CKsGVgyJAdPSoaOs3fzNvAGlbG9GuXaIN\nHhspP+eJfiQ8yJqXuZyZPdfrs6fHCsPlPmVdTHnxr8v4K7Pocuux3E6FSDUL7siINe6Vj1l3nNXE\nQxxInECNnwMSNmUG36kp5/I4XgFQPW6VSgddbq62un2q6qruQAS6JEl1IbfrX+mK20HCFXcDPR/H\n/2MR3v94P33kzbXSYRTnMn5AxHUOPzlMk9+Y9Ayd+Xy+emAdoOHOqamg46f8lEvxKruSN26f1ZO8\nXmRXq0RPLePknGp+6uBAVzNSVxfSLjkR70Odp6wnyhMVEZnJiiIUMepmEjkfJ9l/kpKWtnRw2Eu9\nNEET2lqk3HrL4yZlXCfXNE1b2o1SlCZowhXcLLGcebNfuT67aTf1UI+tJbSbug3wkm0N0ZAFvu3O\nY96WUwbboG+GBHWOh5ZTd4XgGcpVlVw82+2rS0ijA5TOTtMKpsueq0JLf79oK5nMW96XcMsBZedO\n676yFidPzuPkshuJlJY+0Vk6t261utfq3HXtnhzK1f10WXQl+HAglxAnwYjDrNyupcVshx8rDrXq\nWnM4dbKOqTG8dnDGgdgui66dBb1aLuFc5VzUyXGtTVNgMXdm47T5vAJzRJQioiQR9ZFwvR0nyn2z\n6Nb62UNUaF4zQczrHKuQjLQwtkZTuTkqNK/RlRTRJws28LVWoMHHB2n0qVFfAJPP5zd7SGHg8lMu\nxaucrKibxcLq9SK7nmvDepXdXNU4UyldPKEav0lkBQuv62kHqMu0TP3Ub4zDzkrHrV+qO6uUTACk\n1vPk/cnYSgl63FU1RSlLXCcXX5sYxSzrxqF6N+22WOm4C246X+rCy/uXVkv5Hk9eJMcst/Gy7l6P\nTb0mDQqz6LorBM9Qrqrkot5uX/n+9LQ+IYwOLHt7S2MPuWtpczPRrbfau7K2t4u+BgZEuxzOeNZZ\nHhtp57KrezY2Wi2IgJkJ160+KX9yy6N0Q+Zw2d5uhWM5RlnjNBo1LcpuNwuWlwWsLy/rjxXvl89h\naKi03XKhUAfEdqqq62o1lSbzG28zjTsoSTBMkva/QPrfspjU3Jx/EEuT4/qWZdmSUGjXLoPd9P4y\nAaZGNw9yCzlKfSlFyb1JGjs4VtfWvaDlZEWthoWVKyi3u6AusjeDG6DdXDnsyBIqRMxiuADCAVD7\noXYaW9NnSpXz5zDkJAlnOrdfLg54TdSktQS6Wb84vKnQorbDrbsyVlQnbmXdTbspRSkjHpUn+OG1\nQb1Y6Xj/smaodDWWyZB02YW9nMf1CpRetdnHXwuF4HkdqxqJT3Rt2vWjJoTRlcxQwU9CIXfL3bVL\nJMRZXnbPOCthUP4uE+nwGpgc+NRapX7cbrkLL3ct5gApLbtyTCMjJlwNDQkwT6XMz3nNTSk5RhV6\nm5rKi9fkUq2V2awJvepx1UGhn/PB73g2OnOsL13vlq00lX7zbyVjTcY/W3Rr/cx+KrxrrXSN3Cya\n6voq2
5dl2SqQsM7aHTc2p/HPVRdgvMgJrvn8a2LdCzKrboXusE5WVDcLayV95xZy1HGgg3AIhLX6\ncLvbrG6AOcpZ4gg5bBkxhgfM8xtz+vnp5u8E405uv1x2pVzsXGT7qd82FlRN8qOzpMo42K20tcRa\nyeWUtZdDrt/yOGqmXrk2vA9etiaEsFBcIXhex+Kg4FSGo9w2JXzYWan4+3YJYXSxjWrWWt6macXM\na2GQu8LyupyFgtVS2tVlXYvpafdMtPLz4WFrQqQtW0wglv0nkyJZkEy6s7hoQje3BqsgzRMxqQDH\n3X6bm/Xr41fcWukGml6T61RitayFS62dKnJj24xusQ4yMr7+x0NUGFtzBwwJhkNEtI2IukiAyaTY\nr/CRolvrhzTQSaS3aHK4WSaiQTJcd9XsuY6WLe7+y5P85IrtpEhf+kXOqY2oMF6gqUP+XEQt51MA\noOYE13L+eAw09MRQ9eE4TaXHq9ymNtAdtpK++b7JuWRNLr5131EcrCqpv1lLOSUK0pU56aZuAfiP\ngbAfNLSmz5Sqy4qqxobabe/FSqeurwqSdkl7dHNWt+fxjhyI3ayVunjQLuqiu+luT+Vx8vl8ydjs\nrLN8vexci0OFCsHzOpZdGQ5dVlIvUJrLmZY9HrtpZ6Xi8Za7d9v3weFzZEQAmewnEhFWQGnZW14W\nVsydO/M0OSmArq9PWEWzWSuQybnK+cnkRRxOJZDrLJa6pyxxYrd9U5MA3HTaWiNUTbCki6lU3VtV\ngJP1TQETouWaB2HddgNNr8l1amG1rIY1//7787Wt01lSJ7Ly2pxBjSe9X3GNdQOMaSLqJgF2HApT\nJECrQFZwVNdXZzFOs3bUDLiKpdLRssX34/NQ21dVIJGRVreNB5C0QIJbXzopfTjBdWGtQNlvZH0l\nP6pIQWbVrUbCIY8up5X0LfdN7k/S8tpyTRIZ6cCTw8skTXqGgY10y1Utk3aJeaSWaZn61vpo19wu\nmlyzd6F1sgB2U3eJFVIHZE7r4uZmaUna4wGA5fZ8bNM0bWw7TMMW2JVtcYsqh9Q+6qMsZT1bconE\nOaUeDx5vyy2uEjaDLnUS6tpSCJ7XsXidRGkpdMtK6rWkBbfM2dVjtLPs2dV0lFDD+1EhkqgUOvjr\nrVutcKa2199vjTXVZVyVTw56gABgOUfdGNXEQ9yCqovllJAcjQpwVt2UJdxKIFVhV0Kwn2PoJC+g\n6XTcnN7zIj8wGcR8a9Gmc4dkgZEganP6FV/zSwwYxz9nZnwtNK+5A0ba3NeAQj+gp7MYq3DDXy9r\nti+ZXLHPbtZvJ5nwO6a0r5MdYDnNxU87TlL68J2YpwoJmQwFaOGvSsIhjy6n5SaOkvvycfuxngYF\nfQu5HH0unaRPjYM+VNBbAe3k1y21XDnVyrSzHPppy06yj67iQ8YmSpfZDuowSoNwleOuLMfVR33U\nRV2UprSREEgFYF35EA6KquWSx6vqLKrywbPe+k2Ao27P++HjaaZmGqVRRytyqFAheF7n4hfTjY2i\n3Ih6Ye+3pIUfeFXjPLnLLb/o1SUh4jGN3Bqo9sVfSxfYSERYQ/m4ZfkRnuSmv9+Ev2TSatFdXrbC\nI3evVcu/qJ8DJuzzsfM15GCbNXMplMxxYMBqsQUEYPNYULdj6AXq/ABjNSyOfsCvGlbVmseXKjBi\nV5uzGmstxdf8ZQmMbUSF8TWaOjRHhTfXvAGGCoW62Ek3+FJBSYUbJ9jRQVYzmf+FeologIja2XsD\nZFpp7eZn16dfkCy2Y2T39WLVrtSqmCbPcFxNRt0I+bnwDsrV14/1tBy40elAOk2PAfQYQIemsu47\n8PFq1iiocXHp2iw3QYuf8emgTs5X1qmULq5cXhMO2Y2LAxt3fx6mYduER9zyKQG5kRpL5qrW2eQP\nNS7Wz/qq2/NzQ433tINoVZsh0VWo6igEz+tcdllbufVwdFRY33RQyuW
1pIadu2gsZoUl/hmHsMlJ\n0c/u3QK2GhoEaHV3i/cEHOaNUizcmru4aGZx5ePm7XOo0Vk8uSVRth2JmCDc2ioAVk1Y1Nlp/t7R\noc/iyteQWzClRVRCBp8THyOPU/Va7kRda/XGQDlQE3R7RP7ArxqxoAcP5stus6x5K1BjV5uzmpZY\nvuary2R1LZVusl7E56LGTkqqGSOirEObHBQnfE6EW1nl9VeEvddHVhBLUkmcqC95sPhp3SL9WLUr\ntSr6ANc0lb8U9Si7JC66i+CgXH2dLLcq2JdbkkE9pw6Nj9NjAO0fGaE1n19cOjipRqmIctq0O17l\ntCX34eAnrXgt1FIClxxUB2nQm8u24mLLkxDp3J91MZU6SAYJ92PVQrqbdlOMYsY2uhqfXqX7nuLn\nBo/3lMDrBNFSXm4ShHB6baom4AngnwP4ewD/AOD/0Hxek8mGKhWPn5SWR7vkMJVc3NqBgLywjUbJ\nyACrfjYyYnV/dRqbaVXMWyyAHBZ1cotD5TUmZabZZFJAX1+fgHJ1LBMT1qy1sg6nhE439fUR4RML\n1PjoAUrvP0TT/2rNYh2Wc3JbJy/ycmPAz3EPuj2i6sCkH1WSXKhWcOhpbXyYr0rW3K3EiBellf3V\n13aKsu36HLbTzY+XcZGGn67i6xYSACznllReczDjbVdYm1V3PtlZtasiH+B6rSdldroIroarb2n/\n1j+Bci1+6jm1VijQ3NSUb+hUxcuQ2NWM5NtxUHCDh3Lmane8ymlLdxPibrqb4hSnRVos2Z7DrddY\nSdmHjIF0S/LES8d0UqexdqpF0y7mla9PH/VVBG1e/u+p8yvHfVenaljY60HXO1BXHTwBRAD8I4Bb\nAMQAHAdwu7JNjaYbSienOoryolYHparKseo4WRv5Ra9T4hqd+6pfCLODGt3aqMDLE+2o4KnOT+c2\na6fRUSL8nmkB6fmDOQtgy3jS5WUzhnZsrLTWqU7qsXK7MeAXZCttb8cOcc51d3uD9JqpTJ/Darrp\n+gbyNOlBz8vcCmRaD9vI2Q3VTk6xmbwtOZ5+EtZHCZ7NpM8yK/fpoNL5yXjN4WIfO0iUc2kgoqNs\nblNkAuUYGVl3DaVZ29z6202B+KHaWbX9qBoXNZUaV+tdG130vd7B3isA6LarBjx4OV7l/h24jZeD\narnnjRsg8xhJPhavgFeOO3Ct5eUmwUb/XVZL1ypQe1UtwPODAL7BXn8GwGeUbWoy2VD+5QSlqoK2\njnkdm3Q1veMO6ziDiEnUvc8hU8ZSTk9by5lw91jd9p7X5VPCAtL1+f3UceOacROAW1jVONaentK4\n2HITRvEEUEHEEXo9Jqplt26UJj20uWijrbUWVZoQp0DeXW51MMspxqlkCR+PfG5h2+na5vskbfok\nssKpen7xNtR14GsnYbbNYXsnVSlwkl/UDC4MBp5JNcjsrG5tldNXOcBRroUxCOUWcjR6IE2pQ+O0\nvEE1YN1kV4/Si6trNeDBy/Eq9+Lez3irdd5Ii+hW2lrW2vnJWstVb5a4jfy7rKauVaD2qlqA50cB\n/Df2+hMA/l9lm5pMNlT1ZFdKxYt0F+V+QFC3v3QP8dqOHYjp3i8UrPGa3d2lGWUjEWuNUO72K8HQ\nixV28uNrlD00R723rFksqSqs8wRJvB87uNTBvpPF2glUq5HcRlquW1ocQL3GGU/y+Xz9mya8yM58\n5WZ55Ovs1eU2zbbpodJjxT8fJGs9TQl2EhKdQHmw+FpmqG0iors1/UnJ7aSbrZd1ILKunfxdzX7r\n8bzM/0q+PGB1kcUV8MCokRhn8PFgIDTIuppubZXT12axJsiL/OSBZGDrWVGtYQepAODH1XWj4KFa\n1shaqBzXVa5y5647rtU6p65n1cM5tpGqBXj+ixA8rz05gRsvpVKu/ICgTvLL0ms7bjGeaj1MCUZq\niRTVBVeulQRTvr1d+Red1ERDKmz
L19y9WP4us/W6lTThayLrl8oxlZOxuBItLwtLp1N913Ktj+WC\ncj6fv7Z9DnmtTTvAky6lu4koVnyvtXQfucYvSsCzswr2F99rJwGK/L9HlES22UVyB2WeCKi/uJ98\nLV3bORAuFrfT3dTwe4zV7dM2c1WUf3++KjcxLK6ALDHO6FOjtoDjx7IYZF1Nt7bK6WuzWBOMi/xD\nCGw97//P91e9VijR5ljjal3clxPHWkvxGpt+3Wx1xzUEz1BBywt4RlGZXgewjb3eBuCEutHv/M7v\n4JZbbgEAJBIJDA0NIZPJAADm5+cBIHxdR69ffBFYXBSvs9l5XLgAABmMjAD/8l/OY37euv3nPw+c\nO5dBSwvw8MPzaGsTn8/MAC++OI94HHjuuQwSCbE9b296WrQ3O5vBK68AwDze9S5gzx7n8c7MwNj+\n3e+2bq+2DwBtbRns2QMcP262Nzsr5vfpTwOJRAZLS8DCgvi8vz+D97wHOHJEtL+6msGpU6X9vfji\nPAoF0d/amv7zxUXx+cyMWB91PoODQKGQwdCQWN/jx4F9+6zz3bcvg9VVc7wf/nAG27cDp07N48gR\n4PbbM/jxj835qfu3tIjXt902j6YmYGHBPL6f/rR+fQDgwgXxemQkg+ZmYGio9Hjqjo/b65//PINM\nxlzvmZkM9u1j2xfHO3/bPDANZOCtfS/r7fj64Xng+Mb8/dn9vQBAZjYDLAHzP5oHUkBmWwaYBeaP\nzwOfBzLnMkBLcfz/n/K6Dci8lgFOAfNH5oEskJkv9l88vpm24ueH54EOIHOp+Pn5eeAIkLktA4wA\n81fmcf+3gP9wJYMLAA5F59HaUDw+I8D89DwwX5zfADB/Yh44C2S+X2wPxf4uZ4CTwPz/Ng/8EZB5\nVJlfKgNkgfn/eR74v1n7fzgPfJydD9+ZB4aAzD9lgEKx/XeAzM9t1vv4PPAwkEl4PD7q9nK9RjLA\nHof9n8sAM8X1CPB8Oj5/HA/jYSQyCczeO4vsf8ni07d8Gv/18n8FANy2chumb5mG1Pz8PF489iIW\nexYBANn/ksUf7fwj2/Yfjj6Md95+B09/8mkk4omKxsvHl4gnfH+uHd/8w3gH7+DpzNNIIIEH/vQB\nnDh3An3DfZi9dxbHv3u8ovUN6nVLpgUA8K7ou5B6O4Wvf/LrFa/nucFzWFhYAADMNM1g39i+qoz/\nYTyMv8/8PeKI4775+/BZfBYPZB6oyno9MP8ATuAE+jJ9mMUsjs97P377YD//2cwslrCEC/MXfI3/\nxfkXsYhFIAPMYAYPzz+MF/EiFjPFv5/5LP4I9n8/1X7Nx/cIHsHD8w973n8Ws8jOZ/FpfBqJjPh7\nk9tsxPHbLK8/j8/jXOYcWtCCh+cfRhva6mp8G/36+PHjWF1dBQAsLy/Dk9zI1OkJIArgJxDJhZoQ\nJheqa3m1BqkWsELBTHDjx1XT7n03i5xM0OPVPVS1wpYbc8fnPT0t5ptKCQtdoSD6UZP76NxgeXkU\nLy7K09PCdVa1XHodr594TjcLp9N+QVs/ZR3V9naNy22Z1sea1+MMUI7rm6bSb+ApzWd2mWSl9bGD\nrJZAnUup6gbbyNrrptJxgIjiZO/Wyi2iTez3rWwfp/mp54Ic3xBZraHlWBj9unRXySpeqWe5U3bW\nIK2YGyl1jXILOer4i47AXFmDVDUscrU8jrVyafbaj1+rY5AxoPVkAa6nsRBtHtf3SnQ9zDFIodqu\ntqIPjAN4FSK77e9rPq/JZEO5y2/SGa+lMry6sjpJt61dn/l83ti+u1sAYn8/0Q03CNDzC3C6eftd\nK7eSME4uyuUCHS+X4we01ONb7g2JSsVrlNrN26/rbLk3HerB5chxfSXE6WIinTLJyiyucj8OdFwc\nqKRbboqs9TBBlrInV9X/BmoiomkSQNpAJmguklnqhO/jND+nscr9hsi5Tqid0uS8LnZyIcWS88ll\
ne2MYCwvUfeAAjR86FFjJlWqUDAkyCZFXpUm5v8JiRKN7orR8Vr3zUXtVc10OPnew6qVfpGoFOF77\n8XvxH2QM6DRNUzd10xiNbQjsceguNy7UTpX+36s3EK6Groc5BqmagKdrByF41o0qAQenfe0son4g\nwKmkitpnPp8vyXprF4NZrvyulZ/xV9qXW79+VckNiUrkZd7ViDHVqR7A03F9JWwtU6nFTbXC8ddp\nsn4jDyv75sia9Ee3j58nLz2ia2eSSpMXDZKZ/dYu5tNOfK7lmA3LTSiVJkdgLTmfXLY3jMMHzBJL\nU3NzPgZUW7klBqoGgJXcXylaAIO2eFYy9iCTM6kK4jvKq+WwVglSvPbj9+I/yPFvtMWrmv1Xek5d\nD4l0roc5BqkQPENZVAk4uO0rLW/cVdar7KxaXsar1iJ1c2v1YkHL5axutuXK63pvdDmOoC2ZXq2U\nfo6vtGwHmV3XrxznVY30v5UqRwIo+TdykpivIlktoSBhjZTutPIz1erZSPpv+1b2+6Cmb5CwSr6b\nvW4hyv1POUr/XprGPzVOhY9r1q7Ex5L0gJlm7Xq9PlOh3av8AqvL9nIYY4dEiaWR/fsDs3hWQxL6\n2v68jca+PlYCaNUAsJL7K2sFSn0pZet+Wi5AljN22Vf3F7rr2q2gjvvyAAAgAElEQVR5oyHKTm5A\nvJEX/xtt8apV/0ElUaqnZEyhaq8QPEP5VrnXz2pmVrUdv+U8/MLL7t2irElvrwmLuja8WND8lBfR\n9VGPDGKnHTtEjGVTE9HiYjBtBmml1Fm21ay8VRWDnPudXIMtaYPnaloKRjdWV8ulGguqPmVWWB7/\nKZ8xm33k+2omWv7cWfpe+vfYhf4fTJWuGR+nXQwrkb+SMZXKL7B63L6wtkZTc3PeoXODvmwKawUD\nsvAYqPsL3RbAq1U8opMbMQfI1FzK80Wwl7GrF9e8r/6v9FdtzpVakmsNUV7HW69ATLTxFq9a9R/U\nMajnYxmq+grBM5RvlQsNMsZxaEgfI+k3RtRpe517iG573Xt2JVT4dZuf8iJe+3XSRoIqtxT393vb\nx6l+aipFFI1az4UgxI+Jl9hQv7J1OUqT8W224BRPaxngJT0YBakdJCyS3STKn6TJamGcIhPEtpIV\nDGMkypvE2fYgUfYEJJIB9RHRDcU202zbDjKhspUsMZ8EEvGcaSqFVYfn+P9avND/zAgVmgti7BwW\n1VqadoBpB3dpqv7xUFQz1+1a+aJrxK2eqoXQa1ypDkyCctM1XHH3g7Dm7SI4t5Cj0adHKfWllGPM\nqHpxXQvQzufzWmus03qpgFxriPJqPd5oq6IfXUsWPf49FdQx2EzHMlTwCsEzlG+V63apuk2q7bjF\niPqJj9Rd1Om2172n9qW7bnNyAfUyLz9rmMtZ4a/G145GzdKWFu9uxXZu1Xwty3G5dpJbVt5K4d0W\nFMaJcp9YoPQfHqCx/Ydo8uNrNjGYfICkB6NKxWFTwqSEPf6tK/uVILZc/KnW0lSfSSJKlL6f+0SO\n0v+m6ArbXCiFTd1ThVqQ6aIrf24RPwvNBZrKTYm2ZQxqmu2XJfsYVjdxd2M1vrWKqhl4bmAaZwmX\nY18fc3S7dZIOTPSwkqPcQorSB5I0fshbH4W1AqXmUoQ194tgCW/JvcmyQKkaCZxU5fN5LeA6wZ0K\nyLVOCuUVyDfaquhH1bLobQTQ8u+poI7BZjqWoYJXCJ6hfCuoeEO1Hb/tBrG9nxjCcpMIeenXq5tx\nMul9vkFZSZeXhaXTTywrd6vu7S0Fbrc420qlW/OqGX4KROk/9pnwxa8bpk7c4icz03Lgk7DZQtY4\nzRgJC+E02397cRs7F9lGTbvsaXGFzU25f+tr2qBeInpQmUNv8WcrCVDtJDPBUVDwnmb93czW5Fq5\nJtroAHEqdbv1E9OpAxM9rKQpfcBfIqHcQo5GD4xS6lCKltecv
+A4vEnX4dGnRm0BTXdxXQuo0wGu\nE9ypgFzN5Edex7vZVS2LXuiiGupaUAieoTa1JFz191cvsUwtrtvsoIjX+ezo8Ad/G+hhZ7hV6yzF\nulqntZCvGwg+Y/7Giwlf2v79fhr7iI3FM2ilyfwWVWtnthDRUTLjMKUraqvDPk5POyAtPsc/pbjC\nSlj1Yvm0eyaLY9eNc5Lc4d3rMeS1RLk1N7yuC1TluprqwEQPK+M0fgjFPoY99SETD+ExUPYbzu4X\ncvxDTwxR9htZGn1q1Deg1RrqpJzgTgXka6Wm60aqWha90EU11LWgEDxDbapEN6pU100JOUG6sflZ\nn3LX0g6K+PyyWX/tO4FWtY95oVBe/VA3VTJuXzcQ0lQCIE7nVGFtjXr+YI7QvFZ90JdAJYFshEyw\nvItE7KV6g0JC2phmnwDA0+IKK9/XWTXVZ5fN+xI6mRvsFfb5+i4P65Rm7dkdjxyJOFW1/6BdoDVy\n+47aiDqY1ZQKP8HPr0CFtUmamst6bo+7zU5+Y1I7rtxCjlJfSlHHX3RQ7xd7jbhOP4DmlNE2yHUo\n9/+epQ7k2vI1Z4GsZ/lxn90IF9V6KCMW6tpSCJ6hNtQy5iY30JBw1d5uhZxKvyx5v06JatTxlbuW\ndlCkwqOf9p1AqxbHvFJLsVvG4VSqijdKNG6cJeeUYlHzHUrnN5Oq3J4nCOov7jtNRD0koHPUoU1u\nJSyQqItZ9W94h2cXETVr3lsujjdtvn+RbfNCn4f18uKKy9o3ni3kvIYBye07aqOsYxWDkMe7Q3x+\nasbbcsfhd5+xg2OGFdMuHlJ1sfWbHEltU81oG+RxLvf/XujCuXGq97UPwTNU0ArBM9RG5p5wlRsg\nSbhZXg7WHVYFHC8ZbFMp08U0qLV0S8hUrur5mEs5ZRyu+o0SLzGYaTK/xabKAG1lf1cQ5durQKV+\n5tRmjgTsRYioSbOf16edRdOre61drU+QAGIex9lGtFps9++aiVYlmNrNL03Copu1WUsp2b583knW\nMi/VOL883nDYKJdHJxCyAzvL+2PmnbrcZwdtQdAp463bOMoZu3UiRJQmKnykQFOH9PGQ3V/opt4v\n9lJ0T9Roc/hJby68qhxjLFl/o0/bx4zaTiUAi+lmceGsZXKdEst3FfrOUY6SlCQQaJiG63rtQ4UK\nSiF4hqqH3BO26u8nw6LpJ76xUnEwc4JaFYSy2equZbUSO9Wj7DIOB+XCW7G7caXJbdT90+QMPHL7\nISoFKhWgkpo2e0hYSNupgm9r0ma1tTy3FPuy+zzio68kWeB4rYFoldeS1a2Z2zpyFUjEi06QGTda\nrYzDUh7Ht1FJV5wgyQ7sLLGSf9hr/IGm99vHQaoZb9X+dONwgyxP9TUXcpT+v1gGZuUYFNYKNPj4\nILX/ebvF0tn35b6yj4VjjKWmPz+WzyAsppsly2gtrYMllu8q9M3bnKTJQNoMFareFYLnJtBmjsGs\nVOXWY6zUPcQrmFUrlnHTKsCT1e4YBAXNft2NS84pL1ZRJ+uWun8ReH7aTXS/LlGWU38FMl1Wo0S0\nWOy7Eoum3dMtdjPoPtX++LHSQaJXcLQ7Nl6OayUqji9/Wz64Pvy6bTtIgpAuY6sd2FliJQ9OGH+g\nfPvpb09rodEOynTvu0GWE+DpyqGk/lOKCm+WQi1PHITHQJ17Ox2tkeVaHXVjSu5N+or/5Gt88LmD\nnvv2q3qoTVlLy6x6rlej71pbmss5hqGrbaigFYLnJlA9x2BWW+W6hNbyy3IzWA+rpRLO3ICTNeiE\nTka7yj/pss6pNOmBSaci8By+gSgPokMgyjuV98iRcElNkojt5HU7p5S+y4VI3fZbfe7j9PRi/eSu\nu3eQOyR6BUe+Pj1UuxIqxfHlD+aDazNN3s8zL83ZAJ4d2OliJdXty3HhVVWO+7EO7nQxm3x8qS8K\nC27HX3TQxLMTNPq0CaKpL
6U8W4LtxiLnqcaPJvcmjeRFXtvla1zN/3v1EItYS8useq5Xo+9aW5rL\nOYYheIYKWl7As0FsVz01NDRQtfvYzJqYAA4fBkZGgLk5IJHY6BHVTqurwMwMsGePOe+ZGWBpCWhp\nAWZn62M96nFMQclpbpkMsLAgfp+aAvadq/3JWjKGfd72051blnaRwQJEw1OYwj54bJhrAsBhACMA\n5gB4WI5XOoG7CuL3q11A4+niB1PF/ZcAtAA4C+CYTSMdAKIATtt8Xom6fLTbDOCC5v1WAE0ACprP\nOgH80qa9LIA8gHMAGgH8VnEsLQBm4Wl9Dclj01ZsDxBrXMZh3nCVcZ7pNPP8DJZWl/Cjwo9wav0U\nRrpHMHf/HBJx5wZX11cxc3QGe+7ZY9lWttcSbcEluoQjrx+xtDmDGSxhCa888woKK+JkmLp1Comm\nhLHf7L2zRpt2/fD+Dr52EL9c/yVaIi24ePUi1q6s4SquGtsMdw3j9fOv4+TaSWMsj77wKJ786ZMo\nXCxgqHMIT9/3NB554RGjn4lnJ3D4xGGjjalbp7BvzDxR5Odu65V5JoOFFfGd0hPvAYFwav0U2qJt\naIm24MXffhEDWwd8t+tX/Ljw9XXSBCZwGIcxghHMYQ6Jck+yUBum8BiGqgc1NDSAiBoctwnBc2Pl\ndoF8valc0Kim6nFMQclpbiU3RVD7kzWQGzMzMIGuCDCB/JNeLba9B95gYAa4+gTQuApcvhOI3gDg\nCARQvBfAAQBnitumAKwo+0cBXGavGwA4fbW2A0gC6AbwsofxyT6uuLQLCDD8VQAv2HweA3CpuN1V\nm224hgF8G2KsV4rv8flxaHwPxNqsARiCgFkVTOWxKcBc4wqgbUPl9zyzEQej/tZ+/PCjP6wIdnh7\nkwOTaIo0Yc89e/DoC49iaXUJr0RfQeHeAvAtACeAtmgbPnDDB3Dh0gUcOynuqkjI8wJLN375Rqxc\nUP8ohKINUdzUehP6W/rRHG1GW6wNezN78egLj2LfT/bhzCXxh5UdyOKp+56y7Lu6vorb992OlQsr\nWgjkQPzoC4/i4GsHsX5lHTt7duKJsSeMbbc9vg0nzp9ABBFcKZ7ETY1NuHj1omWuunaDgk7AelzU\nPu20ilXMYAZ7sCcElk2q8BiGqgd5Ac9orQYTSq9E4toCmUrV0iJ+jowIvtFpfn4emUymrsa0WeU0\nt9lZlTNrf7KWjsGjOGxy6+EMgH3ALGYt/6TLOqcS8GdBWxLQCQDRdwHYC+B9AOIADsKEziSA7wF4\nP4CTEBBHsEKnnbWR63xxv9d8jPGy+yYABEy+aPNZFAI6ATEXaUHdCuBtzfYpCOhMQIDqFQjo7IGY\nfweACIAMxPH8BcQxBUzwLR5XQ/LY6KBNcyMiaAX6HeXzPLODuJao+GMPysLG2/tC5gtGe0urSwb4\n4CjQ2dyJsw1nce7yORx5/QhSW1LGfnvu2VOyz7bHtyHSEEGsMYaXHnzJsBKuX1m39M8BrzXailRz\nygK0ibiwrEroTDYlsTezt2QeiXgCP/4ffoyZozNojjQj+1wWLdEW9DT34LW3X7Os49LqkgG/R14/\ngtSXU2iJtmBn907c1HwTTpw/YYxppHsEiaYEjrxxxDJXqUdfeBQn3zmJh771kCfLpO6c0h1rflzU\nPu2UQKI8r49QdaNyjmGtr6VChQLEv/lQoepGs7PC8lZPbsf1OKag5DQ3eVNkQ+Y8MwNkMkg8NIF9\ne1b9j2EJwAKEi+JPiu+NQAAIzH/SNbkzPAMBTT9i49gL4FEIt9NjMN1SkwB+AGAAwKsQlr5mlAKh\nA3Q+jxk8gwyevTKB9bdXncfW5XkWpeJWUf6fhI/1fRAutJMAfojSW52tAO5gr18CsAXAcQDbi++d\ngbCayeO5pvTJjqtFM8W+zynv83NjRrOf2szzM8g8k8HEsxNYXXdZzzqQhLjDJw5j5qg5wdl
7ZzF1\n61Rgbp127UnwaY22one9F9vPbsdlEidFsimJ7/3290r247DUiEacuXQGp9ZPYcfXdhhrvrN7JwCg\nPdaOba3bMNQ1ZPR55tIZHD993GhDAtdP3hZ//A1owK1tt+Khbz2kPYaJeAL7xvbhmye+aazdV//x\nq8bvt++7Havrq8Y4AWHBXb+6jsLFAo68cQSvnRN3eIY6h5AdyGLu/jk8sesJ2zW3O06A93NO10bQ\nxzlUqFChglToahuqItVr/ONGjKte12JTqlL/Zh4X9ySAR1Cxq6Krpczu8wxQDCcF+iEALKG83wRh\nERwG8ITSdg+AU96H+QwyWCk2fCumMOZ0F1x139VJ59LLXWgjxedFzb6TAJ4u/i6tkmcg5neO9Z0C\n8GNY582PYQKmy+zNAJ6CWK8tEJbXAZQqA3N9uauuz5jJclwXN1J+YwdlLGYLWjCL2YpvxqyuryL1\n5RTWrwoLZe+WXpxcO4lkUxL39d+HX7zzC8f4zu1/uR2n1s0TXq453yb7XNa0qgJoibTgu9nv4t/9\n4N/h+KnjOHnhJNaurCHWGMO5y9Y7D3ZxpjPPz2Dvq3sNSFY1desUmiPNOPTaIUQaI7g9eTsWfiHG\noIsddZN6nKSLcku0BWcvncWxN63uyDpJ996OWAcWP7poiSENFSpUqFordLUNVXUtLZl8MDNTP27D\nGzGuel2LcrThEO3Vv9luoLOwulgGcSykpQwode10+lwaSVTQke8nAdwG4TZ6RNP2SwA+DOAd2Cfm\nkeoComdbgEtAN0Zwj9YUyOTFtTai2Y7HbV4BZj4+g6XeJbRcbMHsn88icSEhrJl/yrbj7sRnlTZW\nYM5bAnwMwmIpvSPl8cxCgPDZ4vN3YcItl1zfNgiL8irE2qvnhovKcV3cSM3eO+srdnAJS0airRnM\n+HbXs3P3XL8owPPq1avIDmSxN7PXAowzR2e0APjSgy9hx9d2YP3qumXNpVUSsFoyCYR4JI6Opg7s\nG9uHxN6E4V4r4TfaEMVluoxGNOKZ5WfQ1NCEt68Iv+/eL/Ui3ZfGhcsXLNB5V+ddWHlnBSfXTqIt\n2obCWgFvXHkDpy8K3/Erp6+gd0svRnpG8PhvPI5HX3gUR39xFLd+9daS+M+SNcMMzt57FqmjKTx5\nz5OGG69cm+ZIMwCgI9aBP7n7T2zXfqB1ACfOn8CZS2fwyAuP1P1NkVChQoUKXW1DVaSNiH+cn593\n3WYjxnUtxYJKiD58WLBdzeXVv9luoBI2PQKzl3PKApC641v8/MdtwFRBJA4DIEBnClbonIGAphSE\na21n8X0OSVL3QcRGcuiM2YzxNHBvxyxu7ZzC/ZhDPAhXYg9wunTDEhZ2LODwnYcx84nicTgPYWmW\n4uNXEw51AXgDwhr5dxAAfwRinglYj2eLsq/dvdVZiGRF52ACPeD73CjHddHT+RSAnp+ZwTOZDJ6d\nmMB68YSTgOZlrDPPz+CVZ14BngWG1oewx+1GhUY6d0/pFgsApy6eQiwS08Yf6vb93A8+h46mDsQa\nYmiNtRrtvOdr70FibwI9X+zBDVtuAABQ0RRfuFjAh5/5MAAg1mj944g1xvDygy8j2hDFVVzF+tV1\nAzoBGBl5v3/q+wBE7Oium3Zh4bcW8OrHXkVPvEfEp75xBD85K4C3LdqG0xdP4+TaSbTGWi3xn4WL\nBRx5/Qhmjs7YuswuYQnH4sew0rSCkedGMPHsBGKRmLE2dyXvAgADKAH9OdXe1G7s0xxp3lQu4aE2\nXrX6ngoViisEz1AVqZ7iH4thgZiYAP7sz2o/rmqsBZ/Tag2vJTYcohMJzCT2IZNNOM+9lgPVAaTy\neb4H+OA54MkjjIN1oLMEEdu5AgFnKiTdBgFhq8VtzsCqXcWx3Fd8zeArfiqBsXP7vEGnCm3N7ruU\nKAa0bCkCxekR7PlK8Ti0ohSipSKa947BNibXolkAvcX
fh2FaRFUlIDLvOrVlJxmXOwEkLngHuVpr\ndWkJKwsLOHH4MI6WcYdoaXVJlDo5Adxy9Jay3GxVmJx5fgYXrlxAU0OT5X1AQPzg1kHEI3E89K2H\nDEhUEw2dXDuJS3QJC79YMIB05Z0VI/bzbwt/CwCINIgTqSXSgr/+yF8DAF568CXEG+MARFbZ93W+\nD5954TPoaOqwnUNXvAvRBuEAdgVX8N03v4tbZm9B6sspIyvtUOcQvpcV8akS+BrRiJMXTmJ1fdWw\nwgJAW6wNf3L3n1jAevtfbjegsKV496RttQ2nVk7h8InDaI22Gjc4Ord0lqyLTvymyGtvv2YbMxoq\nVKhQ9aIwxjPUNaNrsezJRs2pHsr8eJp7PQyUqaT8y6PQx32qcYYfgACuyxDAdr64XQrCUsjjJ+8A\ncBSlcaKq+iGAVZdJlisGkTm2ubitHeQPF+dxDNa4zwZgdcsqZj4xgz3P7UHijoRwG5bZbFMAfhPA\n4zBLpXQW57gOsQZvFJ/txbn9Ozi7wrqVGOHuum0QcOrn9MhAHx9aZ3p2YgInDh9G98gI7p+bQ9zn\n30AQtSTVsiBOZVtmnp8pKW8Si8QsbsG8ruZQ5xDyv5XHoy88asRfNjc2Y3zbOI6uHMVtidvws7d/\nhu9MfscS3yjH9Ma5N4xMtxPbJnDk9SMGSBprsG0CL731Ek6unQQg3FuJCGcvn7Vs1xXvwvt73o/Z\ne2fxwDceMGIwARGHyfuS73135bs48c4JNKLRqDea2pLC9z72PTwSfwTHnj2GN068gY7uDizev4iB\n+IB2Tb1IdyzLqekZKlSoUOXKS4xnaPEMdc1ow610VdBGzWlDM9oW5WnuNRqoV8tzidVbzaAqLWmX\nIBLixAE8BJHBVrq08uviFQhw4vp7CBhahel2OgTTXRcQcaM/hD7hj6pLEMmLfg576ERxLj+E+K/B\n/3NQ0Sr43/Yh8U/F2M73K3P4GkzoBARM//PiPN4LM/PsWQjodHOFdXOXlevO3XX9yM2tuk507+ws\nbp2aKgs6Z56fwdmLZ5HaksKTu54sG0pU115uAVVrherKm6jW5J7mHnTFu9C7pRdP3/e04cYq4y9/\n/aZfx+n103hr/S0ce/MYzl48i1958lfQ80VR/gQwS5W8euZVAMI19uLVi/i1G3/NMvYHtj2A85fO\n45frph/4aGoUTZGmknmeXj+NwycO47a/vA2vrr5qvD/cNYw99+wxrKD8PQnDV5lv+craCn59/6/j\n5DMncf7qeWAAOHP/GTwSFy61M8/PIPtcFucuqumYnaVzCXfKnBsqVKhQG6EQPENtOtnFJdST229Q\nuhbn5FW1nLtbrIvXmNcSDlYB5iBMIHodpnspF0FkuZX7vU/5/HJx/9sB/BkEvOVhlhkBgGcgYKsd\nztK5vOqULG4rkwJdcdhuD4R1N1V8bwQi+yxXG4TFcw9EnVFpXIoCsMulwtxfHQEZqBwc3dyqXVSr\n2Kl4IoGxfft8QycgoOTYyWNYWVvBB576gOe4QKdSHyrMPvrCo5ZtJZQmm5L4wb/4QQnsvudr78Hj\n//A4Tq+L+EkZ3yj3645348z6GfyoIGoTjXSPYO3ymuGC+6EDHwIAfOUfvoKFlQWcWj+FKKJGDdFX\nTr2CRJPZ5wsnX8DCyoIBta2RVly8ehHfeuBb2rk3ohFvrb+FU+un0NTQhIltE/j2A99GIp7A7L2z\nmByYRHYga7zXHms32o01xIw2fn7+51hYWcCZN84AJ4Gt2Io/KZ74drDodk7pYnvLSYy12coHhSpf\nYYxnqI1QCJ6hrhnVg5UuaF2Lc/Kqepp72ZbnWQCDMC2bvP6mtHB2wATEhuL7FyHgKV58fwJmXKPU\nCoB3AXgOouYl/zZPQ2TCLcBZV2CfnEeqCSLm1KF2qDH2H8BMBvRjmPAmYy3vhEgkJGNZ+wAssjYu\nw5qQCDCB80l4r79
ZITj6TUBUkfwAdYDiNSlX1lYMyJHgse0r2/DhAx8uTYyjAaOZ52dw45dvxF/8\n/V8YMPvIC49Ytn3fX70PZy+dRXOkGdGGKIb3D2PX13dZ2l55ZwVXinc1Yo0xS2zo1K1T2NGxA8dO\nHsOp9VPob+3H3P1zlvM30hDBjV++EReumCfrFXaX5OS6KLMCCJfa9ybfC0CAIQCcv3IeR14/gt9/\n8feNGFUubrm8SBfx49UfI/tcFhPPTgAAnr7vaTx131MG/M3eO4vueDfOXzmPS3TJaMNSsuUC8PbX\n3sbvrv+u5bi4waIXQCwnMVZoJQ0VKlQ1FYJnqE2nTCaz0UMI5UMblSDJj9zOKd/WVwkTD0HAlbRs\nxjXbnoGAxH4A0hNwBCKm8hgEoLVCuON2KvtegbAWnoIZFwoIq+QxuGekbURpjU6md1reAW0hEbN5\nyaWt4zDrac5AWGSPQABgd/F5A8S8pC7AClvDELGmGZggJt1mJUR7sWJK+M2i5kAH+PyOUt2xayBp\nmWxqLE0AJMHjxDsncOzNYyUAwsGoOdJsAOfKBRMak01J7Llnj2XbvpY+HHvzGC5cuYC31t8S2V/f\nOILbv3a7AU4y2VAEEbz02y8ZcYrS9bQ5JrJfNaIRFy5fwL8++q/REhF9vLfjvbh49SJWLqxY5krK\nCX71qoDHM5fO4Kdv/xTRhijOXzlv2eb7p76P4e5hy3vSeik11DmEvpY+R0hLxBP41Z5fLXm/OdKM\nni095htrQMNRQdCz985isG0Q8UaRgEmujTynJHA++dMnXQHRT4Zjqc1WPihU+QqvpUJthELwDBUq\nVFW14aVZApCr9VW1WnGY4Flaf0Oz7wgElL0LZu3KOZhWUAlaCQB3F98zKjAXL6ob1oBDZ0yw9epC\nKw04sr1GGBaky42XEb0YRcPZhtI22TUzUBzrv4EJeEsQFtkCBHwegYDjIxButnblYG6GcL3lICYN\nc8MAJuHdirkBQFeWNiCeVLrZXrx60bAcqjGajcXLg6HOIQuA8My0B187aAFOAEg0JQw3Wm5xk+Cm\nAtzK2gre/dV3Y+LZCXzrgW+hv7UfP/n4T3BX111GMiIJWPNvzAMQVsPT66fxV8t/ZSQB2p7YjsK6\ns4k/2hDFFTLH+sb5N6zWx6Le1/0+dMbFXZ7hrmFMDkzilY++gp64eeL/ePXHeOHkC8Y2KqRJQLx0\n9RJ6twh3hc6mTsQaYnh/7/vxN7/9N8b7w93D2HuPSM+ciCdwc9vNOHayFPoB88ZA4aKYa9CAWI6V\nNFSoUKG8KgTPUJtOYVzC5tJmSPpU8TnFIWc7gB8V3x8B8D2Ybp+/YPu0wwQpCVs8GY7OXbQHAlJH\nUYTFIhTSL4G9OQFugD4GU01SJNUB4OViX6cB/BK4HL2M6NUomi4X3Q2TMGEzBtFPb/F9QFhdea1M\nXmtzqPiU67EXwhUYEBl6e9lnX0ApiMl1+DaAp+Hd/XUDEwT5Op8qdQv2INUtk1u1fqPvNwx30dX1\nVcM9VLqV3rL1FguAJOIJ3Nx6M469ecyAH0AA5cS2Cfzs4z8zkupIi9ujLzyKl0+9jFhDDHcm78S2\n1m2W8Z2+KBL33HfoPvzwoz/EwNaBkgy4ACyQ2BJpMeCwPdaOP/3QnxrWTztdpssGJDei0QLMHTFR\nbiWCCL6z8h384NQPEG+M42dnf4bzl8+jo6kD8YjpsrB+dd0Yz9LqEm6ZvQVb/vsWfOCpD2Db49vw\ntX/8GhZWFnDkjSP44A0fxNStU7g9ebtRJmb7X27H7cnbRUzo/d92jc2U55T8TAJxJYCoc9ctx0oa\nanMqvJYKtREKwTNUqFBV1XWRIEle77ZBWPZOQbjOzkG4n15tJGsAACAASURBVMp4QbldEsI6+gKA\nWyGyxQJWSNLFGX6z2PYCGFyeA+4olpQ5aTO+LRAlWwABmlH2WSuAu4p9PQogC0QuC
/MmgYQl96cQ\nFllAuNy+DNEXNzANsbHPinYwUGwfMC25CQBPQMDWUQgXYg5eKoiVG29ZA6ALRDWIJ1Xj9pzqP3L3\n0JHuEezNlBZK5fAzsW0C2YEsXnvoNRwaP2QBFu4WKmtzHjt5DG9eeFM7zpW1Fdy+T7je8gy4XM2R\nZqSaU/jIwEcMt9qzl84i93zOErfqpq54l+W17OsKrmD10ipW1lawfnUdq5dWceT1I+j5Uk+JGy8g\ngDXSEMGZS2ewfnUdL7z1Ak6cP2FYYiOI4Pyl89hzzx68du41Yz8JoAAMl2IJgX92z59Z6p3yGE55\n7L79wLeNrL/lKoznDBUqVK0V1vEMFSpUIJqZEW61LS0CNq8pyJR1IdV6nFKyrqR0LZX1OdXttgNY\nhrB2bocAT6l+CAB1WrdOWGEvfhW491PA4/9eLHgPBJhyNUMk+umAWftye3G7SHGsncV53Q7hIsvV\nCGFl/SVEzOhWiHqfjTBddbdAWHPVsWdgrYcpS8z8BAJKXyv+bId+XUMFIqd6nbrP3OpIrq6v4n37\n34e+lj60N7Xb1ojkNT25Yg0xI9mOTk0NTfhg6oMGmAFAAxoQa4zhgW0P4PT6abzyy1csFtcGNKCj\nqQOrF70F9EYQwVVcLYkB9aNIQwTff/D7uPfr9+LUuvmHF22I4jJdNn4C+lqfANC7pRevfuxVZJ/L\nGms1desUTr5z0ngdb4yjJdqCnd078cSuJyx1USup0xlEHddQoUKFkvJSxzMEz1ChQgWiTEbEcgLC\nwrlv34YOJ1hlYAUou7lJAJUxmaoSEMmEABGPuV78fQiiJIrbdd8uCLCVygJ4qvj7DIBXIJL8HAHw\nH4q/fwfA5wB8BSKZTxTC4roKAdJyPFPF/dwy4U5AWDG/DeGaG4EA18sAdkJYMxNs28MwQTwLcx1V\nOa1rHer5mRmsLi0h2tKCe2dnyyppUitpQbJ4p2i1I4aZ/6UNe+7d6ws8bvzyjYYFMDuQxVP3PVWy\njQSboc4hvP7O63hr7S0Mdw2jPdaOhZUFNKDBFvzijXG0xdpwev205X0Oc3aKNkSRfyCPe79+Ly6S\nl2K25Wvq1im8/NbL+OnbPy2ZT7IpicLFAhrRiGQ8iVhjTGs1HWwbxOrFVRQuFtAWbcMHej8ANABH\nXj+Ctmgbzl0+Z+lv35j4Q+Fgz9/3KrcbDKH0msEMlrCEFrRgFrNIhHfMQoUC4A08Q1fbUJtOYVxC\nfWozxHLayfWc8hov6OY2KZPqtAB4EQLEbv4B0PoA8JBN2l+euOi/w6yJuRXAf2bbLUFYUNcB/D8A\nDkHUCh0ofiYrTFwG8BaEy6yETjmvncXXt8NMVMRzwQxBWD9PQsRn9kFA51swrb3cY091d5Xr2K78\n9BKHuUFlR+y0urSElYUFnDh8GEeVrFn19h2ljdsrZv1KPHME+74Q04KHU8mO9Svrxu928CjdQvO/\nlcfSx5YsLqLd8W7LfhElI9b61XX8cv2Xlvca0egKnYCI5dz17C7c2XknGlzrBZWvtmgbCmsF/NO5\nfwJQug7SIiuTIZ1eMyFajqs10orCesHY9tzlczjyhqg5mh3I4gM3FH3kXxWJnpojzcYxiUXEF0q5\nCYbCeM7ytIQlLGABh3EYM3WducxZ9fY9Fer6UAieoUKFCkTXdCxnUPGCL0G41P4dRFzlUwAungCO\nfRo4/Cngd/730n144qJHYMLh28XXEsh4QiP1GtQu9K0NAuTU2MvvAPgYTPiMQ9TzXAbwTHE8CxCu\nttxjskPpWwVxuY6vABh8Cbj9Y0DqeeDJM+7rapeldoOANFq809I9MoJ7NtudFsDTnSK1DieH0J3d\n4kRsjbTi/OXzFjCVwPrQtx4yrGmJeAKJpgSyz2Xx0LcewlC3yDiVaEpgYtsEIg1W8GyLtpWAXKOP\nS5a1K2t4+fTLIBCaI83oineVwK1Ue6wdu2/d7
bntCCKINkQNSOSZcrk6Yh1GPdBGNOL9ve8HYJ3b\n+SvnsXpp1RiH1Mn1k4hFYnhi7AlkB7IYvWEU+d/K45snvmkck6bGJl8ZaL3U/gzlrpbiF+oIRrCn\n1pnLQoXa5ApdbUOFClXfulaDR2Xc6LGzwOXiBWf2ItDTZI2DfBUiHlO6q94J4ASEtfAVAJ+E6b7a\nBFELlLu7AgLI/kcA34ewiHJ3WuniqsaxOrnFcm0BsAbhcvt9CKB2mq/Rfsafb7bqtivnl4E3N+iA\ntb66iqMzM7hnz566crP1HPe3uir+tvbssfxN8f0v0SUcef0IRrpHEI/EcexNEZ+Yak7he9nvYeSv\nRozYxsG2QVy4cgHr/z97bx8U13nne377HZoGGmhkhJBakkvWSyIZJBzJsRS1IyleEyd0XshcM3cs\nu2rdU8luJffurrh3tu7O3Jqb3Joqp27NTO2uK9pkxEzingQpkWLZZhRhCSThGFu2XhxJMQ6KiRBC\nvIgWIKBpoPePp5/T55w+p885/QIN+n1UlOjz8pznvAD97d/Ldy6MKKJCraVaami9tx5XR64KdaKd\ndzsxFmENeRxmB3Y9sktS4+k0OzE5P8nWmxwIR+MRVy2SpeceWHUALftbJDWWRpBfG47NZMN2z3a8\nN/SeIDJXOVcJ12ckPIIiWxHGImMoc5QhGo1ia9lW3Bi9gcHpQaHusqmrSXI/1//reiE66vf6UZ5f\nLlkv3158/9NNzSUYIYQQQACHcZjSbAlCBNV4EgSx9FmuxaM+SIVdzSxwxqos+KrAPEB7AVxBPLLX\nAGACTJC5Yt/z5fLLxIXfNcQbENWA1Wq6AaxEvLGQH0ygtsZe84ZCcmrBBCdvkpRM+InPtwHARB3Q\n6gdcO4Bd24CjtuRRT7X6WTVB+pCSrriQi0O7xY7Dew6j8e1GtPa1Cts1rG/AxMwEWvtaYYYZZlNi\nGqxcPF0bvYbh8DBcVheK7EWYmJkQur+uyFuBwel4W+byvHJE5iJCNJCLx+rSajgsDnQNdUGJUnsp\n7s3cU1wnF6Al9hLcfP4m3A43Vr+2Gn0P+gxdKwB43P04yp3luDBwAdPz03BZXfjCyi/g/sz9BDFq\nN9sxM89qTqsKqnDhqxdwqOuQpOmQ+Jq7HW7J/eDpySPhEaEWdGp2SthX3pTI4/DgifInBAFKzYQI\ngsgmVONJLEuoLkGFQICJtDqVWsGlygIUjy7YMyVOC+X1ntVgQu+MlYmmnthy/qu7CMAFxL0++a2V\n+1zuki2Xw1NVh8FqM/2Ii04g3ugIAKKIRz3rwbrt1iPuucnnfBqsxlN8XLXU1wR/ziDgeQqY2AG0\n2aBZKqVWP7tQtikGUnoX/HeUaG5CGqBK3Z9WuqXYQ7LZ1yzUAAb3BVGRXyEZO7gvCKvJinnMSwTd\nttJtEo9JnrI7HB6G3WzHxOwE+if7BdFZbCvGFyu/KJnH0PQQ8iysoNlldQnj90/249bELdVLoSQ6\nnWYnVuStSPD5HJ0ZxSP/8gieb3seY+Ex1TGTcSV0BW39bZienwbAajQvDl1EvjU/YdsnVzwJgF0/\n7lfasr8FRfYiYbn4mgPx+5H3hzwMh4cxEh4RrmFbfxt6xnqEfQ/vOSxs77K6MBwelliliG10SHQS\n9F6KWAxIeBLEciHWLAStrUyELheUikeXqsgW1ym6AKwD6xDLRV8AAH//yxNFxsBqOfl75howESj3\nueT1mWoCTCz8roHVl4q347Wj1QCaY+uOAzgBlvJ7AnHPzbOi/eXCT3yOm8FSgX1gtaCSebuBJz4T\nn5PaZwpagq8JrNlRo8p6AyQVZWo1ppkYO11Ecwv+PLm40PJuVBMnbocbN751Q7KO120CgNPixIFV\nB+D3+tHxlQ6Jx6RYzH5h5RcSjllkL0LrrVbJspqyGvgqfXCYHZianRKWD04PYnpuWtdlMcMME0yY\nnJ/E4PSgY
ofbmegMfnHzF4IIlrPKuUry2mlOLJiWNzAanB5Ed6g7YS4wAWsK1sBhZv6cB88ehPMn\nTvym7zewmWz4yd6foKmrSfKc8PuxpWQLgNg1rPiC8P27X3tXck/49rwpkfgDCLVmQlT7SRDEQkGp\ntgSxXKirY6KztnaZdvgRsVTTb3laqAssQjkFgGfjNYAJKJ6OagXrQMtTSL8X2/dxJNZwqiGuq3wV\nTMCqWb1oWcHohZ8jx4N4aq88FVfPMX1IXsOptd4ASdNU00zpzWp9nYG58XRLj8ODje6NKLIp+3Dq\nrRXtHe/F7td348JXL8Bb6FUcw2axocBagGJ7MXrGevDe4HuCj6fVZMUOzw5J6mx5Xjne/9r72HVi\nl6L9iBVWzEK7u61RtKxaHGYHyvLK0D/Zn3ScbaXbUGgtTPDs5GOE59knTfLzqHRWYkPRBsXnRGx9\nAkDTBsWIVQrVfhIEkQn0pNpaF2oyBEFkmWBQsVnIsmSpercEATwGZj/SBqAitpxH/Bpjr0sAtAP4\nPuKirFe0XwDKDYHkt51HwgAmOpO9n3RrrBeT7LhBsEjnQOy83LE5K0U19RxTy8pGr9WNDsSRuYQ0\n1SDSEuZJx04XA3ML7gsicD6A/gf9Qg1i4HxAEBvcn/S3X7qK33lGE9bL8RZ6cevP46mvYsE6FhkT\njtGwvgG9470JdY+z0Vl8OPwhAMBismAuOoeh6SEc6joksWyR7JNh0WmGOSFdWIlCWyEGJhOFsJx1\nheswM5cYXXVZXZLorfw8qsuqcXXkKgDW4faVna8I65q6mjA4OYjGtxvRebcTDyIPcOKPJ3Dx6xex\nrSyxoxePbuohlWdTdxMrgiAIEZRqSyw5qC5BBbebRf6Wu+gEMu7dsmDPlBtMICH2/7uQpqnytNWb\nYN1hxTWNSgJLK/1TTZSla0GS7LhuADcQPy+tFGAttGo4y2NfGXjsk9bAqdWYKqD0PKVaXxc4dw6+\n119H3VtvIRRW6eRqYG5ckHDrDrnY4P6k0Tujiuu1ONl7Ukjl/eT+J5IxuMApc5RJ9olEI6gqqMLT\nK5+WbM8tW7LNPOZ1bTccHtbc1mayodnXjOC+IErtpZJ1E7MTmEPceoVbrQBAHvIwMz+DVQUstXcs\nMoZDXYeE9e+df0+4rmORMcxhDpFoBDtP7ExIlRW/Pnj2oGYabSrPplbKNpFZAgjABx/qUIdQhnyj\n6L0UsRhQxJMgiKUHF9nZJADgJFj95Q7oT2/VQh6dEp9GsgigUlSLC0sPgH4wISmOQKpFwsSRUB49\n1WITmCCeRbzxUQ2Uo4zy80jnVmlFRZUiwRyDVjy6okRaUeZ0xlagOxRCxwCLsgXOn0fL/v2Gx1CC\nRz7lqZjcn/Q/fViNE8+uxU/2HTEklMVRytryWhTYCnB4z2E0dTVhLDKGivwKeF1ejAyNCNtZYMFE\nZAKRaAT13no0+5rhdrhRWVAJj8ODueicYCHisrowMTuRcFwxWimz2cICCy5+/aLQxddsSvxs3wIL\niu3FsJqsmJqbwswsi4xOYxptt9sSGjhxHBaHsPzKyBVEohGYYEKXvwvf7fyukCq74ecbJNer3FGO\nofAQAPXIdSrPZlYj+EQC3ehGR+yXdgABtCyUbxRBZBiKeBJLDp/Pt9hTIBaTdKN1Cig+U91g6aKj\niIuaTGAgOqW5H48GbgSrFZVHINWOlUp66gBYg6AoIAR9/gRpUx899ybT9y/ZuWSj4ZaOJkOZ/B3l\ntLLPh2s9Hhzes8fw/mqNY9QazewLBrG+oQHfevMsfll33HAK5Y5yFqWsKavBa198DS37W9DU1YSW\nnhZ03u3EwNQArt5j6aRWkxXFtmLMYQ6hmZDg28mP2Tvei+HwMEZnRuEwO7C6YDXyLdJusfJmP2ZI\nbV1K7aUosZdItrEqfOZugSWhSVDCNiYLDqw6gJ3lOxPWmWBCz/M92Fa2TdL
FVz7mPOZxb+YeBsPx\nJknm2FuxWk8t3vW/KzQT8p/yC/dt085N8Dg8cDvcOPPcGeRZ8nD5G5exrWxbQidbLjprPbV4vOxx\n4ftMCkS9UVJqXJQZhG7VqMXhdGsKYtB7KWIxoOZCBEEsLXzIWDOZpIib5Ij9LnMRo41vUmkkVI54\nkyBA2TfUB+17o2cbI8jORVJ79qMI3K+3Zbbh1gL7hobCYQTOn8fhPXvgdjgM778QjWPE1/zVPa/i\nUNch5Fvy0Tvey2o9Z8aERjsl9hI8VvQYuoZZM6GK/ApJA6EyRxk+V/45BPcFE7xDxdE7Tt3qOpzu\nOy00K+LbiCOjJpgQRfx9yFv/01v4Hx/9D7TdbjN8rmL/Uo4JJkEEBs4FcOzmMYzOjMICiyS11hz7\nx2s7rbDCbDLj7efexj9e+0dJ9HnlT1cK16XeW49QOKR6H3kjodHwKNput6G6tBprC9fiiO8Iuz86\nmwxlA2pclBlCCCGAAA7jMNw5+4eIeNghH09iWUJ1CQ85GWwmw1F8poJg9h9+5LboBIx7WaYSdb0I\n5v95AOyaKPmGiu9NPvT5eRpBKVoqOxdJ7dlfujJaCwxA17XO5O8ot8OBlv37UxKdANAzznwei23F\nkmY1mSJwLoCWnhbhmh/qOoSW/S3oHe8VlnGvyRJ7CS594xJK81jtI4/wrchjBrEuqwsj4RG09rVi\n+6+2Y2xmDHazXdiWR+84BZYCXB65LLx22Vxoe65NYicCQCI6ASbEju4/mlBrqoXL6sJoeBSv7nkV\nDesbsKN0hzD+9y99HwB7/njEUZ5qO495mEzx92SzmMVMdAY/vPrDhOizOGX5VN8pXOy8CIBFkuWR\nSx69Prr/KBrWN+DsV87i+DPHBcsbpcj2QkEpuZnBDTda0JJR0UnvpYjFgGo8CYJYWqTZXVQ3bjDv\nyoXEaP1gKvWGKdYowgvgtui1UtRUfG/8UK4jTef+6ahNlbzR3XcEqMvwQ5Ks5pRf2ykAp5CR55N3\nmbU6ndgXDMJhUEB7C7zoe9CH+5H7gijUQq1jqdLy7lA37kfuA2DpqqPTowiFQ4LYLLIV4dSXT+H7\nl74vRN3k9aUf/9nH2P7L7bgXvgcAqC6tRoGtQOiAazfbcX30OiwmC2wmG56qeApXR67i3sw9PJh8\nIMx7IjKBr576KsJzYdybvqd6fo8WPYrvvfM9zEf1NRUqsBQgYolgYmYCbbfbsPZf16Iiv0LoUFtT\nVoPLw5fhPuLG5OwkAPb89T3ow8DUgBBxLbIVYWvp1oTOvkopvjvKdwgR2em5afAGuLcf3E7YlpNq\nHXE2UaslJgji4YRSbQmCIFIlVRGnhg/G0lCNbp/qPqmQjZRUHWMa8S/MOD6kdW2VhN3rPh8GYp61\n6xsasN9gUy3u21nrqcWWki1C+msyCwy19Eil5Xx8cUOfhvUNEruWdYXrsKZgTdLjisf2e/0Iz4XR\n2teq2EyoqqAKW0u2orWvFcW2YkH4AqxT7Ew00cpEjlLabqrUe+vR3t8umcfeir2YnpuW+JMC7Nyi\niOKdu+9gaHoIDrMDDosD4bkwqsuqUeooRXBfEACw+RebMTA9AJvJJqQSA5SyShBEbkKptgRBENlE\nR6MZQxhNQ00lbTULqcoJBACMgfmUHkPmItM60lwXNbUwzWurZFHBu8x6amuxJwXPWnETGHH6azIL\nDLX0SKXlfHzfSp9kndiupdJZqXlc8dhHfEeEcXetYCmzVhNL0HJanNj9yG6hQ+6+yn1Cs6CtJVuF\ncZJRYi9JSNtNlc+6P4tmXzNsZptkecdAh+BPajOxdTazDXce3MHM3Aze/9r7aFjfAIfFgbHIGMLz\nYXQNdQnXyO1w48af3UDD+gZs92yXzJ1SVgmCWKqQ8CSWHFSXQGSalJ+pTIs4o7WaRrfX2idTHWe7\nwbrsDgA4pLGtEZRqU7PQ5ThlYte2/W/
aUxLbSsKOd5n98unThtNsAakQ11tvp9axNLgvCJfVhe5Q\nNzb8fAN6x3vj9YUHjkr2EY+h5Bkq9yWUH1M+7gdf/wBVBVW4/q3ruDN5R+iQe37gvNCsZ33RetSu\nYEa5ltg/JbaVbcOP9/5YELNizAbfFl0PXcf6f12Pje6NcJildbjcn3R7GROOkfkIuoa7JLWwvIZV\n3NmWXyN+DW4/uA18zMR3+1fa0/5QhTrNEgC9lyIWB0q1JZYc7e3t1Aac0I8OL8eUn6lUusPmMj4k\npIp+5Qeb8Mf5AeTBhjf/8iIeWeHVHmchO7/6sDCpwwZI9XnKdppwJsZ3H3ELKaVVBVW49ee3Ujqu\nDz50nOsAQkCFtQI39t2QzClZbas4fdhtd6Otvw21nlqc/vJpAMBjP39MkkZbZCvCWGQMFpMFc1HW\nZdbj8OD+zH1JCmuRtQhOm1PSZVeMFVZB5Crh9/rxzt13MDg9CIDVqj6YfYCbYzcl3W1L7CW4+fxN\nNHU14cQfT2A4PIzPrfgc7jy4g9UFq1FkL5KkJO/+9W50nusENmYmzZY6zRIAvZciMo+eVFsSngRB\nLG98PublCLAOpwZr5B4qFATjZ/+bG9ceYULjC3er0PF/aQuNjAvyZLW0C2xv8rBT/s/lGA4Pw2lx\n4vq3rsNb6FVtRpSM1edWo6+nj3nDIlEA8drWn/57YOyzHqza+oQwtljIAol2IVyY1pTVYI1rDfIt\n+Wi52YL5mAGt0+LE5BxrAmSCCSX2EhTYCrDGtQbXRq8hNJMYBXRanHhixRPouNORYM8CANtKt6Hj\nK+z3zOrXVmNqdgpuhxszczMYnx0XtjPBhO2l27HCuQJjkTFJoyFx3an4eoiFtpZvph4yPR5BEARA\nwpMgCAKoqwNaWzPr5bhcURCMtf+tHB88MoxHB53oDFzXF/HMND6oRzWXWNQ53S61mSQVwdg73ovd\nr+/Gha9egLeQPQvyCJrb7lYdlx/zyr0rgsDjEUDxdm/V1aGvtRX/8DcuXK+cEMbWE52TR1jF8xMj\nblwkbo6khAUWFNmL8OQjT6JrsAsj4RGYYRbEbL23HivyV6A71I3Ou53CWEoilSP2MK0pq0GZo0wS\nvXU73AicC+D6vevoGevBu197V7jm6bCoDbgIgli2UHMhYllCdQnLiIWozwsGNb0cl/UzZeQaK9RQ\nvvmXF/GFu1UZFZ2Ga8yS1dKm4kmaZZI9T6Hubgx0dKCvtRXnA5noSJU6Ss2MtPAWenHrz29JBJC8\ndjTZuCd7T6JjoEMiOi9941KCAOK1rVXbWXMh7qEpfl7UniN5gymlhkNOixMWE6sBLbAWCEKx2FYM\nv9ePYluxZPs5zGF0ZhRX710V6k0dlnhN5/DUMF7vfR0dAx3CWE6LE+e+ck6o4xRTXVqNd/3vot5b\nD7/XjzPPnUmokwXYPeoc7MTAlQEc6ooXTBv5GZJvu9jenkRusKz/7hE5C/l4EgSxeIh9GbdfBNb8\nH0lrMVPC7V6c9FodtaULgg7vy2SprI+s8OpLrzUypZgwAViapGYUa6G8WxeAdLvUZhK9zYa0kHs1\nJhs3PBcWvq90VuJawzVFAeRwu9Hyv7rxYLQfNpMNE7PMQ1P8vIifo+2/2o6p2SmE58LY4dmByoJK\nwTrm1T2v4onjT2BomqWxVpdWo8BagM5BluZaYC3Ag9kHggj2Fnpx4M0Dgo+mmP4H/dj4i42oLqvG\nnQd3hOWdg50SP06H2SGkIu+r3IfWvlbJOKMzozh49mBCVLhlf4skEm2zsI64RbYi9E/0o+6tOgT3\nBQ39DOnZNpXoN0EQhFEo1ZYgiMVDXJ/neA7ofJMtXw61mLlSW6qnBtKHBW3Q8zDXmIVDIZwPBLDn\n8GHNNFuxGPhfTpZj7kYvrE4nfvkfy9Ezpe3HqTXmq3texaGuQxlPueSpnPmW/ATfUC7oaspqcOa5\nM0m
PK0+RlT8v4ufIYXFI6iXlvqKH9xzGS+0vIYoomn3N2Hp0K/om+1BkK8L5r57H9y99XzLfV/e8\nisd+/pguT1CA2arcenBLaLzk9/px/JnjwvXgnpwAS6t1WpyC8F3nWoc1rrjPqf+UXzjvem897BY7\n+if6he0b1jdgYmZC8WdISUDq+XmjhkMEQaQL1XgSBJHbiOvzGpdZLebq1UBfH1BUBFy9CnhFaarJ\nmuVkGn6N8wH0qhwz0w16NM7vYasxSzWaJBYDT/WW44X/ziJ2/8/feXC1ZBiAMZEQOBdAS0+LII6y\nLTCUxIyRey9vEtTsa5bsIx6r8e1GIapYYCnAg7kHAJTrR4FYp9i7cSHXsr8lYb6v7HwFW45uQWQu\nIul+y9lctBmjkVFYTVZ4XV78/v7vMRIeURTVoXAIL7a/CBNMOOI7Isy31lMLh9mhKSpX/2y1IJSv\nfvMqiu3FitfRyDUXP5eRaARtt9seyg+DCILIDFTjSSxLqC5hGSGuz9NRi5ktsvJMcaE5NgYckplZ\n8vTXVjCRlk34Ne5NcsxU/ECToXF+y73GTP48adVSqtXriVNWv/u7xwGwFN2KzdXCciMpst2hbkF0\nlthLDO2bivejUsptsnsvPwb39txauhWhcAiNbzeq1nIG9wXh9/pR761HsZ3VZybzveTeouLaUfl8\nvYVePOF5QlF0AsDGko248xd38GjRo+gc7MRIeARVBVWKkVy3w40Tz5zA8WeOY9eJXegc6ITdbMdP\n9v4ERXapz6mSj6r7U/b/WGQMh7oOqV5HI9dc/FwWWAsUvVuJ5Qu9lyIWAxKeBEHkBrwWc6lHOjlF\n7M0kamsBeS1fsmY52WIhG/QsxvnlMFq1lGrCVCxA6v/5KNY3NODLp0/jF88kNqExMg+1hj7JSKUR\nkZKAMnIMLph6x3s1j+12uHH8meM48cwJrCtaBwCYjc7i+5e+rzo3j8MjqR1Vmq9SYyKA1Yke8R2R\nbFPrqcVH3/xI81wHJgcwNjuGmfkZfPnfvpxwXCWhqLce18g1F4/Z7GvW/DAolQ8fCIIgxFCqLUEs\ndXKliQ0hJRRi9+bw4cR7YtQCJBP3eCFtR5aYxUm2EOlmbQAAIABJREFU0UovTaXmNZX03XRSnNOp\nyw0ggG50wwknggjCrfJQqB3D6LH1bq9nu1A4hJfaX8KD2Qf46N5H2Fq6FU6rU5L2K76uTV1NmlYy\n79x9B5FoROKFqoXeYxjB6PNAdaAEQSSDajwJ4mEgV5rY5DpLWaDTPRbIJR/MTJGKIFxoEaCnTlBN\nBPngQ0ese1UDGtCi0r1K7RjJmhUZGUc+32w0V0p2X8Tr8ix5+P23fp+SL+diCcCHuSkYQRDaUI0n\nsSyhugQZMXsGxZROIk53NxNvra1MhIrI+WdqOd9jg16uRn0wzwUCeN3nw1t1dQiHFiY90OjzlErN\na6asUPSip05QLQ3WGcu9rkUtDifJvVY7hpGU22TjyOd7qOtQwnbidNKDZw9mpK5Vad2df3/HkOgU\nP1MLfe85RlOnidwm5//uEcsS8vEkiKVOMKie0vmwI45y2pgfnm7xZrTzbDYjqnrvsWwOgSZ37gd5\n9fiMijDqg8mFKgCcDwSwfwGjxdn0RpR7Zy4kSj6TyURQEEEEEMBhHFZNs00WyebHuzZ6TfNYWuit\ntwWAckc5hsKsk7Auv1kkvy+ZumeLde+5oCcIgkgVSrUlCCKz5FJKqzhF1e9n4lOvQPfBmLdlLqTD\nyubgG2xZ9ClpYtDKxYgPJgC8VVeHvtZWeGpr8eXTpxc0NTeXa+LSEcXi8+I+k+mKoNd9PuEDgvUN\nDZIPCMTHqyqo0tXARw0j9bZuuxtt/Zm3GMnmBxIEQRCLhZ5UW4p4EgSRWXhKK8BE6GKqHXGK6pEj\nxkSw0c6suZAOK5uDs1FlSgEAJwGEAewAcBRAExbOW1RMEIYaETncb
kNRy33BoCGhyslELelipUTq\nQRzZ0xvN48i7oaoJJyMCK1kkW3w8w42NzgVwsvckwnNh7PDswNEDR5OeqziaCCArkcV0rj1BEMRS\nhiKexJKjvb0dPp9vsadBqFFXx+ooa2sXxZNTQrLOsiIUnymjnVl1HiuryOagOiUf4tFcgEV0B2Es\nwrvMSRaB04I/T+l0kVUjU9GydBrF6D0vIxHfZJHsdK6jeA565pEJ+D3qGe+Bt8CLInsRyveVo9fR\nCyeciLwVQVtfGzwODza6N6LIVqR5L+nvHpFp6JkiMg1FPAmCWHhyqeaUe4OmtC+Mia90jpUpZHNQ\nnZLYmrAaTFzHoqPkvckwWkuqRDZq4lKNlsktTdKpE9R7XkoRX7VIcrJIdjrXUezDWV1avSCRZ/E9\n6nvQBwAoP1+Oof2sXrR+Xz0azjeg/0E/Ou92AqDIJ0EQDwcU8SQIgnjYCAF4CUAUQDOYyF6i3pvZ\nslcxWkuaCfScS6qRSr2WJplEKVKZTiQ51Tm81P4SoogmTQtOF3EkOhKNoO12G2xmGyLzERTbilH9\nzWp0FHagFrU4jdNww032JARBLCvIx5MgiMyRS02DlhpGO+QSulloIZNN9JxLqmmndahDK1olwidb\nJEsHXsxmT3pINZVZqeHSn8b+hK7hLgCAf70ftv02SWffbKRiEwRBLBbk40ksS8h7apFI4oO5JAkE\nWBfYujq0v/FGWvtDyx+SW4a0gonQ5YxBX850yURKbDLEvo56vRyN/o7ix/jbfdcwmZ/8XJJ5VCbz\nLA0iiAY0ZF10Asm9PfcFg1jf0JCTohPQ50uqhLzhUsv+FpTmlQrLjuw5gha0SK69Ef9W+rtHZBp6\npojFgIQnQRD6yIWurZlELKR/+MP09tcS4kY75C4gRvSzLhZYZGdbyKQqRFI5xgePDOP4X1WlfC7c\ns7SvtRXnZc+kG+4E4ZMtknXz5bWciyk6AwjABx/qUIeQ7NORVDsRB/cF0bC+QZIyq7SMIAjiYYaE\nJ7HkoC5si0QwyMwgF7tTbaYQCWnfiRNp7a8pxINgnWJ1+FQuNBkPZC+wyM62kElFiBj9HSU+xq+b\nPkr5XLId/dVLrguubnSjAx1oRSsCsk9HUp17U1cTBicH0fh2oxAZNxLR1IL+7hGZhp4pYjGgGk+C\nIB5O0rU/yaB9SiYa5KRagptx9xuNJkXZagaUjHSOuRB1eJk6hpGGSJmyZVmKZKPe1Yh1DEEQxHKE\najyJZQnVJRC6SZZH6nazL78f7Tt3Gs8z5V4lGRBOqimSBvJgU41cZjyQzW1oVMZKlg6aLdI5ZipR\nK6O/ozIVGTMS/V2IFOJcJRv1rqmm6OqF/u4RmYaeKWIxIOFJEMTyYNM5wH0ZKH8f6L3PlmmpMb7+\nvfcWtWGSaoqkATWZagluBvWzLvSmg6bS1CfdYz5MZFso5TJ6612NPIO5nl5MEASRC1CqLUEQywP3\nZeB+Nfu+6h3g1ue180i11i+QhYxqiqSBPNgMZv5mFb3poJlMXVwMT85ch6w8tKH0WYIgCP2QjydB\nEA8P5e8Dw08AzmvA9SrAW6ytxrTW+3ws4giwfFS3e2G9TJeKmswCdW/VobWvFbWeWooiEYsCPYME\nQRD6oRpPYllCdQmEIhcfY5HOr/7fwMF6Fi0EkueRxvJM2y9fVl7P81c9HqC/Hzh2bGG9TBc6DzaH\nWMqpi/Q7anmQS88gPVNEpqFnilgMUhaeJpOpwWQyXTOZTHMmk2l7JidFEMRDRKaMJL3FLL32zg1t\ncaj3mLzzzsaNQGcnMDrKli8XL9McJp2GO+cCAbzu8+GtujqEM2JOSjyMZNIOhSAIgkgj1dZkMm0C\nMA/gRwD+92g0+qHKdpRqSxBEHHndpN8fT2etqABu3EgvwqenLlKeQtuiUbvFx6ypAdasAZqbtee4\nQPWhRBxum3Lv6lXMxD4kWN/Qg
P1a93eRWAxrmUUnAKAbzO81iJzztSUIgiBSQ0+qrTXVwaPR6O/5\nQQiCWMIstEDinVr5sXk6KwAMDLBl6QiFYFC7LtJoC1i1MZNdO/l55qj4WYqoCTZum8LJ9S624vme\nDwQUBfKy89vsBsBvUQDMeocgCIJ4KKAaT2LJQXUJKqSaspqqAWSy4x88qD4XuegLBlmkU7wsHfTU\nRQaDwLp1gMMBNDai/Y03Uhsz2bVL1d+E0ETNl5PbppRWV8Pr9+PLp08vShRR7+8oPTYvy85vk3/O\nVAuAfix0Q3/3iExDzxSxGCSNeJpMptMAKhRW/Z/RaPSk3oO8+OKLWLt2LQDA7XajuroaPp8PQPzB\np9f0Wu/ry5cv59R8cuZ1dzfaY9ETXyzCpmv/qSn4AKC2Fu0vvAC0t6d2/JMn0T4wwF6XlQEjI2gH\nAL8fvth27e3twHe+A5/LBRw+LDT18X3pS0BrK9rn54ELF+B77rnsX681a4TrhclJ4LnnjI83NcVe\nx8Rle3s78MMfwjcxAdhsaH/kEWB6Gr7GRiAYjJ9vLjwvS/g1F2wDjz2GtS+8AI71O9/B+OQkDp44\nAYfbveDz+4fnnsNEXx8sDgeimzbhnStXYHE48B9PnVKcj575Tl2fAkqZ3+YL8y+gPdWfz1x5/R3A\n5/IBh4H2yzkwH3pNrx/S15fp7xG9TvP15cuXEYoFFz799FPoIW07FZPJdBZU40kQi48Bz0cJmbLs\nKC2NN99ZsQIYHIzPpakpeTqvz2es5jITpHq9xChdO/G5eDzA8DD7fqHO6yEgV305X/f5hNRZR3k5\nwkNDANKrMyW/TYIgCGIpsJB2KlToSRCLDe/AalREpWrZEQgAK1cywXngALBtG1teXQ289550Llrp\nvIuRlprq9RLDr11TU/xa/O53bF1tLbsW/HtKt9WNVldah9uN/S0tOSU6AWnqbNnjjwvfp1NnSp1V\nCYIgiOVCOl1tvwbgHwF4ANwHcCkajT6rsB1FPImM0i5KNSMWGHEznbExZjHC8fsBm005cqoVXcxU\n1DVF0nqmeOOg+/fjy6qqgI8+iq9fpPNaqogjh7nclVYOj8TOv/AC9u7enZNRWWJpQn/3iExDzxSR\nabLd1fY4gOOp7k8QxBJg0ybg5k0gGgWeegqYnY2LzQpR+XdNDXDkiLq40uo0yyOHegkEgJMngXAY\n2LEDOHpUeVx511mtlF+dh5YM0d0tFZ01NcCZM/Gxl4hoyiUSmu4EkLYFx0JYl/BIbHt7u/D9YqN1\n3g+lpQtBEASxKKRd46l5AIp4EsTSxe2WiiqbDYhEWArppk3Ab34DWK0stdbrTdxfrNLKy4He3tRF\nX7Joq1r9pLx2dHBQu5ZUw14moRx1IhbNdbuBz38eeO21hYtuZkCQsXGk53yuqWlRxUhCDacPcQuO\nBqRkwZHrUdRkAvBcIIDekycxFw7Ds2MHDhw9qvueaJ13rl8XgiAIYmmQ1YgnQRDLlE2bmJ+mzQaY\nRWXgBQXAgwfs+7VrgTt3gHv32Otdu4AbN9TtRuRs3qy8vRrydFZ5tFVWQye8ib92DfsAOHiNZWMj\n20Ct5lJ+HAX/zcRyVI1objbJlCeizHM0NDio6S+pRTqRNIfbDbvbjVN+P9vfFoQD7rQsOPRYlywm\nSp6e/Breu3oVM7HGXf1tbYbuidZ5q61fdv6hBEEQxKKTqeZCBLFg8JbORJYYGGDCa3iY+VxWVgKr\nV7PIJhBPq+UKjO+TrGmQ0jGMeIaK01lLSoB33wXq61ldqTitNYbg8zg8jPMOB3DsGNtGpaGQ8EzJ\nj6PwRj1hiFSbM2WCTHkiytR0JkSamtdmSvu7AizSeRopR3X3BYNY39CwIN6een5HyRsoKV1zfg24\n6ASYR6mRe6J13mrrl51/6BKH/u4RmYaeKWIxoIgnQTwsJEshFa/jAtPpZALP65Xml65ZExdxmzc
z\nEcnDf/JjBIPAI48AMzNsX7MZmJ833uWVC6OSEvb1+OMsInvxoqLgE97EA9gTDgOHDsXFoVKk6Ic/\nBP7rfwWuXYsf59IlxbGNlqNK0EjjNUwQLNJ5GMqCTG8qrqwGd18wmHZjnHTFq2T/I4dTTyOOsRg1\nl8mivvIIp9I159egrKYGzpUrYbbZ4GtuNhw9TnbeauudVnbsWk8tDu/JvQgxQRAEsQSJRqNZ/WKH\nIIglzssvR6N790ajzz4bjY6OLvz+mWDv3miUtQmKRhsapOsqKuLrDhyIRquqotFPP42vf/ZZtq62\nVjr/0VE21ugoO8fi4sRjfPppNFpZGY3W1bHv+fZKbNzIxvB4pMfnx3nhhWjUYokfo6pKcZjp0dHo\n6YqK6LTSnJWOI742VVXZu0fJ7kFWjheN/zZegMOJmR4djZ5uaIhOp3gt090/F/j13r3RHwHRHwHR\n07L7/eazz0Z/BER/WVureo4LfQ06Xn45+uu9e6NvPvtsdODup9GG0w3R0emle/0JgiCIhSOm+ZLq\nQmouRBB6SOgoYzByku7+mSCZpUlpKcDT+errgRMnpPvqsTsRn2NJCeuGqxWZkUcA166Np7pWVQG3\nbqkfw2IBenpYRFYpksjnnP9ToNchjfqJmyZVVQFbt8avzZYt6k2Q9EQsk22jZSuTaeoAtIKl4hpN\nU00zOqsW7XuYuqi+VVeHvtZWeGprE1JZExoo5QDUaIggCIJIFT3NhajGk1hyLEpdQmJHmYXd3wiB\nABNodXVMfHFU6hsBMEsSgHWrLS5O3F9cx6g2fk9P/PstW/TN6+RJJiRbW4GXXmLpswC7XhcuJO4j\nPkZBQfx73hyntTVeO8rn3OtgDXhawVJPgYTjtH/nO/Fr09ubOFay48hJtk2ye5ANgki9NlLPuSZB\nrcYz3drPdJDXVWYL/jtKrX7yXCCAU34/ZiYmMjrPdM9PnN5syc9fkGtF6IPq8YhMQ88UsRhQjSdB\n6EHLhzLb+2uhZjUi7sra1MTsRBobEyNYR4/GooP5wK9/HY8GirvP8mNcvRqPjm7YADzxBBvP6wX6\n+tjyzk7gsceY0ObHOnmS1YMCwIsvsqhqOByfwzvvAG+/DXz5y8Du3axT7vAw8w7l5yI+xtgY2+7W\nreTCXqkBz8WLwO7dOLd7N0IHD+L61BSePHWKiYOkY+n4ACHZNmkViKaAG6l3uk3zwxK1Gs/F7C6r\n1Dk2E8ijuBy1+kmteYjX/3zDBpQ/8QTyy8sx3tsriRTLj5vu+YnrTE/5/Vm5VgRBEMTDC6XaEsRy\nQJyCWlERb/gjjqzJ033d7sRUSvE2HL6t2GZETkMD8NvfxkWh1RoXjGVlTNDydQBw4ABLq5WP6fEw\nISv36eSpu1u3xsfJywN+/3smRg8eZJG5xx9nIlosqkNgkc5LTwBDn8SbEnm9yqmFydKKldbJU1L5\nssWwV8kketKrk6CWSprNFFOtNN5kqa/pYDRFVWsefL3V5cJsLCrq8HgQHh6WHEN+3JmJiYydX7au\nFUEQBLE8oVRbglguqKW3csTRqXffVU7nFG+Tn89EnzyVkm/DO9vyaNfJk3GBaLFIj81tR7ze+DIu\nOgFgZEQqOgHWPVZsXQKwjrfDw2w+4pRaiwVob2fnIj7GF78Yf93bCwwNAW1tiWmhPOo39EncJmb3\nbnaaPPrmcmHP6Ci7tsnsUZTWyVNSNexVFirdMxm65pCmTQyP9skFi9ryTKCVxptfXg6zw4GRK1cQ\nXLcObxw4IJx/OvdFLYrLx3xt9Wqc2L1bGFuvxckju3YJ43qqqxOOwY/r8Hgw0d+P+UgEXr8/I0JR\nj/1MLjzLBEEQxNKBhCex5Hgo6xK06u3EtYNer7JgEG/T2yv1q8zPB1auZFHLFSuADz4A1q1jPp6N\njUw8cubm4t8XF8dtR3p79Z1Lfj5Lq+Uit6aGRUXn59lru52
dAxe/c3PA/v1MdOfns2W1tcBrr8XH\nFAvm06dZRFX+Rnh6Ov691wtwAVBeDtfEBBxKolUPBlNSF7PGMZfmkA200njHe3sxHw4jGokgEgqh\nv61NOP+k1yQAwAfWrElBX8lFGv8dxcd80NeHwc5OYWwuvruamhSFG1+//+hRYVzx91wI8uMWb9yI\nwc5O9Le1wWKzZUTU6/mAYLk+R7nIQ/l3j8gq9EwRiwEJT4JYCmiJGz3RKfE2fDy7HZicBP7lX1h6\nbijE6kD/6q+YX2dnJxO78nR5sxlwudjy2lomOsXRSCXsdpYGvHIlS4k9c4bNpayMiU++jcMBdHXF\no6ZmM4tmtrYykVtRARw7Jj3XYJCl6c7OsnNQEpGxiBEAdl5cANTWwp7s2mphsGHQYtY4pjqHpRLZ\n0orS8fPm2AoLsfOVVyTrFK9JNxIbVIlQE2l8TFtxseLYWsJNPK7SMRxuN+xuN0LXrwNgfp/yuWfz\n3uXCs0wQBEEsHajGkyAyRZr2E0nHtNlYF9fmZmlt4cmTrEHPjh3x2kaleciXfe97wC9+wYSaOILJ\nsdlYNHN4mAm6SESaFnvlCvCFL0iXlZTEmw6p0dDAmgpFItLlK1YATz7Jjieu7RRjscTnqmRJw61K\nACYyz55VtjKRr1erZczG/URu2GgYncNSsNlIVt/J11lsNpjtdtz97W8xE3tW8yoq8Gc3bgCA+jVJ\n0ZaGX+edr7yCrkOHEsbORB2l+N546+vxjMgK6VwggJ6WFkRiP6da986o1U0uPMsEQRBEbqCnxpOE\nJ0Fkimx4dSYbU94IaN06FqUUd53l+8jHOX8+3mE2GZWVbFwuBk0m4PJlYNs21txH3JVWiVWrWAQ1\nEmFRzTNngPJyaQ0op6EBmJhg4pA3JyoqYo2GLBb2/eiougdmKMQsWaJRJtB37WLnyJsJFRdL12u9\nUc4F79UcIZcbzXCxdO/qVUFMygWWWhMejqaY5g2qDsO4LU0SMiHckt0b8XnDYkHl00/jwNGjqsda\nCh8wEARBELkJNRciliU5W5eQDa9OPdYeABN1lZVMKHHR6fEA/f0s0sd9K/k4WoKR87nPMcHHx/v8\n54H//J+ZyBOnrirhcrHj8OjmypVM7D31FHv9mc+wSKd4XsEgE7o7drCU2vPnmVCdm2PnVVWlntLq\ndgPHj7OIqtvNRKe4mZB8PSA0bWrfuTOxJnQhvVdzHD2NZhYLnq7KRadS2qfcnzIyNgaT3a66fQK8\nQZXo1M8FAvjpypVoLi3Fm6ImRYD+31GZaLSU7N5IUovn5iQ1rUrkYursUknzzjY5+3ePWLLQM0Us\nBiQ8CSJTGKz1S3vMYBCorwf8fhZJ5AKxpoYt37gxXqPpcknH2bFD/ZguV3wcHnHMz2ciko+3eTNQ\nWJh87hMT0qZEnBMn2FwuXAA+/pgJzT/9CVi/nonRkRFW4zkwwIRvzEICRUVsH73Xlottp5PtpwRv\n2vTee4k1odm4n6mi1dU4y8d0AFnrRJsuXCyVVlerdnQVi7Px3l7c7exEdGYGBVVVKYvpUHc3pgYG\nMDM6itttbfjl9u2CQJqJWaAsBGLxKhdp+4JBOMrLhW3tJSVJBWUufsBADYwIgiCWD5RqSxALQZbq\nBYVxe3pYWuuVK6xxT2kpizS2tbGI3ZYtrAGQ08kiiP/2b6xhj/xnc/VqlhobDrOmPkC826wSNhtb\nz2svS0uBe/fY99XVzHtzbIy9NplYumttLYvO8vnIPTs54ppOjpGU195eFum8cIE1PlK6B7zuUy19\nN1dYjLTfJZJqLE9XPRcIoPfkScyFw/Ds2JGQWpqptGE+DsCa+licTgzGnuNkaao8NXispweFXi9s\nRUXYFwyiq6nJUH2lEkqpsnye9pISfOPSJRRqNQHLMXI5zZsgCIKIQzWeBJErZPpNPBdR4npOJXh9\n43e/Gz++xxOPIsrhtZV
GsNuBmRkm2jZuZOI3P599TUzEhaeY8nImfAGWUiuvNzWZWPMicQ1raSmL\ntBYVGRPvSteK3wO1xkK5xmII5KUiymVI6hqRKAL11lUqNdoRL9vz6qt453vfA0wm+I4cwduNjboE\nknx+fI6Tg4OK9ZVGGv6IRVrJli0Y7+2FxWaDtaAAvubmJSnatO6X0YZIBEEQRHYg4UksS9rb2+Hz\n+RZ7GsaimJl+Ey9vLKQUHeRUVbH/+/qYaKupie9rVGh+5jMsPVa8j9vN0m7v31cWmXK2bWPCt7+f\nRUDPnQP+5m+AN99kUVqLBfjwQ9Yo6cUX2TKbTdrx1oh4l18rhXvQ/txz8E1MZD4inSkWQyAvsihP\nVVCII5Gl1dX4ytmzhsRIsmZFyZrvcIFkyc/HO1euoKayUlGwRiMR3G5rg62oCJGxMUGoqglXpWOq\nXRuxSDvl9z8UjYIeloZIOfN3j1g20DNFZBo9wtO6UJMhiGUHrw8E2Bv0ZG94gkFjb+LVRC1ffu0a\ne22N/Qi7XEwoOJ1MqPGGPvn5LNV00yb2emyMRSi9XuDWLePRzU8+SdwnFFKuOzSZElN5AVbTWVjI\nhOf9+8AzzwA3brDvt2wBtm5lDYzKy+Pn1NwMNDay/fU0+xFfP17rWV0NrF0LHDmSeA/6+liklu+b\n7TevRlOvuQfrQqJxzGxHmnhtH8BsTvQKin3BINpj3YvVonzJ5i4+LgDYioo0vT7F6b2IRnEvFELf\nlSv45fbtcK1ZIxGx3vp6rG9okFisdDU1ITI2hvyKChw4dkwiVkdjP+viY6pdG17vmWyuelhKUcRc\nbIhEEARBqBCNRrP6xQ5BEMuQZ5+NRoFotLY2Gh0dTVz/8svR6N69bDul9cnYu5eNDUSjDQ3xsUpK\n4svlX3Z7/PuKimi0sjIa/fRTNp7JFF9nNkejFov6OJn4qqqKRgsLE5d7PNHoU09Fow6HdHlDQ+J5\nl5dL14+Oxv/Xus7icerrlfczci8zjfz+LkF+vXdv9EdA9EdA9HQWzuHNZ5+N/giI/rK2Njpt8J50\nvPxy9F8qKqJHSkqiJ/fvT9g/2dz5cX9kNidsMz06Gj3d0JB0PP71E5cr+k/FxZJlaueiNB/xsp9V\nVUn203Nt1Oaqh2zf20ySznkSBEEQmSOm+ZLqQop4EkSqaEUx5RFRt1t/lKunh/1vsbBmP/39yg14\nOG43iwTyZkI8lXTTJlY/KY48JmsWlAm4X+eGDcD4OFu2cyezU/ntbxPPw2pl5+nzxSO5tbVs/vx8\n+DUWR72Uajd5tFJshaLHs9NoRDpdloFVS7YjTfuCwZRrMXnHWQCChYg4Ypps7vuCQfx8wwaEY3XQ\nJosF06OjCIdCQkRRfswx/vPKMZsxK+psW1ZTA9eaNaoRWKX5iJd9+fRpSfOhPa++KkRL1a6NOPpp\nlGSR3VQjofJ9M9FMCUjvPAmCIIgFRkuZpvsFingSGebs2bOLOwG9kUweReNRPnG0ct06NkZVFVsn\nH+upp5SjmTU1iVFEkyka3bEjGt2/n0X3xOPYbOlFLs1m9XVWq/Jyj4fN4dNPpVHYhgb1iK04ullV\nxfZXi3ByxFFDebRydJRdY6Vrq0DCM5VOtFoPWue2BMiFSFPHyy9Looo8OidELYHo0erqhDlqzV3Y\n32JRjPyJI4L/XFER/dXOncLr/89uj/5vJpPw+qeVlZrXSGk+8mXZikJ2vPxy9Nd790bffPZZ4Vh6\nIrtKc1AaS23fpRRVzQUW/e8eseygZ4rINNAR8SQfT4IwCo9ktrYmej+K4T6Q3E+TR+W4nUhHB6st\n5N6Y4rG4JydnZoY1CTpzJl7XyYlGgQ8+YNHBq1eZr+fq1cxKhNd6pkqy6KhafejwMIt2fvvbrDMt\nEI/sKfmHFhay2k6+3Ucfsagj//L7lf0redSwupptI24Y1NTEbF2Urq0WPGqq5x6nCo/eL
nLtnNz3\n0Qhi/8jFItTdjcj9+wCkHpX7gkF4/X546+sTmgudCwRwyu9P6rXJ/SxXPf00gMTI37gowjk9MICJ\n3l5hDmU1NZIMg+INGzTPw+F2w+5245TfL9wL+fXNVoRZySdT7d5qzSGZ56Z8X6rNJAiCeAjRUqbp\nfoEinsRyQ289II+aiaOGxcUsMrl/P3vNay1raqLRF16IR9k+/ZRFL/Py4tvt3cu2KS6W7iv+2rEj\nvQhnJr7E8yovj0b9/vh1euGFaLSsLDFa6vcrRwArKuLb1NdL1yWLGoqjoSUlxiKL6ey7hFCLFi4l\neGTySElJdIzXM2sgjrQ1ezyK0TmOOPInjuZCKrKxAAAgAElEQVSJI5z82Hw7cbRVHBXVinpqRQCz\nFWE2UkurN1KsNJZ831yImBMEQRCZAzoinmSnQhBG0WszIbfxEOP3s26z3E+zvp6NK/f6fOQRVuPJ\nsdniUUwlKxTuqcntVZLZrKSLy8W65g4Nsbns3s3sUTo6pNFJsfWJ0jXZto1FLXt7E+tfS0vjkWK/\nHzh+XN/cuH1NSQlw6RLr4quXdPZdQohtKOwlJXj+5s2Uo5eZ6IJqdIxzgQBGr1/HWE8P/O++i0KN\n+yTuEhseHobV5RLqMPMrKvCtGzeSHlN8vfIrKjA1MAB7SQmqnnkGk3fuwOp0Ir+8HPd7ejD0/vuI\nzsxI9tey+hB7cCbzAc0Ecj9SrXrRhRqLIAiCWLrosVOhVFtiydHe3r64E9CbJslTQS0W6fKaGmbp\n8cQT7DVvgCNuOJOfz0TavXvSfbnoNJmUU13n59k6LjazJTpNJpY2+/77TFgODbH02lAIMIt+rRQU\nMOHIhai8CQvA7FV6e5VTW/Pz2f+FhcDf/33ivoEAu07yVFye5nzzpi7hKDxTgQCznKmoWNaiE4in\nPtpLSvCNS5fSEgrJUiy14Om+N48dSxgjWSpwqLsbdzs7MTUwgK5Dh3TPMTw8jIKqKjyya5ewbmpg\nQHPe/HpZXS64N26Et74ez9+8ick7d4R5/+Ff/xWDnZ34/cwMnJWVMDscAKSWLGrw9F7eSCjVFGgl\n5NdRfL9+vmEDpvmHOykgHqvr0KFFT79eriz63z1i2UHPFLEYkPAkiGzBxc+HH7JIJGfNGiZa+Xpe\nmyh+zYWYWh2lWhbB7Kz6OjW4f6URolE2v8ceA155Jd6xt6ODiWWnkwnuBw9Y7WkgEBd1YqxW4B/+\nQb3LKxfO4+PA976XOA+1etvYhwPnjL6B7+5mdaEDA4AOMbOU4ULn+Zs3NaOFWqRTr8eFC/e5FI/R\ne/KkIGraX3oprWOKt//mRx9h/9GjyK+oUBxDSfDuCwbh8HgwOzGBOx0dGHjnHbzd2Agz94kFEI19\nMFT82GNouHYN5bW1AIDI2JimOBbXVeoR8mqiXGm5fDx+LficeeffVKBaTYIgCEIvJDyJJYfP51vs\nKeiDR0a3bQP27WPLeHRTvJ5HB8Sv+RvDmhqWbqqF1crScI1gsQD/7t8BzzxjbD8xMzMsxTYQYFYp\nAItObt0aF40lJSxy2dKSKDxnZ5nAk4tw8fgck0L2hoYtid5InPBMLQObE72k2hxITZTxaJ3R8bhw\nKa2uhtfvl4wxFw7HN5R9oKJ1TPk85ds73G5868YNxTHUGu5Y8/KEbcJDQ+hrbYXN5YJJ9LPnrKzE\nf+rqgsPtxnis6ZCtqAiwWJJ+CMLn+7PVqzES+zCotLpaVcypPdtKy+XicF8wKIhureMoXUsx6dx7\nQj9L5u8esWSgZ4pYDEh4EsRCoCasxIjTRouLgfJyoKwM2L6drd+4Mb5tYaF03y99KS6a9GC1srTX\nO3dYdM8oXAQ6nUzw/tM/xUXi+DiL2ALxOsneXiDWfVQ4PpDo0Sm/NrwLLk9PlqNxXQ1HY/Tcp4cc\nI11QAe3OuVy4fOXsWTxz/LhkDE/s/pdWV8NeXCwZR
0s4y+fZ1dSEycFBvN3YKMzDSPfWc4EAwvIP\nTiwWRCYmUPH5zwNgfp0N164J4/FIcmRsDLfb2pJ+CMLnO9nXh0hsfoVr10rm9otNm3DE7cY/l5eD\nfwwjf7aV5q4mutU6/2pdSzG50N2YIAiCWBpQcyFiydHe3r60P6kLBFhKp7yRzsqVcRFYVgaMjLDv\n6+uZTUplJYscfvIJS2HljYkA1njnvfeA/n59c9i5k0VSOzqAyUlj83e7gS9+EXjjDeDJJ5mwFL8h\nt1qZcB4bAz7/eeDECaCxkaXDihsiVVXFrVPU0NvISYVwKITzgYBms5OMPFNq93UByUSTHy2MNsER\nN+XRarAjR3z/Tvn9muOIz38+lkLK56lnf6Xj8vMTn4ccl9eLyIMHsNjtcK1bh99HIti5aRN6T57E\nzOgoSqurke/x4LZoPvLrxq+rragIkbExxe2OuN2CfYyzshIVTz2FPYcPo6upKasNfhay8RGhzJL/\nu0fkHPRMEZlGT3Mha7KVBEGkiZIY4XWJAItmrlnD1k9Px/fjzT6sVuBv/xb47nfj+4g72wIsZfbU\nKUCclqhFV1d8fD3w7rglJezr+PF4nac8xXd2Ni6aOzqAF19k5x4IsPNqa2ORTj1RRR4JTREejVkQ\nxPeVe4EuMDwyBQDnA4G0z11JyO4LBnWJeY44AmfJz8frPp9uYSy+f3qi1+Lz9/r9WN/QIMxTvr/8\n3MTibV8wmHDtrCoZBVaXCzP372MmFqWc7O/HEIBP3ntP2KZw7Vr4jhwRrpv8WOLruvOVVwTh2NXU\nhN6TJzEXDqN8xw6YYo3KLE4n6t95R4iois/7V088IdSWZgqj95wgCIIglKCIJ0Gkgt7oltg+pKGB\nbXfsGBNgSnYoAEujjUYBbnBvtwNFRdIIZyawWlkHWpntAwAgL48dl0cyy8pYLWdzM7B2rTRt9sAB\ndo5K4wCsM+zatSy1d9Uqlnb77rvGO8bmQEQxKdyGRa+ozgKZjkylE63kGI1aJhvnl9u3w1lZCXtR\nkaJwVTp/LjDvf/IJ5sNhWBwOFK5bh9Hr14WGRo7yckRnZ4XXJquV+Y2Zzfj6xYso27YN4VAIv9i8\nGdOxrAT3Zz+LqTt3EOYfsqhhsaDy6adRUFmJ8d5eWJ1ODH/wAaZjNkkurxeutWsFOxa+zb5gUHK9\nAGB1XR3uXb2Kr164IGkIxc9bbBGT6v0iCIIgiFTQE/Ek4UkQRuHRLC6+xD6VcrgY8XhY1HB4WJ/F\nicnExCf/P1uYzcyCRUxBAUuhjUSknpvl5Uz4bdgQF8EWC/PztFji1i9btjB7laGh+DqxUAWSXzM1\n5CI+195Up5kWrIaR9NlwKIRfxcSZTUWcGUGPkFWbn9Jy8XglW7ZIRJaeeWoJYaUU2Z84nZibmlId\nUyzWABYRHf7wQ+HnwpKXhw1/8RcIdXfDbLPBYrfDbLPB19yMtxsb0dfaitLqajy4dStBhJosFkRj\nP++O8nKEh4bYcptN6IDrKCsT9hNv4ygvR2RsDPOxTIbSbdvwlY4OxevEz3t6dFSSXkzRSYIgCGKh\nIB9PYlmyKN5TgQCrwSwtlYpOi0XqUynfh3tCPvoocPductHpcsW/56JzxYr4cZKxebOx8+FjykWn\n2Ry3QCkokK4bGmLndPEiqze129n53L/PRGdFBas17exkArW8nEVt+bUqLmb/p9oxNosdZzPyTOn1\ndzWIWmMXpaY9DrcbBWvW4G5np2YnX62mP4C+jqVGuquKxxvv7ZWs1zMftXRbvu/bjY0J6aBzski8\nragIAJjHpsWC2ViKO++qW7Jli+TnwrNjB+5dv46Bjg70t7XBVlCAZ06cENJjC9etg62gQNJ1+Q9O\nJ1bX1WHl008L8y17/HHh+0dizYhKq6vhqalJ2MbqciE8NCSITgAoXLdOiOAq3ff9LS0oiHmH3v/D\nH3C6oUFYr+fay
klln6XCUjw38lwkMg09U8RiQMKTIPTQ3c0a/4yOSkXn3Fzcp1JpH+4JKar3SsBu\nZ+mqzz0nXR6NxlNxuWC125VF6I0bxs7HYmHpu0C8ztPlkgrRU6ek+xQVMcHn9QK3bycK0127WO2n\n282+HA62vLCQRX6vXEmvY+xD2nFWTWypCT69nXz1WM3o6Viqdryxnh4ATOjtfOWVhPHk++mZj5oQ\n1uq6CgDmvDysrqvDN69exfqGBiY85+aA2VlY8vKErrrcAoVzt7MTY598IpkrFy7Htm7F1MgI7nZ2\nIjw8DJPdjrwVK+D7yU9QsGoVZqemkFdRgQPHjuHA0aMoXLcOD27dwr0rV5C3YgXcmzYhIttmfUMD\nVuzaJZmDvaQEvuZmnAsE8HFzs6q36XhvL+bDYURCIfS3taFl82aEQyHdtkJiUtlnqbCcz40gCCKX\nIeFJLDkWpQubuLHI1q2s02wsmpEQgeO2KNeuxZclS5edmWEi7s4d6fKaGvYlJhLRl6qrhLgJ0Nxc\nvIGRy8UaBonSDYVtOEVFTDz6/ez/UChudQIwr1K53QmvQRsfZ+fn9TLBuHkzixwfOBBPT+U2Msmi\nD1mKKAK57WemJrbUBJ9eX0XDVjMa8yvZsgUtmzejubQUbxw4gIJVqwAwK5GuQ4c0z0vPfMTCVRy1\nssSebaV9v/7BByioqsKf/f73ePbNN1Ho9WJ/SwssdjsAlg5b+vjjgs2KUhOhsscfl8yVC5cHfX2Y\nFXV0js7MYHpwEJaf/xyh7m4MdnZiemAAXYcOCdHoqbt3MRMKYXpwELfffluyDbd8MQFwxLIdzHY7\nih97DG83NmL0+nUhRZcdUPp7RT73qYEBnA8EUrrXmXo+cpGleG65/DuKWJrQM0UsBlTjSRB6CIWA\nl15ib/Sam5n4UavpE9cickwmZi3yySdArKmIhKoqZoUijjiaTCwyqdSAKBWS1Ys6HKwrrrzhkdnM\nROLFiyyiye1e/H4mNF98kY175EiiIFRqtiO/NuXl7HhcBOdi7WaOotcqJp39jdSXyu1G8isqMDUw\noLvekM/Hkp+vWvspns/M2BgGOzsBAN76eljs9qT7yml7/nl8+qtfwZKfL1iU8C645wMB/OnUKUFU\neuvrkb9iBULd3Rjv6cHM+LiwjxJevx/DFy/iQV8fYLFg5e7d+NKJE0JNKMDSaedmZhCdmYGtuBjf\nvHIFZw8elHTlvd3WJqk/zVuxQmhK5P7sZ1F//rzkHMOhENpfegl333kH04ODcHg8KN64Edb8fNhc\nLviOHNH9rKT7fOUyy/ncCIIgFgtqLkQsS3Lee4oLLpcrMYro97N1cusTLvz0UlwMrF4N/O536c/X\nbGbNhPr6WK3m+HjiNuvWAX/8Y/y11cpE5NGj6hFIJWHOrw3AoqAPHsS3V+sGuwDdbHP+mVokjHS1\n5Y2DAFa7+MyJEyn5SSY7pnhdXkUFpmXCVu98zwUC6GlpkYhHuUB+48AB9Le1wVJQAEdxMaYGBxHV\n8SFQ6bZtKPrBDzD5d38nCGM+nz2HD6P9xRcxcOFCQiOi9Q0NmJmYkDRzCq5dK5nj6ro6mO12IBqF\nr7k5QZT3njyJ8L17sOTnwxzr3jscs06iLrdLG/odRWQaeqaITEM+ngRhlEyIHLlnJaekhKWsyhv6\nFBayqKER4enzAR98YHxuQLyTLY+Azs+zWtSyMmXRWVLCmgmJhefsLDu3DRuAJ55QvlZKHpzBYDxy\nzJsYVVczuxWlqCmQE/6YDytGUhL3BYOs5lAkivQKHXEk05wkbVY8nwPHjqHr0CFY8vNxyu9nkcjY\nBz1WlwufvvEGjhQXw2y34+sXL+LSD34giZZyQWcrLkbl008L0UA+F4vNBntpKWbu3cOk+AOSGLai\nIkTGxmCy2WArKGAdb/PyMDkwgIvPP49NsVReACirqREE+DMnTkhEOgDAYkF4dBR
f+PGPcfLpp2F2\nOPB2YyPMIp9dW3Exwvfvw15UhPzycpzy+yWR3d6TJzEVy0iYjzVUMpnNqteSIAiCIBYaingShBgt\nyw49wpRvY7MBV6+y1NqSEuDSJVbfqGTtoGRrokZNDXDmDGtGJIqoaKJlzbJiBZurON3W7QYuXwa+\n/e14pJIjjlimkiKr134kB/wxH1YynY6rtq0kkrliBR558smEiB4AnD14EH9qbUXZ44/jwNGjCVFO\nNQqqqjA9MiLYqpgdDsyHwzBZrfj6Bx+gbNs2YdufrlwpCDie2spFJsAEoMlqRelnPwtHSQmmh4Zw\nN/ZzaLJaJVFRS34+rE4nympqhPny69qyeTOmBgYklivrGxowOTgonI/JaoXJbMbKvXsRmZwUIqgO\njwfhmKURj2Q2l5YKPqQAizq7N23C7bY2eKqrsV90fIIgCILINGSnQhBG0bLs4NG31lblTrYAcPIk\n26atjY2zbh3ztvz2t1kjoXT53e9YZ13elZajZbnCRSfvNiumpoZ13m1oAP7wB9Y8ye9nUU6vl4ls\nbu1iszEhnZfHXqdqb6K3WZBWN1u9zYkIw+jpamukQ6hWJ14ArDmP3a54zL7f/AbhoSH0t7Wh/cUX\nAQDjse65ptjzb5P/XAB40Ncn8fLkNiXR2Vlc/Ou/lmwb5n60iDcVWl1XB0dZGfJWrEDxpk2YGRnB\nQEcHbre14e4777CNzWaJ6LQVFqJ02zaER0bQ39YmOV+H241v3biB9Q0NEsuVPYcPS65FdHYW8zMz\ncLjdsMfOy1NbC091tWQfACiPNfsy2Wywud3I93iYt+jwMG7Ljp8NlqJFCUEQBLGwkPAklhwZ8Z5S\nEyvl5eyLv+mVb6fHS1KcMtvWBty6xSKTra0sssnhvp1ud/JIpJxIBHjsMfZ/XR378npZ859kIq6m\nhglKufCsq2Odeg8eZDWpxcXAiRNxaxQ+x48/ZgLwc59jacQjI6wpUrajkFoCVc+HARqQn1nqGEnH\nTdaJ15KfD4BFFGGx4KcrV6K5tBQ/W7UKJ3bvxlt1dYLnJgDBN7Mg1j05OjeHgqoqwS7FKvbFTcLA\nhQv42erV+PXu3Xht9WohTRUAzDYb9re0YPLOHYRHRjA9OIiJmN2Kp7YWc+Fw/GdXlLHwMYDI+Dju\nXbkiLLv1m9/gzQMHEA6F8ItNm/AvK1bgj8ePY25qCt76eqG+dF8wiLyKivg1KyhAeHQUe159Veis\nuz9muyKuSXVWVsJRXg6r04lIKITbbW2CpY3V5UJ4dFRTEKYjHsmiJLvQ7ygi09AzRSwGJDyJhxM1\nsdLbCwwNxb055dvp8ZLkNiMFBSzCyaMgJSWsO2xVFbBzZ7zx0NSUMeFpNrNx29rYMVatYqK4s5P9\nz43sucjlgvPMGSYoRbVnOH8eePNNdt5a4o0LQB5Rqq0FPvoo+6mvWhFNPR8GEFlDr31Lsm0dbjfK\ntm8HAETu38fttjZMDQxgZnQUk/39GOzsRF9rq2CBUlZTA3tREV73+TD4298K40zevYtjjz+O6dFR\nWJQi+3LMZoRHRjDZ14e7nZ2sC62IW7/5DX62apUkqln86KOCUNT6uXV/5jPC9/y82l98EZMDA4hG\nIojOzuJuZ6ckwutwu7H6S1+CKXausw8e4HZbG7oOHYLd7cYpvx9vNzYK6c9cLPaePInw0JBQu2p1\nueDeuBGOsjLMTkzoinqmIx6XokUJQRAEsbBQjSfxcKJWNyhf3thovL6Q1y52djKLFIClwX74IfO7\nlB/nD38wliJaUsIijnxOmzfHbU4A4K232PHffBP4/vcTayh7e4Hdu4ELF4Af/ICJ62vXgOFhfZ1l\nX30VOHRIuzbTKGr1s1p1t3prRYkFwUjNpxjecMdTWwuH243b4sZcgGA5wjvl8hpJNRxlZZidnITZ\nbsfc1JQkkmkpLMScqJGWvDZTC0teHsp27ED
o+nVWV2mxKPrrmmw2qe8mgILVqzF5545wPJPVCmtB\nAeYjEZisVljsdhQ9+iiGYt1oAcBeUoLnb97EKb8/oWuvvMbV5nLBZLMJ9Z6Ttgo4IwMoqanFV88k\n/3BAfA/0fJAghixKCIIgHm7IToUg1FASK4EAcP060NMDvPsuE2Xi17GUPmFbuUjiy3p62La8FpPj\n9bLurVy8Pf00a85z7x6Liqq8eVWkspKJRbebpc6Ka0erqlh6rx7Eoq6qSj2CqSX+MoHaMai5kCp6\nRF6qQjDVfY1YsIgRCxcAaH/pJfS+8YaQMbC6rg7PvvmmsL28mY4SjrKyuG2JqGkW9xi1FBRgTqFj\nrRgtUWp1OmEpKEB4aEjzHIF4Y6PkB403AjPZbPBs3w5HaSnmIxH0t7UJwrCrqQk3jx1LuA7cambE\nVYsfThzDN3AIE/WHETyhz0uVxCNBEARhFGouRCxLMlKXoFQ32N3NopQDAyyiJ38tRilVly/r62P7\nyQ3mx8fj+xw6BKxZw7rI8je1zzyj3PhHic99Lj73WG2cgOjNuSbiNNVkabPZSmcVp9HGbDQSjqEn\nvTlNlmqti57UyHTSJ1PZVy3lUqt+UNzIyOF245njx7Eq5jFXVlODL772mmR7TyylvWjzZpjz82Er\nKYGJP0MxTOKGW7GfM09tLfzvvov1DQ2oePJJzfOp2L0bXr9f+VwLC1GydWuC6PwYTLACLOXVHEub\nNVmtmJdFQBWJRuGsrIS1oADRSARDXV1CqvH6hgaUbNmCU36/ougsq6nB12Ln9+6u07gHLy7VtuD/\nbU7ebfh1nw9vNzYK9jTUJCi3WKq/o4jchZ4pYjEgH09i+WLUk1Murhobpa/FY167xl57PCyddvXq\neM2mEtXVbFve6VY8Pl//2mvA+vXafp5mM0u1fewxJlwnJ6Xr//qvWS2nEvJrwj1HtdJU9W5nBO7J\nyQV6fT0TmPJjKPmBLkNSiS7qqatLp/YulX33BYOKUTMuYgHgfCCgGAnl12CspweFXi+s+fnw1tcr\nWqscOHoUv9q+HfmlpZjo6UGEC7BYtNBRVobCdesQDoUQnZlBWU0NXGvWwNfcjK6mJtw5fx6zU1OK\n6bBiBi5cQP6KFYp2RLPj47h39ariftHZWZjtdsyKfi8ki5yu9Plw97e/xXw4LEQ0g2vXSra529UF\na34+ImNjgr0LwDrorti1C9aCAsGP1O5242C/HzsrnPifjwXhFnmUyp8x8b0RW7Wo3SeCIAiCSAWK\neBJLDl8sCqIJtzVpbQVi1gtJkUfWlCJtPKo5PMxSUzduZNHNvj7lOk2Xi0Xztm1jTYQqKoBjx+Lj\n+/1McJ09y5bxxkQAi2TyRkDV1ay2E2DdMzs6WG3o/fusu60YU5IsB3mkVq+lid7tdHIuEMDrLS14\n6/59hAF2bs3NGT2GEczB4KJbQaQSXdTT2MdI8x+1fXmETc/14aJHvr0eEcuvAW/2c7utTdVaxeF2\no2DNGtzt7JTUbyIaRUFVFdybNmGoqwvRmRlYnU5YnU7MxbYLdXdjamAAkfv3EY1EYLLZhAilnOjs\nLCb7+1UbCc2Ju+Da7XCUl2MjWKSTd9a1FRezDVQsjyxOJ8xWK/7s44+Fe9XV1CQRl6b8fMzEGiEJ\ny2PjRcbHhSixWEwOd3bAM9CKq4cCkuurZmejZtVCLD66/+4RhE7omSIWAxKexPJFHDlMJsY4bjf7\n8vuZWOTL+Gu5ncpHH8U7vPL/a2rYtqWl7PXEBOs829ubmLbrdjPLkhUr4sf48Y/Z93Y7E6ozM6ye\n8+xZZpciPh/xG+Gysvjxi4rUu8DmSAfYUHc3Bu7fRx+A8zYbcOnSotZu5oIVRCrRRT0+m3q20dp3\nvLfX0PVRup77gkEUrlsHi8OBtxsbFQUsvwbci9Ph8aC/owPNpaV4I2ZFwjkXCMQ72ooEXWl1Nb75\n0UfCGJ7
aWpTV1OBurDPuzyorMXL5srC9yWoVOsymhKgue35mBiaTSegkO3PvHqxOJ9ybNiG/ogJ5\n/OdUPsTkJG63teGd//AfsL+lBV1NTehpaZH8jDsKCyXXxl5SgpW7d7Pr5nJhWmaXovQ8cXsVW1ER\ndr7yirCt+MMJJasWgiAIgsgEJDyJJYfuugRe+1hYCPz93+vbRx4R1LJT4a+vXmX/nznDaiy5wCsu\nBl55Jf7a5WJpsuI33eJjHDrExGhBQXw9r+cMBuMNjsSi0+Vix+XHF1ujbN8uFaELUC+pB+FNcUkJ\n9nzyibRxUwYw6kd4fWqKzWcRozzpRCazjVFRrLS9OEKpJmD5NeBenCazGdODg5gZHUW/zA4k1N0d\nj3TOzcFst2N1XR2+cvas4IfpWrcOZocDoY8/Fvabm5oSLEdgMglzNYJadBQApgcHcZU3NAIwGw5j\nqKsLUwMDmB4cTD5G7Oc61N0dnyOYsCzZvBkurxfuzZuRX1GBb1y6hC+dOAFHeTlmJyYSro/S81QY\n+zmLjI2hS1S3Lq+vTfWDCiJ7UD0ekWnomSIWAxKexPJl3Tr2//h4YnMgNd5/n/1vtQL/5b9II4T/\nf3v3HhzVeeZ5/PdKfdENqYUkLMsYGceY4AQb2fgaKGvWJo4xDp148SSe3eCdyqomrtp1qiZ4s5PL\nTtXEtalJpWaSmirXpioLGSfEBmKIMSYuZK7GNg4bcBJDjA22bAxCCCSEuLRuZ/84fY5Ot7p1aZ1W\nq8X3U0WZVp8+5+3Tr4Ueve/zPMXF9mqkN5fzqafsPMtFi+xcz8ceswM8J5A6d86+9tq1Uk2N/drm\nZmnOnNSrqM4P9c6W24YGafVq+++RiF0VN1l3t902xdmm6g1yz57NbGttlrk/FB87prDPQac09hXM\nW7/3vZwHfZP5B/7kIGakwD5dED1SAOvcg2n19bp/3bqEQjzBioqE1yQHjAM9PTr9hz8knKts1iy1\n7d3r5iwOYVmD21a9uyIKhv+ncUyro2kqVYeSPufK+fPVuGaNJM/Kb0WFVFCgvu5undy1S93Hj7tB\n7Kb4DoiahQsl2avDF06ccD+T5Pm0u6lJHYcOSbILELGNFgAw0WingsllrAWBhpNJG46KCsn5QdRp\nL+IU1YlGB9t91NZKhw8nfs2xYoUdDCZf2xlPWdlg8BoMSvfcYz+/Zs3gGJPbvXiLGrW326+74w57\n+27y++vstAsPeSttLlwo3XSTvRrqx72d5MbTjxAj86Nlymg+k5eWLNGJ5mbJGLdCbe3nPqfPx4tn\nPTd3rmKeVUTJbj9SXFOjstmz1b5//8itS5IU19YqMneuTib/f+2nggLNuPtuheO5nwWhkFsUSJJ2\nrFypo88/r8LiYvUOs2I/bfZsldTVqevoUQ309bkBdn00qgc2bkw49tmrr3b7nia3pgEAYLxop4L8\nk6pNSaa820qfeip93qOXU8ynpER67bXEFULvCktrq11YyGnf4OR4OquWqba0Ol/z5mr29trvNxRK\nXcnVCTrXrUssatTWJr30Uupts5GIPffB4sIAACAASURBVA7JXjFdvtw+xrsFN/neetuaTIEWCpls\nWx3r9tx8Nt736qzIhaur1e1ZZRvJWFd1S+vqFK6pkQoKZPX1yerr08ldu7SnqUnhSERfefddlc2a\nlbBt1ert1cUTJ9S2d2/KoNMEg27Rn1Rm3HmnHdiOJi98rJyV1IEBte3dq/Y//EHn3ntPJ3bs0HNz\n5uh8S4sk6XxLiwZisZRBp/NeqxcuVO+FCzq1d68utbaqx3PsqddfV6yzM+Fz7otvJ5fktncZyZX0\n/wQAIPtY8cTkMopVyp07d469Gltj4+DK5IoV6dtztLTY22Zfe21o3mFnp10IyFtFNhCwCwlt22Zv\nd03VbiR5FVeS5s2zg1fJDg63b098nfc1XV32yqZkV389diz9sc5KZvKKq
TT8vR3t/Zmidu7cqa5/\n/MeMVvHy0XArlqNp6+KsXHbHA7zk84ylNUy6Y3c3NenounUJuY6SvSW1uqFB51taFCgpUW9Xl045\n/384Cgrs6s/Ofx2FhTIFBWnbpwQjEVV+5jPqbmnRxZMn026TTSdQXq6+ri69K2muJBMKSZalUHm5\nZtx5p/p7euwVXA8TCLhbdwMlJaq+/XZ1vPPO0O3BhYUyxmjGnXeqqLpajWvW6NfXX+/28fS2QZHs\n1dDLZ8+6969oxgxdbmtTVUODlm3fPqrgP9OVbfgvo3/3gGEwp+A3VjyRf7JV/Ga01Vzr66WPPx4a\ndDY12dtq45UlXX199uqjN8cyWaoWJocP2yuR0ejQoDP5NfFKlKqsTF39NdUqcXIuZ1OTHcB627lk\ncn+msPH0u8w3w73X0eTHOiuXIU/lWO95xpJjm+5Yb4GdYHm5CouLFaqsVMlVV+nc0aPua5xKrdMX\nLNC1S5cqVFXlBptOFdnpN99sf72/P2XQaYJBFc2YoYJgUG179+ri8eNu0BmKRNxKsl4F4fCQr/Wd\nP5/w2OrpkdXbq9iZMwqWlmrJ+vUKJ1W2teLXKSwpUaC0VK27dtkBZHzFtaCkxF7l7O+X1denU3v3\nui1mquO54NMXLNCX9+9XcW2tJPvzKKmrc+9fqLJSX3rrLV2/YsWog04p9TxhFRQAkCkCT0wuoyh+\nk9Fv6JyA9qabEtujjJYT3J09a2+vdbbYSnaPzeEClVRBXSRir552dAwWJHI0NdlVciV7NbW+3g4Y\nDxxIXf11NEHjkSND27l4TZJqt7nS2Ng4qavK+m249zqWADzTIkKjud75eEBpAgF9cc8e1dxxh3o6\nOvRJc7O7yjp9wQJF33xT169YoYd37NCDW7YoGK9mHayo0EPNzfZzu3YpEP+6kyvqLSBk9fbqclub\nYt686Pi1p99yi6obGoaMO5hqu258d8/cFO+z4bvfVTgSUc0ddwx5TWFRkR49dEgD3qJF8XMNXLyY\nUMzIWxhoSbz1ycM7dmhafb0ePXzY/Ty649t2TSCgh3fudAs2jWVup/p8J0ProSsRK1PwG3MKuUDg\niSuDE9AOl+c4HG+l2N5e+09dnb1quWPH8MFauqAuXT7rkSN2QCrZqx779qUNGHc3NenFri69XFur\nWKqVzOTxpwtOJ0m121yazFVl/Zbuve5ualJvV5eKa2u1ZMOGEe9FuvOk69mZarUsXfBaGv8li9XX\npwM/+EHKticXT5xQqKJCoUhEr0SjennpUhVfc40kqffcOR34wQ/c8V08ccI+X3+/CouKFHT6YsYD\nSG/epyksVKiyUlZfn1p37VIoEhmywhnztEwZjd899JB7fwuKitxczekLFug/nTypA08/LSctxRlL\nMF58SIWFUiCggnBYJhRy72ny/Q9HIgpFIlo3b54uOO83fv9SGWn1MtXneyXtDAAA+Ct9MzJgkso4\nL8G7ktjQMLYtpWvX2q/v6LDboYylUq4T1HnH4VSolYYGg94gMRIZvF6K8XYeOaLW+OrPnlWr0udg\nOeNPlYMKcl3iOo8ccfMl9w03n1JIztUsnTXLzQ/c09Sk+9etc1fLvF9zgptkqbbxPj9vni47udGy\ne2b+e02NpMEWJ0We7aaLf/Yzd1zeXM/+y5fVf/myJNnVZSMRxeKrqZIdnDqBZvXChWpcs0a/W7Zs\naC5pGk6Op1fJNdeo49ChIeeYdt11Ckci9tbiePAXKC7WNffdp3t+8hO9sHChm7s50Nen9n37Eu6f\n974X19Tow9/+NiEvNlRZOSRAdF5z9o9/dHNEnfOl4r3G4mee0b5Vq9zKxGPJ50Xm+B4FvzGnkAsE\nnrhyeFcSZ80aWwDmBI+pivZIY2sD46x0SnaF2uQA1hskOudOEzAG4tsRqysqtPhHPxp5/JOFn21z\n4JvxrGYlB5WpzjXS+Xc3Nall82b1x
2Kquvlm1Uejaly9WvueekqdR46o6rOfVcGtt+r0/v263Nbm\nVrt1FRaq4lOf0lV33qlQRYVeiUbV9sYbGujpGfY9hyIRdZ84oYJQSAM9PQpXV2tafb2MpPIbbtAr\n0ag633039QmSCxilcXrfPhV6tvta/f12UBvv0+td0b18+rQ+2rJFH//udxrwFDgKlJWpr7tbgbIy\nxTo6FOvsTLjv4ZqahKAzWFGhRw4cGBIMel8jjfx5e49P/oVEql8mAACQClVtceXIpK/naHmrwobD\ndkB1223S+vVDr+PjOGKLFmnP3r1aLCk8nmq0Ex0IXuFVdCcrb59NJ9gb7UpWcu9USQk9O3c3Nanj\n0CF1HT2q6JtvalpSvnKqKrZOJdXk6qrtBw+q6/333TzI5ODv+hUrdLGtLSG4SiUYiWggFlO/p9VI\n6cyZKquvd1cmvVVnTTA4WJyooEChigrJGPWcPatQZaWqbr45ff/PggIFy8rUG+8TXDpzpv7jn/7k\n3tdYZ6fWzZvn9tpMJVRZqd7ubncM4epq9Z4/b7eNKSxUqLxcPR0dCkUiuuqee/QffvWrlJ+b81lV\nNTSobNYsNa5ZM+znm/zZeufGQG+vTjQ30zMXAK5wVLXFlWe4fpRr10qzZ9uBYXJBn/FyVisCASkW\nG9ySmyqP1MdCPuHyct0vKTzaarTp7o+f/VNHgyq6WTHeiqPenL6xFpFJztVMzg90tvFeam3VvhT5\nyt4qtlJiEZ3kvqFdx44NBp2SwtOnu3+fvmCBCouLddbZVl+Q+p+5wqIiTf/MZxKCTkn64muvuf00\nvSuqocpK1d177+CBAwPq6ehQz9mzCpSUKHLTTTLBYPoemQMDbtAZLC/XF197LSFIC0cievTwYYWr\nq1O+vKCkRD0dHW7QGSgrU6y9fbBXaX+/ejo6VFJXp69+8IEe3LLFDfjT5dUu275dD2zaNGKwmPzZ\neudGsKzsiinKBQAYHwJP5J2dO3emf3K4ACoSsbfY7t3rT4DlDeKeecYOJr2VLisqsl/IZ6xBbLr7\nM9GB4CSrojvsnMojflYcHeu225GKM410Puf5YEWFrl26NKHtR9f778sEAurp7LQr2nq2n1bcdJO+\nvH+/yurrFaqqUlF1tbqOHh3sb+kJSh2FJSV69C9/Sdkm5fUnnxxcjY2vooYqK/XIgQO6f/16t2WJ\niVe2DpaXq3L+fLXt3asTzc0a6O1Vmk25dpEgSb1dXUOC791NTVo3b5560vzCIOQUQSotlQoK1BcP\nmANJ1XVrbr894TNINSfGUkhrd1OTXolG1dPd7X7N+1k2rl59xRTlyqWp8j0KkwdzCrlA4ImpJVUA\n5Q0QnTYofgRYmzcPBnFPPmkHkwsX2s8VFtptVrJtrEFsugBzogNBquhmhZ8VR/1uLzPS+ZznH/vw\nQ3e1znGprU1WX5+7+ljozYc8dUp7vvENlcycqZ4zZ3SiuVltb70labC/pVNwqHL+fJXU1enRQ4c0\nrb7eLoI0c2biQIxxA9LC0lIVzZihRw4c0LT6endV8voVK1R9662S7CDSWSFNDgK9XwtFIiqOF0IK\nVlRIhYXuSuSOlSt1dN06XWptdd9jcW2tTHy11gQCWvKb3yhcXa2+CxfsgDgefAfLyhSeMcN9v41r\n1iRef5xzIlXgeiW1HgIA+IccT0wNTo5iMCiVlkpr1gwGNd58wuXLpVDIn+qu06cPFiuKRqWNG+3t\nq3PmSPEqlJMufzFdcSRMCd4czYkOCLJZ3fQXNTWKtbersKREdY2NGujp0SfNzW6xHUl26yHLSsj3\nrI9G9cDGjdqxcqU+2rpVVbfcoiXr1yeMzZs/agIBfeX997X/+9/Xe88+627nrV++XA9s2pTwPjve\neUex9nZ7a6wxQ3qASlLRjBn60ltvuVVgty5b5vYg9eaOhmtqEl5f1dCgZdu361f19erz5IRWzp+v\n4
1u3uq8tLClR/Re/qAsff5wyd3Z3U5POxvNqvxR/bqyfU3J+J4EmACAVcjxx5XC2kDY324Gl94cj\n7yrfmjX+rbTddpv934YGKV6ZUpGIdPvtg9cb6wrDcDmqfmClcUrLZS9Sv7b5pspJ/PL+/SqdOVOP\nHjqkB7ds0f3r16ts9myZ+NbVwtLSwZzPeNBZvXChQuXlerGxUe/98peKnT6tE83N+vWcOQn5r95q\nslZfn15/8kl7BdPzC9OBeF6lUwCpddcuxdrbVRAKyerrSxl0StJVd9+tA08/rYttbXr1scd05sCB\nhGtJ9kpo1S23SBq6zbgwni9aWFKiqxYtUk9Xl4pqa7Vsxw73flw8edLNnX3h9tsT7lvnkSNq27tX\nlz15tePN3QUAIFMEnsg7KfMShstRzNY20vXr7fNu3z60HUqm15voIj+QRK7LcEZbsCiTLZ3ec+9Y\nuVIvNjbq2IYNQwKjafX1+puPP3ZX7F6JRtXT2ekWIwqWlrrnrJw/X/XRqB7atk3nW1rs1UxPxdue\n9nY9W1en3y5apJeXLtXiZ56xV0vjBnp7E4JRSTr7xz+6Y3MLIBUWaqCnJyEnM1hRoaIZM/SupOC0\nabrnJz9JCPRStXW56p57VFpXZ/cNNUb9nmO8AffFkyfdIPKdn/7UvR/OWANlZYqdPq3jW7dq3bx5\ninV2ZtTSJlkuf5mBQXyPgt+YU8gF+nhiavD2vkz+ASlbPSzTnXc816PaKyaZ0fZpvG/t2jFv803o\nQVldrZizRV3pA6NUPSiXbNig1598UjJGjatXu9d3giynb6Zj4NIlt13KvlWrFKqocAPIglBIjatX\n6xc1NVJ8VfLC8eO6cPy4+3pv65SqhgZdbm9X78WLqm5oUO/581Jbm3rPn9e+VasSAr3zH3yg2Jkz\ng9uCJX28dav794FYTCeam/X83Ln663ffdQNuSeqK9+wNlpfrTk/PXue+X+7o0InmZknSpdZW7Wlq\nSvmZZPI5AQDgB3I8gVxK7p/pfM3vHMyJ7tOJKSObOX7ec4cjEX3S3Dykt2RyTuKG+fN14fhxBadN\nU+3ixWl7VUqDOa8N3/2utj74oPp6etTjCW5DlZX66rFjal6xwr32su3bte+pp3Rs/fohFWZDkYiu\nvvdet4CPE8C9Eo26wXBxba0utbYqXF0tU1CggZ4eFYRC+lK84NGLixbpC1u26KX77ksItJM5PUwd\nv120yA2Wk59z3qvTB9Tbb7Nl82b1x2Kqvu22IfmtAAD4ZTQ5ngSeQC55Cx9lsxDRRF0HEy6bRX2k\n7BYs8p5bUsrreIv/XL9ihbpPnHAL9JTNnq2yWbNG/d5jnZ16ft48XW5ttStPO/82FRTomr/6K3dL\nqfeajlAkokcOHkwo3iPZ9//Yhg3q6eiQCQQUKClRYVGRps2erdP79rnHeYNF72tchYVupVonAPa+\nn9H8AiD5s0p+H6kCVgAA/EBxIUxJ485LyHYBn7EYaWutX2NlC++w8jnXxc/enamMJ8dvpPxQ77nT\nXSc5JzEUb3VSvXChSurqRvXedzc16dmrr9avr79elXPnKlxVZQd5AwP2n74+ffLqq0OuWdXQoGuX\nLlV9NKqvfvCBDjz99JD303nkiBtAWn196u3q0tttbeqOt1iR7DYn3m3D3tdI0jVLluirR4+qfvly\n1UejQ4JOyd4iO232bBWGw3r1sccU6+wccn+T76E3V7WwtFSxjo5h83QxeeXz9yhMTswp5AKBJ648\nk6mAz0iFiPwa60T36byCjLb4Trb42bvTT94KsOMJipOrqnofe4PQ4d5755EjutTaqp6ODp3ctUsF\nTj9fr4GBIX0ql23frge3bNEDGzcqHIkkBPnP3XijXl661D2X0/tTkspvuEHRN99UWX29QlVVKqqu\ndu/Ji42Nat2zJ+HS4UhE0+rr9cCmTe61vMe/vHSpJKl01iyd2rv
XvZ8j/dLhvrVrVR+NKlxVpf4L\nF/RJc3NWfjkBAMBosNUWV56lS+1AbuHCkQOxXOdGjmWsyInkraATvZUxl707h+O9L04uZfL4xrtN\neLTv3dmmKtmrmJ/fuFH7Vq3S+Y8+GtwOW1CgqxcvVll9vY6tX6/+S5dkAgFd9bnP6YFNmxSORNzz\nePuHmkBA4enT9dC2bdr//e8nFDjy3oPi2lpZlqXLp04ljM0Eg/paW1vK8SfPrZ7ubvf6M+66S5J0\norl5xPxbenECALKNHE8glc7O0RfwyXVu5FjGipzgh/rUnPsSqqzUIwcODMmNlMYftI82cI11dmrn\n448PqXob6+zUr2+4QT1nzrjHhquq7MqzHsW1tXr08GFJGlJB1nvMzM9/XudbWhQoKVFxTY1aNm9O\n2FJrgkFZ8Z6gjof37NHVixalHHfy3JKk52680e0bWh+NqjAYHDHwnqy/nAAATB3keGJKGndegtPu\nZDQ/gOU6N3IsY0XGxjOnkreCwlZcU6NwTY2qb7tNoYqKlMckbxMe67bl0ea3hiMRFc+YobY339Sz\nV12l1ZGIXlqyRJI04447Eo41hYVDXn+ptVXPz5snSbp/3TotWb9exbW1Q475aOtWte7apVe3btVH\nW7cmBJ3B8nIVTZ8ev8jgv8vv/PSnacedPLfCkYhqFi6UZN+zxtWrR5V/Sy/O/Ec+HvzGnEIuZBx4\nGmN+ZIw5bIx52xjzgjEm9U8WQD4jNxIj4If61M63tCh2+rRODJNXmBxYjbVQ0ljyW508z4GeHvWe\nO+eO6761a1VQVGSfb9o0PbRtm0qvvVYmGJQCg62uL8d7Y0r2Z/7o4cOqX75cRTNmuGOouuUWSVLF\njTeq0MkjLbD/me3t6tKltjb7a/FdQN5xpwq6U80tftEBAMhXGW+1NcYskfSqZVkDxpgfSpJlWd9O\ncRxbbQHgCpPJFuSxvmbHypX66OWXVb1ggUrq6txtrqm23XrzPCUpNH26KufNU7C8XKd//3u3p2Z9\nNKpYR4e7BbggHNZALJZ2TOlawmxdtsxt+5JKSV2dVrzzjnu+Z6++WpdaWyXZ231r7rgjK+1xAADI\nhtFstQ0M9+RwLMva5nm4T9IjmZ4LADC13Ld27ajyCr15moufeUb7Vq1yXzNSDuf5lhbF2tv1SXOz\nwjU1bu7jnqYm3b9u3ZBzv/7Nb2qgp0cFwaAut7frlBMYera+DvT0JKyklt9wg33+NO/BWZV0OH93\nKu4Wlpaq/8KFhNdMX7BAVTffrFeiUfe99cdi7vOxM2fcVV/6bgIApgq/cjz/VtLLPp0LGNao8hIm\nU69OTHrkuvhvtFuQvdtr961alfCakbbenj96VJIUrKjQ9JtukpS4fbVl82b39a9/85t6YONGuz3K\npk1u+5NwdXVC4FkQDCZsZ7148qQb3I62FcnOnTvdc9TefXfCc6UzZ+rhHTt0vqUl4b1V33abJCkw\nTIuYXLfuSWeyjmsq4XsU/MacQi4Mu+JpjNkmqTbFU/9gWdbm+DHfkdRjWdbadOd5/PHHdd1110mS\nIpGIFixYoMbGRkmDE5/HPB7t44MHD458fLz/5U5JikbVGP/6ZBg/jyffY8dkGc+V9PjQpUuaLjvQ\nGvja17Rz5073+UOXLum0pM/Fg7Dk138Qiajj+HHNPXdOoUhE5++9V9d961tu4PpOd7d6Jc2VdHL3\nbv3wzjt16/e+p88vW6b71q7Vv0WjajtzRjPi22yPlpbquq9/3Q2aveMLlJXp90ePauCll1T04ovq\nPHJEhy5dcs/nfX+SHXgHnnhCA93dKvrzn3W5tVWtN96ou378Y/u5khK9KzsfdGU8wPy3aFTz/u7v\nFHrhBS3+2c/0xsGDCe93z1tv6ezbb2uu7FXdwBNP5Pzzk6Su+C8I3pV0OBrV3/P9lsc8nvSPDyZ9\nf8n1eHicf48PHjyozvgvGz/
88EONxrjaqRhjHpf0XyXdZ1nW5TTHkOOJiUf/y7HJdb9SXLGGa/Ux\nUhuQkXJCX1qyRCeamxO2u16/YoVC8UJGgZISDfT26kRzs0KVlZr5wAO6ePJkwtbeWGennpszx80B\nLZs9WxeOH3fbotQvX64HNm3S85/+tC62tqogGNSX9+9PaB+T6n1k0uJksrbumazjAgBMnKz28TTG\nfEHSjyXda1lW+zDHEXhi4tH/cmwaG3PbrxTwGEt/zuGCN+f5WEeHPmluVqCsTDPuukv9ly65+Z3e\nXpivRKMp+4p6A6uCcDihaFB9NKoHNm7U6khEvefOSbK30/7Nxx/7ek9G835zZbKOCwAwcbIdeL4n\nKSTpbPxLb1iW9USK4wg84audnq148MkVvkLMnJpcXmxsTBkAjiRdwBrr7NRzN97oFh8qrq3VpdbW\nISt06VbuvIHVq4895lbHnX7zzXp41y6FIxH9oqZGsfZ2FZaU6Oqf/1xLv/KVMY9zrMeM5/zIL3yP\ngt+YU/DbaALPgkxPblnWHMuy6i3Laoj/GRJ0AsgT9CvFJDKW/pxe6YoRhSMR1Sxc6J4z+uabKXth\npuuR6S2UdN/atapfvlz10agbdO5uatK0T31KBeGwom+8oZLaVKURRh7nWI8Zz/kBAJho48rxHNUF\nWPEEAIxBpls3h8s1zOZ20LGu0CaPc99TTw1ZoRxP3iQ5lwCAiZbVFU8AALJhtK1YkqVbsRzPOVNJ\nbh8y1hXa5HGmWqEc7r2MpLimRuHqagJOAMCkQuCJvOOUdAb8wpyaGvwMLoeTHCgmB4kjzafkcaYK\nXMfzXs63tIy59ygmN75HwW/MKeQCgScAAGOQHCiON+Adz+rmaMYHAMBkQI4nAABjMNnbh0z28QEA\npp6stlMZwyAIPAEAAABgiqK4EKYk8hLgN+YU/MR8gt+YU/Abcwq5QOAJAAAAAMgqttoCAAAAADLG\nVlsAAAAAQM4ReCLvkJcAvzGn4CfmE/zGnILfmFPIBQJPAAAAAEBWkeMJAAAAAMgYOZ4AAAAAgJwj\n8ETeIS8BfmNOwU/MJ/iNOQW/MaeQCwSeAAAAAICsIscTAAAAAJAxcjwBAAAAADlH4Im8Q14C/Mac\ngp+YT/Abcwp+Y04hFwg8AQAAAABZRY4nAAAAACBj5HgCAAAAAHKOwBN5h7wE+I05BT8xn+A35hT8\nxpxCLhB4AgAAAACyihxPAAAAAEDGyPEEAAAAAOQcgSfyDnkJ8BtzCn5iPsFvzCn4jTmFXCDwBAAA\nAABkFTmeAAAAAICMkeMJAAAAAMg5Ak/kHfIS4DfmFPzEfILfmFPwG3MKuUDgCQAAAADIKnI8AQAA\nAAAZI8cTAAAAAJBzBJ7IO+QlwG/MKfiJ+QS/MafgN+YUcoHAEwAAAACQVeR4AgAAAAAyRo4nAAAA\nACDnCDyRd8hLgN+YU/AT8wl+Y07Bb8wp5AKBJwAAAAAgq8jxBAAAAABkjBxPAAAAAEDOEXgi75CX\nAL8xp+An5hP8xpyC35hTyAUCTwAAAABAVpHjCQAAAADIGDmeAAAAAICcI/BE3iEvAX5jTsFPzCf4\njTkFvzGnkAsEngAAAACArCLHEwAAAACQMXI8AQAAAAA5R+CJvENeAvzGnIKfmE/wG3MKfmNOIRcI\nPAEAAAAAWUWOJwAAAAAgY+R4AgAAAAByjsATeYe8BPiNOQU/MZ/gN+YU/MacQi4QeAIAAAAAsooc\nTwAAAABAxsjxBAAAAADkHIEn8g55CfAbcwp+Yj7Bb8wp+I05hVwg8AQAAAAAZBU5ngAAAACAjJHj\nCQAAAADIOQJP5B3yEuA35hT8xHyC35hT8BtzCrlA4AkAAAAAyCpyPAEAAAAAGSPHEwAAAACQcwSe\nyDvkJcBvzCn4ifkEvzGn4DfmFHKBwBMAAAAAkFXkeAIAAAAAMkaOJwAAAAAg5wg8kXfIS4Dfm
FPw\nE/MJfmNOwW/MKeQCgScAAAAAIKvI8QQAAAAAZIwcTwAAAABAzmUceBpj/skY87Yx5qAx5lVjzLV+\nDgxIh7wE+I05BT8xn+A35hT8xpxCLoxnxfOfLcu6xbKsBZI2SfpfPo0JGNbBgwdzPQRMMcwp+In5\nBL8xp+A35hRyIePA07Ks856HZZLaxz8cYGSdnZ25HgKmGOYU/MR8gt+YU/Abcwq5EBjPi40xT0v6\nz5IuSrrLlxEBAAAAAKaUYVc8jTHbjDF/SvHnYUmyLOs7lmXNkrRG0r9MwHgBffjhh7keAqYY5hT8\nxHyC35hT8BtzCrngSzsVY8wsSS9blvXZFM/RSwUAAAAAprCR2qlkvNXWGDPHsqz34g+XSzqQyQAA\nAAAAAFNbxiuexpgNkuZK6pd0VNI3LMtq83FsAAAAAIApwJettgAAAAAApDOePp6jZoz5J2PM28aY\ng8aYV40x107EdTE1GWN+ZIw5HJ9TLxhjKnI9JuQ3Y8wKY8w7xph+Y8ytuR4P8pcx5gvGmL8YY94z\nxvyPXI8H+c0Y83+NMaeMMX/K9VgwNRhjrjXG7Ij/m/dnY8x/z/WYkL+MMUXGmH3xGO+QMeZ/D3v8\nRKx4GmOmOX0/jTH/TdItlmV9PesXxpRkjFki6VXLsgaMMT+UJMuyvp3jYSGPGWM+LWlA0v+R9PeW\nZf0hx0NCHjLGFEp6V9L9kj6R9HtJX7Us63BOB4a8ZYxZLKlb0r9bljU/1+NB/jPG1EqqtSzroDGm\nTNL/kxTl+xQyZYwpsSzrojEm47bo4wAAAphJREFUIOk1Sd+yLOu1VMdOyIqnE3TGlUlqn4jrYmqy\nLGubZVkD8Yf7JM3M5XiQ/yzL+otlWUdyPQ7kvTskvW9Z1oeWZfVKek528T0gI5Zl7ZHUketxYOqw\nLKvVsqyD8b93SzosqS63o0I+syzrYvyvIUmFks6mO3ZCAk9JMsY8bYz5SNJKST+cqOtiyvtbSS/n\nehAAIOkaSR97Hh+Pfw0AJh1jzHWSGmT/Eh/IiDGmwBhzUNIpSTssyzqU7tiM26mkuOg2SbUpnvoH\ny7I2W5b1HUnfMcZ8W9K/SPovfl0bU89I8yl+zHck9ViWtXZCB4e8NJo5BYwT1foA5IX4NtsNkp6M\nr3wCGYnvQlwQr7nyijGm0bKsnamO9S3wtCxrySgPXStWqDCCkeaTMeZxSUsl3TchA0LeG8P3KCBT\nn0jyFs+7VvaqJwBMGsaYoKTfSPqlZVmbcj0eTA2WZZ0zxmyRtFDSzlTHTFRV2zmeh8slHZiI62Jq\nMsZ8QdIqScsty7qc6/FgyjG5HgDy1n5Jc4wx1xljQpL+WtKLOR4TALiMMUbSzyUdsizrX3M9HuQ3\nY0y1MSYS/3uxpCUaJs6bqKq2GyTNldQv6aikb1iW1Zb1C2NKMsa8JzuB2UlefsOyrCdyOCTkOWPM\nlyT9VFK1pHOSDliW9WBuR4V8ZIx5UNK/yi6w8HPLsoYtLQ8Mxxjza0n3SqqS1Cbp+5Zlrc7tqJDP\njDGLJO2W9EcNpgf8T8uyfpe7USFfGWPmS/qF7MXMAknPWpb1o7THT0TgCQAAAAC4ck1YVVsAAAAA\nwJWJwBMAAAAAkFUEngAAAACArCLwBAAAAABkFYEnAAAAACCrCDwBAAAAAFlF4AkAAAAAyCoCTwAA\nAABAVv1/lzHCzGUnjVoAAAAASUVORK5CYII=\n", + "text": [ + "" + ] + } + ], + "prompt_number": 5 + } + ], + "metadata": {} + } + ] +} \ No newline at end of file diff --git a/examples/triplet/mnist_triplet.prototxt b/examples/triplet/mnist_triplet.prototxt new file mode 100644 index 
00000000000..0e903f85909 --- /dev/null +++ b/examples/triplet/mnist_triplet.prototxt @@ -0,0 +1,113 @@ +name: "mnist_siamese" +input: "data" +input_dim: 10000 +input_dim: 1 +input_dim: 28 +input_dim: 28 +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 500 + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "ip1" + top: "ip2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 10 + } +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip2" + top: "feat" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 2 + } +} diff --git a/examples/triplet/mnist_triplet_solver.prototxt b/examples/triplet/mnist_triplet_solver.prototxt new file mode 100644 index 00000000000..edd8e1e0338 --- /dev/null +++ b/examples/triplet/mnist_triplet_solver.prototxt @@ -0,0 +1,25 @@ +# The train/test net protocol buffer definition +net: "examples/triplet/mnist_triplet_train_test.prototxt" +# test_iter specifies how many forward passes the test should carry out. 
+# In the case of MNIST, we have test batch size 100 and 100 test iterations, +# covering the full 10,000 testing images. +test_iter: 100 +# Carry out testing every 500 training iterations. +test_interval: 500 +# The base learning rate, momentum and the weight decay of the network. +base_lr: 0.001 +momentum: 0.9 +weight_decay: 0.0000 +# The learning rate policy +lr_policy: "inv" +gamma: 0.0001 +power: 0.75 +# Display every 100 iterations +display: 100 +# The maximum number of iterations +max_iter: 50000 +# snapshot intermediate results +snapshot: 5000 +snapshot_prefix: "examples/triplet/mnist_triplet" +# solver mode: CPU or GPU +solver_mode: CPU diff --git a/examples/triplet/mnist_triplet_train_test.prototxt b/examples/triplet/mnist_triplet_train_test.prototxt new file mode 100644 index 00000000000..da25dec31de --- /dev/null +++ b/examples/triplet/mnist_triplet_train_test.prototxt @@ -0,0 +1,500 @@ +name: "mnist_triplet_train_test" +layer { + name: "triplet_data" + type: "Data" + top: "triplet_data" + top: "sim" + include { + phase: TRAIN + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/triplet/mnist_triplet_train_leveldb" + batch_size: 64 + } +} +layer { + name: "triplet_data" + type: "Data" + top: "triplet_data" + top: "sim" + include { + phase: TEST + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/triplet/mnist_triplet_test_leveldb" + batch_size: 100 + } +} +layer { + name: "slice_triplet" + type: "Slice" + bottom: "triplet_data" + top: "data" + top: "data_true" + top: "data_false" + slice_param { + slice_dim: 1 + slice_point: 1 + slice_point: 2 + } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1" + 
type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "ip1" + top: "ip2" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip2" + top: "feat" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "conv1_true" + type: "Convolution" + bottom: "data_true" + top: "conv1_true" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1_true" + type: "Pooling" + bottom: "conv1_true" + top: "pool1_true" + pooling_param { + pool: MAX + kernel_size: 2 + 
stride: 2 + } +} +layer { + name: "conv2_true" + type: "Convolution" + bottom: "pool1_true" + top: "conv2_true" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2_true" + type: "Pooling" + bottom: "conv2_true" + top: "pool2_true" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1_true" + type: "InnerProduct" + bottom: "pool2_true" + top: "ip1_true" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1_true" + type: "ReLU" + bottom: "ip1_true" + top: "ip1_true" +} +layer { + name: "ip2_true" + type: "InnerProduct" + bottom: "ip1_true" + top: "ip2_true" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat_true" + type: "InnerProduct" + bottom: "ip2_true" + top: "feat_true" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "conv1_false" + type: "Convolution" + bottom: "data_false" + top: "conv1_false" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1_false" + type: "Pooling" + bottom: "conv1_false" + top: "pool1_false" + pooling_param { + pool: MAX + kernel_size: 2 + 
stride: 2 + } +} +layer { + name: "conv2_false" + type: "Convolution" + bottom: "pool1_false" + top: "conv2_false" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2_false" + type: "Pooling" + bottom: "conv2_false" + top: "pool2_false" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1_false" + type: "InnerProduct" + bottom: "pool2_false" + top: "ip1_false" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1_false" + type: "ReLU" + bottom: "ip1_false" + top: "ip1_false" +} +layer { + name: "ip2_false" + type: "InnerProduct" + bottom: "ip1_false" + top: "ip2_false" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat_false" + type: "InnerProduct" + bottom: "ip2_false" + top: "feat_false" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "loss" + type: "TripletLoss" + bottom: "feat" + bottom: "feat_true" + bottom: "feat_false" + bottom: "sim" + top: "loss" + triplet_loss_param { + margin: 1 + } +} + diff --git a/examples/triplet/readme.md b/examples/triplet/readme.md new file mode 100644 index 00000000000..524718ce2db --- /dev/null +++ b/examples/triplet/readme.md @@ -0,0 +1,187 @@ +--- +title: Triplet Network Tutorial +description: Train and test a triplet network on MNIST data. 
+category: example +include_in_docs: true +layout: default +priority: 100 +--- + +# Triplet Network Training with Caffe +This example shows how you can use weight sharing and a contrastive loss +function to learn a model using a triplet network in Caffe. + +We will assume that you have caffe successfully compiled. If not, please refer +to the [Installation page](../../installation.html). This example builds on the +[MNIST tutorial](mnist.html) so it would be a good idea to read that before +continuing. + +*The guide specifies all paths and assumes all commands are executed from the +root caffe directory* + +## Prepare Datasets + +You will first need to download and convert the data from the MNIST +website. To do this, simply run the following commands: + + ./data/mnist/get_mnist.sh + ./examples/triplet/create_mnist_triplet.sh + +After running the script there should be two datasets, +`./examples/triplet/mnist_triplet_train_leveldb`, and +`./examples/triplet/mnist_triplet_test_leveldb`. + +## The Model +First, we will define the model that we want to train using the triplet network. +We will use the convolutional net defined in +`./examples/triplet/mnist_triplet.prototxt`. This model is almost +exactly the same as the [LeNet model](mnist.html), the only difference is that +we have replaced the top layers that produced probabilities over the 10 digit +classes with a linear "feature" layer that produces a 2 dimensional vector. + + layers { + name: "feat" + type: INNER_PRODUCT + bottom: "ip2" + top: "feat" + blobs_lr: 1 + blobs_lr: 2 + inner_product_param { + num_output: 2 + } + } + +## Define the triplet Network + +In this section we will define the triplet network used for training. The +resulting network is defined in +`./examples/triplet/mnist_triplet_train_test.prototxt`. + +### Reading in the Pair Data + +We start with a data layer that reads from the LevelDB database we created +earlier. 
Each entry in this database contains the image data for a pair of +images (`pair_data`) and a binary label saying if they belong to the same class +or different classes (`sim`). + + layers { + name: "pair_data" + type: DATA + top: "pair_data" + top: "sim" + data_param { + source: "examples/triplet/mnist-triplet-train-leveldb" + scale: 0.00390625 + batch_size: 64 + } + include: { phase: TRAIN } + } + +In order to pack a pair of images into the same blob in the database we pack one +image per channel. We want to be able to work with these two images separately, +so we add a slice layer after the data layer. This takes the `pair_data` and +slices it along the channel dimension so that we have a single image in `data` +and its paired image in `data_p.` + + layers { + name: "slice_pair" + type: SLICE + bottom: "pair_data" + top: "data" + top: "data_p" + slice_param { + slice_dim: 1 + slice_point: 1 + } + } + +### Building the First Side of the triplet Net + +Now we can specify the first side of the triplet net. This side operates on +`data` and produces `feat`. Starting from the net in +`./examples/triplet/mnist_triplet.prototxt` we add default weight fillers. Then +we name the parameters of the convolutional and inner product layers. Naming the +parameters allows Caffe to share the parameters between layers on both sides of +the triplet net. In the definition this looks like: + + ... + param: "conv1_w" + param: "conv1_b" + ... + param: "conv2_w" + param: "conv2_b" + ... + param: "ip1_w" + param: "ip1_b" + ... + param: "ip2_w" + param: "ip2_b" + ... + +### Building the Second Side of the triplet Net + +Now we need to create the second path that operates on `data_pos` and produces +`feat_pos`. This path is exactly the same as the first. So we can just copy and +paste it. Then we change the name of each layer, input, and output by appending +`_pos` to differentiate the "paired" layers from the originals. 
+ +### Building the Third Side of the triplet Net + +Now we need to create the second path that operates on `data_neg` and produces +`feat_neg`. This path is exactly the same as the first. So we can just copy and +paste it. Then we change the name of each layer, input, and output by appending +`_neg` to differentiate the "paired" layers from the originals. + +### Adding the Contrastive Loss Function + +To train the network we will optimize a contrastive loss function proposed in: +Raia Hadsell, Sumit Chopra, and Yann LeCun "Dimensionality Reduction by Learning +an Invariant Mapping". This loss function encourages matching pairs to be close +together in feature space while pushing non-matching pairs apart. This cost +function is implemented with the `TRIPLET_LOSS` layer: + + layers { + name: "loss" + type: TRIPLET_LOSS + triplet_loss_param { + margin: 1.0 + } + bottom: "feat" + bottom: "feat_pos" + bottom: "feat_neg" + bottom: "sim" + top: "loss" + } + +## Define the Solver + +Nothing special needs to be done to the solver besides pointing it at the +correct model file. The solver is defined in +`./examples/triplet/mnist_triplet_solver.prototxt`. + +## Training and Testing the Model + +Training the model is simple after you have written the network definition +protobuf and solver protobuf files. 
Simply run +`./examples/triplet/train_mnist_triplet.sh`: + + ./examples/triplet/train_mnist_triplet.sh + +# Plotting the results + +First, we can draw the model and triplet networks by running the following +commands that draw the DAGs defined in the .prototxt files: + + ./python/draw_net.py \ + ./examples/triplet/mnist_triplet.prototxt \ + ./examples/triplet/mnist_triplet.png + + ./python/draw_net.py \ + ./examples/triplet/mnist_triplet_train_test.prototxt \ + ./examples/triplet/mnist_triplet_train_test.png + +Second, we can load the learned model and plot the features using the iPython +notebook: + + ipython notebook ./examples/triplet/mnist_triplet.ipynb + diff --git a/examples/triplet/train_mnist_triplet.sh b/examples/triplet/train_mnist_triplet.sh new file mode 100755 index 00000000000..683cda2963b --- /dev/null +++ b/examples/triplet/train_mnist_triplet.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env sh + +TOOLS=./release/tools + +$TOOLS/caffe train --solver=examples/triplet/mnist_triplet_solver.prototxt diff --git a/src/caffe/data_transformer.cpp b/src/caffe/data_transformer.cpp index 7189d67e289..a1d41b0dc65 100644 --- a/src/caffe/data_transformer.cpp +++ b/src/caffe/data_transformer.cpp @@ -130,10 +130,31 @@ void DataTransformer::Transform(const Datum& datum, template void DataTransformer::Transform(const Datum& datum, Blob* transformed_blob) { +<<<<<<< HEAD +<<<<<<< HEAD // If datum is encoded, decoded and transform the cv::image. if (datum.encoded()) { +<<<<<<< fa941056111876e03082cdc5695d75339ed24ed9 #ifdef USE_OPENCV CHECK(!(param_.force_color() && param_.force_gray())) +======= +<<<<<<< HEAD +<<<<<<< HEAD + CHECK(!(param_.force_color() && param_.force_gray())) +======= +<<<<<<< HEAD + CHECK(!param_.force_color() && !param_.force_gray()) +>>>>>>> 011aef0... restore +======= + CHECK(!(param_.force_color() && param_.force_gray())) +>>>>>>> d2acfed... fixed _force_color check, fixes #2635 +======= + // If datum is encoded, decoded and transform the cv::image. 
+ if (datum.encoded()) { + CHECK(!param_.force_color() && !param_.force_gray()) +>>>>>>> 011aef0... restore +>>>>>>> 00341b2... triplet data generation and network update +>>>>>>> triplet data generation and network update << "cannot set both force_color and force_gray"; cv::Mat cv_img; if (param_.force_color() || param_.force_gray()) { @@ -154,6 +175,11 @@ void DataTransformer::Transform(const Datum& datum, } const int crop_size = param_.crop_size(); +<<<<<<< HEAD +======= +>>>>>>> 083f61b... New triplet loss layer added(beta1 version-no test source files) +======= +>>>>>>> 011aef0... restore const int datum_channels = datum.channels(); const int datum_height = datum.height(); const int datum_width = datum.width(); @@ -438,11 +464,31 @@ void DataTransformer::Transform(Blob* input_blob, } } +<<<<<<< HEAD +<<<<<<< HEAD template vector DataTransformer::InferBlobShape(const Datum& datum) { if (datum.encoded()) { #ifdef USE_OPENCV CHECK(!(param_.force_color() && param_.force_gray())) +<<<<<<< fa941056111876e03082cdc5695d75339ed24ed9 +======= +======= + CHECK(!param_.force_color() && !param_.force_gray()) +>>>>>>> 011aef0... restore +======= + CHECK(!(param_.force_color() && param_.force_gray())) +<<<<<<< HEAD +>>>>>>> d2acfed... fixed _force_color check, fixes #2635 +======= +======= +template +vector DataTransformer::InferBlobShape(const Datum& datum) { + if (datum.encoded()) { + CHECK(!param_.force_color() && !param_.force_gray()) +>>>>>>> 011aef0... restore +>>>>>>> 00341b2... triplet data generation and network update +>>>>>>> triplet data generation and network update << "cannot set both force_color and force_gray"; cv::Mat cv_img; if (param_.force_color() || param_.force_gray()) { @@ -519,6 +565,11 @@ vector DataTransformer::InferBlobShape( } #endif // USE_OPENCV +<<<<<<< HEAD +======= +>>>>>>> 083f61b... New triplet loss layer added(beta1 version-no test source files) +======= +>>>>>>> 011aef0... 
restore template void DataTransformer::InitRand() { const bool needs_rand = param_.mirror() || diff --git a/src/caffe/layers/triplet_loss_layer.cpp b/src/caffe/layers/triplet_loss_layer.cpp new file mode 100644 index 00000000000..add25687899 --- /dev/null +++ b/src/caffe/layers/triplet_loss_layer.cpp @@ -0,0 +1,261 @@ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/loss_layers.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +void TripletLossLayer::LayerSetUp( + const vector*>& bottom, const vector*>& top) { + LossLayer::LayerSetUp(bottom, top); + // number of triplet in a batch + int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); + // dimension of each descriptor + int dim = bottom[0]->count()/bottom[0]->num(); + CHECK_EQ(bottom[0]->channels(), dim); + CHECK_EQ(bottom[0]->height(), 1); + CHECK_EQ(bottom[0]->width(), 1); + CHECK_EQ(bottom[1]->channels(), 1); + CHECK_EQ(bottom[1]->height(), 1); + CHECK_EQ(bottom[1]->width(), 1); + // In each set, we have: + // the descriptor of reference sample, closest sample, and negative samples + // number of sets in the whole batch + int num_set = bottom[0]->num()/(2 + num_triplets); + dist_sq_.Reshape(num_set, 1, 1, 1); + diff_pos.Reshape(num_set, dim, 1, 1); + dist_sq_pos.Reshape(num_set, 1, 1, 1); + diff_neg.Reshape(num_set, dim, 1, 1); + dist_sq_neg.Reshape(num_set, 1, 1, 1); + // vector of ones used to sum along channels + summer_vec_.Reshape(bottom[0]->channels(), 1, 1, 1); + for (int i = 0; i < bottom[0]->channels(); ++i) + summer_vec_.mutable_cpu_data()[i] = Dtype(1); +} + +template +void TripletLossLayer::Forward_cpu( + const vector*>& bottom, + const vector*>& top) { + Dtype margin = this->layer_param_.triplet_loss_param().margin(); + Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); + int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); + CHECK_EQ(bottom[0]->num()%(2 + 
num_triplets), 0); + Dtype loss(0.0); + int dim = bottom[0]->count()/bottom[0]->num(); + int num_set = bottom[0]->num()/(2 + num_triplets); + if (losstype == 0) { + for (int i = 0; i < num_set; ++i) { + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive + diff_pos.mutable_cpu_data() + i*dim); // reference-pose_close + // Loss component calculated from reference and close one + dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(dim, + diff_pos.cpu_data() + i*dim, diff_pos.cpu_data() + i*dim); + // a b is a similar pair for pair wise + // loss accumulated by the pair wise part + loss += dist_sq_pos.cpu_data()[i]; + for (int triplet = 0; triplet < num_triplets; ++triplet) { + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i]; + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + i*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + i*dim, diff_neg.cpu_data() + i*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg.cpu_data()[i]; + // loss accumulated accumulated by the triplet part + loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); + } + } + loss = loss / static_cast(num_set) / Dtype(2); + top[0]->mutable_cpu_data()[0] = loss; +} + +template +void TripletLossLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + Dtype margin = this->layer_param_.triplet_loss_param().margin(); + Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); + int num_triplets = 
this->layer_param_.triplet_loss_param().num_triplets(); + int dim = bottom[0]->count()/bottom[0]->num(); + int num_set = bottom[0]->num()/(2 + num_triplets); + if (losstype == 0) { + // BP for feat1(extracted from reference) + for (int i = 0; i < 1; ++i) { + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(bottom[i]->num()); + int num = bottom[i]->num(); + int channels = bottom[i]->channels(); + for (int j = 0; j < num; ++j) { + Dtype* bout = bottom[i]->mutable_cpu_diff(); + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // similar pairs + caffe_cpu_axpby( + dim, + alpha, + diff_neg.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + } else { + caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } else { + for (int i = 0; i < 1; ++i) { + // BP for data1(feat1) + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + // the pair part + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] = 1 - \ + dist_sq_neg.cpu_data()[j] / 
dist_sq_.mutable_cpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + caffe_cpu_axpby( + dim, + alpha*dist_sq_neg.mutable_cpu_data()[j]\ + /((dist_sq_pos.mutable_cpu_data()[j]+margin)\ + *(dist_sq_pos.mutable_cpu_data()[j]+margin)), + diff_pos.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + caffe_cpu_axpby( + dim, + -alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), + diff_neg.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + for (int i = 1; i < 2; ++i) { + // BP for positive data(feat2) + if (propagate_down[0]) { + const Dtype sign = -1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + // the pair part + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] = 1 - \ + dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + caffe_cpu_axpby( + dim, + alpha*dist_sq_neg.mutable_cpu_data()[j]\ + /((dist_sq_pos.mutable_cpu_data()[j]+margin)\ + 
*(dist_sq_pos.mutable_cpu_data()[j]+margin)), + diff_pos.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + for (int i = 2; i < 2 + num_triplets; ++i) { + // BP for negative data(feat3) + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + i)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] = 1 - \ + dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + caffe_cpu_axpby( + dim, + alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), + diff_neg.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + } else { + caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } +} + +#ifdef CPU_ONLY +STUB_GPU(TripletLossLayer); +#endif + +INSTANTIATE_CLASS(TripletLossLayer); +REGISTER_LAYER_CLASS(TripletLoss); + +} // namespace caffe diff --git a/src/caffe/test/test_net.cpp b/src/caffe/test/test_net.cpp index ea6729e74de..edadf4dc24a 100644 --- a/src/caffe/test/test_net.cpp +++ b/src/caffe/test/test_net.cpp @@ -614,6 +614,10 @@ class NetTest : public MultiDeviceTest { InitNetFromProtoString(proto); } +<<<<<<< HEAD +<<<<<<< HEAD +======= +>>>>>>> 011aef0... 
restore virtual void InitSkipPropNet(bool test_skip_true) { string proto = "name: 'SkipPropTestNetwork' " @@ -702,6 +706,9 @@ class NetTest : public MultiDeviceTest { if (test_skip_true) <<<<<<< HEAD <<<<<<< HEAD +<<<<<<< HEAD +======= +>>>>>>> 00341b2... triplet data generation and network update proto += " propagate_down: true " " propagate_down: false "; else @@ -712,6 +719,7 @@ class NetTest : public MultiDeviceTest { else proto += " propagate_down: [true, true] "; >>>>>>> 011aef0... restore +<<<<<<< HEAD ======= proto += " propagate_down: true " " propagate_down: false "; @@ -719,6 +727,8 @@ class NetTest : public MultiDeviceTest { proto += " propagate_down: true " " propagate_down: true "; >>>>>>> 98fb438... fixed two bugs with prototext format +======= +>>>>>>> 00341b2... triplet data generation and network update proto += " top: 'cross_entropy_loss' " " type: 'SigmoidCrossEntropyLoss' " @@ -727,6 +737,11 @@ class NetTest : public MultiDeviceTest { InitNetFromProtoString(proto); } +<<<<<<< HEAD +======= +>>>>>>> 083f61b... New triplet loss layer added(beta1 version-no test source files) +======= +>>>>>>> 011aef0... 
restore int seed_; shared_ptr > net_; }; From 99302ed5e44d614d1e180d4f4a5268e585c91ddc Mon Sep 17 00:00:00 2001 From: Wangyida Date: Mon, 15 Jun 2015 15:35:57 +0800 Subject: [PATCH 51/82] suit for opencv3.0.0 delete spaces delete libopenccodecsv triplet tranining data code fix --- examples/triplet/convert_mnist_triplet_data.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/examples/triplet/convert_mnist_triplet_data.cpp b/examples/triplet/convert_mnist_triplet_data.cpp index e35e7f4f3bf..0cbab642b7c 100644 --- a/examples/triplet/convert_mnist_triplet_data.cpp +++ b/examples/triplet/convert_mnist_triplet_data.cpp @@ -97,7 +97,6 @@ void convert_dataset(const char* image_filename, const char* label_filename, datum.set_data(pixels, 3*rows*cols); if (label_i == label_j && label_i != label_k) { datum.set_label(1); - datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", itemid); db->Put(leveldb::WriteOptions(), std::string(key), value); From 57b18fd6fa553c19ea78f5adc6ea25cd0db9b7d1 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Tue, 30 Jun 2015 21:29:31 +0800 Subject: [PATCH 52/82] lfw test added in examples of triplet delete unuseful codes --- examples/siamese/convert_lfw_siamese_data.cpp | 121 +++++ examples/siamese/create_lfw_siamese.sh | 21 + examples/siamese/lfw_siamese.prototxt | 113 ++++ examples/siamese/lfw_siamese_solver.prototxt | 25 + .../siamese/lfw_siamese_train_test.prototxt | 349 ++++++++++++ examples/siamese/train_lfw_siamese.sh | 5 + examples/triplet/convert_lfw_triplet_data.cpp | 126 +++++ examples/triplet/create_lfw_triplet.sh | 21 + examples/triplet/lfw_triplet.prototxt | 113 ++++ examples/triplet/lfw_triplet_solver.prototxt | 25 + .../triplet/lfw_triplet_train_test.prototxt | 500 ++++++++++++++++++ examples/triplet/train_lfw_triplet.sh | 5 + 12 files changed, 1424 insertions(+) create mode 100644 examples/siamese/convert_lfw_siamese_data.cpp create mode 100755 examples/siamese/create_lfw_siamese.sh create mode 100644 
examples/siamese/lfw_siamese.prototxt create mode 100644 examples/siamese/lfw_siamese_solver.prototxt create mode 100644 examples/siamese/lfw_siamese_train_test.prototxt create mode 100755 examples/siamese/train_lfw_siamese.sh create mode 100644 examples/triplet/convert_lfw_triplet_data.cpp create mode 100755 examples/triplet/create_lfw_triplet.sh create mode 100644 examples/triplet/lfw_triplet.prototxt create mode 100644 examples/triplet/lfw_triplet_solver.prototxt create mode 100644 examples/triplet/lfw_triplet_train_test.prototxt create mode 100755 examples/triplet/train_lfw_triplet.sh diff --git a/examples/siamese/convert_lfw_siamese_data.cpp b/examples/siamese/convert_lfw_siamese_data.cpp new file mode 100644 index 00000000000..fe134ca9b4e --- /dev/null +++ b/examples/siamese/convert_lfw_siamese_data.cpp @@ -0,0 +1,121 @@ +// +// This script converts the MNIST dataset to the leveldb format used +// by caffe to train siamese network. +// Usage: +// convert_mnist_data input_image_file input_label_file output_db_file +// The MNIST dataset could be downloaded at +// http://yann.lecun.com/exdb/mnist/ +#include // NOLINT(readability/streams) +#include + +#include "glog/logging.h" +#include "google/protobuf/text_format.h" +#include "leveldb/db.h" +#include "stdint.h" + +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/math_functions.hpp" + +uint32_t swap_endian(uint32_t val) { + val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); + return (val << 16) | (val >> 16); +} + +void read_image(std::ifstream* image_file, std::ifstream* label_file, + uint32_t index, uint32_t rows, uint32_t cols, + char* pixels, char* label) { + image_file->seekg(index * rows * cols + 16); + image_file->read(pixels, rows * cols); + label_file->seekg(index + 8); + label_file->read(label, 1); +} + +void convert_dataset(const char* image_filename, const char* label_filename, + const char* db_filename) { + // Open files + std::ifstream image_file(image_filename, std::ios::in | 
std::ios::binary); + std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); + CHECK(image_file) << "Unable to open file " << image_filename; + CHECK(label_file) << "Unable to open file " << label_filename; + // Read the magic and the meta data + uint32_t magic; + uint32_t num_items; + uint32_t num_labels; + uint32_t rows; + uint32_t cols; + + image_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + label_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + image_file.read(reinterpret_cast(&num_items), 4); + num_items = swap_endian(num_items); + label_file.read(reinterpret_cast(&num_labels), 4); + num_labels = swap_endian(num_labels); + CHECK_EQ(num_items, num_labels); + image_file.read(reinterpret_cast(&rows), 4); + rows = swap_endian(rows); + image_file.read(reinterpret_cast(&cols), 4); + cols = swap_endian(cols); + + // Open leveldb + leveldb::DB* db; + leveldb::Options options; + options.create_if_missing = true; + options.error_if_exists = true; + leveldb::Status status = leveldb::DB::Open( + options, db_filename, &db); + CHECK(status.ok()) << "Failed to open leveldb " << db_filename + << ". 
Is it already existing?"; + + char label_i; + char label_j; + char* pixels = new char[2 * rows * cols]; + const int kMaxKeyLength = 10; + char key[kMaxKeyLength]; + std::string value; + + caffe::Datum datum; + datum.set_channels(2); // one channel for each image in the pair + datum.set_height(rows); + datum.set_width(cols); + LOG(INFO) << "A total of " << num_items << " items."; + LOG(INFO) << "Rows: " << rows << " Cols: " << cols; + for (int itemid = 0; itemid < num_items; ++itemid) { + int i = caffe::caffe_rng_rand() % num_items; // pick a random pair + int j = caffe::caffe_rng_rand() % num_items; + read_image(&image_file, &label_file, i, rows, cols, + pixels, &label_i); + read_image(&image_file, &label_file, j, rows, cols, + pixels + (rows * cols), &label_j); + datum.set_data(pixels, 2*rows*cols); + if (label_i == label_j) { + datum.set_label(1); + } else { + datum.set_label(0); + } + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", itemid); + db->Put(leveldb::WriteOptions(), std::string(key), value); + } + + delete db; + delete pixels; +} + +int main(int argc, char** argv) { + if (argc != 4) { + printf("This script converts the MNIST dataset to the leveldb format used\n" + "by caffe to train a siamese network.\n" + "Usage:\n" + " convert_mnist_data input_image_file input_label_file " + "output_db_file\n" + "The MNIST dataset could be downloaded at\n" + " http://yann.lecun.com/exdb/mnist/\n" + "You should gunzip them after downloading.\n"); + } else { + google::InitGoogleLogging(argv[0]); + convert_dataset(argv[1], argv[2], argv[3]); + } + return 0; +} diff --git a/examples/siamese/create_lfw_siamese.sh b/examples/siamese/create_lfw_siamese.sh new file mode 100755 index 00000000000..3790b9eb2a0 --- /dev/null +++ b/examples/siamese/create_lfw_siamese.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env sh +# This script converts the mnist data into leveldb format. + +EXAMPLES=./build/examples/siamese +DATA=./data/lfw + +echo "Creating leveldb..." 
+ +rm -rf ./examples/siamese/lfw_siamese_train_leveldb +rm -rf ./examples/siamese/lfw_siamese_test_leveldb + +$EXAMPLES/convert_lfw_siamese_data.bin \ + $DATA/traindata \ + $DATA/trainlabel \ + ./examples/siamese/lfw_siamese_train_leveldb +$EXAMPLES/convert_mnist_siamese_data.bin \ + $DATA/testdata \ + $DATA/testlabel \ + ./examples/siamese/lfw_siamese_test_leveldb + +echo "Done." diff --git a/examples/siamese/lfw_siamese.prototxt b/examples/siamese/lfw_siamese.prototxt new file mode 100644 index 00000000000..106d9aa76f4 --- /dev/null +++ b/examples/siamese/lfw_siamese.prototxt @@ -0,0 +1,113 @@ +name: "lfw_siamese" +input: "data" +input_dim: 10000 +input_dim: 1 +input_dim: 150 +input_dim: 80 +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 500 + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "ip1" + top: "ip2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 10 + } +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip2" + top: "feat" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + 
num_output: 2 + } +} diff --git a/examples/siamese/lfw_siamese_solver.prototxt b/examples/siamese/lfw_siamese_solver.prototxt new file mode 100644 index 00000000000..2aaafb63c1f --- /dev/null +++ b/examples/siamese/lfw_siamese_solver.prototxt @@ -0,0 +1,25 @@ +# The train/test net protocol buffer definition +net: "examples/siamese/lfw_siamese_train_test.prototxt" +# test_iter specifies how many forward passes the test should carry out. +# In the case of MNIST, we have test batch size 100 and 100 test iterations, +# covering the full 10,000 testing images. +test_iter: 100 +# Carry out testing every 500 training iterations. +test_interval: 500 +# The base learning rate, momentum and the weight decay of the network. +base_lr: 0.01 +momentum: 0.9 +weight_decay: 0.0000 +# The learning rate policy +lr_policy: "inv" +gamma: 0.0001 +power: 0.75 +# Display every 100 iterations +display: 100 +# The maximum number of iterations +max_iter: 50000 +# snapshot intermediate results +snapshot: 5000 +snapshot_prefix: "examples/siamese/lfw_siamese" +# solver mode: CPU or GPU +solver_mode: CPU diff --git a/examples/siamese/lfw_siamese_train_test.prototxt b/examples/siamese/lfw_siamese_train_test.prototxt new file mode 100644 index 00000000000..049187bf3d4 --- /dev/null +++ b/examples/siamese/lfw_siamese_train_test.prototxt @@ -0,0 +1,349 @@ +name: "lfw_siamese_train_test" +layer { + name: "pair_data" + type: "Data" + top: "pair_data" + top: "sim" + include { + phase: TRAIN + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/siamese/lfw_siamese_train_leveldb" + batch_size: 64 + } +} +layer { + name: "pair_data" + type: "Data" + top: "pair_data" + top: "sim" + include { + phase: TEST + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/siamese/lfw_siamese_test_leveldb" + batch_size: 100 + } +} +layer { + name: "slice_pair" + type: "Slice" + bottom: "pair_data" + top: "data" + top: "data_p" + slice_param { + slice_dim: 1 + 
slice_point: 1 + } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "ip1" + top: "ip2" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip2" + top: "feat" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "conv1_p" + type: "Convolution" + bottom: "data_p" + top: "conv1_p" + param { + name: 
"conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1_p" + type: "Pooling" + bottom: "conv1_p" + top: "pool1_p" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2_p" + type: "Convolution" + bottom: "pool1_p" + top: "conv2_p" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2_p" + type: "Pooling" + bottom: "conv2_p" + top: "pool2_p" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1_p" + type: "InnerProduct" + bottom: "pool2_p" + top: "ip1_p" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1_p" + type: "ReLU" + bottom: "ip1_p" + top: "ip1_p" +} +layer { + name: "ip2_p" + type: "InnerProduct" + bottom: "ip1_p" + top: "ip2_p" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat_p" + type: "InnerProduct" + bottom: "ip2_p" + top: "feat_p" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "loss" + type: "ContrastiveLoss" + bottom: "feat" + bottom: "feat_p" + bottom: "sim" + top: "loss" + contrastive_loss_param { + margin: 1 + } +} diff --git 
a/examples/siamese/train_lfw_siamese.sh b/examples/siamese/train_lfw_siamese.sh new file mode 100755 index 00000000000..0a879a65419 --- /dev/null +++ b/examples/siamese/train_lfw_siamese.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env sh + +TOOLS=./build/tools + +$TOOLS/caffe train --solver=examples/siamese/lfw_siamese_solver.prototxt diff --git a/examples/triplet/convert_lfw_triplet_data.cpp b/examples/triplet/convert_lfw_triplet_data.cpp new file mode 100644 index 00000000000..9f65fab76b4 --- /dev/null +++ b/examples/triplet/convert_lfw_triplet_data.cpp @@ -0,0 +1,126 @@ +// This script converts the MNIST dataset to the leveldb format used +// by caffe to train siamese network. +// Usage: +// convert_mnist_data input_image_file input_label_file output_db_file +// The MNIST dataset could be downloaded at +// http://yann.lecun.com/exdb/mnist/ +#include // NOLINT(readability/streams) +#include + +#include "glog/logging.h" +#include "google/protobuf/text_format.h" +#include "leveldb/db.h" +#include "stdint.h" + +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/math_functions.hpp" + +uint32_t swap_endian(uint32_t val) { + val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); + return (val << 16) | (val >> 16); +} + +void read_image(std::ifstream* image_file, std::ifstream* label_file, + uint32_t index, uint32_t rows, uint32_t cols, + char* pixels, char* label) { + image_file->seekg(index * rows * cols + 16); + image_file->read(pixels, rows * cols); + label_file->seekg(index + 8); + label_file->read(label, 1); +} + +void convert_dataset(const char* image_filename, const char* label_filename, + const char* db_filename) { + // Open files + std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); + std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); + CHECK(image_file) << "Unable to open file " << image_filename; + CHECK(label_file) << "Unable to open file " << label_filename; + // Read the magic and the meta data + uint32_t magic; 
+ uint32_t num_items; + uint32_t num_labels; + uint32_t rows; + uint32_t cols; + + image_file.read(reinterpret_cast<char*>(&magic), 4); + magic = swap_endian(magic); + label_file.read(reinterpret_cast<char*>(&magic), 4); + magic = swap_endian(magic); + image_file.read(reinterpret_cast<char*>(&num_items), 4); + num_items = swap_endian(num_items); + label_file.read(reinterpret_cast<char*>(&num_labels), 4); + num_labels = swap_endian(num_labels); + CHECK_EQ(num_items, num_labels); + image_file.read(reinterpret_cast<char*>(&rows), 4); + rows = swap_endian(rows); + image_file.read(reinterpret_cast<char*>(&cols), 4); + cols = swap_endian(cols); + + // Open leveldb + leveldb::DB* db; + leveldb::Options options; + options.create_if_missing = true; + options.error_if_exists = true; + leveldb::Status status = leveldb::DB::Open( + options, db_filename, &db); + CHECK(status.ok()) << "Failed to open leveldb " << db_filename + << ". Is it already existing?"; + + char label_i; + char label_j; + char label_k; + char* pixels = new char[3 * rows * cols]; + const int kMaxKeyLength = 10; + char key[kMaxKeyLength]; + std::string value; + + caffe::Datum datum; + datum.set_channels(3); // one channel for each image in the pair + datum.set_height(rows); + datum.set_width(cols); + LOG(INFO) << "A total of " << num_items << " items."; + LOG(INFO) << "Rows: " << rows << " Cols: " << cols; + for (int itemid = 0; itemid < num_items; ++itemid) { + int i = caffe::caffe_rng_rand() % num_items; // pick triplet groups + int j = caffe::caffe_rng_rand() % num_items; + int k = caffe::caffe_rng_rand() % num_items; + read_image(&image_file, &label_file, i, rows, cols, + pixels, &label_i); + read_image(&image_file, &label_file, j, rows, cols, + pixels + (rows * cols), &label_j); + read_image(&image_file, &label_file, k, rows, cols, + pixels + (2 * rows * cols), &label_k); + + datum.set_data(pixels, 3*rows*cols); + if (label_i == label_j && label_i != label_k) { + datum.set_label(1); + datum.SerializeToString(&value); + snprintf(key, 
kMaxKeyLength, "%08d", itemid); + db->Put(leveldb::WriteOptions(), std::string(key), value); + } else { + itemid--; + datum.set_label(0); + } + } + + delete db; + delete pixels; +} + +int main(int argc, char** argv) { + if (argc != 4) { + printf("This script converts the MNIST dataset to the leveldb format used\n" + "by caffe to train a siamese network.\n" + "Usage:\n" + " convert_mnist_data input_image_file input_label_file " + "output_db_file\n" + "The MNIST dataset could be downloaded at\n" + " http://yann.lecun.com/exdb/mnist/\n" + "You should gunzip them after downloading.\n"); + } else { + google::InitGoogleLogging(argv[0]); + convert_dataset(argv[1], argv[2], argv[3]); + } + return 0; +} diff --git a/examples/triplet/create_lfw_triplet.sh b/examples/triplet/create_lfw_triplet.sh new file mode 100755 index 00000000000..382a9021f10 --- /dev/null +++ b/examples/triplet/create_lfw_triplet.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env sh +# This script converts the lfw data into leveldb format. + +EXAMPLES=./build/examples/triplet +DATA=./data/lfw + +echo "Creating leveldb..." + +rm -rf ./examples/triplet/lfw_triplet_train_leveldb +rm -rf ./examples/triplet/lfw_triplet_test_leveldb + +$EXAMPLES/convert_lfw_triplet_data.bin \ + $DATA/traindata \ + $DATA/trainlabel \ + ./examples/triplet/lfw_triplet_train_leveldb +$EXAMPLES/convert_lfw_triplet_data.bin \ + $DATA/testdata \ + $DATA/testlabel \ + ./examples/triplet/lfw_triplet_test_leveldb + +echo "Done." 
diff --git a/examples/triplet/lfw_triplet.prototxt b/examples/triplet/lfw_triplet.prototxt new file mode 100644 index 00000000000..9537d1feb8b --- /dev/null +++ b/examples/triplet/lfw_triplet.prototxt @@ -0,0 +1,113 @@ +name: "lfw_siamese" +input: "data" +input_dim: 10000 +input_dim: 1 +input_dim: 150 +input_dim: 130 +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 500 + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "ip1" + top: "ip2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 10 + } +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip2" + top: "feat" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 2 + } +} diff --git a/examples/triplet/lfw_triplet_solver.prototxt b/examples/triplet/lfw_triplet_solver.prototxt new file mode 100644 index 00000000000..eb4c2c369e9 --- /dev/null +++ b/examples/triplet/lfw_triplet_solver.prototxt @@ -0,0 +1,25 @@ +# The train/test net protocol buffer definition +net: "examples/triplet/lfw_triplet_train_test.prototxt" +# test_iter specifies how 
many forward passes the test should carry out. +# In the case of lfw, we have test batch size 100 and 100 test iterations, +# covering the full 10,000 testing images. +test_iter: 100 +# Carry out testing every 500 training iterations. +test_interval: 500 +# The base learning rate, momentum and the weight decay of the network. +base_lr: 0.01 +momentum: 0.9 +weight_decay: 0.0000 +# The learning rate policy +lr_policy: "inv" +gamma: 0.0001 +power: 0.75 +# Display every 100 iterations +display: 100 +# The maximum number of iterations +max_iter: 50000 +# snapshot intermediate results +snapshot: 5000 +snapshot_prefix: "examples/triplet/lfw_triplet" +# solver mode: CPU or GPU +solver_mode: CPU diff --git a/examples/triplet/lfw_triplet_train_test.prototxt b/examples/triplet/lfw_triplet_train_test.prototxt new file mode 100644 index 00000000000..59ef26e90a4 --- /dev/null +++ b/examples/triplet/lfw_triplet_train_test.prototxt @@ -0,0 +1,500 @@ +name: "lfw_triplet_train_test" +layer { + name: "triplet_data" + type: "Data" + top: "triplet_data" + top: "sim" + include { + phase: TRAIN + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/triplet/lfw_triplet_train_leveldb" + batch_size: 64 + } +} +layer { + name: "triplet_data" + type: "Data" + top: "triplet_data" + top: "sim" + include { + phase: TEST + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/triplet/lfw_triplet_test_leveldb" + batch_size: 100 + } +} +layer { + name: "slice_triplet" + type: "Slice" + bottom: "triplet_data" + top: "data" + top: "data_true" + top: "data_false" + slice_param { + slice_dim: 1 + slice_point: 1 + slice_point: 2 + } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } 
+} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "ip1" + top: "ip2" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip2" + top: "feat" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "conv1_true" + type: "Convolution" + bottom: "data_true" + top: "conv1_true" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1_true" + type: "Pooling" + bottom: "conv1_true" + top: "pool1_true" + pooling_param { + 
pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2_true" + type: "Convolution" + bottom: "pool1_true" + top: "conv2_true" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2_true" + type: "Pooling" + bottom: "conv2_true" + top: "pool2_true" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1_true" + type: "InnerProduct" + bottom: "pool2_true" + top: "ip1_true" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1_true" + type: "ReLU" + bottom: "ip1_true" + top: "ip1_true" +} +layer { + name: "ip2_true" + type: "InnerProduct" + bottom: "ip1_true" + top: "ip2_true" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat_true" + type: "InnerProduct" + bottom: "ip2_true" + top: "feat_true" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "conv1_false" + type: "Convolution" + bottom: "data_false" + top: "conv1_false" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1_false" + type: "Pooling" + bottom: "conv1_false" + top: "pool1_false" + pooling_param { + pool: 
MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2_false" + type: "Convolution" + bottom: "pool1_false" + top: "conv2_false" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2_false" + type: "Pooling" + bottom: "conv2_false" + top: "pool2_false" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1_false" + type: "InnerProduct" + bottom: "pool2_false" + top: "ip1_false" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1_false" + type: "ReLU" + bottom: "ip1_false" + top: "ip1_false" +} +layer { + name: "ip2_false" + type: "InnerProduct" + bottom: "ip1_false" + top: "ip2_false" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat_false" + type: "InnerProduct" + bottom: "ip2_false" + top: "feat_false" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "loss" + type: "TripletLoss" + bottom: "feat" + bottom: "feat_true" + bottom: "feat_false" + bottom: "sim" + top: "loss" + triplet_loss_param { + margin: 0.2 + } +} + diff --git a/examples/triplet/train_lfw_triplet.sh b/examples/triplet/train_lfw_triplet.sh new file mode 100755 index 00000000000..076738a5e63 --- /dev/null +++ b/examples/triplet/train_lfw_triplet.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env sh + +TOOLS=./build/tools + 
+$TOOLS/caffe train --solver=examples/triplet/lfw_triplet_solver.prototxt From cedc70b5e26ecfec5d4f1e9804d43766c19e2e18 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Fri, 24 Jul 2015 11:36:02 +0800 Subject: [PATCH 53/82] ready for a review on triplet training using the 3D data, network definition is the same as described in paper delete spaces --- examples/triplet/convert_lfw_triplet_data.cpp | 26 ++++++++++++++----- .../triplet/convert_mnist_triplet_data.cpp | 24 ++++++++++++----- examples/triplet/create_3d_triplet.sh | 21 +++++++++++++++ 3 files changed, 59 insertions(+), 12 deletions(-) create mode 100755 examples/triplet/create_3d_triplet.sh diff --git a/examples/triplet/convert_lfw_triplet_data.cpp b/examples/triplet/convert_lfw_triplet_data.cpp index 9f65fab76b4..0fb87a17b4b 100644 --- a/examples/triplet/convert_lfw_triplet_data.cpp +++ b/examples/triplet/convert_lfw_triplet_data.cpp @@ -45,8 +45,10 @@ void convert_dataset(const char* image_filename, const char* label_filename, image_file.read(reinterpret_cast<char*>(&magic), 4); magic = swap_endian(magic); + CHECK_EQ(magic, 2051) << "Incorrect image file magic."; label_file.read(reinterpret_cast<char*>(&magic), 4); magic = swap_endian(magic); + CHECK_EQ(magic, 2049) << "Incorrect label file magic."; image_file.read(reinterpret_cast<char*>(&num_items), 4); num_items = swap_endian(num_items); label_file.read(reinterpret_cast<char*>(&num_labels), 4); num_labels = swap_endian(num_labels); @@ -67,33 +69,45 @@ void convert_dataset(const char* image_filename, const char* label_filename, CHECK(status.ok()) << "Failed to open leveldb " << db_filename << ". 
Is it already existing?"; - char label_i; + char label_i; // label for triplet char label_j; char label_k; - char* pixels = new char[3 * rows * cols]; + char label_l; // label for pair wise + char label_m; + char* pixels = new char[5 * rows * cols]; const int kMaxKeyLength = 10; char key[kMaxKeyLength]; std::string value; caffe::Datum datum; - datum.set_channels(3); // one channel for each image in the pair + datum.set_channels(5); // one channel for each image in the triplet and pair datum.set_height(rows); datum.set_width(cols); LOG(INFO) << "A total of " << num_items << " items."; LOG(INFO) << "Rows: " << rows << " Cols: " << cols; for (int itemid = 0; itemid < num_items; ++itemid) { - int i = caffe::caffe_rng_rand() % num_items; // pick triplet groups + // pick triplet groups + int i = caffe::caffe_rng_rand() % num_items; int j = caffe::caffe_rng_rand() % num_items; int k = caffe::caffe_rng_rand() % num_items; + // pick pair wise groups + int l = caffe::caffe_rng_rand() % num_items; + int m = caffe::caffe_rng_rand() % num_items; + // read triplet groups read_image(&image_file, &label_file, i, rows, cols, pixels, &label_i); read_image(&image_file, &label_file, j, rows, cols, pixels + (rows * cols), &label_j); read_image(&image_file, &label_file, k, rows, cols, pixels + (2 * rows * cols), &label_k); + // read pair wise groups + read_image(&image_file, &label_file, l, rows, cols, + pixels + (3 * rows * cols), &label_l); + read_image(&image_file, &label_file, m, rows, cols, + pixels + (4 * rows * cols), &label_m); - datum.set_data(pixels, 3*rows*cols); - if (label_i == label_j && label_i != label_k) { + datum.set_data(pixels, 5*rows*cols); // set data + if ((label_i == label_j && label_i != label_k) && (label_l == label_m)) { datum.set_label(1); datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", itemid); diff --git a/examples/triplet/convert_mnist_triplet_data.cpp b/examples/triplet/convert_mnist_triplet_data.cpp index 0cbab642b7c..c59a75efe01 
100644 --- a/examples/triplet/convert_mnist_triplet_data.cpp +++ b/examples/triplet/convert_mnist_triplet_data.cpp @@ -69,33 +69,45 @@ void convert_dataset(const char* image_filename, const char* label_filename, CHECK(status.ok()) << "Failed to open leveldb " << db_filename << ". Is it already existing?"; - char label_i; + char label_i; // label for triplet char label_j; char label_k; - char* pixels = new char[3 * rows * cols]; + char label_l; // label for pair wise + char label_m; + char* pixels = new char[5 * rows * cols]; const int kMaxKeyLength = 10; char key[kMaxKeyLength]; std::string value; caffe::Datum datum; - datum.set_channels(3); // one channel for each image in the pair + datum.set_channels(5); // one channel for each image in the triplet and pair datum.set_height(rows); datum.set_width(cols); LOG(INFO) << "A total of " << num_items << " items."; LOG(INFO) << "Rows: " << rows << " Cols: " << cols; for (int itemid = 0; itemid < num_items; ++itemid) { - int i = caffe::caffe_rng_rand() % num_items; // pick triplet groups + // pick triplet groups + int i = caffe::caffe_rng_rand() % num_items; int j = caffe::caffe_rng_rand() % num_items; int k = caffe::caffe_rng_rand() % num_items; + // pick pair wise groups + int l = caffe::caffe_rng_rand() % num_items; + int m = caffe::caffe_rng_rand() % num_items; + // read triplet groups read_image(&image_file, &label_file, i, rows, cols, pixels, &label_i); read_image(&image_file, &label_file, j, rows, cols, pixels + (rows * cols), &label_j); read_image(&image_file, &label_file, k, rows, cols, pixels + (2 * rows * cols), &label_k); + // pair wise groups + read_image(&image_file, &label_file, l, rows, cols, + pixels + (3 * rows * cols), &label_l); + read_image(&image_file, &label_file, m, rows, cols, + pixels + (4 * rows * cols), &label_m); - datum.set_data(pixels, 3*rows*cols); - if (label_i == label_j && label_i != label_k) { + datum.set_data(pixels, 5*rows*cols); // set data + if ((label_i == label_j && label_i != 
label_k) && (label_l == label_m)) { datum.set_label(1); datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", itemid); diff --git a/examples/triplet/create_3d_triplet.sh b/examples/triplet/create_3d_triplet.sh new file mode 100755 index 00000000000..3cd8ee469ce --- /dev/null +++ b/examples/triplet/create_3d_triplet.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env sh +# This script converts the mnist data into leveldb format. + +EXAMPLES=./build/examples/triplet +DATA=./data/linemod + +echo "Creating leveldb..." + +rm -rf ./examples/triplet/3d_triplet_train_leveldb +rm -rf ./examples/triplet/3d_triplet_test_leveldb + +$EXAMPLES/convert_3d_triplet_data.bin \ + $DATA/binary_image_train \ + $DATA/binary_label_train \ + ./examples/triplet/3d_triplet_train_leveldb +$EXAMPLES/convert_3d_triplet_data.bin \ + $DATA/binary_image_test \ + $DATA/binary_label_test \ + ./examples/triplet/3d_triplet_test_leveldb + +echo "Done." From 551de670564af594d585a37d18590c82fb739d84 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Thu, 30 Jul 2015 15:16:19 +0800 Subject: [PATCH 54/82] add pose information for training data arrangement in triplet training delete macro add the codes wrongly deteted... --- examples/triplet/convert_lfw_triplet_data.cpp | 58 +++++++++---------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/examples/triplet/convert_lfw_triplet_data.cpp b/examples/triplet/convert_lfw_triplet_data.cpp index 0fb87a17b4b..53e81f972ce 100644 --- a/examples/triplet/convert_lfw_triplet_data.cpp +++ b/examples/triplet/convert_lfw_triplet_data.cpp @@ -1,17 +1,13 @@ -// This script converts the MNIST dataset to the leveldb format used -// by caffe to train siamese network. 
// Usage: // convert_mnist_data input_image_file input_label_file output_db_file // The MNIST dataset could be downloaded at // http://yann.lecun.com/exdb/mnist/ #include // NOLINT(readability/streams) #include - #include "glog/logging.h" #include "google/protobuf/text_format.h" #include "leveldb/db.h" #include "stdint.h" - #include "caffe/proto/caffe.pb.h" #include "caffe/util/math_functions.hpp" @@ -22,11 +18,13 @@ uint32_t swap_endian(uint32_t val) { void read_image(std::ifstream* image_file, std::ifstream* label_file, uint32_t index, uint32_t rows, uint32_t cols, - char* pixels, char* label) { + char* pixels, char* label_temp, signed char* label) { image_file->seekg(index * rows * cols + 16); image_file->read(pixels, rows * cols); - label_file->seekg(index + 8); - label_file->read(label, 1); + label_file->seekg(index * 4 + 8); + label_file->read(label_temp, 4); + for (int i = 0; i < 4; i++) + *(label+i) = (signed char)*(label_temp+i); } void convert_dataset(const char* image_filename, const char* label_filename, @@ -48,7 +46,7 @@ void convert_dataset(const char* image_filename, const char* label_filename, CHECK_EQ(magic, 2051) << "Incorrect image file magic."; label_file.read(reinterpret_cast(&magic), 4); magic = swap_endian(magic); - CHECK_EQ(magic, 2049) << "Incorrect label file magic."; + CHECK_EQ(magic, 2050) << "Incorrect label file magic."; image_file.read(reinterpret_cast(&num_items), 4); num_items = swap_endian(num_items); label_file.read(reinterpret_cast(&num_labels), 4); @@ -69,11 +67,12 @@ void convert_dataset(const char* image_filename, const char* label_filename, CHECK(status.ok()) << "Failed to open leveldb " << db_filename << ". 
Is it already existing?"; - char label_i; // label for triplet - char label_j; - char label_k; - char label_l; // label for pair wise - char label_m; + char* label_temp = new char[4]; // label for unsigned char* + signed char* label_i = new signed char[4]; // label for triplet + signed char* label_j = new signed char[4]; + signed char* label_k = new signed char[4]; + signed char* label_l = new signed char[4]; // label for pair wise + signed char* label_m = new signed char[4]; char* pixels = new char[5 * rows * cols]; const int kMaxKeyLength = 10; char key[kMaxKeyLength]; @@ -85,29 +84,30 @@ void convert_dataset(const char* image_filename, const char* label_filename, datum.set_width(cols); LOG(INFO) << "A total of " << num_items << " items."; LOG(INFO) << "Rows: " << rows << " Cols: " << cols; - for (int itemid = 0; itemid < num_items; ++itemid) { - // pick triplet groups - int i = caffe::caffe_rng_rand() % num_items; + for (unsigned int itemid = 0; itemid < 10 * num_items; ++itemid) { + int i = caffe::caffe_rng_rand() % num_items; // pick triplet groups int j = caffe::caffe_rng_rand() % num_items; int k = caffe::caffe_rng_rand() % num_items; - // pick pair wise groups - int l = caffe::caffe_rng_rand() % num_items; + int l = caffe::caffe_rng_rand() % num_items; // pick pair wise groups int m = caffe::caffe_rng_rand() % num_items; - // read triplet groups - read_image(&image_file, &label_file, i, rows, cols, - pixels, &label_i); + read_image(&image_file, &label_file, i, rows, cols, // read triplet groups + pixels, label_temp, label_i); read_image(&image_file, &label_file, j, rows, cols, - pixels + (rows * cols), &label_j); + pixels + (rows * cols), label_temp, label_j); read_image(&image_file, &label_file, k, rows, cols, - pixels + (2 * rows * cols), &label_k); - // read pair wise groups - read_image(&image_file, &label_file, l, rows, cols, - pixels + (3 * rows * cols), &label_l); + pixels + (2 * rows * cols), label_temp, label_k); + read_image(&image_file, 
&label_file, l, rows, cols, // read pair wise groups + pixels + (3 * rows * cols), label_temp, label_l); read_image(&image_file, &label_file, m, rows, cols, - pixels + (4 * rows * cols), &label_m); + pixels + (4 * rows * cols), label_temp, label_m); datum.set_data(pixels, 5*rows*cols); // set data - if ((label_i == label_j && label_i != label_k) && (label_l == label_m)) { + bool pose_pass; + int dist_ij = (int)((*(label_i+1)-*(label_j+1))*(*(label_i+1)-*(label_j+1)) + (*(label_i+2)-*(label_j+2))*(*(label_i+2)-*(label_j+2)) + (*(label_i+3)-*(label_j+3))*(*(label_i+3)-*(label_j+3))); + int dist_ik = (int)((*(label_i+1)-*(label_k+1))*(*(label_i+1)-*(label_k+1)) + (*(label_i+2)-*(label_k+2))*(*(label_i+2)-*(label_k+2)) + (*(label_i+3)-*(label_k+3))*(*(label_i+3)-*(label_k+3))); + if (dist_ij < dist_ik ) + pose_pass = true; + if (((*label_i == *label_j && *label_i != *label_k) || ((*label_i == *label_j && *label_i == *label_k) && pose_pass)) && (*label_l == *label_m)) { datum.set_label(1); datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", itemid); @@ -117,7 +117,6 @@ void convert_dataset(const char* image_filename, const char* label_filename, datum.set_label(0); } } - delete db; delete pixels; } @@ -138,3 +137,4 @@ int main(int argc, char** argv) { } return 0; } + From b8fb519c0b609749938be6a238cbf901b71498d5 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Fri, 31 Jul 2015 14:08:02 +0800 Subject: [PATCH 55/82] rearrange the training samples selection codes remove static_cast declare modify the data generation condition modify the data generation condition --- examples/triplet/convert_lfw_triplet_data.cpp | 47 ++++++++++++++----- 1 file changed, 35 insertions(+), 12 deletions(-) diff --git a/examples/triplet/convert_lfw_triplet_data.cpp b/examples/triplet/convert_lfw_triplet_data.cpp index 53e81f972ce..280755fddab 100644 --- a/examples/triplet/convert_lfw_triplet_data.cpp +++ b/examples/triplet/convert_lfw_triplet_data.cpp @@ -4,12 +4,12 @@ // 
http://yann.lecun.com/exdb/mnist/ #include // NOLINT(readability/streams) #include +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/math_functions.hpp" #include "glog/logging.h" #include "google/protobuf/text_format.h" #include "leveldb/db.h" #include "stdint.h" -#include "caffe/proto/caffe.pb.h" -#include "caffe/util/math_functions.hpp" uint32_t swap_endian(uint32_t val) { val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); @@ -84,30 +84,54 @@ void convert_dataset(const char* image_filename, const char* label_filename, datum.set_width(cols); LOG(INFO) << "A total of " << num_items << " items."; LOG(INFO) << "Rows: " << rows << " Cols: " << cols; - for (unsigned int itemid = 0; itemid < 10 * num_items; ++itemid) { + for (unsigned int itemid = 0; itemid < 10 * num_items; ++itemid) {\ int i = caffe::caffe_rng_rand() % num_items; // pick triplet groups int j = caffe::caffe_rng_rand() % num_items; int k = caffe::caffe_rng_rand() % num_items; int l = caffe::caffe_rng_rand() % num_items; // pick pair wise groups int m = caffe::caffe_rng_rand() % num_items; - read_image(&image_file, &label_file, i, rows, cols, // read triplet groups + read_image(&image_file, &label_file, i, rows, cols, // read triplet pixels, label_temp, label_i); read_image(&image_file, &label_file, j, rows, cols, pixels + (rows * cols), label_temp, label_j); read_image(&image_file, &label_file, k, rows, cols, pixels + (2 * rows * cols), label_temp, label_k); - read_image(&image_file, &label_file, l, rows, cols, // read pair wise groups + read_image(&image_file, &label_file, l, rows, cols, // read pair wise pixels + (3 * rows * cols), label_temp, label_l); read_image(&image_file, &label_file, m, rows, cols, pixels + (4 * rows * cols), label_temp, label_m); datum.set_data(pixels, 5*rows*cols); // set data - bool pose_pass; - int dist_ij = (int)((*(label_i+1)-*(label_j+1))*(*(label_i+1)-*(label_j+1)) + (*(label_i+2)-*(label_j+2))*(*(label_i+2)-*(label_j+2)) + 
(*(label_i+3)-*(label_j+3))*(*(label_i+3)-*(label_j+3))); - int dist_ik = (int)((*(label_i+1)-*(label_k+1))*(*(label_i+1)-*(label_k+1)) + (*(label_i+2)-*(label_k+2))*(*(label_i+2)-*(label_k+2)) + (*(label_i+3)-*(label_k+3))*(*(label_i+3)-*(label_k+3))); - if (dist_ij < dist_ik ) - pose_pass = true; - if (((*label_i == *label_j && *label_i != *label_k) || ((*label_i == *label_j && *label_i == *label_k) && pose_pass)) && (*label_l == *label_m)) { + bool triplet_class_pass = false; + bool triplet_class_same = false; + bool triplet_pose_pass = false; + bool pair_class_pass = false; + + int ij_diff_x = static_cast(*(label_i+1)-*(label_j+1)); + int ij_diff_y = static_cast(*(label_i+2)-*(label_j+2)); + int ij_diff_z = static_cast(*(label_i+3)-*(label_j+3)); + int ik_diff_x = static_cast(*(label_i+1)-*(label_k+1)); + int ik_diff_y = static_cast(*(label_i+2)-*(label_k+2)); + int ik_diff_z = static_cast(*(label_i+3)-*(label_k+3)); + + int ij_x = ij_diff_x*ij_diff_x; + int ij_y = ij_diff_y*ij_diff_y; + int ij_z = ij_diff_z*ij_diff_z; + int ik_x = ik_diff_x*ik_diff_x; + int ik_y = ik_diff_y*ik_diff_y; + int ik_z = ik_diff_z*ik_diff_z; + + int dist_ij = ij_x + ij_y + ij_z; + int dist_ik = ik_x + ik_y + ik_z; + if ((*label_i == *label_j) && (*label_i == *label_k)) + triplet_class_same = true; + if ((dist_ij < dist_ik) && (triplet_class_same)) + triplet_pose_pass = true; + if ((*label_i == *label_j) && (*label_i != *label_k)) + triplet_class_pass = true; + if (*label_l == *label_m) + pair_class_pass = true; + if ((triplet_class_pass || triplet_pose_pass) && pair_class_pass) { datum.set_label(1); datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", itemid); @@ -137,4 +161,3 @@ int main(int argc, char** argv) { } return 0; } - From 451a136cd68164f18fdc4fec95b08d7498646f69 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Sun, 9 Aug 2015 15:11:32 +0800 Subject: [PATCH 56/82] No sclice layer version which could forward a set of triplets together with 1 pair wise delete 
file --- examples/siamese/convert_lfw_siamese_data.cpp | 121 ----- examples/siamese/create_lfw_siamese.sh | 21 - examples/siamese/lfw_siamese.prototxt | 113 ---- examples/siamese/lfw_siamese_solver.prototxt | 25 - .../siamese/lfw_siamese_train_test.prototxt | 349 ------------ .../siamese/mnist_siamese_solver.prototxt | 2 +- examples/siamese/train_lfw_siamese.sh | 5 - examples/triplet/convert_3d_triplet_data.cpp | 202 +++++++ examples/triplet/convert_lfw_triplet_data.cpp | 163 ------ .../triplet/convert_mnist_triplet_data.cpp | 140 ----- examples/triplet/create_lfw_triplet.sh | 21 - examples/triplet/create_mnist_triplet.sh | 21 - examples/triplet/lfw_triplet.prototxt | 113 ---- examples/triplet/lfw_triplet_solver.prototxt | 25 - .../triplet/lfw_triplet_train_test.prototxt | 500 ------------------ examples/triplet/mnist_triplet.prototxt | 113 ---- .../triplet/mnist_triplet_solver.prototxt | 25 - .../triplet/mnist_triplet_train_test.prototxt | 500 ------------------ examples/triplet/train_lfw_triplet.sh | 5 - examples/triplet/train_mnist_triplet.sh | 5 - src/caffe/layers/triplet_loss_layer.cpp | 158 +++++- 21 files changed, 350 insertions(+), 2277 deletions(-) delete mode 100644 examples/siamese/convert_lfw_siamese_data.cpp delete mode 100755 examples/siamese/create_lfw_siamese.sh delete mode 100644 examples/siamese/lfw_siamese.prototxt delete mode 100644 examples/siamese/lfw_siamese_solver.prototxt delete mode 100644 examples/siamese/lfw_siamese_train_test.prototxt delete mode 100755 examples/siamese/train_lfw_siamese.sh create mode 100644 examples/triplet/convert_3d_triplet_data.cpp delete mode 100644 examples/triplet/convert_lfw_triplet_data.cpp delete mode 100644 examples/triplet/convert_mnist_triplet_data.cpp delete mode 100755 examples/triplet/create_lfw_triplet.sh delete mode 100755 examples/triplet/create_mnist_triplet.sh delete mode 100644 examples/triplet/lfw_triplet.prototxt delete mode 100644 examples/triplet/lfw_triplet_solver.prototxt delete mode 100644 
examples/triplet/lfw_triplet_train_test.prototxt delete mode 100644 examples/triplet/mnist_triplet.prototxt delete mode 100644 examples/triplet/mnist_triplet_solver.prototxt delete mode 100644 examples/triplet/mnist_triplet_train_test.prototxt delete mode 100755 examples/triplet/train_lfw_triplet.sh delete mode 100755 examples/triplet/train_mnist_triplet.sh diff --git a/examples/siamese/convert_lfw_siamese_data.cpp b/examples/siamese/convert_lfw_siamese_data.cpp deleted file mode 100644 index fe134ca9b4e..00000000000 --- a/examples/siamese/convert_lfw_siamese_data.cpp +++ /dev/null @@ -1,121 +0,0 @@ -// -// This script converts the MNIST dataset to the leveldb format used -// by caffe to train siamese network. -// Usage: -// convert_mnist_data input_image_file input_label_file output_db_file -// The MNIST dataset could be downloaded at -// http://yann.lecun.com/exdb/mnist/ -#include // NOLINT(readability/streams) -#include - -#include "glog/logging.h" -#include "google/protobuf/text_format.h" -#include "leveldb/db.h" -#include "stdint.h" - -#include "caffe/proto/caffe.pb.h" -#include "caffe/util/math_functions.hpp" - -uint32_t swap_endian(uint32_t val) { - val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); - return (val << 16) | (val >> 16); -} - -void read_image(std::ifstream* image_file, std::ifstream* label_file, - uint32_t index, uint32_t rows, uint32_t cols, - char* pixels, char* label) { - image_file->seekg(index * rows * cols + 16); - image_file->read(pixels, rows * cols); - label_file->seekg(index + 8); - label_file->read(label, 1); -} - -void convert_dataset(const char* image_filename, const char* label_filename, - const char* db_filename) { - // Open files - std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); - std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); - CHECK(image_file) << "Unable to open file " << image_filename; - CHECK(label_file) << "Unable to open file " << label_filename; - // Read 
the magic and the meta data - uint32_t magic; - uint32_t num_items; - uint32_t num_labels; - uint32_t rows; - uint32_t cols; - - image_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - label_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - image_file.read(reinterpret_cast(&num_items), 4); - num_items = swap_endian(num_items); - label_file.read(reinterpret_cast(&num_labels), 4); - num_labels = swap_endian(num_labels); - CHECK_EQ(num_items, num_labels); - image_file.read(reinterpret_cast(&rows), 4); - rows = swap_endian(rows); - image_file.read(reinterpret_cast(&cols), 4); - cols = swap_endian(cols); - - // Open leveldb - leveldb::DB* db; - leveldb::Options options; - options.create_if_missing = true; - options.error_if_exists = true; - leveldb::Status status = leveldb::DB::Open( - options, db_filename, &db); - CHECK(status.ok()) << "Failed to open leveldb " << db_filename - << ". Is it already existing?"; - - char label_i; - char label_j; - char* pixels = new char[2 * rows * cols]; - const int kMaxKeyLength = 10; - char key[kMaxKeyLength]; - std::string value; - - caffe::Datum datum; - datum.set_channels(2); // one channel for each image in the pair - datum.set_height(rows); - datum.set_width(cols); - LOG(INFO) << "A total of " << num_items << " items."; - LOG(INFO) << "Rows: " << rows << " Cols: " << cols; - for (int itemid = 0; itemid < num_items; ++itemid) { - int i = caffe::caffe_rng_rand() % num_items; // pick a random pair - int j = caffe::caffe_rng_rand() % num_items; - read_image(&image_file, &label_file, i, rows, cols, - pixels, &label_i); - read_image(&image_file, &label_file, j, rows, cols, - pixels + (rows * cols), &label_j); - datum.set_data(pixels, 2*rows*cols); - if (label_i == label_j) { - datum.set_label(1); - } else { - datum.set_label(0); - } - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", itemid); - db->Put(leveldb::WriteOptions(), std::string(key), value); - } - - delete db; 
- delete pixels; -} - -int main(int argc, char** argv) { - if (argc != 4) { - printf("This script converts the MNIST dataset to the leveldb format used\n" - "by caffe to train a siamese network.\n" - "Usage:\n" - " convert_mnist_data input_image_file input_label_file " - "output_db_file\n" - "The MNIST dataset could be downloaded at\n" - " http://yann.lecun.com/exdb/mnist/\n" - "You should gunzip them after downloading.\n"); - } else { - google::InitGoogleLogging(argv[0]); - convert_dataset(argv[1], argv[2], argv[3]); - } - return 0; -} diff --git a/examples/siamese/create_lfw_siamese.sh b/examples/siamese/create_lfw_siamese.sh deleted file mode 100755 index 3790b9eb2a0..00000000000 --- a/examples/siamese/create_lfw_siamese.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env sh -# This script converts the mnist data into leveldb format. - -EXAMPLES=./build/examples/siamese -DATA=./data/lfw - -echo "Creating leveldb..." - -rm -rf ./examples/siamese/lfw_siamese_train_leveldb -rm -rf ./examples/siamese/lfw_siamese_test_leveldb - -$EXAMPLES/convert_lfw_siamese_data.bin \ - $DATA/traindata \ - $DATA/trainlabel \ - ./examples/siamese/lfw_siamese_train_leveldb -$EXAMPLES/convert_mnist_siamese_data.bin \ - $DATA/testdata \ - $DATA/testlabel \ - ./examples/siamese/lfw_siamese_test_leveldb - -echo "Done." 
diff --git a/examples/siamese/lfw_siamese.prototxt b/examples/siamese/lfw_siamese.prototxt deleted file mode 100644 index 106d9aa76f4..00000000000 --- a/examples/siamese/lfw_siamese.prototxt +++ /dev/null @@ -1,113 +0,0 @@ -name: "lfw_siamese" -input: "data" -input_dim: 10000 -input_dim: 1 -input_dim: 150 -input_dim: 80 -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 500 - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "ip2" - type: "InnerProduct" - bottom: "ip1" - top: "ip2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 10 - } -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip2" - top: "feat" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 2 - } -} diff --git a/examples/siamese/lfw_siamese_solver.prototxt b/examples/siamese/lfw_siamese_solver.prototxt deleted file mode 100644 index 2aaafb63c1f..00000000000 --- a/examples/siamese/lfw_siamese_solver.prototxt +++ /dev/null @@ -1,25 +0,0 @@ -# The train/test net protocol buffer definition -net: "examples/siamese/lfw_siamese_train_test.prototxt" -# test_iter 
specifies how many forward passes the test should carry out. -# In the case of MNIST, we have test batch size 100 and 100 test iterations, -# covering the full 10,000 testing images. -test_iter: 100 -# Carry out testing every 500 training iterations. -test_interval: 500 -# The base learning rate, momentum and the weight decay of the network. -base_lr: 0.01 -momentum: 0.9 -weight_decay: 0.0000 -# The learning rate policy -lr_policy: "inv" -gamma: 0.0001 -power: 0.75 -# Display every 100 iterations -display: 100 -# The maximum number of iterations -max_iter: 50000 -# snapshot intermediate results -snapshot: 5000 -snapshot_prefix: "examples/siamese/lfw_siamese" -# solver mode: CPU or GPU -solver_mode: CPU diff --git a/examples/siamese/lfw_siamese_train_test.prototxt b/examples/siamese/lfw_siamese_train_test.prototxt deleted file mode 100644 index 049187bf3d4..00000000000 --- a/examples/siamese/lfw_siamese_train_test.prototxt +++ /dev/null @@ -1,349 +0,0 @@ -name: "lfw_siamese_train_test" -layer { - name: "pair_data" - type: "Data" - top: "pair_data" - top: "sim" - include { - phase: TRAIN - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/siamese/lfw_siamese_train_leveldb" - batch_size: 64 - } -} -layer { - name: "pair_data" - type: "Data" - top: "pair_data" - top: "sim" - include { - phase: TEST - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/siamese/lfw_siamese_test_leveldb" - batch_size: 100 - } -} -layer { - name: "slice_pair" - type: "Slice" - bottom: "pair_data" - top: "data" - top: "data_p" - slice_param { - slice_dim: 1 - slice_point: 1 - } -} -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1" - type: 
"Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "ip2" - type: "InnerProduct" - bottom: "ip1" - top: "ip2" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip2" - top: "feat" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "conv1_p" - type: "Convolution" - bottom: "data_p" - top: "conv1_p" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1_p" - type: "Pooling" - bottom: "conv1_p" - top: "pool1_p" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - 
name: "conv2_p" - type: "Convolution" - bottom: "pool1_p" - top: "conv2_p" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2_p" - type: "Pooling" - bottom: "conv2_p" - top: "pool2_p" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1_p" - type: "InnerProduct" - bottom: "pool2_p" - top: "ip1_p" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1_p" - type: "ReLU" - bottom: "ip1_p" - top: "ip1_p" -} -layer { - name: "ip2_p" - type: "InnerProduct" - bottom: "ip1_p" - top: "ip2_p" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat_p" - type: "InnerProduct" - bottom: "ip2_p" - top: "feat_p" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "loss" - type: "ContrastiveLoss" - bottom: "feat" - bottom: "feat_p" - bottom: "sim" - top: "loss" - contrastive_loss_param { - margin: 1 - } -} diff --git a/examples/siamese/mnist_siamese_solver.prototxt b/examples/siamese/mnist_siamese_solver.prototxt index d4d994d1389..07ac88de057 100644 --- a/examples/siamese/mnist_siamese_solver.prototxt +++ b/examples/siamese/mnist_siamese_solver.prototxt @@ -7,7 +7,7 @@ test_iter: 100 # Carry out testing every 500 training iterations. 
test_interval: 500 # The base learning rate, momentum and the weight decay of the network. -base_lr: 0.01 +base_lr: 0.001 momentum: 0.9 weight_decay: 0.0000 # The learning rate policy diff --git a/examples/siamese/train_lfw_siamese.sh b/examples/siamese/train_lfw_siamese.sh deleted file mode 100755 index 0a879a65419..00000000000 --- a/examples/siamese/train_lfw_siamese.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env sh - -TOOLS=./build/tools - -$TOOLS/caffe train --solver=examples/siamese/lfw_siamese_solver.prototxt diff --git a/examples/triplet/convert_3d_triplet_data.cpp b/examples/triplet/convert_3d_triplet_data.cpp new file mode 100644 index 00000000000..ce1981d90da --- /dev/null +++ b/examples/triplet/convert_3d_triplet_data.cpp @@ -0,0 +1,202 @@ +// Usage: +// convert_3d_data input_image_file input_label_file output_db_file +#include // NOLINT(readability/streams) +#include +#include +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/math_functions.hpp" +#include "glog/logging.h" +#include "google/protobuf/text_format.h" +#include "leveldb/db.h" +#include "stdint.h" + +uint32_t swap_endian(uint32_t val) { + val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); + return (val << 16) | (val >> 16); +} + +void read_image(std::ifstream* image_file, std::ifstream* label_file, + uint32_t index, uint32_t rows, uint32_t cols, + char* pixels, char* label_temp, signed char* label) { + image_file->seekg(index * rows * cols + 16); + image_file->read(pixels, rows * cols); + label_file->seekg(index * 4 + 8); + label_file->read(label_temp, 4); + for (int i = 0; i < 4; i++) + *(label+i) = (signed char)*(label_temp+i); +} + +void convert_dataset(const char* image_filename, const char* label_filename, + const char* db_filename, const char* class_number) { + int class_num = atoi(class_number); + // Open files + std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); + std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); + 
CHECK(image_file) << "Unable to open file " << image_filename; + CHECK(label_file) << "Unable to open file " << label_filename; + // Read the magic and the meta data + uint32_t magic; + uint32_t num_items; + uint32_t num_labels; + uint32_t rows; + uint32_t cols; + + image_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + CHECK_EQ(magic, 2051) << "Incorrect image file magic."; + label_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + CHECK_EQ(magic, 2050) << "Incorrect label file magic."; + image_file.read(reinterpret_cast(&num_items), 4); + num_items = swap_endian(num_items); + label_file.read(reinterpret_cast(&num_labels), 4); + num_labels = swap_endian(num_labels); + CHECK_EQ(num_items, num_labels); + image_file.read(reinterpret_cast(&rows), 4); + rows = swap_endian(rows); + image_file.read(reinterpret_cast(&cols), 4); + cols = swap_endian(cols); + + // Open leveldb + leveldb::DB* db; + leveldb::Options options; + options.create_if_missing = true; + options.error_if_exists = true; + leveldb::Status status = leveldb::DB::Open( + options, db_filename, &db); + CHECK(status.ok()) << "Failed to open leveldb " << db_filename + << ". 
Is it already existing?"; + + char* label_temp = new char[4]; // label for unsigned char* + signed char* label_i = new signed char[4]; // label for triplet + signed char* label_j = new signed char[4]; + signed char* label_k = new signed char[4]; + signed char* label_l = new signed char[4]; // label for pair wise + signed char* label_m = new signed char[4]; + char* pixels1 = new char[rows * cols]; + char* pixels2 = new char[rows * cols]; + char* pixels3 = new char[rows * cols]; + char* pixels4 = new char[rows * cols]; + char* pixels5 = new char[rows * cols]; + const int kMaxKeyLength = 10; + char key[kMaxKeyLength]; + std::string value; + caffe::Datum datum; + datum.set_channels(1); + datum.set_height(rows); + datum.set_width(cols); + LOG(INFO) << "A total of " << num_items << " items."; + LOG(INFO) << "Rows: " << rows << " Cols: " << cols; + int counter = 0; + for (unsigned int times = 0; times < 5; ++times) { + // iteration in the samples of all class + for (unsigned int itemid = 0; itemid < num_items/class_num; ++itemid) { + // iteration in the samples in one class + for (unsigned int class_ind = 0; class_ind < class_num; ++class_ind) { + // use reference sample one by one at each iteration + int i = itemid % num_items + class_ind*num_items/class_num; + int j = caffe::caffe_rng_rand() % num_items; // pick triplet groups + int k = caffe::caffe_rng_rand() % num_items; + int l = caffe::caffe_rng_rand() % num_items; // pick pair wise groups + int m = caffe::caffe_rng_rand() % num_items; + read_image(&image_file, &label_file, i, rows, cols, // read triplet + pixels1, label_temp, label_i); + read_image(&image_file, &label_file, j, rows, cols, + pixels2, label_temp, label_j); + read_image(&image_file, &label_file, k, rows, cols, + pixels3, label_temp, label_k); + read_image(&image_file, &label_file, l, rows, cols, // read pair wise + pixels4, label_temp, label_l); + read_image(&image_file, &label_file, m, rows, cols, + pixels5, label_temp, label_m); + + bool pair_pass = 
false; + bool triplet1_pass = false; + bool triplet2_pass = false; + bool triplet3_class_same = false; + bool triplet3_pass = false; + + int ij_diff_x = static_cast(*(label_i+1)-*(label_j+1)); + int ij_diff_y = static_cast(*(label_i+2)-*(label_j+2)); + int ij_diff_z = static_cast(*(label_i+3)-*(label_j+3)); + int im_diff_x = static_cast(*(label_i+1)-*(label_m+1)); + int im_diff_y = static_cast(*(label_i+2)-*(label_m+2)); + int im_diff_z = static_cast(*(label_i+3)-*(label_m+3)); + + int ij_x = ij_diff_x*ij_diff_x; + int ij_y = ij_diff_y*ij_diff_y; + int ij_z = ij_diff_z*ij_diff_z; + int im_x = im_diff_x*im_diff_x; + int im_y = im_diff_y*im_diff_y; + int im_z = im_diff_z*im_diff_z; + + float dist_ij = std::sqrt(ij_x + ij_y + ij_z); + float dist_im = std::sqrt(im_x + im_y + im_z); + if (*label_i == *label_j && dist_ij < 100/2) + pair_pass = true; + if (pair_pass && (*label_i != *label_k)) + triplet1_pass = true; + if (pair_pass && (*label_i != *label_l)) + triplet2_pass = true; + if (pair_pass && (*label_i == *label_m)) + triplet3_class_same = true; + if (triplet3_class_same && dist_im > 100*sqrt(2)) + triplet3_pass = true; + if (pair_pass && triplet1_pass && triplet2_pass && triplet3_pass) { + datum.set_data(pixels1, rows*cols); // set data + datum.set_label(int(*label_i)); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", counter); + db->Put(leveldb::WriteOptions(), std::string(key), value); + counter++; + datum.set_data(pixels2, rows*cols); // set data + datum.set_label(int(*label_j)); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", counter); + db->Put(leveldb::WriteOptions(), std::string(key), value); + counter++; + datum.set_data(pixels3, rows*cols); // set data + datum.set_label(int(*label_k)); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", counter); + db->Put(leveldb::WriteOptions(), std::string(key), value); + counter++; + datum.set_data(pixels4, rows*cols); // set data + 
datum.set_label(int(*label_l)); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", counter); + db->Put(leveldb::WriteOptions(), std::string(key), value); + counter++; + datum.set_data(pixels5, rows*cols); // set data + datum.set_label(int(*label_m)); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", counter); + db->Put(leveldb::WriteOptions(), std::string(key), value); + counter++; + } else { + class_ind--; + } + } // iteration in the samples of all class + } // iteration in the samples in one class + } // iteration in times + delete db; + delete pixels1; + delete pixels2; + delete pixels3; + delete pixels4; + delete pixels5; +} + +int main(int argc, char** argv) { + if (argc != 5) { + printf("This script converts the images dataset to the leveldb format used\n" + "by caffe to train a triplet network.\n" + "Usage:\n" + " convert_3d_data input_image_file input_label_file " + "output_db_file class_number\n"); + } else { + google::InitGoogleLogging(argv[0]); + convert_dataset(argv[1], argv[2], argv[3], argv[4]); + } + return 0; +} diff --git a/examples/triplet/convert_lfw_triplet_data.cpp b/examples/triplet/convert_lfw_triplet_data.cpp deleted file mode 100644 index 280755fddab..00000000000 --- a/examples/triplet/convert_lfw_triplet_data.cpp +++ /dev/null @@ -1,163 +0,0 @@ -// Usage: -// convert_mnist_data input_image_file input_label_file output_db_file -// The MNIST dataset could be downloaded at -// http://yann.lecun.com/exdb/mnist/ -#include // NOLINT(readability/streams) -#include -#include "caffe/proto/caffe.pb.h" -#include "caffe/util/math_functions.hpp" -#include "glog/logging.h" -#include "google/protobuf/text_format.h" -#include "leveldb/db.h" -#include "stdint.h" - -uint32_t swap_endian(uint32_t val) { - val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); - return (val << 16) | (val >> 16); -} - -void read_image(std::ifstream* image_file, std::ifstream* label_file, - uint32_t index, uint32_t rows, 
uint32_t cols, - char* pixels, char* label_temp, signed char* label) { - image_file->seekg(index * rows * cols + 16); - image_file->read(pixels, rows * cols); - label_file->seekg(index * 4 + 8); - label_file->read(label_temp, 4); - for (int i = 0; i < 4; i++) - *(label+i) = (signed char)*(label_temp+i); -} - -void convert_dataset(const char* image_filename, const char* label_filename, - const char* db_filename) { - // Open files - std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); - std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); - CHECK(image_file) << "Unable to open file " << image_filename; - CHECK(label_file) << "Unable to open file " << label_filename; - // Read the magic and the meta data - uint32_t magic; - uint32_t num_items; - uint32_t num_labels; - uint32_t rows; - uint32_t cols; - - image_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - CHECK_EQ(magic, 2051) << "Incorrect image file magic."; - label_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - CHECK_EQ(magic, 2050) << "Incorrect label file magic."; - image_file.read(reinterpret_cast(&num_items), 4); - num_items = swap_endian(num_items); - label_file.read(reinterpret_cast(&num_labels), 4); - num_labels = swap_endian(num_labels); - CHECK_EQ(num_items, num_labels); - image_file.read(reinterpret_cast(&rows), 4); - rows = swap_endian(rows); - image_file.read(reinterpret_cast(&cols), 4); - cols = swap_endian(cols); - - // Open leveldb - leveldb::DB* db; - leveldb::Options options; - options.create_if_missing = true; - options.error_if_exists = true; - leveldb::Status status = leveldb::DB::Open( - options, db_filename, &db); - CHECK(status.ok()) << "Failed to open leveldb " << db_filename - << ". 
Is it already existing?"; - - char* label_temp = new char[4]; // label for unsigned char* - signed char* label_i = new signed char[4]; // label for triplet - signed char* label_j = new signed char[4]; - signed char* label_k = new signed char[4]; - signed char* label_l = new signed char[4]; // label for pair wise - signed char* label_m = new signed char[4]; - char* pixels = new char[5 * rows * cols]; - const int kMaxKeyLength = 10; - char key[kMaxKeyLength]; - std::string value; - - caffe::Datum datum; - datum.set_channels(5); // one channel for each image in the triplet and pair - datum.set_height(rows); - datum.set_width(cols); - LOG(INFO) << "A total of " << num_items << " items."; - LOG(INFO) << "Rows: " << rows << " Cols: " << cols; - for (unsigned int itemid = 0; itemid < 10 * num_items; ++itemid) {\ - int i = caffe::caffe_rng_rand() % num_items; // pick triplet groups - int j = caffe::caffe_rng_rand() % num_items; - int k = caffe::caffe_rng_rand() % num_items; - int l = caffe::caffe_rng_rand() % num_items; // pick pair wise groups - int m = caffe::caffe_rng_rand() % num_items; - read_image(&image_file, &label_file, i, rows, cols, // read triplet - pixels, label_temp, label_i); - read_image(&image_file, &label_file, j, rows, cols, - pixels + (rows * cols), label_temp, label_j); - read_image(&image_file, &label_file, k, rows, cols, - pixels + (2 * rows * cols), label_temp, label_k); - read_image(&image_file, &label_file, l, rows, cols, // read pair wise - pixels + (3 * rows * cols), label_temp, label_l); - read_image(&image_file, &label_file, m, rows, cols, - pixels + (4 * rows * cols), label_temp, label_m); - - datum.set_data(pixels, 5*rows*cols); // set data - bool triplet_class_pass = false; - bool triplet_class_same = false; - bool triplet_pose_pass = false; - bool pair_class_pass = false; - - int ij_diff_x = static_cast(*(label_i+1)-*(label_j+1)); - int ij_diff_y = static_cast(*(label_i+2)-*(label_j+2)); - int ij_diff_z = 
static_cast(*(label_i+3)-*(label_j+3)); - int ik_diff_x = static_cast(*(label_i+1)-*(label_k+1)); - int ik_diff_y = static_cast(*(label_i+2)-*(label_k+2)); - int ik_diff_z = static_cast(*(label_i+3)-*(label_k+3)); - - int ij_x = ij_diff_x*ij_diff_x; - int ij_y = ij_diff_y*ij_diff_y; - int ij_z = ij_diff_z*ij_diff_z; - int ik_x = ik_diff_x*ik_diff_x; - int ik_y = ik_diff_y*ik_diff_y; - int ik_z = ik_diff_z*ik_diff_z; - - int dist_ij = ij_x + ij_y + ij_z; - int dist_ik = ik_x + ik_y + ik_z; - if ((*label_i == *label_j) && (*label_i == *label_k)) - triplet_class_same = true; - if ((dist_ij < dist_ik) && (triplet_class_same)) - triplet_pose_pass = true; - if ((*label_i == *label_j) && (*label_i != *label_k)) - triplet_class_pass = true; - if (*label_l == *label_m) - pair_class_pass = true; - if ((triplet_class_pass || triplet_pose_pass) && pair_class_pass) { - datum.set_label(1); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", itemid); - db->Put(leveldb::WriteOptions(), std::string(key), value); - } else { - itemid--; - datum.set_label(0); - } - } - delete db; - delete pixels; -} - -int main(int argc, char** argv) { - if (argc != 4) { - printf("This script converts the MNIST dataset to the leveldb format used\n" - "by caffe to train a siamese network.\n" - "Usage:\n" - " convert_mnist_data input_image_file input_label_file " - "output_db_file\n" - "The MNIST dataset could be downloaded at\n" - " http://yann.lecun.com/exdb/mnist/\n" - "You should gunzip them after downloading.\n"); - } else { - google::InitGoogleLogging(argv[0]); - convert_dataset(argv[1], argv[2], argv[3]); - } - return 0; -} diff --git a/examples/triplet/convert_mnist_triplet_data.cpp b/examples/triplet/convert_mnist_triplet_data.cpp deleted file mode 100644 index c59a75efe01..00000000000 --- a/examples/triplet/convert_mnist_triplet_data.cpp +++ /dev/null @@ -1,140 +0,0 @@ -// This script converts the MNIST dataset to the leveldb format used -// by caffe to train siamese 
network. -// Usage: -// convert_mnist_data input_image_file input_label_file output_db_file -// The MNIST dataset could be downloaded at -// http://yann.lecun.com/exdb/mnist/ -#include // NOLINT(readability/streams) -#include - -#include "glog/logging.h" -#include "google/protobuf/text_format.h" -#include "leveldb/db.h" -#include "stdint.h" - -#include "caffe/proto/caffe.pb.h" -#include "caffe/util/math_functions.hpp" - -uint32_t swap_endian(uint32_t val) { - val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); - return (val << 16) | (val >> 16); -} - -void read_image(std::ifstream* image_file, std::ifstream* label_file, - uint32_t index, uint32_t rows, uint32_t cols, - char* pixels, char* label) { - image_file->seekg(index * rows * cols + 16); - image_file->read(pixels, rows * cols); - label_file->seekg(index + 8); - label_file->read(label, 1); -} - -void convert_dataset(const char* image_filename, const char* label_filename, - const char* db_filename) { - // Open files - std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); - std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); - CHECK(image_file) << "Unable to open file " << image_filename; - CHECK(label_file) << "Unable to open file " << label_filename; - // Read the magic and the meta data - uint32_t magic; - uint32_t num_items; - uint32_t num_labels; - uint32_t rows; - uint32_t cols; - - image_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - CHECK_EQ(magic, 2051) << "Incorrect image file magic."; - label_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - CHECK_EQ(magic, 2049) << "Incorrect label file magic."; - image_file.read(reinterpret_cast(&num_items), 4); - num_items = swap_endian(num_items); - label_file.read(reinterpret_cast(&num_labels), 4); - num_labels = swap_endian(num_labels); - CHECK_EQ(num_items, num_labels); - image_file.read(reinterpret_cast(&rows), 4); - rows = swap_endian(rows); - 
image_file.read(reinterpret_cast(&cols), 4); - cols = swap_endian(cols); - - // Open leveldb - leveldb::DB* db; - leveldb::Options options; - options.create_if_missing = true; - options.error_if_exists = true; - leveldb::Status status = leveldb::DB::Open( - options, db_filename, &db); - CHECK(status.ok()) << "Failed to open leveldb " << db_filename - << ". Is it already existing?"; - - char label_i; // label for triplet - char label_j; - char label_k; - char label_l; // label for pair wise - char label_m; - char* pixels = new char[5 * rows * cols]; - const int kMaxKeyLength = 10; - char key[kMaxKeyLength]; - std::string value; - - caffe::Datum datum; - datum.set_channels(5); // one channel for each image in the triplet and pair - datum.set_height(rows); - datum.set_width(cols); - LOG(INFO) << "A total of " << num_items << " items."; - LOG(INFO) << "Rows: " << rows << " Cols: " << cols; - for (int itemid = 0; itemid < num_items; ++itemid) { - // pick triplet groups - int i = caffe::caffe_rng_rand() % num_items; - int j = caffe::caffe_rng_rand() % num_items; - int k = caffe::caffe_rng_rand() % num_items; - // pick pair wise groups - int l = caffe::caffe_rng_rand() % num_items; - int m = caffe::caffe_rng_rand() % num_items; - // read triplet groups - read_image(&image_file, &label_file, i, rows, cols, - pixels, &label_i); - read_image(&image_file, &label_file, j, rows, cols, - pixels + (rows * cols), &label_j); - read_image(&image_file, &label_file, k, rows, cols, - pixels + (2 * rows * cols), &label_k); - // pair wise groups - read_image(&image_file, &label_file, l, rows, cols, - pixels + (3 * rows * cols), &label_l); - read_image(&image_file, &label_file, m, rows, cols, - pixels + (4 * rows * cols), &label_m); - - datum.set_data(pixels, 5*rows*cols); // set data - if ((label_i == label_j && label_i != label_k) && (label_l == label_m)) { - datum.set_label(1); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", itemid); - 
db->Put(leveldb::WriteOptions(), std::string(key), value); - } else { - itemid--; - datum.set_label(0); - } - } - - delete db; - delete pixels; -} - -int main(int argc, char** argv) { - if (argc != 4) { - printf("This script converts the MNIST dataset to the leveldb format used\n" - "by caffe to train a siamese network.\n" - "Usage:\n" - " convert_mnist_data input_image_file input_label_file " - "output_db_file\n" - "The MNIST dataset could be downloaded at\n" - " http://yann.lecun.com/exdb/mnist/\n" - "You should gunzip them after downloading.\n"); - } else { - google::InitGoogleLogging(argv[0]); - convert_dataset(argv[1], argv[2], argv[3]); - } - return 0; -} diff --git a/examples/triplet/create_lfw_triplet.sh b/examples/triplet/create_lfw_triplet.sh deleted file mode 100755 index 382a9021f10..00000000000 --- a/examples/triplet/create_lfw_triplet.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env sh -# This script converts the lfw data into leveldb format. - -EXAMPLES=./build/examples/triplet -DATA=./data/lfw - -echo "Creating leveldb..." - -rm -rf ./examples/triplet/lfw_triplet_train_leveldb -rm -rf ./examples/triplet/lfw_triplet_test_leveldb - -$EXAMPLES/convert_lfw_triplet_data.bin \ - $DATA/traindata \ - $DATA/trainlabel \ - ./examples/triplet/lfw_triplet_train_leveldb -$EXAMPLES/convert_lfw_triplet_data.bin \ - $DATA/testdata \ - $DATA/testlabel \ - ./examples/triplet/lfw_triplet_test_leveldb - -echo "Done." diff --git a/examples/triplet/create_mnist_triplet.sh b/examples/triplet/create_mnist_triplet.sh deleted file mode 100755 index f404f2aa255..00000000000 --- a/examples/triplet/create_mnist_triplet.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env sh -# This script converts the mnist data into leveldb format. - -EXAMPLES=./build/examples/triplet -DATA=./data/mnist - -echo "Creating leveldb..." 
- -rm -rf ./examples/triplet/mnist_siamese_train_leveldb -rm -rf ./examples/triplet/mnist_siamese_test_leveldb - -$EXAMPLES/convert_mnist_triplet_data.bin \ - $DATA/train-images-idx3-ubyte \ - $DATA/train-labels-idx1-ubyte \ - ./examples/triplet/mnist_triplet_train_leveldb -$EXAMPLES/convert_mnist_triplet_data.bin \ - $DATA/t10k-images-idx3-ubyte \ - $DATA/t10k-labels-idx1-ubyte \ - ./examples/triplet/mnist_triplet_test_leveldb - -echo "Done." diff --git a/examples/triplet/lfw_triplet.prototxt b/examples/triplet/lfw_triplet.prototxt deleted file mode 100644 index 9537d1feb8b..00000000000 --- a/examples/triplet/lfw_triplet.prototxt +++ /dev/null @@ -1,113 +0,0 @@ -name: "lfw_siamese" -input: "data" -input_dim: 10000 -input_dim: 1 -input_dim: 150 -input_dim: 130 -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 500 - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "ip2" - type: "InnerProduct" - bottom: "ip1" - top: "ip2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 10 - } -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip2" - top: "feat" - param { - lr_mult: 
1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 2 - } -} diff --git a/examples/triplet/lfw_triplet_solver.prototxt b/examples/triplet/lfw_triplet_solver.prototxt deleted file mode 100644 index eb4c2c369e9..00000000000 --- a/examples/triplet/lfw_triplet_solver.prototxt +++ /dev/null @@ -1,25 +0,0 @@ -# The train/test net protocol buffer definition -net: "examples/triplet/lfw_triplet_train_test.prototxt" -# test_iter specifies how many forward passes the test should carry out. -# In the case of lfw, we have test batch size 100 and 100 test iterations, -# covering the full 10,000 testing images. -test_iter: 100 -# Carry out testing every 500 training iterations. -test_interval: 500 -# The base learning rate, momentum and the weight decay of the network. -base_lr: 0.01 -momentum: 0.9 -weight_decay: 0.0000 -# The learning rate policy -lr_policy: "inv" -gamma: 0.0001 -power: 0.75 -# Display every 100 iterations -display: 100 -# The maximum number of iterations -max_iter: 50000 -# snapshot intermediate results -snapshot: 5000 -snapshot_prefix: "examples/triplet/lfw_triplet" -# solver mode: CPU or GPU -solver_mode: CPU diff --git a/examples/triplet/lfw_triplet_train_test.prototxt b/examples/triplet/lfw_triplet_train_test.prototxt deleted file mode 100644 index 59ef26e90a4..00000000000 --- a/examples/triplet/lfw_triplet_train_test.prototxt +++ /dev/null @@ -1,500 +0,0 @@ -name: "lfw_triplet_train_test" -layer { - name: "triplet_data" - type: "Data" - top: "triplet_data" - top: "sim" - include { - phase: TRAIN - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/triplet/lfw_triplet_train_leveldb" - batch_size: 64 - } -} -layer { - name: "triplet_data" - type: "Data" - top: "triplet_data" - top: "sim" - include { - phase: TEST - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/triplet/lfw_triplet_test_leveldb" - batch_size: 100 - } -} -layer { - name: "slice_triplet" - type: "Slice" - bottom: 
"triplet_data" - top: "data" - top: "data_true" - top: "data_false" - slice_param { - slice_dim: 1 - slice_point: 1 - slice_point: 2 - } -} -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "ip2" - type: "InnerProduct" - bottom: "ip1" - top: "ip2" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip2" - top: "feat" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - 
} - } -} -layer { - name: "conv1_true" - type: "Convolution" - bottom: "data_true" - top: "conv1_true" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1_true" - type: "Pooling" - bottom: "conv1_true" - top: "pool1_true" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2_true" - type: "Convolution" - bottom: "pool1_true" - top: "conv2_true" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2_true" - type: "Pooling" - bottom: "conv2_true" - top: "pool2_true" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1_true" - type: "InnerProduct" - bottom: "pool2_true" - top: "ip1_true" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1_true" - type: "ReLU" - bottom: "ip1_true" - top: "ip1_true" -} -layer { - name: "ip2_true" - type: "InnerProduct" - bottom: "ip1_true" - top: "ip2_true" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat_true" - type: "InnerProduct" - bottom: "ip2_true" - top: "feat_true" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} 
-layer { - name: "conv1_false" - type: "Convolution" - bottom: "data_false" - top: "conv1_false" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1_false" - type: "Pooling" - bottom: "conv1_false" - top: "pool1_false" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2_false" - type: "Convolution" - bottom: "pool1_false" - top: "conv2_false" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2_false" - type: "Pooling" - bottom: "conv2_false" - top: "pool2_false" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1_false" - type: "InnerProduct" - bottom: "pool2_false" - top: "ip1_false" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1_false" - type: "ReLU" - bottom: "ip1_false" - top: "ip1_false" -} -layer { - name: "ip2_false" - type: "InnerProduct" - bottom: "ip1_false" - top: "ip2_false" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat_false" - type: "InnerProduct" - bottom: "ip2_false" - top: "feat_false" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: 
"constant" - } - } -} -layer { - name: "loss" - type: "TripletLoss" - bottom: "feat" - bottom: "feat_true" - bottom: "feat_false" - bottom: "sim" - top: "loss" - triplet_loss_param { - margin: 0.2 - } -} - diff --git a/examples/triplet/mnist_triplet.prototxt b/examples/triplet/mnist_triplet.prototxt deleted file mode 100644 index 0e903f85909..00000000000 --- a/examples/triplet/mnist_triplet.prototxt +++ /dev/null @@ -1,113 +0,0 @@ -name: "mnist_siamese" -input: "data" -input_dim: 10000 -input_dim: 1 -input_dim: 28 -input_dim: 28 -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 500 - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "ip2" - type: "InnerProduct" - bottom: "ip1" - top: "ip2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 10 - } -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip2" - top: "feat" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 2 - } -} diff --git a/examples/triplet/mnist_triplet_solver.prototxt b/examples/triplet/mnist_triplet_solver.prototxt deleted file mode 100644 index 
edd8e1e0338..00000000000 --- a/examples/triplet/mnist_triplet_solver.prototxt +++ /dev/null @@ -1,25 +0,0 @@ -# The train/test net protocol buffer definition -net: "examples/triplet/mnist_triplet_train_test.prototxt" -# test_iter specifies how many forward passes the test should carry out. -# In the case of MNIST, we have test batch size 100 and 100 test iterations, -# covering the full 10,000 testing images. -test_iter: 100 -# Carry out testing every 500 training iterations. -test_interval: 500 -# The base learning rate, momentum and the weight decay of the network. -base_lr: 0.001 -momentum: 0.9 -weight_decay: 0.0000 -# The learning rate policy -lr_policy: "inv" -gamma: 0.0001 -power: 0.75 -# Display every 100 iterations -display: 100 -# The maximum number of iterations -max_iter: 50000 -# snapshot intermediate results -snapshot: 5000 -snapshot_prefix: "examples/triplet/mnist_triplet" -# solver mode: CPU or GPU -solver_mode: CPU diff --git a/examples/triplet/mnist_triplet_train_test.prototxt b/examples/triplet/mnist_triplet_train_test.prototxt deleted file mode 100644 index da25dec31de..00000000000 --- a/examples/triplet/mnist_triplet_train_test.prototxt +++ /dev/null @@ -1,500 +0,0 @@ -name: "mnist_triplet_train_test" -layer { - name: "triplet_data" - type: "Data" - top: "triplet_data" - top: "sim" - include { - phase: TRAIN - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/triplet/mnist_triplet_train_leveldb" - batch_size: 64 - } -} -layer { - name: "triplet_data" - type: "Data" - top: "triplet_data" - top: "sim" - include { - phase: TEST - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/triplet/mnist_triplet_test_leveldb" - batch_size: 100 - } -} -layer { - name: "slice_triplet" - type: "Slice" - bottom: "triplet_data" - top: "data" - top: "data_true" - top: "data_false" - slice_param { - slice_dim: 1 - slice_point: 1 - slice_point: 2 - } -} -layer { - name: "conv1" - type: "Convolution" - 
bottom: "data" - top: "conv1" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "ip2" - type: "InnerProduct" - bottom: "ip1" - top: "ip2" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip2" - top: "feat" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "conv1_true" - type: "Convolution" - bottom: "data_true" - top: "conv1_true" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 
- } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1_true" - type: "Pooling" - bottom: "conv1_true" - top: "pool1_true" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2_true" - type: "Convolution" - bottom: "pool1_true" - top: "conv2_true" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2_true" - type: "Pooling" - bottom: "conv2_true" - top: "pool2_true" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1_true" - type: "InnerProduct" - bottom: "pool2_true" - top: "ip1_true" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1_true" - type: "ReLU" - bottom: "ip1_true" - top: "ip1_true" -} -layer { - name: "ip2_true" - type: "InnerProduct" - bottom: "ip1_true" - top: "ip2_true" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat_true" - type: "InnerProduct" - bottom: "ip2_true" - top: "feat_true" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "conv1_false" - type: "Convolution" - bottom: "data_false" - top: "conv1_false" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - 
convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1_false" - type: "Pooling" - bottom: "conv1_false" - top: "pool1_false" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2_false" - type: "Convolution" - bottom: "pool1_false" - top: "conv2_false" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2_false" - type: "Pooling" - bottom: "conv2_false" - top: "pool2_false" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1_false" - type: "InnerProduct" - bottom: "pool2_false" - top: "ip1_false" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1_false" - type: "ReLU" - bottom: "ip1_false" - top: "ip1_false" -} -layer { - name: "ip2_false" - type: "InnerProduct" - bottom: "ip1_false" - top: "ip2_false" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat_false" - type: "InnerProduct" - bottom: "ip2_false" - top: "feat_false" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "loss" - type: "TripletLoss" - bottom: "feat" - bottom: "feat_true" - bottom: "feat_false" - bottom: "sim" - top: "loss" - triplet_loss_param { - margin: 1 - } 
-} - diff --git a/examples/triplet/train_lfw_triplet.sh b/examples/triplet/train_lfw_triplet.sh deleted file mode 100755 index 076738a5e63..00000000000 --- a/examples/triplet/train_lfw_triplet.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env sh - -TOOLS=./build/tools - -$TOOLS/caffe train --solver=examples/triplet/lfw_triplet_solver.prototxt diff --git a/examples/triplet/train_mnist_triplet.sh b/examples/triplet/train_mnist_triplet.sh deleted file mode 100755 index 683cda2963b..00000000000 --- a/examples/triplet/train_mnist_triplet.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env sh - -TOOLS=./release/tools - -$TOOLS/caffe train --solver=examples/triplet/mnist_triplet_solver.prototxt diff --git a/src/caffe/layers/triplet_loss_layer.cpp b/src/caffe/layers/triplet_loss_layer.cpp index add25687899..613fa703676 100644 --- a/src/caffe/layers/triplet_loss_layer.cpp +++ b/src/caffe/layers/triplet_loss_layer.cpp @@ -81,6 +81,40 @@ void TripletLossLayer::Forward_cpu( } loss = loss / static_cast(num_set) / Dtype(2); top[0]->mutable_cpu_data()[0] = loss; + } else { + for (int i = 0; i < num_set; ++i) { + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive + diff_pos.mutable_cpu_data() + i*dim); // reference-pose_close + // Loss component calculated from reference and close one + dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(dim, + diff_pos.cpu_data() + i*dim, diff_pos.cpu_data() + i*dim); + // a b is a similar pair for pair wise + // loss accumulated by the pair wise part + loss += dist_sq_pos.cpu_data()[i]; + dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; + dist_sq_.mutable_cpu_data()[i] += margin; + for (int triplet = 0; triplet < num_triplets; ++triplet) { + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*i + 2 + 
triplet)*dim, + diff_neg.mutable_cpu_data() + i*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + i*dim, diff_neg.cpu_data() + i*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[i] = 1 - \ + dist_sq_neg.cpu_data()[i] / dist_sq_.mutable_cpu_data()[i]; + // loss accumulated accumulated by the triplet part + loss += std::max(dist_sq_.cpu_data()[i], Dtype(0.0)); + } + } + loss = loss / static_cast(num_set) / Dtype(2); + top[0]->mutable_cpu_data()[0] = loss; + } } template @@ -97,13 +131,115 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, if (propagate_down[0]) { const Dtype sign = 1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(bottom[i]->num()); - int num = bottom[i]->num(); - int channels = bottom[i]->channels(); - for (int j = 0; j < num; ++j) { - Dtype* bout = bottom[i]->mutable_cpu_diff(); - if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - // similar pairs + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + // the pair part + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; + // Loss component 
calculated from negative part + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // similar pair in triplet + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(1.0), + bout + (2 + num_triplets)*j*dim); + // dissimilar pair in triplet + caffe_cpu_axpby( + dim, + -alpha, + diff_neg.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + // BP for feat2(extracted from the closest sample) + for (int i = 1; i < 2; ++i) { + if (propagate_down[0]) { + const Dtype sign = -1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + // the pair part + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // similar pair in triplet + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + // BP for negative feature used in the num_triplets triplet part + for (int i = 2; i < 2 + num_triplets; ++i) { + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * 
top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + i)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // dissimilar pairs caffe_cpu_axpby( dim, alpha, @@ -132,10 +268,10 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, diff_pos.cpu_data() + (j*dim), Dtype(0.0), bout + ((2 + num_triplets)*j + i)*dim); + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; // Loss component calculated from negative part caffe_sub( dim, @@ -183,10 +319,10 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, diff_pos.cpu_data() + (j*dim), Dtype(0.0), bout + ((2 + num_triplets)*j + i)*dim); + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; // Loss component calculated from negative part caffe_sub( dim, From d0c3f2421fc7cf7dffc9682cd591988b4dc0b768 Mon Sep 17 00:00:00 
2001 From: Wangyida Date: Mon, 10 Aug 2015 19:54:16 +0800 Subject: [PATCH 57/82] add initiate class name of triplet loss layer add initiate class name of triplet loss layer --- Makefile.orig | 633 +++++++++ Makefile_BACKUP_61727 | 649 +++++++++ Makefile_BASE_61727 | 0 Makefile_LOCAL_61727 | 633 +++++++++ Makefile_REMOTE_61727 | 631 +++++++++ examples/siamese/lfw_siamese.prototxt.orig | 117 ++ .../triplet/convert_3d_triplet_data.cpp.orig | 202 +++ .../triplet/convert_lfw_triplet_data.cpp.orig | 148 ++ .../convert_mnist_triplet_data.cpp.orig | 150 ++ examples/triplet/create_3d_triplet.sh.orig | 21 + .../triplet/lfw_triplet_solver.prototxt.orig | 40 + .../mnist_orpe_train_test.prototxt.orig | 184 +++ include/caffe/loss_layers.hpp.orig | 848 ++++++++++++ python/caffe/test/test_net_spec.py | 3 +- python/caffe/test/test_python_layer.py.orig | 153 +++ src/caffe/data_transformer.cpp | 20 + src/caffe/layers/tile_layer.cu | 53 +- src/caffe/layers/triplet_loss_layer.cpp.orig | 397 ++++++ src/caffe/layers/triplet_loss_layer.cu.orig | 536 ++++++++ .../triplet_loss_layer_BACKUP_62802.cpp | 423 ++++++ .../layers/triplet_loss_layer_BASE_62802.cpp | 298 ++++ .../layers/triplet_loss_layer_LOCAL_62802.cpp | 261 ++++ .../triplet_loss_layer_REMOTE_62802.cpp | 397 ++++++ src/caffe/proto/caffe.proto.orig | 1201 +++++++++++++++++ src/caffe/test/test_net.cpp | 9 + src/caffe/test/test_triplet_loss_layer.orig | 230 ++++ 26 files changed, 8197 insertions(+), 40 deletions(-) create mode 100644 Makefile.orig create mode 100644 Makefile_BACKUP_61727 create mode 100644 Makefile_BASE_61727 create mode 100644 Makefile_LOCAL_61727 create mode 100644 Makefile_REMOTE_61727 create mode 100644 examples/siamese/lfw_siamese.prototxt.orig create mode 100644 examples/triplet/convert_3d_triplet_data.cpp.orig create mode 100644 examples/triplet/convert_lfw_triplet_data.cpp.orig create mode 100644 examples/triplet/convert_mnist_triplet_data.cpp.orig create mode 100755 examples/triplet/create_3d_triplet.sh.orig 
create mode 100644 examples/triplet/lfw_triplet_solver.prototxt.orig create mode 100644 examples/triplet/mnist_orpe_train_test.prototxt.orig create mode 100644 include/caffe/loss_layers.hpp.orig create mode 100644 python/caffe/test/test_python_layer.py.orig create mode 100644 src/caffe/layers/triplet_loss_layer.cpp.orig create mode 100644 src/caffe/layers/triplet_loss_layer.cu.orig create mode 100644 src/caffe/layers/triplet_loss_layer_BACKUP_62802.cpp create mode 100644 src/caffe/layers/triplet_loss_layer_BASE_62802.cpp create mode 100644 src/caffe/layers/triplet_loss_layer_LOCAL_62802.cpp create mode 100644 src/caffe/layers/triplet_loss_layer_REMOTE_62802.cpp create mode 100644 src/caffe/proto/caffe.proto.orig create mode 100644 src/caffe/test/test_triplet_loss_layer.orig diff --git a/Makefile.orig b/Makefile.orig new file mode 100644 index 00000000000..287fa4ea52f --- /dev/null +++ b/Makefile.orig @@ -0,0 +1,633 @@ +PROJECT := caffe + +CONFIG_FILE := Makefile.config +# Explicitly check for the config file, otherwise make -k will proceed anyway. +ifeq ($(wildcard $(CONFIG_FILE)),) +$(error $(CONFIG_FILE) not found. See $(CONFIG_FILE).example.) +endif +include $(CONFIG_FILE) + +BUILD_DIR_LINK := $(BUILD_DIR) +ifeq ($(RELEASE_BUILD_DIR),) + RELEASE_BUILD_DIR := .$(BUILD_DIR)_release +endif +ifeq ($(DEBUG_BUILD_DIR),) + DEBUG_BUILD_DIR := .$(BUILD_DIR)_debug +endif + +DEBUG ?= 0 +ifeq ($(DEBUG), 1) + BUILD_DIR := $(DEBUG_BUILD_DIR) + OTHER_BUILD_DIR := $(RELEASE_BUILD_DIR) +else + BUILD_DIR := $(RELEASE_BUILD_DIR) + OTHER_BUILD_DIR := $(DEBUG_BUILD_DIR) +endif + +# All of the directories containing code. +SRC_DIRS := $(shell find * -type d -exec bash -c "find {} -maxdepth 1 \ + \( -name '*.cpp' -o -name '*.proto' \) | grep -q ." 
\; -print) + +# The target shared library name +LIB_BUILD_DIR := $(BUILD_DIR)/lib +STATIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).a +DYNAMIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).so + +############################## +# Get all source files +############################## +# CXX_SRCS are the source files excluding the test ones. +CXX_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cpp" -name "*.cpp") +# CU_SRCS are the cuda source files +CU_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cu" -name "*.cu") +# TEST_SRCS are the test source files +TEST_MAIN_SRC := src/$(PROJECT)/test/test_caffe_main.cpp +TEST_SRCS := $(shell find src/$(PROJECT) -name "test_*.cpp") +TEST_SRCS := $(filter-out $(TEST_MAIN_SRC), $(TEST_SRCS)) +TEST_CU_SRCS := $(shell find src/$(PROJECT) -name "test_*.cu") +GTEST_SRC := src/gtest/gtest-all.cpp +# TOOL_SRCS are the source files for the tool binaries +TOOL_SRCS := $(shell find tools -name "*.cpp") +# EXAMPLE_SRCS are the source files for the example binaries +EXAMPLE_SRCS := $(shell find examples -name "*.cpp") +# BUILD_INCLUDE_DIR contains any generated header files we want to include. +BUILD_INCLUDE_DIR := $(BUILD_DIR)/src +# PROTO_SRCS are the protocol buffer definitions +PROTO_SRC_DIR := src/$(PROJECT)/proto +PROTO_SRCS := $(wildcard $(PROTO_SRC_DIR)/*.proto) +# PROTO_BUILD_DIR will contain the .cc and obj files generated from +# PROTO_SRCS; PROTO_BUILD_INCLUDE_DIR will contain the .h header files +PROTO_BUILD_DIR := $(BUILD_DIR)/$(PROTO_SRC_DIR) +PROTO_BUILD_INCLUDE_DIR := $(BUILD_INCLUDE_DIR)/$(PROJECT)/proto +# NONGEN_CXX_SRCS includes all source/header files except those generated +# automatically (e.g., by proto). 
+NONGEN_CXX_SRCS := $(shell find \ + src/$(PROJECT) \ + include/$(PROJECT) \ + python/$(PROJECT) \ + matlab/+$(PROJECT)/private \ + examples \ + tools \ + -name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh") +LINT_SCRIPT := scripts/cpp_lint.py +LINT_OUTPUT_DIR := $(BUILD_DIR)/.lint +LINT_EXT := lint.txt +LINT_OUTPUTS := $(addsuffix .$(LINT_EXT), $(addprefix $(LINT_OUTPUT_DIR)/, $(NONGEN_CXX_SRCS))) +EMPTY_LINT_REPORT := $(BUILD_DIR)/.$(LINT_EXT) +NONEMPTY_LINT_REPORT := $(BUILD_DIR)/$(LINT_EXT) +# PY$(PROJECT)_SRC is the python wrapper for $(PROJECT) +PY$(PROJECT)_SRC := python/$(PROJECT)/_$(PROJECT).cpp +PY$(PROJECT)_SO := python/$(PROJECT)/_$(PROJECT).so +PY$(PROJECT)_HXX := include/$(PROJECT)/python_layer.hpp +# MAT$(PROJECT)_SRC is the mex entrance point of matlab package for $(PROJECT) +MAT$(PROJECT)_SRC := matlab/+$(PROJECT)/private/$(PROJECT)_.cpp +ifneq ($(MATLAB_DIR),) + MAT_SO_EXT := $(shell $(MATLAB_DIR)/bin/mexext) +endif +MAT$(PROJECT)_SO := matlab/+$(PROJECT)/private/$(PROJECT)_.$(MAT_SO_EXT) + +############################## +# Derive generated files +############################## +# The generated files for protocol buffers +PROTO_GEN_HEADER_SRCS := $(addprefix $(PROTO_BUILD_DIR)/, \ + $(notdir ${PROTO_SRCS:.proto=.pb.h})) +PROTO_GEN_HEADER := $(addprefix $(PROTO_BUILD_INCLUDE_DIR)/, \ + $(notdir ${PROTO_SRCS:.proto=.pb.h})) +PROTO_GEN_CC := $(addprefix $(BUILD_DIR)/, ${PROTO_SRCS:.proto=.pb.cc}) +PY_PROTO_BUILD_DIR := python/$(PROJECT)/proto +PY_PROTO_INIT := python/$(PROJECT)/proto/__init__.py +PROTO_GEN_PY := $(foreach file,${PROTO_SRCS:.proto=_pb2.py}, \ + $(PY_PROTO_BUILD_DIR)/$(notdir $(file))) +# The objects corresponding to the source files +# These objects will be linked into the final shared library, so we +# exclude the tool, example, and test objects. 
+CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o}) +CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o}) +PROTO_OBJS := ${PROTO_GEN_CC:.cc=.o} +OBJS := $(PROTO_OBJS) $(CXX_OBJS) $(CU_OBJS) +# tool, example, and test objects +TOOL_OBJS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o}) +TOOL_BUILD_DIR := $(BUILD_DIR)/tools +TEST_CXX_BUILD_DIR := $(BUILD_DIR)/src/$(PROJECT)/test +TEST_CU_BUILD_DIR := $(BUILD_DIR)/cuda/src/$(PROJECT)/test +TEST_CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o}) +TEST_CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o}) +TEST_OBJS := $(TEST_CXX_OBJS) $(TEST_CU_OBJS) +GTEST_OBJ := $(addprefix $(BUILD_DIR)/, ${GTEST_SRC:.cpp=.o}) +EXAMPLE_OBJS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o}) +# Output files for automatic dependency generation +DEPS := ${CXX_OBJS:.o=.d} ${CU_OBJS:.o=.d} ${TEST_CXX_OBJS:.o=.d} \ + ${TEST_CU_OBJS:.o=.d} $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d} +# tool, example, and test bins +TOOL_BINS := ${TOOL_OBJS:.o=.bin} +EXAMPLE_BINS := ${EXAMPLE_OBJS:.o=.bin} +# symlinks to tool bins without the ".bin" extension +TOOL_BIN_LINKS := ${TOOL_BINS:.bin=} +# Put the test binaries in build/test for convenience. +TEST_BIN_DIR := $(BUILD_DIR)/test +TEST_CU_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \ + $(foreach obj,$(TEST_CU_OBJS),$(basename $(notdir $(obj)))))) +TEST_CXX_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \ + $(foreach obj,$(TEST_CXX_OBJS),$(basename $(notdir $(obj)))))) +TEST_BINS := $(TEST_CXX_BINS) $(TEST_CU_BINS) +# TEST_ALL_BIN is the test binary that links caffe dynamically. 
+TEST_ALL_BIN := $(TEST_BIN_DIR)/test_all.testbin + +############################## +# Derive compiler warning dump locations +############################## +WARNS_EXT := warnings.txt +CXX_WARNS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o.$(WARNS_EXT)}) +CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o.$(WARNS_EXT)}) +TOOL_WARNS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o.$(WARNS_EXT)}) +EXAMPLE_WARNS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o.$(WARNS_EXT)}) +TEST_WARNS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o.$(WARNS_EXT)}) +TEST_CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o.$(WARNS_EXT)}) +ALL_CXX_WARNS := $(CXX_WARNS) $(TOOL_WARNS) $(EXAMPLE_WARNS) $(TEST_WARNS) +ALL_CU_WARNS := $(CU_WARNS) $(TEST_CU_WARNS) +ALL_WARNS := $(ALL_CXX_WARNS) $(ALL_CU_WARNS) + +EMPTY_WARN_REPORT := $(BUILD_DIR)/.$(WARNS_EXT) +NONEMPTY_WARN_REPORT := $(BUILD_DIR)/$(WARNS_EXT) + +############################## +# Derive include and lib directories +############################## +CUDA_INCLUDE_DIR := $(CUDA_DIR)/include + +CUDA_LIB_DIR := +# add /lib64 only if it exists +ifneq ("$(wildcard $(CUDA_DIR)/lib64)","") + CUDA_LIB_DIR += $(CUDA_DIR)/lib64 +endif +CUDA_LIB_DIR += $(CUDA_DIR)/lib + +INCLUDE_DIRS += $(BUILD_INCLUDE_DIR) ./src ./include +ifneq ($(CPU_ONLY), 1) + INCLUDE_DIRS += $(CUDA_INCLUDE_DIR) + LIBRARY_DIRS += $(CUDA_LIB_DIR) + LIBRARIES := cudart cublas curand +endif +LIBRARIES += glog gflags protobuf leveldb snappy \ + lmdb boost_system hdf5_hl hdf5 m \ + opencv_core opencv_highgui opencv_imgproc #opencv_imgcodecs +PYTHON_LIBRARIES := boost_python python2.7 +WARNINGS := -Wall -Wno-sign-compare + +############################## +# Set build directories +############################## + +DISTRIBUTE_DIR ?= distribute +DISTRIBUTE_SUBDIRS := $(DISTRIBUTE_DIR)/bin $(DISTRIBUTE_DIR)/lib +DIST_ALIASES := dist +ifneq ($(strip $(DISTRIBUTE_DIR)),distribute) + DIST_ALIASES += distribute +endif + +ALL_BUILD_DIRS := $(sort 
$(BUILD_DIR) $(addprefix $(BUILD_DIR)/, $(SRC_DIRS)) \ + $(addprefix $(BUILD_DIR)/cuda/, $(SRC_DIRS)) \ + $(LIB_BUILD_DIR) $(TEST_BIN_DIR) $(PY_PROTO_BUILD_DIR) $(LINT_OUTPUT_DIR) \ + $(DISTRIBUTE_SUBDIRS) $(PROTO_BUILD_INCLUDE_DIR)) + +############################## +# Set directory for Doxygen-generated documentation +############################## +DOXYGEN_CONFIG_FILE ?= ./.Doxyfile +# should be the same as OUTPUT_DIRECTORY in the .Doxyfile +DOXYGEN_OUTPUT_DIR ?= ./doxygen +DOXYGEN_COMMAND ?= doxygen +# All the files that might have Doxygen documentation. +DOXYGEN_SOURCES := $(shell find \ + src/$(PROJECT) \ + include/$(PROJECT) \ + python/ \ + matlab/ \ + examples \ + tools \ + -name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh" -or \ + -name "*.py" -or -name "*.m") +DOXYGEN_SOURCES += $(DOXYGEN_CONFIG_FILE) + + +############################## +# Configure build +############################## + +# Determine platform +UNAME := $(shell uname -s) +ifeq ($(UNAME), Linux) + LINUX := 1 +else ifeq ($(UNAME), Darwin) + OSX := 1 +endif + +# Linux +ifeq ($(LINUX), 1) + CXX ?= /usr/bin/g++ + GCCVERSION := $(shell $(CXX) -dumpversion | cut -f1,2 -d.) + # older versions of gcc are too dumb to build boost with -Wuninitalized + ifeq ($(shell echo | awk '{exit $(GCCVERSION) < 4.6;}'), 1) + WARNINGS += -Wno-uninitialized + endif + # boost::thread is reasonably called boost_thread (compare OS X) + # We will also explicitly add stdc++ to the link target. 
+ LIBRARIES += boost_thread stdc++ +endif + +# OS X: +# clang++ instead of g++ +# libstdc++ for NVCC compatibility on OS X >= 10.9 with CUDA < 7.0 +ifeq ($(OSX), 1) + CXX := /usr/bin/clang++ + ifneq ($(CPU_ONLY), 1) + CUDA_VERSION := $(shell $(CUDA_DIR)/bin/nvcc -V | grep -o 'release \d' | grep -o '\d') + ifeq ($(shell echo | awk '{exit $(CUDA_VERSION) < 7.0;}'), 1) + CXXFLAGS += -stdlib=libstdc++ + LINKFLAGS += -stdlib=libstdc++ + endif + # clang throws this warning for cuda headers + WARNINGS += -Wno-unneeded-internal-declaration + endif + # gtest needs to use its own tuple to not conflict with clang + COMMON_FLAGS += -DGTEST_USE_OWN_TR1_TUPLE=1 + # boost::thread is called boost_thread-mt to mark multithreading on OS X + LIBRARIES += boost_thread-mt + # we need to explicitly ask for the rpath to be obeyed + DYNAMIC_FLAGS := -install_name @rpath/libcaffe.so + ORIGIN := @loader_path +else + ORIGIN := \$$ORIGIN +endif + +# Custom compiler +ifdef CUSTOM_CXX + CXX := $(CUSTOM_CXX) +endif + +# Static linking +ifneq (,$(findstring clang++,$(CXX))) + STATIC_LINK_COMMAND := -Wl,-force_load $(STATIC_NAME) +else ifneq (,$(findstring g++,$(CXX))) + STATIC_LINK_COMMAND := -Wl,--whole-archive $(STATIC_NAME) -Wl,--no-whole-archive +else + # The following line must not be indented with a tab, since we are not inside a target + $(error Cannot static link with the $(CXX) compiler) +endif + +# Debugging +ifeq ($(DEBUG), 1) + COMMON_FLAGS += -DDEBUG -g -O0 + NVCCFLAGS += -G +else + COMMON_FLAGS += -DNDEBUG -O2 +endif + +# cuDNN acceleration configuration. 
+ifeq ($(USE_CUDNN), 1) + LIBRARIES += cudnn + COMMON_FLAGS += -DUSE_CUDNN +endif + +# CPU-only configuration +ifeq ($(CPU_ONLY), 1) + OBJS := $(PROTO_OBJS) $(CXX_OBJS) + TEST_OBJS := $(TEST_CXX_OBJS) + TEST_BINS := $(TEST_CXX_BINS) + ALL_WARNS := $(ALL_CXX_WARNS) + TEST_FILTER := --gtest_filter="-*GPU*" + COMMON_FLAGS += -DCPU_ONLY +endif + +# Python layer support +ifeq ($(WITH_PYTHON_LAYER), 1) + COMMON_FLAGS += -DWITH_PYTHON_LAYER + LIBRARIES += $(PYTHON_LIBRARIES) +endif + +# BLAS configuration (default = ATLAS) +BLAS ?= atlas +ifeq ($(BLAS), mkl) + # MKL + LIBRARIES += mkl_rt + COMMON_FLAGS += -DUSE_MKL + MKL_DIR ?= /opt/intel/mkl + BLAS_INCLUDE ?= $(MKL_DIR)/include + BLAS_LIB ?= $(MKL_DIR)/lib $(MKL_DIR)/lib/intel64 +else ifeq ($(BLAS), open) + # OpenBLAS + LIBRARIES += openblas +else + # ATLAS + ifeq ($(LINUX), 1) + ifeq ($(BLAS), atlas) + # Linux simply has cblas and atlas + LIBRARIES += cblas atlas + endif + else ifeq ($(OSX), 1) + # OS X packages atlas as the vecLib framework + LIBRARIES += cblas + # 10.10 has accelerate while 10.9 has veclib + XCODE_CLT_VER := $(shell pkgutil --pkg-info=com.apple.pkg.CLTools_Executables | grep -o 'version: 6') + ifneq (,$(findstring version: 6,$(XCODE_CLT_VER))) + BLAS_INCLUDE ?= /System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/ + LDFLAGS += -framework Accelerate + else + BLAS_INCLUDE ?= /System/Library/Frameworks/vecLib.framework/Versions/Current/Headers/ + LDFLAGS += -framework vecLib + endif + endif +endif +INCLUDE_DIRS += $(BLAS_INCLUDE) +LIBRARY_DIRS += $(BLAS_LIB) + +LIBRARY_DIRS += $(LIB_BUILD_DIR) + +# Automatic dependency generation (nvcc is handled separately) +CXXFLAGS += -MMD -MP + +# Complete build flags. 
+COMMON_FLAGS += $(foreach includedir,$(INCLUDE_DIRS),-I$(includedir)) +CXXFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) +NVCCFLAGS += -ccbin=$(CXX) -Xcompiler -fPIC $(COMMON_FLAGS) +# mex may invoke an older gcc that is too liberal with -Wuninitalized +MATLAB_CXXFLAGS := $(CXXFLAGS) -Wno-uninitialized +LINKFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) + +USE_PKG_CONFIG ?= 0 +ifeq ($(USE_PKG_CONFIG), 1) + PKG_CONFIG := $(shell pkg-config opencv --libs) +else + PKG_CONFIG := +endif +LDFLAGS += $(foreach librarydir,$(LIBRARY_DIRS),-L$(librarydir)) $(PKG_CONFIG) \ + $(foreach library,$(LIBRARIES),-l$(library)) +PYTHON_LDFLAGS := $(LDFLAGS) $(foreach library,$(PYTHON_LIBRARIES),-l$(library)) + +# 'superclean' target recursively* deletes all files ending with an extension +# in $(SUPERCLEAN_EXTS) below. This may be useful if you've built older +# versions of Caffe that do not place all generated files in a location known +# to the 'clean' target. +# +# 'supercleanlist' will list the files to be deleted by make superclean. +# +# * Recursive with the exception that symbolic links are never followed, per the +# default behavior of 'find'. +SUPERCLEAN_EXTS := .so .a .o .bin .testbin .pb.cc .pb.h _pb2.py .cuo + +# Set the sub-targets of the 'everything' target. +EVERYTHING_TARGETS := all py$(PROJECT) test warn lint +# Only build matcaffe as part of "everything" if MATLAB_DIR is specified. 
+ifneq ($(MATLAB_DIR),) + EVERYTHING_TARGETS += mat$(PROJECT) +endif + +############################## +# Define build targets +############################## +.PHONY: all lib test clean docs linecount lint lintclean tools examples $(DIST_ALIASES) \ + py mat py$(PROJECT) mat$(PROJECT) proto runtest \ + superclean supercleanlist supercleanfiles warn everything + +all: lib tools examples + +lib: $(STATIC_NAME) $(DYNAMIC_NAME) + +everything: $(EVERYTHING_TARGETS) + +linecount: + cloc --read-lang-def=$(PROJECT).cloc \ + src/$(PROJECT) include/$(PROJECT) tools examples \ + python matlab + +lint: $(EMPTY_LINT_REPORT) + +lintclean: + @ $(RM) -r $(LINT_OUTPUT_DIR) $(EMPTY_LINT_REPORT) $(NONEMPTY_LINT_REPORT) + +docs: $(DOXYGEN_OUTPUT_DIR) + @ cd ./docs ; ln -sfn ../$(DOXYGEN_OUTPUT_DIR)/html doxygen + +$(DOXYGEN_OUTPUT_DIR): $(DOXYGEN_CONFIG_FILE) $(DOXYGEN_SOURCES) + $(DOXYGEN_COMMAND) $(DOXYGEN_CONFIG_FILE) + +$(EMPTY_LINT_REPORT): $(LINT_OUTPUTS) | $(BUILD_DIR) + @ cat $(LINT_OUTPUTS) > $@ + @ if [ -s "$@" ]; then \ + cat $@; \ + mv $@ $(NONEMPTY_LINT_REPORT); \ + echo "Found one or more lint errors."; \ + exit 1; \ + fi; \ + $(RM) $(NONEMPTY_LINT_REPORT); \ + echo "No lint errors!"; + +$(LINT_OUTPUTS): $(LINT_OUTPUT_DIR)/%.lint.txt : % $(LINT_SCRIPT) | $(LINT_OUTPUT_DIR) + @ mkdir -p $(dir $@) + @ python $(LINT_SCRIPT) $< 2>&1 \ + | grep -v "^Done processing " \ + | grep -v "^Total errors found: 0" \ + > $@ \ + || true + +test: $(TEST_ALL_BIN) $(TEST_ALL_DYNLINK_BIN) $(TEST_BINS) + +tools: $(TOOL_BINS) $(TOOL_BIN_LINKS) + +examples: $(EXAMPLE_BINS) + +py$(PROJECT): py + +py: $(PY$(PROJECT)_SO) $(PROTO_GEN_PY) + +$(PY$(PROJECT)_SO): $(PY$(PROJECT)_SRC) $(PY$(PROJECT)_HXX) | $(DYNAMIC_NAME) + @ echo CXX/LD -o $@ $< + $(Q)$(CXX) -shared -o $@ $(PY$(PROJECT)_SRC) \ + -o $@ $(LINKFLAGS) -l$(PROJECT) $(PYTHON_LDFLAGS) \ + -Wl,-rpath,$(ORIGIN)/../../build/lib + +mat$(PROJECT): mat + +mat: $(MAT$(PROJECT)_SO) + +$(MAT$(PROJECT)_SO): $(MAT$(PROJECT)_SRC) $(STATIC_NAME) + @ if [ 
-z "$(MATLAB_DIR)" ]; then \ + echo "MATLAB_DIR must be specified in $(CONFIG_FILE)" \ + "to build mat$(PROJECT)."; \ + exit 1; \ + fi + @ echo MEX $< + $(Q)$(MATLAB_DIR)/bin/mex $(MAT$(PROJECT)_SRC) \ + CXX="$(CXX)" \ + CXXFLAGS="\$$CXXFLAGS $(MATLAB_CXXFLAGS)" \ + CXXLIBS="\$$CXXLIBS $(STATIC_LINK_COMMAND) $(LDFLAGS)" -output $@ + @ if [ -f "$(PROJECT)_.d" ]; then \ + mv -f $(PROJECT)_.d $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d}; \ + fi + +runtest: $(TEST_ALL_BIN) + $(TOOL_BUILD_DIR)/caffe + $(TEST_ALL_BIN) $(TEST_GPUID) --gtest_shuffle $(TEST_FILTER) + +pytest: py + cd python; python -m unittest discover -s caffe/test + +mattest: mat + cd matlab; $(MATLAB_DIR)/bin/matlab -nodisplay -r 'caffe.run_tests(), exit()' + +warn: $(EMPTY_WARN_REPORT) + +$(EMPTY_WARN_REPORT): $(ALL_WARNS) | $(BUILD_DIR) + @ cat $(ALL_WARNS) > $@ + @ if [ -s "$@" ]; then \ + cat $@; \ + mv $@ $(NONEMPTY_WARN_REPORT); \ + echo "Compiler produced one or more warnings."; \ + exit 1; \ + fi; \ + $(RM) $(NONEMPTY_WARN_REPORT); \ + echo "No compiler warnings!"; + +$(ALL_WARNS): %.o.$(WARNS_EXT) : %.o + +$(BUILD_DIR_LINK): $(BUILD_DIR)/.linked + +# Create a target ".linked" in this BUILD_DIR to tell Make that the "build" link +# is currently correct, then delete the one in the OTHER_BUILD_DIR in case it +# exists and $(DEBUG) is toggled later. 
+$(BUILD_DIR)/.linked: + @ mkdir -p $(BUILD_DIR) + @ $(RM) $(OTHER_BUILD_DIR)/.linked + @ $(RM) -r $(BUILD_DIR_LINK) + @ ln -s $(BUILD_DIR) $(BUILD_DIR_LINK) + @ touch $@ + +$(ALL_BUILD_DIRS): | $(BUILD_DIR_LINK) + @ mkdir -p $@ + +$(DYNAMIC_NAME): $(OBJS) | $(LIB_BUILD_DIR) + @ echo LD -o $@ + $(Q)$(CXX) -shared -o $@ $(OBJS) $(LINKFLAGS) $(LDFLAGS) $(DYNAMIC_FLAGS) + +$(STATIC_NAME): $(OBJS) | $(LIB_BUILD_DIR) + @ echo AR -o $@ + $(Q)ar rcs $@ $(OBJS) + +$(BUILD_DIR)/%.o: %.cpp | $(ALL_BUILD_DIRS) + @ echo CXX $< + $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ + || (cat $@.$(WARNS_EXT); exit 1) + @ cat $@.$(WARNS_EXT) + +$(PROTO_BUILD_DIR)/%.pb.o: $(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_GEN_HEADER) \ + | $(PROTO_BUILD_DIR) + @ echo CXX $< + $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ + || (cat $@.$(WARNS_EXT); exit 1) + @ cat $@.$(WARNS_EXT) + +$(BUILD_DIR)/cuda/%.o: %.cu | $(ALL_BUILD_DIRS) + @ echo NVCC $< + $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -M $< -o ${@:.o=.d} \ + -odir $(@D) + $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -c $< -o $@ 2> $@.$(WARNS_EXT) \ + || (cat $@.$(WARNS_EXT); exit 1) + @ cat $@.$(WARNS_EXT) + +$(TEST_ALL_BIN): $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \ + | $(DYNAMIC_NAME) $(TEST_BIN_DIR) + @ echo CXX/LD -o $@ $< + $(Q)$(CXX) $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \ + -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib + +$(TEST_CU_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CU_BUILD_DIR)/%.o \ + $(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR) + @ echo LD $< + $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \ + -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib + +$(TEST_CXX_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CXX_BUILD_DIR)/%.o \ + $(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR) + @ echo LD $< + $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \ + -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib + +# Target for extension-less symlinks to tool 
binaries with extension '*.bin'. +$(TOOL_BUILD_DIR)/%: $(TOOL_BUILD_DIR)/%.bin | $(TOOL_BUILD_DIR) + @ $(RM) $@ + @ ln -s $(abspath $<) $@ + +$(TOOL_BINS): %.bin : %.o | $(DYNAMIC_NAME) + @ echo CXX/LD -o $@ + $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \ + -Wl,-rpath,$(ORIGIN)/../lib + +$(EXAMPLE_BINS): %.bin : %.o | $(DYNAMIC_NAME) + @ echo CXX/LD -o $@ + $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \ + -Wl,-rpath,$(ORIGIN)/../../lib + +proto: $(PROTO_GEN_CC) $(PROTO_GEN_HEADER) + +$(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_BUILD_DIR)/%.pb.h : \ + $(PROTO_SRC_DIR)/%.proto | $(PROTO_BUILD_DIR) + @ echo PROTOC $< + $(Q)protoc --proto_path=$(PROTO_SRC_DIR) --cpp_out=$(PROTO_BUILD_DIR) $< + +$(PY_PROTO_BUILD_DIR)/%_pb2.py : $(PROTO_SRC_DIR)/%.proto \ + $(PY_PROTO_INIT) | $(PY_PROTO_BUILD_DIR) + @ echo PROTOC \(python\) $< + $(Q)protoc --proto_path=$(PROTO_SRC_DIR) --python_out=$(PY_PROTO_BUILD_DIR) $< + +$(PY_PROTO_INIT): | $(PY_PROTO_BUILD_DIR) + touch $(PY_PROTO_INIT) + +clean: + @- $(RM) -rf $(ALL_BUILD_DIRS) + @- $(RM) -rf $(OTHER_BUILD_DIR) + @- $(RM) -rf $(BUILD_DIR_LINK) + @- $(RM) -rf $(DISTRIBUTE_DIR) + @- $(RM) $(PY$(PROJECT)_SO) + @- $(RM) $(MAT$(PROJECT)_SO) + +supercleanfiles: + $(eval SUPERCLEAN_FILES := $(strip \ + $(foreach ext,$(SUPERCLEAN_EXTS), $(shell find . 
-name '*$(ext)' \ + -not -path './data/*')))) + +supercleanlist: supercleanfiles + @ \ + if [ -z "$(SUPERCLEAN_FILES)" ]; then \ + echo "No generated files found."; \ + else \ + echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \ + fi + +superclean: clean supercleanfiles + @ \ + if [ -z "$(SUPERCLEAN_FILES)" ]; then \ + echo "No generated files found."; \ + else \ + echo "Deleting the following generated files:"; \ + echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \ + $(RM) $(SUPERCLEAN_FILES); \ + fi + +$(DIST_ALIASES): $(DISTRIBUTE_DIR) + +$(DISTRIBUTE_DIR): all py | $(DISTRIBUTE_SUBDIRS) + # add include + cp -r include $(DISTRIBUTE_DIR)/ + mkdir -p $(DISTRIBUTE_DIR)/include/caffe/proto + cp $(PROTO_GEN_HEADER_SRCS) $(DISTRIBUTE_DIR)/include/caffe/proto + # add tool and example binaries + cp $(TOOL_BINS) $(DISTRIBUTE_DIR)/bin + cp $(EXAMPLE_BINS) $(DISTRIBUTE_DIR)/bin + # add libraries + cp $(STATIC_NAME) $(DISTRIBUTE_DIR)/lib + cp $(DYNAMIC_NAME) $(DISTRIBUTE_DIR)/lib + # add python - it's not the standard way, indeed... + cp -r python $(DISTRIBUTE_DIR)/python + +-include $(DEPS) diff --git a/Makefile_BACKUP_61727 b/Makefile_BACKUP_61727 new file mode 100644 index 00000000000..150cef28a83 --- /dev/null +++ b/Makefile_BACKUP_61727 @@ -0,0 +1,649 @@ +PROJECT := caffe + +CONFIG_FILE := Makefile.config +# Explicitly check for the config file, otherwise make -k will proceed anyway. +ifeq ($(wildcard $(CONFIG_FILE)),) +$(error $(CONFIG_FILE) not found. See $(CONFIG_FILE).example.) +endif +include $(CONFIG_FILE) + +BUILD_DIR_LINK := $(BUILD_DIR) +ifeq ($(RELEASE_BUILD_DIR),) + RELEASE_BUILD_DIR := .$(BUILD_DIR)_release +endif +ifeq ($(DEBUG_BUILD_DIR),) + DEBUG_BUILD_DIR := .$(BUILD_DIR)_debug +endif + +DEBUG ?= 0 +ifeq ($(DEBUG), 1) + BUILD_DIR := $(DEBUG_BUILD_DIR) + OTHER_BUILD_DIR := $(RELEASE_BUILD_DIR) +else + BUILD_DIR := $(RELEASE_BUILD_DIR) + OTHER_BUILD_DIR := $(DEBUG_BUILD_DIR) +endif + +# All of the directories containing code. 
+SRC_DIRS := $(shell find * -type d -exec bash -c "find {} -maxdepth 1 \ + \( -name '*.cpp' -o -name '*.proto' \) | grep -q ." \; -print) + +# The target shared library name +LIB_BUILD_DIR := $(BUILD_DIR)/lib +STATIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).a +DYNAMIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).so + +############################## +# Get all source files +############################## +# CXX_SRCS are the source files excluding the test ones. +CXX_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cpp" -name "*.cpp") +# CU_SRCS are the cuda source files +CU_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cu" -name "*.cu") +# TEST_SRCS are the test source files +TEST_MAIN_SRC := src/$(PROJECT)/test/test_caffe_main.cpp +TEST_SRCS := $(shell find src/$(PROJECT) -name "test_*.cpp") +TEST_SRCS := $(filter-out $(TEST_MAIN_SRC), $(TEST_SRCS)) +TEST_CU_SRCS := $(shell find src/$(PROJECT) -name "test_*.cu") +GTEST_SRC := src/gtest/gtest-all.cpp +# TOOL_SRCS are the source files for the tool binaries +TOOL_SRCS := $(shell find tools -name "*.cpp") +# EXAMPLE_SRCS are the source files for the example binaries +EXAMPLE_SRCS := $(shell find examples -name "*.cpp") +# BUILD_INCLUDE_DIR contains any generated header files we want to include. +BUILD_INCLUDE_DIR := $(BUILD_DIR)/src +# PROTO_SRCS are the protocol buffer definitions +PROTO_SRC_DIR := src/$(PROJECT)/proto +PROTO_SRCS := $(wildcard $(PROTO_SRC_DIR)/*.proto) +# PROTO_BUILD_DIR will contain the .cc and obj files generated from +# PROTO_SRCS; PROTO_BUILD_INCLUDE_DIR will contain the .h header files +PROTO_BUILD_DIR := $(BUILD_DIR)/$(PROTO_SRC_DIR) +PROTO_BUILD_INCLUDE_DIR := $(BUILD_INCLUDE_DIR)/$(PROJECT)/proto +# NONGEN_CXX_SRCS includes all source/header files except those generated +# automatically (e.g., by proto). 
+NONGEN_CXX_SRCS := $(shell find \ + src/$(PROJECT) \ + include/$(PROJECT) \ + python/$(PROJECT) \ + matlab/+$(PROJECT)/private \ + examples \ + tools \ + -name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh") +LINT_SCRIPT := scripts/cpp_lint.py +LINT_OUTPUT_DIR := $(BUILD_DIR)/.lint +LINT_EXT := lint.txt +LINT_OUTPUTS := $(addsuffix .$(LINT_EXT), $(addprefix $(LINT_OUTPUT_DIR)/, $(NONGEN_CXX_SRCS))) +EMPTY_LINT_REPORT := $(BUILD_DIR)/.$(LINT_EXT) +NONEMPTY_LINT_REPORT := $(BUILD_DIR)/$(LINT_EXT) +# PY$(PROJECT)_SRC is the python wrapper for $(PROJECT) +PY$(PROJECT)_SRC := python/$(PROJECT)/_$(PROJECT).cpp +PY$(PROJECT)_SO := python/$(PROJECT)/_$(PROJECT).so +PY$(PROJECT)_HXX := include/$(PROJECT)/python_layer.hpp +# MAT$(PROJECT)_SRC is the mex entrance point of matlab package for $(PROJECT) +MAT$(PROJECT)_SRC := matlab/+$(PROJECT)/private/$(PROJECT)_.cpp +ifneq ($(MATLAB_DIR),) + MAT_SO_EXT := $(shell $(MATLAB_DIR)/bin/mexext) +endif +MAT$(PROJECT)_SO := matlab/+$(PROJECT)/private/$(PROJECT)_.$(MAT_SO_EXT) + +############################## +# Derive generated files +############################## +# The generated files for protocol buffers +PROTO_GEN_HEADER_SRCS := $(addprefix $(PROTO_BUILD_DIR)/, \ + $(notdir ${PROTO_SRCS:.proto=.pb.h})) +PROTO_GEN_HEADER := $(addprefix $(PROTO_BUILD_INCLUDE_DIR)/, \ + $(notdir ${PROTO_SRCS:.proto=.pb.h})) +PROTO_GEN_CC := $(addprefix $(BUILD_DIR)/, ${PROTO_SRCS:.proto=.pb.cc}) +PY_PROTO_BUILD_DIR := python/$(PROJECT)/proto +PY_PROTO_INIT := python/$(PROJECT)/proto/__init__.py +PROTO_GEN_PY := $(foreach file,${PROTO_SRCS:.proto=_pb2.py}, \ + $(PY_PROTO_BUILD_DIR)/$(notdir $(file))) +# The objects corresponding to the source files +# These objects will be linked into the final shared library, so we +# exclude the tool, example, and test objects. 
+CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o}) +CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o}) +PROTO_OBJS := ${PROTO_GEN_CC:.cc=.o} +OBJS := $(PROTO_OBJS) $(CXX_OBJS) $(CU_OBJS) +# tool, example, and test objects +TOOL_OBJS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o}) +TOOL_BUILD_DIR := $(BUILD_DIR)/tools +TEST_CXX_BUILD_DIR := $(BUILD_DIR)/src/$(PROJECT)/test +TEST_CU_BUILD_DIR := $(BUILD_DIR)/cuda/src/$(PROJECT)/test +TEST_CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o}) +TEST_CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o}) +TEST_OBJS := $(TEST_CXX_OBJS) $(TEST_CU_OBJS) +GTEST_OBJ := $(addprefix $(BUILD_DIR)/, ${GTEST_SRC:.cpp=.o}) +EXAMPLE_OBJS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o}) +# Output files for automatic dependency generation +DEPS := ${CXX_OBJS:.o=.d} ${CU_OBJS:.o=.d} ${TEST_CXX_OBJS:.o=.d} \ + ${TEST_CU_OBJS:.o=.d} $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d} +# tool, example, and test bins +TOOL_BINS := ${TOOL_OBJS:.o=.bin} +EXAMPLE_BINS := ${EXAMPLE_OBJS:.o=.bin} +# symlinks to tool bins without the ".bin" extension +TOOL_BIN_LINKS := ${TOOL_BINS:.bin=} +# Put the test binaries in build/test for convenience. +TEST_BIN_DIR := $(BUILD_DIR)/test +TEST_CU_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \ + $(foreach obj,$(TEST_CU_OBJS),$(basename $(notdir $(obj)))))) +TEST_CXX_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \ + $(foreach obj,$(TEST_CXX_OBJS),$(basename $(notdir $(obj)))))) +TEST_BINS := $(TEST_CXX_BINS) $(TEST_CU_BINS) +# TEST_ALL_BIN is the test binary that links caffe dynamically. 
+TEST_ALL_BIN := $(TEST_BIN_DIR)/test_all.testbin + +############################## +# Derive compiler warning dump locations +############################## +WARNS_EXT := warnings.txt +CXX_WARNS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o.$(WARNS_EXT)}) +CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o.$(WARNS_EXT)}) +TOOL_WARNS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o.$(WARNS_EXT)}) +EXAMPLE_WARNS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o.$(WARNS_EXT)}) +TEST_WARNS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o.$(WARNS_EXT)}) +TEST_CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o.$(WARNS_EXT)}) +ALL_CXX_WARNS := $(CXX_WARNS) $(TOOL_WARNS) $(EXAMPLE_WARNS) $(TEST_WARNS) +ALL_CU_WARNS := $(CU_WARNS) $(TEST_CU_WARNS) +ALL_WARNS := $(ALL_CXX_WARNS) $(ALL_CU_WARNS) + +EMPTY_WARN_REPORT := $(BUILD_DIR)/.$(WARNS_EXT) +NONEMPTY_WARN_REPORT := $(BUILD_DIR)/$(WARNS_EXT) + +############################## +# Derive include and lib directories +############################## +CUDA_INCLUDE_DIR := $(CUDA_DIR)/include + +CUDA_LIB_DIR := +# add /lib64 only if it exists +ifneq ("$(wildcard $(CUDA_DIR)/lib64)","") + CUDA_LIB_DIR += $(CUDA_DIR)/lib64 +endif +CUDA_LIB_DIR += $(CUDA_DIR)/lib + +INCLUDE_DIRS += $(BUILD_INCLUDE_DIR) ./src ./include +ifneq ($(CPU_ONLY), 1) + INCLUDE_DIRS += $(CUDA_INCLUDE_DIR) + LIBRARY_DIRS += $(CUDA_LIB_DIR) + LIBRARIES := cudart cublas curand +endif +LIBRARIES += glog gflags protobuf leveldb snappy \ + lmdb boost_system hdf5_hl hdf5 m \ + opencv_core opencv_highgui opencv_imgproc #opencv_imgcodecs +PYTHON_LIBRARIES := boost_python python2.7 +WARNINGS := -Wall -Wno-sign-compare + +############################## +# Set build directories +############################## + +DISTRIBUTE_DIR ?= distribute +DISTRIBUTE_SUBDIRS := $(DISTRIBUTE_DIR)/bin $(DISTRIBUTE_DIR)/lib +DIST_ALIASES := dist +ifneq ($(strip $(DISTRIBUTE_DIR)),distribute) + DIST_ALIASES += distribute +endif + +ALL_BUILD_DIRS := $(sort 
$(BUILD_DIR) $(addprefix $(BUILD_DIR)/, $(SRC_DIRS)) \ + $(addprefix $(BUILD_DIR)/cuda/, $(SRC_DIRS)) \ + $(LIB_BUILD_DIR) $(TEST_BIN_DIR) $(PY_PROTO_BUILD_DIR) $(LINT_OUTPUT_DIR) \ + $(DISTRIBUTE_SUBDIRS) $(PROTO_BUILD_INCLUDE_DIR)) + +############################## +# Set directory for Doxygen-generated documentation +############################## +DOXYGEN_CONFIG_FILE ?= ./.Doxyfile +# should be the same as OUTPUT_DIRECTORY in the .Doxyfile +DOXYGEN_OUTPUT_DIR ?= ./doxygen +DOXYGEN_COMMAND ?= doxygen +# All the files that might have Doxygen documentation. +DOXYGEN_SOURCES := $(shell find \ + src/$(PROJECT) \ + include/$(PROJECT) \ + python/ \ + matlab/ \ + examples \ + tools \ + -name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh" -or \ + -name "*.py" -or -name "*.m") +DOXYGEN_SOURCES += $(DOXYGEN_CONFIG_FILE) + + +############################## +# Configure build +############################## + +# Determine platform +UNAME := $(shell uname -s) +ifeq ($(UNAME), Linux) + LINUX := 1 +else ifeq ($(UNAME), Darwin) + OSX := 1 +endif + +# Linux +ifeq ($(LINUX), 1) + CXX ?= /usr/bin/g++ + GCCVERSION := $(shell $(CXX) -dumpversion | cut -f1,2 -d.) + # older versions of gcc are too dumb to build boost with -Wuninitalized +<<<<<<< abf8aae89dd323f86a6c76ff671a08a9d2794749 + ifeq ($(shell echo | awk '{exit $(GCCVERSION) < 4.6;}'), 1) +======= + ifeq ($(shell echo $(GCCVERSION) \< 4.6 | bc), 1) +>>>>>>> GPU version added + WARNINGS += -Wno-uninitialized + endif + # boost::thread is reasonably called boost_thread (compare OS X) + # We will also explicitly add stdc++ to the link target. 
+ LIBRARIES += boost_thread stdc++ +endif + +# OS X: +# clang++ instead of g++ +# libstdc++ for NVCC compatibility on OS X >= 10.9 with CUDA < 7.0 +ifeq ($(OSX), 1) + CXX := /usr/bin/clang++ + ifneq ($(CPU_ONLY), 1) + CUDA_VERSION := $(shell $(CUDA_DIR)/bin/nvcc -V | grep -o 'release \d' | grep -o '\d') +<<<<<<< abf8aae89dd323f86a6c76ff671a08a9d2794749 + ifeq ($(shell echo | awk '{exit $(CUDA_VERSION) < 7.0;}'), 1) +======= + ifeq ($(shell echo $(CUDA_VERSION) \< 7.0 | bc), 1) +>>>>>>> GPU version added + CXXFLAGS += -stdlib=libstdc++ + LINKFLAGS += -stdlib=libstdc++ + endif + # clang throws this warning for cuda headers + WARNINGS += -Wno-unneeded-internal-declaration + endif + # gtest needs to use its own tuple to not conflict with clang + COMMON_FLAGS += -DGTEST_USE_OWN_TR1_TUPLE=1 + # boost::thread is called boost_thread-mt to mark multithreading on OS X + LIBRARIES += boost_thread-mt + # we need to explicitly ask for the rpath to be obeyed + DYNAMIC_FLAGS := -install_name @rpath/libcaffe.so + ORIGIN := @loader_path +else + ORIGIN := \$$ORIGIN +endif + +# Custom compiler +ifdef CUSTOM_CXX + CXX := $(CUSTOM_CXX) +endif + +# Static linking +ifneq (,$(findstring clang++,$(CXX))) + STATIC_LINK_COMMAND := -Wl,-force_load $(STATIC_NAME) +else ifneq (,$(findstring g++,$(CXX))) + STATIC_LINK_COMMAND := -Wl,--whole-archive $(STATIC_NAME) -Wl,--no-whole-archive +else + # The following line must not be indented with a tab, since we are not inside a target + $(error Cannot static link with the $(CXX) compiler) +endif + +# Debugging +ifeq ($(DEBUG), 1) + COMMON_FLAGS += -DDEBUG -g -O0 + NVCCFLAGS += -G +else + COMMON_FLAGS += -DNDEBUG -O2 +endif + +# cuDNN acceleration configuration. 
+ifeq ($(USE_CUDNN), 1) + LIBRARIES += cudnn + COMMON_FLAGS += -DUSE_CUDNN +endif + +# CPU-only configuration +ifeq ($(CPU_ONLY), 1) + OBJS := $(PROTO_OBJS) $(CXX_OBJS) + TEST_OBJS := $(TEST_CXX_OBJS) + TEST_BINS := $(TEST_CXX_BINS) + ALL_WARNS := $(ALL_CXX_WARNS) + TEST_FILTER := --gtest_filter="-*GPU*" + COMMON_FLAGS += -DCPU_ONLY +endif + +# Python layer support +ifeq ($(WITH_PYTHON_LAYER), 1) + COMMON_FLAGS += -DWITH_PYTHON_LAYER + LIBRARIES += $(PYTHON_LIBRARIES) +endif + +# BLAS configuration (default = ATLAS) +BLAS ?= atlas +ifeq ($(BLAS), mkl) + # MKL + LIBRARIES += mkl_rt + COMMON_FLAGS += -DUSE_MKL + MKL_DIR ?= /opt/intel/mkl + BLAS_INCLUDE ?= $(MKL_DIR)/include + BLAS_LIB ?= $(MKL_DIR)/lib $(MKL_DIR)/lib/intel64 +else ifeq ($(BLAS), open) + # OpenBLAS + LIBRARIES += openblas +else + # ATLAS + ifeq ($(LINUX), 1) + ifeq ($(BLAS), atlas) + # Linux simply has cblas and atlas + LIBRARIES += cblas atlas + endif + else ifeq ($(OSX), 1) + # OS X packages atlas as the vecLib framework + LIBRARIES += cblas + # 10.10 has accelerate while 10.9 has veclib + XCODE_CLT_VER := $(shell pkgutil --pkg-info=com.apple.pkg.CLTools_Executables | grep -o 'version: 6') + ifneq (,$(findstring version: 6,$(XCODE_CLT_VER))) + BLAS_INCLUDE ?= /System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/ + LDFLAGS += -framework Accelerate + else + BLAS_INCLUDE ?= /System/Library/Frameworks/vecLib.framework/Versions/Current/Headers/ + LDFLAGS += -framework vecLib + endif + endif +endif +INCLUDE_DIRS += $(BLAS_INCLUDE) +LIBRARY_DIRS += $(BLAS_LIB) + +LIBRARY_DIRS += $(LIB_BUILD_DIR) + +# Automatic dependency generation (nvcc is handled separately) +CXXFLAGS += -MMD -MP + +# Complete build flags. 
+COMMON_FLAGS += $(foreach includedir,$(INCLUDE_DIRS),-I$(includedir)) +CXXFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) +NVCCFLAGS += -ccbin=$(CXX) -Xcompiler -fPIC $(COMMON_FLAGS) +# mex may invoke an older gcc that is too liberal with -Wuninitalized +MATLAB_CXXFLAGS := $(CXXFLAGS) -Wno-uninitialized +LINKFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) + +USE_PKG_CONFIG ?= 0 +ifeq ($(USE_PKG_CONFIG), 1) + PKG_CONFIG := $(shell pkg-config opencv --libs) +else + PKG_CONFIG := +endif +LDFLAGS += $(foreach librarydir,$(LIBRARY_DIRS),-L$(librarydir)) $(PKG_CONFIG) \ + $(foreach library,$(LIBRARIES),-l$(library)) +PYTHON_LDFLAGS := $(LDFLAGS) $(foreach library,$(PYTHON_LIBRARIES),-l$(library)) + +# 'superclean' target recursively* deletes all files ending with an extension +# in $(SUPERCLEAN_EXTS) below. This may be useful if you've built older +# versions of Caffe that do not place all generated files in a location known +# to the 'clean' target. +# +# 'supercleanlist' will list the files to be deleted by make superclean. +# +# * Recursive with the exception that symbolic links are never followed, per the +# default behavior of 'find'. +SUPERCLEAN_EXTS := .so .a .o .bin .testbin .pb.cc .pb.h _pb2.py .cuo + +# Set the sub-targets of the 'everything' target. +EVERYTHING_TARGETS := all py$(PROJECT) test warn lint +# Only build matcaffe as part of "everything" if MATLAB_DIR is specified. 
+ifneq ($(MATLAB_DIR),) + EVERYTHING_TARGETS += mat$(PROJECT) +endif + +############################## +# Define build targets +############################## +<<<<<<< abf8aae89dd323f86a6c76ff671a08a9d2794749 +.PHONY: all lib test clean docs linecount lint lintclean tools examples $(DIST_ALIASES) \ + py mat py$(PROJECT) mat$(PROJECT) proto runtest \ + superclean supercleanlist supercleanfiles warn everything + +all: lib tools examples + +lib: $(STATIC_NAME) $(DYNAMIC_NAME) +======= +.PHONY: all test clean docs linecount lint lintclean tools examples $(DIST_ALIASES) \ + py mat py$(PROJECT) mat$(PROJECT) proto runtest \ + superclean supercleanlist supercleanfiles warn everything + +all: $(STATIC_NAME) $(DYNAMIC_NAME) tools examples +>>>>>>> GPU version added + +everything: $(EVERYTHING_TARGETS) + +linecount: + cloc --read-lang-def=$(PROJECT).cloc \ + src/$(PROJECT) include/$(PROJECT) tools examples \ + python matlab + +lint: $(EMPTY_LINT_REPORT) + +lintclean: + @ $(RM) -r $(LINT_OUTPUT_DIR) $(EMPTY_LINT_REPORT) $(NONEMPTY_LINT_REPORT) + +docs: $(DOXYGEN_OUTPUT_DIR) + @ cd ./docs ; ln -sfn ../$(DOXYGEN_OUTPUT_DIR)/html doxygen + +$(DOXYGEN_OUTPUT_DIR): $(DOXYGEN_CONFIG_FILE) $(DOXYGEN_SOURCES) + $(DOXYGEN_COMMAND) $(DOXYGEN_CONFIG_FILE) + +$(EMPTY_LINT_REPORT): $(LINT_OUTPUTS) | $(BUILD_DIR) + @ cat $(LINT_OUTPUTS) > $@ + @ if [ -s "$@" ]; then \ + cat $@; \ + mv $@ $(NONEMPTY_LINT_REPORT); \ + echo "Found one or more lint errors."; \ + exit 1; \ + fi; \ + $(RM) $(NONEMPTY_LINT_REPORT); \ + echo "No lint errors!"; + +$(LINT_OUTPUTS): $(LINT_OUTPUT_DIR)/%.lint.txt : % $(LINT_SCRIPT) | $(LINT_OUTPUT_DIR) + @ mkdir -p $(dir $@) + @ python $(LINT_SCRIPT) $< 2>&1 \ + | grep -v "^Done processing " \ + | grep -v "^Total errors found: 0" \ + > $@ \ + || true + +test: $(TEST_ALL_BIN) $(TEST_ALL_DYNLINK_BIN) $(TEST_BINS) + +tools: $(TOOL_BINS) $(TOOL_BIN_LINKS) + +examples: $(EXAMPLE_BINS) + +py$(PROJECT): py + +py: $(PY$(PROJECT)_SO) $(PROTO_GEN_PY) + +$(PY$(PROJECT)_SO): 
$(PY$(PROJECT)_SRC) $(PY$(PROJECT)_HXX) | $(DYNAMIC_NAME) + @ echo CXX/LD -o $@ $< + $(Q)$(CXX) -shared -o $@ $(PY$(PROJECT)_SRC) \ + -o $@ $(LINKFLAGS) -l$(PROJECT) $(PYTHON_LDFLAGS) \ + -Wl,-rpath,$(ORIGIN)/../../build/lib + +mat$(PROJECT): mat + +mat: $(MAT$(PROJECT)_SO) + +$(MAT$(PROJECT)_SO): $(MAT$(PROJECT)_SRC) $(STATIC_NAME) + @ if [ -z "$(MATLAB_DIR)" ]; then \ + echo "MATLAB_DIR must be specified in $(CONFIG_FILE)" \ + "to build mat$(PROJECT)."; \ + exit 1; \ + fi + @ echo MEX $< + $(Q)$(MATLAB_DIR)/bin/mex $(MAT$(PROJECT)_SRC) \ + CXX="$(CXX)" \ + CXXFLAGS="\$$CXXFLAGS $(MATLAB_CXXFLAGS)" \ + CXXLIBS="\$$CXXLIBS $(STATIC_LINK_COMMAND) $(LDFLAGS)" -output $@ + @ if [ -f "$(PROJECT)_.d" ]; then \ + mv -f $(PROJECT)_.d $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d}; \ + fi + +runtest: $(TEST_ALL_BIN) + $(TOOL_BUILD_DIR)/caffe + $(TEST_ALL_BIN) $(TEST_GPUID) --gtest_shuffle $(TEST_FILTER) + +pytest: py + cd python; python -m unittest discover -s caffe/test + +mattest: mat + cd matlab; $(MATLAB_DIR)/bin/matlab -nodisplay -r 'caffe.run_tests(), exit()' + +warn: $(EMPTY_WARN_REPORT) + +$(EMPTY_WARN_REPORT): $(ALL_WARNS) | $(BUILD_DIR) + @ cat $(ALL_WARNS) > $@ + @ if [ -s "$@" ]; then \ + cat $@; \ + mv $@ $(NONEMPTY_WARN_REPORT); \ + echo "Compiler produced one or more warnings."; \ + exit 1; \ + fi; \ + $(RM) $(NONEMPTY_WARN_REPORT); \ + echo "No compiler warnings!"; + +$(ALL_WARNS): %.o.$(WARNS_EXT) : %.o + +$(BUILD_DIR_LINK): $(BUILD_DIR)/.linked + +# Create a target ".linked" in this BUILD_DIR to tell Make that the "build" link +# is currently correct, then delete the one in the OTHER_BUILD_DIR in case it +# exists and $(DEBUG) is toggled later. 
+$(BUILD_DIR)/.linked: + @ mkdir -p $(BUILD_DIR) + @ $(RM) $(OTHER_BUILD_DIR)/.linked + @ $(RM) -r $(BUILD_DIR_LINK) + @ ln -s $(BUILD_DIR) $(BUILD_DIR_LINK) + @ touch $@ + +$(ALL_BUILD_DIRS): | $(BUILD_DIR_LINK) + @ mkdir -p $@ + +$(DYNAMIC_NAME): $(OBJS) | $(LIB_BUILD_DIR) + @ echo LD -o $@ + $(Q)$(CXX) -shared -o $@ $(OBJS) $(LINKFLAGS) $(LDFLAGS) $(DYNAMIC_FLAGS) + +$(STATIC_NAME): $(OBJS) | $(LIB_BUILD_DIR) + @ echo AR -o $@ + $(Q)ar rcs $@ $(OBJS) + +$(BUILD_DIR)/%.o: %.cpp | $(ALL_BUILD_DIRS) + @ echo CXX $< + $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ + || (cat $@.$(WARNS_EXT); exit 1) + @ cat $@.$(WARNS_EXT) + +$(PROTO_BUILD_DIR)/%.pb.o: $(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_GEN_HEADER) \ + | $(PROTO_BUILD_DIR) + @ echo CXX $< + $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ + || (cat $@.$(WARNS_EXT); exit 1) + @ cat $@.$(WARNS_EXT) + +$(BUILD_DIR)/cuda/%.o: %.cu | $(ALL_BUILD_DIRS) + @ echo NVCC $< + $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -M $< -o ${@:.o=.d} \ + -odir $(@D) + $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -c $< -o $@ 2> $@.$(WARNS_EXT) \ + || (cat $@.$(WARNS_EXT); exit 1) + @ cat $@.$(WARNS_EXT) + +$(TEST_ALL_BIN): $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \ + | $(DYNAMIC_NAME) $(TEST_BIN_DIR) + @ echo CXX/LD -o $@ $< + $(Q)$(CXX) $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \ + -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib + +$(TEST_CU_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CU_BUILD_DIR)/%.o \ + $(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR) + @ echo LD $< + $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \ + -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib + +$(TEST_CXX_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CXX_BUILD_DIR)/%.o \ + $(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR) + @ echo LD $< + $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \ + -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib + +# Target for extension-less symlinks to tool 
binaries with extension '*.bin'. +$(TOOL_BUILD_DIR)/%: $(TOOL_BUILD_DIR)/%.bin | $(TOOL_BUILD_DIR) + @ $(RM) $@ + @ ln -s $(abspath $<) $@ + +$(TOOL_BINS): %.bin : %.o | $(DYNAMIC_NAME) + @ echo CXX/LD -o $@ + $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \ + -Wl,-rpath,$(ORIGIN)/../lib + +$(EXAMPLE_BINS): %.bin : %.o | $(DYNAMIC_NAME) + @ echo CXX/LD -o $@ + $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \ + -Wl,-rpath,$(ORIGIN)/../../lib + +proto: $(PROTO_GEN_CC) $(PROTO_GEN_HEADER) + +$(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_BUILD_DIR)/%.pb.h : \ + $(PROTO_SRC_DIR)/%.proto | $(PROTO_BUILD_DIR) + @ echo PROTOC $< + $(Q)protoc --proto_path=$(PROTO_SRC_DIR) --cpp_out=$(PROTO_BUILD_DIR) $< + +$(PY_PROTO_BUILD_DIR)/%_pb2.py : $(PROTO_SRC_DIR)/%.proto \ + $(PY_PROTO_INIT) | $(PY_PROTO_BUILD_DIR) + @ echo PROTOC \(python\) $< + $(Q)protoc --proto_path=$(PROTO_SRC_DIR) --python_out=$(PY_PROTO_BUILD_DIR) $< + +$(PY_PROTO_INIT): | $(PY_PROTO_BUILD_DIR) + touch $(PY_PROTO_INIT) + +clean: + @- $(RM) -rf $(ALL_BUILD_DIRS) + @- $(RM) -rf $(OTHER_BUILD_DIR) + @- $(RM) -rf $(BUILD_DIR_LINK) + @- $(RM) -rf $(DISTRIBUTE_DIR) + @- $(RM) $(PY$(PROJECT)_SO) + @- $(RM) $(MAT$(PROJECT)_SO) + +supercleanfiles: + $(eval SUPERCLEAN_FILES := $(strip \ + $(foreach ext,$(SUPERCLEAN_EXTS), $(shell find . 
-name '*$(ext)' \ + -not -path './data/*')))) + +supercleanlist: supercleanfiles + @ \ + if [ -z "$(SUPERCLEAN_FILES)" ]; then \ + echo "No generated files found."; \ + else \ + echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \ + fi + +superclean: clean supercleanfiles + @ \ + if [ -z "$(SUPERCLEAN_FILES)" ]; then \ + echo "No generated files found."; \ + else \ + echo "Deleting the following generated files:"; \ + echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \ + $(RM) $(SUPERCLEAN_FILES); \ + fi + +$(DIST_ALIASES): $(DISTRIBUTE_DIR) + +$(DISTRIBUTE_DIR): all py | $(DISTRIBUTE_SUBDIRS) + # add include + cp -r include $(DISTRIBUTE_DIR)/ + mkdir -p $(DISTRIBUTE_DIR)/include/caffe/proto + cp $(PROTO_GEN_HEADER_SRCS) $(DISTRIBUTE_DIR)/include/caffe/proto + # add tool and example binaries + cp $(TOOL_BINS) $(DISTRIBUTE_DIR)/bin + cp $(EXAMPLE_BINS) $(DISTRIBUTE_DIR)/bin + # add libraries + cp $(STATIC_NAME) $(DISTRIBUTE_DIR)/lib + cp $(DYNAMIC_NAME) $(DISTRIBUTE_DIR)/lib + # add python - it's not the standard way, indeed... + cp -r python $(DISTRIBUTE_DIR)/python + +-include $(DEPS) diff --git a/Makefile_BASE_61727 b/Makefile_BASE_61727 new file mode 100644 index 00000000000..e69de29bb2d diff --git a/Makefile_LOCAL_61727 b/Makefile_LOCAL_61727 new file mode 100644 index 00000000000..287fa4ea52f --- /dev/null +++ b/Makefile_LOCAL_61727 @@ -0,0 +1,633 @@ +PROJECT := caffe + +CONFIG_FILE := Makefile.config +# Explicitly check for the config file, otherwise make -k will proceed anyway. +ifeq ($(wildcard $(CONFIG_FILE)),) +$(error $(CONFIG_FILE) not found. See $(CONFIG_FILE).example.) 
+endif +include $(CONFIG_FILE) + +BUILD_DIR_LINK := $(BUILD_DIR) +ifeq ($(RELEASE_BUILD_DIR),) + RELEASE_BUILD_DIR := .$(BUILD_DIR)_release +endif +ifeq ($(DEBUG_BUILD_DIR),) + DEBUG_BUILD_DIR := .$(BUILD_DIR)_debug +endif + +DEBUG ?= 0 +ifeq ($(DEBUG), 1) + BUILD_DIR := $(DEBUG_BUILD_DIR) + OTHER_BUILD_DIR := $(RELEASE_BUILD_DIR) +else + BUILD_DIR := $(RELEASE_BUILD_DIR) + OTHER_BUILD_DIR := $(DEBUG_BUILD_DIR) +endif + +# All of the directories containing code. +SRC_DIRS := $(shell find * -type d -exec bash -c "find {} -maxdepth 1 \ + \( -name '*.cpp' -o -name '*.proto' \) | grep -q ." \; -print) + +# The target shared library name +LIB_BUILD_DIR := $(BUILD_DIR)/lib +STATIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).a +DYNAMIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).so + +############################## +# Get all source files +############################## +# CXX_SRCS are the source files excluding the test ones. +CXX_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cpp" -name "*.cpp") +# CU_SRCS are the cuda source files +CU_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cu" -name "*.cu") +# TEST_SRCS are the test source files +TEST_MAIN_SRC := src/$(PROJECT)/test/test_caffe_main.cpp +TEST_SRCS := $(shell find src/$(PROJECT) -name "test_*.cpp") +TEST_SRCS := $(filter-out $(TEST_MAIN_SRC), $(TEST_SRCS)) +TEST_CU_SRCS := $(shell find src/$(PROJECT) -name "test_*.cu") +GTEST_SRC := src/gtest/gtest-all.cpp +# TOOL_SRCS are the source files for the tool binaries +TOOL_SRCS := $(shell find tools -name "*.cpp") +# EXAMPLE_SRCS are the source files for the example binaries +EXAMPLE_SRCS := $(shell find examples -name "*.cpp") +# BUILD_INCLUDE_DIR contains any generated header files we want to include. 
+BUILD_INCLUDE_DIR := $(BUILD_DIR)/src +# PROTO_SRCS are the protocol buffer definitions +PROTO_SRC_DIR := src/$(PROJECT)/proto +PROTO_SRCS := $(wildcard $(PROTO_SRC_DIR)/*.proto) +# PROTO_BUILD_DIR will contain the .cc and obj files generated from +# PROTO_SRCS; PROTO_BUILD_INCLUDE_DIR will contain the .h header files +PROTO_BUILD_DIR := $(BUILD_DIR)/$(PROTO_SRC_DIR) +PROTO_BUILD_INCLUDE_DIR := $(BUILD_INCLUDE_DIR)/$(PROJECT)/proto +# NONGEN_CXX_SRCS includes all source/header files except those generated +# automatically (e.g., by proto). +NONGEN_CXX_SRCS := $(shell find \ + src/$(PROJECT) \ + include/$(PROJECT) \ + python/$(PROJECT) \ + matlab/+$(PROJECT)/private \ + examples \ + tools \ + -name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh") +LINT_SCRIPT := scripts/cpp_lint.py +LINT_OUTPUT_DIR := $(BUILD_DIR)/.lint +LINT_EXT := lint.txt +LINT_OUTPUTS := $(addsuffix .$(LINT_EXT), $(addprefix $(LINT_OUTPUT_DIR)/, $(NONGEN_CXX_SRCS))) +EMPTY_LINT_REPORT := $(BUILD_DIR)/.$(LINT_EXT) +NONEMPTY_LINT_REPORT := $(BUILD_DIR)/$(LINT_EXT) +# PY$(PROJECT)_SRC is the python wrapper for $(PROJECT) +PY$(PROJECT)_SRC := python/$(PROJECT)/_$(PROJECT).cpp +PY$(PROJECT)_SO := python/$(PROJECT)/_$(PROJECT).so +PY$(PROJECT)_HXX := include/$(PROJECT)/python_layer.hpp +# MAT$(PROJECT)_SRC is the mex entrance point of matlab package for $(PROJECT) +MAT$(PROJECT)_SRC := matlab/+$(PROJECT)/private/$(PROJECT)_.cpp +ifneq ($(MATLAB_DIR),) + MAT_SO_EXT := $(shell $(MATLAB_DIR)/bin/mexext) +endif +MAT$(PROJECT)_SO := matlab/+$(PROJECT)/private/$(PROJECT)_.$(MAT_SO_EXT) + +############################## +# Derive generated files +############################## +# The generated files for protocol buffers +PROTO_GEN_HEADER_SRCS := $(addprefix $(PROTO_BUILD_DIR)/, \ + $(notdir ${PROTO_SRCS:.proto=.pb.h})) +PROTO_GEN_HEADER := $(addprefix $(PROTO_BUILD_INCLUDE_DIR)/, \ + $(notdir ${PROTO_SRCS:.proto=.pb.h})) +PROTO_GEN_CC := $(addprefix $(BUILD_DIR)/, ${PROTO_SRCS:.proto=.pb.cc}) 
+PY_PROTO_BUILD_DIR := python/$(PROJECT)/proto +PY_PROTO_INIT := python/$(PROJECT)/proto/__init__.py +PROTO_GEN_PY := $(foreach file,${PROTO_SRCS:.proto=_pb2.py}, \ + $(PY_PROTO_BUILD_DIR)/$(notdir $(file))) +# The objects corresponding to the source files +# These objects will be linked into the final shared library, so we +# exclude the tool, example, and test objects. +CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o}) +CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o}) +PROTO_OBJS := ${PROTO_GEN_CC:.cc=.o} +OBJS := $(PROTO_OBJS) $(CXX_OBJS) $(CU_OBJS) +# tool, example, and test objects +TOOL_OBJS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o}) +TOOL_BUILD_DIR := $(BUILD_DIR)/tools +TEST_CXX_BUILD_DIR := $(BUILD_DIR)/src/$(PROJECT)/test +TEST_CU_BUILD_DIR := $(BUILD_DIR)/cuda/src/$(PROJECT)/test +TEST_CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o}) +TEST_CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o}) +TEST_OBJS := $(TEST_CXX_OBJS) $(TEST_CU_OBJS) +GTEST_OBJ := $(addprefix $(BUILD_DIR)/, ${GTEST_SRC:.cpp=.o}) +EXAMPLE_OBJS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o}) +# Output files for automatic dependency generation +DEPS := ${CXX_OBJS:.o=.d} ${CU_OBJS:.o=.d} ${TEST_CXX_OBJS:.o=.d} \ + ${TEST_CU_OBJS:.o=.d} $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d} +# tool, example, and test bins +TOOL_BINS := ${TOOL_OBJS:.o=.bin} +EXAMPLE_BINS := ${EXAMPLE_OBJS:.o=.bin} +# symlinks to tool bins without the ".bin" extension +TOOL_BIN_LINKS := ${TOOL_BINS:.bin=} +# Put the test binaries in build/test for convenience. 
+TEST_BIN_DIR := $(BUILD_DIR)/test +TEST_CU_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \ + $(foreach obj,$(TEST_CU_OBJS),$(basename $(notdir $(obj)))))) +TEST_CXX_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \ + $(foreach obj,$(TEST_CXX_OBJS),$(basename $(notdir $(obj)))))) +TEST_BINS := $(TEST_CXX_BINS) $(TEST_CU_BINS) +# TEST_ALL_BIN is the test binary that links caffe dynamically. +TEST_ALL_BIN := $(TEST_BIN_DIR)/test_all.testbin + +############################## +# Derive compiler warning dump locations +############################## +WARNS_EXT := warnings.txt +CXX_WARNS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o.$(WARNS_EXT)}) +CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o.$(WARNS_EXT)}) +TOOL_WARNS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o.$(WARNS_EXT)}) +EXAMPLE_WARNS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o.$(WARNS_EXT)}) +TEST_WARNS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o.$(WARNS_EXT)}) +TEST_CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o.$(WARNS_EXT)}) +ALL_CXX_WARNS := $(CXX_WARNS) $(TOOL_WARNS) $(EXAMPLE_WARNS) $(TEST_WARNS) +ALL_CU_WARNS := $(CU_WARNS) $(TEST_CU_WARNS) +ALL_WARNS := $(ALL_CXX_WARNS) $(ALL_CU_WARNS) + +EMPTY_WARN_REPORT := $(BUILD_DIR)/.$(WARNS_EXT) +NONEMPTY_WARN_REPORT := $(BUILD_DIR)/$(WARNS_EXT) + +############################## +# Derive include and lib directories +############################## +CUDA_INCLUDE_DIR := $(CUDA_DIR)/include + +CUDA_LIB_DIR := +# add /lib64 only if it exists +ifneq ("$(wildcard $(CUDA_DIR)/lib64)","") + CUDA_LIB_DIR += $(CUDA_DIR)/lib64 +endif +CUDA_LIB_DIR += $(CUDA_DIR)/lib + +INCLUDE_DIRS += $(BUILD_INCLUDE_DIR) ./src ./include +ifneq ($(CPU_ONLY), 1) + INCLUDE_DIRS += $(CUDA_INCLUDE_DIR) + LIBRARY_DIRS += $(CUDA_LIB_DIR) + LIBRARIES := cudart cublas curand +endif +LIBRARIES += glog gflags protobuf leveldb snappy \ + lmdb boost_system hdf5_hl hdf5 m \ + opencv_core opencv_highgui opencv_imgproc 
#opencv_imgcodecs +PYTHON_LIBRARIES := boost_python python2.7 +WARNINGS := -Wall -Wno-sign-compare + +############################## +# Set build directories +############################## + +DISTRIBUTE_DIR ?= distribute +DISTRIBUTE_SUBDIRS := $(DISTRIBUTE_DIR)/bin $(DISTRIBUTE_DIR)/lib +DIST_ALIASES := dist +ifneq ($(strip $(DISTRIBUTE_DIR)),distribute) + DIST_ALIASES += distribute +endif + +ALL_BUILD_DIRS := $(sort $(BUILD_DIR) $(addprefix $(BUILD_DIR)/, $(SRC_DIRS)) \ + $(addprefix $(BUILD_DIR)/cuda/, $(SRC_DIRS)) \ + $(LIB_BUILD_DIR) $(TEST_BIN_DIR) $(PY_PROTO_BUILD_DIR) $(LINT_OUTPUT_DIR) \ + $(DISTRIBUTE_SUBDIRS) $(PROTO_BUILD_INCLUDE_DIR)) + +############################## +# Set directory for Doxygen-generated documentation +############################## +DOXYGEN_CONFIG_FILE ?= ./.Doxyfile +# should be the same as OUTPUT_DIRECTORY in the .Doxyfile +DOXYGEN_OUTPUT_DIR ?= ./doxygen +DOXYGEN_COMMAND ?= doxygen +# All the files that might have Doxygen documentation. +DOXYGEN_SOURCES := $(shell find \ + src/$(PROJECT) \ + include/$(PROJECT) \ + python/ \ + matlab/ \ + examples \ + tools \ + -name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh" -or \ + -name "*.py" -or -name "*.m") +DOXYGEN_SOURCES += $(DOXYGEN_CONFIG_FILE) + + +############################## +# Configure build +############################## + +# Determine platform +UNAME := $(shell uname -s) +ifeq ($(UNAME), Linux) + LINUX := 1 +else ifeq ($(UNAME), Darwin) + OSX := 1 +endif + +# Linux +ifeq ($(LINUX), 1) + CXX ?= /usr/bin/g++ + GCCVERSION := $(shell $(CXX) -dumpversion | cut -f1,2 -d.) + # older versions of gcc are too dumb to build boost with -Wuninitalized + ifeq ($(shell echo | awk '{exit $(GCCVERSION) < 4.6;}'), 1) + WARNINGS += -Wno-uninitialized + endif + # boost::thread is reasonably called boost_thread (compare OS X) + # We will also explicitly add stdc++ to the link target. 
+ LIBRARIES += boost_thread stdc++ +endif + +# OS X: +# clang++ instead of g++ +# libstdc++ for NVCC compatibility on OS X >= 10.9 with CUDA < 7.0 +ifeq ($(OSX), 1) + CXX := /usr/bin/clang++ + ifneq ($(CPU_ONLY), 1) + CUDA_VERSION := $(shell $(CUDA_DIR)/bin/nvcc -V | grep -o 'release \d' | grep -o '\d') + ifeq ($(shell echo | awk '{exit $(CUDA_VERSION) < 7.0;}'), 1) + CXXFLAGS += -stdlib=libstdc++ + LINKFLAGS += -stdlib=libstdc++ + endif + # clang throws this warning for cuda headers + WARNINGS += -Wno-unneeded-internal-declaration + endif + # gtest needs to use its own tuple to not conflict with clang + COMMON_FLAGS += -DGTEST_USE_OWN_TR1_TUPLE=1 + # boost::thread is called boost_thread-mt to mark multithreading on OS X + LIBRARIES += boost_thread-mt + # we need to explicitly ask for the rpath to be obeyed + DYNAMIC_FLAGS := -install_name @rpath/libcaffe.so + ORIGIN := @loader_path +else + ORIGIN := \$$ORIGIN +endif + +# Custom compiler +ifdef CUSTOM_CXX + CXX := $(CUSTOM_CXX) +endif + +# Static linking +ifneq (,$(findstring clang++,$(CXX))) + STATIC_LINK_COMMAND := -Wl,-force_load $(STATIC_NAME) +else ifneq (,$(findstring g++,$(CXX))) + STATIC_LINK_COMMAND := -Wl,--whole-archive $(STATIC_NAME) -Wl,--no-whole-archive +else + # The following line must not be indented with a tab, since we are not inside a target + $(error Cannot static link with the $(CXX) compiler) +endif + +# Debugging +ifeq ($(DEBUG), 1) + COMMON_FLAGS += -DDEBUG -g -O0 + NVCCFLAGS += -G +else + COMMON_FLAGS += -DNDEBUG -O2 +endif + +# cuDNN acceleration configuration. 
+ifeq ($(USE_CUDNN), 1) + LIBRARIES += cudnn + COMMON_FLAGS += -DUSE_CUDNN +endif + +# CPU-only configuration +ifeq ($(CPU_ONLY), 1) + OBJS := $(PROTO_OBJS) $(CXX_OBJS) + TEST_OBJS := $(TEST_CXX_OBJS) + TEST_BINS := $(TEST_CXX_BINS) + ALL_WARNS := $(ALL_CXX_WARNS) + TEST_FILTER := --gtest_filter="-*GPU*" + COMMON_FLAGS += -DCPU_ONLY +endif + +# Python layer support +ifeq ($(WITH_PYTHON_LAYER), 1) + COMMON_FLAGS += -DWITH_PYTHON_LAYER + LIBRARIES += $(PYTHON_LIBRARIES) +endif + +# BLAS configuration (default = ATLAS) +BLAS ?= atlas +ifeq ($(BLAS), mkl) + # MKL + LIBRARIES += mkl_rt + COMMON_FLAGS += -DUSE_MKL + MKL_DIR ?= /opt/intel/mkl + BLAS_INCLUDE ?= $(MKL_DIR)/include + BLAS_LIB ?= $(MKL_DIR)/lib $(MKL_DIR)/lib/intel64 +else ifeq ($(BLAS), open) + # OpenBLAS + LIBRARIES += openblas +else + # ATLAS + ifeq ($(LINUX), 1) + ifeq ($(BLAS), atlas) + # Linux simply has cblas and atlas + LIBRARIES += cblas atlas + endif + else ifeq ($(OSX), 1) + # OS X packages atlas as the vecLib framework + LIBRARIES += cblas + # 10.10 has accelerate while 10.9 has veclib + XCODE_CLT_VER := $(shell pkgutil --pkg-info=com.apple.pkg.CLTools_Executables | grep -o 'version: 6') + ifneq (,$(findstring version: 6,$(XCODE_CLT_VER))) + BLAS_INCLUDE ?= /System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/ + LDFLAGS += -framework Accelerate + else + BLAS_INCLUDE ?= /System/Library/Frameworks/vecLib.framework/Versions/Current/Headers/ + LDFLAGS += -framework vecLib + endif + endif +endif +INCLUDE_DIRS += $(BLAS_INCLUDE) +LIBRARY_DIRS += $(BLAS_LIB) + +LIBRARY_DIRS += $(LIB_BUILD_DIR) + +# Automatic dependency generation (nvcc is handled separately) +CXXFLAGS += -MMD -MP + +# Complete build flags. 
+COMMON_FLAGS += $(foreach includedir,$(INCLUDE_DIRS),-I$(includedir)) +CXXFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) +NVCCFLAGS += -ccbin=$(CXX) -Xcompiler -fPIC $(COMMON_FLAGS) +# mex may invoke an older gcc that is too liberal with -Wuninitalized +MATLAB_CXXFLAGS := $(CXXFLAGS) -Wno-uninitialized +LINKFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) + +USE_PKG_CONFIG ?= 0 +ifeq ($(USE_PKG_CONFIG), 1) + PKG_CONFIG := $(shell pkg-config opencv --libs) +else + PKG_CONFIG := +endif +LDFLAGS += $(foreach librarydir,$(LIBRARY_DIRS),-L$(librarydir)) $(PKG_CONFIG) \ + $(foreach library,$(LIBRARIES),-l$(library)) +PYTHON_LDFLAGS := $(LDFLAGS) $(foreach library,$(PYTHON_LIBRARIES),-l$(library)) + +# 'superclean' target recursively* deletes all files ending with an extension +# in $(SUPERCLEAN_EXTS) below. This may be useful if you've built older +# versions of Caffe that do not place all generated files in a location known +# to the 'clean' target. +# +# 'supercleanlist' will list the files to be deleted by make superclean. +# +# * Recursive with the exception that symbolic links are never followed, per the +# default behavior of 'find'. +SUPERCLEAN_EXTS := .so .a .o .bin .testbin .pb.cc .pb.h _pb2.py .cuo + +# Set the sub-targets of the 'everything' target. +EVERYTHING_TARGETS := all py$(PROJECT) test warn lint +# Only build matcaffe as part of "everything" if MATLAB_DIR is specified. 
+ifneq ($(MATLAB_DIR),) + EVERYTHING_TARGETS += mat$(PROJECT) +endif + +############################## +# Define build targets +############################## +.PHONY: all lib test clean docs linecount lint lintclean tools examples $(DIST_ALIASES) \ + py mat py$(PROJECT) mat$(PROJECT) proto runtest \ + superclean supercleanlist supercleanfiles warn everything + +all: lib tools examples + +lib: $(STATIC_NAME) $(DYNAMIC_NAME) + +everything: $(EVERYTHING_TARGETS) + +linecount: + cloc --read-lang-def=$(PROJECT).cloc \ + src/$(PROJECT) include/$(PROJECT) tools examples \ + python matlab + +lint: $(EMPTY_LINT_REPORT) + +lintclean: + @ $(RM) -r $(LINT_OUTPUT_DIR) $(EMPTY_LINT_REPORT) $(NONEMPTY_LINT_REPORT) + +docs: $(DOXYGEN_OUTPUT_DIR) + @ cd ./docs ; ln -sfn ../$(DOXYGEN_OUTPUT_DIR)/html doxygen + +$(DOXYGEN_OUTPUT_DIR): $(DOXYGEN_CONFIG_FILE) $(DOXYGEN_SOURCES) + $(DOXYGEN_COMMAND) $(DOXYGEN_CONFIG_FILE) + +$(EMPTY_LINT_REPORT): $(LINT_OUTPUTS) | $(BUILD_DIR) + @ cat $(LINT_OUTPUTS) > $@ + @ if [ -s "$@" ]; then \ + cat $@; \ + mv $@ $(NONEMPTY_LINT_REPORT); \ + echo "Found one or more lint errors."; \ + exit 1; \ + fi; \ + $(RM) $(NONEMPTY_LINT_REPORT); \ + echo "No lint errors!"; + +$(LINT_OUTPUTS): $(LINT_OUTPUT_DIR)/%.lint.txt : % $(LINT_SCRIPT) | $(LINT_OUTPUT_DIR) + @ mkdir -p $(dir $@) + @ python $(LINT_SCRIPT) $< 2>&1 \ + | grep -v "^Done processing " \ + | grep -v "^Total errors found: 0" \ + > $@ \ + || true + +test: $(TEST_ALL_BIN) $(TEST_ALL_DYNLINK_BIN) $(TEST_BINS) + +tools: $(TOOL_BINS) $(TOOL_BIN_LINKS) + +examples: $(EXAMPLE_BINS) + +py$(PROJECT): py + +py: $(PY$(PROJECT)_SO) $(PROTO_GEN_PY) + +$(PY$(PROJECT)_SO): $(PY$(PROJECT)_SRC) $(PY$(PROJECT)_HXX) | $(DYNAMIC_NAME) + @ echo CXX/LD -o $@ $< + $(Q)$(CXX) -shared -o $@ $(PY$(PROJECT)_SRC) \ + -o $@ $(LINKFLAGS) -l$(PROJECT) $(PYTHON_LDFLAGS) \ + -Wl,-rpath,$(ORIGIN)/../../build/lib + +mat$(PROJECT): mat + +mat: $(MAT$(PROJECT)_SO) + +$(MAT$(PROJECT)_SO): $(MAT$(PROJECT)_SRC) $(STATIC_NAME) + @ if [ 
-z "$(MATLAB_DIR)" ]; then \ + echo "MATLAB_DIR must be specified in $(CONFIG_FILE)" \ + "to build mat$(PROJECT)."; \ + exit 1; \ + fi + @ echo MEX $< + $(Q)$(MATLAB_DIR)/bin/mex $(MAT$(PROJECT)_SRC) \ + CXX="$(CXX)" \ + CXXFLAGS="\$$CXXFLAGS $(MATLAB_CXXFLAGS)" \ + CXXLIBS="\$$CXXLIBS $(STATIC_LINK_COMMAND) $(LDFLAGS)" -output $@ + @ if [ -f "$(PROJECT)_.d" ]; then \ + mv -f $(PROJECT)_.d $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d}; \ + fi + +runtest: $(TEST_ALL_BIN) + $(TOOL_BUILD_DIR)/caffe + $(TEST_ALL_BIN) $(TEST_GPUID) --gtest_shuffle $(TEST_FILTER) + +pytest: py + cd python; python -m unittest discover -s caffe/test + +mattest: mat + cd matlab; $(MATLAB_DIR)/bin/matlab -nodisplay -r 'caffe.run_tests(), exit()' + +warn: $(EMPTY_WARN_REPORT) + +$(EMPTY_WARN_REPORT): $(ALL_WARNS) | $(BUILD_DIR) + @ cat $(ALL_WARNS) > $@ + @ if [ -s "$@" ]; then \ + cat $@; \ + mv $@ $(NONEMPTY_WARN_REPORT); \ + echo "Compiler produced one or more warnings."; \ + exit 1; \ + fi; \ + $(RM) $(NONEMPTY_WARN_REPORT); \ + echo "No compiler warnings!"; + +$(ALL_WARNS): %.o.$(WARNS_EXT) : %.o + +$(BUILD_DIR_LINK): $(BUILD_DIR)/.linked + +# Create a target ".linked" in this BUILD_DIR to tell Make that the "build" link +# is currently correct, then delete the one in the OTHER_BUILD_DIR in case it +# exists and $(DEBUG) is toggled later. 
+$(BUILD_DIR)/.linked: + @ mkdir -p $(BUILD_DIR) + @ $(RM) $(OTHER_BUILD_DIR)/.linked + @ $(RM) -r $(BUILD_DIR_LINK) + @ ln -s $(BUILD_DIR) $(BUILD_DIR_LINK) + @ touch $@ + +$(ALL_BUILD_DIRS): | $(BUILD_DIR_LINK) + @ mkdir -p $@ + +$(DYNAMIC_NAME): $(OBJS) | $(LIB_BUILD_DIR) + @ echo LD -o $@ + $(Q)$(CXX) -shared -o $@ $(OBJS) $(LINKFLAGS) $(LDFLAGS) $(DYNAMIC_FLAGS) + +$(STATIC_NAME): $(OBJS) | $(LIB_BUILD_DIR) + @ echo AR -o $@ + $(Q)ar rcs $@ $(OBJS) + +$(BUILD_DIR)/%.o: %.cpp | $(ALL_BUILD_DIRS) + @ echo CXX $< + $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ + || (cat $@.$(WARNS_EXT); exit 1) + @ cat $@.$(WARNS_EXT) + +$(PROTO_BUILD_DIR)/%.pb.o: $(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_GEN_HEADER) \ + | $(PROTO_BUILD_DIR) + @ echo CXX $< + $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ + || (cat $@.$(WARNS_EXT); exit 1) + @ cat $@.$(WARNS_EXT) + +$(BUILD_DIR)/cuda/%.o: %.cu | $(ALL_BUILD_DIRS) + @ echo NVCC $< + $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -M $< -o ${@:.o=.d} \ + -odir $(@D) + $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -c $< -o $@ 2> $@.$(WARNS_EXT) \ + || (cat $@.$(WARNS_EXT); exit 1) + @ cat $@.$(WARNS_EXT) + +$(TEST_ALL_BIN): $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \ + | $(DYNAMIC_NAME) $(TEST_BIN_DIR) + @ echo CXX/LD -o $@ $< + $(Q)$(CXX) $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \ + -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib + +$(TEST_CU_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CU_BUILD_DIR)/%.o \ + $(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR) + @ echo LD $< + $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \ + -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib + +$(TEST_CXX_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CXX_BUILD_DIR)/%.o \ + $(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR) + @ echo LD $< + $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \ + -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib + +# Target for extension-less symlinks to tool 
binaries with extension '*.bin'. +$(TOOL_BUILD_DIR)/%: $(TOOL_BUILD_DIR)/%.bin | $(TOOL_BUILD_DIR) + @ $(RM) $@ + @ ln -s $(abspath $<) $@ + +$(TOOL_BINS): %.bin : %.o | $(DYNAMIC_NAME) + @ echo CXX/LD -o $@ + $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \ + -Wl,-rpath,$(ORIGIN)/../lib + +$(EXAMPLE_BINS): %.bin : %.o | $(DYNAMIC_NAME) + @ echo CXX/LD -o $@ + $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \ + -Wl,-rpath,$(ORIGIN)/../../lib + +proto: $(PROTO_GEN_CC) $(PROTO_GEN_HEADER) + +$(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_BUILD_DIR)/%.pb.h : \ + $(PROTO_SRC_DIR)/%.proto | $(PROTO_BUILD_DIR) + @ echo PROTOC $< + $(Q)protoc --proto_path=$(PROTO_SRC_DIR) --cpp_out=$(PROTO_BUILD_DIR) $< + +$(PY_PROTO_BUILD_DIR)/%_pb2.py : $(PROTO_SRC_DIR)/%.proto \ + $(PY_PROTO_INIT) | $(PY_PROTO_BUILD_DIR) + @ echo PROTOC \(python\) $< + $(Q)protoc --proto_path=$(PROTO_SRC_DIR) --python_out=$(PY_PROTO_BUILD_DIR) $< + +$(PY_PROTO_INIT): | $(PY_PROTO_BUILD_DIR) + touch $(PY_PROTO_INIT) + +clean: + @- $(RM) -rf $(ALL_BUILD_DIRS) + @- $(RM) -rf $(OTHER_BUILD_DIR) + @- $(RM) -rf $(BUILD_DIR_LINK) + @- $(RM) -rf $(DISTRIBUTE_DIR) + @- $(RM) $(PY$(PROJECT)_SO) + @- $(RM) $(MAT$(PROJECT)_SO) + +supercleanfiles: + $(eval SUPERCLEAN_FILES := $(strip \ + $(foreach ext,$(SUPERCLEAN_EXTS), $(shell find . 
-name '*$(ext)' \ + -not -path './data/*')))) + +supercleanlist: supercleanfiles + @ \ + if [ -z "$(SUPERCLEAN_FILES)" ]; then \ + echo "No generated files found."; \ + else \ + echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \ + fi + +superclean: clean supercleanfiles + @ \ + if [ -z "$(SUPERCLEAN_FILES)" ]; then \ + echo "No generated files found."; \ + else \ + echo "Deleting the following generated files:"; \ + echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \ + $(RM) $(SUPERCLEAN_FILES); \ + fi + +$(DIST_ALIASES): $(DISTRIBUTE_DIR) + +$(DISTRIBUTE_DIR): all py | $(DISTRIBUTE_SUBDIRS) + # add include + cp -r include $(DISTRIBUTE_DIR)/ + mkdir -p $(DISTRIBUTE_DIR)/include/caffe/proto + cp $(PROTO_GEN_HEADER_SRCS) $(DISTRIBUTE_DIR)/include/caffe/proto + # add tool and example binaries + cp $(TOOL_BINS) $(DISTRIBUTE_DIR)/bin + cp $(EXAMPLE_BINS) $(DISTRIBUTE_DIR)/bin + # add libraries + cp $(STATIC_NAME) $(DISTRIBUTE_DIR)/lib + cp $(DYNAMIC_NAME) $(DISTRIBUTE_DIR)/lib + # add python - it's not the standard way, indeed... + cp -r python $(DISTRIBUTE_DIR)/python + +-include $(DEPS) diff --git a/Makefile_REMOTE_61727 b/Makefile_REMOTE_61727 new file mode 100644 index 00000000000..2b918ffd26d --- /dev/null +++ b/Makefile_REMOTE_61727 @@ -0,0 +1,631 @@ +PROJECT := caffe + +CONFIG_FILE := Makefile.config +# Explicitly check for the config file, otherwise make -k will proceed anyway. +ifeq ($(wildcard $(CONFIG_FILE)),) +$(error $(CONFIG_FILE) not found. See $(CONFIG_FILE).example.) +endif +include $(CONFIG_FILE) + +BUILD_DIR_LINK := $(BUILD_DIR) +ifeq ($(RELEASE_BUILD_DIR),) + RELEASE_BUILD_DIR := .$(BUILD_DIR)_release +endif +ifeq ($(DEBUG_BUILD_DIR),) + DEBUG_BUILD_DIR := .$(BUILD_DIR)_debug +endif + +DEBUG ?= 0 +ifeq ($(DEBUG), 1) + BUILD_DIR := $(DEBUG_BUILD_DIR) + OTHER_BUILD_DIR := $(RELEASE_BUILD_DIR) +else + BUILD_DIR := $(RELEASE_BUILD_DIR) + OTHER_BUILD_DIR := $(DEBUG_BUILD_DIR) +endif + +# All of the directories containing code. 
+SRC_DIRS := $(shell find * -type d -exec bash -c "find {} -maxdepth 1 \ + \( -name '*.cpp' -o -name '*.proto' \) | grep -q ." \; -print) + +# The target shared library name +LIB_BUILD_DIR := $(BUILD_DIR)/lib +STATIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).a +DYNAMIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).so + +############################## +# Get all source files +############################## +# CXX_SRCS are the source files excluding the test ones. +CXX_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cpp" -name "*.cpp") +# CU_SRCS are the cuda source files +CU_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cu" -name "*.cu") +# TEST_SRCS are the test source files +TEST_MAIN_SRC := src/$(PROJECT)/test/test_caffe_main.cpp +TEST_SRCS := $(shell find src/$(PROJECT) -name "test_*.cpp") +TEST_SRCS := $(filter-out $(TEST_MAIN_SRC), $(TEST_SRCS)) +TEST_CU_SRCS := $(shell find src/$(PROJECT) -name "test_*.cu") +GTEST_SRC := src/gtest/gtest-all.cpp +# TOOL_SRCS are the source files for the tool binaries +TOOL_SRCS := $(shell find tools -name "*.cpp") +# EXAMPLE_SRCS are the source files for the example binaries +EXAMPLE_SRCS := $(shell find examples -name "*.cpp") +# BUILD_INCLUDE_DIR contains any generated header files we want to include. +BUILD_INCLUDE_DIR := $(BUILD_DIR)/src +# PROTO_SRCS are the protocol buffer definitions +PROTO_SRC_DIR := src/$(PROJECT)/proto +PROTO_SRCS := $(wildcard $(PROTO_SRC_DIR)/*.proto) +# PROTO_BUILD_DIR will contain the .cc and obj files generated from +# PROTO_SRCS; PROTO_BUILD_INCLUDE_DIR will contain the .h header files +PROTO_BUILD_DIR := $(BUILD_DIR)/$(PROTO_SRC_DIR) +PROTO_BUILD_INCLUDE_DIR := $(BUILD_INCLUDE_DIR)/$(PROJECT)/proto +# NONGEN_CXX_SRCS includes all source/header files except those generated +# automatically (e.g., by proto). 
+NONGEN_CXX_SRCS := $(shell find \ + src/$(PROJECT) \ + include/$(PROJECT) \ + python/$(PROJECT) \ + matlab/+$(PROJECT)/private \ + examples \ + tools \ + -name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh") +LINT_SCRIPT := scripts/cpp_lint.py +LINT_OUTPUT_DIR := $(BUILD_DIR)/.lint +LINT_EXT := lint.txt +LINT_OUTPUTS := $(addsuffix .$(LINT_EXT), $(addprefix $(LINT_OUTPUT_DIR)/, $(NONGEN_CXX_SRCS))) +EMPTY_LINT_REPORT := $(BUILD_DIR)/.$(LINT_EXT) +NONEMPTY_LINT_REPORT := $(BUILD_DIR)/$(LINT_EXT) +# PY$(PROJECT)_SRC is the python wrapper for $(PROJECT) +PY$(PROJECT)_SRC := python/$(PROJECT)/_$(PROJECT).cpp +PY$(PROJECT)_SO := python/$(PROJECT)/_$(PROJECT).so +PY$(PROJECT)_HXX := include/$(PROJECT)/python_layer.hpp +# MAT$(PROJECT)_SRC is the mex entrance point of matlab package for $(PROJECT) +MAT$(PROJECT)_SRC := matlab/+$(PROJECT)/private/$(PROJECT)_.cpp +ifneq ($(MATLAB_DIR),) + MAT_SO_EXT := $(shell $(MATLAB_DIR)/bin/mexext) +endif +MAT$(PROJECT)_SO := matlab/+$(PROJECT)/private/$(PROJECT)_.$(MAT_SO_EXT) + +############################## +# Derive generated files +############################## +# The generated files for protocol buffers +PROTO_GEN_HEADER_SRCS := $(addprefix $(PROTO_BUILD_DIR)/, \ + $(notdir ${PROTO_SRCS:.proto=.pb.h})) +PROTO_GEN_HEADER := $(addprefix $(PROTO_BUILD_INCLUDE_DIR)/, \ + $(notdir ${PROTO_SRCS:.proto=.pb.h})) +PROTO_GEN_CC := $(addprefix $(BUILD_DIR)/, ${PROTO_SRCS:.proto=.pb.cc}) +PY_PROTO_BUILD_DIR := python/$(PROJECT)/proto +PY_PROTO_INIT := python/$(PROJECT)/proto/__init__.py +PROTO_GEN_PY := $(foreach file,${PROTO_SRCS:.proto=_pb2.py}, \ + $(PY_PROTO_BUILD_DIR)/$(notdir $(file))) +# The objects corresponding to the source files +# These objects will be linked into the final shared library, so we +# exclude the tool, example, and test objects. 
+CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o}) +CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o}) +PROTO_OBJS := ${PROTO_GEN_CC:.cc=.o} +OBJS := $(PROTO_OBJS) $(CXX_OBJS) $(CU_OBJS) +# tool, example, and test objects +TOOL_OBJS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o}) +TOOL_BUILD_DIR := $(BUILD_DIR)/tools +TEST_CXX_BUILD_DIR := $(BUILD_DIR)/src/$(PROJECT)/test +TEST_CU_BUILD_DIR := $(BUILD_DIR)/cuda/src/$(PROJECT)/test +TEST_CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o}) +TEST_CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o}) +TEST_OBJS := $(TEST_CXX_OBJS) $(TEST_CU_OBJS) +GTEST_OBJ := $(addprefix $(BUILD_DIR)/, ${GTEST_SRC:.cpp=.o}) +EXAMPLE_OBJS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o}) +# Output files for automatic dependency generation +DEPS := ${CXX_OBJS:.o=.d} ${CU_OBJS:.o=.d} ${TEST_CXX_OBJS:.o=.d} \ + ${TEST_CU_OBJS:.o=.d} $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d} +# tool, example, and test bins +TOOL_BINS := ${TOOL_OBJS:.o=.bin} +EXAMPLE_BINS := ${EXAMPLE_OBJS:.o=.bin} +# symlinks to tool bins without the ".bin" extension +TOOL_BIN_LINKS := ${TOOL_BINS:.bin=} +# Put the test binaries in build/test for convenience. +TEST_BIN_DIR := $(BUILD_DIR)/test +TEST_CU_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \ + $(foreach obj,$(TEST_CU_OBJS),$(basename $(notdir $(obj)))))) +TEST_CXX_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \ + $(foreach obj,$(TEST_CXX_OBJS),$(basename $(notdir $(obj)))))) +TEST_BINS := $(TEST_CXX_BINS) $(TEST_CU_BINS) +# TEST_ALL_BIN is the test binary that links caffe dynamically. 
+TEST_ALL_BIN := $(TEST_BIN_DIR)/test_all.testbin + +############################## +# Derive compiler warning dump locations +############################## +WARNS_EXT := warnings.txt +CXX_WARNS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o.$(WARNS_EXT)}) +CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o.$(WARNS_EXT)}) +TOOL_WARNS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o.$(WARNS_EXT)}) +EXAMPLE_WARNS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o.$(WARNS_EXT)}) +TEST_WARNS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o.$(WARNS_EXT)}) +TEST_CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o.$(WARNS_EXT)}) +ALL_CXX_WARNS := $(CXX_WARNS) $(TOOL_WARNS) $(EXAMPLE_WARNS) $(TEST_WARNS) +ALL_CU_WARNS := $(CU_WARNS) $(TEST_CU_WARNS) +ALL_WARNS := $(ALL_CXX_WARNS) $(ALL_CU_WARNS) + +EMPTY_WARN_REPORT := $(BUILD_DIR)/.$(WARNS_EXT) +NONEMPTY_WARN_REPORT := $(BUILD_DIR)/$(WARNS_EXT) + +############################## +# Derive include and lib directories +############################## +CUDA_INCLUDE_DIR := $(CUDA_DIR)/include + +CUDA_LIB_DIR := +# add /lib64 only if it exists +ifneq ("$(wildcard $(CUDA_DIR)/lib64)","") + CUDA_LIB_DIR += $(CUDA_DIR)/lib64 +endif +CUDA_LIB_DIR += $(CUDA_DIR)/lib + +INCLUDE_DIRS += $(BUILD_INCLUDE_DIR) ./src ./include +ifneq ($(CPU_ONLY), 1) + INCLUDE_DIRS += $(CUDA_INCLUDE_DIR) + LIBRARY_DIRS += $(CUDA_LIB_DIR) + LIBRARIES := cudart cublas curand +endif +LIBRARIES += glog gflags protobuf leveldb snappy \ + lmdb boost_system hdf5_hl hdf5 m \ + opencv_core opencv_highgui opencv_imgproc #opencv_imgcodecs +PYTHON_LIBRARIES := boost_python python2.7 +WARNINGS := -Wall -Wno-sign-compare + +############################## +# Set build directories +############################## + +DISTRIBUTE_DIR ?= distribute +DISTRIBUTE_SUBDIRS := $(DISTRIBUTE_DIR)/bin $(DISTRIBUTE_DIR)/lib +DIST_ALIASES := dist +ifneq ($(strip $(DISTRIBUTE_DIR)),distribute) + DIST_ALIASES += distribute +endif + +ALL_BUILD_DIRS := $(sort 
$(BUILD_DIR) $(addprefix $(BUILD_DIR)/, $(SRC_DIRS)) \ + $(addprefix $(BUILD_DIR)/cuda/, $(SRC_DIRS)) \ + $(LIB_BUILD_DIR) $(TEST_BIN_DIR) $(PY_PROTO_BUILD_DIR) $(LINT_OUTPUT_DIR) \ + $(DISTRIBUTE_SUBDIRS) $(PROTO_BUILD_INCLUDE_DIR)) + +############################## +# Set directory for Doxygen-generated documentation +############################## +DOXYGEN_CONFIG_FILE ?= ./.Doxyfile +# should be the same as OUTPUT_DIRECTORY in the .Doxyfile +DOXYGEN_OUTPUT_DIR ?= ./doxygen +DOXYGEN_COMMAND ?= doxygen +# All the files that might have Doxygen documentation. +DOXYGEN_SOURCES := $(shell find \ + src/$(PROJECT) \ + include/$(PROJECT) \ + python/ \ + matlab/ \ + examples \ + tools \ + -name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh" -or \ + -name "*.py" -or -name "*.m") +DOXYGEN_SOURCES += $(DOXYGEN_CONFIG_FILE) + + +############################## +# Configure build +############################## + +# Determine platform +UNAME := $(shell uname -s) +ifeq ($(UNAME), Linux) + LINUX := 1 +else ifeq ($(UNAME), Darwin) + OSX := 1 +endif + +# Linux +ifeq ($(LINUX), 1) + CXX ?= /usr/bin/g++ + GCCVERSION := $(shell $(CXX) -dumpversion | cut -f1,2 -d.) + # older versions of gcc are too dumb to build boost with -Wuninitalized + ifeq ($(shell echo $(GCCVERSION) \< 4.6 | bc), 1) + WARNINGS += -Wno-uninitialized + endif + # boost::thread is reasonably called boost_thread (compare OS X) + # We will also explicitly add stdc++ to the link target. 
+ LIBRARIES += boost_thread stdc++ +endif + +# OS X: +# clang++ instead of g++ +# libstdc++ for NVCC compatibility on OS X >= 10.9 with CUDA < 7.0 +ifeq ($(OSX), 1) + CXX := /usr/bin/clang++ + ifneq ($(CPU_ONLY), 1) + CUDA_VERSION := $(shell $(CUDA_DIR)/bin/nvcc -V | grep -o 'release \d' | grep -o '\d') + ifeq ($(shell echo $(CUDA_VERSION) \< 7.0 | bc), 1) + CXXFLAGS += -stdlib=libstdc++ + LINKFLAGS += -stdlib=libstdc++ + endif + # clang throws this warning for cuda headers + WARNINGS += -Wno-unneeded-internal-declaration + endif + # gtest needs to use its own tuple to not conflict with clang + COMMON_FLAGS += -DGTEST_USE_OWN_TR1_TUPLE=1 + # boost::thread is called boost_thread-mt to mark multithreading on OS X + LIBRARIES += boost_thread-mt + # we need to explicitly ask for the rpath to be obeyed + DYNAMIC_FLAGS := -install_name @rpath/libcaffe.so + ORIGIN := @loader_path +else + ORIGIN := \$$ORIGIN +endif + +# Custom compiler +ifdef CUSTOM_CXX + CXX := $(CUSTOM_CXX) +endif + +# Static linking +ifneq (,$(findstring clang++,$(CXX))) + STATIC_LINK_COMMAND := -Wl,-force_load $(STATIC_NAME) +else ifneq (,$(findstring g++,$(CXX))) + STATIC_LINK_COMMAND := -Wl,--whole-archive $(STATIC_NAME) -Wl,--no-whole-archive +else + # The following line must not be indented with a tab, since we are not inside a target + $(error Cannot static link with the $(CXX) compiler) +endif + +# Debugging +ifeq ($(DEBUG), 1) + COMMON_FLAGS += -DDEBUG -g -O0 + NVCCFLAGS += -G +else + COMMON_FLAGS += -DNDEBUG -O2 +endif + +# cuDNN acceleration configuration. 
+ifeq ($(USE_CUDNN), 1) + LIBRARIES += cudnn + COMMON_FLAGS += -DUSE_CUDNN +endif + +# CPU-only configuration +ifeq ($(CPU_ONLY), 1) + OBJS := $(PROTO_OBJS) $(CXX_OBJS) + TEST_OBJS := $(TEST_CXX_OBJS) + TEST_BINS := $(TEST_CXX_BINS) + ALL_WARNS := $(ALL_CXX_WARNS) + TEST_FILTER := --gtest_filter="-*GPU*" + COMMON_FLAGS += -DCPU_ONLY +endif + +# Python layer support +ifeq ($(WITH_PYTHON_LAYER), 1) + COMMON_FLAGS += -DWITH_PYTHON_LAYER + LIBRARIES += $(PYTHON_LIBRARIES) +endif + +# BLAS configuration (default = ATLAS) +BLAS ?= atlas +ifeq ($(BLAS), mkl) + # MKL + LIBRARIES += mkl_rt + COMMON_FLAGS += -DUSE_MKL + MKL_DIR ?= /opt/intel/mkl + BLAS_INCLUDE ?= $(MKL_DIR)/include + BLAS_LIB ?= $(MKL_DIR)/lib $(MKL_DIR)/lib/intel64 +else ifeq ($(BLAS), open) + # OpenBLAS + LIBRARIES += openblas +else + # ATLAS + ifeq ($(LINUX), 1) + ifeq ($(BLAS), atlas) + # Linux simply has cblas and atlas + LIBRARIES += cblas atlas + endif + else ifeq ($(OSX), 1) + # OS X packages atlas as the vecLib framework + LIBRARIES += cblas + # 10.10 has accelerate while 10.9 has veclib + XCODE_CLT_VER := $(shell pkgutil --pkg-info=com.apple.pkg.CLTools_Executables | grep -o 'version: 6') + ifneq (,$(findstring version: 6,$(XCODE_CLT_VER))) + BLAS_INCLUDE ?= /System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/ + LDFLAGS += -framework Accelerate + else + BLAS_INCLUDE ?= /System/Library/Frameworks/vecLib.framework/Versions/Current/Headers/ + LDFLAGS += -framework vecLib + endif + endif +endif +INCLUDE_DIRS += $(BLAS_INCLUDE) +LIBRARY_DIRS += $(BLAS_LIB) + +LIBRARY_DIRS += $(LIB_BUILD_DIR) + +# Automatic dependency generation (nvcc is handled separately) +CXXFLAGS += -MMD -MP + +# Complete build flags. 
+COMMON_FLAGS += $(foreach includedir,$(INCLUDE_DIRS),-I$(includedir)) +CXXFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) +NVCCFLAGS += -ccbin=$(CXX) -Xcompiler -fPIC $(COMMON_FLAGS) +# mex may invoke an older gcc that is too liberal with -Wuninitalized +MATLAB_CXXFLAGS := $(CXXFLAGS) -Wno-uninitialized +LINKFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) + +USE_PKG_CONFIG ?= 0 +ifeq ($(USE_PKG_CONFIG), 1) + PKG_CONFIG := $(shell pkg-config opencv --libs) +else + PKG_CONFIG := +endif +LDFLAGS += $(foreach librarydir,$(LIBRARY_DIRS),-L$(librarydir)) $(PKG_CONFIG) \ + $(foreach library,$(LIBRARIES),-l$(library)) +PYTHON_LDFLAGS := $(LDFLAGS) $(foreach library,$(PYTHON_LIBRARIES),-l$(library)) + +# 'superclean' target recursively* deletes all files ending with an extension +# in $(SUPERCLEAN_EXTS) below. This may be useful if you've built older +# versions of Caffe that do not place all generated files in a location known +# to the 'clean' target. +# +# 'supercleanlist' will list the files to be deleted by make superclean. +# +# * Recursive with the exception that symbolic links are never followed, per the +# default behavior of 'find'. +SUPERCLEAN_EXTS := .so .a .o .bin .testbin .pb.cc .pb.h _pb2.py .cuo + +# Set the sub-targets of the 'everything' target. +EVERYTHING_TARGETS := all py$(PROJECT) test warn lint +# Only build matcaffe as part of "everything" if MATLAB_DIR is specified. 
+ifneq ($(MATLAB_DIR),) + EVERYTHING_TARGETS += mat$(PROJECT) +endif + +############################## +# Define build targets +############################## +.PHONY: all test clean docs linecount lint lintclean tools examples $(DIST_ALIASES) \ + py mat py$(PROJECT) mat$(PROJECT) proto runtest \ + superclean supercleanlist supercleanfiles warn everything + +all: $(STATIC_NAME) $(DYNAMIC_NAME) tools examples + +everything: $(EVERYTHING_TARGETS) + +linecount: + cloc --read-lang-def=$(PROJECT).cloc \ + src/$(PROJECT) include/$(PROJECT) tools examples \ + python matlab + +lint: $(EMPTY_LINT_REPORT) + +lintclean: + @ $(RM) -r $(LINT_OUTPUT_DIR) $(EMPTY_LINT_REPORT) $(NONEMPTY_LINT_REPORT) + +docs: $(DOXYGEN_OUTPUT_DIR) + @ cd ./docs ; ln -sfn ../$(DOXYGEN_OUTPUT_DIR)/html doxygen + +$(DOXYGEN_OUTPUT_DIR): $(DOXYGEN_CONFIG_FILE) $(DOXYGEN_SOURCES) + $(DOXYGEN_COMMAND) $(DOXYGEN_CONFIG_FILE) + +$(EMPTY_LINT_REPORT): $(LINT_OUTPUTS) | $(BUILD_DIR) + @ cat $(LINT_OUTPUTS) > $@ + @ if [ -s "$@" ]; then \ + cat $@; \ + mv $@ $(NONEMPTY_LINT_REPORT); \ + echo "Found one or more lint errors."; \ + exit 1; \ + fi; \ + $(RM) $(NONEMPTY_LINT_REPORT); \ + echo "No lint errors!"; + +$(LINT_OUTPUTS): $(LINT_OUTPUT_DIR)/%.lint.txt : % $(LINT_SCRIPT) | $(LINT_OUTPUT_DIR) + @ mkdir -p $(dir $@) + @ python $(LINT_SCRIPT) $< 2>&1 \ + | grep -v "^Done processing " \ + | grep -v "^Total errors found: 0" \ + > $@ \ + || true + +test: $(TEST_ALL_BIN) $(TEST_ALL_DYNLINK_BIN) $(TEST_BINS) + +tools: $(TOOL_BINS) $(TOOL_BIN_LINKS) + +examples: $(EXAMPLE_BINS) + +py$(PROJECT): py + +py: $(PY$(PROJECT)_SO) $(PROTO_GEN_PY) + +$(PY$(PROJECT)_SO): $(PY$(PROJECT)_SRC) $(PY$(PROJECT)_HXX) | $(DYNAMIC_NAME) + @ echo CXX/LD -o $@ $< + $(Q)$(CXX) -shared -o $@ $(PY$(PROJECT)_SRC) \ + -o $@ $(LINKFLAGS) -l$(PROJECT) $(PYTHON_LDFLAGS) \ + -Wl,-rpath,$(ORIGIN)/../../build/lib + +mat$(PROJECT): mat + +mat: $(MAT$(PROJECT)_SO) + +$(MAT$(PROJECT)_SO): $(MAT$(PROJECT)_SRC) $(STATIC_NAME) + @ if [ -z 
"$(MATLAB_DIR)" ]; then \ + echo "MATLAB_DIR must be specified in $(CONFIG_FILE)" \ + "to build mat$(PROJECT)."; \ + exit 1; \ + fi + @ echo MEX $< + $(Q)$(MATLAB_DIR)/bin/mex $(MAT$(PROJECT)_SRC) \ + CXX="$(CXX)" \ + CXXFLAGS="\$$CXXFLAGS $(MATLAB_CXXFLAGS)" \ + CXXLIBS="\$$CXXLIBS $(STATIC_LINK_COMMAND) $(LDFLAGS)" -output $@ + @ if [ -f "$(PROJECT)_.d" ]; then \ + mv -f $(PROJECT)_.d $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d}; \ + fi + +runtest: $(TEST_ALL_BIN) + $(TOOL_BUILD_DIR)/caffe + $(TEST_ALL_BIN) $(TEST_GPUID) --gtest_shuffle $(TEST_FILTER) + +pytest: py + cd python; python -m unittest discover -s caffe/test + +mattest: mat + cd matlab; $(MATLAB_DIR)/bin/matlab -nodisplay -r 'caffe.run_tests(), exit()' + +warn: $(EMPTY_WARN_REPORT) + +$(EMPTY_WARN_REPORT): $(ALL_WARNS) | $(BUILD_DIR) + @ cat $(ALL_WARNS) > $@ + @ if [ -s "$@" ]; then \ + cat $@; \ + mv $@ $(NONEMPTY_WARN_REPORT); \ + echo "Compiler produced one or more warnings."; \ + exit 1; \ + fi; \ + $(RM) $(NONEMPTY_WARN_REPORT); \ + echo "No compiler warnings!"; + +$(ALL_WARNS): %.o.$(WARNS_EXT) : %.o + +$(BUILD_DIR_LINK): $(BUILD_DIR)/.linked + +# Create a target ".linked" in this BUILD_DIR to tell Make that the "build" link +# is currently correct, then delete the one in the OTHER_BUILD_DIR in case it +# exists and $(DEBUG) is toggled later. 
+$(BUILD_DIR)/.linked: + @ mkdir -p $(BUILD_DIR) + @ $(RM) $(OTHER_BUILD_DIR)/.linked + @ $(RM) -r $(BUILD_DIR_LINK) + @ ln -s $(BUILD_DIR) $(BUILD_DIR_LINK) + @ touch $@ + +$(ALL_BUILD_DIRS): | $(BUILD_DIR_LINK) + @ mkdir -p $@ + +$(DYNAMIC_NAME): $(OBJS) | $(LIB_BUILD_DIR) + @ echo LD -o $@ + $(Q)$(CXX) -shared -o $@ $(OBJS) $(LINKFLAGS) $(LDFLAGS) $(DYNAMIC_FLAGS) + +$(STATIC_NAME): $(OBJS) | $(LIB_BUILD_DIR) + @ echo AR -o $@ + $(Q)ar rcs $@ $(OBJS) + +$(BUILD_DIR)/%.o: %.cpp | $(ALL_BUILD_DIRS) + @ echo CXX $< + $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ + || (cat $@.$(WARNS_EXT); exit 1) + @ cat $@.$(WARNS_EXT) + +$(PROTO_BUILD_DIR)/%.pb.o: $(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_GEN_HEADER) \ + | $(PROTO_BUILD_DIR) + @ echo CXX $< + $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ + || (cat $@.$(WARNS_EXT); exit 1) + @ cat $@.$(WARNS_EXT) + +$(BUILD_DIR)/cuda/%.o: %.cu | $(ALL_BUILD_DIRS) + @ echo NVCC $< + $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -M $< -o ${@:.o=.d} \ + -odir $(@D) + $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -c $< -o $@ 2> $@.$(WARNS_EXT) \ + || (cat $@.$(WARNS_EXT); exit 1) + @ cat $@.$(WARNS_EXT) + +$(TEST_ALL_BIN): $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \ + | $(DYNAMIC_NAME) $(TEST_BIN_DIR) + @ echo CXX/LD -o $@ $< + $(Q)$(CXX) $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \ + -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib + +$(TEST_CU_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CU_BUILD_DIR)/%.o \ + $(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR) + @ echo LD $< + $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \ + -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib + +$(TEST_CXX_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CXX_BUILD_DIR)/%.o \ + $(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR) + @ echo LD $< + $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \ + -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib + +# Target for extension-less symlinks to tool 
binaries with extension '*.bin'. +$(TOOL_BUILD_DIR)/%: $(TOOL_BUILD_DIR)/%.bin | $(TOOL_BUILD_DIR) + @ $(RM) $@ + @ ln -s $(abspath $<) $@ + +$(TOOL_BINS): %.bin : %.o | $(DYNAMIC_NAME) + @ echo CXX/LD -o $@ + $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \ + -Wl,-rpath,$(ORIGIN)/../lib + +$(EXAMPLE_BINS): %.bin : %.o | $(DYNAMIC_NAME) + @ echo CXX/LD -o $@ + $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \ + -Wl,-rpath,$(ORIGIN)/../../lib + +proto: $(PROTO_GEN_CC) $(PROTO_GEN_HEADER) + +$(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_BUILD_DIR)/%.pb.h : \ + $(PROTO_SRC_DIR)/%.proto | $(PROTO_BUILD_DIR) + @ echo PROTOC $< + $(Q)protoc --proto_path=$(PROTO_SRC_DIR) --cpp_out=$(PROTO_BUILD_DIR) $< + +$(PY_PROTO_BUILD_DIR)/%_pb2.py : $(PROTO_SRC_DIR)/%.proto \ + $(PY_PROTO_INIT) | $(PY_PROTO_BUILD_DIR) + @ echo PROTOC \(python\) $< + $(Q)protoc --proto_path=$(PROTO_SRC_DIR) --python_out=$(PY_PROTO_BUILD_DIR) $< + +$(PY_PROTO_INIT): | $(PY_PROTO_BUILD_DIR) + touch $(PY_PROTO_INIT) + +clean: + @- $(RM) -rf $(ALL_BUILD_DIRS) + @- $(RM) -rf $(OTHER_BUILD_DIR) + @- $(RM) -rf $(BUILD_DIR_LINK) + @- $(RM) -rf $(DISTRIBUTE_DIR) + @- $(RM) $(PY$(PROJECT)_SO) + @- $(RM) $(MAT$(PROJECT)_SO) + +supercleanfiles: + $(eval SUPERCLEAN_FILES := $(strip \ + $(foreach ext,$(SUPERCLEAN_EXTS), $(shell find . 
-name '*$(ext)' \ + -not -path './data/*')))) + +supercleanlist: supercleanfiles + @ \ + if [ -z "$(SUPERCLEAN_FILES)" ]; then \ + echo "No generated files found."; \ + else \ + echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \ + fi + +superclean: clean supercleanfiles + @ \ + if [ -z "$(SUPERCLEAN_FILES)" ]; then \ + echo "No generated files found."; \ + else \ + echo "Deleting the following generated files:"; \ + echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \ + $(RM) $(SUPERCLEAN_FILES); \ + fi + +$(DIST_ALIASES): $(DISTRIBUTE_DIR) + +$(DISTRIBUTE_DIR): all py | $(DISTRIBUTE_SUBDIRS) + # add include + cp -r include $(DISTRIBUTE_DIR)/ + mkdir -p $(DISTRIBUTE_DIR)/include/caffe/proto + cp $(PROTO_GEN_HEADER_SRCS) $(DISTRIBUTE_DIR)/include/caffe/proto + # add tool and example binaries + cp $(TOOL_BINS) $(DISTRIBUTE_DIR)/bin + cp $(EXAMPLE_BINS) $(DISTRIBUTE_DIR)/bin + # add libraries + cp $(STATIC_NAME) $(DISTRIBUTE_DIR)/lib + cp $(DYNAMIC_NAME) $(DISTRIBUTE_DIR)/lib + # add python - it's not the standard way, indeed... 
+ cp -r python $(DISTRIBUTE_DIR)/python + +-include $(DEPS) diff --git a/examples/siamese/lfw_siamese.prototxt.orig b/examples/siamese/lfw_siamese.prototxt.orig new file mode 100644 index 00000000000..b76f3e6bcd6 --- /dev/null +++ b/examples/siamese/lfw_siamese.prototxt.orig @@ -0,0 +1,117 @@ +name: "lfw_siamese" +input: "data" +<<<<<<< 86eaaa954fbb3c3cfd1d225c98e16c38af7b34a9:examples/siamese/lfw_siamese.prototxt +input_dim: 10000 +======= +input_dim: 2760 +>>>>>>> rearrange the training samples selection codes:examples/triplet/3d_triplet.prototxt +input_dim: 1 +input_dim: 150 +input_dim: 80 +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 500 + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "ip1" + top: "ip2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 10 + } +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip2" + top: "feat" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 2 + } +} diff --git a/examples/triplet/convert_3d_triplet_data.cpp.orig 
b/examples/triplet/convert_3d_triplet_data.cpp.orig new file mode 100644 index 00000000000..ce1981d90da --- /dev/null +++ b/examples/triplet/convert_3d_triplet_data.cpp.orig @@ -0,0 +1,202 @@ +// Usage: +// convert_3d_data input_image_file input_label_file output_db_file +#include // NOLINT(readability/streams) +#include +#include +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/math_functions.hpp" +#include "glog/logging.h" +#include "google/protobuf/text_format.h" +#include "leveldb/db.h" +#include "stdint.h" + +uint32_t swap_endian(uint32_t val) { + val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); + return (val << 16) | (val >> 16); +} + +void read_image(std::ifstream* image_file, std::ifstream* label_file, + uint32_t index, uint32_t rows, uint32_t cols, + char* pixels, char* label_temp, signed char* label) { + image_file->seekg(index * rows * cols + 16); + image_file->read(pixels, rows * cols); + label_file->seekg(index * 4 + 8); + label_file->read(label_temp, 4); + for (int i = 0; i < 4; i++) + *(label+i) = (signed char)*(label_temp+i); +} + +void convert_dataset(const char* image_filename, const char* label_filename, + const char* db_filename, const char* class_number) { + int class_num = atoi(class_number); + // Open files + std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); + std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); + CHECK(image_file) << "Unable to open file " << image_filename; + CHECK(label_file) << "Unable to open file " << label_filename; + // Read the magic and the meta data + uint32_t magic; + uint32_t num_items; + uint32_t num_labels; + uint32_t rows; + uint32_t cols; + + image_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + CHECK_EQ(magic, 2051) << "Incorrect image file magic."; + label_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + CHECK_EQ(magic, 2050) << "Incorrect label file magic."; + 
image_file.read(reinterpret_cast(&num_items), 4); + num_items = swap_endian(num_items); + label_file.read(reinterpret_cast(&num_labels), 4); + num_labels = swap_endian(num_labels); + CHECK_EQ(num_items, num_labels); + image_file.read(reinterpret_cast(&rows), 4); + rows = swap_endian(rows); + image_file.read(reinterpret_cast(&cols), 4); + cols = swap_endian(cols); + + // Open leveldb + leveldb::DB* db; + leveldb::Options options; + options.create_if_missing = true; + options.error_if_exists = true; + leveldb::Status status = leveldb::DB::Open( + options, db_filename, &db); + CHECK(status.ok()) << "Failed to open leveldb " << db_filename + << ". Is it already existing?"; + + char* label_temp = new char[4]; // label for unsigned char* + signed char* label_i = new signed char[4]; // label for triplet + signed char* label_j = new signed char[4]; + signed char* label_k = new signed char[4]; + signed char* label_l = new signed char[4]; // label for pair wise + signed char* label_m = new signed char[4]; + char* pixels1 = new char[rows * cols]; + char* pixels2 = new char[rows * cols]; + char* pixels3 = new char[rows * cols]; + char* pixels4 = new char[rows * cols]; + char* pixels5 = new char[rows * cols]; + const int kMaxKeyLength = 10; + char key[kMaxKeyLength]; + std::string value; + caffe::Datum datum; + datum.set_channels(1); + datum.set_height(rows); + datum.set_width(cols); + LOG(INFO) << "A total of " << num_items << " items."; + LOG(INFO) << "Rows: " << rows << " Cols: " << cols; + int counter = 0; + for (unsigned int times = 0; times < 5; ++times) { + // iteration in the samples of all class + for (unsigned int itemid = 0; itemid < num_items/class_num; ++itemid) { + // iteration in the samples in one class + for (unsigned int class_ind = 0; class_ind < class_num; ++class_ind) { + // use reference sample one by one at each iteration + int i = itemid % num_items + class_ind*num_items/class_num; + int j = caffe::caffe_rng_rand() % num_items; // pick triplet groups + 
int k = caffe::caffe_rng_rand() % num_items; + int l = caffe::caffe_rng_rand() % num_items; // pick pair wise groups + int m = caffe::caffe_rng_rand() % num_items; + read_image(&image_file, &label_file, i, rows, cols, // read triplet + pixels1, label_temp, label_i); + read_image(&image_file, &label_file, j, rows, cols, + pixels2, label_temp, label_j); + read_image(&image_file, &label_file, k, rows, cols, + pixels3, label_temp, label_k); + read_image(&image_file, &label_file, l, rows, cols, // read pair wise + pixels4, label_temp, label_l); + read_image(&image_file, &label_file, m, rows, cols, + pixels5, label_temp, label_m); + + bool pair_pass = false; + bool triplet1_pass = false; + bool triplet2_pass = false; + bool triplet3_class_same = false; + bool triplet3_pass = false; + + int ij_diff_x = static_cast(*(label_i+1)-*(label_j+1)); + int ij_diff_y = static_cast(*(label_i+2)-*(label_j+2)); + int ij_diff_z = static_cast(*(label_i+3)-*(label_j+3)); + int im_diff_x = static_cast(*(label_i+1)-*(label_m+1)); + int im_diff_y = static_cast(*(label_i+2)-*(label_m+2)); + int im_diff_z = static_cast(*(label_i+3)-*(label_m+3)); + + int ij_x = ij_diff_x*ij_diff_x; + int ij_y = ij_diff_y*ij_diff_y; + int ij_z = ij_diff_z*ij_diff_z; + int im_x = im_diff_x*im_diff_x; + int im_y = im_diff_y*im_diff_y; + int im_z = im_diff_z*im_diff_z; + + float dist_ij = std::sqrt(ij_x + ij_y + ij_z); + float dist_im = std::sqrt(im_x + im_y + im_z); + if (*label_i == *label_j && dist_ij < 100/2) + pair_pass = true; + if (pair_pass && (*label_i != *label_k)) + triplet1_pass = true; + if (pair_pass && (*label_i != *label_l)) + triplet2_pass = true; + if (pair_pass && (*label_i == *label_m)) + triplet3_class_same = true; + if (triplet3_class_same && dist_im > 100*sqrt(2)) + triplet3_pass = true; + if (pair_pass && triplet1_pass && triplet2_pass && triplet3_pass) { + datum.set_data(pixels1, rows*cols); // set data + datum.set_label(int(*label_i)); + datum.SerializeToString(&value); + snprintf(key, 
kMaxKeyLength, "%08d", counter); + db->Put(leveldb::WriteOptions(), std::string(key), value); + counter++; + datum.set_data(pixels2, rows*cols); // set data + datum.set_label(int(*label_j)); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", counter); + db->Put(leveldb::WriteOptions(), std::string(key), value); + counter++; + datum.set_data(pixels3, rows*cols); // set data + datum.set_label(int(*label_k)); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", counter); + db->Put(leveldb::WriteOptions(), std::string(key), value); + counter++; + datum.set_data(pixels4, rows*cols); // set data + datum.set_label(int(*label_l)); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", counter); + db->Put(leveldb::WriteOptions(), std::string(key), value); + counter++; + datum.set_data(pixels5, rows*cols); // set data + datum.set_label(int(*label_m)); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", counter); + db->Put(leveldb::WriteOptions(), std::string(key), value); + counter++; + } else { + class_ind--; + } + } // iteration in the samples of all class + } // iteration in the samples in one class + } // iteration in times + delete db; + delete pixels1; + delete pixels2; + delete pixels3; + delete pixels4; + delete pixels5; +} + +int main(int argc, char** argv) { + if (argc != 5) { + printf("This script converts the images dataset to the leveldb format used\n" + "by caffe to train a triplet network.\n" + "Usage:\n" + " convert_3d_data input_image_file input_label_file " + "output_db_file class_number\n"); + } else { + google::InitGoogleLogging(argv[0]); + convert_dataset(argv[1], argv[2], argv[3], argv[4]); + } + return 0; +} diff --git a/examples/triplet/convert_lfw_triplet_data.cpp.orig b/examples/triplet/convert_lfw_triplet_data.cpp.orig new file mode 100644 index 00000000000..64c1db06e02 --- /dev/null +++ b/examples/triplet/convert_lfw_triplet_data.cpp.orig @@ -0,0 +1,148 @@ +// 
This script converts the MNIST dataset to the leveldb format used +// by caffe to train siamese network. +// Usage: +// convert_mnist_data input_image_file input_label_file output_db_file +// The MNIST dataset could be downloaded at +// http://yann.lecun.com/exdb/mnist/ +#include // NOLINT(readability/streams) +#include + +#include "glog/logging.h" +#include "google/protobuf/text_format.h" +#include "leveldb/db.h" +#include "stdint.h" + +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/math_functions.hpp" + +uint32_t swap_endian(uint32_t val) { + val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); + return (val << 16) | (val >> 16); +} + +void read_image(std::ifstream* image_file, std::ifstream* label_file, + uint32_t index, uint32_t rows, uint32_t cols, + char* pixels, char* label) { + image_file->seekg(index * rows * cols + 16); + image_file->read(pixels, rows * cols); + label_file->seekg(index + 8); + label_file->read(label, 1); +} + +void convert_dataset(const char* image_filename, const char* label_filename, + const char* db_filename) { + // Open files + std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); + std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); + CHECK(image_file) << "Unable to open file " << image_filename; + CHECK(label_file) << "Unable to open file " << label_filename; + // Read the magic and the meta data + uint32_t magic; + uint32_t num_items; + uint32_t num_labels; + uint32_t rows; + uint32_t cols; + + image_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + label_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + image_file.read(reinterpret_cast(&num_items), 4); + num_items = swap_endian(num_items); + label_file.read(reinterpret_cast(&num_labels), 4); + num_labels = swap_endian(num_labels); + CHECK_EQ(num_items, num_labels); + image_file.read(reinterpret_cast(&rows), 4); + rows = swap_endian(rows); + 
image_file.read(reinterpret_cast(&cols), 4); + cols = swap_endian(cols); + + // Open leveldb + leveldb::DB* db; + leveldb::Options options; + options.create_if_missing = true; + options.error_if_exists = true; + leveldb::Status status = leveldb::DB::Open( + options, db_filename, &db); + CHECK(status.ok()) << "Failed to open leveldb " << db_filename + << ". Is it already existing?"; + + char label_i; + char label_j; + char label_k; +<<<<<<< 85bc5292fc9e9df8aeb2b6b6dfc9b0b274ee4db0:examples/triplet/convert_lfw_triplet_data.cpp + char* pixels = new char[3 * rows * cols]; +======= + char label_l; // label for pair wise + char label_m; + char* pixels = new char[5 * rows * cols]; +>>>>>>> ready for a review on triplet training using the 3D data, net work definition is the same as described in paper:examples/triplet/convert_3d_triplet_data.cpp + const int kMaxKeyLength = 10; + char key[kMaxKeyLength]; + std::string value; + + caffe::Datum datum; + datum.set_channels(3); // one channel for each image in the pair + datum.set_height(rows); + datum.set_width(cols); + LOG(INFO) << "A total of " << num_items << " items."; + LOG(INFO) << "Rows: " << rows << " Cols: " << cols; + for (int itemid = 0; itemid < num_items; ++itemid) { + // pick triplet groups + int i = caffe::caffe_rng_rand() % num_items; + int j = caffe::caffe_rng_rand() % num_items; + int k = caffe::caffe_rng_rand() % num_items; +<<<<<<< 85bc5292fc9e9df8aeb2b6b6dfc9b0b274ee4db0:examples/triplet/convert_lfw_triplet_data.cpp +======= + // pick pair wise groups + int l = caffe::caffe_rng_rand() % num_items; + int m = caffe::caffe_rng_rand() % num_items; + // read triplet groups +>>>>>>> ready for a review on triplet training using the 3D data, net work definition is the same as described in paper:examples/triplet/convert_3d_triplet_data.cpp + read_image(&image_file, &label_file, i, rows, cols, + pixels, &label_i); + read_image(&image_file, &label_file, j, rows, cols, + pixels + (rows * cols), &label_j); + 
read_image(&image_file, &label_file, k, rows, cols, + pixels + (2 * rows * cols), &label_k); +<<<<<<< 85bc5292fc9e9df8aeb2b6b6dfc9b0b274ee4db0:examples/triplet/convert_lfw_triplet_data.cpp +======= + // read pair wise groups + read_image(&image_file, &label_file, l, rows, cols, + pixels + (3 * rows * cols), &label_l); + read_image(&image_file, &label_file, m, rows, cols, + pixels + (4 * rows * cols), &label_m); +>>>>>>> ready for a review on triplet training using the 3D data, net work definition is the same as described in paper:examples/triplet/convert_3d_triplet_data.cpp + + datum.set_data(pixels, 3*rows*cols); + if (label_i == label_j && label_i != label_k) { + datum.set_label(1); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", itemid); + db->Put(leveldb::WriteOptions(), std::string(key), value); + } else { + itemid--; + datum.set_label(0); + } + } + + delete db; + delete pixels; +} + +int main(int argc, char** argv) { + if (argc != 4) { + printf("This script converts the MNIST dataset to the leveldb format used\n" + "by caffe to train a siamese network.\n" + "Usage:\n" + " convert_mnist_data input_image_file input_label_file " + "output_db_file\n" + "The MNIST dataset could be downloaded at\n" + " http://yann.lecun.com/exdb/mnist/\n" + "You should gunzip them after downloading.\n"); + } else { + google::InitGoogleLogging(argv[0]); + convert_dataset(argv[1], argv[2], argv[3]); + } + return 0; +} diff --git a/examples/triplet/convert_mnist_triplet_data.cpp.orig b/examples/triplet/convert_mnist_triplet_data.cpp.orig new file mode 100644 index 00000000000..93ee55c67f0 --- /dev/null +++ b/examples/triplet/convert_mnist_triplet_data.cpp.orig @@ -0,0 +1,150 @@ +// This script converts the MNIST dataset to the leveldb format used +// by caffe to train siamese network. 
+// Usage: +// convert_mnist_data input_image_file input_label_file output_db_file +// The MNIST dataset could be downloaded at +// http://yann.lecun.com/exdb/mnist/ +#include // NOLINT(readability/streams) +#include + +#include "glog/logging.h" +#include "google/protobuf/text_format.h" +#include "leveldb/db.h" +#include "stdint.h" + +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/math_functions.hpp" + +uint32_t swap_endian(uint32_t val) { + val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); + return (val << 16) | (val >> 16); +} + +void read_image(std::ifstream* image_file, std::ifstream* label_file, + uint32_t index, uint32_t rows, uint32_t cols, + char* pixels, char* label) { + image_file->seekg(index * rows * cols + 16); + image_file->read(pixels, rows * cols); + label_file->seekg(index + 8); + label_file->read(label, 1); +} + +void convert_dataset(const char* image_filename, const char* label_filename, + const char* db_filename) { + // Open files + std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); + std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); + CHECK(image_file) << "Unable to open file " << image_filename; + CHECK(label_file) << "Unable to open file " << label_filename; + // Read the magic and the meta data + uint32_t magic; + uint32_t num_items; + uint32_t num_labels; + uint32_t rows; + uint32_t cols; + + image_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + CHECK_EQ(magic, 2051) << "Incorrect image file magic."; + label_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + CHECK_EQ(magic, 2049) << "Incorrect label file magic."; + image_file.read(reinterpret_cast(&num_items), 4); + num_items = swap_endian(num_items); + label_file.read(reinterpret_cast(&num_labels), 4); + num_labels = swap_endian(num_labels); + CHECK_EQ(num_items, num_labels); + image_file.read(reinterpret_cast(&rows), 4); + rows = swap_endian(rows); + 
image_file.read(reinterpret_cast(&cols), 4); + cols = swap_endian(cols); + + // Open leveldb + leveldb::DB* db; + leveldb::Options options; + options.create_if_missing = true; + options.error_if_exists = true; + leveldb::Status status = leveldb::DB::Open( + options, db_filename, &db); + CHECK(status.ok()) << "Failed to open leveldb " << db_filename + << ". Is it already existing?"; + + char label_i; + char label_j; + char label_k; +<<<<<<< 85bc5292fc9e9df8aeb2b6b6dfc9b0b274ee4db0 + char* pixels = new char[3 * rows * cols]; +======= + char label_l; // label for pair wise + char label_m; + char* pixels = new char[5 * rows * cols]; +>>>>>>> ready for a review on triplet training using the 3D data, net work definition is the same as described in paper + const int kMaxKeyLength = 10; + char key[kMaxKeyLength]; + std::string value; + + caffe::Datum datum; + datum.set_channels(3); // one channel for each image in the pair + datum.set_height(rows); + datum.set_width(cols); + LOG(INFO) << "A total of " << num_items << " items."; + LOG(INFO) << "Rows: " << rows << " Cols: " << cols; + for (int itemid = 0; itemid < num_items; ++itemid) { + // pick triplet groups + int i = caffe::caffe_rng_rand() % num_items; + int j = caffe::caffe_rng_rand() % num_items; + int k = caffe::caffe_rng_rand() % num_items; +<<<<<<< 85bc5292fc9e9df8aeb2b6b6dfc9b0b274ee4db0 +======= + // pick pair wise groups + int l = caffe::caffe_rng_rand() % num_items; + int m = caffe::caffe_rng_rand() % num_items; + // read triplet groups +>>>>>>> ready for a review on triplet training using the 3D data, net work definition is the same as described in paper + read_image(&image_file, &label_file, i, rows, cols, + pixels, &label_i); + read_image(&image_file, &label_file, j, rows, cols, + pixels + (rows * cols), &label_j); + read_image(&image_file, &label_file, k, rows, cols, + pixels + (2 * rows * cols), &label_k); +<<<<<<< 85bc5292fc9e9df8aeb2b6b6dfc9b0b274ee4db0 +======= + // pair wise groups + 
read_image(&image_file, &label_file, l, rows, cols, + pixels + (3 * rows * cols), &label_l); + read_image(&image_file, &label_file, m, rows, cols, + pixels + (4 * rows * cols), &label_m); +>>>>>>> ready for a review on triplet training using the 3D data, net work definition is the same as described in paper + + datum.set_data(pixels, 3*rows*cols); + if (label_i == label_j && label_i != label_k) { + datum.set_label(1); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", itemid); + db->Put(leveldb::WriteOptions(), std::string(key), value); + } else { + itemid--; + datum.set_label(0); + } + } + + delete db; + delete pixels; +} + +int main(int argc, char** argv) { + if (argc != 4) { + printf("This script converts the MNIST dataset to the leveldb format used\n" + "by caffe to train a siamese network.\n" + "Usage:\n" + " convert_mnist_data input_image_file input_label_file " + "output_db_file\n" + "The MNIST dataset could be downloaded at\n" + " http://yann.lecun.com/exdb/mnist/\n" + "You should gunzip them after downloading.\n"); + } else { + google::InitGoogleLogging(argv[0]); + convert_dataset(argv[1], argv[2], argv[3]); + } + return 0; +} diff --git a/examples/triplet/create_3d_triplet.sh.orig b/examples/triplet/create_3d_triplet.sh.orig new file mode 100755 index 00000000000..3cd8ee469ce --- /dev/null +++ b/examples/triplet/create_3d_triplet.sh.orig @@ -0,0 +1,21 @@ +#!/usr/bin/env sh +# This script converts the mnist data into leveldb format. + +EXAMPLES=./build/examples/triplet +DATA=./data/linemod + +echo "Creating leveldb..." + +rm -rf ./examples/triplet/3d_triplet_train_leveldb +rm -rf ./examples/triplet/3d_triplet_test_leveldb + +$EXAMPLES/convert_3d_triplet_data.bin \ + $DATA/binary_image_train \ + $DATA/binary_label_train \ + ./examples/triplet/3d_triplet_train_leveldb +$EXAMPLES/convert_3d_triplet_data.bin \ + $DATA/binary_image_test \ + $DATA/binary_label_test \ + ./examples/triplet/3d_triplet_test_leveldb + +echo "Done." 
diff --git a/examples/triplet/lfw_triplet_solver.prototxt.orig b/examples/triplet/lfw_triplet_solver.prototxt.orig new file mode 100644 index 00000000000..678ee05e150 --- /dev/null +++ b/examples/triplet/lfw_triplet_solver.prototxt.orig @@ -0,0 +1,40 @@ +# The train/test net protocol buffer definition +net: "examples/triplet/lfw_triplet_train_test.prototxt" +# test_iter specifies how many forward passes the test should carry out. +<<<<<<< 86eaaa954fbb3c3cfd1d225c98e16c38af7b34a9:examples/triplet/lfw_triplet_solver.prototxt +# In the case of lfw, we have test batch size 100 and 100 test iterations, +# covering the full 10,000 testing images. +test_iter: 100 +# Carry out testing every 500 training iterations. +test_interval: 500 +======= +# In the case of 3d database, we have test batch size 23 and 23 test iterations, +# covering the full 2760 testing images. +test_iter: 100 +# Carry out testing every 500 training iterations. +test_interval: 100 +>>>>>>> rearrange the training samples selection codes:examples/triplet/3d_triplet_solver.prototxt +# The base learning rate, momentum and the weight decay of the network. 
+base_lr: 0.01 +momentum: 0.9 +weight_decay: 0.0000 +# The learning rate policy +lr_policy: "inv" +gamma: 0.0001 +power: 0.75 +# Display every 100 iterations +display: 100 +# The maximum number of iterations +<<<<<<< 86eaaa954fbb3c3cfd1d225c98e16c38af7b34a9:examples/triplet/lfw_triplet_solver.prototxt +max_iter: 50000 +# snapshot intermediate results +snapshot: 5000 +snapshot_prefix: "examples/triplet/lfw_triplet" +======= +max_iter: 20000 +# snapshot intermediate results +snapshot: 2000 +snapshot_prefix: "examples/triplet/3d_triplet" +>>>>>>> rearrange the training samples selection codes:examples/triplet/3d_triplet_solver.prototxt +# solver mode: CPU or GPU +solver_mode: CPU diff --git a/examples/triplet/mnist_orpe_train_test.prototxt.orig b/examples/triplet/mnist_orpe_train_test.prototxt.orig new file mode 100644 index 00000000000..f1a24f5a341 --- /dev/null +++ b/examples/triplet/mnist_orpe_train_test.prototxt.orig @@ -0,0 +1,184 @@ +name: "3d_triplet_train_test" +layer { + name: "data" + type: "Data" + top: "data" + top: "sim" + include { + phase: TRAIN + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/triplet/3d_triplet_train_leveldb" + batch_size: 250 + } +} +layer { + name: "data" + type: "Data" + top: "data" + top: "sim" + include { + phase: TEST + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/triplet/3d_triplet_test_leveldb" + batch_size: 250 + } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 16 + kernel_size: 8 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "pool1" + top: "pool1" +} +layer { + 
name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 7 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu2" + type: "ReLU" + bottom: "pool2" + top: "pool2" +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 256 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu3" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip1" + top: "feat" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 4 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "loss" + type: "TripletLoss" + bottom: "feat" + bottom: "sim" + top: "loss" + triplet_loss_param { + margin: 1 +<<<<<<< 027f1805a9555ff938185eb2ccb9a7599d35ba39:examples/triplet/mnist_orpe_train_test.prototxt +======= + losstype: 0 + num_triplets: 3 +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise:examples/triplet/3d_triplet_train_test.prototxt + } +} diff --git a/include/caffe/loss_layers.hpp.orig b/include/caffe/loss_layers.hpp.orig new file mode 100644 index 00000000000..11d86403ad6 --- /dev/null +++ b/include/caffe/loss_layers.hpp.orig @@ -0,0 +1,848 @@ +#ifndef CAFFE_LOSS_LAYERS_HPP_ +#define CAFFE_LOSS_LAYERS_HPP_ + +#include +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/layer.hpp" 
+#include "caffe/neuron_layers.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +const float kLOG_THRESHOLD = 1e-20; + +/** + * @brief Computes the classification accuracy for a one-of-many + * classification task. + */ +template +class AccuracyLayer : public Layer { + public: + /** + * @param param provides AccuracyParameter accuracy_param, + * with AccuracyLayer options: + * - top_k (\b optional, default 1). + * Sets the maximum rank @f$ k @f$ at which a prediction is considered + * correct. For example, if @f$ k = 5 @f$, a prediction is counted + * correct if the correct label is among the top 5 predicted labels. + */ + explicit AccuracyLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Accuracy"; } + virtual inline int ExactNumBottomBlobs() const { return 2; } + + // If there are two top blobs, then the second blob will contain + // accuracies per class. + virtual inline int MinTopBlobs() const { return 1; } + virtual inline int MaxTopBlos() const { return 2; } + + protected: + /** + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ x @f$, a Blob with values in + * @f$ [-\infty, +\infty] @f$ indicating the predicted score for each of + * the @f$ K = CHW @f$ classes. 
Each @f$ x_n @f$ is mapped to a predicted + * label @f$ \hat{l}_n @f$ given by its maximal index: + * @f$ \hat{l}_n = \arg\max\limits_k x_{nk} @f$ + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels @f$ l @f$, an integer-valued Blob with values + * @f$ l_n \in [0, 1, 2, ..., K - 1] @f$ + * indicating the correct class label among the @f$ K @f$ classes + * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * the computed accuracy: @f$ + * \frac{1}{N} \sum\limits_{n=1}^N \delta\{ \hat{l}_n = l_n \} + * @f$, where @f$ + * \delta\{\mathrm{condition}\} = \left\{ + * \begin{array}{lr} + * 1 & \mbox{if condition} \\ + * 0 & \mbox{otherwise} + * \end{array} \right. + * @f$ + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + + + /// @brief Not implemented -- AccuracyLayer cannot be used as a loss. + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + for (int i = 0; i < propagate_down.size(); ++i) { + if (propagate_down[i]) { NOT_IMPLEMENTED; } + } + } + + int label_axis_, outer_num_, inner_num_; + + int top_k_; + + /// Whether to ignore instances with a certain label. + bool has_ignore_label_; + /// The label indicating that an instance should be ignored. + int ignore_label_; + /// Keeps counts of the number of samples per class. + Blob nums_buffer_; +}; + +/** + * @brief An interface for Layer%s that take two Blob%s as input -- usually + * (1) predictions and (2) ground-truth labels -- and output a + * singleton Blob representing the loss. + * + * LossLayers are typically only capable of backpropagating to their first input + * -- the predictions. 
+ */ +template +class LossLayer : public Layer { + public: + explicit LossLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp( + const vector*>& bottom, const vector*>& top); + virtual void Reshape( + const vector*>& bottom, const vector*>& top); + + virtual inline int ExactNumBottomBlobs() const { return 2; } + + /** + * @brief For convenience and backwards compatibility, instruct the Net to + * automatically allocate a single top Blob for LossLayers, into which + * they output their singleton loss, (even if the user didn't specify + * one in the prototxt, etc.). + */ + virtual inline bool AutoTopBlobs() const { return true; } + virtual inline int ExactNumTopBlobs() const { return 1; } + /** + * We usually cannot backpropagate to the labels; ignore force_backward for + * these inputs. + */ + virtual inline bool AllowForceBackward(const int bottom_index) const { + return bottom_index != 1; + } +}; + +/** + * @brief Computes the contrastive loss @f$ + * E = \frac{1}{2N} \sum\limits_{n=1}^N \left(y\right) d + + * \left(1-y\right) \max \left(margin-d, 0\right)^2 + * @f$ where @f$ + * d = \left| \left| a_n - b_n \right| \right|_2 @f$. This can be + * used to train siamese networks. + * + * @param bottom input Blob vector (length 3) + * -# @f$ (N \times C \times 1 \times 1) @f$ + * the features @f$ a \in [-\infty, +\infty]@f$ + * -# @f$ (N \times C \times 1 \times 1) @f$ + * the features @f$ b \in [-\infty, +\infty]@f$ + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the binary similarity @f$ s \in [0, 1]@f$ + * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * the computed contrastive loss: @f$ E = + * \frac{1}{2N} \sum\limits_{n=1}^N \left(y\right) d + + * \left(1-y\right) \max \left(margin-d, 0\right)^2 + * @f$ where @f$ + * d = \left| \left| a_n - b_n \right| \right|_2 @f$. + * This can be used to train siamese networks. 
+ */ +template +class ContrastiveLossLayer : public LossLayer { + public: + explicit ContrastiveLossLayer(const LayerParameter& param) + : LossLayer(param), diff_() {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline int ExactNumBottomBlobs() const { return 3; } + virtual inline const char* type() const { return "ContrastiveLoss"; } + /** + * Unlike most loss layers, in the ContrastiveLossLayer we can backpropagate + * to the first two inputs. + */ + virtual inline bool AllowForceBackward(const int bottom_index) const { + return bottom_index != 2; + } + + protected: + /// @copydoc ContrastiveLossLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the Contrastive error gradient w.r.t. the inputs. + * + * Computes the gradients with respect to the two input vectors (bottom[0] and + * bottom[1]), but not the similarity label (bottom[2]). + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) + * @param propagate_down see Layer::Backward. 
+ * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times 1 \times 1) @f$ + * the features @f$a@f$; Backward fills their diff with + * gradients if propagate_down[0] + * -# @f$ (N \times C \times 1 \times 1) @f$ + * the features @f$b@f$; Backward fills their diff with gradients if + * propagate_down[1] + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + Blob diff_; // cached for backward pass + Blob dist_sq_; // cached for backward pass + Blob diff_sq_; // tmp storage for gpu forward pass + Blob summer_vec_; // tmp storage for gpu forward pass +}; + +template +class TripletLossLayer : public LossLayer { + public: + explicit TripletLossLayer(const LayerParameter& param) + : LossLayer(param), diff_() {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + +<<<<<<< 9a40f059e6e96f739ad502aae165c4bfdbf1f369 + virtual inline int ExactNumBottomBlobs() const { return 2; } +======= + virtual inline int ExactNumBottomBlobs() const { return 4; } +>>>>>>> triplet data generation and network update + virtual inline const char* type() const { return "TripletLoss"; } + /** + * Unlike most loss layers, in the TripletLossLayer we can backpropagate + * to the first three inputs. + */ + virtual inline bool AllowForceBackward(const int bottom_index) const { +<<<<<<< 9a40f059e6e96f739ad502aae165c4bfdbf1f369 + return bottom_index != 1; +======= + return bottom_index != 3; +>>>>>>> triplet data generation and network update + } + + protected: + /// @copydoc TripletLossLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the Triplet error gradient w.r.t. the inputs. 
+ * + * Computes the gradients with respect to the two input vectors (bottom[0] and + * bottom[1]), but not the similarity label (bottom[2]). + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) + * @param propagate_down see Layer::Backward. + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times 1 \times 1) @f$ + * the features @f$a@f$; Backward fills their diff with + * gradients if propagate_down[0] + * -# @f$ (N \times C \times 1 \times 1) @f$ + * the features @f$b@f$; Backward fills their diff with gradients if + * propagate_down[1] + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + Blob diff_; // cached for backward pass + Blob diff_pos; + Blob diff_neg; + Blob dist_sq_; // cached for backward pass + Blob dist_sq_pos; + Blob dist_sq_neg; + Blob diff_sq_; // tmp storage for gpu forward pass + Blob diff_sq_pos; + Blob diff_sq_neg; + Blob summer_vec_; // tmp storage for gpu forward pass +}; + +/** + * @brief Computes the Euclidean (L2) loss @f$ + * E = \frac{1}{2N} \sum\limits_{n=1}^N \left| \left| \hat{y}_n - y_n + * \right| \right|_2^2 @f$ for real-valued regression tasks. 
+ * + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ \hat{y} \in [-\infty, +\infty]@f$ + * -# @f$ (N \times C \times H \times W) @f$ + * the targets @f$ y \in [-\infty, +\infty]@f$ + * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * the computed Euclidean loss: @f$ E = + * \frac{1}{2n} \sum\limits_{n=1}^N \left| \left| \hat{y}_n - y_n + * \right| \right|_2^2 @f$ + * + * This can be used for least-squares regression tasks. An InnerProductLayer + * input to a EuclideanLossLayer exactly formulates a linear least squares + * regression problem. With non-zero weight decay the problem becomes one of + * ridge regression -- see src/caffe/test/test_sgd_solver.cpp for a concrete + * example wherein we check that the gradients computed for a Net with exactly + * this structure match hand-computed gradient formulas for ridge regression. + * + * (Note: Caffe, and SGD in general, is certainly \b not the best way to solve + * linear least squares problems! We use it only as an instructive example.) + */ +template +class EuclideanLossLayer : public LossLayer { + public: + explicit EuclideanLossLayer(const LayerParameter& param) + : LossLayer(param), diff_() {} + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "EuclideanLoss"; } + /** + * Unlike most loss layers, in the EuclideanLossLayer we can backpropagate + * to both inputs -- override to return true and always allow force_backward. + */ + virtual inline bool AllowForceBackward(const int bottom_index) const { + return true; + } + + protected: + /// @copydoc EuclideanLossLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the Euclidean error gradient w.r.t. the inputs. 
+ * + * Unlike other children of LossLayer, EuclideanLossLayer \b can compute + * gradients with respect to the label inputs bottom[1] (but still only will + * if propagate_down[1] is set, due to being produced by learnable parameters + * or if force_backward is set). In fact, this layer is "commutative" -- the + * result is the same regardless of the order of the two bottoms. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) + * @param propagate_down see Layer::Backward. + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$\hat{y}@f$; Backward fills their diff with + * gradients @f$ + * \frac{\partial E}{\partial \hat{y}} = + * \frac{1}{n} \sum\limits_{n=1}^N (\hat{y}_n - y_n) + * @f$ if propagate_down[0] + * -# @f$ (N \times C \times H \times W) @f$ + * the targets @f$y@f$; Backward fills their diff with gradients + * @f$ \frac{\partial E}{\partial y} = + * \frac{1}{n} \sum\limits_{n=1}^N (y_n - \hat{y}_n) + * @f$ if propagate_down[1] + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + Blob diff_; +}; + +/** + * @brief Computes the hinge loss for a one-of-many classification task. 
+ * + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ t @f$, a Blob with values in + * @f$ [-\infty, +\infty] @f$ indicating the predicted score for each of + * the @f$ K = CHW @f$ classes. In an SVM, @f$ t @f$ is the result of + * taking the inner product @f$ X^T W @f$ of the D-dimensional features + * @f$ X \in \mathcal{R}^{D \times N} @f$ and the learned hyperplane + * parameters @f$ W \in \mathcal{R}^{D \times K} @f$, so a Net with just + * an InnerProductLayer (with num_output = D) providing predictions to a + * HingeLossLayer and no other learnable parameters or losses is + * equivalent to an SVM. + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels @f$ l @f$, an integer-valued Blob with values + * @f$ l_n \in [0, 1, 2, ..., K - 1] @f$ + * indicating the correct class label among the @f$ K @f$ classes + * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * the computed hinge loss: @f$ E = + * \frac{1}{N} \sum\limits_{n=1}^N \sum\limits_{k=1}^K + * [\max(0, 1 - \delta\{l_n = k\} t_{nk})] ^ p + * @f$, for the @f$ L^p @f$ norm + * (defaults to @f$ p = 1 @f$, the L1 norm; L2 norm, as in L2-SVM, + * is also available), and @f$ + * \delta\{\mathrm{condition}\} = \left\{ + * \begin{array}{lr} + * 1 & \mbox{if condition} \\ + * -1 & \mbox{otherwise} + * \end{array} \right. + * @f$ + * + * In an SVM, @f$ t \in \mathcal{R}^{N \times K} @f$ is the result of taking + * the inner product @f$ X^T W @f$ of the features + * @f$ X \in \mathcal{R}^{D \times N} @f$ + * and the learned hyperplane parameters + * @f$ W \in \mathcal{R}^{D \times K} @f$. So, a Net with just an + * InnerProductLayer (with num_output = @f$k@f$) providing predictions to a + * HingeLossLayer is equivalent to an SVM (assuming it has no other learned + * outside the InnerProductLayer and no other losses outside the + * HingeLossLayer). 
+ */ +template +class HingeLossLayer : public LossLayer { + public: + explicit HingeLossLayer(const LayerParameter& param) + : LossLayer(param) {} + + virtual inline const char* type() const { return "HingeLoss"; } + + protected: + /// @copydoc HingeLossLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the hinge loss error gradient w.r.t. the predictions. + * + * Gradients cannot be computed with respect to the label inputs (bottom[1]), + * so this method ignores bottom[1] and requires !propagate_down[1], crashing + * if propagate_down[1] is set. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) + * @param propagate_down see Layer::Backward. + * propagate_down[1] must be false as we can't compute gradients with + * respect to the labels. + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$t@f$; Backward computes diff + * @f$ \frac{\partial E}{\partial t} @f$ + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels -- ignored as we can't compute their error gradients + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); +}; + +/** + * @brief A generalization of MultinomialLogisticLossLayer that takes an + * "information gain" (infogain) matrix specifying the "value" of all label + * pairs. 
+ * + * Equivalent to the MultinomialLogisticLossLayer if the infogain matrix is the + * identity. + * + * @param bottom input Blob vector (length 2-3) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ \hat{p} @f$, a Blob with values in + * @f$ [0, 1] @f$ indicating the predicted probability of each of the + * @f$ K = CHW @f$ classes. Each prediction vector @f$ \hat{p}_n @f$ + * should sum to 1 as in a probability distribution: @f$ + * \forall n \sum\limits_{k=1}^K \hat{p}_{nk} = 1 @f$. + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels @f$ l @f$, an integer-valued Blob with values + * @f$ l_n \in [0, 1, 2, ..., K - 1] @f$ + * indicating the correct class label among the @f$ K @f$ classes + * -# @f$ (1 \times 1 \times K \times K) @f$ + * (\b optional) the infogain matrix @f$ H @f$. This must be provided as + * the third bottom blob input if not provided as the infogain_mat in the + * InfogainLossParameter. If @f$ H = I @f$, this layer is equivalent to the + * MultinomialLogisticLossLayer. + * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * the computed infogain multinomial logistic loss: @f$ E = + * \frac{-1}{N} \sum\limits_{n=1}^N H_{l_n} \log(\hat{p}_n) = + * \frac{-1}{N} \sum\limits_{n=1}^N \sum\limits_{k=1}^{K} H_{l_n,k} + * \log(\hat{p}_{n,k}) + * @f$, where @f$ H_{l_n} @f$ denotes row @f$l_n@f$ of @f$H@f$. + */ +template +class InfogainLossLayer : public LossLayer { + public: + explicit InfogainLossLayer(const LayerParameter& param) + : LossLayer(param), infogain_() {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + // InfogainLossLayer takes 2-3 bottom Blobs; if there are 3 the third should + // be the infogain matrix. (Otherwise the infogain matrix is loaded from a + // file specified by LayerParameter.) 
+ virtual inline int ExactNumBottomBlobs() const { return -1; } + virtual inline int MinBottomBlobs() const { return 2; } + virtual inline int MaxBottomBlobs() const { return 3; } + + virtual inline const char* type() const { return "InfogainLoss"; } + + protected: + /// @copydoc InfogainLossLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the infogain loss error gradient w.r.t. the predictions. + * + * Gradients cannot be computed with respect to the label inputs (bottom[1]), + * so this method ignores bottom[1] and requires !propagate_down[1], crashing + * if propagate_down[1] is set. (The same applies to the infogain matrix, if + * provided as bottom[2] rather than in the layer_param.) + * + * @param top output Blob vector (length 1), providing the error gradient + * with respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) + * @param propagate_down see Layer::Backward. 
+ * propagate_down[1] must be false as we can't compute gradients with + * respect to the labels (similarly for propagate_down[2] and the + * infogain matrix, if provided as bottom[2]) + * @param bottom input Blob vector (length 2-3) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ \hat{p} @f$; Backward computes diff + * @f$ \frac{\partial E}{\partial \hat{p}} @f$ + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels -- ignored as we can't compute their error gradients + * -# @f$ (1 \times 1 \times K \times K) @f$ + * (\b optional) the information gain matrix -- ignored as its error + * gradient computation is not implemented. + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + Blob infogain_; +}; + +/** + * @brief Computes the multinomial logistic loss for a one-of-many + * classification task, directly taking a predicted probability + * distribution as input. + * + * When predictions are not already a probability distribution, you should + * instead use the SoftmaxWithLossLayer, which maps predictions to a + * distribution using the SoftmaxLayer, before computing the multinomial + * logistic loss. The SoftmaxWithLossLayer should be preferred over separate + * SoftmaxLayer + MultinomialLogisticLossLayer + * as its gradient computation is more numerically stable. + * + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ \hat{p} @f$, a Blob with values in + * @f$ [0, 1] @f$ indicating the predicted probability of each of the + * @f$ K = CHW @f$ classes. Each prediction vector @f$ \hat{p}_n @f$ + * should sum to 1 as in a probability distribution: @f$ + * \forall n \sum\limits_{k=1}^K \hat{p}_{nk} = 1 @f$. 
+ * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels @f$ l @f$, an integer-valued Blob with values + * @f$ l_n \in [0, 1, 2, ..., K - 1] @f$ + * indicating the correct class label among the @f$ K @f$ classes + * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * the computed multinomial logistic loss: @f$ E = + * \frac{-1}{N} \sum\limits_{n=1}^N \log(\hat{p}_{n,l_n}) + * @f$ + */ +template +class MultinomialLogisticLossLayer : public LossLayer { + public: + explicit MultinomialLogisticLossLayer(const LayerParameter& param) + : LossLayer(param) {} + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "MultinomialLogisticLoss"; } + + protected: + /// @copydoc MultinomialLogisticLossLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the multinomial logistic loss error gradient w.r.t. the + * predictions. + * + * Gradients cannot be computed with respect to the label inputs (bottom[1]), + * so this method ignores bottom[1] and requires !propagate_down[1], crashing + * if propagate_down[1] is set. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) + * @param propagate_down see Layer::Backward. + * propagate_down[1] must be false as we can't compute gradients with + * respect to the labels. 
+ * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ \hat{p} @f$; Backward computes diff + * @f$ \frac{\partial E}{\partial \hat{p}} @f$ + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels -- ignored as we can't compute their error gradients + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); +}; + +/** + * @brief Computes the cross-entropy (logistic) loss @f$ + * E = \frac{-1}{n} \sum\limits_{n=1}^N \left[ + * p_n \log \hat{p}_n + + * (1 - p_n) \log(1 - \hat{p}_n) + * \right] + * @f$, often used for predicting targets interpreted as probabilities. + * + * This layer is implemented rather than separate + * SigmoidLayer + CrossEntropyLayer + * as its gradient computation is more numerically stable. + * At test time, this layer can be replaced simply by a SigmoidLayer. + * + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the scores @f$ x \in [-\infty, +\infty]@f$, + * which this layer maps to probability predictions + * @f$ \hat{p}_n = \sigma(x_n) \in [0, 1] @f$ + * using the sigmoid function @f$ \sigma(.) @f$ (see SigmoidLayer). 
+ * -# @f$ (N \times C \times H \times W) @f$ + * the targets @f$ y \in [0, 1] @f$ + * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * the computed cross-entropy loss: @f$ + * E = \frac{-1}{n} \sum\limits_{n=1}^N \left[ + * p_n \log \hat{p}_n + (1 - p_n) \log(1 - \hat{p}_n) + * \right] + * @f$ + */ +template +class SigmoidCrossEntropyLossLayer : public LossLayer { + public: + explicit SigmoidCrossEntropyLossLayer(const LayerParameter& param) + : LossLayer(param), + sigmoid_layer_(new SigmoidLayer(param)), + sigmoid_output_(new Blob()) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "SigmoidCrossEntropyLoss"; } + + protected: + /// @copydoc SigmoidCrossEntropyLossLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the sigmoid cross-entropy loss error gradient w.r.t. the + * predictions. + * + * Gradients cannot be computed with respect to the target inputs (bottom[1]), + * so this method ignores bottom[1] and requires !propagate_down[1], crashing + * if propagate_down[1] is set. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) + * @param propagate_down see Layer::Backward. + * propagate_down[1] must be false as gradient computation with respect + * to the targets is not implemented. 
+ * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$x@f$; Backward computes diff + * @f$ \frac{\partial E}{\partial x} = + * \frac{1}{n} \sum\limits_{n=1}^N (\hat{p}_n - p_n) + * @f$ + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels -- ignored as we can't compute their error gradients + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + /// The internal SigmoidLayer used to map predictions to probabilities. + shared_ptr > sigmoid_layer_; + /// sigmoid_output stores the output of the SigmoidLayer. + shared_ptr > sigmoid_output_; + /// bottom vector holder to call the underlying SigmoidLayer::Forward + vector*> sigmoid_bottom_vec_; + /// top vector holder to call the underlying SigmoidLayer::Forward + vector*> sigmoid_top_vec_; +}; + +// Forward declare SoftmaxLayer for use in SoftmaxWithLossLayer. +template class SoftmaxLayer; + +/** + * @brief Computes the multinomial logistic loss for a one-of-many + * classification task, passing real-valued predictions through a + * softmax to get a probability distribution over classes. + * + * This layer should be preferred over separate + * SoftmaxLayer + MultinomialLogisticLossLayer + * as its gradient computation is more numerically stable. + * At test time, this layer can be replaced simply by a SoftmaxLayer. + * + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ x @f$, a Blob with values in + * @f$ [-\infty, +\infty] @f$ indicating the predicted score for each of + * the @f$ K = CHW @f$ classes. This layer maps these scores to a + * probability distribution over classes using the softmax function + * @f$ \hat{p}_{nk} = \exp(x_{nk}) / + * \left[\sum_{k'} \exp(x_{nk'})\right] @f$ (see SoftmaxLayer). 
+ * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels @f$ l @f$, an integer-valued Blob with values + * @f$ l_n \in [0, 1, 2, ..., K - 1] @f$ + * indicating the correct class label among the @f$ K @f$ classes + * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * the computed cross-entropy classification loss: @f$ E = + * \frac{-1}{N} \sum\limits_{n=1}^N \log(\hat{p}_{n,l_n}) + * @f$, for softmax output class probabilites @f$ \hat{p} @f$ + */ +template +class SoftmaxWithLossLayer : public LossLayer { + public: + /** + * @param param provides LossParameter loss_param, with options: + * - ignore_label (optional) + * Specify a label value that should be ignored when computing the loss. + * - normalize (optional, default true) + * If true, the loss is normalized by the number of (nonignored) labels + * present; otherwise the loss is simply summed over spatial locations. + */ + explicit SoftmaxWithLossLayer(const LayerParameter& param) + : LossLayer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "SoftmaxWithLoss"; } + virtual inline int ExactNumTopBlobs() const { return -1; } + virtual inline int MinTopBlobs() const { return 1; } + virtual inline int MaxTopBlobs() const { return 2; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + /** + * @brief Computes the softmax loss error gradient w.r.t. the predictions. + * + * Gradients cannot be computed with respect to the label inputs (bottom[1]), + * so this method ignores bottom[1] and requires !propagate_down[1], crashing + * if propagate_down[1] is set. 
+ * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) + * @param propagate_down see Layer::Backward. + * propagate_down[1] must be false as we can't compute gradients with + * respect to the labels. + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ x @f$; Backward computes diff + * @f$ \frac{\partial E}{\partial x} @f$ + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels -- ignored as we can't compute their error gradients + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + + /// The internal SoftmaxLayer used to map predictions to a distribution. + shared_ptr > softmax_layer_; + /// prob stores the output probability predictions from the SoftmaxLayer. + Blob prob_; + /// bottom vector holder used in call to the underlying SoftmaxLayer::Forward + vector*> softmax_bottom_vec_; + /// top vector holder used in call to the underlying SoftmaxLayer::Forward + vector*> softmax_top_vec_; + /// Whether to ignore instances with a certain label. + bool has_ignore_label_; + /// The label indicating that an instance should be ignored. + int ignore_label_; + /// Whether to normalize the loss by the total number of values present + /// (otherwise just by the batch size). 
+ bool normalize_; + + int softmax_axis_, outer_num_, inner_num_; +}; + +} // namespace caffe + +#endif // CAFFE_LOSS_LAYERS_HPP_ diff --git a/python/caffe/test/test_net_spec.py b/python/caffe/test/test_net_spec.py index fee3c0aaebe..b4595e6531a 100644 --- a/python/caffe/test/test_net_spec.py +++ b/python/caffe/test/test_net_spec.py @@ -43,7 +43,8 @@ def anon_lenet(batch_size): def silent_net(): n = caffe.NetSpec() - n.data, n.data2 = L.DummyData(shape=dict(dim=3), ntop=2) + n.data, n.data2 = L.DummyData(shape=[dict(dim=[3]), dict(dim=[4, 2])], + ntop=2) n.silence_data = L.Silence(n.data, ntop=0) n.silence_data2 = L.Silence(n.data2, ntop=0) return n.to_proto() diff --git a/python/caffe/test/test_python_layer.py.orig b/python/caffe/test/test_python_layer.py.orig new file mode 100644 index 00000000000..a244d3da753 --- /dev/null +++ b/python/caffe/test/test_python_layer.py.orig @@ -0,0 +1,153 @@ +import unittest +import tempfile +import os +import six + +import caffe + + +class SimpleLayer(caffe.Layer): + """A layer that just multiplies by ten""" + + def setup(self, bottom, top): + pass + + def reshape(self, bottom, top): + top[0].reshape(*bottom[0].data.shape) + + def forward(self, bottom, top): + top[0].data[...] = 10 * bottom[0].data + + def backward(self, top, propagate_down, bottom): + bottom[0].diff[...] 
= 10 * top[0].diff + + +class ExceptionLayer(caffe.Layer): + """A layer for checking exceptions from Python""" + + def setup(self, bottom, top): + raise RuntimeError + +<<<<<<< bdae411e81e13783dc650836abdcf1e9a2cd5178 +class ParameterLayer(caffe.Layer): + """A layer that just multiplies by ten""" + + def setup(self, bottom, top): + self.blobs.add_blob(1) + self.blobs[0].data[0] = 0 + + def reshape(self, bottom, top): + top[0].reshape(*bottom[0].data.shape) + + def forward(self, bottom, top): + pass + + def backward(self, top, propagate_down, bottom): + self.blobs[0].diff[0] = 1 +======= +>>>>>>> [pytest] check that Python receives (correct) exceptions from Python layers + +def python_net_file(): + with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f: + f.write("""name: 'pythonnet' force_backward: true + input: 'data' input_shape { dim: 10 dim: 9 dim: 8 } + layer { type: 'Python' name: 'one' bottom: 'data' top: 'one' + python_param { module: 'test_python_layer' layer: 'SimpleLayer' } } + layer { type: 'Python' name: 'two' bottom: 'one' top: 'two' + python_param { module: 'test_python_layer' layer: 'SimpleLayer' } } + layer { type: 'Python' name: 'three' bottom: 'two' top: 'three' + python_param { module: 'test_python_layer' layer: 'SimpleLayer' } }""") + return f.name + + +def exception_net_file(): +<<<<<<< bdae411e81e13783dc650836abdcf1e9a2cd5178 + with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f: +======= + with tempfile.NamedTemporaryFile(delete=False) as f: +>>>>>>> [pytest] check that Python receives (correct) exceptions from Python layers + f.write("""name: 'pythonnet' force_backward: true + input: 'data' input_shape { dim: 10 dim: 9 dim: 8 } + layer { type: 'Python' name: 'layer' bottom: 'data' top: 'top' + python_param { module: 'test_python_layer' layer: 'ExceptionLayer' } } + """) + return f.name + + +<<<<<<< bdae411e81e13783dc650836abdcf1e9a2cd5178 +def parameter_net_file(): + with tempfile.NamedTemporaryFile(mode='w+', 
delete=False) as f: + f.write("""name: 'pythonnet' force_backward: true + input: 'data' input_shape { dim: 10 dim: 9 dim: 8 } + layer { type: 'Python' name: 'layer' bottom: 'data' top: 'top' + python_param { module: 'test_python_layer' layer: 'ParameterLayer' } } + """) + return f.name + + +======= +>>>>>>> [pytest] check that Python receives (correct) exceptions from Python layers +class TestPythonLayer(unittest.TestCase): + def setUp(self): + net_file = python_net_file() + self.net = caffe.Net(net_file, caffe.TRAIN) + os.remove(net_file) + + def test_forward(self): + x = 8 + self.net.blobs['data'].data[...] = x + self.net.forward() + for y in self.net.blobs['three'].data.flat: + self.assertEqual(y, 10**3 * x) + + def test_backward(self): + x = 7 + self.net.blobs['three'].diff[...] = x + self.net.backward() + for y in self.net.blobs['data'].diff.flat: + self.assertEqual(y, 10**3 * x) + + def test_reshape(self): + s = 4 + self.net.blobs['data'].reshape(s, s, s, s) + self.net.forward() + for blob in six.itervalues(self.net.blobs): + for d in blob.data.shape: + self.assertEqual(s, d) + + def test_exception(self): + net_file = exception_net_file() + self.assertRaises(RuntimeError, caffe.Net, net_file, caffe.TEST) + os.remove(net_file) +<<<<<<< bdae411e81e13783dc650836abdcf1e9a2cd5178 + + def test_parameter(self): + net_file = parameter_net_file() + net = caffe.Net(net_file, caffe.TRAIN) + # Test forward and backward + net.forward() + net.backward() + layer = net.layers[list(net._layer_names).index('layer')] + self.assertEqual(layer.blobs[0].data[0], 0) + self.assertEqual(layer.blobs[0].diff[0], 1) + layer.blobs[0].data[0] += layer.blobs[0].diff[0] + self.assertEqual(layer.blobs[0].data[0], 1) + + # Test saving and loading + h, caffemodel_file = tempfile.mkstemp() + net.save(caffemodel_file) + layer.blobs[0].data[0] = -1 + self.assertEqual(layer.blobs[0].data[0], -1) + net.copy_from(caffemodel_file) + self.assertEqual(layer.blobs[0].data[0], 1) + 
os.remove(caffemodel_file) + + # Test weight sharing + net2 = caffe.Net(net_file, caffe.TRAIN) + net2.share_with(net) + layer = net.layers[list(net2._layer_names).index('layer')] + self.assertEqual(layer.blobs[0].data[0], 1) + + os.remove(net_file) +======= +>>>>>>> [pytest] check that Python receives (correct) exceptions from Python layers diff --git a/src/caffe/data_transformer.cpp b/src/caffe/data_transformer.cpp index a1d41b0dc65..0120e839c49 100644 --- a/src/caffe/data_transformer.cpp +++ b/src/caffe/data_transformer.cpp @@ -151,10 +151,20 @@ void DataTransformer::Transform(const Datum& datum, ======= // If datum is encoded, decoded and transform the cv::image. if (datum.encoded()) { +<<<<<<< HEAD CHECK(!param_.force_color() && !param_.force_gray()) >>>>>>> 011aef0... restore +<<<<<<< HEAD >>>>>>> 00341b2... triplet data generation and network update +<<<<<<< bd900fc47efb4f9fe6c0698d66ca08f7a5c1ed58 >>>>>>> triplet data generation and network update +======= +======= +======= + CHECK(!(param_.force_color() && param_.force_gray())) +>>>>>>> d2acfed... fixed _force_color check, fixes #2635 +>>>>>>> 1882ac9... add initiate class name of triplet loss layer +>>>>>>> add initiate class name of triplet loss layer << "cannot set both force_color and force_gray"; cv::Mat cv_img; if (param_.force_color() || param_.force_gray()) { @@ -485,10 +495,20 @@ vector DataTransformer::InferBlobShape(const Datum& datum) { template vector DataTransformer::InferBlobShape(const Datum& datum) { if (datum.encoded()) { +<<<<<<< HEAD CHECK(!param_.force_color() && !param_.force_gray()) >>>>>>> 011aef0... restore +<<<<<<< HEAD >>>>>>> 00341b2... triplet data generation and network update +<<<<<<< bd900fc47efb4f9fe6c0698d66ca08f7a5c1ed58 >>>>>>> triplet data generation and network update +======= +======= +======= + CHECK(!(param_.force_color() && param_.force_gray())) +>>>>>>> d2acfed... fixed _force_color check, fixes #2635 +>>>>>>> 1882ac9... 
add initiate class name of triplet loss layer +>>>>>>> add initiate class name of triplet loss layer << "cannot set both force_color and force_gray"; cv::Mat cv_img; if (param_.force_color() || param_.force_gray()) { diff --git a/src/caffe/layers/tile_layer.cu b/src/caffe/layers/tile_layer.cu index 7fd3bc47d0f..3af8e2eb72f 100644 --- a/src/caffe/layers/tile_layer.cu +++ b/src/caffe/layers/tile_layer.cu @@ -6,45 +6,17 @@ namespace caffe { -template -__global__ void Tile(const int nthreads, const Dtype* bottom_data, - const int tile_size, const int num_tiles, const int bottom_tile_axis, - Dtype* top_data) { - CUDA_KERNEL_LOOP(index, nthreads) { - const int d = index % tile_size; - const int b = (index / tile_size / num_tiles) % bottom_tile_axis; - const int n = index / tile_size / num_tiles / bottom_tile_axis; - const int bottom_index = (n * bottom_tile_axis + b) * tile_size + d; - top_data[index] = bottom_data[bottom_index]; - } -} - template void TileLayer::Forward_gpu( const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); - const int bottom_tile_axis = bottom[0]->shape(axis_); - const int nthreads = top[0]->count(); - Tile // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - nthreads, bottom_data, inner_dim_, tiles_, bottom_tile_axis, top_data); -} - -template -__global__ void TileBackward(const int nthreads, const Dtype* top_diff, - const int tile_size, const int num_tiles, const int bottom_tile_axis, - Dtype* bottom_diff) { - CUDA_KERNEL_LOOP(index, nthreads) { - const int d = index % tile_size; - const int b = (index / tile_size) % bottom_tile_axis; - const int n = index / tile_size / bottom_tile_axis; - bottom_diff[index] = 0; - int top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d; - for (int t = 0; t < num_tiles; ++t) { - bottom_diff[index] += top_diff[top_index]; - top_index += bottom_tile_axis * tile_size; + for (int i = 0; i < outer_dim_; ++i) { + for 
(int t = 0; t < tiles_; ++t) { + caffe_copy(inner_dim_, bottom_data, top_data); + top_data += inner_dim_; } + bottom_data += inner_dim_; } } @@ -54,12 +26,15 @@ void TileLayer::Backward_gpu(const vector*>& top, if (!propagate_down[0]) { return; } const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - const int bottom_tile_axis = bottom[0]->shape(axis_); - const int tile_size = inner_dim_ / bottom_tile_axis; - const int nthreads = bottom[0]->count(); - TileBackward // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - nthreads, top_diff, tile_size, tiles_, bottom_tile_axis, bottom_diff); + for (int i = 0; i < outer_dim_; ++i) { + caffe_copy(inner_dim_, top_diff, bottom_diff); + top_diff += inner_dim_; + for (int t = 1; t < tiles_; ++t) { + caffe_gpu_axpy(inner_dim_, Dtype(1), top_diff, bottom_diff); + top_diff += inner_dim_; + } + bottom_diff += inner_dim_; + } } INSTANTIATE_LAYER_GPU_FUNCS(TileLayer); diff --git a/src/caffe/layers/triplet_loss_layer.cpp.orig b/src/caffe/layers/triplet_loss_layer.cpp.orig new file mode 100644 index 00000000000..613fa703676 --- /dev/null +++ b/src/caffe/layers/triplet_loss_layer.cpp.orig @@ -0,0 +1,397 @@ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/loss_layers.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +void TripletLossLayer::LayerSetUp( + const vector*>& bottom, const vector*>& top) { + LossLayer::LayerSetUp(bottom, top); + // number of triplet in a batch + int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); + // dimension of each descriptor + int dim = bottom[0]->count()/bottom[0]->num(); + CHECK_EQ(bottom[0]->channels(), dim); + CHECK_EQ(bottom[0]->height(), 1); + CHECK_EQ(bottom[0]->width(), 1); + CHECK_EQ(bottom[1]->channels(), 1); + CHECK_EQ(bottom[1]->height(), 1); + CHECK_EQ(bottom[1]->width(), 1); + // In each set, we have: + // the descriptor of reference sample, closest 
sample, and negative samples + // number of sets in the whole batch + int num_set = bottom[0]->num()/(2 + num_triplets); + dist_sq_.Reshape(num_set, 1, 1, 1); + diff_pos.Reshape(num_set, dim, 1, 1); + dist_sq_pos.Reshape(num_set, 1, 1, 1); + diff_neg.Reshape(num_set, dim, 1, 1); + dist_sq_neg.Reshape(num_set, 1, 1, 1); + // vector of ones used to sum along channels + summer_vec_.Reshape(bottom[0]->channels(), 1, 1, 1); + for (int i = 0; i < bottom[0]->channels(); ++i) + summer_vec_.mutable_cpu_data()[i] = Dtype(1); +} + +template +void TripletLossLayer::Forward_cpu( + const vector*>& bottom, + const vector*>& top) { + Dtype margin = this->layer_param_.triplet_loss_param().margin(); + Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); + int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); + CHECK_EQ(bottom[0]->num()%(2 + num_triplets), 0); + Dtype loss(0.0); + int dim = bottom[0]->count()/bottom[0]->num(); + int num_set = bottom[0]->num()/(2 + num_triplets); + if (losstype == 0) { + for (int i = 0; i < num_set; ++i) { + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive + diff_pos.mutable_cpu_data() + i*dim); // reference-pose_close + // Loss component calculated from reference and close one + dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(dim, + diff_pos.cpu_data() + i*dim, diff_pos.cpu_data() + i*dim); + // a b is a similar pair for pair wise + // loss accumulated by the pair wise part + loss += dist_sq_pos.cpu_data()[i]; + for (int triplet = 0; triplet < num_triplets; ++triplet) { + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i]; + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*i + 2 + 
triplet)*dim, + diff_neg.mutable_cpu_data() + i*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + i*dim, diff_neg.cpu_data() + i*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg.cpu_data()[i]; + // loss accumulated accumulated by the triplet part + loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); + } + } + loss = loss / static_cast(num_set) / Dtype(2); + top[0]->mutable_cpu_data()[0] = loss; + } else { + for (int i = 0; i < num_set; ++i) { + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive + diff_pos.mutable_cpu_data() + i*dim); // reference-pose_close + // Loss component calculated from reference and close one + dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(dim, + diff_pos.cpu_data() + i*dim, diff_pos.cpu_data() + i*dim); + // a b is a similar pair for pair wise + // loss accumulated by the pair wise part + loss += dist_sq_pos.cpu_data()[i]; + dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; + dist_sq_.mutable_cpu_data()[i] += margin; + for (int triplet = 0; triplet < num_triplets; ++triplet) { + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + i*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + i*dim, diff_neg.cpu_data() + i*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[i] = 1 - \ + dist_sq_neg.cpu_data()[i] / dist_sq_.mutable_cpu_data()[i]; + // loss accumulated accumulated by the triplet part + loss += std::max(dist_sq_.cpu_data()[i], Dtype(0.0)); + } + } + loss = loss / static_cast(num_set) / Dtype(2); + 
top[0]->mutable_cpu_data()[0] = loss; + } +} + +template +void TripletLossLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + Dtype margin = this->layer_param_.triplet_loss_param().margin(); + Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); + int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); + int dim = bottom[0]->count()/bottom[0]->num(); + int num_set = bottom[0]->num()/(2 + num_triplets); + if (losstype == 0) { + // BP for feat1(extracted from reference) + for (int i = 0; i < 1; ++i) { + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + // the pair part + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; + // Loss component calculated from negative part + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // similar pair in triplet + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(1.0), + bout + (2 + num_triplets)*j*dim); + // dissimilar pair in triplet + caffe_cpu_axpby( + dim, + -alpha, + diff_neg.cpu_data() + 
(j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + // BP for feat2(extracted from the closest sample) + for (int i = 1; i < 2; ++i) { + if (propagate_down[0]) { + const Dtype sign = -1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + // the pair part + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // similar pair in triplet + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + // BP for negative feature used in the num_triplets triplet part + for (int i = 2; i < 2 + num_triplets; ++i) { + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + i)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + // 
Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // dissimilar pairs + caffe_cpu_axpby( + dim, + alpha, + diff_neg.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + } else { + caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } else { + for (int i = 0; i < 1; ++i) { + // BP for data1(feat1) + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + // the pair part + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] = 1 - \ + dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + caffe_cpu_axpby( + dim, + 
alpha*dist_sq_neg.mutable_cpu_data()[j]\ + /((dist_sq_pos.mutable_cpu_data()[j]+margin)\ + *(dist_sq_pos.mutable_cpu_data()[j]+margin)), + diff_pos.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + caffe_cpu_axpby( + dim, + -alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), + diff_neg.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + for (int i = 1; i < 2; ++i) { + // BP for positive data(feat2) + if (propagate_down[0]) { + const Dtype sign = -1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + // the pair part + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] = 1 - \ + dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + caffe_cpu_axpby( + dim, + alpha*dist_sq_neg.mutable_cpu_data()[j]\ + /((dist_sq_pos.mutable_cpu_data()[j]+margin)\ + *(dist_sq_pos.mutable_cpu_data()[j]+margin)), + diff_pos.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + for (int i = 2; i < 2 + 
num_triplets; ++i) { + // BP for negative data(feat3) + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + i)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] = 1 - \ + dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + caffe_cpu_axpby( + dim, + alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), + diff_neg.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + } else { + caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } +} + +#ifdef CPU_ONLY +STUB_GPU(TripletLossLayer); +#endif + +INSTANTIATE_CLASS(TripletLossLayer); +REGISTER_LAYER_CLASS(TripletLoss); + +} // namespace caffe diff --git a/src/caffe/layers/triplet_loss_layer.cu.orig b/src/caffe/layers/triplet_loss_layer.cu.orig new file mode 100644 index 00000000000..1415944a03e --- /dev/null +++ b/src/caffe/layers/triplet_loss_layer.cu.orig @@ -0,0 +1,536 @@ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void TripletLossLayer::Forward_gpu( + const vector*>& bottom, + const vector*>& top) { + Dtype margin = 
this->layer_param_.triplet_loss_param().margin(); + Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); + int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); + CHECK_EQ(bottom[0]->num()%(2 + num_triplets), 0); + Dtype loss(0.0); + int dim = bottom[0]->count()/bottom[0]->num(); + int num_set = bottom[0]->num()/(2 + num_triplets); + if (losstype == 0) { + for (int i = 0; i < num_set; ++i) { + caffe_gpu_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive + diff_pos.mutable_gpu_data() + i*dim); // reference-pose_close + caffe_gpu_dot( + dim, + diff_pos.gpu_data() + i*dim, + diff_pos.gpu_data() + i*dim, + dist_sq_pos.mutable_cpu_data() + i); + // a b is a similar pair for pair wise + // loss accumulated by the pair wise part + loss += dist_sq_pos.gpu_data()[i]; + for (int triplet = 0; triplet < num_triplets; ++triplet) { + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.gpu_data()[i]; + // Loss component calculated from negative part + caffe_gpu_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, + diff_neg.mutable_gpu_data() + i*dim); // reference-negative +<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 + caffe_gpu_dot( + dim, + diff_neg.gpu_data() + i*dim, + diff_neg.gpu_data() + i*dim, + dist_sq_neg.mutable_cpu_data() + i); +======= + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + i*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + 
dist_sq_neg.mutable_gpu_data() + i); // \Sum(reference-pose_close)^2 +>>>>>>> add initiate class name of triplet loss layer + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[i] -= dist_sq_neg.gpu_data()[i]; + // loss accumulated accumulated by the triplet part + loss += std::max(margin + dist_sq_.gpu_data()[i], Dtype(0.0)); + } + } + loss = loss / static_cast(num_set) / Dtype(2); + top[0]->mutable_gpu_data()[0] = loss; + } else { + for (int i = 0; i < num_set; ++i) { + caffe_gpu_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive + diff_pos.mutable_gpu_data() + i*dim); // reference-pose_close + // Loss component calculated from reference and close one + caffe_gpu_dot( + dim, + diff_pos.gpu_data() + i*dim, + diff_pos.gpu_data() + i*dim, + dist_sq_pos.mutable_cpu_data() + i); + // a b is a similar pair for pair wise + // loss accumulated by the pair wise part + loss += dist_sq_pos.gpu_data()[i]; + for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.mutable_gpu_data()[i]; + dist_sq_.mutable_gpu_data()[i] += margin; + // Loss component calculated from negative part + caffe_gpu_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, + diff_neg.mutable_gpu_data() + i*dim); // reference-negative +<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 + caffe_gpu_dot( + dim, + diff_neg.gpu_data() + i*dim, + diff_neg.gpu_data() + i*dim, + dist_sq_neg.mutable_cpu_data() + i); +======= + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + i*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + 
diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + i); // \Sum(reference-pose_close)^2 +>>>>>>> add initiate class name of triplet loss layer + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[i] = 1 - \ + dist_sq_neg.gpu_data()[i] / dist_sq_.mutable_gpu_data()[i]; + // loss accumulated accumulated by the triplet part + loss += std::max(dist_sq_.gpu_data()[i], Dtype(0.0)); + } + } + loss = loss / static_cast(num_set) / Dtype(2); + top[0]->mutable_gpu_data()[0] = loss; + } +} + +template +void TripletLossLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + Dtype margin = this->layer_param_.triplet_loss_param().margin(); + Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); + int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); + int dim = bottom[0]->count()/bottom[0]->num(); + int num_set = bottom[0]->num()/(2 + num_triplets); + if (losstype == 0) { + // BP for feat1(extracted from reference) + for (int i = 0; i < 1; ++i) { + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->gpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_gpu_diff(); + // the pair part + caffe_gpu_axpby( + dim, + alpha, + diff_pos.gpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + caffe_gpu_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_gpu_data() + j*dim); // reference-negative +<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 + caffe_gpu_dot( + dim, + diff_neg.gpu_data() + j*dim, + diff_neg.gpu_data() + j*dim, + dist_sq_neg.mutable_cpu_data() + j); +======= + 
caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + j); // \Sum(ref-close)^2 +>>>>>>> add initiate class name of triplet loss layer + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; + // Loss component calculated from negative part + if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { + // similar pair in triplet + caffe_gpu_axpby( + dim, + alpha, + diff_pos.gpu_data() + (j*dim), + Dtype(1.0), + bout + (2 + num_triplets)*j*dim); + // dissimilar pair in triplet + caffe_gpu_axpby( + dim, + -alpha, + diff_neg.gpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + // BP for feat2(extracted from the closest sample) + for (int i = 1; i < 2; ++i) { + if (propagate_down[0]) { + const Dtype sign = -1; + const Dtype alpha = sign * top[0]->gpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_gpu_diff(); + // the pair part + caffe_gpu_axpby( + dim, + alpha, + diff_pos.gpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + caffe_gpu_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_gpu_data() + j*dim); // reference-negative + // Triplet loss 
accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; +<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 + caffe_gpu_dot( + dim, + diff_neg.gpu_data() + j*dim, + diff_neg.gpu_data() + j*dim, + dist_sq_neg.mutable_cpu_data() + j); +======= + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + j); // \Sum (ref-close)^2 +>>>>>>> add initiate class name of triplet loss layer + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; + if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { + // similar pair in triplet + caffe_gpu_axpby( + dim, + alpha, + diff_pos.gpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + // BP for negative feature used in the num_triplets triplet part + for (int i = 2; i < 2 + num_triplets; ++i) { + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->gpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_gpu_diff(); + caffe_gpu_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*j + i)*dim, + diff_neg.mutable_gpu_data() + j*dim); // reference-negative + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; +<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 + caffe_gpu_dot( + dim, + diff_neg.gpu_data() + j*dim, + 
diff_neg.gpu_data() + j*dim, + dist_sq_neg.mutable_cpu_data() + j); +======= + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + j); // \Sum (ref-close)^2 +>>>>>>> add initiate class name of triplet loss layer + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; + if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { + // dissimilar pairs + caffe_gpu_axpby( + dim, + alpha, + diff_neg.gpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + } else { + caffe_gpu_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } else { + for (int i = 0; i < 1; ++i) { + // BP for data1(feat1) + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->gpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_gpu_diff(); + // the pair part + caffe_gpu_axpby( + dim, + alpha, + diff_pos.gpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; + dist_sq_.mutable_gpu_data()[j] += margin; + // Loss component calculated from negative part + caffe_gpu_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_gpu_data() + j*dim); // reference-negative +<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 + caffe_gpu_dot( + dim, + 
diff_neg.gpu_data() + j*dim, + diff_neg.gpu_data() + j*dim, + dist_sq_neg.mutable_cpu_data() + j); +======= + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + j); // \Sum (ref-close)^2 +>>>>>>> add initiate class name of triplet loss layer + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[j] = 1 - \ + dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.gpu_data()[j]) > Dtype(0.0)) { + caffe_gpu_axpby( + dim, + alpha*dist_sq_neg.mutable_gpu_data()[j]\ + /((dist_sq_pos.mutable_gpu_data()[j]+margin)\ + *(dist_sq_pos.mutable_gpu_data()[j]+margin)), + diff_pos.gpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + caffe_gpu_axpby( + dim, + -alpha/(dist_sq_pos.mutable_gpu_data()[j] + margin), + diff_neg.gpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + for (int i = 1; i < 2; ++i) { + // BP for positive data(feat2) + if (propagate_down[0]) { + const Dtype sign = -1; + const Dtype alpha = sign * top[0]->gpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_gpu_diff(); + // the pair part + caffe_gpu_axpby( + dim, + alpha, + diff_pos.gpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; + dist_sq_.mutable_gpu_data()[j] += margin; + // Loss component calculated 
from negative part + caffe_gpu_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_gpu_data() + j*dim); // reference-negative +<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 + caffe_gpu_dot( + dim, + diff_neg.gpu_data() + j*dim, + diff_neg.gpu_data() + j*dim, + dist_sq_neg.mutable_cpu_data() + j); +======= + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + j); // \Sum (ref-close)^2 +>>>>>>> add initiate class name of triplet loss layer + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[j] = 1 - \ + dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.gpu_data()[j]) > Dtype(0.0)) { + caffe_gpu_axpby( + dim, + alpha*dist_sq_neg.mutable_gpu_data()[j]\ + /((dist_sq_pos.mutable_gpu_data()[j]+margin)\ + *(dist_sq_pos.mutable_gpu_data()[j]+margin)), + diff_pos.gpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + for (int i = 2; i < 2 + num_triplets; ++i) { + // BP for negative data(feat3) + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->gpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_gpu_diff(); + dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; + dist_sq_.mutable_gpu_data()[j] += margin; + // Loss component calculated from negative part + caffe_gpu_sub( + dim, + bottom[0]->gpu_data() + (2 + 
num_triplets)*j*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*j + i)*dim, + diff_neg.mutable_gpu_data() + j*dim); // reference-negative +<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 + caffe_gpu_dot( + dim, + diff_neg.gpu_data() + j*dim, + diff_neg.gpu_data() + j*dim, + dist_sq_neg.mutable_cpu_data() + j); +======= + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + j); // \Sum (ref-close)^2 +>>>>>>> add initiate class name of triplet loss layer + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[j] = 1 - \ + dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.gpu_data()[j]) > Dtype(0.0)) { + caffe_gpu_axpby( + dim, + alpha/(dist_sq_pos.mutable_gpu_data()[j] + margin), + diff_neg.gpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + } else { + caffe_gpu_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(TripletLossLayer); + +} // namespace caffe diff --git a/src/caffe/layers/triplet_loss_layer_BACKUP_62802.cpp b/src/caffe/layers/triplet_loss_layer_BACKUP_62802.cpp new file mode 100644 index 00000000000..5fca131cef5 --- /dev/null +++ b/src/caffe/layers/triplet_loss_layer_BACKUP_62802.cpp @@ -0,0 +1,423 @@ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/loss_layers.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +void TripletLossLayer::LayerSetUp( + const vector*>& bottom, const vector*>& 
top) { + LossLayer::LayerSetUp(bottom, top); + // number of triplet in a batch + int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); + // dimension of each descriptor + int dim = bottom[0]->count()/bottom[0]->num(); + CHECK_EQ(bottom[0]->channels(), dim); + CHECK_EQ(bottom[0]->height(), 1); + CHECK_EQ(bottom[0]->width(), 1); + CHECK_EQ(bottom[1]->channels(), 1); + CHECK_EQ(bottom[1]->height(), 1); + CHECK_EQ(bottom[1]->width(), 1); + // In each set, we have: + // the descriptor of reference sample, closest sample, and negative samples + // number of sets in the whole batch + int num_set = bottom[0]->num()/(2 + num_triplets); + dist_sq_.Reshape(num_set, 1, 1, 1); + diff_pos.Reshape(num_set, dim, 1, 1); + dist_sq_pos.Reshape(num_set, 1, 1, 1); + diff_neg.Reshape(num_set, dim, 1, 1); + dist_sq_neg.Reshape(num_set, 1, 1, 1); + // vector of ones used to sum along channels + summer_vec_.Reshape(bottom[0]->channels(), 1, 1, 1); + for (int i = 0; i < bottom[0]->channels(); ++i) + summer_vec_.mutable_cpu_data()[i] = Dtype(1); +} + +template +void TripletLossLayer::Forward_cpu( + const vector*>& bottom, + const vector*>& top) { + Dtype margin = this->layer_param_.triplet_loss_param().margin(); + Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); + int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); + CHECK_EQ(bottom[0]->num()%(2 + num_triplets), 0); + Dtype loss(0.0); + int dim = bottom[0]->count()/bottom[0]->num(); + int num_set = bottom[0]->num()/(2 + num_triplets); + if (losstype == 0) { + for (int i = 0; i < num_set; ++i) { + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive + diff_pos.mutable_cpu_data() + i*dim); // reference-pose_close + // Loss component calculated from reference and close one + dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(dim, + diff_pos.cpu_data() + i*dim, diff_pos.cpu_data() + 
i*dim); + // a b is a similar pair for pair wise + // loss accumulated by the pair wise part + loss += dist_sq_pos.cpu_data()[i]; + for (int triplet = 0; triplet < num_triplets; ++triplet) { + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i]; + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + i*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + i*dim, diff_neg.cpu_data() + i*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg.cpu_data()[i]; + // loss accumulated accumulated by the triplet part + loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); + } +<<<<<<< 027f1805a9555ff938185eb2ccb9a7599d35ba39 +======= + } + loss = loss / static_cast(num_set) / Dtype(2); + top[0]->mutable_cpu_data()[0] = loss; + } else { + for (int i = 0; i < num_set; ++i) { + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive + diff_pos.mutable_cpu_data() + i*dim); // reference-pose_close + // Loss component calculated from reference and close one + dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(dim, + diff_pos.cpu_data() + i*dim, diff_pos.cpu_data() + i*dim); + // a b is a similar pair for pair wise + // loss accumulated by the pair wise part + loss += dist_sq_pos.cpu_data()[i]; + dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; + dist_sq_.mutable_cpu_data()[i] += margin; + for (int triplet = 0; triplet < num_triplets; ++triplet) { + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // 
reference + bottom[0]->cpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + i*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + i*dim, diff_neg.cpu_data() + i*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[i] = 1 - \ + dist_sq_neg.cpu_data()[i] / dist_sq_.mutable_cpu_data()[i]; + // loss accumulated accumulated by the triplet part + loss += std::max(dist_sq_.cpu_data()[i], Dtype(0.0)); + } +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise + } + loss = loss / static_cast(num_set) / Dtype(2); + top[0]->mutable_cpu_data()[0] = loss; +} + +template +void TripletLossLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + Dtype margin = this->layer_param_.triplet_loss_param().margin(); + Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); + int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); + int dim = bottom[0]->count()/bottom[0]->num(); + int num_set = bottom[0]->num()/(2 + num_triplets); + if (losstype == 0) { + // BP for feat1(extracted from reference) + for (int i = 0; i < 1; ++i) { + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / +<<<<<<< 027f1805a9555ff938185eb2ccb9a7599d35ba39 + static_cast(bottom[i]->num()); + int num = bottom[i]->num(); + int channels = bottom[i]->channels(); + for (int j = 0; j < num; ++j) { + Dtype* bout = bottom[i]->mutable_cpu_diff(); + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // similar pairs +======= + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + // the pair part + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int 
triplet = 0; triplet < num_triplets; ++triplet) { + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; + // Loss component calculated from negative part + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // similar pair in triplet + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(1.0), + bout + (2 + num_triplets)*j*dim); + // dissimilar pair in triplet + caffe_cpu_axpby( + dim, + -alpha, + diff_neg.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + // BP for feat2(extracted from the closest sample) + for (int i = 1; i < 2; ++i) { + if (propagate_down[0]) { + const Dtype sign = -1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + // the pair part + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; + 
dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // similar pair in triplet + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + // BP for negative feature used in the num_triplets triplet part + for (int i = 2; i < 2 + num_triplets; ++i) { + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + i)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // dissimilar pairs +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise + caffe_cpu_axpby( + dim, + alpha, + diff_neg.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + } else { + caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } else { + for (int i = 0; i < 1; ++i) { + // BP for data1(feat1) + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j 
= 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + // the pair part + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); +<<<<<<< 027f1805a9555ff938185eb2ccb9a7599d35ba39 + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; +======= + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] = 1 - \ + dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + caffe_cpu_axpby( + dim, + alpha*dist_sq_neg.mutable_cpu_data()[j]\ + /((dist_sq_pos.mutable_cpu_data()[j]+margin)\ + *(dist_sq_pos.mutable_cpu_data()[j]+margin)), + diff_pos.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + caffe_cpu_axpby( + dim, + -alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), + diff_neg.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + for (int i = 1; i < 2; ++i) { + // BP for positive data(feat2) + if (propagate_down[0]) { + const Dtype sign = 
-1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + // the pair part + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); +<<<<<<< 027f1805a9555ff938185eb2ccb9a7599d35ba39 + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; +======= + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] = 1 - \ + dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + caffe_cpu_axpby( + dim, + alpha*dist_sq_neg.mutable_cpu_data()[j]\ + /((dist_sq_pos.mutable_cpu_data()[j]+margin)\ + *(dist_sq_pos.mutable_cpu_data()[j]+margin)), + diff_pos.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + for (int i = 2; i < 2 + num_triplets; ++i) { + // BP for negative data(feat3) + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + 
static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + i)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] = 1 - \ + dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + caffe_cpu_axpby( + dim, + alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), + diff_neg.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + } else { + caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } +} + +#ifdef CPU_ONLY +STUB_GPU(TripletLossLayer); +#endif + +INSTANTIATE_CLASS(TripletLossLayer); +REGISTER_LAYER_CLASS(TripletLoss); + +} // namespace caffe diff --git a/src/caffe/layers/triplet_loss_layer_BASE_62802.cpp b/src/caffe/layers/triplet_loss_layer_BASE_62802.cpp new file mode 100644 index 00000000000..a4e6402c76a --- /dev/null +++ b/src/caffe/layers/triplet_loss_layer_BASE_62802.cpp @@ -0,0 +1,298 @@ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/loss_layers.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +void TripletLossLayer::LayerSetUp( + const vector*>& bottom, const vector*>& top) { + LossLayer::LayerSetUp(bottom, top); + CHECK_EQ(bottom[0]->channels(), bottom[1]->channels()); + CHECK_EQ(bottom[0]->channels(), bottom[2]->channels()); + 
CHECK_EQ(bottom[0]->channels(), bottom[3]->channels()); + CHECK_EQ(bottom[0]->channels(), bottom[4]->channels()); + CHECK_EQ(bottom[0]->height(), 1); + CHECK_EQ(bottom[0]->width(), 1); + CHECK_EQ(bottom[1]->height(), 1); + CHECK_EQ(bottom[1]->width(), 1); + CHECK_EQ(bottom[2]->height(), 1); + CHECK_EQ(bottom[2]->width(), 1); + CHECK_EQ(bottom[3]->height(), 1); + CHECK_EQ(bottom[3]->width(), 1); + CHECK_EQ(bottom[4]->height(), 1); + CHECK_EQ(bottom[4]->width(), 1); + CHECK_EQ(bottom[5]->channels(), 1); + CHECK_EQ(bottom[5]->height(), 1); + CHECK_EQ(bottom[5]->width(), 1); + diff_pos.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); + diff_neg.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); + diff_par.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); + diff_sq_pos.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); + diff_sq_neg.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); + diff_sq_par.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); + dist_sq_.Reshape(bottom[0]->num(), 1, 1, 1); + dist_sq_pos.Reshape(bottom[0]->num(), 1, 1, 1); + dist_sq_neg.Reshape(bottom[0]->num(), 1, 1, 1); + dist_sq_par.Reshape(bottom[0]->num(), 1, 1, 1); + // vector of ones used to sum along channels + summer_vec_.Reshape(bottom[0]->channels(), 1, 1, 1); + for (int i = 0; i < bottom[0]->channels(); ++i) + summer_vec_.mutable_cpu_data()[i] = Dtype(1); +} + +template +void TripletLossLayer::Forward_cpu( + const vector*>& bottom, + const vector*>& top) { + int count = bottom[0]->count(); + caffe_sub( + count, + bottom[0]->cpu_data(), // a + bottom[1]->cpu_data(), // b + diff_pos.mutable_cpu_data()); // a_i-b_i: positive + caffe_sub( + count, + bottom[0]->cpu_data(), // a + bottom[2]->cpu_data(), // c + diff_neg.mutable_cpu_data()); // a_i-c_i: negative + caffe_sub( + count, + bottom[3]->cpu_data(), // d + bottom[4]->cpu_data(), // e + diff_par.mutable_cpu_data()); // d_i-e_i: pair wise + const int channels = bottom[0]->channels(); + Dtype margin = 
this->layer_param_.triplet_loss_param().margin(); + Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); + Dtype loss(0.0); + if (losstype == 0) { + for (int i = 0; i < bottom[0]->num(); ++i) { + // Triplet loss accumulation + // Loss component calculated from a and b + dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(channels, + diff_pos.cpu_data() + (i*channels), diff_pos.cpu_data() + (i*channels)); + // a b is a similar pair for triplet + dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i]; + // Loss component calculated from a and c + dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(channels, + diff_neg.cpu_data() + (i*channels), diff_neg.cpu_data() + (i*channels)); + // a c is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg.cpu_data()[i]; + // loss accumulated accumulated by the triplet part + loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); + // Pair wise loss accumulation + // Loss component calculated from d and e + dist_sq_par.mutable_cpu_data()[i] = caffe_cpu_dot(channels, + diff_par.cpu_data() + (i*channels), diff_par.cpu_data() + (i*channels)); + // d e is a similar pair for pair wise + // loss accumulated by the pair wise part + loss += dist_sq_par.cpu_data()[i]; + } + loss = loss / static_cast(bottom[0]->num()) / Dtype(2); + top[0]->mutable_cpu_data()[0] = loss; + } else { + for (int i = 0; i < bottom[0]->num(); ++i) { + // softTriplet loss accumulation + // Loss component calculated from a and b + dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(channels, + diff_pos.cpu_data() + (i*channels), diff_pos.cpu_data() + (i*channels)); + // a b is a similar pair for triplet + dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i]; + dist_sq_.mutable_cpu_data()[i] += margin; + // Loss component calculated from a and c + dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(channels, + diff_neg.cpu_data() + (i*channels), diff_neg.cpu_data() + (i*channels)); + // a c is a dissimilar pair for 
triplet + dist_sq_.mutable_cpu_data()[i] = 1 - \ +dist_sq_neg.cpu_data()[i] / dist_sq_.mutable_cpu_data()[i]; + // loss accumulated accumulated by the triplet part + loss += std::max(dist_sq_.cpu_data()[i], Dtype(0.0)); + // Pair wise loss accumulation + // Loss component calculated from d and e + dist_sq_par.mutable_cpu_data()[i] = caffe_cpu_dot(channels, + diff_par.cpu_data() + (i*channels), diff_par.cpu_data() + (i*channels)); + // d e is a similar pair for pair wise + // loss accumulated by the pair wise part + loss += dist_sq_par.cpu_data()[i]; + } + loss = loss / static_cast(bottom[0]->num()) / Dtype(2); + top[0]->mutable_cpu_data()[0] = loss; + } +} + +template +void TripletLossLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + Dtype margin = this->layer_param_.triplet_loss_param().margin(); + Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); + if (losstype == 0) { + // BP for feat1 + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(bottom[0]->num()); + int num = bottom[0]->num(); + int channels = bottom[0]->channels(); + for (int j = 0; j < num; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // similar pairs + caffe_cpu_axpby( + channels, + alpha, + diff_pos.cpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + // dissimilar pairs + caffe_cpu_axpby( + channels, + -alpha, + diff_neg.cpu_data() + (j*channels), + Dtype(1.0), + bout + (j*channels)); + } else { + caffe_set(channels, Dtype(0), bout + (j*channels)); + } + } + } + // BP for feat2 and feat3 + for (int i = 1; i < 3; ++i) { + if (propagate_down[i]) { + const Dtype sign = (i == 1) ? 
-1 : 1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(bottom[i]->num()); + int num = bottom[i]->num(); + int channels = bottom[i]->channels(); + for (int j = 0; j < num; ++j) { + Dtype* bout = bottom[i]->mutable_cpu_diff(); + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + if (i == 1) { + // similar pairs + caffe_cpu_axpby( + channels, + alpha, + diff_pos.cpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + } else { + // dissimilar pairs + caffe_cpu_axpby( + channels, + alpha, + diff_neg.cpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + } + } else { + caffe_set(channels, Dtype(0), bout + (j*channels)); + } + } + } + } + } else { + // BP for data1(feat1) + if (propagate_down[0]) { + const Dtype alpha = top[0]->cpu_diff()[0] / + static_cast(bottom[0]->num()); + int num = bottom[0]->num(); + int channels = bottom[0]->channels(); + for (int j = 0; j < num; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + caffe_cpu_axpby( + channels, + alpha*dist_sq_neg.mutable_cpu_data()[j]\ +/((dist_sq_pos.mutable_cpu_data()[j]+margin)\ +*(dist_sq_pos.mutable_cpu_data()[j]+margin)), + diff_pos.cpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + caffe_cpu_axpby( + channels, + -alpha*(dist_sq_pos.mutable_cpu_data()[j] + margin)\ +/((dist_sq_pos.mutable_cpu_data()[j] + margin)\ +*(dist_sq_pos.mutable_cpu_data()[j] + margin)), + diff_neg.cpu_data() + (j*channels), + Dtype(1.0), + bout + (j*channels)); + } else { + caffe_set(channels, Dtype(0), bout + (j*channels)); + } + } + } + // BP for positive data(feat2) + if (propagate_down[1]) { + const Dtype alpha = top[0]->cpu_diff()[0] / + static_cast(bottom[1]->num()); + int num = bottom[1]->num(); + int channels = bottom[1]->channels(); + for (int j = 0; j < num; ++j) { + Dtype* bout = bottom[1]->mutable_cpu_diff(); + if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + caffe_cpu_axpby( + channels, + 
-alpha*dist_sq_neg.mutable_cpu_data()[j]\ +/((dist_sq_pos.mutable_cpu_data()[j] + margin)\ +*(dist_sq_pos.mutable_cpu_data()[j] + margin)), + diff_pos.cpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + } else { + caffe_set(channels, Dtype(0), bout + (j*channels)); + } + } + } + // BP for negative data(feat3) + if (propagate_down[2]) { + const Dtype alpha = top[0]->cpu_diff()[0] / + static_cast(bottom[2]->num()); + int num = bottom[2]->num(); + int channels = bottom[2]->channels(); + for (int j = 0; j < num; ++j) { + Dtype* bout = bottom[2]->mutable_cpu_diff(); + if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + caffe_cpu_axpby( + channels, + alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), + diff_neg.cpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + } else { + caffe_set(channels, Dtype(0), bout + (j*channels)); + } + } + } + } + // pair wise back + for (int i = 3; i < 5; ++i) { + if (propagate_down[i]) { + const Dtype sign = (i == 3) ? 1 : -1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(bottom[i]->num()); + int num = bottom[i]->num(); + int channels = bottom[i]->channels(); + for (int j = 0; j < num; ++j) { + Dtype* bout = bottom[i]->mutable_cpu_diff(); // similar pairs + caffe_cpu_axpby( + channels, + alpha, + diff_par.cpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + } + } + } +} + +#ifdef CPU_ONLY +STUB_GPU(TripletLossLayer); +#endif + +INSTANTIATE_CLASS(TripletLossLayer); +REGISTER_LAYER_CLASS(TripletLoss); + +} // namespace caffe diff --git a/src/caffe/layers/triplet_loss_layer_LOCAL_62802.cpp b/src/caffe/layers/triplet_loss_layer_LOCAL_62802.cpp new file mode 100644 index 00000000000..add25687899 --- /dev/null +++ b/src/caffe/layers/triplet_loss_layer_LOCAL_62802.cpp @@ -0,0 +1,261 @@ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/loss_layers.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +void 
TripletLossLayer::LayerSetUp( + const vector*>& bottom, const vector*>& top) { + LossLayer::LayerSetUp(bottom, top); + // number of triplet in a batch + int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); + // dimension of each descriptor + int dim = bottom[0]->count()/bottom[0]->num(); + CHECK_EQ(bottom[0]->channels(), dim); + CHECK_EQ(bottom[0]->height(), 1); + CHECK_EQ(bottom[0]->width(), 1); + CHECK_EQ(bottom[1]->channels(), 1); + CHECK_EQ(bottom[1]->height(), 1); + CHECK_EQ(bottom[1]->width(), 1); + // In each set, we have: + // the descriptor of reference sample, closest sample, and negative samples + // number of sets in the whole batch + int num_set = bottom[0]->num()/(2 + num_triplets); + dist_sq_.Reshape(num_set, 1, 1, 1); + diff_pos.Reshape(num_set, dim, 1, 1); + dist_sq_pos.Reshape(num_set, 1, 1, 1); + diff_neg.Reshape(num_set, dim, 1, 1); + dist_sq_neg.Reshape(num_set, 1, 1, 1); + // vector of ones used to sum along channels + summer_vec_.Reshape(bottom[0]->channels(), 1, 1, 1); + for (int i = 0; i < bottom[0]->channels(); ++i) + summer_vec_.mutable_cpu_data()[i] = Dtype(1); +} + +template +void TripletLossLayer::Forward_cpu( + const vector*>& bottom, + const vector*>& top) { + Dtype margin = this->layer_param_.triplet_loss_param().margin(); + Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); + int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); + CHECK_EQ(bottom[0]->num()%(2 + num_triplets), 0); + Dtype loss(0.0); + int dim = bottom[0]->count()/bottom[0]->num(); + int num_set = bottom[0]->num()/(2 + num_triplets); + if (losstype == 0) { + for (int i = 0; i < num_set; ++i) { + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive + diff_pos.mutable_cpu_data() + i*dim); // reference-pose_close + // Loss component calculated from reference and close one + dist_sq_pos.mutable_cpu_data()[i] = 
caffe_cpu_dot(dim, + diff_pos.cpu_data() + i*dim, diff_pos.cpu_data() + i*dim); + // a b is a similar pair for pair wise + // loss accumulated by the pair wise part + loss += dist_sq_pos.cpu_data()[i]; + for (int triplet = 0; triplet < num_triplets; ++triplet) { + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i]; + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + i*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + i*dim, diff_neg.cpu_data() + i*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg.cpu_data()[i]; + // loss accumulated accumulated by the triplet part + loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); + } + } + loss = loss / static_cast(num_set) / Dtype(2); + top[0]->mutable_cpu_data()[0] = loss; +} + +template +void TripletLossLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + Dtype margin = this->layer_param_.triplet_loss_param().margin(); + Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); + int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); + int dim = bottom[0]->count()/bottom[0]->num(); + int num_set = bottom[0]->num()/(2 + num_triplets); + if (losstype == 0) { + // BP for feat1(extracted from reference) + for (int i = 0; i < 1; ++i) { + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(bottom[i]->num()); + int num = bottom[i]->num(); + int channels = bottom[i]->channels(); + for (int j = 0; j < num; ++j) { + Dtype* bout = bottom[i]->mutable_cpu_diff(); + if ((margin + 
dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // similar pairs + caffe_cpu_axpby( + dim, + alpha, + diff_neg.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + } else { + caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } else { + for (int i = 0; i < 1; ++i) { + // BP for data1(feat1) + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + // the pair part + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] = 1 - \ + dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + caffe_cpu_axpby( + dim, + alpha*dist_sq_neg.mutable_cpu_data()[j]\ + /((dist_sq_pos.mutable_cpu_data()[j]+margin)\ + *(dist_sq_pos.mutable_cpu_data()[j]+margin)), + diff_pos.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + caffe_cpu_axpby( + dim, + -alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), + diff_neg.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } 
+ } + for (int i = 1; i < 2; ++i) { + // BP for positive data(feat2) + if (propagate_down[0]) { + const Dtype sign = -1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + // the pair part + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] = 1 - \ + dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + caffe_cpu_axpby( + dim, + alpha*dist_sq_neg.mutable_cpu_data()[j]\ + /((dist_sq_pos.mutable_cpu_data()[j]+margin)\ + *(dist_sq_pos.mutable_cpu_data()[j]+margin)), + diff_pos.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + for (int i = 2; i < 2 + num_triplets; ++i) { + // BP for negative data(feat3) + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; + // Loss component calculated 
from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + i)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] = 1 - \ + dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + caffe_cpu_axpby( + dim, + alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), + diff_neg.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + } else { + caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } +} + +#ifdef CPU_ONLY +STUB_GPU(TripletLossLayer); +#endif + +INSTANTIATE_CLASS(TripletLossLayer); +REGISTER_LAYER_CLASS(TripletLoss); + +} // namespace caffe diff --git a/src/caffe/layers/triplet_loss_layer_REMOTE_62802.cpp b/src/caffe/layers/triplet_loss_layer_REMOTE_62802.cpp new file mode 100644 index 00000000000..613fa703676 --- /dev/null +++ b/src/caffe/layers/triplet_loss_layer_REMOTE_62802.cpp @@ -0,0 +1,397 @@ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/loss_layers.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +void TripletLossLayer::LayerSetUp( + const vector*>& bottom, const vector*>& top) { + LossLayer::LayerSetUp(bottom, top); + // number of triplet in a batch + int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); + // dimension of each descriptor + int dim = bottom[0]->count()/bottom[0]->num(); + CHECK_EQ(bottom[0]->channels(), dim); + CHECK_EQ(bottom[0]->height(), 1); + CHECK_EQ(bottom[0]->width(), 1); + CHECK_EQ(bottom[1]->channels(), 1); + CHECK_EQ(bottom[1]->height(), 1); + 
CHECK_EQ(bottom[1]->width(), 1); + // In each set, we have: + // the descriptor of reference sample, closest sample, and negative samples + // number of sets in the whole batch + int num_set = bottom[0]->num()/(2 + num_triplets); + dist_sq_.Reshape(num_set, 1, 1, 1); + diff_pos.Reshape(num_set, dim, 1, 1); + dist_sq_pos.Reshape(num_set, 1, 1, 1); + diff_neg.Reshape(num_set, dim, 1, 1); + dist_sq_neg.Reshape(num_set, 1, 1, 1); + // vector of ones used to sum along channels + summer_vec_.Reshape(bottom[0]->channels(), 1, 1, 1); + for (int i = 0; i < bottom[0]->channels(); ++i) + summer_vec_.mutable_cpu_data()[i] = Dtype(1); +} + +template +void TripletLossLayer::Forward_cpu( + const vector*>& bottom, + const vector*>& top) { + Dtype margin = this->layer_param_.triplet_loss_param().margin(); + Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); + int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); + CHECK_EQ(bottom[0]->num()%(2 + num_triplets), 0); + Dtype loss(0.0); + int dim = bottom[0]->count()/bottom[0]->num(); + int num_set = bottom[0]->num()/(2 + num_triplets); + if (losstype == 0) { + for (int i = 0; i < num_set; ++i) { + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive + diff_pos.mutable_cpu_data() + i*dim); // reference-pose_close + // Loss component calculated from reference and close one + dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(dim, + diff_pos.cpu_data() + i*dim, diff_pos.cpu_data() + i*dim); + // a b is a similar pair for pair wise + // loss accumulated by the pair wise part + loss += dist_sq_pos.cpu_data()[i]; + for (int triplet = 0; triplet < num_triplets; ++triplet) { + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i]; + // Loss component calculated from negative part + caffe_sub( + dim, + 
bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + i*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + i*dim, diff_neg.cpu_data() + i*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg.cpu_data()[i]; + // loss accumulated accumulated by the triplet part + loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); + } + } + loss = loss / static_cast(num_set) / Dtype(2); + top[0]->mutable_cpu_data()[0] = loss; + } else { + for (int i = 0; i < num_set; ++i) { + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive + diff_pos.mutable_cpu_data() + i*dim); // reference-pose_close + // Loss component calculated from reference and close one + dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(dim, + diff_pos.cpu_data() + i*dim, diff_pos.cpu_data() + i*dim); + // a b is a similar pair for pair wise + // loss accumulated by the pair wise part + loss += dist_sq_pos.cpu_data()[i]; + dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; + dist_sq_.mutable_cpu_data()[i] += margin; + for (int triplet = 0; triplet < num_triplets; ++triplet) { + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + i*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + i*dim, diff_neg.cpu_data() + i*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[i] = 1 - \ + dist_sq_neg.cpu_data()[i] / dist_sq_.mutable_cpu_data()[i]; + // loss accumulated accumulated by the triplet part + 
loss += std::max(dist_sq_.cpu_data()[i], Dtype(0.0)); + } + } + loss = loss / static_cast(num_set) / Dtype(2); + top[0]->mutable_cpu_data()[0] = loss; + } +} + +template +void TripletLossLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + Dtype margin = this->layer_param_.triplet_loss_param().margin(); + Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); + int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); + int dim = bottom[0]->count()/bottom[0]->num(); + int num_set = bottom[0]->num()/(2 + num_triplets); + if (losstype == 0) { + // BP for feat1(extracted from reference) + for (int i = 0; i < 1; ++i) { + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + // the pair part + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; + // Loss component calculated from negative part + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // similar pair in triplet + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(1.0), + bout + (2 + 
num_triplets)*j*dim); + // dissimilar pair in triplet + caffe_cpu_axpby( + dim, + -alpha, + diff_neg.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + // BP for feat2(extracted from the closest sample) + for (int i = 1; i < 2; ++i) { + if (propagate_down[0]) { + const Dtype sign = -1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + // the pair part + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // similar pair in triplet + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + // BP for negative feature used in the num_triplets triplet part + for (int i = 2; i < 2 + num_triplets; ++i) { + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + 
bottom[0]->cpu_data() + ((2 + num_triplets)*j + i)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // dissimilar pairs + caffe_cpu_axpby( + dim, + alpha, + diff_neg.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + } else { + caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } else { + for (int i = 0; i < 1; ++i) { + // BP for data1(feat1) + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + // the pair part + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] = 1 - \ + dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; + // 
loss accumulated accumulated by the triplet part + if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + caffe_cpu_axpby( + dim, + alpha*dist_sq_neg.mutable_cpu_data()[j]\ + /((dist_sq_pos.mutable_cpu_data()[j]+margin)\ + *(dist_sq_pos.mutable_cpu_data()[j]+margin)), + diff_pos.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + caffe_cpu_axpby( + dim, + -alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), + diff_neg.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + for (int i = 1; i < 2; ++i) { + // BP for positive data(feat2) + if (propagate_down[0]) { + const Dtype sign = -1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + // the pair part + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] = 1 - \ + dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + caffe_cpu_axpby( + dim, + alpha*dist_sq_neg.mutable_cpu_data()[j]\ + /((dist_sq_pos.mutable_cpu_data()[j]+margin)\ + *(dist_sq_pos.mutable_cpu_data()[j]+margin)), + 
diff_pos.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + for (int i = 2; i < 2 + num_triplets; ++i) { + // BP for negative data(feat3) + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + i)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] = 1 - \ + dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + caffe_cpu_axpby( + dim, + alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), + diff_neg.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + } else { + caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } +} + +#ifdef CPU_ONLY +STUB_GPU(TripletLossLayer); +#endif + +INSTANTIATE_CLASS(TripletLossLayer); +REGISTER_LAYER_CLASS(TripletLoss); + +} // namespace caffe diff --git a/src/caffe/proto/caffe.proto.orig b/src/caffe/proto/caffe.proto.orig new file mode 100644 index 00000000000..5b0f6ce1b0a --- /dev/null +++ b/src/caffe/proto/caffe.proto.orig @@ -0,0 +1,1201 @@ +syntax = "proto2"; + +package caffe; + +// Specifies the shape (dimensions) of a Blob. 
+message BlobShape { + repeated int64 dim = 1 [packed = true]; +} + +message BlobProto { + optional BlobShape shape = 7; + repeated float data = 5 [packed = true]; + repeated float diff = 6 [packed = true]; + repeated double double_data = 8 [packed = true]; + repeated double double_diff = 9 [packed = true]; + + // 4D dimensions -- deprecated. Use "shape" instead. + optional int32 num = 1 [default = 0]; + optional int32 channels = 2 [default = 0]; + optional int32 height = 3 [default = 0]; + optional int32 width = 4 [default = 0]; +} + +// The BlobProtoVector is simply a way to pass multiple blobproto instances +// around. +message BlobProtoVector { + repeated BlobProto blobs = 1; +} + +message Datum { + optional int32 channels = 1; + optional int32 height = 2; + optional int32 width = 3; + // the actual image data, in bytes + optional bytes data = 4; + optional int32 label = 5; + // Optionally, the datum could also hold float data. + repeated float float_data = 6; + // If true data contains an encoded image that need to be decoded + optional bool encoded = 7 [default = false]; +} + +message FillerParameter { + // The filler type. + optional string type = 1 [default = 'constant']; + optional float value = 2 [default = 0]; // the value in constant filler + optional float min = 3 [default = 0]; // the min value in uniform filler + optional float max = 4 [default = 1]; // the max value in uniform filler + optional float mean = 5 [default = 0]; // the mean value in Gaussian filler + optional float std = 6 [default = 1]; // the std value in Gaussian filler + // The expected number of non-zero output weights for a given input in + // Gaussian filler -- the default -1 means don't perform sparsification. + optional int32 sparse = 7 [default = -1]; + // Normalize the filler variance by fan_in, fan_out, or their average. + // Applies to 'xavier' and 'msra' fillers. 
+ enum VarianceNorm { + FAN_IN = 0; + FAN_OUT = 1; + AVERAGE = 2; + } + optional VarianceNorm variance_norm = 8 [default = FAN_IN]; +} + +message NetParameter { + optional string name = 1; // consider giving the network a name + // The input blobs to the network. + repeated string input = 3; + // The shape of the input blobs. + repeated BlobShape input_shape = 8; + + // 4D input dimensions -- deprecated. Use "shape" instead. + // If specified, for each input blob there should be four + // values specifying the num, channels, height and width of the input blob. + // Thus, there should be a total of (4 * #input) numbers. + repeated int32 input_dim = 4; + + // Whether the network will force every layer to carry out backward operation. + // If set False, then whether to carry out backward is determined + // automatically according to the net structure and learning rates. + optional bool force_backward = 5 [default = false]; + // The current "state" of the network, including the phase, level, and stage. + // Some layers may be included/excluded depending on this state and the states + // specified in the layers' include and exclude fields. + optional NetState state = 6; + + // Print debugging information about results while running Net::Forward, + // Net::Backward, and Net::Update. + optional bool debug_info = 7 [default = false]; + + // The layers that make up the net. Each of their configurations, including + // connectivity and behavior, is specified as a LayerParameter. + repeated LayerParameter layer = 100; // ID 100 so layers are printed last. + + // DEPRECATED: use 'layer' instead. + repeated V1LayerParameter layers = 2; +} + +// NOTE +// Update the next available ID when you add a new SolverParameter field. 
+// +// SolverParameter next available ID: 40 (last added: momentum2) +message SolverParameter { + ////////////////////////////////////////////////////////////////////////////// + // Specifying the train and test networks + // + // Exactly one train net must be specified using one of the following fields: + // train_net_param, train_net, net_param, net + // One or more test nets may be specified using any of the following fields: + // test_net_param, test_net, net_param, net + // If more than one test net field is specified (e.g., both net and + // test_net are specified), they will be evaluated in the field order given + // above: (1) test_net_param, (2) test_net, (3) net_param/net. + // A test_iter must be specified for each test_net. + // A test_level and/or a test_stage may also be specified for each test_net. + ////////////////////////////////////////////////////////////////////////////// + + // Proto filename for the train net, possibly combined with one or more + // test nets. + optional string net = 24; + // Inline train net param, possibly combined with one or more test nets. + optional NetParameter net_param = 25; + + optional string train_net = 1; // Proto filename for the train net. + repeated string test_net = 2; // Proto filenames for the test nets. + optional NetParameter train_net_param = 21; // Inline train net params. + repeated NetParameter test_net_param = 22; // Inline test net params. + + // The states for the train/test nets. Must be unspecified or + // specified once per net. + // + // By default, all states will have solver = true; + // train_state will have phase = TRAIN, + // and all test_state's will have phase = TEST. + // Other defaults are set according to the NetState defaults. + optional NetState train_state = 26; + repeated NetState test_state = 27; + + // The number of iterations for each test net. + repeated int32 test_iter = 3; + + // The number of iterations between two testing phases. 
+ optional int32 test_interval = 4 [default = 0]; + optional bool test_compute_loss = 19 [default = false]; + // If true, run an initial test pass before the first iteration, + // ensuring memory availability and printing the starting value of the loss. + optional bool test_initialization = 32 [default = true]; + optional float base_lr = 5; // The base learning rate + // the number of iterations between displaying info. If display = 0, no info + // will be displayed. + optional int32 display = 6; + // Display the loss averaged over the last average_loss iterations + optional int32 average_loss = 33 [default = 1]; + optional int32 max_iter = 7; // the maximum number of iterations + // accumulate gradients over `iter_size` x `batch_size` instances + optional int32 iter_size = 36 [default = 1]; + + // The learning rate decay policy. The currently implemented learning rate + // policies are as follows: + // - fixed: always return base_lr. + // - step: return base_lr * gamma ^ (floor(iter / step)) + // - exp: return base_lr * gamma ^ iter + // - inv: return base_lr * (1 + gamma * iter) ^ (- power) + // - multistep: similar to step but it allows non uniform steps defined by + // stepvalue + // - poly: the effective learning rate follows a polynomial decay, to be + // zero by the max_iter. return base_lr (1 - iter/max_iter) ^ (power) + // - sigmoid: the effective learning rate follows a sigmod decay + // return base_lr ( 1/(1 + exp(-gamma * (iter - stepsize)))) + // + // where base_lr, max_iter, gamma, step, stepvalue and power are defined + // in the solver parameter protocol buffer, and iter is the current iteration. + optional string lr_policy = 8; + optional float gamma = 9; // The parameter to compute the learning rate. + optional float power = 10; // The parameter to compute the learning rate. + optional float momentum = 11; // The momentum value. + optional float weight_decay = 12; // The weight decay. 
+ // regularization types supported: L1 and L2 + // controlled by weight_decay + optional string regularization_type = 29 [default = "L2"]; + // the stepsize for learning rate policy "step" + optional int32 stepsize = 13; + // the stepsize for learning rate policy "multistep" + repeated int32 stepvalue = 34; + + // Set clip_gradients to >= 0 to clip parameter gradients to that L2 norm, + // whenever their actual L2 norm is larger. + optional float clip_gradients = 35 [default = -1]; + + optional int32 snapshot = 14 [default = 0]; // The snapshot interval + optional string snapshot_prefix = 15; // The prefix for the snapshot. + // whether to snapshot diff in the results or not. Snapshotting diff will help + // debugging but the final protocol buffer size will be much larger. + optional bool snapshot_diff = 16 [default = false]; + enum SnapshotFormat { + HDF5 = 0; + BINARYPROTO = 1; + } + optional SnapshotFormat snapshot_format = 37 [default = BINARYPROTO]; + // the mode solver will use: 0 for CPU and 1 for GPU. Use GPU in default. + enum SolverMode { + CPU = 0; + GPU = 1; + } + optional SolverMode solver_mode = 17 [default = GPU]; + // the device_id will that be used in GPU mode. Use device_id = 0 in default. + optional int32 device_id = 18 [default = 0]; + // If non-negative, the seed with which the Solver will initialize the Caffe + // random number generator -- useful for reproducible results. Otherwise, + // (and by default) initialize using a seed derived from the system clock. 
+ optional int64 random_seed = 20 [default = -1]; + + // Solver type + enum SolverType { + SGD = 0; + NESTEROV = 1; + ADAGRAD = 2; + RMSPROP = 3; + ADADELTA = 4; + ADAM = 5; + } + optional SolverType solver_type = 30 [default = SGD]; + // numerical stability for RMSProp, AdaGrad and AdaDelta and Adam + optional float delta = 31 [default = 1e-8]; + // parameters for the Adam solver + optional float momentum2 = 39 [default = 0.999]; + + // RMSProp decay value + // MeanSquare(t) = rms_decay*MeanSquare(t-1) + (1-rms_decay)*SquareGradient(t) + optional float rms_decay = 38; + + // If true, print information about the state of the net that may help with + // debugging learning problems. + optional bool debug_info = 23 [default = false]; + + // If false, don't save a snapshot after training finishes. + optional bool snapshot_after_train = 28 [default = true]; +} + +// A message that stores the solver snapshots +message SolverState { + optional int32 iter = 1; // The current iteration + optional string learned_net = 2; // The file that stores the learned net. + repeated BlobProto history = 3; // The history for sgd solvers + optional int32 current_step = 4 [default = 0]; // The current step for learning rate +} + +enum Phase { + TRAIN = 0; + TEST = 1; +} + +message NetState { + optional Phase phase = 1 [default = TEST]; + optional int32 level = 2 [default = 0]; + repeated string stage = 3; +} + +message NetStateRule { + // Set phase to require the NetState have a particular phase (TRAIN or TEST) + // to meet this rule. + optional Phase phase = 1; + + // Set the minimum and/or maximum levels in which the layer should be used. + // Leave undefined to meet the rule regardless of level. + optional int32 min_level = 2; + optional int32 max_level = 3; + + // Customizable sets of stages to include or exclude. + // The net must have ALL of the specified stages and NONE of the specified + // "not_stage"s to meet the rule. 
+  // (Use multiple NetStateRules to specify conjunctions of stages.)
+  repeated string stage = 4;
+  repeated string not_stage = 5;
+}
+
+// Specifies training parameters (multipliers on global learning constants,
+// and the name and other settings used for weight sharing).
+message ParamSpec {
+  // The names of the parameter blobs -- useful for sharing parameters among
+  // layers, but never required otherwise. To share a parameter between two
+  // layers, give it a (non-empty) name.
+  optional string name = 1;
+
+  // Whether to require shared weights to have the same shape, or just the same
+  // count -- defaults to STRICT if unspecified.
+  optional DimCheckMode share_mode = 2;
+  enum DimCheckMode {
+    // STRICT (default) requires that num, channels, height, width each match.
+    STRICT = 0;
+    // PERMISSIVE requires only the count (num*channels*height*width) to match.
+    PERMISSIVE = 1;
+  }
+
+  // The multiplier on the global learning rate for this parameter.
+  optional float lr_mult = 3 [default = 1.0];
+
+  // The multiplier on the global weight decay for this parameter.
+  optional float decay_mult = 4 [default = 1.0];
+}
+
+// NOTE
+// Update the next available ID when you add a new LayerParameter field.
+//
+// LayerParameter next available layer-specific ID: 140 (last added: triplet_loss_param)
+message LayerParameter {
+  optional string name = 1; // the layer name
+  optional string type = 2; // the layer type
+  repeated string bottom = 3; // the name of each bottom blob
+  repeated string top = 4; // the name of each top blob
+
+  // The train / test phase for computation.
+  optional Phase phase = 10;
+
+  // The amount of weight to assign each top blob in the objective.
+  // Each layer assigns a default value, usually of either 0 or 1,
+  // to each top blob.
+  repeated float loss_weight = 5;
+
+  // Specifies training parameters (multipliers on global learning constants,
+  // and the name and other settings used for weight sharing).
+ repeated ParamSpec param = 6; + + // The blobs containing the numeric parameters of the layer. + repeated BlobProto blobs = 7; + + // Specifies on which bottoms the backpropagation should be skipped. + // The size must be either 0 or equal to the number of bottoms. + repeated bool propagate_down = 11; + + // Rules controlling whether and when a layer is included in the network, + // based on the current NetState. You may specify a non-zero number of rules + // to include OR exclude, but not both. If no include or exclude rules are + // specified, the layer is always included. If the current NetState meets + // ANY (i.e., one or more) of the specified rules, the layer is + // included/excluded. + repeated NetStateRule include = 8; + repeated NetStateRule exclude = 9; + + // Parameters for data pre-processing. + optional TransformationParameter transform_param = 100; + + // Parameters shared by loss layers. + optional LossParameter loss_param = 101; + + // Layer type-specific parameters. + // + // Note: certain layers may have more than one computational engine + // for their implementation. These layers include an Engine type and + // engine parameter for selecting the implementation. + // The default for the engine is set by the ENGINE switch at compile-time. 
+  optional AccuracyParameter accuracy_param = 102;
+  optional ArgMaxParameter argmax_param = 103;
+  optional ConcatParameter concat_param = 104;
+  optional ContrastiveLossParameter contrastive_loss_param = 105;
+  optional ConvolutionParameter convolution_param = 106;
+  optional DataParameter data_param = 107;
+  optional DropoutParameter dropout_param = 108;
+  optional DummyDataParameter dummy_data_param = 109;
+  optional EltwiseParameter eltwise_param = 110;
+  optional EmbedParameter embed_param = 137;
+  optional ExpParameter exp_param = 111;
+  optional FlattenParameter flatten_param = 135;
+  optional HDF5DataParameter hdf5_data_param = 112;
+  optional HDF5OutputParameter hdf5_output_param = 113;
+  optional HingeLossParameter hinge_loss_param = 114;
+  optional ImageDataParameter image_data_param = 115;
+  optional InfogainLossParameter infogain_loss_param = 116;
+  optional InnerProductParameter inner_product_param = 117;
+  optional LogParameter log_param = 134;
+  optional LRNParameter lrn_param = 118;
+  optional MemoryDataParameter memory_data_param = 119;
+  optional MVNParameter mvn_param = 120;
+  optional PoolingParameter pooling_param = 121;
+  optional PowerParameter power_param = 122;
+  optional PReLUParameter prelu_param = 131;
+  optional PythonParameter python_param = 130;
+  optional ReductionParameter reduction_param = 136;
+  optional ReLUParameter relu_param = 123;
+  optional ReshapeParameter reshape_param = 133;
+  optional SigmoidParameter sigmoid_param = 124;
+  optional SoftmaxParameter softmax_param = 125;
+  optional SPPParameter spp_param = 132;
+  optional SliceParameter slice_param = 126;
+  optional TanHParameter tanh_param = 127;
+  optional ThresholdParameter threshold_param = 128;
+  optional TileParameter tile_param = 138;
+  optional WindowDataParameter window_data_param = 129;
+  // Resolved merge conflict: field number 139 is kept -- the alternative
+  // branch used 137, which collides with embed_param = 137 above (proto2
+  // forbids duplicate field numbers within a message).
+  optional TripletLossParameter triplet_loss_param = 139;
+}
+
+// Message that stores parameters used to apply transformation
+// to the data layer's data
+message TransformationParameter {
+  // For data pre-processing, we can do simple scaling and subtracting the
+  // data mean, if provided. Note that the mean subtraction is always carried
+  // out before scaling.
+  optional float scale = 1 [default = 1];
+  // Specify if we want to randomly mirror data.
+  optional bool mirror = 2 [default = false];
+  // Specify if we would like to randomly crop an image.
+  optional uint32 crop_size = 3 [default = 0];
+  // mean_file and mean_value cannot be specified at the same time
+  optional string mean_file = 4;
+  // if specified can be repeated once (would substract it from all the channels)
+  // or can be repeated the same number of times as channels
+  // (would subtract them from the corresponding channel)
+  repeated float mean_value = 5;
+  // Force the decoded image to have 3 color channels.
+  optional bool force_color = 6 [default = false];
+  // Force the decoded image to have 1 color channels.
+  optional bool force_gray = 7 [default = false];
+}
+
+// Message that stores parameters shared by loss layers
+message LossParameter {
+  // If specified, ignore instances with the given label.
+  optional int32 ignore_label = 1;
+  // If true, normalize each batch across all instances (including spatial
+  // dimesions, but not ignored instances); else, divide by batch size only.
+  optional bool normalize = 2 [default = true];
+}
+
+// Messages that store parameters used by individual layer types follow, in
+// alphabetical order.
+
+message AccuracyParameter {
+  // When computing accuracy, count as correct by comparing the true label to
+  // the top k scoring classes. By default, only compare to the top scoring
+  // class (i.e. argmax).
+ optional uint32 top_k = 1 [default = 1]; + + // The "label" axis of the prediction blob, whose argmax corresponds to the + // predicted label -- may be negative to index from the end (e.g., -1 for the + // last axis). For example, if axis == 1 and the predictions are + // (N x C x H x W), the label blob is expected to contain N*H*W ground truth + // labels with integer values in {0, 1, ..., C-1}. + optional int32 axis = 2 [default = 1]; + + // If specified, ignore instances with the given label. + optional int32 ignore_label = 3; +} + +message ArgMaxParameter { + // If true produce pairs (argmax, maxval) + optional bool out_max_val = 1 [default = false]; + optional uint32 top_k = 2 [default = 1]; +} + +message ConcatParameter { + // The axis along which to concatenate -- may be negative to index from the + // end (e.g., -1 for the last axis). Other axes must have the + // same dimension for all the bottom blobs. + // By default, ConcatLayer concatenates blobs along the "channels" axis (1). + optional int32 axis = 2 [default = 1]; + + // DEPRECATED: alias for "axis" -- does not support negative indexing. + optional uint32 concat_dim = 1 [default = 1]; +} + +message ContrastiveLossParameter { + // margin for dissimilar pair + optional float margin = 1 [default = 1.0]; + // The first implementation of this cost did not exactly match the cost of + // Hadsell et al 2006 -- using (margin - d^2) instead of (margin - d)^2. + // legacy_version = false (the default) uses (margin - d)^2 as proposed in the + // Hadsell paper. New models should probably use this version. + // legacy_version = true uses (margin - d^2). 
This is kept to support /
+  // reproduce existing models and results
+  optional bool legacy_version = 2 [default = false];
+}
+
+// Parameters for the triplet ranking loss layer.
+// (The narrower duplicate definition of this message -- margin only -- has
+// been removed: protoc rejects two messages with the same name, and this
+// definition is a strict superset of the deleted one.)
+message TripletLossParameter {
+  // margin for negative triplet
+  optional float margin = 1 [default = 1.0];
+  // selects the loss formulation variant
+  optional uint32 losstype = 2 [default = 1];
+  // number of triplets sampled per batch
+  optional uint32 num_triplets = 3 [default = 3];
+}
+
+message ConvolutionParameter {
+  optional uint32 num_output = 1; // The number of outputs for the layer
+  optional bool bias_term = 2 [default = true]; // whether to have bias terms
+  // Pad, kernel size, and stride are all given as a single value for equal
+  // dimensions in height and width or as Y, X pairs.
+  optional uint32 pad = 3 [default = 0]; // The padding size (equal in Y, X)
+  optional uint32 pad_h = 9 [default = 0]; // The padding height
+  optional uint32 pad_w = 10 [default = 0]; // The padding width
+  optional uint32 kernel_size = 4; // The kernel size (square)
+  optional uint32 kernel_h = 11; // The kernel height
+  optional uint32 kernel_w = 12; // The kernel width
+  optional uint32 group = 5 [default = 1]; // The group size for group conv
+  optional uint32 stride = 6 [default = 1]; // The stride (equal in Y, X)
+  optional uint32 stride_h = 13; // The stride height
+  optional uint32 stride_w = 14; // The stride width
+  optional FillerParameter weight_filler = 7; // The filler for the weight
+  optional FillerParameter bias_filler = 8; // The filler for the bias
+  enum Engine {
+    DEFAULT = 0;
+    CAFFE = 1;
+    CUDNN = 2;
+  }
+  optional Engine engine = 15 [default = DEFAULT];
+}
+
+message DataParameter {
+  enum DB {
+    LEVELDB = 0;
+    LMDB = 1;
+  }
+  // Specify the data source.
+  optional string source = 1;
+  // Specify the batch size.
+  optional uint32 batch_size = 4;
+  // The rand_skip variable is for the data layer to skip a few data points
+  // to avoid all asynchronous sgd clients to start at the same point.
The skip + // point would be set as rand_skip * rand(0,1). Note that rand_skip should not + // be larger than the number of keys in the database. + // DEPRECATED. Each solver accesses a different subset of the database. + optional uint32 rand_skip = 7 [default = 0]; + optional DB backend = 8 [default = LEVELDB]; + // DEPRECATED. See TransformationParameter. For data pre-processing, we can do + // simple scaling and subtracting the data mean, if provided. Note that the + // mean subtraction is always carried out before scaling. + optional float scale = 2 [default = 1]; + optional string mean_file = 3; + // DEPRECATED. See TransformationParameter. Specify if we would like to randomly + // crop an image. + optional uint32 crop_size = 5 [default = 0]; + // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror + // data. + optional bool mirror = 6 [default = false]; + // Force the encoded image to have 3 color channels + optional bool force_encoded_color = 9 [default = false]; + // Prefetch queue (Number of batches to prefetch to host memory, increase if + // data access bandwidth varies). + optional uint32 prefetch = 10 [default = 4]; +} + +message DropoutParameter { + optional float dropout_ratio = 1 [default = 0.5]; // dropout ratio +} + +// DummyDataLayer fills any number of arbitrarily shaped blobs with random +// (or constant) data generated by "Fillers" (see "message FillerParameter"). +message DummyDataParameter { + // This layer produces N >= 1 top blobs. DummyDataParameter must specify 1 or N + // shape fields, and 0, 1 or N data_fillers. + // + // If 0 data_fillers are specified, ConstantFiller with a value of 0 is used. + // If 1 data_filler is specified, it is applied to all top blobs. If N are + // specified, the ith is applied to the ith top blob. + repeated FillerParameter data_filler = 1; + repeated BlobShape shape = 6; + + // 4D dimensions -- deprecated. Use "shape" instead. 
+ repeated uint32 num = 2; + repeated uint32 channels = 3; + repeated uint32 height = 4; + repeated uint32 width = 5; +} + +message EltwiseParameter { + enum EltwiseOp { + PROD = 0; + SUM = 1; + MAX = 2; + } + optional EltwiseOp operation = 1 [default = SUM]; // element-wise operation + repeated float coeff = 2; // blob-wise coefficient for SUM operation + + // Whether to use an asymptotically slower (for >2 inputs) but stabler method + // of computing the gradient for the PROD operation. (No effect for SUM op.) + optional bool stable_prod_grad = 3 [default = true]; +} + +// Message that stores parameters used by EmbedLayer +message EmbedParameter { + optional uint32 num_output = 1; // The number of outputs for the layer + // The input is given as integers to be interpreted as one-hot + // vector indices with dimension num_input. Hence num_input should be + // 1 greater than the maximum possible input value. + optional uint32 input_dim = 2; + + optional bool bias_term = 3 [default = true]; // Whether to use a bias term + optional FillerParameter weight_filler = 4; // The filler for the weight + optional FillerParameter bias_filler = 5; // The filler for the bias + +} + +// Message that stores parameters used by ExpLayer +message ExpParameter { + // ExpLayer computes outputs y = base ^ (shift + scale * x), for base > 0. + // Or if base is set to the default (-1), base is set to e, + // so y = exp(shift + scale * x). + optional float base = 1 [default = -1.0]; + optional float scale = 2 [default = 1.0]; + optional float shift = 3 [default = 0.0]; +} + +/// Message that stores parameters used by FlattenLayer +message FlattenParameter { + // The first axis to flatten: all preceding axes are retained in the output. + // May be negative to index from the end (e.g., -1 for the last axis). + optional int32 axis = 1 [default = 1]; + + // The last axis to flatten: all following axes are retained in the output. 
+ // May be negative to index from the end (e.g., the default -1 for the last + // axis). + optional int32 end_axis = 2 [default = -1]; +} + +// Message that stores parameters used by HDF5DataLayer +message HDF5DataParameter { + // Specify the data source. + optional string source = 1; + // Specify the batch size. + optional uint32 batch_size = 2; + + // Specify whether to shuffle the data. + // If shuffle == true, the ordering of the HDF5 files is shuffled, + // and the ordering of data within any given HDF5 file is shuffled, + // but data between different files are not interleaved; all of a file's + // data are output (in a random order) before moving onto another file. + optional bool shuffle = 3 [default = false]; +} + +message HDF5OutputParameter { + optional string file_name = 1; +} + +message HingeLossParameter { + enum Norm { + L1 = 1; + L2 = 2; + } + // Specify the Norm to use L1 or L2 + optional Norm norm = 1 [default = L1]; +} + +message ImageDataParameter { + // Specify the data source. + optional string source = 1; + // Specify the batch size. + optional uint32 batch_size = 4 [default = 1]; + // The rand_skip variable is for the data layer to skip a few data points + // to avoid all asynchronous sgd clients to start at the same point. The skip + // point would be set as rand_skip * rand(0,1). Note that rand_skip should not + // be larger than the number of keys in the database. + optional uint32 rand_skip = 7 [default = 0]; + // Whether or not ImageLayer should shuffle the list of files at every epoch. + optional bool shuffle = 8 [default = false]; + // It will also resize images if new_height or new_width are not zero. + optional uint32 new_height = 9 [default = 0]; + optional uint32 new_width = 10 [default = 0]; + // Specify if the images are color or gray + optional bool is_color = 11 [default = true]; + // DEPRECATED. See TransformationParameter. For data pre-processing, we can do + // simple scaling and subtracting the data mean, if provided. 
Note that the + // mean subtraction is always carried out before scaling. + optional float scale = 2 [default = 1]; + optional string mean_file = 3; + // DEPRECATED. See TransformationParameter. Specify if we would like to randomly + // crop an image. + optional uint32 crop_size = 5 [default = 0]; + // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror + // data. + optional bool mirror = 6 [default = false]; + optional string root_folder = 12 [default = ""]; +} + +message InfogainLossParameter { + // Specify the infogain matrix source. + optional string source = 1; +} + +message InnerProductParameter { + optional uint32 num_output = 1; // The number of outputs for the layer + optional bool bias_term = 2 [default = true]; // whether to have bias terms + optional FillerParameter weight_filler = 3; // The filler for the weight + optional FillerParameter bias_filler = 4; // The filler for the bias + + // The first axis to be lumped into a single inner product computation; + // all preceding axes are retained in the output. + // May be negative to index from the end (e.g., -1 for the last axis). + optional int32 axis = 5 [default = 1]; +} + +// Message that stores parameters used by LogLayer +message LogParameter { + // LogLayer computes outputs y = log_base(shift + scale * x), for base > 0. 
+ // Or if base is set to the default (-1), base is set to e, + // so y = ln(shift + scale * x) = log_e(shift + scale * x) + optional float base = 1 [default = -1.0]; + optional float scale = 2 [default = 1.0]; + optional float shift = 3 [default = 0.0]; +} + +// Message that stores parameters used by LRNLayer +message LRNParameter { + optional uint32 local_size = 1 [default = 5]; + optional float alpha = 2 [default = 1.]; + optional float beta = 3 [default = 0.75]; + enum NormRegion { + ACROSS_CHANNELS = 0; + WITHIN_CHANNEL = 1; + } + optional NormRegion norm_region = 4 [default = ACROSS_CHANNELS]; + optional float k = 5 [default = 1.]; +} + +message MemoryDataParameter { + optional uint32 batch_size = 1; + optional uint32 channels = 2; + optional uint32 height = 3; + optional uint32 width = 4; +} + +message MVNParameter { + // This parameter can be set to false to normalize mean only + optional bool normalize_variance = 1 [default = true]; + + // This parameter can be set to true to perform DNN-like MVN + optional bool across_channels = 2 [default = false]; + + // Epsilon for not dividing by zero while normalizing variance + optional float eps = 3 [default = 1e-9]; +} + +message PoolingParameter { + enum PoolMethod { + MAX = 0; + AVE = 1; + STOCHASTIC = 2; + } + optional PoolMethod pool = 1 [default = MAX]; // The pooling method + // Pad, kernel size, and stride are all given as a single value for equal + // dimensions in height and width or as Y, X pairs. 
+ optional uint32 pad = 4 [default = 0]; // The padding size (equal in Y, X) + optional uint32 pad_h = 9 [default = 0]; // The padding height + optional uint32 pad_w = 10 [default = 0]; // The padding width + optional uint32 kernel_size = 2; // The kernel size (square) + optional uint32 kernel_h = 5; // The kernel height + optional uint32 kernel_w = 6; // The kernel width + optional uint32 stride = 3 [default = 1]; // The stride (equal in Y, X) + optional uint32 stride_h = 7; // The stride height + optional uint32 stride_w = 8; // The stride width + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 11 [default = DEFAULT]; + // If global_pooling then it will pool over the size of the bottom by doing + // kernel_h = bottom->height and kernel_w = bottom->width + optional bool global_pooling = 12 [default = false]; +} + +message PowerParameter { + // PowerLayer computes outputs y = (shift + scale * x) ^ power. + optional float power = 1 [default = 1.0]; + optional float scale = 2 [default = 1.0]; + optional float shift = 3 [default = 0.0]; +} + +message PythonParameter { + optional string module = 1; + optional string layer = 2; + // This value is set to the attribute `param_str` of the `PythonLayer` object + // in Python before calling the `setup()` method. This could be a number, + // string, dictionary in Python dict format, JSON, etc. You may parse this + // string in `setup` method and use it in `forward` and `backward`. + optional string param_str = 3 [default = '']; + // Whether this PythonLayer is shared among worker solvers during data parallelism. + // If true, each worker solver sequentially run forward from this layer. + // This value should be set true if you are using it as a data layer. 
+ optional bool share_in_parallel = 4 [default = false]; +} + +// Message that stores parameters used by ReductionLayer +message ReductionParameter { + enum ReductionOp { + SUM = 1; + ASUM = 2; + SUMSQ = 3; + MEAN = 4; + } + + optional ReductionOp operation = 1 [default = SUM]; // reduction operation + + // The first axis to reduce to a scalar -- may be negative to index from the + // end (e.g., -1 for the last axis). + // (Currently, only reduction along ALL "tail" axes is supported; reduction + // of axis M through N, where N < num_axes - 1, is unsupported.) + // Suppose we have an n-axis bottom Blob with shape: + // (d0, d1, d2, ..., d(m-1), dm, d(m+1), ..., d(n-1)). + // If axis == m, the output Blob will have shape + // (d0, d1, d2, ..., d(m-1)), + // and the ReductionOp operation is performed (d0 * d1 * d2 * ... * d(m-1)) + // times, each including (dm * d(m+1) * ... * d(n-1)) individual data. + // If axis == 0 (the default), the output Blob always has the empty shape + // (count 1), performing reduction across the entire input -- + // often useful for creating new loss functions. + optional int32 axis = 2 [default = 0]; + + optional float coeff = 3 [default = 1.0]; // coefficient for output +} + +// Message that stores parameters used by ReLULayer +message ReLUParameter { + // Allow non-zero slope for negative inputs to speed up optimization + // Described in: + // Maas, A. L., Hannun, A. Y., & Ng, A. Y. (2013). Rectifier nonlinearities + // improve neural network acoustic models. In ICML Workshop on Deep Learning + // for Audio, Speech, and Language Processing. + optional float negative_slope = 1 [default = 0]; + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 2 [default = DEFAULT]; +} + +message ReshapeParameter { + // Specify the output dimensions. If some of the dimensions are set to 0, + // the corresponding dimension from the bottom layer is used (unchanged). 
+ // Exactly one dimension may be set to -1, in which case its value is + // inferred from the count of the bottom blob and the remaining dimensions. + // For example, suppose we want to reshape a 2D blob "input" with shape 2 x 8: + // + // layer { + // type: "Reshape" bottom: "input" top: "output" + // reshape_param { ... } + // } + // + // If "input" is 2D with shape 2 x 8, then the following reshape_param + // specifications are all equivalent, producing a 3D blob "output" with shape + // 2 x 2 x 4: + // + // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } + // reshape_param { shape { dim: 0 dim: 2 dim: 4 } } + // reshape_param { shape { dim: 0 dim: 2 dim: -1 } } + // reshape_param { shape { dim: -1 dim: 0 dim: 2 } } + // + optional BlobShape shape = 1; + + // axis and num_axes control the portion of the bottom blob's shape that are + // replaced by (included in) the reshape. By default (axis == 0 and + // num_axes == -1), the entire bottom blob shape is included in the reshape, + // and hence the shape field must specify the entire output shape. + // + // axis may be non-zero to retain some portion of the beginning of the input + // shape (and may be negative to index from the end; e.g., -1 to begin the + // reshape after the last axis, including nothing in the reshape, + // -2 to include only the last axis, etc.). + // + // For example, suppose "input" is a 2D blob with shape 2 x 8. + // Then the following ReshapeLayer specifications are all equivalent, + // producing a blob "output" with shape 2 x 2 x 4: + // + // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } + // reshape_param { shape { dim: 2 dim: 4 } axis: 1 } + // reshape_param { shape { dim: 2 dim: 4 } axis: -3 } + // + // num_axes specifies the extent of the reshape. + // If num_axes >= 0 (and axis >= 0), the reshape will be performed only on + // input axes in the range [axis, axis+num_axes]. + // num_axes may also be -1, the default, to include all remaining axes + // (starting from axis). 
+ // + // For example, suppose "input" is a 2D blob with shape 2 x 8. + // Then the following ReshapeLayer specifications are equivalent, + // producing a blob "output" with shape 1 x 2 x 8. + // + // reshape_param { shape { dim: 1 dim: 2 dim: 8 } } + // reshape_param { shape { dim: 1 dim: 2 } num_axes: 1 } + // reshape_param { shape { dim: 1 } num_axes: 0 } + // + // On the other hand, these would produce output blob shape 2 x 1 x 8: + // + // reshape_param { shape { dim: 2 dim: 1 dim: 8 } } + // reshape_param { shape { dim: 1 } axis: 1 num_axes: 0 } + // + optional int32 axis = 2 [default = 0]; + optional int32 num_axes = 3 [default = -1]; +} + +message SigmoidParameter { + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 1 [default = DEFAULT]; +} + +message SliceParameter { + // The axis along which to slice -- may be negative to index from the end + // (e.g., -1 for the last axis). + // By default, SliceLayer concatenates blobs along the "channels" axis (1). + optional int32 axis = 3 [default = 1]; + repeated uint32 slice_point = 2; + + // DEPRECATED: alias for "axis" -- does not support negative indexing. + optional uint32 slice_dim = 1 [default = 1]; +} + +// Message that stores parameters used by SoftmaxLayer, SoftmaxWithLossLayer +message SoftmaxParameter { + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 1 [default = DEFAULT]; + + // The axis along which to perform the softmax -- may be negative to index + // from the end (e.g., -1 for the last axis). + // Any other axes will be evaluated as independent softmaxes. + optional int32 axis = 2 [default = 1]; +} + +message TanHParameter { + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 1 [default = DEFAULT]; +} + +// Message that stores parameters used by TileLayer +message TileParameter { + // The index of the axis to tile. 
+ optional int32 axis = 1 [default = 1]; + + // The number of copies (tiles) of the blob to output. + optional int32 tiles = 2; +} + +// Message that stores parameters used by ThresholdLayer +message ThresholdParameter { + optional float threshold = 1 [default = 0]; // Strictly positive values +} + +message WindowDataParameter { + // Specify the data source. + optional string source = 1; + // For data pre-processing, we can do simple scaling and subtracting the + // data mean, if provided. Note that the mean subtraction is always carried + // out before scaling. + optional float scale = 2 [default = 1]; + optional string mean_file = 3; + // Specify the batch size. + optional uint32 batch_size = 4; + // Specify if we would like to randomly crop an image. + optional uint32 crop_size = 5 [default = 0]; + // Specify if we want to randomly mirror data. + optional bool mirror = 6 [default = false]; + // Foreground (object) overlap threshold + optional float fg_threshold = 7 [default = 0.5]; + // Background (non-object) overlap threshold + optional float bg_threshold = 8 [default = 0.5]; + // Fraction of batch that should be foreground objects + optional float fg_fraction = 9 [default = 0.25]; + // Amount of contextual padding to add around a window + // (used only by the window_data_layer) + optional uint32 context_pad = 10 [default = 0]; + // Mode for cropping out a detection window + // warp: cropped window is warped to a fixed size and aspect ratio + // square: the tightest square around the window is cropped + optional string crop_mode = 11 [default = "warp"]; + // cache_images: will load all images in memory for faster access + optional bool cache_images = 12 [default = false]; + // append root_folder to locate images + optional string root_folder = 13 [default = ""]; +} + +message SPPParameter { + enum PoolMethod { + MAX = 0; + AVE = 1; + STOCHASTIC = 2; + } + optional uint32 pyramid_height = 1; + optional PoolMethod pool = 2 [default = MAX]; // The pooling method 
+ enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 6 [default = DEFAULT]; +} + +// DEPRECATED: use LayerParameter. +message V1LayerParameter { + repeated string bottom = 2; + repeated string top = 3; + optional string name = 4; + repeated NetStateRule include = 32; + repeated NetStateRule exclude = 33; + enum LayerType { + NONE = 0; + ABSVAL = 35; + ACCURACY = 1; + ARGMAX = 30; + BNLL = 2; + CONCAT = 3; + CONTRASTIVE_LOSS = 37; + CONVOLUTION = 4; + DATA = 5; + DECONVOLUTION = 39; + DROPOUT = 6; + DUMMY_DATA = 32; + EUCLIDEAN_LOSS = 7; + ELTWISE = 25; + EXP = 38; + FLATTEN = 8; + HDF5_DATA = 9; + HDF5_OUTPUT = 10; + HINGE_LOSS = 28; + IM2COL = 11; + IMAGE_DATA = 12; + INFOGAIN_LOSS = 13; + INNER_PRODUCT = 14; + LRN = 15; + MEMORY_DATA = 29; + MULTINOMIAL_LOGISTIC_LOSS = 16; + MVN = 34; + POOLING = 17; + POWER = 26; + RELU = 18; + SIGMOID = 19; + SIGMOID_CROSS_ENTROPY_LOSS = 27; + SILENCE = 36; + SOFTMAX = 20; + SOFTMAX_LOSS = 21; + SPLIT = 22; + SLICE = 33; + TANH = 23; + TRIPLET_LOSS = 40; + WINDOW_DATA = 24; + THRESHOLD = 31; + + } + optional LayerType type = 5; + repeated BlobProto blobs = 6; + repeated string param = 1001; + repeated DimCheckMode blob_share_mode = 1002; + enum DimCheckMode { + STRICT = 0; + PERMISSIVE = 1; + } + repeated float blobs_lr = 7; + repeated float weight_decay = 8; + repeated float loss_weight = 35; + optional AccuracyParameter accuracy_param = 27; + optional ArgMaxParameter argmax_param = 23; + optional ConcatParameter concat_param = 9; + optional ContrastiveLossParameter contrastive_loss_param = 40; + optional ConvolutionParameter convolution_param = 10; + optional DataParameter data_param = 11; + optional DropoutParameter dropout_param = 12; + optional DummyDataParameter dummy_data_param = 26; + optional EltwiseParameter eltwise_param = 24; + optional ExpParameter exp_param = 41; + optional HDF5DataParameter hdf5_data_param = 13; + optional HDF5OutputParameter hdf5_output_param = 14; + optional 
HingeLossParameter hinge_loss_param = 29; + optional ImageDataParameter image_data_param = 15; + optional InfogainLossParameter infogain_loss_param = 16; + optional InnerProductParameter inner_product_param = 17; + optional LRNParameter lrn_param = 18; + optional MemoryDataParameter memory_data_param = 22; + optional MVNParameter mvn_param = 34; + optional PoolingParameter pooling_param = 19; + optional PowerParameter power_param = 21; + optional ReLUParameter relu_param = 30; + optional SigmoidParameter sigmoid_param = 38; + optional SoftmaxParameter softmax_param = 39; + optional SliceParameter slice_param = 31; + optional TanHParameter tanh_param = 37; + optional ThresholdParameter threshold_param = 25; + optional WindowDataParameter window_data_param = 20; + optional TransformationParameter transform_param = 36; + optional LossParameter loss_param = 42; + optional V0LayerParameter layer = 1; + optional TripletLossParameter triplet_loss_param = 43; +} + +// DEPRECATED: V0LayerParameter is the old way of specifying layer parameters +// in Caffe. We keep this message type around for legacy support. +message V0LayerParameter { + optional string name = 1; // the layer name + optional string type = 2; // the string to specify the layer type + + // Parameters to specify layers with inner products. 
+ optional uint32 num_output = 3; // The number of outputs for the layer + optional bool biasterm = 4 [default = true]; // whether to have bias terms + optional FillerParameter weight_filler = 5; // The filler for the weight + optional FillerParameter bias_filler = 6; // The filler for the bias + + optional uint32 pad = 7 [default = 0]; // The padding size + optional uint32 kernelsize = 8; // The kernel size + optional uint32 group = 9 [default = 1]; // The group size for group conv + optional uint32 stride = 10 [default = 1]; // The stride + enum PoolMethod { + MAX = 0; + AVE = 1; + STOCHASTIC = 2; + } + optional PoolMethod pool = 11 [default = MAX]; // The pooling method + optional float dropout_ratio = 12 [default = 0.5]; // dropout ratio + + optional uint32 local_size = 13 [default = 5]; // for local response norm + optional float alpha = 14 [default = 1.]; // for local response norm + optional float beta = 15 [default = 0.75]; // for local response norm + optional float k = 22 [default = 1.]; + + // For data layers, specify the data source + optional string source = 16; + // For data pre-processing, we can do simple scaling and subtracting the + // data mean, if provided. Note that the mean subtraction is always carried + // out before scaling. + optional float scale = 17 [default = 1]; + optional string meanfile = 18; + // For data layers, specify the batch size. + optional uint32 batchsize = 19; + // For data layers, specify if we would like to randomly crop an image. + optional uint32 cropsize = 20 [default = 0]; + // For data layers, specify if we want to randomly mirror data. + optional bool mirror = 21 [default = false]; + + // The blobs containing the numeric parameters of the layer + repeated BlobProto blobs = 50; + // The ratio that is multiplied on the global learning rate. If you want to + // set the learning ratio for one blob, you need to set it for all blobs. 
+ repeated float blobs_lr = 51; + // The weight decay that is multiplied on the global weight decay. + repeated float weight_decay = 52; + + // The rand_skip variable is for the data layer to skip a few data points + // to avoid all asynchronous sgd clients to start at the same point. The skip + // point would be set as rand_skip * rand(0,1). Note that rand_skip should not + // be larger than the number of keys in the database. + optional uint32 rand_skip = 53 [default = 0]; + + // Fields related to detection (det_*) + // foreground (object) overlap threshold + optional float det_fg_threshold = 54 [default = 0.5]; + // background (non-object) overlap threshold + optional float det_bg_threshold = 55 [default = 0.5]; + // Fraction of batch that should be foreground objects + optional float det_fg_fraction = 56 [default = 0.25]; + + // optional bool OBSOLETE_can_clobber = 57 [default = true]; + + // Amount of contextual padding to add around a window + // (used only by the window_data_layer) + optional uint32 det_context_pad = 58 [default = 0]; + + // Mode for cropping out a detection window + // warp: cropped window is warped to a fixed size and aspect ratio + // square: the tightest square around the window is cropped + optional string det_crop_mode = 59 [default = "warp"]; + + // For ReshapeLayer, one needs to specify the new dimensions. + optional int32 new_num = 60 [default = 0]; + optional int32 new_channels = 61 [default = 0]; + optional int32 new_height = 62 [default = 0]; + optional int32 new_width = 63 [default = 0]; + + // Whether or not ImageLayer should shuffle the list of files at every epoch. + // It will also resize images if new_height or new_width are not zero. + optional bool shuffle_images = 64 [default = false]; + + // For ConcatLayer, one needs to specify the dimension for concatenation, and + // the other dimensions must be the same for all the bottom blobs. + // By default it will concatenate blobs along the channels dimension. 
+ optional uint32 concat_dim = 65 [default = 1]; + + optional HDF5OutputParameter hdf5_output_param = 1001; +} + +message PReLUParameter { + // Parametric ReLU described in K. He et al, Delving Deep into Rectifiers: + // Surpassing Human-Level Performance on ImageNet Classification, 2015. + + // Initial value of a_i. Default is a_i=0.25 for all i. + optional FillerParameter filler = 1; + // Whether or not slope paramters are shared across channels. + optional bool channel_shared = 2 [default = false]; +} diff --git a/src/caffe/test/test_net.cpp b/src/caffe/test/test_net.cpp index edadf4dc24a..3d30e9c6723 100644 --- a/src/caffe/test/test_net.cpp +++ b/src/caffe/test/test_net.cpp @@ -707,8 +707,11 @@ class NetTest : public MultiDeviceTest { <<<<<<< HEAD <<<<<<< HEAD <<<<<<< HEAD +<<<<<<< HEAD ======= >>>>>>> 00341b2... triplet data generation and network update +======= +>>>>>>> 1882ac9... add initiate class name of triplet loss layer proto += " propagate_down: true " " propagate_down: false "; else @@ -720,6 +723,9 @@ class NetTest : public MultiDeviceTest { proto += " propagate_down: [true, true] "; >>>>>>> 011aef0... restore <<<<<<< HEAD +<<<<<<< HEAD +======= +>>>>>>> 1882ac9... add initiate class name of triplet loss layer ======= proto += " propagate_down: true " " propagate_down: false "; @@ -727,8 +733,11 @@ class NetTest : public MultiDeviceTest { proto += " propagate_down: true " " propagate_down: true "; >>>>>>> 98fb438... fixed two bugs with prototext format +<<<<<<< HEAD ======= >>>>>>> 00341b2... triplet data generation and network update +======= +>>>>>>> 1882ac9... 
add initiate class name of triplet loss layer proto += " top: 'cross_entropy_loss' " " type: 'SigmoidCrossEntropyLoss' " diff --git a/src/caffe/test/test_triplet_loss_layer.orig b/src/caffe/test/test_triplet_loss_layer.orig new file mode 100644 index 00000000000..9eccca9e2e5 --- /dev/null +++ b/src/caffe/test/test_triplet_loss_layer.orig @@ -0,0 +1,230 @@ +#include +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class TripletLossLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + TripletLossLayerTest() +<<<<<<< 22f42667487341b9aa1d7d1f9854410111b1601c + : blob_bottom_data_(new Blob(50, 1, 1, 1)), + blob_bottom_y_(new Blob(50, 1, 1, 1)), +======= + : blob_bottom_data_i_(new Blob(512, 2, 1, 1)), + blob_bottom_data_j_(new Blob(512, 2, 1, 1)), + blob_bottom_data_k_(new Blob(512, 2, 1, 1)), + blob_bottom_y_(new Blob(512, 1, 1, 1)), +>>>>>>> suit for opencv3.0.0 + blob_top_loss_(new Blob()) { + // fill the values + FillerParameter filler_param; + filler_param.set_min(-1.0); + filler_param.set_max(1.0); // distances~=1.0 to test both sides of margin + UniformFiller filler(filler_param); +<<<<<<< 22f42667487341b9aa1d7d1f9854410111b1601c + filler.Fill(this->blob_bottom_data_); + blob_bottom_vec_.push_back(blob_bottom_data_); +======= + filler.Fill(this->blob_bottom_data_i_); + blob_bottom_vec_.push_back(blob_bottom_data_i_); + filler.Fill(this->blob_bottom_data_j_); + blob_bottom_vec_.push_back(blob_bottom_data_j_); + filler.Fill(this->blob_bottom_data_k_); + blob_bottom_vec_.push_back(blob_bottom_data_k_); +>>>>>>> suit for opencv3.0.0 + for (int i = 0; i < blob_bottom_y_->count(); ++i) { + blob_bottom_y_->mutable_cpu_data()[i] = caffe_rng_rand() % 2; // 0 or 
1 + } + blob_bottom_vec_.push_back(blob_bottom_y_); + blob_top_vec_.push_back(blob_top_loss_); + } + virtual ~TripletLossLayerTest() { +<<<<<<< 22f42667487341b9aa1d7d1f9854410111b1601c + delete blob_bottom_data_; +======= + delete blob_bottom_data_i_; + delete blob_bottom_data_j_; + delete blob_bottom_data_k_; +>>>>>>> suit for opencv3.0.0 + delete blob_bottom_y_; + delete blob_top_loss_; + } + +<<<<<<< 22f42667487341b9aa1d7d1f9854410111b1601c + Blob* const blob_bottom_data_; +======= + Blob* const blob_bottom_data_i_; + Blob* const blob_bottom_data_j_; + Blob* const blob_bottom_data_k_; +>>>>>>> suit for opencv3.0.0 + Blob* const blob_bottom_y_; + Blob* const blob_top_loss_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(TripletLossLayerTest, TestDtypesAndDevices); + +TYPED_TEST(TripletLossLayerTest, TestForward) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + TripletLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // manually compute to compare + const Dtype margin = layer_param.triplet_loss_param().margin(); +<<<<<<< 22f42667487341b9aa1d7d1f9854410111b1601c + const Dtype losstype = 0;//layer_param.triplet_loss_param().losstype(); + const int num_triplets = 3; + const int num_set = this->blob_bottom_data_->num()/(2 + num_triplets); + const int channels = this->blob_bottom_data_->channels(); + Dtype loss(0); + if (losstype == 0) { + for (int i = 0; i < num_set; ++i) { + Dtype dist_par(0); + for (int j = 0; j < channels; ++j) { + Dtype diff_pos = this->blob_bottom_data_->cpu_data()[(2+num_triplets)*i*channels+j] - + this->blob_bottom_data_->cpu_data()[((2+num_triplets)*i+1)*channels+j]; + dist_par = diff_pos*diff_pos; + loss += dist_par; + } + for (int triplet = 0; triplet < num_triplets; ++triplet) { + Dtype dist_sq(0); + for (int j = 0; j < channels; ++j) { + Dtype diff_pos = 
this->blob_bottom_data_->cpu_data()[(2+num_triplets)*i*channels+j] - + this->blob_bottom_data_->cpu_data()[((2+num_triplets)*i+1)*channels+j]; + dist_sq += diff_pos*diff_pos; + Dtype diff_neg = this->blob_bottom_data_->cpu_data()[(2+num_triplets)*i*channels+j] - + this->blob_bottom_data_->cpu_data()[((2+num_triplets)*i+2+triplet)*channels+j]; + dist_sq -= diff_neg*diff_neg; + } + loss += std::max(margin + dist_sq, Dtype(0.0)); + } + } + } /*else { + for (int i = 0; i < num; ++i) { + Dtype dist_sq(0); + Dtype dist_par(0); +======= + const int num = this->blob_bottom_data_i_->num(); + const int channels = this->blob_bottom_data_i_->channels(); + Dtype loss(0); + for (int i = 0; i < num; ++i) { + Dtype dist_sq(0); +>>>>>>> suit for opencv3.0.0 + for (int j = 0; j < channels; ++j) { + Dtype diff_pos = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - + this->blob_bottom_data_j_->cpu_data()[i*channels+j]; + dist_sq += diff_pos*diff_pos; +<<<<<<< 22f42667487341b9aa1d7d1f9854410111b1601c + dist_sq += margin; + Dtype diff_neg = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - + this->blob_bottom_data_k_->cpu_data()[i*channels+j]; + dist_sq = 1 - diff_neg*diff_neg/dist_sq; + Dtype diff_par = this->blob_bottom_data_l_->cpu_data()[i*channels+j] - + this->blob_bottom_data_m_->cpu_data()[i*channels+j]; + dist_par = diff_par*diff_par; + } + loss += std::max(dist_sq, Dtype(0.0)); + loss += dist_par; + } + }*/ + loss /= static_cast(num_set) * Dtype(2); +======= + Dtype diff_neg = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - + this->blob_bottom_data_j_->cpu_data()[i*channels+j]; + dist_sq -= diff_neg*diff_neg; + } + loss += std::max(margin + dist_sq, 0.0); + /*if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs + loss += dist_sq; + } else { + Dtype dist = std::max(margin - sqrt(dist_sq), 0.0); + loss += dist*dist; + }*/ + } + loss /= static_cast(num) * Dtype(2); +>>>>>>> suit for opencv3.0.0 + EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); 
+} + +TYPED_TEST(TripletLossLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + TripletLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + GradientChecker checker(1e-2, 1e-2, 1701); +<<<<<<< 22f42667487341b9aa1d7d1f9854410111b1601c + // check the gradient for the first 5 bottom layers + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); +} +======= + // check the gradient for the first two bottom layers + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 1); +} + +/*TYPED_TEST(TripletLossLayerTest, TestForwardLegacy) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_triplet_loss_param()->set_legacy_version(true); + TripletLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // manually compute to compare + const Dtype margin = layer_param.triplet_loss_param().margin(); + const int num = this->blob_bottom_data_i_->num(); + const int channels = this->blob_bottom_data_i_->channels(); + Dtype loss(0); + for (int i = 0; i < num; ++i) { + Dtype dist_sq(0); + for (int j = 0; j < channels; ++j) { + Dtype diff = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - + this->blob_bottom_data_j_->cpu_data()[i*channels+j]; + dist_sq += diff*diff; + } + if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs + loss += dist_sq; + } else { + loss += std::max(margin - dist_sq, Dtype(0.0)); + } + } + loss /= static_cast(num) * Dtype(2); + EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); +} + +TYPED_TEST(TripletLossLayerTest, TestGradientLegacy) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + 
layer_param.mutable_triplet_loss_param()->set_legacy_version(true); + TripletLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + GradientChecker checker(1e-2, 1e-2, 1701); + // check the gradient for the first two bottom layers + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 1); +}*/ + +>>>>>>> suit for opencv3.0.0 +} // namespace caffe From 9dcb7fcda9bc7404154c36abfe68d795b297fde3 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Mon, 8 Jun 2015 07:56:03 +0800 Subject: [PATCH 58/82] New triplet loss layer added(beta1 version-no test source files) --- include/caffe/data_transformer.hpp | 4 +- include/caffe/layer.hpp | 1 - include/caffe/net.hpp | 3 - include/caffe/neuron_layers.hpp | 70 +-- include/caffe/solver.hpp | 15 +- include/caffe/test/test_caffe_main.hpp | 28 +- .../caffe/test/test_gradient_check_util.hpp | 7 +- include/caffe/util/math_functions.hpp | 6 - include/caffe/util/mkl_alternate.hpp | 1 - src/caffe/blob.cpp | 1 - src/caffe/data_transformer.cpp | 26 +- src/caffe/layers/contrastive_loss_layer.cpp | 25 +- src/caffe/layers/contrastive_loss_layer.cu | 34 +- src/caffe/layers/conv_layer.cpp | 7 + src/caffe/layers/conv_layer.cu | 7 + src/caffe/layers/cudnn_conv_layer.cu | 2 + src/caffe/layers/data_layer.cpp | 143 +++-- src/caffe/layers/deconv_layer.cpp | 7 + src/caffe/layers/deconv_layer.cu | 7 + src/caffe/layers/flatten_layer.cpp | 16 +- src/caffe/layers/image_data_layer.cpp | 65 ++- src/caffe/layers/inner_product_layer.cpp | 4 +- src/caffe/layers/inner_product_layer.cu | 4 +- src/caffe/layers/lrn_layer.cu | 102 ++-- src/caffe/layers/mvn_layer.cpp | 23 +- src/caffe/layers/mvn_layer.cu | 23 +- src/caffe/layers/pooling_layer.cu | 218 ++++---- src/caffe/layers/prelu_layer.cpp | 4 +- src/caffe/layers/prelu_layer.cu | 16 +- .../sigmoid_cross_entropy_loss_layer.cpp | 2 +- 
.../sigmoid_cross_entropy_loss_layer.cu | 22 +- src/caffe/layers/slice_layer.cu | 47 +- src/caffe/net.cpp | 525 +++++------------- src/caffe/proto/caffe.proto | 251 +-------- src/caffe/test/test_accuracy_layer.cpp | 5 +- src/caffe/test/test_argmax_layer.cpp | 3 +- .../test/test_contrastive_loss_layer.cpp | 58 +- src/caffe/test/test_convolution_layer.cpp | 9 +- src/caffe/test/test_dummy_data_layer.cpp | 5 +- src/caffe/test/test_filler.cpp | 98 ---- src/caffe/test/test_flatten_layer.cpp | 46 +- src/caffe/test/test_gradient_based_solver.cpp | 39 -- src/caffe/test/test_im2col_kernel.cu | 91 +-- src/caffe/test/test_math_functions.cpp | 51 +- .../test_multinomial_logistic_loss_layer.cpp | 3 +- src/caffe/test/test_net.cpp | 51 +- src/caffe/test/test_neuron_layer.cpp | 139 +---- src/caffe/test/test_pooling_layer.cpp | 13 +- src/caffe/test/test_softmax_layer.cpp | 4 +- src/caffe/test/test_stochastic_pooling.cpp | 35 +- src/caffe/test/test_triplet_loss_layer.cpp | 107 ++++ src/caffe/util/math_functions.cpp | 10 - src/caffe/util/math_functions.cu | 21 - 53 files changed, 816 insertions(+), 1688 deletions(-) create mode 100644 src/caffe/test/test_triplet_loss_layer.cpp diff --git a/include/caffe/data_transformer.hpp b/include/caffe/data_transformer.hpp index 97b4ee6a8c4..980c8268c66 100644 --- a/include/caffe/data_transformer.hpp +++ b/include/caffe/data_transformer.hpp @@ -63,7 +63,6 @@ class DataTransformer { */ void Transform(const vector & mat_vector, Blob* transformed_blob); - /** * @brief Applies the transformation defined in the data layer's * transform_param block to a cv::Mat @@ -90,6 +89,7 @@ class DataTransformer { */ void Transform(Blob* input_blob, Blob* transformed_blob); +<<<<<<< ed5d44f53efe58e4cc2cd299f23c5164cbd7172c /** * @brief Infers the shape of transformed_blob will have when * the transformation is applied to the data. 
@@ -127,6 +127,8 @@ class DataTransformer { vector InferBlobShape(const cv::Mat& cv_img); #endif // USE_OPENCV +======= +>>>>>>> New triplet loss layer added(beta1 version-no test source files) protected: /** * @brief Generates a random integer from Uniform({0, 1, ..., n-1}). diff --git a/include/caffe/layer.hpp b/include/caffe/layer.hpp index a0d1d4ecc94..3103ec6313a 100644 --- a/include/caffe/layer.hpp +++ b/include/caffe/layer.hpp @@ -453,7 +453,6 @@ inline Dtype Layer::Forward(const vector*>& bottom, // Lock during forward to ensure sequential forward Lock(); Dtype loss = 0; - Reshape(bottom, top); switch (Caffe::mode()) { case Caffe::CPU: Forward_cpu(bottom, top); diff --git a/include/caffe/net.hpp b/include/caffe/net.hpp index 1bf07d28d13..a6246bc9027 100644 --- a/include/caffe/net.hpp +++ b/include/caffe/net.hpp @@ -155,9 +155,6 @@ class Net { inline const vector& blob_loss_weights() const { return blob_loss_weights_; } - inline const vector& layer_need_backward() const { - return layer_need_backward_; - } /// @brief returns the parameters inline const vector > >& params() const { return params_; diff --git a/include/caffe/neuron_layers.hpp b/include/caffe/neuron_layers.hpp index c2e0774aaa2..323215134c7 100644 --- a/include/caffe/neuron_layers.hpp +++ b/include/caffe/neuron_layers.hpp @@ -8,6 +8,7 @@ #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/layer.hpp" +#include "caffe/net.hpp" #include "caffe/proto/caffe.pb.h" #define HDF5_DATA_DATASET_NAME "data" @@ -267,72 +268,6 @@ class ExpLayer : public NeuronLayer { Dtype inner_scale_, outer_scale_; }; -/** - * @brief Computes @f$ y = log_{\gamma}(\alpha x + \beta) @f$, - * as specified by the scale @f$ \alpha @f$, shift @f$ \beta @f$, - * and base @f$ \gamma @f$. 
- */ -template -class LogLayer : public NeuronLayer { - public: - /** - * @param param provides LogParameter log_param, - * with LogLayer options: - * - scale (\b optional, default 1) the scale @f$ \alpha @f$ - * - shift (\b optional, default 0) the shift @f$ \beta @f$ - * - base (\b optional, default -1 for a value of @f$ e \approx 2.718 @f$) - * the base @f$ \gamma @f$ - */ - explicit LogLayer(const LayerParameter& param) - : NeuronLayer(param) {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "Log"; } - - protected: - /** - * @param bottom input Blob vector (length 1) - * -# @f$ (N \times C \times H \times W) @f$ - * the inputs @f$ x @f$ - * @param top output Blob vector (length 1) - * -# @f$ (N \times C \times H \times W) @f$ - * the computed outputs @f$ - * y = log_{\gamma}(\alpha x + \beta) - * @f$ - */ - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - - /** - * @brief Computes the error gradient w.r.t. the exp inputs. - * - * @param top output Blob vector (length 1), providing the error gradient with - * respect to the outputs - * -# @f$ (N \times C \times H \times W) @f$ - * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ - * with respect to computed outputs @f$ y @f$ - * @param propagate_down see Layer::Backward. 
- * @param bottom input Blob vector (length 1) - * -# @f$ (N \times C \times H \times W) @f$ - * the inputs @f$ x @f$; Backward fills their diff with - * gradients @f$ - * \frac{\partial E}{\partial x} = - * \frac{\partial E}{\partial y} y \alpha \log_e(gamma) - * @f$ if propagate_down[0] - */ - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - - Dtype base_scale_; - Dtype input_scale_, input_shift_; - Dtype backward_num_scale_; -}; - /** * @brief Computes @f$ y = (\alpha x + \beta) ^ \gamma @f$, * as specified by the scale @f$ \alpha @f$, shift @f$ \beta @f$, @@ -799,8 +734,7 @@ class PReLULayer : public NeuronLayer { const vector& propagate_down, const vector*>& bottom); bool channel_shared_; - Blob multiplier_; // dot multiplier for backward computation of params - Blob backward_buff_; // temporary buffer for backward computation + Blob multiplier_; // dot multipler for backward computation of params Blob bottom_memory_; // memory for in-place computation }; diff --git a/include/caffe/solver.hpp b/include/caffe/solver.hpp index 2ecf539baef..d5b371cd171 100644 --- a/include/caffe/solver.hpp +++ b/include/caffe/solver.hpp @@ -33,7 +33,7 @@ typedef boost::function ActionCallback; /** * @brief An interface for classes that perform optimization on Net%s. * - * Requires implementation of ApplyUpdate to compute a parameter update + * Requires implementation of ComputeUpdateValue to compute a parameter update * given the current state of the Net parameters. */ template @@ -85,8 +85,8 @@ class Solver { void CheckSnapshotWritePermissions(); protected: - // Make and apply the update value for the current iteration. - virtual void ApplyUpdate() = 0; + // Get the update value for the current iteration. 
+ virtual void ComputeUpdateValue() = 0; // The Solver::Snapshot function implements the basic snapshotting utility // that stores the learned net. You should implement the SnapshotSolverState() // function that produces a SolverState protocol buffer that needs to be @@ -165,10 +165,7 @@ class SGDSolver : public Solver { protected: void PreSolve(); Dtype GetLearningRate(); - virtual void ApplyUpdate(); - virtual void Normalize(int param_id); - virtual void Regularize(int param_id); - virtual void ComputeUpdateValue(int param_id, Dtype rate); + virtual void ComputeUpdateValue(); virtual void ClipGradients(); virtual void SnapshotSolverState(const string& model_filename); virtual void SnapshotSolverStateToBinaryProto(const string& model_filename); @@ -193,7 +190,7 @@ class NesterovSolver : public SGDSolver { : SGDSolver(param_file) {} protected: - virtual void ComputeUpdateValue(int param_id, Dtype rate); + virtual void ComputeUpdateValue(); DISABLE_COPY_AND_ASSIGN(NesterovSolver); }; @@ -207,7 +204,7 @@ class AdaGradSolver : public SGDSolver { : SGDSolver(param_file) { constructor_sanity_check(); } protected: - virtual void ComputeUpdateValue(int param_id, Dtype rate); + virtual void ComputeUpdateValue(); void constructor_sanity_check() { CHECK_EQ(0, this->param_.momentum()) << "Momentum cannot be used with AdaGrad."; diff --git a/include/caffe/test/test_caffe_main.hpp b/include/caffe/test/test_caffe_main.hpp index fc156091476..bd5f31e063f 100644 --- a/include/caffe/test/test_caffe_main.hpp +++ b/include/caffe/test/test_caffe_main.hpp @@ -40,36 +40,34 @@ class MultiDeviceTest : public ::testing::Test { typedef ::testing::Types TestDtypes; -template -struct CPUDevice { - typedef TypeParam Dtype; +struct FloatCPU { + typedef float Dtype; static const Caffe::Brew device = Caffe::CPU; }; -template -class CPUDeviceTest : public MultiDeviceTest > { +struct DoubleCPU { + typedef double Dtype; + static const Caffe::Brew device = Caffe::CPU; }; #ifdef CPU_ONLY -typedef 
::testing::Types, - CPUDevice > TestDtypesAndDevices; +typedef ::testing::Types TestDtypesAndDevices; #else -template -struct GPUDevice { - typedef TypeParam Dtype; +struct FloatGPU { + typedef float Dtype; static const Caffe::Brew device = Caffe::GPU; }; -template -class GPUDeviceTest : public MultiDeviceTest > { +struct DoubleGPU { + typedef double Dtype; + static const Caffe::Brew device = Caffe::GPU; }; -typedef ::testing::Types, CPUDevice, - GPUDevice, GPUDevice > - TestDtypesAndDevices; +typedef ::testing::Types + TestDtypesAndDevices; #endif diff --git a/include/caffe/test/test_gradient_check_util.hpp b/include/caffe/test/test_gradient_check_util.hpp index 25f35d1589e..3e63cb52ea6 100644 --- a/include/caffe/test/test_gradient_check_util.hpp +++ b/include/caffe/test/test_gradient_check_util.hpp @@ -84,14 +84,11 @@ void GradientChecker::CheckGradientSingle(Layer* layer, CHECK_EQ(top_count, bottom[blob_id]->count()); } } - // First, figure out what blobs we need to check against, and zero init - // parameter blobs. + // First, figure out what blobs we need to check against. 
vector*> blobs_to_check; vector propagate_down(bottom.size(), check_bottom == -1); for (int i = 0; i < layer->blobs().size(); ++i) { - Blob* blob = layer->blobs()[i].get(); - caffe_set(blob->count(), static_cast(0), blob->mutable_cpu_diff()); - blobs_to_check.push_back(blob); + blobs_to_check.push_back(layer->blobs()[i].get()); } if (check_bottom == -1) { for (int i = 0; i < bottom.size(); ++i) { diff --git a/include/caffe/util/math_functions.hpp b/include/caffe/util/math_functions.hpp index 2cacd8e72cd..f43036fcebc 100644 --- a/include/caffe/util/math_functions.hpp +++ b/include/caffe/util/math_functions.hpp @@ -88,9 +88,6 @@ void caffe_rng_bernoulli(const int n, const Dtype p, unsigned int* r); template void caffe_exp(const int n, const Dtype* a, Dtype* y); -template -void caffe_log(const int n, const Dtype* a, Dtype* y); - template void caffe_abs(const int n, const Dtype* a, Dtype* y); @@ -206,9 +203,6 @@ void caffe_gpu_abs(const int n, const Dtype* a, Dtype* y); template void caffe_gpu_exp(const int n, const Dtype* a, Dtype* y); -template -void caffe_gpu_log(const int n, const Dtype* a, Dtype* y); - template void caffe_gpu_powx(const int n, const Dtype* a, const Dtype b, Dtype* y); diff --git a/include/caffe/util/mkl_alternate.hpp b/include/caffe/util/mkl_alternate.hpp index 3355b6658a3..32fdbf79932 100644 --- a/include/caffe/util/mkl_alternate.hpp +++ b/include/caffe/util/mkl_alternate.hpp @@ -33,7 +33,6 @@ extern "C" { DEFINE_VSL_UNARY_FUNC(Sqr, y[i] = a[i] * a[i]); DEFINE_VSL_UNARY_FUNC(Exp, y[i] = exp(a[i])); -DEFINE_VSL_UNARY_FUNC(Ln, y[i] = log(a[i])); DEFINE_VSL_UNARY_FUNC(Abs, y[i] = fabs(a[i])); // A simple way to define the vsl unary functions with singular parameter b. 
diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp index c86fd5d1d94..05db15915b1 100644 --- a/src/caffe/blob.cpp +++ b/src/caffe/blob.cpp @@ -30,7 +30,6 @@ void Blob::Reshape(const vector& shape) { int* shape_data = static_cast(shape_data_->mutable_cpu_data()); for (int i = 0; i < shape.size(); ++i) { CHECK_GE(shape[i], 0); - CHECK_LE(shape[i], INT_MAX / count_) << "blob size exceeds INT_MAX"; count_ *= shape[i]; shape_[i] = shape[i]; shape_data[i] = shape[i]; diff --git a/src/caffe/data_transformer.cpp b/src/caffe/data_transformer.cpp index 0120e839c49..68e30f982cf 100644 --- a/src/caffe/data_transformer.cpp +++ b/src/caffe/data_transformer.cpp @@ -131,6 +131,7 @@ template void DataTransformer::Transform(const Datum& datum, Blob* transformed_blob) { <<<<<<< HEAD +<<<<<<< HEAD <<<<<<< HEAD // If datum is encoded, decoded and transform the cv::image. if (datum.encoded()) { @@ -190,11 +191,12 @@ void DataTransformer::Transform(const Datum& datum, >>>>>>> 083f61b... New triplet loss layer added(beta1 version-no test source files) ======= >>>>>>> 011aef0... restore +======= +>>>>>>> 4d8130b... New triplet loss layer added(beta1 version-no test source files) const int datum_channels = datum.channels(); const int datum_height = datum.height(); const int datum_width = datum.width(); - // Check dimensions. 
const int channels = transformed_blob->channels(); const int height = transformed_blob->height(); const int width = transformed_blob->width(); @@ -205,6 +207,8 @@ void DataTransformer::Transform(const Datum& datum, CHECK_LE(width, datum_width); CHECK_GE(num, 1); + const int crop_size = param_.crop_size(); + if (crop_size) { CHECK_EQ(crop_size, height); CHECK_EQ(crop_size, width); @@ -261,12 +265,10 @@ void DataTransformer::Transform(const vector & mat_vector, template void DataTransformer::Transform(const cv::Mat& cv_img, Blob* transformed_blob) { - const int crop_size = param_.crop_size(); const int img_channels = cv_img.channels(); const int img_height = cv_img.rows; const int img_width = cv_img.cols; - // Check dimensions. const int channels = transformed_blob->channels(); const int height = transformed_blob->height(); const int width = transformed_blob->width(); @@ -279,6 +281,7 @@ void DataTransformer::Transform(const cv::Mat& cv_img, CHECK(cv_img.depth() == CV_8U) << "Image data type must be unsigned byte"; + const int crop_size = param_.crop_size(); const Dtype scale = param_.scale(); const bool do_mirror = param_.mirror() && Rand(2); const bool has_mean_file = param_.has_mean_file(); @@ -364,23 +367,11 @@ void DataTransformer::Transform(const cv::Mat& cv_img, template void DataTransformer::Transform(Blob* input_blob, Blob* transformed_blob) { - const int crop_size = param_.crop_size(); const int input_num = input_blob->num(); const int input_channels = input_blob->channels(); const int input_height = input_blob->height(); const int input_width = input_blob->width(); - if (transformed_blob->count() == 0) { - // Initialize transformed_blob with the right shape. 
- if (crop_size) { - transformed_blob->Reshape(input_num, input_channels, - crop_size, crop_size); - } else { - transformed_blob->Reshape(input_num, input_channels, - input_height, input_width); - } - } - const int num = transformed_blob->num(); const int channels = transformed_blob->channels(); const int height = transformed_blob->height(); @@ -392,7 +383,7 @@ void DataTransformer::Transform(Blob* input_blob, CHECK_GE(input_height, height); CHECK_GE(input_width, width); - + const int crop_size = param_.crop_size(); const Dtype scale = param_.scale(); const bool do_mirror = param_.mirror() && Rand(2); const bool has_mean_file = param_.has_mean_file(); @@ -474,6 +465,7 @@ void DataTransformer::Transform(Blob* input_blob, } } +<<<<<<< HEAD <<<<<<< HEAD <<<<<<< HEAD template @@ -590,6 +582,8 @@ vector DataTransformer::InferBlobShape( >>>>>>> 083f61b... New triplet loss layer added(beta1 version-no test source files) ======= >>>>>>> 011aef0... restore +======= +>>>>>>> 4d8130b... New triplet loss layer added(beta1 version-no test source files) template void DataTransformer::InitRand() { const bool needs_rand = param_.mirror() || diff --git a/src/caffe/layers/contrastive_loss_layer.cpp b/src/caffe/layers/contrastive_loss_layer.cpp index 25e167819d3..0692c11c257 100644 --- a/src/caffe/layers/contrastive_loss_layer.cpp +++ b/src/caffe/layers/contrastive_loss_layer.cpp @@ -41,8 +41,6 @@ void ContrastiveLossLayer::Forward_cpu( diff_.mutable_cpu_data()); // a_i-b_i const int channels = bottom[0]->channels(); Dtype margin = this->layer_param_.contrastive_loss_param().margin(); - bool legacy_version = - this->layer_param_.contrastive_loss_param().legacy_version(); Dtype loss(0.0); for (int i = 0; i < bottom[0]->num(); ++i) { dist_sq_.mutable_cpu_data()[i] = caffe_cpu_dot(channels, @@ -50,12 +48,7 @@ void ContrastiveLossLayer::Forward_cpu( if (static_cast(bottom[2]->cpu_data()[i])) { // similar pairs loss += dist_sq_.cpu_data()[i]; } else { // dissimilar pairs - if 
(legacy_version) { - loss += std::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0)); - } else { - Dtype dist = std::max(margin - sqrt(dist_sq_.cpu_data()[i]), 0.0); - loss += dist*dist; - } + loss += std::max(margin-dist_sq_.cpu_data()[i], Dtype(0.0)); } } loss = loss / static_cast(bottom[0]->num()) / Dtype(2); @@ -66,8 +59,6 @@ template void ContrastiveLossLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { Dtype margin = this->layer_param_.contrastive_loss_param().margin(); - bool legacy_version = - this->layer_param_.contrastive_loss_param().legacy_version(); for (int i = 0; i < 2; ++i) { if (propagate_down[i]) { const Dtype sign = (i == 0) ? 1 : -1; @@ -85,20 +76,10 @@ void ContrastiveLossLayer::Backward_cpu(const vector*>& top, Dtype(0.0), bout + (j*channels)); } else { // dissimilar pairs - Dtype mdist(0.0); - Dtype beta(0.0); - if (legacy_version) { - mdist = margin - dist_sq_.cpu_data()[j]; - beta = -alpha; - } else { - Dtype dist = sqrt(dist_sq_.cpu_data()[j]); - mdist = margin - dist; - beta = -alpha * mdist / (dist + Dtype(1e-4)); - } - if (mdist > Dtype(0.0)) { + if ((margin-dist_sq_.cpu_data()[j]) > Dtype(0.0)) { caffe_cpu_axpby( channels, - beta, + -alpha, diff_.cpu_data() + (j*channels), Dtype(0.0), bout + (j*channels)); diff --git a/src/caffe/layers/contrastive_loss_layer.cu b/src/caffe/layers/contrastive_loss_layer.cu index 931239316ac..78a55995a0a 100644 --- a/src/caffe/layers/contrastive_loss_layer.cu +++ b/src/caffe/layers/contrastive_loss_layer.cu @@ -32,20 +32,12 @@ void ContrastiveLossLayer::Forward_gpu( Dtype(0.0), dist_sq_.mutable_gpu_data()); // \Sum (a_i-b_i)^2 Dtype margin = this->layer_param_.contrastive_loss_param().margin(); - bool legacy_version = - this->layer_param_.contrastive_loss_param().legacy_version(); Dtype loss(0.0); for (int i = 0; i < bottom[0]->num(); ++i) { if (static_cast(bottom[2]->cpu_data()[i])) { // similar pairs loss += dist_sq_.cpu_data()[i]; } else { // dissimilar 
pairs - if (legacy_version) { - loss += std::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0)); - } else { - Dtype dist = std::max(margin - sqrt(dist_sq_.cpu_data()[i]), - Dtype(0.0)); - loss += dist*dist; - } + loss += std::max(margin-dist_sq_.cpu_data()[i], Dtype(0.0)); } } loss = loss / static_cast(bottom[0]->num()) / Dtype(2); @@ -53,8 +45,8 @@ void ContrastiveLossLayer::Forward_gpu( } template -__global__ void CLLBackward(const int count, const int channels, - const Dtype margin, const bool legacy_version, const Dtype alpha, +__global__ void CLLForward(const int count, const int channels, + const Dtype margin, const Dtype alpha, const Dtype* y, const Dtype* diff, const Dtype* dist_sq, Dtype *bottom_diff) { CUDA_KERNEL_LOOP(i, count) { @@ -62,18 +54,8 @@ __global__ void CLLBackward(const int count, const int channels, if (static_cast(y[n])) { // similar pairs bottom_diff[i] = alpha * diff[i]; } else { // dissimilar pairs - Dtype mdist(0.0); - Dtype beta(0.0); - if (legacy_version) { - mdist = (margin - dist_sq[n]); - beta = -alpha; - } else { - Dtype dist = sqrt(dist_sq[n]); - mdist = (margin - dist); - beta = -alpha * mdist / (dist + Dtype(1e-4)) * diff[i]; - } - if (mdist > 0.0) { - bottom_diff[i] = beta; + if ((margin-dist_sq[n]) > 0.0) { + bottom_diff[i] = -alpha * diff[i]; } else { bottom_diff[i] = 0; } @@ -89,14 +71,12 @@ void ContrastiveLossLayer::Backward_gpu(const vector*>& top, const int count = bottom[0]->count(); const int channels = bottom[0]->channels(); Dtype margin = this->layer_param_.contrastive_loss_param().margin(); - const bool legacy_version = - this->layer_param_.contrastive_loss_param().legacy_version(); const Dtype sign = (i == 0) ? 
1 : -1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / static_cast(bottom[0]->num()); // NOLINT_NEXT_LINE(whitespace/operators) - CLLBackward<<>>( - count, channels, margin, legacy_version, alpha, + CLLForward<<>>( + count, channels, margin, alpha, bottom[2]->gpu_data(), // pair similarity 0 or 1 diff_.gpu_data(), // the cached eltwise difference between a and b dist_sq_.gpu_data(), // the cached square distance between a and b diff --git a/src/caffe/layers/conv_layer.cpp b/src/caffe/layers/conv_layer.cpp index fb50bb095ed..a020c7c70d9 100644 --- a/src/caffe/layers/conv_layer.cpp +++ b/src/caffe/layers/conv_layer.cpp @@ -46,6 +46,13 @@ void ConvolutionLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->cpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_cpu_diff(); + if (this->param_propagate_down_[0]) { + caffe_set(this->blobs_[0]->count(), Dtype(0), weight_diff); + } + if (this->bias_term_ && this->param_propagate_down_[1]) { + caffe_set(this->blobs_[1]->count(), Dtype(0), + this->blobs_[1]->mutable_cpu_diff()); + } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->cpu_diff(); const Dtype* bottom_data = bottom[i]->cpu_data(); diff --git a/src/caffe/layers/conv_layer.cu b/src/caffe/layers/conv_layer.cu index b429d2b47d0..7474ae04b95 100644 --- a/src/caffe/layers/conv_layer.cu +++ b/src/caffe/layers/conv_layer.cu @@ -31,6 +31,13 @@ void ConvolutionLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); + if (this->param_propagate_down_[0]) { + caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); + } + if (this->bias_term_ && this->param_propagate_down_[1]) { + caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), + this->blobs_[1]->mutable_gpu_diff()); + } for (int i = 0; i < top.size(); ++i) { 
const Dtype* top_diff = top[i]->gpu_diff(); // Bias gradient, if necessary. diff --git a/src/caffe/layers/cudnn_conv_layer.cu b/src/caffe/layers/cudnn_conv_layer.cu index 691152021a3..dcdd2a0005e 100644 --- a/src/caffe/layers/cudnn_conv_layer.cu +++ b/src/caffe/layers/cudnn_conv_layer.cu @@ -101,10 +101,12 @@ void CuDNNConvolutionLayer::Backward_gpu(const vector*>& top, if (this->param_propagate_down_[0]) { weight = this->blobs_[0]->gpu_data(); weight_diff = this->blobs_[0]->mutable_gpu_diff(); + caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); } Dtype* bias_diff = NULL; if (this->bias_term_ && this->param_propagate_down_[1]) { bias_diff = this->blobs_[1]->mutable_gpu_diff(); + caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), bias_diff); } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); diff --git a/src/caffe/layers/data_layer.cpp b/src/caffe/layers/data_layer.cpp index 71f8cb099e8..2eb7114b67e 100644 --- a/src/caffe/layers/data_layer.cpp +++ b/src/caffe/layers/data_layer.cpp @@ -12,96 +12,149 @@ #include "caffe/proto/caffe.pb.h" #include "caffe/util/benchmark.hpp" #include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/util/rng.hpp" namespace caffe { template -DataLayer::DataLayer(const LayerParameter& param) - : BasePrefetchingDataLayer(param), - reader_(param) { -} - -template -DataLayer::~DataLayer() { - this->StopInternalThread(); +DataLayer::~DataLayer() { + this->JoinPrefetchThread(); } template void DataLayer::DataLayerSetUp(const vector*>& bottom, const vector*>& top) { - const int batch_size = this->layer_param_.data_param().batch_size(); + // Initialize DB + db_.reset(db::GetDB(this->layer_param_.data_param().backend())); + db_->Open(this->layer_param_.data_param().source(), db::READ); + cursor_.reset(db_->NewCursor()); + + // Check if we should randomly skip a few data points + if (this->layer_param_.data_param().rand_skip()) { + unsigned int skip = caffe_rng_rand() % + 
this->layer_param_.data_param().rand_skip(); + LOG(INFO) << "Skipping first " << skip << " data points."; + while (skip-- > 0) { + cursor_->Next(); + } + } // Read a data point, and use it to initialize the top blob. - Datum& datum = *(reader_.full().peek()); + Datum datum; + datum.ParseFromString(cursor_->value()); - // Use data_transformer to infer the expected blob shape from datum. - vector top_shape = this->data_transformer_->InferBlobShape(datum); - this->transformed_data_.Reshape(top_shape); - // Reshape top[0] and prefetch_data according to the batch_size. - top_shape[0] = batch_size; - top[0]->Reshape(top_shape); - for (int i = 0; i < this->PREFETCH_COUNT; ++i) { - this->prefetch_[i].data_.Reshape(top_shape); + bool force_color = this->layer_param_.data_param().force_encoded_color(); + if ((force_color && DecodeDatum(&datum, true)) || + DecodeDatumNative(&datum)) { + LOG(INFO) << "Decoding Datum"; + } + // image + int crop_size = this->layer_param_.transform_param().crop_size(); + if (crop_size > 0) { + top[0]->Reshape(this->layer_param_.data_param().batch_size(), + datum.channels(), crop_size, crop_size); + this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(), + datum.channels(), crop_size, crop_size); + this->transformed_data_.Reshape(1, datum.channels(), crop_size, crop_size); + } else { + top[0]->Reshape( + this->layer_param_.data_param().batch_size(), datum.channels(), + datum.height(), datum.width()); + this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(), + datum.channels(), datum.height(), datum.width()); + this->transformed_data_.Reshape(1, datum.channels(), + datum.height(), datum.width()); } LOG(INFO) << "output data size: " << top[0]->num() << "," << top[0]->channels() << "," << top[0]->height() << "," << top[0]->width(); // label if (this->output_labels_) { - vector label_shape(1, batch_size); + vector label_shape(1, this->layer_param_.data_param().batch_size()); top[1]->Reshape(label_shape); - for 
(int i = 0; i < this->PREFETCH_COUNT; ++i) { - this->prefetch_[i].label_.Reshape(label_shape); - } + this->prefetch_label_.Reshape(label_shape); } } -// This function is called on prefetch thread -template -void DataLayer::load_batch(Batch* batch) { +// This function is used to create a thread that prefetches the data. +template +void DataLayer::InternalThreadEntry() { CPUTimer batch_timer; batch_timer.Start(); double read_time = 0; double trans_time = 0; CPUTimer timer; - CHECK(batch->data_.count()); + CHECK(this->prefetch_data_.count()); CHECK(this->transformed_data_.count()); - // Reshape according to the first datum of each batch - // on single input batches allows for inputs of varying dimension. + // Reshape on single input batches for inputs of varying dimension. const int batch_size = this->layer_param_.data_param().batch_size(); - Datum& datum = *(reader_.full().peek()); - // Use data_transformer to infer the expected blob shape from datum. - vector top_shape = this->data_transformer_->InferBlobShape(datum); - this->transformed_data_.Reshape(top_shape); - // Reshape batch according to the batch_size. 
- top_shape[0] = batch_size; - batch->data_.Reshape(top_shape); + const int crop_size = this->layer_param_.transform_param().crop_size(); + bool force_color = this->layer_param_.data_param().force_encoded_color(); + if (batch_size == 1 && crop_size == 0) { + Datum datum; + datum.ParseFromString(cursor_->value()); + if (datum.encoded()) { + if (force_color) { + DecodeDatum(&datum, true); + } else { + DecodeDatumNative(&datum); + } + } + this->prefetch_data_.Reshape(1, datum.channels(), + datum.height(), datum.width()); + this->transformed_data_.Reshape(1, datum.channels(), + datum.height(), datum.width()); + } - Dtype* top_data = batch->data_.mutable_cpu_data(); + Dtype* top_data = this->prefetch_data_.mutable_cpu_data(); Dtype* top_label = NULL; // suppress warnings about uninitialized variables if (this->output_labels_) { - top_label = batch->label_.mutable_cpu_data(); + top_label = this->prefetch_label_.mutable_cpu_data(); } for (int item_id = 0; item_id < batch_size; ++item_id) { timer.Start(); - // get a datum - Datum& datum = *(reader_.full().pop("Waiting for data")); + // get a blob + Datum datum; + datum.ParseFromString(cursor_->value()); + + cv::Mat cv_img; + if (datum.encoded()) { + if (force_color) { + cv_img = DecodeDatumToCVMat(datum, true); + } else { + cv_img = DecodeDatumToCVMatNative(datum); + } + if (cv_img.channels() != this->transformed_data_.channels()) { + LOG(WARNING) << "Your dataset contains encoded images with mixed " + << "channel sizes. Consider adding a 'force_color' flag to the " + << "model definition, or rebuild your dataset using " + << "convert_imageset."; + } + } read_time += timer.MicroSeconds(); timer.Start(); + // Apply data transformations (mirror, scale, crop...) - int offset = batch->data_.offset(item_id); + int offset = this->prefetch_data_.offset(item_id); this->transformed_data_.set_cpu_data(top_data + offset); - this->data_transformer_->Transform(datum, &(this->transformed_data_)); - // Copy label. 
+ if (datum.encoded()) { + this->data_transformer_->Transform(cv_img, &(this->transformed_data_)); + } else { + this->data_transformer_->Transform(datum, &(this->transformed_data_)); + } if (this->output_labels_) { top_label[item_id] = datum.label(); } trans_time += timer.MicroSeconds(); - - reader_.free().push(const_cast(&datum)); + // go to the next iter + cursor_->Next(); + if (!cursor_->valid()) { + DLOG(INFO) << "Restarting data prefetching from start."; + cursor_->SeekToFirst(); + } } - timer.Stop(); batch_timer.Stop(); DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms."; DLOG(INFO) << " Read time: " << read_time / 1000 << " ms."; diff --git a/src/caffe/layers/deconv_layer.cpp b/src/caffe/layers/deconv_layer.cpp index 91aabb315b2..b471043ebfd 100644 --- a/src/caffe/layers/deconv_layer.cpp +++ b/src/caffe/layers/deconv_layer.cpp @@ -46,6 +46,13 @@ void DeconvolutionLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->cpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_cpu_diff(); + if (this->param_propagate_down_[0]) { + caffe_set(this->blobs_[0]->count(), Dtype(0), weight_diff); + } + if (this->bias_term_ && this->param_propagate_down_[1]) { + caffe_set(this->blobs_[1]->count(), Dtype(0), + this->blobs_[1]->mutable_cpu_diff()); + } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->cpu_diff(); const Dtype* bottom_data = bottom[i]->cpu_data(); diff --git a/src/caffe/layers/deconv_layer.cu b/src/caffe/layers/deconv_layer.cu index 5dbdcc3149f..d37e9f7ac09 100644 --- a/src/caffe/layers/deconv_layer.cu +++ b/src/caffe/layers/deconv_layer.cu @@ -31,6 +31,13 @@ void DeconvolutionLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); + if (this->param_propagate_down_[0]) { + 
caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); + } + if (this->bias_term_ && this->param_propagate_down_[1]) { + caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), + this->blobs_[1]->mutable_gpu_diff()); + } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); const Dtype* bottom_data = bottom[i]->gpu_data(); diff --git a/src/caffe/layers/flatten_layer.cpp b/src/caffe/layers/flatten_layer.cpp index f7e5c9c2172..745f271ea45 100644 --- a/src/caffe/layers/flatten_layer.cpp +++ b/src/caffe/layers/flatten_layer.cpp @@ -9,19 +9,9 @@ namespace caffe { template void FlattenLayer::Reshape(const vector*>& bottom, const vector*>& top) { - const int start_axis = bottom[0]->CanonicalAxisIndex( - this->layer_param_.flatten_param().axis()); - const int end_axis = bottom[0]->CanonicalAxisIndex( - this->layer_param_.flatten_param().end_axis()); - vector top_shape; - for (int i = 0; i < start_axis; ++i) { - top_shape.push_back(bottom[0]->shape(i)); - } - const int flattened_dim = bottom[0]->count(start_axis, end_axis + 1); - top_shape.push_back(flattened_dim); - for (int i = end_axis + 1; i < bottom[0]->num_axes(); ++i) { - top_shape.push_back(bottom[0]->shape(i)); - } + vector top_shape(2); + top_shape[0] = bottom[0]->num(); + top_shape[1] = bottom[0]->count() / bottom[0]->num(); top[0]->Reshape(top_shape); CHECK_EQ(top[0]->count(), bottom[0]->count()); } diff --git a/src/caffe/layers/image_data_layer.cpp b/src/caffe/layers/image_data_layer.cpp index 3d2190f8bbb..c1bab09e10e 100644 --- a/src/caffe/layers/image_data_layer.cpp +++ b/src/caffe/layers/image_data_layer.cpp @@ -18,7 +18,7 @@ namespace caffe { template ImageDataLayer::~ImageDataLayer() { - this->StopInternalThread(); + this->JoinPrefetchThread(); } template @@ -63,28 +63,28 @@ void ImageDataLayer::DataLayerSetUp(const vector*>& bottom, // Read an image, and use it to initialize the top blob. 
cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, new_height, new_width, is_color); - CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first; - // Use data_transformer to infer the expected blob shape from a cv_image. - vector top_shape = this->data_transformer_->InferBlobShape(cv_img); - this->transformed_data_.Reshape(top_shape); - // Reshape prefetch_data and top[0] according to the batch_size. + const int channels = cv_img.channels(); + const int height = cv_img.rows; + const int width = cv_img.cols; + // image + const int crop_size = this->layer_param_.transform_param().crop_size(); const int batch_size = this->layer_param_.image_data_param().batch_size(); - CHECK_GT(batch_size, 0) << "Positive batch size required"; - top_shape[0] = batch_size; - for (int i = 0; i < this->PREFETCH_COUNT; ++i) { - this->prefetch_[i].data_.Reshape(top_shape); + if (crop_size > 0) { + top[0]->Reshape(batch_size, channels, crop_size, crop_size); + this->prefetch_data_.Reshape(batch_size, channels, crop_size, crop_size); + this->transformed_data_.Reshape(1, channels, crop_size, crop_size); + } else { + top[0]->Reshape(batch_size, channels, height, width); + this->prefetch_data_.Reshape(batch_size, channels, height, width); + this->transformed_data_.Reshape(1, channels, height, width); } - top[0]->Reshape(top_shape); - LOG(INFO) << "output data size: " << top[0]->num() << "," << top[0]->channels() << "," << top[0]->height() << "," << top[0]->width(); // label vector label_shape(1, batch_size); top[1]->Reshape(label_shape); - for (int i = 0; i < this->PREFETCH_COUNT; ++i) { - this->prefetch_[i].label_.Reshape(label_shape); - } + this->prefetch_label_.Reshape(label_shape); } template @@ -94,37 +94,36 @@ void ImageDataLayer::ShuffleImages() { shuffle(lines_.begin(), lines_.end(), prefetch_rng); } -// This function is called on prefetch thread +// This function is used to create a thread that prefetches the data. 
template -void ImageDataLayer::load_batch(Batch* batch) { +void ImageDataLayer::InternalThreadEntry() { CPUTimer batch_timer; batch_timer.Start(); double read_time = 0; double trans_time = 0; CPUTimer timer; - CHECK(batch->data_.count()); + CHECK(this->prefetch_data_.count()); CHECK(this->transformed_data_.count()); ImageDataParameter image_data_param = this->layer_param_.image_data_param(); const int batch_size = image_data_param.batch_size(); const int new_height = image_data_param.new_height(); const int new_width = image_data_param.new_width(); + const int crop_size = this->layer_param_.transform_param().crop_size(); const bool is_color = image_data_param.is_color(); string root_folder = image_data_param.root_folder(); - // Reshape according to the first image of each batch - // on single input batches allows for inputs of varying dimension. - cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, - new_height, new_width, is_color); - CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first; - // Use data_transformer to infer the expected blob shape from a cv_img. - vector top_shape = this->data_transformer_->InferBlobShape(cv_img); - this->transformed_data_.Reshape(top_shape); - // Reshape batch according to the batch_size. - top_shape[0] = batch_size; - batch->data_.Reshape(top_shape); - - Dtype* prefetch_data = batch->data_.mutable_cpu_data(); - Dtype* prefetch_label = batch->label_.mutable_cpu_data(); + // Reshape on single input batches for inputs of varying dimension. 
+ if (batch_size == 1 && crop_size == 0 && new_height == 0 && new_width == 0) { + cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, + 0, 0, is_color); + this->prefetch_data_.Reshape(1, cv_img.channels(), + cv_img.rows, cv_img.cols); + this->transformed_data_.Reshape(1, cv_img.channels(), + cv_img.rows, cv_img.cols); + } + + Dtype* prefetch_data = this->prefetch_data_.mutable_cpu_data(); + Dtype* prefetch_label = this->prefetch_label_.mutable_cpu_data(); // datum scales const int lines_size = lines_.size(); @@ -138,7 +137,7 @@ void ImageDataLayer::load_batch(Batch* batch) { read_time += timer.MicroSeconds(); timer.Start(); // Apply transformations (mirror, crop...) to the image - int offset = batch->data_.offset(item_id); + int offset = this->prefetch_data_.offset(item_id); this->transformed_data_.set_cpu_data(prefetch_data + offset); this->data_transformer_->Transform(cv_img, &(this->transformed_data_)); trans_time += timer.MicroSeconds(); diff --git a/src/caffe/layers/inner_product_layer.cpp b/src/caffe/layers/inner_product_layer.cpp index 83c3235eb71..89e0c8fbad7 100644 --- a/src/caffe/layers/inner_product_layer.cpp +++ b/src/caffe/layers/inner_product_layer.cpp @@ -101,13 +101,13 @@ void InnerProductLayer::Backward_cpu(const vector*>& top, const Dtype* bottom_data = bottom[0]->cpu_data(); // Gradient with respect to weight caffe_cpu_gemm(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., - top_diff, bottom_data, (Dtype)1., this->blobs_[0]->mutable_cpu_diff()); + top_diff, bottom_data, (Dtype)0., this->blobs_[0]->mutable_cpu_diff()); } if (bias_term_ && this->param_propagate_down_[1]) { const Dtype* top_diff = top[0]->cpu_diff(); // Gradient with respect to bias caffe_cpu_gemv(CblasTrans, M_, N_, (Dtype)1., top_diff, - bias_multiplier_.cpu_data(), (Dtype)1., + bias_multiplier_.cpu_data(), (Dtype)0., this->blobs_[1]->mutable_cpu_diff()); } if (propagate_down[0]) { diff --git a/src/caffe/layers/inner_product_layer.cu 
b/src/caffe/layers/inner_product_layer.cu index c0ebd2c47da..50f76f4942c 100644 --- a/src/caffe/layers/inner_product_layer.cu +++ b/src/caffe/layers/inner_product_layer.cu @@ -40,13 +40,13 @@ void InnerProductLayer::Backward_gpu(const vector*>& top, const Dtype* bottom_data = bottom[0]->gpu_data(); // Gradient with respect to weight caffe_gpu_gemm(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., - top_diff, bottom_data, (Dtype)1., this->blobs_[0]->mutable_gpu_diff()); + top_diff, bottom_data, (Dtype)0., this->blobs_[0]->mutable_gpu_diff()); } if (bias_term_ && this->param_propagate_down_[1]) { const Dtype* top_diff = top[0]->gpu_diff(); // Gradient with respect to bias caffe_gpu_gemv(CblasTrans, M_, N_, (Dtype)1., top_diff, - bias_multiplier_.gpu_data(), (Dtype)1., + bias_multiplier_.gpu_data(), (Dtype)0., this->blobs_[1]->mutable_gpu_diff()); } if (propagate_down[0]) { diff --git a/src/caffe/layers/lrn_layer.cu b/src/caffe/layers/lrn_layer.cu index 001b3c34ac1..24aa6a30130 100644 --- a/src/caffe/layers/lrn_layer.cu +++ b/src/caffe/layers/lrn_layer.cu @@ -7,46 +7,44 @@ namespace caffe { template -__global__ void LRNFillScale(const int nthreads, const Dtype* const in, +__global__ void LRNFillScale(const int nthreads, const Dtype* in, const int num, const int channels, const int height, const int width, const int size, const Dtype alpha_over_size, - const Dtype k, Dtype* const scale) { + const Dtype k, Dtype* scale) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset - const int w = index % width; - const int h = (index / width) % height; - const int n = index / width / height; - const int offset = (n * channels * height + h) * width + w; - const int step = height * width; - const Dtype* const in_off = in + offset; - Dtype* const scale_off = scale + offset; + int w = index % width; + int h = (index / width) % height; + int n = index / width / height; + int offset = (n * channels * height + h) * width + w; + int step = height * width; + in += offset; + 
scale += offset; int head = 0; - const int pre_pad = (size - 1) / 2; - const int post_pad = size - pre_pad - 1; + int pre_pad = (size - 1) / 2; + int post_pad = size - pre_pad - 1; Dtype accum_scale = 0; // fill the scale at [n, :, h, w] // accumulate values while (head < post_pad && head < channels) { - accum_scale += in_off[head * step] * in_off[head * step]; + accum_scale += in[head * step] * in[head * step]; ++head; } // both add and subtract while (head < channels) { - accum_scale += in_off[head * step] * in_off[head * step]; + accum_scale += in[head * step] * in[head * step]; if (head - size >= 0) { - accum_scale -= in_off[(head - size) * step] - * in_off[(head - size) * step]; + accum_scale -= in[(head - size) * step] * in[(head - size) * step]; } - scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; + scale[(head - post_pad) * step] = k + accum_scale * alpha_over_size; ++head; } // subtract only while (head < channels + post_pad) { if (head - size >= 0) { - accum_scale -= in_off[(head - size) * step] - * in_off[(head - size) * step]; + accum_scale -= in[(head - size) * step] * in[(head - size) * step]; } - scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; + scale[(head - post_pad) * step] = k + accum_scale * alpha_over_size; ++head; } } @@ -70,8 +68,8 @@ void LRNLayer::Forward_gpu(const vector*>& bottom, // TODO: check if it would be faster to just put it into the previous kernel. 
template -__global__ void LRNComputeOutput(const int nthreads, const Dtype* const in, - const Dtype* const scale, const Dtype negative_beta, Dtype* const out) { +__global__ void LRNComputeOutput(const int nthreads, const Dtype* in, + const Dtype* scale, const Dtype negative_beta, Dtype* out) { CUDA_KERNEL_LOOP(index, nthreads) { out[index] = in[index] * pow(scale[index], negative_beta); } @@ -120,58 +118,56 @@ void LRNLayer::Backward_gpu(const vector*>& top, } template -__global__ void LRNComputeDiff(const int nthreads, - const Dtype* const bottom_data, const Dtype* const top_data, - const Dtype* const scale, const Dtype* const top_diff, +__global__ void LRNComputeDiff(const int nthreads, const Dtype* bottom_data, + const Dtype* top_data, const Dtype* scale, const Dtype* top_diff, const int num, const int channels, const int height, const int width, const int size, const Dtype negative_beta, - const Dtype cache_ratio, Dtype* const bottom_diff) { + const Dtype cache_ratio, + Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset - const int w = index % width; - const int h = (index / width) % height; - const int n = index / width / height; - const int offset = (n * channels * height + h) * width + w; - const int step = height * width; - const Dtype* const bottom_off = bottom_data + offset; - const Dtype* const top_off = top_data + offset; - const Dtype* const scale_off = scale + offset; - const Dtype* const top_diff_off = top_diff + offset; - Dtype* const bottom_diff_off = bottom_diff + offset; + int w = index % width; + int h = (index / width) % height; + int n = index / width / height; + int offset = (n * channels * height + h) * width + w; + int step = height * width; + bottom_data += offset; + top_data += offset; + scale += offset; + top_diff += offset; + bottom_diff += offset; int head = 0; - const int pre_pad = size - (size + 1) / 2; - const int post_pad = size - pre_pad - 1; + int pre_pad = size - (size + 1) / 2; + int post_pad 
= size - pre_pad - 1; Dtype accum_ratio = 0; // accumulate values while (head < post_pad && head < channels) { - accum_ratio += top_diff_off[head * step] * top_off[head * step] / - scale_off[head * step]; + accum_ratio += top_diff[head * step] * top_data[head * step] / + scale[head * step]; ++head; } // both add and subtract while (head < channels) { - accum_ratio += top_diff_off[head * step] * top_off[head * step] / - scale_off[head * step]; + accum_ratio += top_diff[head * step] * top_data[head * step] / + scale[head * step]; if (head - size >= 0) { - accum_ratio -= top_diff_off[(head - size) * step] * - top_off[(head - size) * step] / scale_off[(head - size) * step]; + accum_ratio -= top_diff[(head - size) * step] * + top_data[(head - size) * step] / scale[(head - size) * step]; } - bottom_diff_off[(head - post_pad) * step] = - top_diff_off[(head - post_pad) * step] - * pow(scale_off[(head - post_pad) * step], negative_beta) - - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; + bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] + * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * + bottom_data[(head - post_pad) * step] * accum_ratio; ++head; } // subtract only while (head < channels + post_pad) { if (head - size >= 0) { - accum_ratio -= top_diff_off[(head - size) * step] * - top_off[(head - size) * step] / scale_off[(head - size) * step]; + accum_ratio -= top_diff[(head - size) * step] * + top_data[(head - size) * step] / scale[(head - size) * step]; } - bottom_diff_off[(head - post_pad) * step] = - top_diff_off[(head - post_pad) * step] - * pow(scale_off[(head - post_pad) * step], negative_beta) - - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; + bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] + * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * + bottom_data[(head - post_pad) * step] * accum_ratio; ++head; } } diff --git 
a/src/caffe/layers/mvn_layer.cpp b/src/caffe/layers/mvn_layer.cpp index 325691b1875..45994d8f7fd 100644 --- a/src/caffe/layers/mvn_layer.cpp +++ b/src/caffe/layers/mvn_layer.cpp @@ -26,7 +26,6 @@ void MVNLayer::Reshape(const vector*>& bottom, } Dtype* multiplier_data = sum_multiplier_.mutable_cpu_data(); caffe_set(sum_multiplier_.count(), Dtype(1), multiplier_data); - eps_ = this->layer_param_.mvn_param().eps(); } template @@ -41,6 +40,7 @@ void MVNLayer::Forward_cpu(const vector*>& bottom, num = bottom[0]->num() * bottom[0]->channels(); int dim = bottom[0]->count() / num; + Dtype eps = 1e-10; if (this->layer_param_.mvn_param().normalize_variance()) { // put the squares of bottom into temp_ @@ -70,7 +70,7 @@ void MVNLayer::Forward_cpu(const vector*>& bottom, caffe_powx(variance_.count(), variance_.cpu_data(), Dtype(0.5), variance_.mutable_cpu_data()); - caffe_add_scalar(variance_.count(), eps_, variance_.mutable_cpu_data()); + caffe_add_scalar(variance_.count(), eps, variance_.mutable_cpu_data()); caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., variance_.cpu_data(), sum_multiplier_.cpu_data(), 0., @@ -106,6 +106,7 @@ void MVNLayer::Backward_cpu(const vector*>& top, num = bottom[0]->num() * bottom[0]->channels(); int dim = bottom[0]->count() / num; + Dtype eps = 1e-10; if (this->layer_param_.mvn_param().normalize_variance()) { caffe_mul(temp_.count(), top_data, top_diff, bottom_diff); @@ -128,6 +129,24 @@ void MVNLayer::Backward_cpu(const vector*>& top, // put the squares of bottom into temp_ caffe_powx(temp_.count(), bottom_data, Dtype(2), temp_.mutable_cpu_data()); + + // computes variance using var(X) = E(X^2) - (EX)^2 + caffe_cpu_gemv(CblasNoTrans, num, dim, 1. / dim, bottom_data, + sum_multiplier_.cpu_data(), 0., mean_.mutable_cpu_data()); // EX + caffe_cpu_gemv(CblasNoTrans, num, dim, 1. 
/ dim, temp_.cpu_data(), + sum_multiplier_.cpu_data(), 0., + variance_.mutable_cpu_data()); // E(X^2) + caffe_powx(mean_.count(), mean_.cpu_data(), Dtype(2), + temp_.mutable_cpu_data()); // (EX)^2 + caffe_sub(mean_.count(), variance_.cpu_data(), temp_.cpu_data(), + variance_.mutable_cpu_data()); // variance + + // normalize variance + caffe_powx(variance_.count(), variance_.cpu_data(), Dtype(0.5), + variance_.mutable_cpu_data()); + + caffe_add_scalar(variance_.count(), eps, variance_.mutable_cpu_data()); + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., variance_.cpu_data(), sum_multiplier_.cpu_data(), 0., temp_.mutable_cpu_data()); diff --git a/src/caffe/layers/mvn_layer.cu b/src/caffe/layers/mvn_layer.cu index d86a2e73fc2..446d7327a5b 100644 --- a/src/caffe/layers/mvn_layer.cu +++ b/src/caffe/layers/mvn_layer.cu @@ -36,6 +36,8 @@ void MVNLayer::Forward_gpu(const vector*>& bottom, caffe_gpu_sub(mean_.count(), variance_.gpu_data(), temp_.gpu_data(), variance_.mutable_gpu_data()); // variance + Dtype eps = 1e-10; + // do mean and variance normalization // subtract mean caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, -1., @@ -48,7 +50,7 @@ void MVNLayer::Forward_gpu(const vector*>& bottom, caffe_gpu_powx(variance_.count(), variance_.gpu_data(), Dtype(0.5), variance_.mutable_gpu_data()); - caffe_gpu_add_scalar(variance_.count(), eps_, variance_.mutable_gpu_data()); + caffe_gpu_add_scalar(variance_.count(), eps, variance_.mutable_gpu_data()); caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., variance_.gpu_data(), sum_multiplier_.gpu_data(), 0., @@ -85,6 +87,8 @@ void MVNLayer::Backward_gpu(const vector*>& top, int dim = bottom[0]->count() / num; + Dtype eps = 1e-10; + if (this->layer_param_.mvn_param().normalize_variance()) { caffe_gpu_mul(temp_.count(), top_data, top_diff, bottom_diff); caffe_gpu_gemv(CblasNoTrans, num, dim, 1., bottom_diff, @@ -107,6 +111,23 @@ void MVNLayer::Backward_gpu(const vector*>& top, 
caffe_gpu_powx(temp_.count(), bottom_data, Dtype(2), temp_.mutable_gpu_data()); + // computes variance using var(X) = E(X^2) - (EX)^2 + caffe_gpu_gemv(CblasNoTrans, num, dim, 1. / dim, bottom_data, + sum_multiplier_.gpu_data(), 0., mean_.mutable_gpu_data()); // EX + caffe_gpu_gemv(CblasNoTrans, num, dim, 1. / dim, temp_.gpu_data(), + sum_multiplier_.gpu_data(), 0., + variance_.mutable_gpu_data()); // E(X^2) + caffe_gpu_powx(mean_.count(), mean_.gpu_data(), Dtype(2), + temp_.mutable_gpu_data()); // (EX)^2 + caffe_gpu_sub(mean_.count(), variance_.gpu_data(), temp_.gpu_data(), + variance_.mutable_gpu_data()); // variance + + // normalize variance + caffe_gpu_powx(variance_.count(), variance_.gpu_data(), Dtype(0.5), + variance_.mutable_gpu_data()); + + caffe_gpu_add_scalar(variance_.count(), eps, variance_.mutable_gpu_data()); + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., variance_.gpu_data(), sum_multiplier_.gpu_data(), 0., temp_.mutable_gpu_data()); diff --git a/src/caffe/layers/pooling_layer.cu b/src/caffe/layers/pooling_layer.cu index ca4b13f7c41..d1d48501af3 100644 --- a/src/caffe/layers/pooling_layer.cu +++ b/src/caffe/layers/pooling_layer.cu @@ -9,32 +9,31 @@ namespace caffe { template -__global__ void MaxPoolForward(const int nthreads, - const Dtype* const bottom_data, const int num, const int channels, - const int height, const int width, const int pooled_height, - const int pooled_width, const int kernel_h, const int kernel_w, - const int stride_h, const int stride_w, const int pad_h, const int pad_w, - Dtype* const top_data, int* mask, Dtype* top_mask) { +__global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data, + const int num, const int channels, const int height, + const int width, const int pooled_height, const int pooled_width, + const int kernel_h, const int kernel_w, const int stride_h, + const int stride_w, const int pad_h, const int pad_w, Dtype* top_data, + int* mask, Dtype* top_mask) { CUDA_KERNEL_LOOP(index, 
nthreads) { - const int pw = index % pooled_width; - const int ph = (index / pooled_width) % pooled_height; - const int c = (index / pooled_width / pooled_height) % channels; - const int n = index / pooled_width / pooled_height / channels; + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; - const int hend = min(hstart + kernel_h, height); - const int wend = min(wstart + kernel_w, width); + int hend = min(hstart + kernel_h, height); + int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); Dtype maxval = -FLT_MAX; int maxidx = -1; - const Dtype* const bottom_slice = - bottom_data + (n * channels + c) * height * width; + bottom_data += (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - if (bottom_slice[h * width + w] > maxval) { + if (bottom_data[h * width + w] > maxval) { maxidx = h * width + w; - maxval = bottom_slice[maxidx]; + maxval = bottom_data[maxidx]; } } } @@ -48,32 +47,30 @@ __global__ void MaxPoolForward(const int nthreads, } template -__global__ void AvePoolForward(const int nthreads, - const Dtype* const bottom_data, const int num, const int channels, - const int height, const int width, const int pooled_height, - const int pooled_width, const int kernel_h, const int kernel_w, - const int stride_h, const int stride_w, const int pad_h, const int pad_w, - Dtype* const top_data) { +__global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data, + const int num, const int channels, const int height, + const int width, const int pooled_height, const int pooled_width, + const int kernel_h, const int kernel_w, const int stride_h, + const int stride_w, const int pad_h, const int pad_w, Dtype* top_data) { CUDA_KERNEL_LOOP(index, 
nthreads) { - const int pw = index % pooled_width; - const int ph = (index / pooled_width) % pooled_height; - const int c = (index / pooled_width / pooled_height) % channels; - const int n = index / pooled_width / pooled_height / channels; + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); - const int pool_size = (hend - hstart) * (wend - wstart); + int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); Dtype aveval = 0; - const Dtype* const bottom_slice = - bottom_data + (n * channels + c) * height * width; + bottom_data += (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - aveval += bottom_slice[h * width + w]; + aveval += bottom_data[h * width + w]; } } top_data[index] = aveval / pool_size; @@ -82,38 +79,37 @@ __global__ void AvePoolForward(const int nthreads, template __global__ void StoPoolForwardTrain(const int nthreads, - const Dtype* const bottom_data, + const Dtype* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, - const int stride_w, Dtype* const rand_idx, Dtype* const top_data) { + const int stride_w, Dtype* rand_idx, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { - const int pw = index % pooled_width; - const int ph = (index / pooled_width) % pooled_height; - const int c = (index / pooled_width / pooled_height) % channels; - const int n = index / pooled_width / pooled_height / channels; - const int hstart = ph * stride_h; - 
const int hend = min(hstart + kernel_h, height); - const int wstart = pw * stride_w; - const int wend = min(wstart + kernel_w, width); + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h; + int hend = min(hstart + kernel_h, height); + int wstart = pw * stride_w; + int wend = min(wstart + kernel_w, width); Dtype cumsum = 0.; - const Dtype* const bottom_slice = - bottom_data + (n * channels + c) * height * width; + bottom_data += (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - cumsum += bottom_slice[h * width + w]; + cumsum += bottom_data[h * width + w]; } } - const float thres = rand_idx[index] * cumsum; + float thres = rand_idx[index] * cumsum; // Second pass: get value, and set index. cumsum = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - cumsum += bottom_slice[h * width + w]; + cumsum += bottom_data[h * width + w]; if (cumsum >= thres) { rand_idx[index] = ((n * channels + c) * height + h) * width + w; - top_data[index] = bottom_slice[h * width + w]; + top_data[index] = bottom_data[h * width + w]; return; } } @@ -124,30 +120,29 @@ __global__ void StoPoolForwardTrain(const int nthreads, template __global__ void StoPoolForwardTest(const int nthreads, - const Dtype* const bottom_data, + const Dtype* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, - const int stride_w, Dtype* const top_data) { + const int stride_w, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { - const int pw = index % pooled_width; - const int ph = (index / pooled_width) % pooled_height; - const int c = (index / pooled_width / pooled_height) % 
channels; - const int n = index / pooled_width / pooled_height / channels; - const int hstart = ph * stride_h; - const int hend = min(hstart + kernel_h, height); - const int wstart = pw * stride_w; - const int wend = min(wstart + kernel_w, width); + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + int hstart = ph * stride_h; + int hend = min(hstart + kernel_h, height); + int wstart = pw * stride_w; + int wend = min(wstart + kernel_w, width); // We set cumsum to be 0 to avoid divide-by-zero problems Dtype cumsum = FLT_MIN; Dtype cumvalues = 0.; - const Dtype* const bottom_slice = - bottom_data + (n * channels + c) * height * width; + bottom_data += (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - cumsum += bottom_slice[h * width + w]; - cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w]; + cumsum += bottom_data[h * width + w]; + cumvalues += bottom_data[h * width + w] * bottom_data[h * width + w]; } } top_data[index] = cumvalues / cumsum; @@ -215,43 +210,43 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, template -__global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff, - const int* const mask, const Dtype* const top_mask, const int num, - const int channels, const int height, const int width, - const int pooled_height, const int pooled_width, const int kernel_h, - const int kernel_w, const int stride_h, const int stride_w, const int pad_h, - const int pad_w, Dtype* const bottom_diff) { +__global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, + const int* mask, const Dtype* top_mask, const int num, const int channels, + const int height, const int width, const int pooled_height, + const int pooled_width, const int kernel_h, const int kernel_w, + const int 
stride_h, const int stride_w, const int pad_h, const int pad_w, + Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset - const int w = index % width; - const int h = (index / width) % height; - const int c = (index / width / height) % channels; - const int n = index / width / height / channels; - const int phstart = - (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; - const int phend = min((h + pad_h) / stride_h + 1, pooled_height); - const int pwstart = - (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; - const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); + int w = index % width; + int h = (index / width) % height; + int c = (index / width / height) % channels; + int n = index / width / height / channels; + int phstart = + (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; + int phend = min((h + pad_h) / stride_h + 1, pooled_height); + int pwstart = + (w + pad_w < kernel_w) ? 
0 : (w + pad_w - kernel_w) / stride_w + 1; + int pwend = min((w + pad_w) / stride_w + 1, pooled_width); Dtype gradient = 0; - const int offset = (n * channels + c) * pooled_height * pooled_width; - const Dtype* const top_diff_slice = top_diff + offset; + int offset = (n * channels + c) * pooled_height * pooled_width; + top_diff += offset; if (mask) { - const int* const mask_slice = mask + offset; + mask += offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { - if (mask_slice[ph * pooled_width + pw] == h * width + w) { - gradient += top_diff_slice[ph * pooled_width + pw]; + if (mask[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff[ph * pooled_width + pw]; } } } } else { - const Dtype* const top_mask_slice = top_mask + offset; + top_mask += offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { - if (top_mask_slice[ph * pooled_width + pw] == h * width + w) { - gradient += top_diff_slice[ph * pooled_width + pw]; + if (top_mask[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff[ph * pooled_width + pw]; } } } @@ -261,26 +256,25 @@ __global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff, } template -__global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff, +__global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, - Dtype* const bottom_diff) { + Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset - const int w = index % width + pad_w; - const int h = (index / width) % height + pad_h; - const int c = (index / width / height) % channels; - const int n = index / width / height / channels; - const int phstart 
= (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; - const int phend = min(h / stride_h + 1, pooled_height); - const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; - const int pwend = min(w / stride_w + 1, pooled_width); + int w = index % width + pad_w; + int h = (index / width) % height + pad_h; + int c = (index / width / height) % channels; + int n = index / width / height / channels; + int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + int phend = min(h / stride_h + 1, pooled_height); + int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; + int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; - const Dtype* const top_diff_slice = - top_diff + (n * channels + c) * pooled_height * pooled_width; + top_diff += (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size @@ -289,7 +283,7 @@ __global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff, int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); - gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; + gradient += top_diff[ph * pooled_width + pw] / pool_size; } } bottom_diff[index] = gradient; @@ -299,31 +293,29 @@ __global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff, template __global__ void StoPoolBackward(const int nthreads, - const Dtype* const rand_idx, const Dtype* const top_diff, + const Dtype* rand_idx, const Dtype* top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, - const int stride_w, Dtype* const bottom_diff) { + const int stride_w, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local 
offset - const int w = index % width; - const int h = (index / width) % height; - const int c = (index / width / height) % channels; - const int n = index / width / height / channels; - const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; - const int phend = min(h / stride_h + 1, pooled_height); - const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; - const int pwend = min(w / stride_w + 1, pooled_width); + int w = index % width; + int h = (index / width) % height; + int c = (index / width / height) % channels; + int n = index / width / height / channels; + int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + int phend = min(h / stride_h + 1, pooled_height); + int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; + int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; - const Dtype* const rand_idx_slice = - rand_idx + (n * channels + c) * pooled_height * pooled_width; - const Dtype* const top_diff_slice = - top_diff + (n * channels + c) * pooled_height * pooled_width; + rand_idx += (n * channels + c) * pooled_height * pooled_width; + top_diff += (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { - gradient += top_diff_slice[ph * pooled_width + pw] * - (index == static_cast(rand_idx_slice[ph * pooled_width + pw])); + gradient += top_diff[ph * pooled_width + pw] * + (index == static_cast(rand_idx[ph * pooled_width + pw])); } } bottom_diff[index] = gradient; diff --git a/src/caffe/layers/prelu_layer.cpp b/src/caffe/layers/prelu_layer.cpp index 81831755512..7119a274dd3 100644 --- a/src/caffe/layers/prelu_layer.cpp +++ b/src/caffe/layers/prelu_layer.cpp @@ -45,8 +45,7 @@ void PReLULayer::LayerSetUp(const vector*>& bottom, // Propagate gradients to the parameters (as directed by backward pass). 
this->param_propagate_down_.resize(this->blobs_.size(), true); - multiplier_.Reshape(vector(1, bottom[0]->count(1))); - backward_buff_.Reshape(vector(1, bottom[0]->count(1))); + multiplier_.Reshape(vector(1, bottom[0]->count() / bottom[0]->num())); caffe_set(multiplier_.count(), Dtype(1), multiplier_.mutable_cpu_data()); } @@ -113,6 +112,7 @@ void PReLULayer::Backward_cpu(const vector*>& top, // keep top_diff unchanged. if (this->param_propagate_down_[0]) { Dtype* slope_diff = this->blobs_[0]->mutable_cpu_diff(); + caffe_set(this->blobs_[0]->count(), Dtype(0), slope_diff); for (int i = 0; i < count; ++i) { int c = (i / dim) % channels / div_factor; slope_diff[c] += top_diff[i] * bottom_data[i] * (bottom_data[i] <= 0); diff --git a/src/caffe/layers/prelu_layer.cu b/src/caffe/layers/prelu_layer.cu index e1f20048f60..fd0eda5d191 100644 --- a/src/caffe/layers/prelu_layer.cu +++ b/src/caffe/layers/prelu_layer.cu @@ -75,36 +75,38 @@ void PReLULayer::Backward_gpu(const vector*>& top, bottom_data = bottom_memory_.gpu_data(); } - // Propagate to param + // Propagte to param // Since to write bottom diff will affect top diff if top and bottom blobs // are identical (in-place computaion), we first compute param backward to // keep top_diff unchanged. 
if (this->param_propagate_down_[0]) { Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff(); + // slope_diff is set as 0, then accumulated over batches + caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), slope_diff); int cdim = channels * dim; Dtype dsum = 0.; for (int n = 0; n < bottom[0]->num(); ++n) { + Dtype* temp_buff = multiplier_.mutable_gpu_diff(); // compute element-wise diff // NOLINT_NEXT_LINE(whitespace/operators) - PReLUParamBackward<<<<>>( cdim, top_diff + top[0]->offset(n), - bottom_data + bottom[0]->offset(n), - backward_buff_.mutable_gpu_diff()); + bottom_data + bottom[0]->offset(n), multiplier_.mutable_gpu_diff()); CUDA_POST_KERNEL_CHECK; if (channel_shared_) { Dtype d; - caffe_gpu_dot(channels * dim, backward_buff_.gpu_diff(), + caffe_gpu_dot(channels * dim, multiplier_.gpu_diff(), multiplier_.gpu_data(), &d); dsum += d; } else { caffe_gpu_gemv(CblasNoTrans, channels, dim, 1., - backward_buff_.gpu_diff(), multiplier_.gpu_data(), 1., + multiplier_.gpu_diff(), multiplier_.gpu_data(), 1., slope_diff); } } if (channel_shared_) { - caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum), slope_diff); + caffe_gpu_set(this->blobs_[0]->count(), Dtype(dsum), slope_diff); } } // Propagate to bottom diff --git a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp index cc236fe1e8e..077d949981c 100644 --- a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp +++ b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp @@ -71,7 +71,7 @@ void SigmoidCrossEntropyLossLayer::Backward_cpu( } #ifdef CPU_ONLY -STUB_GPU_BACKWARD(SigmoidCrossEntropyLossLayer, Backward); +STUB_GPU(SigmoidCrossEntropyLossLayer); #endif INSTANTIATE_CLASS(SigmoidCrossEntropyLossLayer); diff --git a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu index 547fa80c72f..08f7f492297 100644 --- a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu +++ 
b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu @@ -8,6 +8,26 @@ namespace caffe { +template +void SigmoidCrossEntropyLossLayer::Forward_gpu( + const vector*>& bottom, const vector*>& top) { + // The forward pass computes the sigmoid outputs. + sigmoid_bottom_vec_[0] = bottom[0]; + sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_); + // Compute the loss (negative log likelihood) + const int count = bottom[0]->count(); + const int num = bottom[0]->num(); + // Stable version of loss computation from input data + const Dtype* input_data = bottom[0]->cpu_data(); + const Dtype* target = bottom[1]->cpu_data(); + Dtype loss = 0; + for (int i = 0; i < count; ++i) { + loss -= input_data[i] * (target[i] - (input_data[i] >= 0)) - + log(1 + exp(input_data[i] - 2 * input_data[i] * (input_data[i] >= 0))); + } + top[0]->mutable_cpu_data()[0] = loss / num; +} + template void SigmoidCrossEntropyLossLayer::Backward_gpu( const vector*>& top, const vector& propagate_down, @@ -31,7 +51,7 @@ void SigmoidCrossEntropyLossLayer::Backward_gpu( } } -INSTANTIATE_LAYER_GPU_BACKWARD(SigmoidCrossEntropyLossLayer); +INSTANTIATE_LAYER_GPU_FUNCS(SigmoidCrossEntropyLossLayer); } // namespace caffe diff --git a/src/caffe/layers/slice_layer.cu b/src/caffe/layers/slice_layer.cu index e8dc6cd98fc..a82b738f6f4 100644 --- a/src/caffe/layers/slice_layer.cu +++ b/src/caffe/layers/slice_layer.cu @@ -6,25 +6,6 @@ namespace caffe { -template -__global__ void Slice(const int nthreads, const Dtype* in_data, - const bool forward, const int num_slices, const int slice_size, - const int bottom_slice_axis, const int top_slice_axis, - const int offset_slice_axis, Dtype* out_data) { - CUDA_KERNEL_LOOP(index, nthreads) { - const int total_slice_size = slice_size * top_slice_axis; - const int slice_num = index / total_slice_size; - const int slice_index = index % total_slice_size; - const int bottom_index = slice_index + - (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size; - if 
(forward) { - out_data[index] = in_data[bottom_index]; - } else { - out_data[bottom_index] = in_data[index]; - } - } -} - template void SliceLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { @@ -32,16 +13,16 @@ void SliceLayer::Forward_gpu(const vector*>& bottom, int offset_slice_axis = 0; const Dtype* bottom_data = bottom[0]->gpu_data(); const int bottom_slice_axis = bottom[0]->shape(slice_axis_); - const bool kForward = true; for (int i = 0; i < top.size(); ++i) { Dtype* top_data = top[i]->mutable_gpu_data(); const int top_slice_axis = top[i]->shape(slice_axis_); - const int top_slice_size = top_slice_axis * slice_size_; - const int nthreads = top_slice_size * num_slices_; - Slice // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - nthreads, bottom_data, kForward, num_slices_, slice_size_, - bottom_slice_axis, top_slice_axis, offset_slice_axis, top_data); + for (int n = 0; n < num_slices_; ++n) { + const int top_offset = n * top_slice_axis * slice_size_; + const int bottom_offset = + (n * bottom_slice_axis + offset_slice_axis) * slice_size_; + caffe_copy(top_slice_axis * slice_size_, + bottom_data + bottom_offset, top_data + top_offset); + } offset_slice_axis += top_slice_axis; } } @@ -53,16 +34,16 @@ void SliceLayer::Backward_gpu(const vector*>& top, int offset_slice_axis = 0; Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int bottom_slice_axis = bottom[0]->shape(slice_axis_); - const bool kForward = false; for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); const int top_slice_axis = top[i]->shape(slice_axis_); - const int top_slice_size = top_slice_axis * slice_size_; - const int nthreads = top_slice_size * num_slices_; - Slice // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - nthreads, top_diff, kForward, num_slices_, slice_size_, - bottom_slice_axis, top_slice_axis, offset_slice_axis, bottom_diff); + for (int n = 0; n < num_slices_; ++n) { + const int top_offset = n * top_slice_axis * slice_size_; + 
const int bottom_offset = + (n * bottom_slice_axis + offset_slice_axis) * slice_size_; + caffe_copy(top_slice_axis * slice_size_, + top_diff + top_offset, bottom_diff + bottom_offset); + } offset_slice_axis += top_slice_axis; } } diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp index ebb8b5d28c2..888eec1d501 100644 --- a/src/caffe/net.cpp +++ b/src/caffe/net.cpp @@ -5,15 +5,12 @@ #include #include -#include "hdf5.h" - #include "caffe/common.hpp" #include "caffe/layer.hpp" #include "caffe/net.hpp" -#include "caffe/parallel.hpp" #include "caffe/proto/caffe.pb.h" -#include "caffe/util/hdf5.hpp" #include "caffe/util/insert_splits.hpp" +#include "caffe/util/io.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/util/upgrade_proto.hpp" @@ -22,14 +19,12 @@ namespace caffe { template -Net::Net(const NetParameter& param, const Net* root_net) - : root_net_(root_net) { +Net::Net(const NetParameter& param) { Init(param); } template -Net::Net(const string& param_file, Phase phase, const Net* root_net) - : root_net_(root_net) { +Net::Net(const string& param_file, Phase phase) { NetParameter param; ReadNetParamsFromTextFileOrDie(param_file, ¶m); param.mutable_state()->set_phase(phase); @@ -38,18 +33,14 @@ Net::Net(const string& param_file, Phase phase, const Net* root_net) template void Net::Init(const NetParameter& in_param) { - CHECK(Caffe::root_solver() || root_net_) - << "root_net_ needs to be set for all non-root solvers"; // Set phase from the state. phase_ = in_param.state().phase(); // Filter layers based on their include/exclude rules and // the current NetState. NetParameter filtered_param; FilterNet(in_param, &filtered_param); - if (Caffe::root_solver()) { - LOG(INFO) << "Initializing net from parameters: " << std::endl - << filtered_param.DebugString(); - } + LOG(INFO) << "Initializing net from parameters: " << std::endl + << filtered_param.DebugString(); // Create a copy of filtered_param with splits added where necessary. 
NetParameter param; InsertSplits(filtered_param, ¶m); @@ -73,8 +64,7 @@ void Net::Init(const NetParameter& in_param) { const int layer_id = -1; // inputs have fake layer ID -1 AppendTop(param, layer_id, input_id, &available_blobs, &blob_name_to_idx); } - DLOG_IF(INFO, Caffe::root_solver()) - << "Memory required for data: " << memory_used_ * sizeof(Dtype); + DLOG(INFO) << "Memory required for data: " << memory_used_ * sizeof(Dtype); // For each layer, set up its input and output bottom_vecs_.resize(param.layer_size()); top_vecs_.resize(param.layer_size()); @@ -83,34 +73,16 @@ void Net::Init(const NetParameter& in_param) { top_id_vecs_.resize(param.layer_size()); bottom_need_backward_.resize(param.layer_size()); for (int layer_id = 0; layer_id < param.layer_size(); ++layer_id) { - // For non-root solvers, whether this layer is shared from root_net_. - bool share_from_root = !Caffe::root_solver() - && root_net_->layers_[layer_id]->ShareInParallel(); // Inherit phase from net if unset. if (!param.layer(layer_id).has_phase()) { param.mutable_layer(layer_id)->set_phase(phase_); } // Setup layer. 
const LayerParameter& layer_param = param.layer(layer_id); - if (layer_param.propagate_down_size() > 0) { - CHECK_EQ(layer_param.propagate_down_size(), - layer_param.bottom_size()) - << "propagate_down param must be specified " - << "either 0 or bottom_size times "; - } - if (share_from_root) { - LOG(INFO) << "Sharing layer " << layer_param.name() << " from root net"; - layers_.push_back(root_net_->layers_[layer_id]); - layers_[layer_id]->SetShared(true); - } else { - layers_.push_back(LayerRegistry::CreateLayer(layer_param)); - } + layers_.push_back(LayerRegistry::CreateLayer(layer_param)); layer_names_.push_back(layer_param.name()); - if (Caffe::root_solver()) { - LOG(INFO) << "Creating Layer " << layer_param.name(); - } + LOG(INFO) << "Creating Layer " << layer_param.name(); bool need_backward = false; - // Figure out this layer's input and output for (int bottom_id = 0; bottom_id < layer_param.bottom_size(); ++bottom_id) { @@ -138,42 +110,20 @@ void Net::Init(const NetParameter& in_param) { } } // After this layer is connected, set it up. 
- if (share_from_root) { - // Set up size of top blobs using root_net_ - const vector*>& base_top = root_net_->top_vecs_[layer_id]; - const vector*>& this_top = this->top_vecs_[layer_id]; - for (int top_id = 0; top_id < base_top.size(); ++top_id) { - this_top[top_id]->ReshapeLike(*base_top[top_id]); - LOG(INFO) << "Created top blob " << top_id << " (shape: " - << this_top[top_id]->shape_string() << ") for shared layer " - << layer_param.name(); - } - } else { - layers_[layer_id]->SetUp(bottom_vecs_[layer_id], top_vecs_[layer_id]); - } - if (Caffe::root_solver()) { - LOG(INFO) << "Setting up " << layer_names_[layer_id]; - } + LOG(INFO) << "Setting up " << layer_names_[layer_id]; + layers_[layer_id]->SetUp(bottom_vecs_[layer_id], top_vecs_[layer_id]); for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) { if (blob_loss_weights_.size() <= top_id_vecs_[layer_id][top_id]) { blob_loss_weights_.resize(top_id_vecs_[layer_id][top_id] + 1, Dtype(0)); } blob_loss_weights_[top_id_vecs_[layer_id][top_id]] = layer->loss(top_id); - if (Caffe::root_solver()) { - LOG(INFO) << "Top shape: " - << top_vecs_[layer_id][top_id]->shape_string(); - } + LOG(INFO) << "Top shape: " << top_vecs_[layer_id][top_id]->shape_string(); if (layer->loss(top_id)) { - if (Caffe::root_solver()) { - LOG(INFO) << " with loss weight " << layer->loss(top_id); - } + LOG(INFO) << " with loss weight " << layer->loss(top_id); } memory_used_ += top_vecs_[layer_id][top_id]->count(); } - if (Caffe::root_solver()) { - DLOG(INFO) << "Memory required for data: " - << memory_used_ * sizeof(Dtype); - } + DLOG(INFO) << "Memory required for data: " << memory_used_ * sizeof(Dtype); const int param_size = layer_param.param_size(); const int num_param_blobs = layers_[layer_id]->blobs().size(); CHECK_LE(param_size, num_param_blobs) @@ -182,7 +132,7 @@ void Net::Init(const NetParameter& in_param) { for (int param_id = 0; param_id < num_param_blobs; ++param_id) { const ParamSpec* param_spec = (param_id < 
param_size) ? &layer_param.param(param_id) : &default_param_spec; - const bool param_need_backward = param_spec->lr_mult() != 0; + const bool param_need_backward = param_spec->lr_mult() > 0; need_backward |= param_need_backward; layers_[layer_id]->set_param_propagate_down(param_id, param_need_backward); @@ -201,45 +151,23 @@ void Net::Init(const NetParameter& in_param) { // Go through the net backwards to determine which blobs contribute to the // loss. We can skip backward computation for blobs that don't contribute // to the loss. - // Also checks if all bottom blobs don't need backward computation (possible - // because the skip_propagate_down param) and so we can skip bacward - // computation for the entire layer set blobs_under_loss; - set blobs_skip_backp; for (int layer_id = layers_.size() - 1; layer_id >= 0; --layer_id) { bool layer_contributes_loss = false; - bool layer_skip_propagate_down = true; for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) { const string& blob_name = blob_names_[top_id_vecs_[layer_id][top_id]]; if (layers_[layer_id]->loss(top_id) || (blobs_under_loss.find(blob_name) != blobs_under_loss.end())) { layer_contributes_loss = true; - } - if (blobs_skip_backp.find(blob_name) == blobs_skip_backp.end()) { - layer_skip_propagate_down = false; - } - if (layer_contributes_loss && !layer_skip_propagate_down) break; - } - // If this layer can skip backward computation, also all his bottom blobs - // don't need backpropagation - if (layer_need_backward_[layer_id] && layer_skip_propagate_down) { - layer_need_backward_[layer_id] = false; - for (int bottom_id = 0; bottom_id < bottom_vecs_[layer_id].size(); - ++bottom_id) { - bottom_need_backward_[layer_id][bottom_id] = false; } } if (!layer_contributes_loss) { layer_need_backward_[layer_id] = false; } if (layer_need_backward_[layer_id]) { - if (Caffe::root_solver()) { - LOG(INFO) << layer_names_[layer_id] << " needs backward computation."; - } + LOG(INFO) << layer_names_[layer_id] 
<< " needs backward computation."; } else { - if (Caffe::root_solver()) { - LOG(INFO) << layer_names_[layer_id] - << " does not need backward computation."; - } + LOG(INFO) << layer_names_[layer_id] + << " does not need backward computation."; } for (int bottom_id = 0; bottom_id < bottom_vecs_[layer_id].size(); ++bottom_id) { @@ -250,11 +178,6 @@ void Net::Init(const NetParameter& in_param) { } else { bottom_need_backward_[layer_id][bottom_id] = false; } - if (!bottom_need_backward_[layer_id][bottom_id]) { - const string& blob_name = - blob_names_[bottom_id_vecs_[layer_id][bottom_id]]; - blobs_skip_backp.insert(blob_name); - } } } // Handle force_backward if needed. @@ -279,9 +202,7 @@ void Net::Init(const NetParameter& in_param) { // In the end, all remaining blobs are considered output blobs. for (set::iterator it = available_blobs.begin(); it != available_blobs.end(); ++it) { - if (Caffe::root_solver()) { - LOG(INFO) << "This network produces output " << *it; - } + LOG(INFO) << "This network produces output " << *it; net_output_blobs_.push_back(blobs_[blob_name_to_idx[*it]].get()); net_output_blob_indices_.push_back(blob_name_to_idx[*it]); } @@ -291,12 +212,10 @@ void Net::Init(const NetParameter& in_param) { for (size_t layer_id = 0; layer_id < layer_names_.size(); ++layer_id) { layer_names_index_[layer_names_[layer_id]] = layer_id; } - ShareWeights(); + GetLearningRateAndWeightDecay(); debug_info_ = param.debug_info(); - if (Caffe::root_solver()) { - LOG(INFO) << "Network initialization done."; - LOG(INFO) << "Memory required for data: " << memory_used_ * sizeof(Dtype); - } + LOG(INFO) << "Network initialization done."; + LOG(INFO) << "Memory required for data: " << memory_used_ * sizeof(Dtype); } template @@ -335,33 +254,27 @@ bool Net::StateMeetsRule(const NetState& state, // Check whether the rule is broken due to phase. 
if (rule.has_phase()) { if (rule.phase() != state.phase()) { - if (Caffe::root_solver()) { - LOG(INFO) << "The NetState phase (" << state.phase() - << ") differed from the phase (" << rule.phase() - << ") specified by a rule in layer " << layer_name; - } + LOG(INFO) << "The NetState phase (" << state.phase() + << ") differed from the phase (" << rule.phase() + << ") specified by a rule in layer " << layer_name; return false; } } // Check whether the rule is broken due to min level. if (rule.has_min_level()) { if (state.level() < rule.min_level()) { - if (Caffe::root_solver()) { - LOG(INFO) << "The NetState level (" << state.level() - << ") is above the min_level (" << rule.min_level() - << ") specified by a rule in layer " << layer_name; - } + LOG(INFO) << "The NetState level (" << state.level() + << ") is above the min_level (" << rule.min_level() + << ") specified by a rule in layer " << layer_name; return false; } } // Check whether the rule is broken due to max level. if (rule.has_max_level()) { if (state.level() > rule.max_level()) { - if (Caffe::root_solver()) { - LOG(INFO) << "The NetState level (" << state.level() - << ") is above the max_level (" << rule.max_level() - << ") specified by a rule in layer " << layer_name; - } + LOG(INFO) << "The NetState level (" << state.level() + << ") is above the max_level (" << rule.max_level() + << ") specified by a rule in layer " << layer_name; return false; } } @@ -374,10 +287,8 @@ bool Net::StateMeetsRule(const NetState& state, if (rule.stage(i) == state.stage(j)) { has_stage = true; } } if (!has_stage) { - if (Caffe::root_solver()) { - LOG(INFO) << "The NetState did not contain stage '" << rule.stage(i) - << "' specified by a rule in layer " << layer_name; - } + LOG(INFO) << "The NetState did not contain stage '" << rule.stage(i) + << "' specified by a rule in layer " << layer_name; return false; } } @@ -390,10 +301,8 @@ bool Net::StateMeetsRule(const NetState& state, if (rule.not_stage(i) == state.stage(j)) { 
has_stage = true; } } if (has_stage) { - if (Caffe::root_solver()) { - LOG(INFO) << "The NetState contained a not_stage '" << rule.not_stage(i) - << "' specified by a rule in layer " << layer_name; - } + LOG(INFO) << "The NetState contained a not_stage '" << rule.not_stage(i) + << "' specified by a rule in layer " << layer_name; return false; } } @@ -415,25 +324,20 @@ void Net::AppendTop(const NetParameter& param, const int layer_id, if (blob_name_to_idx && layer_param && layer_param->bottom_size() > top_id && blob_name == layer_param->bottom(top_id)) { // In-place computation - if (Caffe::root_solver()) { - LOG(INFO) << layer_param->name() << " -> " << blob_name << " (in-place)"; - } + LOG(INFO) << layer_param->name() << " -> " << blob_name << " (in-place)"; top_vecs_[layer_id].push_back(blobs_[(*blob_name_to_idx)[blob_name]].get()); top_id_vecs_[layer_id].push_back((*blob_name_to_idx)[blob_name]); } else if (blob_name_to_idx && blob_name_to_idx->find(blob_name) != blob_name_to_idx->end()) { // If we are not doing in-place computation but have duplicated blobs, // raise an error. - LOG(FATAL) << "Top blob '" << blob_name - << "' produced by multiple sources."; + LOG(FATAL) << "Duplicate blobs produced by multiple sources."; } else { // Normal output. - if (Caffe::root_solver()) { - if (layer_param) { - LOG(INFO) << layer_param->name() << " -> " << blob_name; - } else { - LOG(INFO) << "Input " << top_id << " -> " << blob_name; - } + if (layer_param) { + LOG(INFO) << layer_param->name() << " -> " << blob_name; + } else { + LOG(INFO) << "Input " << top_id << " -> " << blob_name; } shared_ptr > blob_pointer(new Blob()); const int blob_id = blobs_.size(); @@ -463,28 +367,21 @@ void Net::AppendTop(const NetParameter& param, const int layer_id, // Helper for Net::Init: add a new bottom blob to the net. 
template -int Net::AppendBottom(const NetParameter& param, const int layer_id, - const int bottom_id, set* available_blobs, - map* blob_name_to_idx) { +int Net::AppendBottom(const NetParameter& param, + const int layer_id, const int bottom_id, + set* available_blobs, map* blob_name_to_idx) { const LayerParameter& layer_param = param.layer(layer_id); const string& blob_name = layer_param.bottom(bottom_id); if (available_blobs->find(blob_name) == available_blobs->end()) { - LOG(FATAL) << "Unknown bottom blob '" << blob_name << "' (layer '" - << layer_param.name() << "', bottom index " << bottom_id << ")"; + LOG(FATAL) << "Unknown blob input " << blob_name + << " (at index " << bottom_id << ") to layer " << layer_id; } const int blob_id = (*blob_name_to_idx)[blob_name]; - if (Caffe::root_solver()) { - LOG(INFO) << layer_names_[layer_id] << " <- " << blob_name; - } + LOG(INFO) << layer_names_[layer_id] << " <- " << blob_name; bottom_vecs_[layer_id].push_back(blobs_[blob_id].get()); bottom_id_vecs_[layer_id].push_back(blob_id); available_blobs->erase(blob_name); - bool propagate_down = true; - // Check if the backpropagation on bottom_id should be skipped - if (layer_param.propagate_down_size() > 0) - propagate_down = layer_param.propagate_down(bottom_id); - const bool need_backward = blob_need_backward_[blob_id] && - propagate_down; + const bool need_backward = blob_need_backward_[blob_id]; bottom_need_backward_[layer_id].push_back(need_backward); return blob_id; } @@ -507,25 +404,15 @@ void Net::AppendParam(const NetParameter& param, const int layer_id, params_.push_back(layers_[layer_id]->blobs()[param_id]); param_id_vecs_[layer_id].push_back(net_param_id); param_layer_indices_.push_back(make_pair(layer_id, param_id)); - ParamSpec default_param_spec; - const ParamSpec* param_spec = (layer_param.param_size() > param_id) ? 
- &layer_param.param(param_id) : &default_param_spec; if (!param_size || !param_name.size() || (param_name.size() && param_names_index_.find(param_name) == param_names_index_.end())) { // This layer "owns" this parameter blob -- it is either anonymous // (i.e., not given a param_name) or explicitly given a name that we // haven't already seen. param_owners_.push_back(-1); - if (param_name.size()) { + if (param_size) { param_names_index_[param_name] = net_param_id; } - const int learnable_param_id = learnable_params_.size(); - learnable_params_.push_back(params_[net_param_id].get()); - learnable_param_ids_.push_back(learnable_param_id); - has_params_lr_.push_back(param_spec->has_lr_mult()); - has_params_decay_.push_back(param_spec->has_decay_mult()); - params_lr_.push_back(param_spec->lr_mult()); - params_weight_decay_.push_back(param_spec->decay_mult()); } else { // Named param blob with name we've seen before: share params const int owner_net_param_id = param_names_index_[param_name]; @@ -534,10 +421,9 @@ void Net::AppendParam(const NetParameter& param, const int layer_id, param_layer_indices_[owner_net_param_id]; const int owner_layer_id = owner_index.first; const int owner_param_id = owner_index.second; - LOG_IF(INFO, Caffe::root_solver()) << "Sharing parameters '" << param_name - << "' owned by " - << "layer '" << layer_names_[owner_layer_id] << "', param " - << "index " << owner_param_id; + LOG(INFO) << "Sharing parameters '" << param_name << "' owned by " + << "layer '" << layer_names_[owner_layer_id] << "', param " + << "index " << owner_param_id; Blob* this_blob = layers_[layer_id]->blobs()[param_id].get(); Blob* owner_blob = layers_[owner_layer_id]->blobs()[owner_param_id].get(); @@ -546,40 +432,28 @@ void Net::AppendParam(const NetParameter& param, const int layer_id, ParamSpec_DimCheckMode_PERMISSIVE)) { // Permissive dimension checking -- only check counts are the same. 
CHECK_EQ(this_blob->count(), owner_blob->count()) - << "Cannot share param '" << param_name << "' owned by layer '" - << layer_names_[owner_layer_id] << "' with layer '" - << layer_names_[layer_id] << "'; count mismatch. Owner layer param " - << "shape is " << owner_blob->shape_string() << "; sharing layer " - << "shape is " << this_blob->shape_string(); + << "Shared parameter blobs must have the same count."; } else { // Strict dimension checking -- all dims must be the same. - CHECK(this_blob->shape() == owner_blob->shape()) - << "Cannot share param '" << param_name << "' owned by layer '" - << layer_names_[owner_layer_id] << "' with layer '" - << layer_names_[layer_id] << "'; shape mismatch. Owner layer param " - << "shape is " << owner_blob->shape_string() << "; sharing layer " - << "expects shape " << this_blob->shape_string(); - } - const int learnable_param_id = learnable_param_ids_[owner_net_param_id]; - learnable_param_ids_.push_back(learnable_param_id); - if (param_spec->has_lr_mult()) { - if (has_params_lr_[learnable_param_id]) { - CHECK_EQ(param_spec->lr_mult(), params_lr_[learnable_param_id]) - << "Shared param '" << param_name << "' has mismatched lr_mult."; - } else { - has_params_lr_[learnable_param_id] = true; - params_lr_[learnable_param_id] = param_spec->lr_mult(); - } + CHECK(this_blob->shape() == owner_blob->shape()); } - if (param_spec->has_decay_mult()) { - if (has_params_decay_[learnable_param_id]) { - CHECK_EQ(param_spec->decay_mult(), - params_weight_decay_[learnable_param_id]) - << "Shared param '" << param_name << "' has mismatched decay_mult."; - } else { - has_params_decay_[learnable_param_id] = true; - params_weight_decay_[learnable_param_id] = param_spec->decay_mult(); - } + layers_[layer_id]->blobs()[param_id]->ShareData( + *layers_[owner_layer_id]->blobs()[owner_param_id]); + } +} + +template +void Net::GetLearningRateAndWeightDecay() { + LOG(INFO) << "Collecting Learning Rate and Weight Decay."; + ParamSpec default_param_spec; + 
for (int i = 0; i < layers_.size(); ++i) { + vector > >& layer_blobs = layers_[i]->blobs(); + for (int j = 0; j < layer_blobs.size(); ++j) { + const ParamSpec* param_spec = + (layers_[i]->layer_param().param_size() > j) ? + &layers_[i]->layer_param().param(j) : &default_param_spec; + params_lr_.push_back(param_spec->lr_mult()); + params_weight_decay_.push_back(param_spec->decay_mult()); } } } @@ -596,6 +470,7 @@ Dtype Net::ForwardFromTo(int start, int end) { } for (int i = start; i <= end; ++i) { // LOG(ERROR) << "Forwarding " << layer_names_[i]; + layers_[i]->Reshape(bottom_vecs_[i], top_vecs_[i]); Dtype layer_loss = layers_[i]->Forward(bottom_vecs_[i], top_vecs_[i]); loss += layer_loss; if (debug_info_) { ForwardDebugInfo(i); } @@ -672,10 +547,8 @@ void Net::InputDebugInfo(const int input_id) { const Blob& blob = *net_input_blobs_[input_id]; const string& blob_name = blob_names_[net_input_blob_indices_[input_id]]; const Dtype data_abs_val_mean = blob.asum_data() / blob.count(); - if (Caffe::root_solver()) { - LOG(INFO) << " [Forward] " - << "Input " << blob_name << " data: " << data_abs_val_mean; - } + LOG(INFO) << " [Forward] " + << "Input " << blob_name << " data: " << data_abs_val_mean; } template @@ -684,12 +557,9 @@ void Net::ForwardDebugInfo(const int layer_id) { const Blob& blob = *top_vecs_[layer_id][top_id]; const string& blob_name = blob_names_[top_id_vecs_[layer_id][top_id]]; const Dtype data_abs_val_mean = blob.asum_data() / blob.count(); - if (Caffe::root_solver()) { - LOG(INFO) << " [Forward] " - << "Layer " << layer_names_[layer_id] - << ", top blob " << blob_name - << " data: " << data_abs_val_mean; - } + LOG(INFO) << " [Forward] " + << "Layer " << layer_names_[layer_id] << ", top blob " << blob_name + << " data: " << data_abs_val_mean; } for (int param_id = 0; param_id < layers_[layer_id]->blobs().size(); ++param_id) { @@ -697,12 +567,9 @@ void Net::ForwardDebugInfo(const int layer_id) { const int net_param_id = 
param_id_vecs_[layer_id][param_id]; const string& blob_name = param_display_names_[net_param_id]; const Dtype data_abs_val_mean = blob.asum_data() / blob.count(); - if (Caffe::root_solver()) { - LOG(INFO) << " [Forward] " - << "Layer " << layer_names_[layer_id] - << ", param blob " << blob_name - << " data: " << data_abs_val_mean; - } + LOG(INFO) << " [Forward] " + << "Layer " << layer_names_[layer_id] << ", param blob " << blob_name + << " data: " << data_abs_val_mean; } } @@ -714,24 +581,18 @@ void Net::BackwardDebugInfo(const int layer_id) { const Blob& blob = *bottom_vec[bottom_id]; const string& blob_name = blob_names_[bottom_id_vecs_[layer_id][bottom_id]]; const Dtype diff_abs_val_mean = blob.asum_diff() / blob.count(); - if (Caffe::root_solver()) { - LOG(INFO) << " [Backward] " - << "Layer " << layer_names_[layer_id] - << ", bottom blob " << blob_name - << " diff: " << diff_abs_val_mean; - } + LOG(INFO) << " [Backward] " + << "Layer " << layer_names_[layer_id] << ", bottom blob " << blob_name + << " diff: " << diff_abs_val_mean; } for (int param_id = 0; param_id < layers_[layer_id]->blobs().size(); ++param_id) { if (!layers_[layer_id]->param_propagate_down(param_id)) { continue; } const Blob& blob = *layers_[layer_id]->blobs()[param_id]; const Dtype diff_abs_val_mean = blob.asum_diff() / blob.count(); - if (Caffe::root_solver()) { - LOG(INFO) << " [Backward] " - << "Layer " << layer_names_[layer_id] - << ", param blob " << param_id - << " diff: " << diff_abs_val_mean; - } + LOG(INFO) << " [Backward] " + << "Layer " << layer_names_[layer_id] << ", param blob " << param_id + << " diff: " << diff_abs_val_mean; } } @@ -744,22 +605,17 @@ void Net::UpdateDebugInfo(const int param_id) { const Dtype diff_abs_val_mean = blob.asum_diff() / blob.count(); if (param_owner < 0) { const Dtype data_abs_val_mean = blob.asum_data() / blob.count(); - if (Caffe::root_solver()) { - LOG(INFO) << " [Update] Layer " << layer_name - << ", param " << param_display_name - << " data: " 
<< data_abs_val_mean - << "; diff: " << diff_abs_val_mean; - } + LOG(INFO) << " [Update] Layer " << layer_name + << ", param " << param_display_name + << " data: " << data_abs_val_mean << "; diff: " << diff_abs_val_mean; } else { const string& owner_layer_name = layer_names_[param_layer_indices_[param_owner].first]; - if (Caffe::root_solver()) { - LOG(INFO) << " [Update] Layer " << layer_name - << ", param blob " << param_display_name - << " (owned by layer " << owner_layer_name << ", " << "param " - << param_display_names_[param_owners_[param_id]] << ")" - << " diff: " << diff_abs_val_mean; - } + LOG(INFO) << " [Update] Layer " << layer_name + << ", param blob " << param_display_name + << " (owned by layer " << owner_layer_name << ", " + << "param " << param_display_names_[param_owners_[param_id]] << ")" + << " diff: " << diff_abs_val_mean; } } @@ -785,11 +641,7 @@ void Net::ShareTrainedLayersWith(const Net* other) { << "Incompatible number of blobs for layer " << source_layer_name; for (int j = 0; j < target_blobs.size(); ++j) { Blob* source_blob = source_layer->blobs()[j].get(); - CHECK(target_blobs[j]->shape() == source_blob->shape()) - << "Cannot share param " << j << " weights from layer '" - << source_layer_name << "'; shape mismatch. 
Source param shape is " - << source_blob->shape_string() << "; target param shape is " - << target_blobs[j]->shape_string(); + CHECK(target_blobs[j]->shape() == source_blob->shape()); target_blobs[j]->ShareData(*source_blob); } } @@ -810,17 +662,18 @@ void Net::Backward() { BackwardFromTo(layers_.size() - 1, 0); if (debug_info_) { Dtype asum_data = 0, asum_diff = 0, sumsq_data = 0, sumsq_diff = 0; - for (int i = 0; i < learnable_params_.size(); ++i) { - asum_data += learnable_params_[i]->asum_data(); - asum_diff += learnable_params_[i]->asum_diff(); - sumsq_data += learnable_params_[i]->sumsq_data(); - sumsq_diff += learnable_params_[i]->sumsq_diff(); + for (int i = 0; i < params_.size(); ++i) { + if (param_owners_[i] >= 0) { continue; } + asum_data += params_[i]->asum_data(); + asum_diff += params_[i]->asum_diff(); + sumsq_data += params_[i]->sumsq_data(); + sumsq_diff += params_[i]->sumsq_diff(); } const Dtype l2norm_data = std::sqrt(sumsq_data); const Dtype l2norm_diff = std::sqrt(sumsq_diff); LOG(ERROR) << " [Backward] All net params (data, diff): " - << "L1 norm = (" << asum_data << ", " << asum_diff << "); " - << "L2 norm = (" << l2norm_data << ", " << l2norm_diff << ")"; + << "L1 norm = (" << asum_data << ", " << asum_diff << "); " + << "L2 norm = (" << l2norm_data << ", " << l2norm_diff << ")"; } } @@ -852,17 +705,6 @@ void Net::CopyTrainedLayersFrom(const NetParameter& param) { CHECK_EQ(target_blobs.size(), source_layer.blobs_size()) << "Incompatible number of blobs for layer " << source_layer_name; for (int j = 0; j < target_blobs.size(); ++j) { - if (!target_blobs[j]->ShapeEquals(source_layer.blobs(j))) { - Blob source_blob; - const bool kReshape = true; - source_blob.FromProto(source_layer.blobs(j), kReshape); - LOG(FATAL) << "Cannot copy param " << j << " weights from layer '" - << source_layer_name << "'; shape mismatch. Source param shape is " - << source_blob.shape_string() << "; target param shape is " - << target_blobs[j]->shape_string() << ". 
" - << "To learn this layer's parameters from scratch rather than " - << "copying from a saved net, rename the layer."; - } const bool kReshape = false; target_blobs[j]->FromProto(source_layer.blobs(j), kReshape); } @@ -871,72 +713,11 @@ void Net::CopyTrainedLayersFrom(const NetParameter& param) { template void Net::CopyTrainedLayersFrom(const string trained_filename) { - if (trained_filename.size() >= 3 && - trained_filename.compare(trained_filename.size() - 3, 3, ".h5") == 0) { - CopyTrainedLayersFromHDF5(trained_filename); - } else { - CopyTrainedLayersFromBinaryProto(trained_filename); - } -} - -template -void Net::CopyTrainedLayersFromBinaryProto( - const string trained_filename) { NetParameter param; ReadNetParamsFromBinaryFileOrDie(trained_filename, ¶m); CopyTrainedLayersFrom(param); } -template -void Net::CopyTrainedLayersFromHDF5(const string trained_filename) { - hid_t file_hid = H5Fopen(trained_filename.c_str(), H5F_ACC_RDONLY, - H5P_DEFAULT); - CHECK_GE(file_hid, 0) << "Couldn't open " << trained_filename; - hid_t data_hid = H5Gopen2(file_hid, "data", H5P_DEFAULT); - CHECK_GE(data_hid, 0) << "Error reading weights from " << trained_filename; - int num_layers = hdf5_get_num_links(data_hid); - for (int i = 0; i < num_layers; ++i) { - string source_layer_name = hdf5_get_name_by_idx(data_hid, i); - if (!layer_names_index_.count(source_layer_name)) { - DLOG(INFO) << "Ignoring source layer " << source_layer_name; - continue; - } - int target_layer_id = layer_names_index_[source_layer_name]; - DLOG(INFO) << "Copying source layer " << source_layer_name; - vector > >& target_blobs = - layers_[target_layer_id]->blobs(); - hid_t layer_hid = H5Gopen2(data_hid, source_layer_name.c_str(), - H5P_DEFAULT); - CHECK_GE(layer_hid, 0) - << "Error reading weights from " << trained_filename; - // Check that source layer doesn't have more params than target layer - int num_source_params = hdf5_get_num_links(layer_hid); - CHECK_LE(num_source_params, target_blobs.size()) - << 
"Incompatible number of blobs for layer " << source_layer_name; - for (int j = 0; j < target_blobs.size(); ++j) { - ostringstream oss; - oss << j; - string dataset_name = oss.str(); - int target_net_param_id = param_id_vecs_[target_layer_id][j]; - if (!H5Lexists(layer_hid, dataset_name.c_str(), H5P_DEFAULT)) { - // Target param doesn't exist in source weights... - if (param_owners_[target_net_param_id] != -1) { - // ...but it's weight-shared in target, so that's fine. - continue; - } else { - LOG(FATAL) << "Incompatible number of blobs for layer " - << source_layer_name; - } - } - hdf5_load_nd_dataset(layer_hid, dataset_name.c_str(), 0, kMaxBlobAxes, - target_blobs[j].get()); - } - H5Gclose(layer_hid); - } - H5Gclose(data_hid); - H5Fclose(file_hid); -} - template void Net::ToProto(NetParameter* param, bool write_diff) const { param->Clear(); @@ -948,101 +729,51 @@ void Net::ToProto(NetParameter* param, bool write_diff) const { DLOG(INFO) << "Serializing " << layers_.size() << " layers"; for (int i = 0; i < layers_.size(); ++i) { LayerParameter* layer_param = param->add_layer(); - layers_[i]->ToProto(layer_param, write_diff); - } -} - -template -void Net::ToHDF5(const string& filename, bool write_diff) const { - hid_t file_hid = H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, - H5P_DEFAULT); - CHECK_GE(file_hid, 0) - << "Couldn't open " << filename << " to save weights."; - hid_t data_hid = H5Gcreate2(file_hid, "data", H5P_DEFAULT, H5P_DEFAULT, - H5P_DEFAULT); - CHECK_GE(data_hid, 0) << "Error saving weights to " << filename << "."; - hid_t diff_hid = -1; - if (write_diff) { - diff_hid = H5Gcreate2(file_hid, "diff", H5P_DEFAULT, H5P_DEFAULT, - H5P_DEFAULT); - CHECK_GE(diff_hid, 0) << "Error saving weights to " << filename << "."; - } - for (int layer_id = 0; layer_id < layers_.size(); ++layer_id) { - const LayerParameter& layer_param = layers_[layer_id]->layer_param(); - string layer_name = layer_param.name(); - hid_t layer_data_hid = H5Gcreate2(data_hid, 
layer_name.c_str(), - H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK_GE(layer_data_hid, 0) - << "Error saving weights to " << filename << "."; - hid_t layer_diff_hid = -1; - if (write_diff) { - layer_diff_hid = H5Gcreate2(diff_hid, layer_name.c_str(), - H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - CHECK_GE(layer_diff_hid, 0) - << "Error saving weights to " << filename << "."; - } - int num_params = layers_[layer_id]->blobs().size(); - for (int param_id = 0; param_id < num_params; ++param_id) { - ostringstream dataset_name; - dataset_name << param_id; - const int net_param_id = param_id_vecs_[layer_id][param_id]; - if (param_owners_[net_param_id] == -1) { - // Only save params that own themselves - hdf5_save_nd_dataset(layer_data_hid, dataset_name.str(), - *params_[net_param_id]); - } - if (write_diff) { - // Write diffs regardless of weight-sharing - hdf5_save_nd_dataset(layer_diff_hid, dataset_name.str(), - *params_[net_param_id], true); - } + for (int j = 0; j < bottom_id_vecs_[i].size(); ++j) { + layer_param->add_bottom(blob_names_[bottom_id_vecs_[i][j]]); } - H5Gclose(layer_data_hid); - if (write_diff) { - H5Gclose(layer_diff_hid); + for (int j = 0; j < top_id_vecs_[i].size(); ++j) { + layer_param->add_top(blob_names_[top_id_vecs_[i][j]]); } + layers_[i]->ToProto(layer_param, write_diff); } - H5Gclose(data_hid); - if (write_diff) { - H5Gclose(diff_hid); - } - H5Fclose(file_hid); } template void Net::Update() { - for (int i = 0; i < learnable_params_.size(); ++i) { - learnable_params_[i]->Update(); - } -} - -template -void Net::ClearParamDiffs() { - for (int i = 0; i < learnable_params_.size(); ++i) { - Blob* blob = learnable_params_[i]; + // First, accumulate the diffs of any shared parameters into their owner's + // diff. (Assumes that the learning rate, weight decay, etc. have already been + // accounted for in the current diff.) 
+ for (int i = 0; i < params_.size(); ++i) { + if (param_owners_[i] < 0) { continue; } + if (debug_info_) { UpdateDebugInfo(i); } + const int count = params_[i]->count(); + const Dtype* this_diff; + Dtype* owner_diff; switch (Caffe::mode()) { case Caffe::CPU: - caffe_set(blob->count(), static_cast(0), - blob->mutable_cpu_diff()); + this_diff = params_[i]->cpu_diff(); + owner_diff = params_[param_owners_[i]]->mutable_cpu_diff(); + caffe_add(count, this_diff, owner_diff, owner_diff); break; case Caffe::GPU: #ifndef CPU_ONLY - caffe_gpu_set(blob->count(), static_cast(0), - blob->mutable_gpu_diff()); + this_diff = params_[i]->gpu_diff(); + owner_diff = params_[param_owners_[i]]->mutable_gpu_diff(); + caffe_gpu_add(count, this_diff, owner_diff, owner_diff); #else NO_GPU; #endif break; + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); } } -} - -template -void Net::ShareWeights() { + // Now, update the owned parameters. for (int i = 0; i < params_.size(); ++i) { - if (param_owners_[i] < 0) { continue; } - params_[i]->ShareData(*params_[param_owners_[i]]); - params_[i]->ShareDiff(*params_[param_owners_[i]]); + if (param_owners_[i] >= 0) { continue; } + if (debug_info_) { UpdateDebugInfo(i); } + params_[i]->Update(); } } diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index f52c941b05e..099f2158120 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -11,8 +11,6 @@ message BlobProto { optional BlobShape shape = 7; repeated float data = 5 [packed = true]; repeated float diff = 6 [packed = true]; - repeated double double_data = 8 [packed = true]; - repeated double double_diff = 9 [packed = true]; // 4D dimensions -- deprecated. Use "shape" instead. optional int32 num = 1 [default = 0]; @@ -51,14 +49,6 @@ message FillerParameter { // The expected number of non-zero output weights for a given input in // Gaussian filler -- the default -1 means don't perform sparsification. 
optional int32 sparse = 7 [default = -1]; - // Normalize the filler variance by fan_in, fan_out, or their average. - // Applies to 'xavier' and 'msra' fillers. - enum VarianceNorm { - FAN_IN = 0; - FAN_OUT = 1; - AVERAGE = 2; - } - optional VarianceNorm variance_norm = 8 [default = FAN_IN]; } message NetParameter { @@ -98,7 +88,7 @@ message NetParameter { // NOTE // Update the next available ID when you add a new SolverParameter field. // -// SolverParameter next available ID: 40 (last added: momentum2) +// SolverParameter next available ID: 36 (last added: clip_gradients) message SolverParameter { ////////////////////////////////////////////////////////////////////////////// // Specifying the train and test networks @@ -151,25 +141,7 @@ message SolverParameter { // Display the loss averaged over the last average_loss iterations optional int32 average_loss = 33 [default = 1]; optional int32 max_iter = 7; // the maximum number of iterations - // accumulate gradients over `iter_size` x `batch_size` instances - optional int32 iter_size = 36 [default = 1]; - - // The learning rate decay policy. The currently implemented learning rate - // policies are as follows: - // - fixed: always return base_lr. - // - step: return base_lr * gamma ^ (floor(iter / step)) - // - exp: return base_lr * gamma ^ iter - // - inv: return base_lr * (1 + gamma * iter) ^ (- power) - // - multistep: similar to step but it allows non uniform steps defined by - // stepvalue - // - poly: the effective learning rate follows a polynomial decay, to be - // zero by the max_iter. return base_lr (1 - iter/max_iter) ^ (power) - // - sigmoid: the effective learning rate follows a sigmod decay - // return base_lr ( 1/(1 + exp(-gamma * (iter - stepsize)))) - // - // where base_lr, max_iter, gamma, step, stepvalue and power are defined - // in the solver parameter protocol buffer, and iter is the current iteration. 
- optional string lr_policy = 8; + optional string lr_policy = 8; // The learning rate decay policy. optional float gamma = 9; // The parameter to compute the learning rate. optional float power = 10; // The parameter to compute the learning rate. optional float momentum = 11; // The momentum value. @@ -191,11 +163,6 @@ message SolverParameter { // whether to snapshot diff in the results or not. Snapshotting diff will help // debugging but the final protocol buffer size will be much larger. optional bool snapshot_diff = 16 [default = false]; - enum SnapshotFormat { - HDF5 = 0; - BINARYPROTO = 1; - } - optional SnapshotFormat snapshot_format = 37 [default = BINARYPROTO]; // the mode solver will use: 0 for CPU and 1 for GPU. Use GPU in default. enum SolverMode { CPU = 0; @@ -214,19 +181,10 @@ message SolverParameter { SGD = 0; NESTEROV = 1; ADAGRAD = 2; - RMSPROP = 3; - ADADELTA = 4; - ADAM = 5; } optional SolverType solver_type = 30 [default = SGD]; - // numerical stability for RMSProp, AdaGrad and AdaDelta and Adam + // numerical stability for AdaGrad optional float delta = 31 [default = 1e-8]; - // parameters for the Adam solver - optional float momentum2 = 39 [default = 0.999]; - - // RMSProp decay value - // MeanSquare(t) = rms_decay*MeanSquare(t-1) + (1-rms_decay)*SquareGradient(t) - optional float rms_decay = 38; // If true, print information about the state of the net that may help with // debugging learning problems. @@ -301,7 +259,7 @@ message ParamSpec { // NOTE // Update the next available ID when you add a new LayerParameter field. // -// LayerParameter next available layer-specific ID: 139 (last added: tile_param) +// LayerParameter next available layer-specific ID: 132 (last added: prelu_param) message LayerParameter { optional string name = 1; // the layer name optional string type = 2; // the layer type @@ -323,10 +281,6 @@ message LayerParameter { // The blobs containing the numeric parameters of the layer. 
repeated BlobProto blobs = 7; - // Specifies on which bottoms the backpropagation should be skipped. - // The size must be either 0 or equal to the number of bottoms. - repeated bool propagate_down = 11; - // Rules controlling whether and when a layer is included in the network, // based on the current NetState. You may specify a non-zero number of rules // to include OR exclude, but not both. If no include or exclude rules are @@ -357,16 +311,13 @@ message LayerParameter { optional DropoutParameter dropout_param = 108; optional DummyDataParameter dummy_data_param = 109; optional EltwiseParameter eltwise_param = 110; - optional EmbedParameter embed_param = 137; optional ExpParameter exp_param = 111; - optional FlattenParameter flatten_param = 135; optional HDF5DataParameter hdf5_data_param = 112; optional HDF5OutputParameter hdf5_output_param = 113; optional HingeLossParameter hinge_loss_param = 114; optional ImageDataParameter image_data_param = 115; optional InfogainLossParameter infogain_loss_param = 116; optional InnerProductParameter inner_product_param = 117; - optional LogParameter log_param = 134; optional LRNParameter lrn_param = 118; optional MemoryDataParameter memory_data_param = 119; optional MVNParameter mvn_param = 120; @@ -374,16 +325,12 @@ message LayerParameter { optional PowerParameter power_param = 122; optional PReLUParameter prelu_param = 131; optional PythonParameter python_param = 130; - optional ReductionParameter reduction_param = 136; optional ReLUParameter relu_param = 123; - optional ReshapeParameter reshape_param = 133; optional SigmoidParameter sigmoid_param = 124; optional SoftmaxParameter softmax_param = 125; - optional SPPParameter spp_param = 132; optional SliceParameter slice_param = 126; optional TanHParameter tanh_param = 127; optional ThresholdParameter threshold_param = 128; - optional TileParameter tile_param = 138; optional WindowDataParameter window_data_param = 129; } @@ -517,6 +464,7 @@ message ConvolutionParameter { 
optional bool force_nd_im2col = 17 [default = false]; } +// Message that stores parameters used by DataLayer message DataParameter { enum DB { LEVELDB = 0; @@ -530,7 +478,6 @@ message DataParameter { // to avoid all asynchronous sgd clients to start at the same point. The skip // point would be set as rand_skip * rand(0,1). Note that rand_skip should not // be larger than the number of keys in the database. - // DEPRECATED. Each solver accesses a different subset of the database. optional uint32 rand_skip = 7 [default = 0]; optional DB backend = 8 [default = LEVELDB]; // DEPRECATED. See TransformationParameter. For data pre-processing, we can do @@ -546,15 +493,14 @@ message DataParameter { optional bool mirror = 6 [default = false]; // Force the encoded image to have 3 color channels optional bool force_encoded_color = 9 [default = false]; - // Prefetch queue (Number of batches to prefetch to host memory, increase if - // data access bandwidth varies). - optional uint32 prefetch = 10 [default = 4]; } +// Message that stores parameters used by DropoutLayer message DropoutParameter { optional float dropout_ratio = 1 [default = 0.5]; // dropout ratio } +// Message that stores parameters used by DummyDataLayer. // DummyDataLayer fills any number of arbitrarily shaped blobs with random // (or constant) data generated by "Fillers" (see "message FillerParameter"). message DummyDataParameter { @@ -574,6 +520,7 @@ message DummyDataParameter { repeated uint32 width = 5; } +// Message that stores parameters used by EltwiseLayer message EltwiseParameter { enum EltwiseOp { PROD = 0; @@ -588,20 +535,6 @@ message EltwiseParameter { optional bool stable_prod_grad = 3 [default = true]; } -// Message that stores parameters used by EmbedLayer -message EmbedParameter { - optional uint32 num_output = 1; // The number of outputs for the layer - // The input is given as integers to be interpreted as one-hot - // vector indices with dimension num_input. 
Hence num_input should be - // 1 greater than the maximum possible input value. - optional uint32 input_dim = 2; - - optional bool bias_term = 3 [default = true]; // Whether to use a bias term - optional FillerParameter weight_filler = 4; // The filler for the weight - optional FillerParameter bias_filler = 5; // The filler for the bias - -} - // Message that stores parameters used by ExpLayer message ExpParameter { // ExpLayer computes outputs y = base ^ (shift + scale * x), for base > 0. @@ -612,18 +545,6 @@ message ExpParameter { optional float shift = 3 [default = 0.0]; } -/// Message that stores parameters used by FlattenLayer -message FlattenParameter { - // The first axis to flatten: all preceding axes are retained in the output. - // May be negative to index from the end (e.g., -1 for the last axis). - optional int32 axis = 1 [default = 1]; - - // The last axis to flatten: all following axes are retained in the output. - // May be negative to index from the end (e.g., the default -1 for the last - // axis). - optional int32 end_axis = 2 [default = -1]; -} - // Message that stores parameters used by HDF5DataLayer message HDF5DataParameter { // Specify the data source. @@ -639,6 +560,7 @@ message HDF5DataParameter { optional bool shuffle = 3 [default = false]; } +// Message that stores parameters used by HDF5OutputLayer message HDF5OutputParameter { optional string file_name = 1; } @@ -652,11 +574,12 @@ message HingeLossParameter { optional Norm norm = 1 [default = L1]; } +// Message that stores parameters used by ImageDataLayer message ImageDataParameter { // Specify the data source. optional string source = 1; // Specify the batch size. - optional uint32 batch_size = 4 [default = 1]; + optional uint32 batch_size = 4; // The rand_skip variable is for the data layer to skip a few data points // to avoid all asynchronous sgd clients to start at the same point. The skip // point would be set as rand_skip * rand(0,1). 
Note that rand_skip should not @@ -683,11 +606,13 @@ message ImageDataParameter { optional string root_folder = 12 [default = ""]; } +// Message that stores parameters InfogainLossLayer message InfogainLossParameter { // Specify the infogain matrix source. optional string source = 1; } +// Message that stores parameters used by InnerProductLayer message InnerProductParameter { optional uint32 num_output = 1; // The number of outputs for the layer optional bool bias_term = 2 [default = true]; // whether to have bias terms @@ -700,16 +625,6 @@ message InnerProductParameter { optional int32 axis = 5 [default = 1]; } -// Message that stores parameters used by LogLayer -message LogParameter { - // LogLayer computes outputs y = log_base(shift + scale * x), for base > 0. - // Or if base is set to the default (-1), base is set to e, - // so y = ln(shift + scale * x) = log_e(shift + scale * x) - optional float base = 1 [default = -1.0]; - optional float scale = 2 [default = 1.0]; - optional float shift = 3 [default = 0.0]; -} - // Message that stores parameters used by LRNLayer message LRNParameter { optional uint32 local_size = 1 [default = 5]; @@ -723,6 +638,7 @@ message LRNParameter { optional float k = 5 [default = 1.]; } +// Message that stores parameters used by MemoryDataLayer message MemoryDataParameter { optional uint32 batch_size = 1; optional uint32 channels = 2; @@ -730,17 +646,16 @@ message MemoryDataParameter { optional uint32 width = 4; } +// Message that stores parameters used by MVNLayer message MVNParameter { // This parameter can be set to false to normalize mean only optional bool normalize_variance = 1 [default = true]; // This parameter can be set to true to perform DNN-like MVN optional bool across_channels = 2 [default = false]; - - // Epsilon for not dividing by zero while normalizing variance - optional float eps = 3 [default = 1e-9]; } +// Message that stores parameters used by PoolingLayer message PoolingParameter { enum PoolMethod { MAX = 0; @@ 
-770,6 +685,7 @@ message PoolingParameter { optional bool global_pooling = 12 [default = false]; } +// Message that stores parameters used by PowerLayer message PowerParameter { // PowerLayer computes outputs y = (shift + scale * x) ^ power. optional float power = 1 [default = 1.0]; @@ -777,47 +693,10 @@ message PowerParameter { optional float shift = 3 [default = 0.0]; } +// Message that stores parameters used by PythonLayer message PythonParameter { optional string module = 1; optional string layer = 2; - // This value is set to the attribute `param_str` of the `PythonLayer` object - // in Python before calling the `setup()` method. This could be a number, - // string, dictionary in Python dict format, JSON, etc. You may parse this - // string in `setup` method and use it in `forward` and `backward`. - optional string param_str = 3 [default = '']; - // Whether this PythonLayer is shared among worker solvers during data parallelism. - // If true, each worker solver sequentially run forward from this layer. - // This value should be set true if you are using it as a data layer. - optional bool share_in_parallel = 4 [default = false]; -} - -// Message that stores parameters used by ReductionLayer -message ReductionParameter { - enum ReductionOp { - SUM = 1; - ASUM = 2; - SUMSQ = 3; - MEAN = 4; - } - - optional ReductionOp operation = 1 [default = SUM]; // reduction operation - - // The first axis to reduce to a scalar -- may be negative to index from the - // end (e.g., -1 for the last axis). - // (Currently, only reduction along ALL "tail" axes is supported; reduction - // of axis M through N, where N < num_axes - 1, is unsupported.) - // Suppose we have an n-axis bottom Blob with shape: - // (d0, d1, d2, ..., d(m-1), dm, d(m+1), ..., d(n-1)). - // If axis == m, the output Blob will have shape - // (d0, d1, d2, ..., d(m-1)), - // and the ReductionOp operation is performed (d0 * d1 * d2 * ... * d(m-1)) - // times, each including (dm * d(m+1) * ... 
* d(n-1)) individual data. - // If axis == 0 (the default), the output Blob always has the empty shape - // (count 1), performing reduction across the entire input -- - // often useful for creating new loss functions. - optional int32 axis = 2 [default = 0]; - - optional float coeff = 3 [default = 1.0]; // coefficient for output } // Message that stores parameters used by ReLULayer @@ -836,70 +715,7 @@ message ReLUParameter { optional Engine engine = 2 [default = DEFAULT]; } -message ReshapeParameter { - // Specify the output dimensions. If some of the dimensions are set to 0, - // the corresponding dimension from the bottom layer is used (unchanged). - // Exactly one dimension may be set to -1, in which case its value is - // inferred from the count of the bottom blob and the remaining dimensions. - // For example, suppose we want to reshape a 2D blob "input" with shape 2 x 8: - // - // layer { - // type: "Reshape" bottom: "input" top: "output" - // reshape_param { ... } - // } - // - // If "input" is 2D with shape 2 x 8, then the following reshape_param - // specifications are all equivalent, producing a 3D blob "output" with shape - // 2 x 2 x 4: - // - // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } - // reshape_param { shape { dim: 0 dim: 2 dim: 4 } } - // reshape_param { shape { dim: 0 dim: 2 dim: -1 } } - // reshape_param { shape { dim: -1 dim: 0 dim: 2 } } - // - optional BlobShape shape = 1; - - // axis and num_axes control the portion of the bottom blob's shape that are - // replaced by (included in) the reshape. By default (axis == 0 and - // num_axes == -1), the entire bottom blob shape is included in the reshape, - // and hence the shape field must specify the entire output shape. 
- // - // axis may be non-zero to retain some portion of the beginning of the input - // shape (and may be negative to index from the end; e.g., -1 to begin the - // reshape after the last axis, including nothing in the reshape, - // -2 to include only the last axis, etc.). - // - // For example, suppose "input" is a 2D blob with shape 2 x 8. - // Then the following ReshapeLayer specifications are all equivalent, - // producing a blob "output" with shape 2 x 2 x 4: - // - // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } - // reshape_param { shape { dim: 2 dim: 4 } axis: 1 } - // reshape_param { shape { dim: 2 dim: 4 } axis: -3 } - // - // num_axes specifies the extent of the reshape. - // If num_axes >= 0 (and axis >= 0), the reshape will be performed only on - // input axes in the range [axis, axis+num_axes]. - // num_axes may also be -1, the default, to include all remaining axes - // (starting from axis). - // - // For example, suppose "input" is a 2D blob with shape 2 x 8. - // Then the following ReshapeLayer specifications are equivalent, - // producing a blob "output" with shape 1 x 2 x 8. - // - // reshape_param { shape { dim: 1 dim: 2 dim: 8 } } - // reshape_param { shape { dim: 1 dim: 2 } num_axes: 1 } - // reshape_param { shape { dim: 1 } num_axes: 0 } - // - // On the other hand, these would produce output blob shape 2 x 1 x 8: - // - // reshape_param { shape { dim: 2 dim: 1 dim: 8 } } - // reshape_param { shape { dim: 1 } axis: 1 num_axes: 0 } - // - optional int32 axis = 2 [default = 0]; - optional int32 num_axes = 3 [default = -1]; -} - +// Message that stores parameters used by SigmoidLayer message SigmoidParameter { enum Engine { DEFAULT = 0; @@ -909,6 +725,7 @@ message SigmoidParameter { optional Engine engine = 1 [default = DEFAULT]; } +// Message that stores parameters used by SliceLayer message SliceParameter { // The axis along which to slice -- may be negative to index from the end // (e.g., -1 for the last axis). 
@@ -935,6 +752,7 @@ message SoftmaxParameter { optional int32 axis = 2 [default = 1]; } +// Message that stores parameters used by TanHLayer message TanHParameter { enum Engine { DEFAULT = 0; @@ -944,20 +762,12 @@ message TanHParameter { optional Engine engine = 1 [default = DEFAULT]; } -// Message that stores parameters used by TileLayer -message TileParameter { - // The index of the axis to tile. - optional int32 axis = 1 [default = 1]; - - // The number of copies (tiles) of the blob to output. - optional int32 tiles = 2; -} - // Message that stores parameters used by ThresholdLayer message ThresholdParameter { optional float threshold = 1 [default = 0]; // Strictly positive values } +// Message that stores parameters used by WindowDataLayer message WindowDataParameter { // Specify the data source. optional string source = 1; @@ -991,22 +801,6 @@ message WindowDataParameter { optional string root_folder = 13 [default = ""]; } -message SPPParameter { - enum PoolMethod { - MAX = 0; - AVE = 1; - STOCHASTIC = 2; - } - optional uint32 pyramid_height = 1; - optional PoolMethod pool = 2 [default = MAX]; // The pooling method - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 6 [default = DEFAULT]; -} - // DEPRECATED: use LayerParameter. message V1LayerParameter { repeated string bottom = 2; @@ -1194,6 +988,7 @@ message V0LayerParameter { optional HDF5OutputParameter hdf5_output_param = 1001; } +// Message that stores parameters used by PReLULayer message PReLUParameter { // Parametric ReLU described in K. He et al, Delving Deep into Rectifiers: // Surpassing Human-Level Performance on ImageNet Classification, 2015. 
diff --git a/src/caffe/test/test_accuracy_layer.cpp b/src/caffe/test/test_accuracy_layer.cpp index ef0e57a37a1..b5083630217 100644 --- a/src/caffe/test/test_accuracy_layer.cpp +++ b/src/caffe/test/test_accuracy_layer.cpp @@ -16,7 +16,7 @@ namespace caffe { template -class AccuracyLayerTest : public CPUDeviceTest { +class AccuracyLayerTest : public ::testing::Test { protected: AccuracyLayerTest() : blob_bottom_data_(new Blob()), @@ -112,6 +112,7 @@ TYPED_TEST(AccuracyLayerTest, TestSetupOutputPerClass) { TYPED_TEST(AccuracyLayerTest, TestForwardCPU) { LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); AccuracyLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); @@ -137,6 +138,7 @@ TYPED_TEST(AccuracyLayerTest, TestForwardCPU) { } TYPED_TEST(AccuracyLayerTest, TestForwardWithSpatialAxes) { + Caffe::set_mode(Caffe::CPU); this->blob_bottom_data_->Reshape(2, 10, 4, 5); vector label_shape(3); label_shape[0] = 2; label_shape[1] = 4; label_shape[2] = 5; @@ -180,6 +182,7 @@ TYPED_TEST(AccuracyLayerTest, TestForwardWithSpatialAxes) { } TYPED_TEST(AccuracyLayerTest, TestForwardIgnoreLabel) { + Caffe::set_mode(Caffe::CPU); LayerParameter layer_param; const TypeParam kIgnoreLabelValue = -1; layer_param.mutable_accuracy_param()->set_ignore_label(kIgnoreLabelValue); diff --git a/src/caffe/test/test_argmax_layer.cpp b/src/caffe/test/test_argmax_layer.cpp index 895c3d372ff..3487d42f21e 100644 --- a/src/caffe/test/test_argmax_layer.cpp +++ b/src/caffe/test/test_argmax_layer.cpp @@ -13,12 +13,13 @@ namespace caffe { template -class ArgMaxLayerTest : public CPUDeviceTest { +class ArgMaxLayerTest : public ::testing::Test { protected: ArgMaxLayerTest() : blob_bottom_(new Blob(10, 20, 1, 1)), blob_top_(new Blob()), top_k_(5) { + Caffe::set_mode(Caffe::CPU); Caffe::set_random_seed(1701); // fill the values FillerParameter filler_param; diff --git 
a/src/caffe/test/test_contrastive_loss_layer.cpp b/src/caffe/test/test_contrastive_loss_layer.cpp index 1e9447cbc51..d269fbc26f2 100644 --- a/src/caffe/test/test_contrastive_loss_layer.cpp +++ b/src/caffe/test/test_contrastive_loss_layer.cpp @@ -22,15 +22,15 @@ class ContrastiveLossLayerTest : public MultiDeviceTest { protected: ContrastiveLossLayerTest() - : blob_bottom_data_i_(new Blob(512, 2, 1, 1)), - blob_bottom_data_j_(new Blob(512, 2, 1, 1)), - blob_bottom_y_(new Blob(512, 1, 1, 1)), + : blob_bottom_data_i_(new Blob(128, 10, 1, 1)), + blob_bottom_data_j_(new Blob(128, 10, 1, 1)), + blob_bottom_y_(new Blob(128, 1, 1, 1)), blob_top_loss_(new Blob()) { // fill the values FillerParameter filler_param; - filler_param.set_min(-1.0); - filler_param.set_max(1.0); // distances~=1.0 to test both sides of margin - UniformFiller filler(filler_param); + filler_param.set_mean(0.0); + filler_param.set_std(0.3); // distances~=1.0 to test both sides of margin + GaussianFiller filler(filler_param); filler.Fill(this->blob_bottom_data_i_); blob_bottom_vec_.push_back(blob_bottom_data_i_); filler.Fill(this->blob_bottom_data_j_); @@ -79,8 +79,7 @@ TYPED_TEST(ContrastiveLossLayerTest, TestForward) { if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs loss += dist_sq; } else { - Dtype dist = std::max(margin - sqrt(dist_sq), 0.0); - loss += dist*dist; + loss += std::max(margin-dist_sq, Dtype(0)); } } loss /= static_cast(num) * Dtype(2); @@ -100,47 +99,4 @@ TYPED_TEST(ContrastiveLossLayerTest, TestGradient) { this->blob_top_vec_, 1); } -TYPED_TEST(ContrastiveLossLayerTest, TestForwardLegacy) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - layer_param.mutable_contrastive_loss_param()->set_legacy_version(true); - ContrastiveLossLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - // manually compute to compare - const Dtype margin = 
layer_param.contrastive_loss_param().margin(); - const int num = this->blob_bottom_data_i_->num(); - const int channels = this->blob_bottom_data_i_->channels(); - Dtype loss(0); - for (int i = 0; i < num; ++i) { - Dtype dist_sq(0); - for (int j = 0; j < channels; ++j) { - Dtype diff = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - - this->blob_bottom_data_j_->cpu_data()[i*channels+j]; - dist_sq += diff*diff; - } - if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs - loss += dist_sq; - } else { - loss += std::max(margin - dist_sq, Dtype(0.0)); - } - } - loss /= static_cast(num) * Dtype(2); - EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); -} - -TYPED_TEST(ContrastiveLossLayerTest, TestGradientLegacy) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - layer_param.mutable_contrastive_loss_param()->set_legacy_version(true); - ContrastiveLossLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - GradientChecker checker(1e-2, 1e-2, 1701); - // check the gradient for the first two bottom layers - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_, 0); - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_, 1); -} - } // namespace caffe diff --git a/src/caffe/test/test_convolution_layer.cpp b/src/caffe/test/test_convolution_layer.cpp index 9df979a2d27..a58b3e1fe74 100644 --- a/src/caffe/test/test_convolution_layer.cpp +++ b/src/caffe/test/test_convolution_layer.cpp @@ -692,7 +692,7 @@ TYPED_TEST(ConvolutionLayerTest, TestGradientGroup) { #ifdef USE_CUDNN template -class CuDNNConvolutionLayerTest : public GPUDeviceTest { +class CuDNNConvolutionLayerTest : public ::testing::Test { protected: CuDNNConvolutionLayerTest() : blob_bottom_(new Blob(2, 3, 6, 4)), @@ -735,6 +735,7 @@ class CuDNNConvolutionLayerTest : public GPUDeviceTest { TYPED_TEST_CASE(CuDNNConvolutionLayerTest, TestDtypes); 
TYPED_TEST(CuDNNConvolutionLayerTest, TestSetupCuDNN) { + Caffe::set_mode(Caffe::GPU); this->blob_bottom_vec_.push_back(this->blob_bottom_2_); this->blob_top_vec_.push_back(this->blob_top_2_); LayerParameter layer_param; @@ -772,6 +773,7 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSetupCuDNN) { } TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionCuDNN) { + Caffe::set_mode(Caffe::GPU); this->blob_bottom_vec_.push_back(this->blob_bottom_2_); this->blob_top_vec_.push_back(this->blob_top_2_); LayerParameter layer_param; @@ -807,6 +809,7 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionCuDNN) { } TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionGroupCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); @@ -837,7 +840,7 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSobelConvolutionCuDNN) { // Test separable convolution by computing the Sobel operator // as a single filter then comparing the result // as the convolution of two rectangular filters. - + Caffe::set_mode(Caffe::GPU); // Fill bottoms with identical Gaussian noise. 
shared_ptr > filler; FillerParameter filler_param; @@ -927,6 +930,7 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSobelConvolutionCuDNN) { } TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); @@ -944,6 +948,7 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientCuDNN) { } TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientGroupCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); diff --git a/src/caffe/test/test_dummy_data_layer.cpp b/src/caffe/test/test_dummy_data_layer.cpp index c9ed38db3a5..99548352746 100644 --- a/src/caffe/test/test_dummy_data_layer.cpp +++ b/src/caffe/test/test_dummy_data_layer.cpp @@ -13,7 +13,7 @@ namespace caffe { template -class DummyDataLayerTest : public CPUDeviceTest { +class DummyDataLayerTest : public ::testing::Test { protected: DummyDataLayerTest() : blob_top_a_(new Blob()), @@ -44,6 +44,7 @@ class DummyDataLayerTest : public CPUDeviceTest { TYPED_TEST_CASE(DummyDataLayerTest, TestDtypes); TYPED_TEST(DummyDataLayerTest, TestOneTopConstant) { + Caffe::set_mode(Caffe::CPU); LayerParameter param; DummyDataParameter* dummy_data_param = param.mutable_dummy_data_param(); dummy_data_param->add_num(5); @@ -73,6 +74,7 @@ TYPED_TEST(DummyDataLayerTest, TestOneTopConstant) { } TYPED_TEST(DummyDataLayerTest, TestTwoTopConstant) { + Caffe::set_mode(Caffe::CPU); LayerParameter param; DummyDataParameter* dummy_data_param = param.mutable_dummy_data_param(); dummy_data_param->add_num(5); @@ -111,6 +113,7 @@ TYPED_TEST(DummyDataLayerTest, TestTwoTopConstant) { } TYPED_TEST(DummyDataLayerTest, TestThreeTopConstantGaussianConstant) { + Caffe::set_mode(Caffe::CPU); LayerParameter param; DummyDataParameter* dummy_data_param = param.mutable_dummy_data_param(); dummy_data_param->add_num(5); diff --git 
a/src/caffe/test/test_filler.cpp b/src/caffe/test/test_filler.cpp index 728b8dc5f0d..e04b0fd22af 100644 --- a/src/caffe/test/test_filler.cpp +++ b/src/caffe/test/test_filler.cpp @@ -142,102 +142,4 @@ TYPED_TEST(GaussianFillerTest, TestFill) { EXPECT_LE(var, target_var * 5.); } -template -class XavierFillerTest : public ::testing::Test { - protected: - XavierFillerTest() - : blob_(new Blob(1000, 2, 4, 5)), - filler_param_() { - } - virtual void test_params(FillerParameter_VarianceNorm variance_norm, - Dtype n) { - this->filler_param_.set_variance_norm(variance_norm); - this->filler_.reset(new XavierFiller(this->filler_param_)); - this->filler_->Fill(blob_); - EXPECT_TRUE(this->blob_); - const int count = this->blob_->count(); - const Dtype* data = this->blob_->cpu_data(); - Dtype mean = 0.; - Dtype ex2 = 0.; - for (int i = 0; i < count; ++i) { - mean += data[i]; - ex2 += data[i] * data[i]; - } - mean /= count; - ex2 /= count; - Dtype std = sqrt(ex2 - mean*mean); - Dtype target_std = sqrt(2.0 / n); - EXPECT_NEAR(mean, 0.0, 0.1); - EXPECT_NEAR(std, target_std, 0.1); - } - virtual ~XavierFillerTest() { delete blob_; } - Blob* const blob_; - FillerParameter filler_param_; - shared_ptr > filler_; -}; - -TYPED_TEST_CASE(XavierFillerTest, TestDtypes); - -TYPED_TEST(XavierFillerTest, TestFillFanIn) { - TypeParam n = 2*4*5; - this->test_params(FillerParameter_VarianceNorm_FAN_IN, n); -} -TYPED_TEST(XavierFillerTest, TestFillFanOut) { - TypeParam n = 1000*4*5; - this->test_params(FillerParameter_VarianceNorm_FAN_OUT, n); -} -TYPED_TEST(XavierFillerTest, TestFillAverage) { - TypeParam n = (2*4*5 + 1000*4*5) / 2.0; - this->test_params(FillerParameter_VarianceNorm_AVERAGE, n); -} - -template -class MSRAFillerTest : public ::testing::Test { - protected: - MSRAFillerTest() - : blob_(new Blob(1000, 2, 4, 5)), - filler_param_() { - } - virtual void test_params(FillerParameter_VarianceNorm variance_norm, - Dtype n) { - this->filler_param_.set_variance_norm(variance_norm); - 
this->filler_.reset(new MSRAFiller(this->filler_param_)); - this->filler_->Fill(blob_); - EXPECT_TRUE(this->blob_); - const int count = this->blob_->count(); - const Dtype* data = this->blob_->cpu_data(); - Dtype mean = 0.; - Dtype ex2 = 0.; - for (int i = 0; i < count; ++i) { - mean += data[i]; - ex2 += data[i] * data[i]; - } - mean /= count; - ex2 /= count; - Dtype std = sqrt(ex2 - mean*mean); - Dtype target_std = sqrt(2.0 / n); - EXPECT_NEAR(mean, 0.0, 0.1); - EXPECT_NEAR(std, target_std, 0.1); - } - virtual ~MSRAFillerTest() { delete blob_; } - Blob* const blob_; - FillerParameter filler_param_; - shared_ptr > filler_; -}; - -TYPED_TEST_CASE(MSRAFillerTest, TestDtypes); - -TYPED_TEST(MSRAFillerTest, TestFillFanIn) { - TypeParam n = 2*4*5; - this->test_params(FillerParameter_VarianceNorm_FAN_IN, n); -} -TYPED_TEST(MSRAFillerTest, TestFillFanOut) { - TypeParam n = 1000*4*5; - this->test_params(FillerParameter_VarianceNorm_FAN_OUT, n); -} -TYPED_TEST(MSRAFillerTest, TestFillAverage) { - TypeParam n = (2*4*5 + 1000*4*5) / 2.0; - this->test_params(FillerParameter_VarianceNorm_AVERAGE, n); -} - } // namespace caffe diff --git a/src/caffe/test/test_flatten_layer.cpp b/src/caffe/test/test_flatten_layer.cpp index 7b6757cba32..3042d293cf7 100644 --- a/src/caffe/test/test_flatten_layer.cpp +++ b/src/caffe/test/test_flatten_layer.cpp @@ -42,48 +42,13 @@ TYPED_TEST(FlattenLayerTest, TestSetup) { LayerParameter layer_param; FlattenLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - ASSERT_EQ(this->blob_top_->num_axes(), 2); - EXPECT_EQ(this->blob_top_->shape(0), 2); - EXPECT_EQ(this->blob_top_->shape(1), 3 * 6 * 5); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 3 * 6 * 5); + EXPECT_EQ(this->blob_top_->height(), 1); + EXPECT_EQ(this->blob_top_->width(), 1); } -TYPED_TEST(FlattenLayerTest, TestSetupWithAxis) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - 
layer_param.mutable_flatten_param()->set_axis(2); - FlattenLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - ASSERT_EQ(this->blob_top_->num_axes(), 3); - EXPECT_EQ(this->blob_top_->shape(0), 2); - EXPECT_EQ(this->blob_top_->shape(1), 3); - EXPECT_EQ(this->blob_top_->shape(2), 6 * 5); -} - -TYPED_TEST(FlattenLayerTest, TestSetupWithEndAxis) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - layer_param.mutable_flatten_param()->set_end_axis(-2); - FlattenLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - ASSERT_EQ(this->blob_top_->num_axes(), 3); - EXPECT_EQ(this->blob_top_->shape(0), 2); - EXPECT_EQ(this->blob_top_->shape(1), 3 * 6); - EXPECT_EQ(this->blob_top_->shape(2), 5); -} - -TYPED_TEST(FlattenLayerTest, TestSetupWithStartAndEndAxis) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - layer_param.mutable_flatten_param()->set_axis(0); - layer_param.mutable_flatten_param()->set_end_axis(-2); - FlattenLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - ASSERT_EQ(this->blob_top_->num_axes(), 2); - EXPECT_EQ(this->blob_top_->shape(0), 2 * 3 * 6); - EXPECT_EQ(this->blob_top_->shape(1), 5); -} - -TYPED_TEST(FlattenLayerTest, TestForward) { +TYPED_TEST(FlattenLayerTest, Test) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; FlattenLayer layer(layer_param); @@ -106,4 +71,5 @@ TYPED_TEST(FlattenLayerTest, TestGradient) { this->blob_top_vec_); } + } // namespace caffe diff --git a/src/caffe/test/test_gradient_based_solver.cpp b/src/caffe/test/test_gradient_based_solver.cpp index 7ad7467f86f..2a7dd90f0f5 100644 --- a/src/caffe/test/test_gradient_based_solver.cpp +++ b/src/caffe/test/test_gradient_based_solver.cpp @@ -410,45 +410,6 @@ class GradientBasedSolverTest : public MultiDeviceTest { } } - void CheckAccumulation(const Dtype kLearningRate, const Dtype kWeightDecay, - const Dtype 
kMomentum, const int kNumIters, const int kIterSize) { - const double kPrecision = 1e-2; - const double kMinPrecision = 1e-7; - // Solve without accumulation and save parameters. - this->RunLeastSquaresSolver(kLearningRate, kWeightDecay, kMomentum, - kNumIters); - // Save parameters for comparison. - Net& net = *this->solver_->net(); - const vector > >& param_blobs = - net.layer_by_name("innerprod")->blobs(); - vector > > noaccum_params(param_blobs.size()); - for (int i = 0; i < param_blobs.size(); ++i) { - noaccum_params[i].reset(new Blob()); - noaccum_params[i]->CopyFrom(*param_blobs[i], false, true); - } - // Solve by equivalent accumulation of gradients over divided batches. - this->RunLeastSquaresSolver(kLearningRate, kWeightDecay, kMomentum, - kNumIters, kIterSize); - Net& net_accum = *this->solver_->net(); - const vector > >& accum_params = - net_accum.layer_by_name("innerprod")->blobs(); - // Compare accumulated parameters against no accumulation standard. - const int D = this->channels_ * this->height_ * this->width_; - for (int i = 0; i < D; ++i) { - const Dtype expected_param = noaccum_params[0]->cpu_data()[i]; - const Dtype accum_param = accum_params[0]->cpu_data()[i]; - const Dtype error_margin = std::max(kMinPrecision, kPrecision * - std::min(fabs(expected_param), fabs(accum_param))); - EXPECT_NEAR(expected_param, accum_param, error_margin); - } - ASSERT_EQ(1, accum_params[1]->count()); - const Dtype expected_bias = noaccum_params[1]->cpu_data()[0]; - const Dtype accum_bias = accum_params[1]->cpu_data()[0]; - const Dtype error_margin = std::max(kMinPrecision, kPrecision * - std::min(fabs(expected_bias), fabs(accum_bias))); - EXPECT_NEAR(expected_bias, accum_bias, error_margin); - } - // Test that the correct update is computed for a regularized least squares // problem: // diff --git a/src/caffe/test/test_im2col_kernel.cu b/src/caffe/test/test_im2col_kernel.cu index f0b75fcc68d..ee684c00255 100644 --- a/src/caffe/test/test_im2col_kernel.cu +++ 
b/src/caffe/test/test_im2col_kernel.cu @@ -22,32 +22,19 @@ __global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, const int height_col, const int width_col, Dtype* data_col); -template -__global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im, - const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, - Dtype* data_col); - extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; template -class Im2colKernelTest : public GPUDeviceTest { +class Im2colKernelTest : public ::testing::Test { protected: Im2colKernelTest() // big so launches > 1024 threads : blob_bottom_(new Blob(5, 500, 10, 10)), - blob_kernel_shape_(new Blob()), - blob_stride_(new Blob()), - blob_pad_(new Blob()), blob_top_(new Blob()), blob_top_cpu_(new Blob()) { FillerParameter filler_param; GaussianFiller filler(filler_param); filler.Fill(this->blob_bottom_); - vector dim_blob_shape(1, 2); - blob_kernel_shape_->Reshape(dim_blob_shape); - blob_stride_->Reshape(dim_blob_shape); - blob_pad_->Reshape(dim_blob_shape); height_ = blob_bottom_->height(); width_ = blob_bottom_->width(); @@ -57,26 +44,14 @@ class Im2colKernelTest : public GPUDeviceTest { kernel_size_ = 3; height_col_ = (height_ + 2 * pad_ - kernel_size_) / stride_ + 1; width_col_ = (width_ + 2 * pad_ - kernel_size_) / stride_ + 1; - - for (int i = 0; i < 2; ++i) { - blob_kernel_shape_->mutable_cpu_data()[i] = kernel_size_; - blob_stride_->mutable_cpu_data()[i] = stride_; - blob_pad_->mutable_cpu_data()[i] = pad_; - } } virtual ~Im2colKernelTest() { - delete blob_bottom_; - delete blob_top_; - delete blob_top_cpu_; - delete blob_kernel_shape_; - delete blob_stride_; - delete blob_pad_; + delete blob_bottom_; + delete blob_top_; + delete blob_top_cpu_; } - Blob* const blob_kernel_shape_; - Blob* const blob_stride_; - Blob* const blob_pad_; Blob* const blob_bottom_; Blob* const blob_top_; Blob* const blob_top_cpu_; @@ -92,7 +67,9 @@ class Im2colKernelTest : public GPUDeviceTest { 
TYPED_TEST_CASE(Im2colKernelTest, TestDtypes); -TYPED_TEST(Im2colKernelTest, Test2D) { +TYPED_TEST(Im2colKernelTest, TestGPU) { + Caffe::set_mode(Caffe::GPU); + // Reshape the blobs to correct size for im2col output this->blob_top_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, @@ -147,58 +124,4 @@ TYPED_TEST(Im2colKernelTest, Test2D) { } } -TYPED_TEST(Im2colKernelTest, TestND) { - // Reshape the blobs to correct size for im2col output - this->blob_top_->Reshape(this->blob_bottom_->num(), - this->channels_ * this->kernel_size_ * this->kernel_size_, - this->height_col_, - this->width_col_); - - this->blob_top_cpu_->ReshapeLike(*this->blob_top_); - - const TypeParam* bottom_data_cpu = this->blob_bottom_->cpu_data(); - TypeParam* top_data_cpu = this->blob_top_cpu_->mutable_cpu_data(); - - // CPU Version - for (int n = 0; n < this->blob_bottom_->num(); ++n) { - im2col_nd_cpu(bottom_data_cpu + this->blob_bottom_->offset(n), 2, - this->blob_bottom_->shape().data() + 1, - this->blob_top_cpu_->shape().data() + 1, - this->blob_kernel_shape_->cpu_data(), - this->blob_pad_->cpu_data(), this->blob_stride_->cpu_data(), - top_data_cpu + this->blob_top_cpu_->offset(n)); - } - - // GPU version - int num_kernels = this->channels_ * this->height_col_ * this->width_col_; - int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); - const TypeParam* bottom_data_gpu = this->blob_bottom_->gpu_data(); - - // Launch with different grid sizes - for (int grid_div = 2; grid_div <= 8; grid_div++) { - for (int n = 0; n < this->blob_bottom_->num(); ++n) { - const int grid_dim = default_grid_dim / grid_div; - TypeParam* top_data_gpu = this->blob_top_->mutable_gpu_data(); - // NOLINT_NEXT_LINE(whitespace/operators) - im2col_nd_gpu_kernel<<>>( - num_kernels, bottom_data_gpu + this->blob_bottom_->offset(n), - this->blob_bottom_->gpu_shape() + 1, this->blob_top_->gpu_shape() + 1, - this->blob_kernel_shape_->gpu_data(), this->blob_pad_->gpu_data(), - 
this->blob_stride_->gpu_data(), - top_data_gpu + this->blob_top_->offset(n)); - CUDA_POST_KERNEL_CHECK; - } - - // Compare results against CPU version - for (int i = 0; i < this->blob_top_->count(); ++i) { - TypeParam cpuval = top_data_cpu[i]; - TypeParam gpuval = this->blob_top_->cpu_data()[i]; - EXPECT_EQ(cpuval, gpuval); - if (cpuval != gpuval) { - break; - } - } - } -} - } // namespace caffe diff --git a/src/caffe/test/test_math_functions.cpp b/src/caffe/test/test_math_functions.cpp index a095b544e17..667f744bdd7 100644 --- a/src/caffe/test/test_math_functions.cpp +++ b/src/caffe/test/test_math_functions.cpp @@ -15,10 +15,8 @@ namespace caffe { -template -class MathFunctionsTest : public MultiDeviceTest { - typedef typename TypeParam::Dtype Dtype; - +template +class MathFunctionsTest : public ::testing::Test { protected: MathFunctionsTest() : blob_bottom_(new Blob()), @@ -66,19 +64,14 @@ class MathFunctionsTest : public MultiDeviceTest { Blob* const blob_top_; }; -template -class CPUMathFunctionsTest - : public MathFunctionsTest > { -}; - -TYPED_TEST_CASE(CPUMathFunctionsTest, TestDtypes); +TYPED_TEST_CASE(MathFunctionsTest, TestDtypes); -TYPED_TEST(CPUMathFunctionsTest, TestNothing) { +TYPED_TEST(MathFunctionsTest, TestNothing) { // The first test case of a test suite takes the longest time // due to the set up overhead. 
} -TYPED_TEST(CPUMathFunctionsTest, TestHammingDistance) { +TYPED_TEST(MathFunctionsTest, TestHammingDistanceCPU) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); const TypeParam* y = this->blob_top_->cpu_data(); @@ -86,7 +79,7 @@ TYPED_TEST(CPUMathFunctionsTest, TestHammingDistance) { caffe_cpu_hamming_distance(n, x, y)); } -TYPED_TEST(CPUMathFunctionsTest, TestAsum) { +TYPED_TEST(MathFunctionsTest, TestAsumCPU) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); TypeParam std_asum = 0; @@ -97,7 +90,7 @@ TYPED_TEST(CPUMathFunctionsTest, TestAsum) { EXPECT_LT((cpu_asum - std_asum) / std_asum, 1e-2); } -TYPED_TEST(CPUMathFunctionsTest, TestSign) { +TYPED_TEST(MathFunctionsTest, TestSignCPU) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); caffe_cpu_sign(n, x, this->blob_bottom_->mutable_cpu_diff()); @@ -107,7 +100,7 @@ TYPED_TEST(CPUMathFunctionsTest, TestSign) { } } -TYPED_TEST(CPUMathFunctionsTest, TestSgnbit) { +TYPED_TEST(MathFunctionsTest, TestSgnbitCPU) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); caffe_cpu_sgnbit(n, x, this->blob_bottom_->mutable_cpu_diff()); @@ -117,7 +110,7 @@ TYPED_TEST(CPUMathFunctionsTest, TestSgnbit) { } } -TYPED_TEST(CPUMathFunctionsTest, TestFabs) { +TYPED_TEST(MathFunctionsTest, TestFabsCPU) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); caffe_abs(n, x, this->blob_bottom_->mutable_cpu_diff()); @@ -127,7 +120,7 @@ TYPED_TEST(CPUMathFunctionsTest, TestFabs) { } } -TYPED_TEST(CPUMathFunctionsTest, TestScale) { +TYPED_TEST(MathFunctionsTest, TestScaleCPU) { int n = this->blob_bottom_->count(); TypeParam alpha = this->blob_bottom_->cpu_diff()[caffe_rng_rand() % this->blob_bottom_->count()]; @@ -140,10 +133,11 @@ TYPED_TEST(CPUMathFunctionsTest, TestScale) { } } -TYPED_TEST(CPUMathFunctionsTest, TestCopy) { 
+TYPED_TEST(MathFunctionsTest, TestCopyCPU) { const int n = this->blob_bottom_->count(); const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); TypeParam* top_data = this->blob_top_->mutable_cpu_data(); + Caffe::set_mode(Caffe::CPU); caffe_copy(n, bottom_data, top_data); for (int i = 0; i < n; ++i) { EXPECT_EQ(bottom_data[i], top_data[i]); @@ -152,14 +146,8 @@ TYPED_TEST(CPUMathFunctionsTest, TestCopy) { #ifndef CPU_ONLY -template -class GPUMathFunctionsTest : public MathFunctionsTest > { -}; - -TYPED_TEST_CASE(GPUMathFunctionsTest, TestDtypes); - // TODO: Fix caffe_gpu_hamming_distance and re-enable this test. -TYPED_TEST(GPUMathFunctionsTest, DISABLED_TestHammingDistance) { +TYPED_TEST(MathFunctionsTest, DISABLED_TestHammingDistanceGPU) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); const TypeParam* y = this->blob_top_->cpu_data(); @@ -170,7 +158,7 @@ TYPED_TEST(GPUMathFunctionsTest, DISABLED_TestHammingDistance) { EXPECT_EQ(reference_distance, computed_distance); } -TYPED_TEST(GPUMathFunctionsTest, TestAsum) { +TYPED_TEST(MathFunctionsTest, TestAsumGPU) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); TypeParam std_asum = 0; @@ -182,7 +170,7 @@ TYPED_TEST(GPUMathFunctionsTest, TestAsum) { EXPECT_LT((gpu_asum - std_asum) / std_asum, 1e-2); } -TYPED_TEST(GPUMathFunctionsTest, TestSign) { +TYPED_TEST(MathFunctionsTest, TestSignGPU) { int n = this->blob_bottom_->count(); caffe_gpu_sign(n, this->blob_bottom_->gpu_data(), this->blob_bottom_->mutable_gpu_diff()); @@ -193,7 +181,7 @@ TYPED_TEST(GPUMathFunctionsTest, TestSign) { } } -TYPED_TEST(GPUMathFunctionsTest, TestSgnbit) { +TYPED_TEST(MathFunctionsTest, TestSgnbitGPU) { int n = this->blob_bottom_->count(); caffe_gpu_sgnbit(n, this->blob_bottom_->gpu_data(), this->blob_bottom_->mutable_gpu_diff()); @@ -204,7 +192,7 @@ TYPED_TEST(GPUMathFunctionsTest, TestSgnbit) { } } -TYPED_TEST(GPUMathFunctionsTest, TestFabs) { 
+TYPED_TEST(MathFunctionsTest, TestFabsGPU) { int n = this->blob_bottom_->count(); caffe_gpu_abs(n, this->blob_bottom_->gpu_data(), this->blob_bottom_->mutable_gpu_diff()); @@ -215,7 +203,7 @@ TYPED_TEST(GPUMathFunctionsTest, TestFabs) { } } -TYPED_TEST(GPUMathFunctionsTest, TestScale) { +TYPED_TEST(MathFunctionsTest, TestScaleGPU) { int n = this->blob_bottom_->count(); TypeParam alpha = this->blob_bottom_->cpu_diff()[caffe_rng_rand() % this->blob_bottom_->count()]; @@ -228,10 +216,11 @@ TYPED_TEST(GPUMathFunctionsTest, TestScale) { } } -TYPED_TEST(GPUMathFunctionsTest, TestCopy) { +TYPED_TEST(MathFunctionsTest, TestCopyGPU) { const int n = this->blob_bottom_->count(); const TypeParam* bottom_data = this->blob_bottom_->gpu_data(); TypeParam* top_data = this->blob_top_->mutable_gpu_data(); + Caffe::set_mode(Caffe::GPU); caffe_copy(n, bottom_data, top_data); bottom_data = this->blob_bottom_->cpu_data(); top_data = this->blob_top_->mutable_cpu_data(); diff --git a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp index b2db984feb1..9038017e3e2 100644 --- a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp +++ b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp @@ -16,7 +16,7 @@ namespace caffe { template -class MultinomialLogisticLossLayerTest : public CPUDeviceTest { +class MultinomialLogisticLossLayerTest : public ::testing::Test { protected: MultinomialLogisticLossLayerTest() : blob_bottom_data_(new Blob(10, 5, 1, 1)), @@ -51,6 +51,7 @@ TYPED_TEST_CASE(MultinomialLogisticLossLayerTest, TestDtypes); TYPED_TEST(MultinomialLogisticLossLayerTest, TestGradientCPU) { LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); MultinomialLogisticLossLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); GradientChecker checker(1e-2, 2*1e-2, 1701, 0, 0.05); diff --git a/src/caffe/test/test_net.cpp b/src/caffe/test/test_net.cpp index 3d30e9c6723..b42b2c8ac45 100644 --- 
a/src/caffe/test/test_net.cpp +++ b/src/caffe/test/test_net.cpp @@ -614,6 +614,7 @@ class NetTest : public MultiDeviceTest { InitNetFromProtoString(proto); } +<<<<<<< HEAD <<<<<<< HEAD <<<<<<< HEAD ======= @@ -751,6 +752,8 @@ class NetTest : public MultiDeviceTest { >>>>>>> 083f61b... New triplet loss layer added(beta1 version-no test source files) ======= >>>>>>> 011aef0... restore +======= +>>>>>>> 4d8130b... New triplet loss layer added(beta1 version-no test source files) int seed_; shared_ptr > net_; }; @@ -2355,52 +2358,4 @@ TYPED_TEST(NetTest, TestReshape) { } } -TYPED_TEST(NetTest, TestSkipPropagateDown) { - // check bottom_need_backward if propagate_down is true - this->InitSkipPropNet(false); - vector vec_layer_need_backward = this->net_->layer_need_backward(); - for (int layer_id = 0; layer_id < this->net_->layers().size(); ++layer_id) { - string layer_name = this->net_->layer_names()[layer_id]; - if (layer_name == "loss") { - // access to bottom_need_backward coresponding to label's blob - bool need_back = this->net_->bottom_need_backward()[layer_id][1]; - // if propagate_down is true, the loss layer will try to - // backpropagate on labels - EXPECT_TRUE(need_back) << "bottom_need_backward should be True"; - } - // layer_need_backward should be True except for data and silence layers - if (layer_name.find("data") != std::string::npos || - layer_name == "silence") { - EXPECT_FALSE(vec_layer_need_backward[layer_id]) - << "layer_need_backward for " << layer_name << " should be False"; - } else { - EXPECT_TRUE(vec_layer_need_backward[layer_id]) - << "layer_need_backward for " << layer_name << " should be True"; - } - } - // check bottom_need_backward if propagat_down is false - this->InitSkipPropNet(true); - vec_layer_need_backward.clear(); - vec_layer_need_backward = this->net_->layer_need_backward(); - for (int layer_id = 0; layer_id < this->net_->layers().size(); ++layer_id) { - string layer_name = this->net_->layer_names()[layer_id]; - if (layer_name == 
"loss") { - // access to bottom_need_backward coresponding to label's blob - bool need_back = this->net_->bottom_need_backward()[layer_id][1]; - // if propagate_down is false, the loss layer will not try to - // backpropagate on labels - EXPECT_FALSE(need_back) << "bottom_need_backward should be False"; - } - // layer_need_backward should be False except for innerproduct and - // loss layers - if (layer_name == "innerproduct" || layer_name == "loss") { - EXPECT_TRUE(vec_layer_need_backward[layer_id]) - << "layer_need_backward for " << layer_name << " should be True"; - } else { - EXPECT_FALSE(vec_layer_need_backward[layer_id]) - << "layer_need_backward for " << layer_name << " should be False"; - } - } -} - } // namespace caffe diff --git a/src/caffe/test/test_neuron_layer.cpp b/src/caffe/test/test_neuron_layer.cpp index c6e4d27b903..c9d52f247a6 100644 --- a/src/caffe/test/test_neuron_layer.cpp +++ b/src/caffe/test/test_neuron_layer.cpp @@ -117,49 +117,6 @@ class NeuronLayerTest : public MultiDeviceTest { + slope_data[c] * std::min(bottom_data[i], (Dtype)(0))); } } - - void LogBottomInit() { - FillerParameter filler_param; - GaussianFiller filler(filler_param); - filler.Fill(this->blob_bottom_); - Dtype* bottom_data = this->blob_bottom_->mutable_cpu_data(); - caffe_exp(this->blob_bottom_->count(), bottom_data, bottom_data); - } - - void TestLogForward(const float base, const float scale, const float shift) { - LogBottomInit(); - LayerParameter layer_param; - layer_param.mutable_log_param()->set_base(base); - layer_param.mutable_log_param()->set_scale(scale); - layer_param.mutable_log_param()->set_shift(shift); - LogLayer layer(layer_param); - layer.SetUp(blob_bottom_vec_, blob_top_vec_); - layer.Forward(blob_bottom_vec_, blob_top_vec_); - const Dtype kDelta = 2e-4; - const Dtype* bottom_data = blob_bottom_->cpu_data(); - const Dtype* top_data = blob_top_->cpu_data(); - for (int i = 0; i < blob_bottom_->count(); ++i) { - const Dtype bottom_val = bottom_data[i]; - 
const Dtype top_val = top_data[i]; - if (base == -1) { - EXPECT_NEAR(top_val, log(shift + scale * bottom_val), kDelta); - } else { - EXPECT_NEAR(top_val, log(shift + scale * bottom_val) / log(base), - kDelta); - } - } - } - - void TestLogGradient(const float base, const float scale, const float shift) { - LogBottomInit(); - LayerParameter layer_param; - layer_param.mutable_log_param()->set_base(base); - layer_param.mutable_log_param()->set_scale(scale); - layer_param.mutable_log_param()->set_shift(shift); - LogLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-2); - checker.CheckGradientEltwise(&layer, blob_bottom_vec_, blob_top_vec_); - } }; TYPED_TEST_CASE(NeuronLayerTest, TestDtypesAndDevices); @@ -382,88 +339,6 @@ TYPED_TEST(NeuronLayerTest, TestExpGradientBase2Shift1Scale3) { this->TestExpGradient(kBase, kScale, kShift); } -TYPED_TEST(NeuronLayerTest, TestLogLayer) { - typedef typename TypeParam::Dtype Dtype; - // Test default base of "-1" -- should actually set base := e. - const Dtype kBase = -1; - const Dtype kScale = 1; - const Dtype kShift = 0; - this->TestLogForward(kBase, kScale, kShift); -} - -TYPED_TEST(NeuronLayerTest, TestLogGradient) { - typedef typename TypeParam::Dtype Dtype; - // Test default base of "-1" -- should actually set base := e. 
- const Dtype kBase = -1; - const Dtype kScale = 1; - const Dtype kShift = 0; - this->TestLogGradient(kBase, kScale, kShift); -} - -TYPED_TEST(NeuronLayerTest, TestLogLayerBase2) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kBase = 2; - const Dtype kScale = 1; - const Dtype kShift = 0; - this->TestLogForward(kBase, kScale, kShift); -} - -TYPED_TEST(NeuronLayerTest, TestLogGradientBase2) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kBase = 2; - const Dtype kScale = 1; - const Dtype kShift = 0; - this->TestLogGradient(kBase, kScale, kShift); -} - -TYPED_TEST(NeuronLayerTest, TestLogLayerBase2Shift1) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kBase = 2; - const Dtype kScale = 1; - const Dtype kShift = 1; - this->TestLogForward(kBase, kScale, kShift); -} - -TYPED_TEST(NeuronLayerTest, TestLogGradientBase2Shift1) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kBase = 2; - const Dtype kScale = 1; - const Dtype kShift = 1; - this->TestLogGradient(kBase, kScale, kShift); -} - -TYPED_TEST(NeuronLayerTest, TestLogLayerBase2Scale3) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kBase = 2; - const Dtype kScale = 3; - const Dtype kShift = 0; - this->TestLogForward(kBase, kScale, kShift); -} - -TYPED_TEST(NeuronLayerTest, TestLogGradientBase2Scale3) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kBase = 2; - const Dtype kScale = 3; - const Dtype kShift = 0; - this->TestLogGradient(kBase, kScale, kShift); -} - -TYPED_TEST(NeuronLayerTest, TestLogLayerBase2Shift1Scale3) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kBase = 2; - const Dtype kScale = 3; - const Dtype kShift = 1; - this->TestLogForward(kBase, kScale, kShift); -} - -TYPED_TEST(NeuronLayerTest, TestLogGradientBase2Shift1Scale3) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kBase = 2; - const Dtype kScale = 3; - const Dtype kShift = 1; - this->TestLogGradient(kBase, kScale, kShift); -} - TYPED_TEST(NeuronLayerTest, 
TestDropoutHalf) { const float kDropoutRatio = 0.5; this->TestDropoutForward(kDropoutRatio); @@ -666,10 +541,14 @@ TYPED_TEST(NeuronLayerTest, TestPReLUInPlace) { caffe_copy(ip2.blobs()[0]->count(), ip.blobs()[0]->cpu_data(), ip2.blobs()[0]->mutable_cpu_data()); // Forward in-place + ip.Reshape(this->blob_bottom_vec_, this->blob_top_vec_); ip.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + prelu.Reshape(this->blob_top_vec_, this->blob_top_vec_); prelu.Forward(this->blob_top_vec_, this->blob_top_vec_); // Forward non-in-place + ip2.Reshape(blob_bottom_vec_2, blob_middle_vec_2); ip2.Forward(blob_bottom_vec_2, blob_middle_vec_2); + prelu2.Reshape(blob_middle_vec_2, blob_top_vec_2); prelu2.Forward(blob_middle_vec_2, blob_top_vec_2); // Check numbers for (int s = 0; s < blob_top_2->count(); ++s) { @@ -711,7 +590,7 @@ TYPED_TEST(NeuronLayerTest, TestPReLUInPlace) { #ifdef USE_CUDNN template -class CuDNNNeuronLayerTest : public GPUDeviceTest { +class CuDNNNeuronLayerTest : public ::testing::Test { protected: CuDNNNeuronLayerTest() : blob_bottom_(new Blob(2, 3, 4, 5)), @@ -734,6 +613,7 @@ class CuDNNNeuronLayerTest : public GPUDeviceTest { TYPED_TEST_CASE(CuDNNNeuronLayerTest, TestDtypes); TYPED_TEST(CuDNNNeuronLayerTest, TestReLUCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNReLULayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -748,6 +628,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNReLULayer layer(layer_param); GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); @@ -756,6 +637,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestReLUWithNegativeSlopeCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CHECK(google::protobuf::TextFormat::ParseFromString( "relu_param { negative_slope: 0.01 }", 
&layer_param)); @@ -775,6 +657,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUWithNegativeSlopeCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientWithNegativeSlopeCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CHECK(google::protobuf::TextFormat::ParseFromString( "relu_param { negative_slope: 0.01 }", &layer_param)); @@ -785,6 +668,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientWithNegativeSlopeCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNSigmoidLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -801,6 +685,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidGradientCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNSigmoidLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); @@ -809,6 +694,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidGradientCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestTanHCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNTanHLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -831,6 +717,7 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestTanHCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestTanHGradientCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNTanHLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); diff --git a/src/caffe/test/test_pooling_layer.cpp b/src/caffe/test/test_pooling_layer.cpp index 69f2d5c1135..e9964e7f0b7 100644 --- a/src/caffe/test/test_pooling_layer.cpp +++ b/src/caffe/test/test_pooling_layer.cpp @@ -608,7 +608,7 @@ TYPED_TEST(PoolingLayerTest, TestGradientAvePadded) { #ifdef USE_CUDNN template -class CuDNNPoolingLayerTest : public GPUDeviceTest { +class CuDNNPoolingLayerTest : public ::testing::Test { protected: CuDNNPoolingLayerTest() : blob_bottom_(new Blob()), @@ -963,6 +963,7 @@ class 
CuDNNPoolingLayerTest : public GPUDeviceTest { TYPED_TEST_CASE(CuDNNPoolingLayerTest, TestDtypes); TYPED_TEST(CuDNNPoolingLayerTest, TestSetupCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -976,6 +977,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestSetupCuDNN) { } TYPED_TEST(CuDNNPoolingLayerTest, TestSetupPaddedCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -992,6 +994,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestSetupPaddedCuDNN) { /* TYPED_TEST(CuDNNPoolingLayerTest, PrintBackwardCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; layer_param.set_kernelsize(3); layer_param.set_stride(2); @@ -1017,6 +1020,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, PrintBackwardCuDNN) { */ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxCuDNN) { + Caffe::set_mode(Caffe::GPU); this->TestForwardSquare(); this->TestForwardRectHigh(); this->TestForwardRectWide(); @@ -1026,6 +1030,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxCuDNN) { // the corresponding backward test. 
/* TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxTopMaskCuDNN) { + Caffe::set_mode(Caffe::GPU); this->blob_top_vec_.push_back(this->blob_top_mask_); this->TestForwardSquare(); this->TestForwardRectHigh(); @@ -1034,6 +1039,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxTopMaskCuDNN) { */ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxCuDNN) { + Caffe::set_mode(Caffe::GPU); for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; @@ -1053,6 +1059,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxCuDNN) { } TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxPaddedCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -1098,6 +1105,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxPaddedCuDNN) { /* TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxTopMaskCuDNN) { + Caffe::set_mode(Caffe::GPU); for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; @@ -1118,6 +1126,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxTopMaskCuDNN) { */ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardAveCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -1143,6 +1152,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardAveCuDNN) { } TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAveCuDNN) { + Caffe::set_mode(Caffe::GPU); for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; @@ -1160,6 +1170,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAveCuDNN) { } TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAvePaddedCuDNN) { + Caffe::set_mode(Caffe::GPU); for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { for (int 
kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; diff --git a/src/caffe/test/test_softmax_layer.cpp b/src/caffe/test/test_softmax_layer.cpp index 996da4b8f7c..f6674422e56 100644 --- a/src/caffe/test/test_softmax_layer.cpp +++ b/src/caffe/test/test_softmax_layer.cpp @@ -82,7 +82,7 @@ TYPED_TEST(SoftmaxLayerTest, TestGradient) { #ifdef USE_CUDNN template -class CuDNNSoftmaxLayerTest : public GPUDeviceTest { +class CuDNNSoftmaxLayerTest : public ::testing::Test { protected: CuDNNSoftmaxLayerTest() : blob_bottom_(new Blob(2, 10, 2, 3)), @@ -104,6 +104,7 @@ class CuDNNSoftmaxLayerTest : public GPUDeviceTest { TYPED_TEST_CASE(CuDNNSoftmaxLayerTest, TestDtypes); TYPED_TEST(CuDNNSoftmaxLayerTest, TestForwardCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNSoftmaxLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -137,6 +138,7 @@ TYPED_TEST(CuDNNSoftmaxLayerTest, TestForwardCuDNN) { } TYPED_TEST(CuDNNSoftmaxLayerTest, TestGradientCuDNN) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNSoftmaxLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); diff --git a/src/caffe/test/test_stochastic_pooling.cpp b/src/caffe/test/test_stochastic_pooling.cpp index f84464c322c..12962c65d85 100644 --- a/src/caffe/test/test_stochastic_pooling.cpp +++ b/src/caffe/test/test_stochastic_pooling.cpp @@ -16,10 +16,8 @@ using std::min; namespace caffe { -template -class StochasticPoolingLayerTest : public MultiDeviceTest { - typedef typename TypeParam::Dtype Dtype; - +template +class StochasticPoolingLayerTest : public ::testing::Test { protected: StochasticPoolingLayerTest() : blob_bottom_(new Blob()), @@ -47,14 +45,9 @@ class StochasticPoolingLayerTest : public MultiDeviceTest { vector*> blob_top_vec_; }; -template -class CPUStochasticPoolingLayerTest - : public StochasticPoolingLayerTest > { -}; - -TYPED_TEST_CASE(CPUStochasticPoolingLayerTest, TestDtypes); 
+TYPED_TEST_CASE(StochasticPoolingLayerTest, TestDtypes); -TYPED_TEST(CPUStochasticPoolingLayerTest, TestSetup) { +TYPED_TEST(StochasticPoolingLayerTest, TestSetup) { LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -67,16 +60,8 @@ TYPED_TEST(CPUStochasticPoolingLayerTest, TestSetup) { EXPECT_EQ(this->blob_top_->width(), 2); } -#ifndef CPU_ONLY - -template -class GPUStochasticPoolingLayerTest - : public StochasticPoolingLayerTest > { -}; - -TYPED_TEST_CASE(GPUStochasticPoolingLayerTest, TestDtypes); - -TYPED_TEST(GPUStochasticPoolingLayerTest, TestStochastic) { +TYPED_TEST(StochasticPoolingLayerTest, TestStochasticGPU) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; layer_param.set_phase(TRAIN); PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); @@ -119,7 +104,8 @@ TYPED_TEST(GPUStochasticPoolingLayerTest, TestStochastic) { EXPECT_GE(total / this->blob_top_->count(), 0.55); } -TYPED_TEST(GPUStochasticPoolingLayerTest, TestStochasticTestPhase) { +TYPED_TEST(StochasticPoolingLayerTest, TestStochasticGPUTestPhase) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; layer_param.set_phase(TEST); PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); @@ -156,7 +142,8 @@ TYPED_TEST(GPUStochasticPoolingLayerTest, TestStochasticTestPhase) { } } -TYPED_TEST(GPUStochasticPoolingLayerTest, TestGradient) { +TYPED_TEST(StochasticPoolingLayerTest, TestGradientGPU) { + Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; layer_param.set_phase(TRAIN); PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); @@ -171,6 +158,6 @@ TYPED_TEST(GPUStochasticPoolingLayerTest, TestGradient) { this->blob_top_vec_); } -#endif + } // namespace caffe diff --git a/src/caffe/test/test_triplet_loss_layer.cpp b/src/caffe/test/test_triplet_loss_layer.cpp new file mode 100644 index 00000000000..c8d9377fa23 --- /dev/null +++ 
b/src/caffe/test/test_triplet_loss_layer.cpp @@ -0,0 +1,107 @@ +#include +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class TripletLossLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + TripletLossLayerTest() + : blob_bottom_data_i_(new Blob(128, 10, 1, 1)), + blob_bottom_data_j_(new Blob(128, 10, 1, 1)), + blob_bottom_data_k_(new Blob(128, 10, 1, 1)), + blob_bottom_y_(new Blob(128, 1, 1, 1)), + blob_top_loss_(new Blob()) { + // fill the values + FillerParameter filler_param; + filler_param.set_mean(0.0); + filler_param.set_std(0.3); // distances~=1.0 to test both sides of margin + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_data_i_); + blob_bottom_vec_.push_back(blob_bottom_data_i_); + filler.Fill(this->blob_bottom_data_j_); + blob_bottom_vec_.push_back(blob_bottom_data_j_); + filler.Fill(this->blob_bottom_data_k_); + blob_bottom_vec_.push_back(blob_bottom_data_k_); + for (int i = 0; i < blob_bottom_y_->count(); ++i) { + blob_bottom_y_->mutable_cpu_data()[i] = caffe_rng_rand() % 2; // 0 or 1 + } + blob_bottom_vec_.push_back(blob_bottom_y_); + blob_top_vec_.push_back(blob_top_loss_); + } + virtual ~TripletLossLayerTest() { + delete blob_bottom_data_i_; + delete blob_bottom_data_j_; + delete blob_bottom_data_k_; + delete blob_bottom_y_; + delete blob_top_loss_; + } + + Blob* const blob_bottom_data_i_; + Blob* const blob_bottom_data_j_; + Blob* const blob_bottom_data_k_; + Blob* const blob_bottom_y_; + Blob* const blob_top_loss_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(TripletLossLayerTest, TestDtypesAndDevices); + +TYPED_TEST(TripletLossLayerTest, TestForward) { + typedef typename 
TypeParam::Dtype Dtype; + LayerParameter layer_param; + TripletLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // manually compute to compare + const Dtype margin = layer_param.contrastive_loss_param().margin(); + const int num = this->blob_bottom_data_i_->num(); + const int channels = this->blob_bottom_data_i_->channels(); + Dtype loss(0); + for (int i = 0; i < num; ++i) { + Dtype dist_sq(0); + for (int j = 0; j < channels; ++j) { + Dtype diff = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - + this->blob_bottom_data_j_->cpu_data()[i*channels+j]; + dist_sq += diff*diff; + } + if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs + loss += dist_sq; + } else { + loss += std::max(margin-dist_sq, Dtype(0)); + } + } + loss /= static_cast(num) * Dtype(2); + EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); +} + +TYPED_TEST(TripletLossLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + TripletLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + GradientChecker checker(1e-2, 1e-2, 1701); + // check the gradient for the first two bottom layers + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 1); +} + +} // namespace caffe diff --git a/src/caffe/util/math_functions.cpp b/src/caffe/util/math_functions.cpp index 0aab6b17b85..13e17be582b 100644 --- a/src/caffe/util/math_functions.cpp +++ b/src/caffe/util/math_functions.cpp @@ -206,16 +206,6 @@ void caffe_exp(const int n, const double* a, double* y) { vdExp(n, a, y); } -template <> -void caffe_log(const int n, const float* a, float* y) { - vsLn(n, a, y); -} - -template <> -void caffe_log(const int n, const double* a, double* y) { - vdLn(n, a, y); -} - template <> void 
caffe_abs(const int n, const float* a, float* y) { vsAbs(n, a, y); diff --git a/src/caffe/util/math_functions.cu b/src/caffe/util/math_functions.cu index 2631a0740d6..43e65eb9a69 100644 --- a/src/caffe/util/math_functions.cu +++ b/src/caffe/util/math_functions.cu @@ -324,27 +324,6 @@ void caffe_gpu_exp(const int N, const double* a, double* y) { N, a, y); } -template -__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { - CUDA_KERNEL_LOOP(index, n) { - y[index] = log(a[index]); - } -} - -template <> -void caffe_gpu_log(const int N, const float* a, float* y) { - // NOLINT_NEXT_LINE(whitespace/operators) - log_kernel<<>>( - N, a, y); -} - -template <> -void caffe_gpu_log(const int N, const double* a, double* y) { - // NOLINT_NEXT_LINE(whitespace/operators) - log_kernel<<>>( - N, a, y); -} - template __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { From 0e83e8292cfae423052e7667eb1440e7a7cac6db Mon Sep 17 00:00:00 2001 From: Wangyida Date: Mon, 15 Jun 2015 10:17:44 +0800 Subject: [PATCH 59/82] macro define in upgrade_proto restore --- .../siamese/convert_mnist_siamese_data.cpp | 26 +- include/caffe/data_layers.hpp.orig | 332 +++++++ include/caffe/data_transformer.hpp | 9 +- include/caffe/data_transformer.hpp.orig | 167 ++++ include/caffe/filler.hpp | 56 -- include/caffe/layer.hpp | 1 + include/caffe/net.hpp | 3 + include/caffe/neuron_layers.hpp | 70 +- include/caffe/python_layer.hpp | 35 +- include/caffe/solver.hpp | 15 +- include/caffe/test/test_caffe_main.hpp | 28 +- .../caffe/test/test_gradient_check_util.hpp | 7 +- include/caffe/util/math_functions.hpp | 6 + include/caffe/util/mkl_alternate.hpp | 1 + include/caffe/vision_layers.hpp | 10 +- src/caffe/blob.cpp | 1 + src/caffe/data_transformer.cpp.orig | 98 +- src/caffe/layers/base_data_layer.cpp | 94 +- src/caffe/layers/base_data_layer.cu | 17 +- src/caffe/layers/concat_layer.cu | 19 +- src/caffe/layers/contrastive_loss_layer.cpp | 25 +- 
src/caffe/layers/contrastive_loss_layer.cu | 34 +- src/caffe/layers/conv_layer.cpp | 7 - src/caffe/layers/conv_layer.cu | 7 - src/caffe/layers/cudnn_conv_layer.cu | 2 - src/caffe/layers/data_layer.cpp | 90 +- src/caffe/layers/deconv_layer.cpp | 7 - src/caffe/layers/deconv_layer.cu | 7 - src/caffe/layers/flatten_layer.cpp | 16 +- src/caffe/layers/image_data_layer.cpp | 42 +- src/caffe/layers/inner_product_layer.cpp | 4 +- src/caffe/layers/inner_product_layer.cu | 4 +- src/caffe/layers/lrn_layer.cu | 102 +- src/caffe/layers/mvn_layer.cpp | 23 +- src/caffe/layers/mvn_layer.cu | 23 +- src/caffe/layers/pooling_layer.cu | 218 +++-- src/caffe/layers/prelu_layer.cpp | 4 +- src/caffe/layers/prelu_layer.cu | 16 +- .../sigmoid_cross_entropy_loss_layer.cpp | 2 +- .../sigmoid_cross_entropy_loss_layer.cu | 22 +- src/caffe/layers/slice_layer.cu | 47 +- src/caffe/net.cpp | 46 +- src/caffe/proto/caffe.proto | 180 +++- src/caffe/solver.cpp | 688 ++----------- src/caffe/test/test_accuracy_layer.cpp | 5 +- src/caffe/test/test_argmax_layer.cpp | 3 +- .../test/test_contrastive_loss_layer.cpp | 58 +- src/caffe/test/test_convolution_layer.cpp | 9 +- .../test/test_data/generate_sample_data.py | 28 +- src/caffe/test/test_dummy_data_layer.cpp | 5 +- src/caffe/test/test_filler.cpp | 98 ++ src/caffe/test/test_flatten_layer.cpp | 46 +- src/caffe/test/test_gradient_based_solver.cpp | 915 +++--------------- src/caffe/test/test_im2col_kernel.cu | 4 +- src/caffe/test/test_math_functions.cpp | 51 +- .../test_multinomial_logistic_loss_layer.cpp | 3 +- src/caffe/test/test_net.cpp | 65 ++ src/caffe/test/test_neuron_layer.cpp | 139 ++- src/caffe/test/test_pooling_layer.cpp | 13 +- src/caffe/test/test_softmax_layer.cpp | 4 +- src/caffe/test/test_stochastic_pooling.cpp | 35 +- src/caffe/test/test_triplet_loss_layer.cpp | 107 -- src/caffe/util/math_functions.cpp | 10 + src/caffe/util/math_functions.cu | 21 + 64 files changed, 1980 insertions(+), 2250 deletions(-) create mode 100644 
include/caffe/data_layers.hpp.orig create mode 100644 include/caffe/data_transformer.hpp.orig delete mode 100644 src/caffe/test/test_triplet_loss_layer.cpp diff --git a/examples/siamese/convert_mnist_siamese_data.cpp b/examples/siamese/convert_mnist_siamese_data.cpp index ad08036fb08..e9403d7ab94 100644 --- a/examples/siamese/convert_mnist_siamese_data.cpp +++ b/examples/siamese/convert_mnist_siamese_data.cpp @@ -1,4 +1,3 @@ -// // This script converts the MNIST dataset to the leveldb format used // by caffe to train siamese network. // Usage: @@ -74,37 +73,44 @@ void convert_dataset(const char* image_filename, const char* label_filename, char label_i; char label_j; - char* pixels = new char[2 * rows * cols]; + char label_k; + char* pixels = new char[3 * rows * cols]; const int kMaxKeyLength = 10; char key[kMaxKeyLength]; std::string value; caffe::Datum datum; - datum.set_channels(2); // one channel for each image in the pair + datum.set_channels(3); // one channel for each image in the pair datum.set_height(rows); datum.set_width(cols); LOG(INFO) << "A total of " << num_items << " items."; LOG(INFO) << "Rows: " << rows << " Cols: " << cols; for (int itemid = 0; itemid < num_items; ++itemid) { - int i = caffe::caffe_rng_rand() % num_items; // pick a random pair + int i = caffe::caffe_rng_rand() % num_items; // pick triplet groups int j = caffe::caffe_rng_rand() % num_items; + int k = caffe::caffe_rng_rand() % num_items; read_image(&image_file, &label_file, i, rows, cols, pixels, &label_i); read_image(&image_file, &label_file, j, rows, cols, pixels + (rows * cols), &label_j); - datum.set_data(pixels, 2*rows*cols); - if (label_i == label_j) { + read_image(&image_file, &label_file, k, rows, cols, + pixels + (2 * rows * cols), &label_k); + + datum.set_data(pixels, 3*rows*cols); + if (label_i == label_j && label_i != label_k) { datum.set_label(1); + + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", itemid); + db->Put(leveldb::WriteOptions(), 
std::string(key), value); } else { + itemid--; datum.set_label(0); } - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", itemid); - db->Put(leveldb::WriteOptions(), std::string(key), value); } delete db; - delete [] pixels; + delete pixels; } int main(int argc, char** argv) { diff --git a/include/caffe/data_layers.hpp.orig b/include/caffe/data_layers.hpp.orig new file mode 100644 index 00000000000..f213c7f1afe --- /dev/null +++ b/include/caffe/data_layers.hpp.orig @@ -0,0 +1,332 @@ +#ifndef CAFFE_DATA_LAYERS_HPP_ +#define CAFFE_DATA_LAYERS_HPP_ + +#include +#include +#include +<<<<<<< ca976a2b520c8024cc7f41a048d2c1abe73f6022 +======= + +#include "boost/scoped_ptr.hpp" +>>>>>>> macro define in upgrade_proto +#include "hdf5.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/data_transformer.hpp" +#include "caffe/filler.hpp" +#include "caffe/internal_thread.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/db.hpp" + +namespace caffe { + +/** + * @brief Provides base for data layers that feed blobs to the Net. + * + * TODO(dox): thorough documentation for Forward and proto params. + */ +template +class BaseDataLayer : public Layer { + public: + explicit BaseDataLayer(const LayerParameter& param); + // LayerSetUp: implements common data layer setup functionality, and calls + // DataLayerSetUp to do special data layer setup for individual layer types. + // This method may not be overridden except by the BasePrefetchingDataLayer. + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void DataLayerSetUp(const vector*>& bottom, + const vector*>& top) {} + // Data layers have no bottoms, so reshaping is trivial. 
+ virtual void Reshape(const vector*>& bottom, + const vector*>& top) {} + + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + + protected: + TransformationParameter transform_param_; + shared_ptr > data_transformer_; + bool output_labels_; +}; + +template +class BasePrefetchingDataLayer : + public BaseDataLayer, public InternalThread { + public: + explicit BasePrefetchingDataLayer(const LayerParameter& param) + : BaseDataLayer(param) {} + // LayerSetUp: implements common data layer setup functionality, and calls + // DataLayerSetUp to do special data layer setup for individual layer types. + // This method may not be overridden. + void LayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + virtual void CreatePrefetchThread(); + virtual void JoinPrefetchThread(); + // The thread's function + virtual void InternalThreadEntry() {} + + protected: + Blob prefetch_data_; + Blob prefetch_label_; + Blob transformed_data_; +}; + +template +class DataLayer : public BasePrefetchingDataLayer { + public: + explicit DataLayer(const LayerParameter& param) + : BasePrefetchingDataLayer(param) {} + virtual ~DataLayer(); + virtual void DataLayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Data"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int MinTopBlobs() const { return 1; } + virtual inline int MaxTopBlobs() const { return 2; } + + protected: + virtual void InternalThreadEntry(); + + shared_ptr db_; + shared_ptr cursor_; +}; + +/** + * @brief Provides data to the Net generated by a Filler. + * + * TODO(dox): thorough documentation for Forward and proto params. 
+ */ +template +class DummyDataLayer : public Layer { + public: + explicit DummyDataLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + // Data layers have no bottoms, so reshaping is trivial. + virtual void Reshape(const vector*>& bottom, + const vector*>& top) {} + + virtual inline const char* type() const { return "DummyData"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int MinTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + + vector > > fillers_; + vector refill_; +}; + +/** + * @brief Provides data to the Net from HDF5 files. + * + * TODO(dox): thorough documentation for Forward and proto params. + */ +template +class HDF5DataLayer : public Layer { + public: + explicit HDF5DataLayer(const LayerParameter& param) + : Layer(param) {} + virtual ~HDF5DataLayer(); + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + // Data layers have no bottoms, so reshaping is trivial. 
+ virtual void Reshape(const vector*>& bottom, + const vector*>& top) {} + + virtual inline const char* type() const { return "HDF5Data"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int MinTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + virtual void LoadHDF5FileData(const char* filename); + + std::vector hdf_filenames_; + unsigned int num_files_; + unsigned int current_file_; + hsize_t current_row_; + std::vector > > hdf_blobs_; + std::vector data_permutation_; + std::vector file_permutation_; +}; + +/** + * @brief Write blobs to disk as HDF5 files. + * + * TODO(dox): thorough documentation for Forward and proto params. + */ +template +class HDF5OutputLayer : public Layer { + public: + explicit HDF5OutputLayer(const LayerParameter& param) + : Layer(param), file_opened_(false) {} + virtual ~HDF5OutputLayer(); + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + // Data layers have no bottoms, so reshaping is trivial. 
+ virtual void Reshape(const vector*>& bottom, + const vector*>& top) {} + + virtual inline const char* type() const { return "HDF5Output"; } + // TODO: no limit on the number of blobs + virtual inline int ExactNumBottomBlobs() const { return 2; } + virtual inline int ExactNumTopBlobs() const { return 0; } + + inline std::string file_name() const { return file_name_; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void SaveBlobs(); + + bool file_opened_; + std::string file_name_; + hid_t file_id_; + Blob data_blob_; + Blob label_blob_; +}; + +/** + * @brief Provides data to the Net from image files. + * + * TODO(dox): thorough documentation for Forward and proto params. + */ +template +class ImageDataLayer : public BasePrefetchingDataLayer { + public: + explicit ImageDataLayer(const LayerParameter& param) + : BasePrefetchingDataLayer(param) {} + virtual ~ImageDataLayer(); + virtual void DataLayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "ImageData"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int ExactNumTopBlobs() const { return 2; } + + protected: + shared_ptr prefetch_rng_; + virtual void ShuffleImages(); + virtual void InternalThreadEntry(); + + vector > lines_; + int lines_id_; +}; + +/** + * @brief Provides data to the Net from memory. + * + * TODO(dox): thorough documentation for Forward and proto params. 
+ */ +template +class MemoryDataLayer : public BaseDataLayer { + public: + explicit MemoryDataLayer(const LayerParameter& param) + : BaseDataLayer(param), has_new_data_(false) {} + virtual void DataLayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "MemoryData"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int ExactNumTopBlobs() const { return 2; } + + virtual void AddDatumVector(const vector& datum_vector); +#ifdef USE_OPENCV + virtual void AddMatVector(const vector& mat_vector, + const vector& labels); +#endif // USE_OPENCV + + // Reset should accept const pointers, but can't, because the memory + // will be given to Blob, which is mutable + void Reset(Dtype* data, Dtype* label, int n); + void set_batch_size(int new_size); + + int batch_size() { return batch_size_; } + int channels() { return channels_; } + int height() { return height_; } + int width() { return width_; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + + int batch_size_, channels_, height_, width_, size_; + Dtype* data_; + Dtype* labels_; + int n_; + size_t pos_; + Blob added_data_; + Blob added_label_; + bool has_new_data_; +}; + +/** + * @brief Provides data to the Net from windows of images files, specified + * by a window data file. + * + * TODO(dox): thorough documentation for Forward and proto params. 
+ */ +template +class WindowDataLayer : public BasePrefetchingDataLayer { + public: + explicit WindowDataLayer(const LayerParameter& param) + : BasePrefetchingDataLayer(param) {} + virtual ~WindowDataLayer(); + virtual void DataLayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "WindowData"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int ExactNumTopBlobs() const { return 2; } + + protected: + virtual unsigned int PrefetchRand(); + virtual void InternalThreadEntry(); + + shared_ptr prefetch_rng_; + vector > > image_database_; + enum WindowField { IMAGE_INDEX, LABEL, OVERLAP, X1, Y1, X2, Y2, NUM }; + vector > fg_windows_; + vector > bg_windows_; + Blob data_mean_; + vector mean_values_; + bool has_mean_file_; + bool has_mean_values_; + bool cache_images_; + vector > image_database_cache_; +}; + +} // namespace caffe + +#endif // CAFFE_DATA_LAYERS_HPP_ diff --git a/include/caffe/data_transformer.hpp b/include/caffe/data_transformer.hpp index 980c8268c66..0ad68c80216 100644 --- a/include/caffe/data_transformer.hpp +++ b/include/caffe/data_transformer.hpp @@ -50,7 +50,6 @@ class DataTransformer { void Transform(const vector & datum_vector, Blob* transformed_blob); -#ifdef USE_OPENCV /** * @brief Applies the transformation defined in the data layer's * transform_param block to a vector of Mat. @@ -63,6 +62,7 @@ class DataTransformer { */ void Transform(const vector & mat_vector, Blob* transformed_blob); + /** * @brief Applies the transformation defined in the data layer's * transform_param block to a cv::Mat @@ -74,7 +74,6 @@ class DataTransformer { * set_cpu_data() is used. See image_data_layer.cpp for an example. 
*/ void Transform(const cv::Mat& cv_img, Blob* transformed_blob); -#endif // USE_OPENCV /** * @brief Applies the same transformation defined in the data layer's @@ -89,7 +88,6 @@ class DataTransformer { */ void Transform(Blob* input_blob, Blob* transformed_blob); -<<<<<<< ed5d44f53efe58e4cc2cd299f23c5164cbd7172c /** * @brief Infers the shape of transformed_blob will have when * the transformation is applied to the data. @@ -115,7 +113,6 @@ class DataTransformer { * @param mat_vector * A vector of Mat containing the data to be transformed. */ -#ifdef USE_OPENCV vector InferBlobShape(const vector & mat_vector); /** * @brief Infers the shape of transformed_blob will have when @@ -125,10 +122,7 @@ class DataTransformer { * cv::Mat containing the data to be transformed. */ vector InferBlobShape(const cv::Mat& cv_img); -#endif // USE_OPENCV -======= ->>>>>>> New triplet loss layer added(beta1 version-no test source files) protected: /** * @brief Generates a random integer from Uniform({0, 1, ..., n-1}). @@ -154,3 +148,4 @@ class DataTransformer { } // namespace caffe #endif // CAFFE_DATA_TRANSFORMER_HPP_ + diff --git a/include/caffe/data_transformer.hpp.orig b/include/caffe/data_transformer.hpp.orig new file mode 100644 index 00000000000..2cf205c6c8c --- /dev/null +++ b/include/caffe/data_transformer.hpp.orig @@ -0,0 +1,167 @@ +#ifndef CAFFE_DATA_TRANSFORMER_HPP +#define CAFFE_DATA_TRANSFORMER_HPP + +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Applies common transformations to the input data, such as + * scaling, mirroring, substracting the image mean... + */ +template +class DataTransformer { + public: + explicit DataTransformer(const TransformationParameter& param, Phase phase); + virtual ~DataTransformer() {} + + /** + * @brief Initialize the Random number generations if needed by the + * transformation. 
+ */ + void InitRand(); + + /** + * @brief Applies the transformation defined in the data layer's + * transform_param block to the data. + * + * @param datum + * Datum containing the data to be transformed. + * @param transformed_blob + * This is destination blob. It can be part of top blob's data if + * set_cpu_data() is used. See data_layer.cpp for an example. + */ + void Transform(const Datum& datum, Blob* transformed_blob); + + /** + * @brief Applies the transformation defined in the data layer's + * transform_param block to a vector of Datum. + * + * @param datum_vector + * A vector of Datum containing the data to be transformed. + * @param transformed_blob + * This is destination blob. It can be part of top blob's data if + * set_cpu_data() is used. See memory_layer.cpp for an example. + */ + void Transform(const vector & datum_vector, + Blob* transformed_blob); + +#ifdef USE_OPENCV + /** + * @brief Applies the transformation defined in the data layer's + * transform_param block to a vector of Mat. + * + * @param mat_vector + * A vector of Mat containing the data to be transformed. + * @param transformed_blob + * This is destination blob. It can be part of top blob's data if + * set_cpu_data() is used. See memory_layer.cpp for an example. + */ + void Transform(const vector & mat_vector, + Blob* transformed_blob); + + /** + * @brief Applies the transformation defined in the data layer's + * transform_param block to a cv::Mat + * + * @param cv_img + * cv::Mat containing the data to be transformed. + * @param transformed_blob + * This is destination blob. It can be part of top blob's data if + * set_cpu_data() is used. See image_data_layer.cpp for an example. + */ + void Transform(const cv::Mat& cv_img, Blob* transformed_blob); +#endif // USE_OPENCV + + /** + * @brief Applies the same transformation defined in the data layer's + * transform_param block to all the num images in a input_blob. 
+ * + * @param input_blob + * A Blob containing the data to be transformed. It applies the same + * transformation to all the num images in the blob. + * @param transformed_blob + * This is destination blob, it will contain as many images as the + * input blob. It can be part of top blob's data. + */ + void Transform(Blob* input_blob, Blob* transformed_blob); + +<<<<<<< ca976a2b520c8024cc7f41a048d2c1abe73f6022 +<<<<<<< ed5d44f53efe58e4cc2cd299f23c5164cbd7172c +======= +>>>>>>> macro define in upgrade_proto + /** + * @brief Infers the shape of transformed_blob will have when + * the transformation is applied to the data. + * + * @param datum + * Datum containing the data to be transformed. + */ + vector InferBlobShape(const Datum& datum); + /** + * @brief Infers the shape of transformed_blob will have when + * the transformation is applied to the data. + * It uses the first element to infer the shape of the blob. + * + * @param datum_vector + * A vector of Datum containing the data to be transformed. + */ + vector InferBlobShape(const vector & datum_vector); + /** + * @brief Infers the shape of transformed_blob will have when + * the transformation is applied to the data. + * It uses the first element to infer the shape of the blob. + * + * @param mat_vector + * A vector of Mat containing the data to be transformed. + */ +<<<<<<< ca976a2b520c8024cc7f41a048d2c1abe73f6022 +#ifdef USE_OPENCV +======= +>>>>>>> macro define in upgrade_proto + vector InferBlobShape(const vector & mat_vector); + /** + * @brief Infers the shape of transformed_blob will have when + * the transformation is applied to the data. + * + * @param cv_img + * cv::Mat containing the data to be transformed. 
+ */ + vector InferBlobShape(const cv::Mat& cv_img); +<<<<<<< ca976a2b520c8024cc7f41a048d2c1abe73f6022 +#endif // USE_OPENCV + +======= +>>>>>>> New triplet loss layer added(beta1 version-no test source files) +======= + +>>>>>>> macro define in upgrade_proto + protected: + /** + * @brief Generates a random integer from Uniform({0, 1, ..., n-1}). + * + * @param n + * The upperbound (exclusive) value of the random number. + * @return + * A uniformly random integer value from ({0, 1, ..., n-1}). + */ + virtual int Rand(int n); + + void Transform(const Datum& datum, Dtype* transformed_data); + // Tranformation parameters + TransformationParameter param_; + + + shared_ptr rng_; + Phase phase_; + Blob data_mean_; + vector mean_values_; +}; + +} // namespace caffe + +#endif // CAFFE_DATA_TRANSFORMER_HPP_ diff --git a/include/caffe/filler.hpp b/include/caffe/filler.hpp index 888f4a4ba3b..ff3542e1f99 100644 --- a/include/caffe/filler.hpp +++ b/include/caffe/filler.hpp @@ -208,60 +208,6 @@ class MSRAFiller : public Filler { } }; -/*! -@brief Fills a Blob with coefficients for bilinear interpolation. - -A common use case is with the DeconvolutionLayer acting as upsampling. -You can upsample a feature map with shape of (B, C, H, W) by any integer factor -using the following proto. -\code -layer { - name: "upsample", type: "Deconvolution" - bottom: "{{bottom_name}}" top: "{{top_name}}" - convolution_param { - kernel_size: {{2 * factor - factor % 2}} stride: {{factor}} - num_output: {{C}} group: {{C}} - pad: {{ceil((factor - 1) / 2.)}} - weight_filler: { type: "bilinear" } bias_term: false - } - param { lr_mult: 0 decay_mult: 0 } -} -\endcode -Please use this by replacing `{{}}` with your values. By specifying -`num_output: {{C}} group: {{C}}`, it behaves as -channel-wise convolution. The filter shape of this deconvolution layer will be -(C, 1, K, K) where K is `kernel_size`, and this filler will set a (K, K) -interpolation kernel for every channel of the filter identically. 
The resulting -shape of the top feature map will be (B, C, factor * H, factor * W). -Note that the learning rate and the -weight decay are set to 0 in order to keep coefficient values of bilinear -interpolation unchanged during training. If you apply this to an image, this -operation is equivalent to the following call in Python with Scikit.Image. -\code{.py} -out = skimage.transform.rescale(img, factor, mode='constant', cval=0) -\endcode - */ -template -class BilinearFiller : public Filler { - public: - explicit BilinearFiller(const FillerParameter& param) - : Filler(param) {} - virtual void Fill(Blob* blob) { - CHECK_EQ(blob->num_axes(), 4) << "Blob must be 4 dim."; - CHECK_EQ(blob->width(), blob->height()) << "Filter must be square"; - Dtype* data = blob->mutable_cpu_data(); - int f = ceil(blob->width() / 2.); - float c = (2 * f - 1 - f % 2) / (2. * f); - for (int i = 0; i < blob->count(); ++i) { - float x = i % blob->width(); - float y = (i / blob->width()) % blob->height(); - data[i] = (1 - fabs(x / f - c)) * (1 - fabs(y / f - c)); - } - CHECK_EQ(this->filler_param_.sparse(), -1) - << "Sparsity not supported by this Filler."; - } -}; - /** * @brief Get a specific filler from the specification given in FillerParameter. 
* @@ -283,8 +229,6 @@ Filler* GetFiller(const FillerParameter& param) { return new XavierFiller(param); } else if (type == "msra") { return new MSRAFiller(param); - } else if (type == "bilinear") { - return new BilinearFiller(param); } else { CHECK(false) << "Unknown filler name: " << param.type(); } diff --git a/include/caffe/layer.hpp b/include/caffe/layer.hpp index 3103ec6313a..a0d1d4ecc94 100644 --- a/include/caffe/layer.hpp +++ b/include/caffe/layer.hpp @@ -453,6 +453,7 @@ inline Dtype Layer::Forward(const vector*>& bottom, // Lock during forward to ensure sequential forward Lock(); Dtype loss = 0; + Reshape(bottom, top); switch (Caffe::mode()) { case Caffe::CPU: Forward_cpu(bottom, top); diff --git a/include/caffe/net.hpp b/include/caffe/net.hpp index a6246bc9027..1bf07d28d13 100644 --- a/include/caffe/net.hpp +++ b/include/caffe/net.hpp @@ -155,6 +155,9 @@ class Net { inline const vector& blob_loss_weights() const { return blob_loss_weights_; } + inline const vector& layer_need_backward() const { + return layer_need_backward_; + } /// @brief returns the parameters inline const vector > >& params() const { return params_; diff --git a/include/caffe/neuron_layers.hpp b/include/caffe/neuron_layers.hpp index 323215134c7..c2e0774aaa2 100644 --- a/include/caffe/neuron_layers.hpp +++ b/include/caffe/neuron_layers.hpp @@ -8,7 +8,6 @@ #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/layer.hpp" -#include "caffe/net.hpp" #include "caffe/proto/caffe.pb.h" #define HDF5_DATA_DATASET_NAME "data" @@ -268,6 +267,72 @@ class ExpLayer : public NeuronLayer { Dtype inner_scale_, outer_scale_; }; +/** + * @brief Computes @f$ y = log_{\gamma}(\alpha x + \beta) @f$, + * as specified by the scale @f$ \alpha @f$, shift @f$ \beta @f$, + * and base @f$ \gamma @f$. 
+ */ +template +class LogLayer : public NeuronLayer { + public: + /** + * @param param provides LogParameter log_param, + * with LogLayer options: + * - scale (\b optional, default 1) the scale @f$ \alpha @f$ + * - shift (\b optional, default 0) the shift @f$ \beta @f$ + * - base (\b optional, default -1 for a value of @f$ e \approx 2.718 @f$) + * the base @f$ \gamma @f$ + */ + explicit LogLayer(const LayerParameter& param) + : NeuronLayer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Log"; } + + protected: + /** + * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$ + * @param top output Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the computed outputs @f$ + * y = log_{\gamma}(\alpha x + \beta) + * @f$ + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the error gradient w.r.t. the exp inputs. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (N \times C \times H \times W) @f$ + * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ + * with respect to computed outputs @f$ y @f$ + * @param propagate_down see Layer::Backward. 
+ * @param bottom input Blob vector (length 1) + * -# @f$ (N \times C \times H \times W) @f$ + * the inputs @f$ x @f$; Backward fills their diff with + * gradients @f$ + * \frac{\partial E}{\partial x} = + * \frac{\partial E}{\partial y} y \alpha \log_e(gamma) + * @f$ if propagate_down[0] + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + Dtype base_scale_; + Dtype input_scale_, input_shift_; + Dtype backward_num_scale_; +}; + /** * @brief Computes @f$ y = (\alpha x + \beta) ^ \gamma @f$, * as specified by the scale @f$ \alpha @f$, shift @f$ \beta @f$, @@ -734,7 +799,8 @@ class PReLULayer : public NeuronLayer { const vector& propagate_down, const vector*>& bottom); bool channel_shared_; - Blob multiplier_; // dot multipler for backward computation of params + Blob multiplier_; // dot multiplier for backward computation of params + Blob backward_buff_; // temporary buffer for backward computation Blob bottom_memory_; // memory for in-place computation }; diff --git a/include/caffe/python_layer.hpp b/include/caffe/python_layer.hpp index c43c1e8a91b..19cf18c9742 100644 --- a/include/caffe/python_layer.hpp +++ b/include/caffe/python_layer.hpp @@ -18,17 +18,22 @@ class PythonLayer : public Layer { virtual void LayerSetUp(const vector*>& bottom, const vector*>& top) { - self_.attr("param_str") = bp::str( - this->layer_param_.python_param().param_str()); - self_.attr("setup")(bottom, top); + try { + self_.attr("setup")(bottom, top); + } catch (bp::error_already_set) { + PyErr_Print(); + throw; + } } + virtual void Reshape(const vector*>& bottom, const vector*>& top) { - self_.attr("reshape")(bottom, top); - } - - virtual inline bool ShareInParallel() const { - return this->layer_param_.python_param().share_in_parallel(); + try { + self_.attr("reshape")(bottom, top); + } catch (bp::error_already_set) { + 
PyErr_Print(); + throw; + } } virtual inline const char* type() const { return "Python"; } @@ -36,11 +41,21 @@ class PythonLayer : public Layer { protected: virtual void Forward_cpu(const vector*>& bottom, const vector*>& top) { - self_.attr("forward")(bottom, top); + try { + self_.attr("forward")(bottom, top); + } catch (bp::error_already_set) { + PyErr_Print(); + throw; + } } virtual void Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - self_.attr("backward")(top, propagate_down, bottom); + try { + self_.attr("backward")(top, propagate_down, bottom); + } catch (bp::error_already_set) { + PyErr_Print(); + throw; + } } private: diff --git a/include/caffe/solver.hpp b/include/caffe/solver.hpp index d5b371cd171..2ecf539baef 100644 --- a/include/caffe/solver.hpp +++ b/include/caffe/solver.hpp @@ -33,7 +33,7 @@ typedef boost::function ActionCallback; /** * @brief An interface for classes that perform optimization on Net%s. * - * Requires implementation of ComputeUpdateValue to compute a parameter update + * Requires implementation of ApplyUpdate to compute a parameter update * given the current state of the Net parameters. */ template @@ -85,8 +85,8 @@ class Solver { void CheckSnapshotWritePermissions(); protected: - // Get the update value for the current iteration. - virtual void ComputeUpdateValue() = 0; + // Make and apply the update value for the current iteration. + virtual void ApplyUpdate() = 0; // The Solver::Snapshot function implements the basic snapshotting utility // that stores the learned net. 
You should implement the SnapshotSolverState() // function that produces a SolverState protocol buffer that needs to be @@ -165,7 +165,10 @@ class SGDSolver : public Solver { protected: void PreSolve(); Dtype GetLearningRate(); - virtual void ComputeUpdateValue(); + virtual void ApplyUpdate(); + virtual void Normalize(int param_id); + virtual void Regularize(int param_id); + virtual void ComputeUpdateValue(int param_id, Dtype rate); virtual void ClipGradients(); virtual void SnapshotSolverState(const string& model_filename); virtual void SnapshotSolverStateToBinaryProto(const string& model_filename); @@ -190,7 +193,7 @@ class NesterovSolver : public SGDSolver { : SGDSolver(param_file) {} protected: - virtual void ComputeUpdateValue(); + virtual void ComputeUpdateValue(int param_id, Dtype rate); DISABLE_COPY_AND_ASSIGN(NesterovSolver); }; @@ -204,7 +207,7 @@ class AdaGradSolver : public SGDSolver { : SGDSolver(param_file) { constructor_sanity_check(); } protected: - virtual void ComputeUpdateValue(); + virtual void ComputeUpdateValue(int param_id, Dtype rate); void constructor_sanity_check() { CHECK_EQ(0, this->param_.momentum()) << "Momentum cannot be used with AdaGrad."; diff --git a/include/caffe/test/test_caffe_main.hpp b/include/caffe/test/test_caffe_main.hpp index bd5f31e063f..fc156091476 100644 --- a/include/caffe/test/test_caffe_main.hpp +++ b/include/caffe/test/test_caffe_main.hpp @@ -40,34 +40,36 @@ class MultiDeviceTest : public ::testing::Test { typedef ::testing::Types TestDtypes; -struct FloatCPU { - typedef float Dtype; +template +struct CPUDevice { + typedef TypeParam Dtype; static const Caffe::Brew device = Caffe::CPU; }; -struct DoubleCPU { - typedef double Dtype; - static const Caffe::Brew device = Caffe::CPU; +template +class CPUDeviceTest : public MultiDeviceTest > { }; #ifdef CPU_ONLY -typedef ::testing::Types TestDtypesAndDevices; +typedef ::testing::Types, + CPUDevice > TestDtypesAndDevices; #else -struct FloatGPU { - typedef float Dtype; 
+template +struct GPUDevice { + typedef TypeParam Dtype; static const Caffe::Brew device = Caffe::GPU; }; -struct DoubleGPU { - typedef double Dtype; - static const Caffe::Brew device = Caffe::GPU; +template +class GPUDeviceTest : public MultiDeviceTest > { }; -typedef ::testing::Types - TestDtypesAndDevices; +typedef ::testing::Types, CPUDevice, + GPUDevice, GPUDevice > + TestDtypesAndDevices; #endif diff --git a/include/caffe/test/test_gradient_check_util.hpp b/include/caffe/test/test_gradient_check_util.hpp index 3e63cb52ea6..25f35d1589e 100644 --- a/include/caffe/test/test_gradient_check_util.hpp +++ b/include/caffe/test/test_gradient_check_util.hpp @@ -84,11 +84,14 @@ void GradientChecker::CheckGradientSingle(Layer* layer, CHECK_EQ(top_count, bottom[blob_id]->count()); } } - // First, figure out what blobs we need to check against. + // First, figure out what blobs we need to check against, and zero init + // parameter blobs. vector*> blobs_to_check; vector propagate_down(bottom.size(), check_bottom == -1); for (int i = 0; i < layer->blobs().size(); ++i) { - blobs_to_check.push_back(layer->blobs()[i].get()); + Blob* blob = layer->blobs()[i].get(); + caffe_set(blob->count(), static_cast(0), blob->mutable_cpu_diff()); + blobs_to_check.push_back(blob); } if (check_bottom == -1) { for (int i = 0; i < bottom.size(); ++i) { diff --git a/include/caffe/util/math_functions.hpp b/include/caffe/util/math_functions.hpp index f43036fcebc..2cacd8e72cd 100644 --- a/include/caffe/util/math_functions.hpp +++ b/include/caffe/util/math_functions.hpp @@ -88,6 +88,9 @@ void caffe_rng_bernoulli(const int n, const Dtype p, unsigned int* r); template void caffe_exp(const int n, const Dtype* a, Dtype* y); +template +void caffe_log(const int n, const Dtype* a, Dtype* y); + template void caffe_abs(const int n, const Dtype* a, Dtype* y); @@ -203,6 +206,9 @@ void caffe_gpu_abs(const int n, const Dtype* a, Dtype* y); template void caffe_gpu_exp(const int n, const Dtype* a, Dtype* y); 
+template +void caffe_gpu_log(const int n, const Dtype* a, Dtype* y); + template void caffe_gpu_powx(const int n, const Dtype* a, const Dtype b, Dtype* y); diff --git a/include/caffe/util/mkl_alternate.hpp b/include/caffe/util/mkl_alternate.hpp index 32fdbf79932..3355b6658a3 100644 --- a/include/caffe/util/mkl_alternate.hpp +++ b/include/caffe/util/mkl_alternate.hpp @@ -33,6 +33,7 @@ extern "C" { DEFINE_VSL_UNARY_FUNC(Sqr, y[i] = a[i] * a[i]); DEFINE_VSL_UNARY_FUNC(Exp, y[i] = exp(a[i])); +DEFINE_VSL_UNARY_FUNC(Ln, y[i] = log(a[i])); DEFINE_VSL_UNARY_FUNC(Abs, y[i] = fabs(a[i])); // A simple way to define the vsl unary functions with singular parameter b. diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index 06bc0457e2d..26beb8f943d 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -540,7 +540,13 @@ class SPPLayer : public Layer { virtual inline const char* type() const { return "SPP"; } virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { return 1; } + virtual inline int MinTopBlobs() const { return 1; } + // MAX POOL layers can output an extra top blob for the mask; + // others can only output the pooled inputs. + virtual inline int MaxTopBlobs() const { + return (this->layer_param_.pooling_param().pool() == + PoolingParameter_PoolMethod_MAX) ? 
2 : 1; + } protected: virtual void Forward_cpu(const vector*>& bottom, @@ -554,11 +560,9 @@ class SPPLayer : public Layer { int pyramid_height_; int bottom_h_, bottom_w_; - int num_; int channels_; int kernel_h_, kernel_w_; int pad_h_, pad_w_; - bool reshaped_first_time_; /// the internal Split layer that feeds the pooling layers shared_ptr > split_layer_; diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp index 05db15915b1..c86fd5d1d94 100644 --- a/src/caffe/blob.cpp +++ b/src/caffe/blob.cpp @@ -30,6 +30,7 @@ void Blob::Reshape(const vector& shape) { int* shape_data = static_cast(shape_data_->mutable_cpu_data()); for (int i = 0; i < shape.size(); ++i) { CHECK_GE(shape[i], 0); + CHECK_LE(shape[i], INT_MAX / count_) << "blob size exceeds INT_MAX"; count_ *= shape[i]; shape_[i] = shape[i]; shape_data[i] = shape[i]; diff --git a/src/caffe/data_transformer.cpp.orig b/src/caffe/data_transformer.cpp.orig index d8ad1c1e5c6..56f97c0ae04 100644 --- a/src/caffe/data_transformer.cpp.orig +++ b/src/caffe/data_transformer.cpp.orig @@ -130,21 +130,54 @@ void DataTransformer::Transform(const Datum& datum, template void DataTransformer::Transform(const Datum& datum, Blob* transformed_blob) { +<<<<<<< HEAD +<<<<<<< HEAD +<<<<<<< HEAD +<<<<<<< HEAD // If datum is encoded, decoded and transform the cv::image. if (datum.encoded()) { -<<<<<<< be87d80db7eea1220e4347069f0295209f90cef6 +<<<<<<< fa941056111876e03082cdc5695d75339ed24ed9 #ifdef USE_OPENCV + CHECK(!(param_.force_color() && param_.force_gray())) ======= <<<<<<< HEAD <<<<<<< HEAD ->>>>>>> triplet data generation and network update CHECK(!(param_.force_color() && param_.force_gray())) ======= +<<<<<<< HEAD + CHECK(!param_.force_color() && !param_.force_gray()) +>>>>>>> 011aef0... restore +======= + CHECK(!(param_.force_color() && param_.force_gray())) +>>>>>>> d2acfed... fixed _force_color check, fixes #2635 +======= + // If datum is encoded, decoded and transform the cv::image. 
+ if (datum.encoded()) { +<<<<<<< HEAD CHECK(!param_.force_color() && !param_.force_gray()) >>>>>>> 011aef0... restore +<<<<<<< HEAD +>>>>>>> 00341b2... triplet data generation and network update +<<<<<<< bd900fc47efb4f9fe6c0698d66ca08f7a5c1ed58 +>>>>>>> triplet data generation and network update +======= +======= ======= CHECK(!(param_.force_color() && param_.force_gray())) >>>>>>> d2acfed... fixed _force_color check, fixes #2635 +<<<<<<< HEAD +>>>>>>> 1882ac9... add initiate class name of triplet loss layer +<<<<<<< ca976a2b520c8024cc7f41a048d2c1abe73f6022 +>>>>>>> add initiate class name of triplet loss layer +======= +======= +======= + // If datum is encoded, decoded and transform the cv::image. + if (datum.encoded()) { + CHECK(!param_.force_color() && !param_.force_gray()) +>>>>>>> 80a07dd... macro define in upgrade_proto +>>>>>>> 08d5d6d... macro define in upgrade_proto +>>>>>>> macro define in upgrade_proto << "cannot set both force_color and force_gray"; cv::Mat cv_img; if (param_.force_color() || param_.force_gray()) { @@ -165,6 +198,16 @@ void DataTransformer::Transform(const Datum& datum, } const int crop_size = param_.crop_size(); +<<<<<<< HEAD +<<<<<<< HEAD +======= +>>>>>>> 083f61b... New triplet loss layer added(beta1 version-no test source files) +======= +>>>>>>> 011aef0... restore +======= +>>>>>>> 4d8130b... New triplet loss layer added(beta1 version-no test source files) +======= +>>>>>>> 80a07dd... 
macro define in upgrade_proto const int datum_channels = datum.channels(); const int datum_height = datum.height(); const int datum_width = datum.width(); @@ -449,22 +492,55 @@ void DataTransformer::Transform(Blob* input_blob, } } +<<<<<<< HEAD +<<<<<<< HEAD +<<<<<<< HEAD +<<<<<<< HEAD template vector DataTransformer::InferBlobShape(const Datum& datum) { if (datum.encoded()) { -<<<<<<< be87d80db7eea1220e4347069f0295209f90cef6 #ifdef USE_OPENCV -======= -<<<<<<< HEAD -<<<<<<< HEAD CHECK(!(param_.force_color() && param_.force_gray())) +<<<<<<< fa941056111876e03082cdc5695d75339ed24ed9 +======= ======= CHECK(!param_.force_color() && !param_.force_gray()) >>>>>>> 011aef0... restore ======= + CHECK(!(param_.force_color() && param_.force_gray())) +<<<<<<< HEAD +>>>>>>> d2acfed... fixed _force_color check, fixes #2635 +======= +======= +template +vector DataTransformer::InferBlobShape(const Datum& datum) { + if (datum.encoded()) { +<<<<<<< HEAD + CHECK(!param_.force_color() && !param_.force_gray()) +>>>>>>> 011aef0... restore +<<<<<<< HEAD +>>>>>>> 00341b2... triplet data generation and network update +<<<<<<< bd900fc47efb4f9fe6c0698d66ca08f7a5c1ed58 >>>>>>> triplet data generation and network update +======= +======= +======= CHECK(!(param_.force_color() && param_.force_gray())) >>>>>>> d2acfed... fixed _force_color check, fixes #2635 +<<<<<<< HEAD +>>>>>>> 1882ac9... add initiate class name of triplet loss layer +<<<<<<< ca976a2b520c8024cc7f41a048d2c1abe73f6022 +>>>>>>> add initiate class name of triplet loss layer +======= +======= +======= +template +vector DataTransformer::InferBlobShape(const Datum& datum) { + if (datum.encoded()) { + CHECK(!param_.force_color() && !param_.force_gray()) +>>>>>>> 80a07dd... macro define in upgrade_proto +>>>>>>> 08d5d6d... 
macro define in upgrade_proto +>>>>>>> macro define in upgrade_proto << "cannot set both force_color and force_gray"; cv::Mat cv_img; if (param_.force_color() || param_.force_gray()) { @@ -541,6 +617,16 @@ vector DataTransformer::InferBlobShape( } #endif // USE_OPENCV +<<<<<<< HEAD +<<<<<<< HEAD +======= +>>>>>>> 083f61b... New triplet loss layer added(beta1 version-no test source files) +======= +>>>>>>> 011aef0... restore +======= +>>>>>>> 4d8130b... New triplet loss layer added(beta1 version-no test source files) +======= +>>>>>>> 80a07dd... macro define in upgrade_proto template void DataTransformer::InitRand() { const bool needs_rand = param_.mirror() || diff --git a/src/caffe/layers/base_data_layer.cpp b/src/caffe/layers/base_data_layer.cpp index b90bd4e0caf..26a1118282f 100644 --- a/src/caffe/layers/base_data_layer.cpp +++ b/src/caffe/layers/base_data_layer.cpp @@ -1,9 +1,7 @@ -#include #include #include #include "caffe/data_layers.hpp" -#include "caffe/net.hpp" #include "caffe/util/io.hpp" namespace caffe { @@ -29,96 +27,56 @@ void BaseDataLayer::LayerSetUp(const vector*>& bottom, DataLayerSetUp(bottom, top); } -template -BasePrefetchingDataLayer::BasePrefetchingDataLayer( - const LayerParameter& param) - : BaseDataLayer(param), - prefetch_free_(), prefetch_full_() { - for (int i = 0; i < PREFETCH_COUNT; ++i) { - prefetch_free_.push(&prefetch_[i]); - } -} - template void BasePrefetchingDataLayer::LayerSetUp( const vector*>& bottom, const vector*>& top) { BaseDataLayer::LayerSetUp(bottom, top); - // Before starting the prefetch thread, we make cpu_data and gpu_data - // calls so that the prefetch thread does not accidentally make simultaneous - // cudaMalloc calls when the main thread is running. In some GPUs this - // seems to cause failures if we do not so. 
- for (int i = 0; i < PREFETCH_COUNT; ++i) { - prefetch_[i].data_.mutable_cpu_data(); - if (this->output_labels_) { - prefetch_[i].label_.mutable_cpu_data(); - } - } -#ifndef CPU_ONLY - if (Caffe::mode() == Caffe::GPU) { - for (int i = 0; i < PREFETCH_COUNT; ++i) { - prefetch_[i].data_.mutable_gpu_data(); - if (this->output_labels_) { - prefetch_[i].label_.mutable_gpu_data(); - } - } + // Now, start the prefetch thread. Before calling prefetch, we make two + // cpu_data calls so that the prefetch thread does not accidentally make + // simultaneous cudaMalloc calls when the main thread is running. In some + // GPUs this seems to cause failures if we do not so. + this->prefetch_data_.mutable_cpu_data(); + if (this->output_labels_) { + this->prefetch_label_.mutable_cpu_data(); } -#endif DLOG(INFO) << "Initializing prefetch"; - this->data_transformer_->InitRand(); - StartInternalThread(); + this->CreatePrefetchThread(); DLOG(INFO) << "Prefetch initialized."; } template -void BasePrefetchingDataLayer::InternalThreadEntry() { -#ifndef CPU_ONLY - cudaStream_t stream; - if (Caffe::mode() == Caffe::GPU) { - CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); - } -#endif +void BasePrefetchingDataLayer::CreatePrefetchThread() { + this->data_transformer_->InitRand(); + CHECK(StartInternalThread()) << "Thread execution failed"; +} - try { - while (!must_stop()) { - Batch* batch = prefetch_free_.pop(); - load_batch(batch); -#ifndef CPU_ONLY - if (Caffe::mode() == Caffe::GPU) { - batch->data_.data().get()->async_gpu_push(stream); - CUDA_CHECK(cudaStreamSynchronize(stream)); - } -#endif - prefetch_full_.push(batch); - } - } catch (boost::thread_interrupted&) { - // Interrupted exception is expected on shutdown - } -#ifndef CPU_ONLY - if (Caffe::mode() == Caffe::GPU) { - CUDA_CHECK(cudaStreamDestroy(stream)); - } -#endif +template +void BasePrefetchingDataLayer::JoinPrefetchThread() { + CHECK(WaitForInternalThreadToExit()) << "Thread joining failed"; } template 
void BasePrefetchingDataLayer::Forward_cpu( const vector*>& bottom, const vector*>& top) { - Batch* batch = prefetch_full_.pop("Data layer prefetch queue empty"); + // First, join the thread + JoinPrefetchThread(); + DLOG(INFO) << "Thread joined"; // Reshape to loaded data. - top[0]->ReshapeLike(batch->data_); + top[0]->ReshapeLike(prefetch_data_); // Copy the data - caffe_copy(batch->data_.count(), batch->data_.cpu_data(), + caffe_copy(prefetch_data_.count(), prefetch_data_.cpu_data(), top[0]->mutable_cpu_data()); DLOG(INFO) << "Prefetch copied"; if (this->output_labels_) { // Reshape to loaded labels. - top[1]->ReshapeLike(batch->label_); + top[1]->ReshapeLike(prefetch_label_); // Copy the labels. - caffe_copy(batch->label_.count(), batch->label_.cpu_data(), - top[1]->mutable_cpu_data()); + caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(), + top[1]->mutable_cpu_data()); } - - prefetch_free_.push(batch); + // Start a new prefetch thread + DLOG(INFO) << "CreatePrefetchThread"; + CreatePrefetchThread(); } #ifdef CPU_ONLY diff --git a/src/caffe/layers/base_data_layer.cu b/src/caffe/layers/base_data_layer.cu index ff6e412aba6..9335a5bc9a9 100644 --- a/src/caffe/layers/base_data_layer.cu +++ b/src/caffe/layers/base_data_layer.cu @@ -7,23 +7,22 @@ namespace caffe { template void BasePrefetchingDataLayer::Forward_gpu( const vector*>& bottom, const vector*>& top) { - Batch* batch = prefetch_full_.pop("Data layer prefetch queue empty"); + // First, join the thread + JoinPrefetchThread(); // Reshape to loaded data. - top[0]->ReshapeLike(batch->data_); + top[0]->ReshapeLike(this->prefetch_data_); // Copy the data - caffe_copy(batch->data_.count(), batch->data_.gpu_data(), + caffe_copy(prefetch_data_.count(), prefetch_data_.cpu_data(), top[0]->mutable_gpu_data()); if (this->output_labels_) { // Reshape to loaded labels. - top[1]->ReshapeLike(batch->label_); + top[1]->ReshapeLike(prefetch_label_); // Copy the labels. 
- caffe_copy(batch->label_.count(), batch->label_.gpu_data(), + caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(), top[1]->mutable_gpu_data()); } - // Ensure the copy is synchronous wrt the host, so that the next batch isn't - // copied in meanwhile. - CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault)); - prefetch_free_.push(batch); + // Start a new prefetch thread + CreatePrefetchThread(); } INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer); diff --git a/src/caffe/layers/concat_layer.cu b/src/caffe/layers/concat_layer.cu index 617701e2621..8f2e85d8f52 100644 --- a/src/caffe/layers/concat_layer.cu +++ b/src/caffe/layers/concat_layer.cu @@ -28,7 +28,6 @@ __global__ void Concat(const int nthreads, const Dtype* in_data, template void ConcatLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { - if (bottom.size() == 1) { return; } Dtype* top_data = top[0]->mutable_gpu_data(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); @@ -49,22 +48,20 @@ void ConcatLayer::Forward_gpu(const vector*>& bottom, template void ConcatLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - if (bottom.size() == 1) { return; } const Dtype* top_diff = top[0]->gpu_diff(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); const bool kForward = false; for (int i = 0; i < bottom.size(); ++i) { + if (!propagate_down[i]) { continue; } + Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); const int bottom_concat_axis = bottom[i]->shape(concat_axis_); - if (propagate_down[i]) { - Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); - const int bottom_concat_size = bottom_concat_axis * concat_input_size_; - const int nthreads = bottom_concat_size * num_concats_; - Concat // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - nthreads, top_diff, kForward, num_concats_, concat_input_size_, - top_concat_axis, bottom_concat_axis, offset_concat_axis, bottom_diff); 
- } + const int bottom_concat_size = bottom_concat_axis * concat_input_size_; + const int nthreads = bottom_concat_size * num_concats_; + Concat // NOLINT_NEXT_LINE(whitespace/operators) + <<>>( + nthreads, top_diff, kForward, num_concats_, concat_input_size_, + top_concat_axis, bottom_concat_axis, offset_concat_axis, bottom_diff); offset_concat_axis += bottom_concat_axis; } } diff --git a/src/caffe/layers/contrastive_loss_layer.cpp b/src/caffe/layers/contrastive_loss_layer.cpp index 0692c11c257..25e167819d3 100644 --- a/src/caffe/layers/contrastive_loss_layer.cpp +++ b/src/caffe/layers/contrastive_loss_layer.cpp @@ -41,6 +41,8 @@ void ContrastiveLossLayer::Forward_cpu( diff_.mutable_cpu_data()); // a_i-b_i const int channels = bottom[0]->channels(); Dtype margin = this->layer_param_.contrastive_loss_param().margin(); + bool legacy_version = + this->layer_param_.contrastive_loss_param().legacy_version(); Dtype loss(0.0); for (int i = 0; i < bottom[0]->num(); ++i) { dist_sq_.mutable_cpu_data()[i] = caffe_cpu_dot(channels, @@ -48,7 +50,12 @@ void ContrastiveLossLayer::Forward_cpu( if (static_cast(bottom[2]->cpu_data()[i])) { // similar pairs loss += dist_sq_.cpu_data()[i]; } else { // dissimilar pairs - loss += std::max(margin-dist_sq_.cpu_data()[i], Dtype(0.0)); + if (legacy_version) { + loss += std::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0)); + } else { + Dtype dist = std::max(margin - sqrt(dist_sq_.cpu_data()[i]), 0.0); + loss += dist*dist; + } } } loss = loss / static_cast(bottom[0]->num()) / Dtype(2); @@ -59,6 +66,8 @@ template void ContrastiveLossLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { Dtype margin = this->layer_param_.contrastive_loss_param().margin(); + bool legacy_version = + this->layer_param_.contrastive_loss_param().legacy_version(); for (int i = 0; i < 2; ++i) { if (propagate_down[i]) { const Dtype sign = (i == 0) ? 
1 : -1; @@ -76,10 +85,20 @@ void ContrastiveLossLayer::Backward_cpu(const vector*>& top, Dtype(0.0), bout + (j*channels)); } else { // dissimilar pairs - if ((margin-dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + Dtype mdist(0.0); + Dtype beta(0.0); + if (legacy_version) { + mdist = margin - dist_sq_.cpu_data()[j]; + beta = -alpha; + } else { + Dtype dist = sqrt(dist_sq_.cpu_data()[j]); + mdist = margin - dist; + beta = -alpha * mdist / (dist + Dtype(1e-4)); + } + if (mdist > Dtype(0.0)) { caffe_cpu_axpby( channels, - -alpha, + beta, diff_.cpu_data() + (j*channels), Dtype(0.0), bout + (j*channels)); diff --git a/src/caffe/layers/contrastive_loss_layer.cu b/src/caffe/layers/contrastive_loss_layer.cu index 78a55995a0a..931239316ac 100644 --- a/src/caffe/layers/contrastive_loss_layer.cu +++ b/src/caffe/layers/contrastive_loss_layer.cu @@ -32,12 +32,20 @@ void ContrastiveLossLayer::Forward_gpu( Dtype(0.0), dist_sq_.mutable_gpu_data()); // \Sum (a_i-b_i)^2 Dtype margin = this->layer_param_.contrastive_loss_param().margin(); + bool legacy_version = + this->layer_param_.contrastive_loss_param().legacy_version(); Dtype loss(0.0); for (int i = 0; i < bottom[0]->num(); ++i) { if (static_cast(bottom[2]->cpu_data()[i])) { // similar pairs loss += dist_sq_.cpu_data()[i]; } else { // dissimilar pairs - loss += std::max(margin-dist_sq_.cpu_data()[i], Dtype(0.0)); + if (legacy_version) { + loss += std::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0)); + } else { + Dtype dist = std::max(margin - sqrt(dist_sq_.cpu_data()[i]), + Dtype(0.0)); + loss += dist*dist; + } } } loss = loss / static_cast(bottom[0]->num()) / Dtype(2); @@ -45,8 +53,8 @@ void ContrastiveLossLayer::Forward_gpu( } template -__global__ void CLLForward(const int count, const int channels, - const Dtype margin, const Dtype alpha, +__global__ void CLLBackward(const int count, const int channels, + const Dtype margin, const bool legacy_version, const Dtype alpha, const Dtype* y, const Dtype* diff, const Dtype* dist_sq, 
Dtype *bottom_diff) { CUDA_KERNEL_LOOP(i, count) { @@ -54,8 +62,18 @@ __global__ void CLLForward(const int count, const int channels, if (static_cast(y[n])) { // similar pairs bottom_diff[i] = alpha * diff[i]; } else { // dissimilar pairs - if ((margin-dist_sq[n]) > 0.0) { - bottom_diff[i] = -alpha * diff[i]; + Dtype mdist(0.0); + Dtype beta(0.0); + if (legacy_version) { + mdist = (margin - dist_sq[n]); + beta = -alpha; + } else { + Dtype dist = sqrt(dist_sq[n]); + mdist = (margin - dist); + beta = -alpha * mdist / (dist + Dtype(1e-4)) * diff[i]; + } + if (mdist > 0.0) { + bottom_diff[i] = beta; } else { bottom_diff[i] = 0; } @@ -71,12 +89,14 @@ void ContrastiveLossLayer::Backward_gpu(const vector*>& top, const int count = bottom[0]->count(); const int channels = bottom[0]->channels(); Dtype margin = this->layer_param_.contrastive_loss_param().margin(); + const bool legacy_version = + this->layer_param_.contrastive_loss_param().legacy_version(); const Dtype sign = (i == 0) ? 1 : -1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / static_cast(bottom[0]->num()); // NOLINT_NEXT_LINE(whitespace/operators) - CLLForward<<>>( - count, channels, margin, alpha, + CLLBackward<<>>( + count, channels, margin, legacy_version, alpha, bottom[2]->gpu_data(), // pair similarity 0 or 1 diff_.gpu_data(), // the cached eltwise difference between a and b dist_sq_.gpu_data(), // the cached square distance between a and b diff --git a/src/caffe/layers/conv_layer.cpp b/src/caffe/layers/conv_layer.cpp index a020c7c70d9..fb50bb095ed 100644 --- a/src/caffe/layers/conv_layer.cpp +++ b/src/caffe/layers/conv_layer.cpp @@ -46,13 +46,6 @@ void ConvolutionLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->cpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_cpu_diff(); - if (this->param_propagate_down_[0]) { - caffe_set(this->blobs_[0]->count(), Dtype(0), weight_diff); - } - if (this->bias_term_ && 
this->param_propagate_down_[1]) { - caffe_set(this->blobs_[1]->count(), Dtype(0), - this->blobs_[1]->mutable_cpu_diff()); - } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->cpu_diff(); const Dtype* bottom_data = bottom[i]->cpu_data(); diff --git a/src/caffe/layers/conv_layer.cu b/src/caffe/layers/conv_layer.cu index 7474ae04b95..b429d2b47d0 100644 --- a/src/caffe/layers/conv_layer.cu +++ b/src/caffe/layers/conv_layer.cu @@ -31,13 +31,6 @@ void ConvolutionLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); - if (this->param_propagate_down_[0]) { - caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); - } - if (this->bias_term_ && this->param_propagate_down_[1]) { - caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), - this->blobs_[1]->mutable_gpu_diff()); - } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); // Bias gradient, if necessary. 
diff --git a/src/caffe/layers/cudnn_conv_layer.cu b/src/caffe/layers/cudnn_conv_layer.cu index dcdd2a0005e..691152021a3 100644 --- a/src/caffe/layers/cudnn_conv_layer.cu +++ b/src/caffe/layers/cudnn_conv_layer.cu @@ -101,12 +101,10 @@ void CuDNNConvolutionLayer::Backward_gpu(const vector*>& top, if (this->param_propagate_down_[0]) { weight = this->blobs_[0]->gpu_data(); weight_diff = this->blobs_[0]->mutable_gpu_diff(); - caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); } Dtype* bias_diff = NULL; if (this->bias_term_ && this->param_propagate_down_[1]) { bias_diff = this->blobs_[1]->mutable_gpu_diff(); - caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), bias_diff); } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); diff --git a/src/caffe/layers/data_layer.cpp b/src/caffe/layers/data_layer.cpp index 2eb7114b67e..fdf228bb269 100644 --- a/src/caffe/layers/data_layer.cpp +++ b/src/caffe/layers/data_layer.cpp @@ -39,32 +39,17 @@ void DataLayer::DataLayerSetUp(const vector*>& bottom, cursor_->Next(); } } - // Read a data point, and use it to initialize the top blob. + // Read a data point, to initialize the prefetch and top blobs. Datum datum; datum.ParseFromString(cursor_->value()); + // Use data_transformer to infer the expected blob shape from datum. + vector top_shape = this->data_transformer_->InferBlobShape(datum); + this->transformed_data_.Reshape(top_shape); + // Reshape top[0] and prefetch_data according to the batch_size. 
+ top_shape[0] = this->layer_param_.data_param().batch_size(); + this->prefetch_data_.Reshape(top_shape); + top[0]->ReshapeLike(this->prefetch_data_); - bool force_color = this->layer_param_.data_param().force_encoded_color(); - if ((force_color && DecodeDatum(&datum, true)) || - DecodeDatumNative(&datum)) { - LOG(INFO) << "Decoding Datum"; - } - // image - int crop_size = this->layer_param_.transform_param().crop_size(); - if (crop_size > 0) { - top[0]->Reshape(this->layer_param_.data_param().batch_size(), - datum.channels(), crop_size, crop_size); - this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(), - datum.channels(), crop_size, crop_size); - this->transformed_data_.Reshape(1, datum.channels(), crop_size, crop_size); - } else { - top[0]->Reshape( - this->layer_param_.data_param().batch_size(), datum.channels(), - datum.height(), datum.width()); - this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(), - datum.channels(), datum.height(), datum.width()); - this->transformed_data_.Reshape(1, datum.channels(), - datum.height(), datum.width()); - } LOG(INFO) << "output data size: " << top[0]->num() << "," << top[0]->channels() << "," << top[0]->height() << "," << top[0]->width(); @@ -87,25 +72,17 @@ void DataLayer::InternalThreadEntry() { CHECK(this->prefetch_data_.count()); CHECK(this->transformed_data_.count()); - // Reshape on single input batches for inputs of varying dimension. + // Reshape according to the first datum of each batch + // on single input batches allows for inputs of varying dimension. 
const int batch_size = this->layer_param_.data_param().batch_size(); - const int crop_size = this->layer_param_.transform_param().crop_size(); - bool force_color = this->layer_param_.data_param().force_encoded_color(); - if (batch_size == 1 && crop_size == 0) { - Datum datum; - datum.ParseFromString(cursor_->value()); - if (datum.encoded()) { - if (force_color) { - DecodeDatum(&datum, true); - } else { - DecodeDatumNative(&datum); - } - } - this->prefetch_data_.Reshape(1, datum.channels(), - datum.height(), datum.width()); - this->transformed_data_.Reshape(1, datum.channels(), - datum.height(), datum.width()); - } + Datum datum; + datum.ParseFromString(cursor_->value()); + // Use data_transformer to infer the expected blob shape from datum. + vector top_shape = this->data_transformer_->InferBlobShape(datum); + this->transformed_data_.Reshape(top_shape); + // Reshape prefetch_data according to the batch_size. + top_shape[0] = batch_size; + this->prefetch_data_.Reshape(top_shape); Dtype* top_data = this->prefetch_data_.mutable_cpu_data(); Dtype* top_label = NULL; // suppress warnings about uninitialized variables @@ -113,48 +90,31 @@ void DataLayer::InternalThreadEntry() { if (this->output_labels_) { top_label = this->prefetch_label_.mutable_cpu_data(); } + timer.Start(); for (int item_id = 0; item_id < batch_size; ++item_id) { - timer.Start(); - // get a blob + // get a datum Datum datum; datum.ParseFromString(cursor_->value()); - - cv::Mat cv_img; - if (datum.encoded()) { - if (force_color) { - cv_img = DecodeDatumToCVMat(datum, true); - } else { - cv_img = DecodeDatumToCVMatNative(datum); - } - if (cv_img.channels() != this->transformed_data_.channels()) { - LOG(WARNING) << "Your dataset contains encoded images with mixed " - << "channel sizes. 
Consider adding a 'force_color' flag to the " - << "model definition, or rebuild your dataset using " - << "convert_imageset."; - } - } read_time += timer.MicroSeconds(); timer.Start(); - // Apply data transformations (mirror, scale, crop...) int offset = this->prefetch_data_.offset(item_id); this->transformed_data_.set_cpu_data(top_data + offset); - if (datum.encoded()) { - this->data_transformer_->Transform(cv_img, &(this->transformed_data_)); - } else { - this->data_transformer_->Transform(datum, &(this->transformed_data_)); - } + this->data_transformer_->Transform(datum, &(this->transformed_data_)); + // Copy label. if (this->output_labels_) { top_label[item_id] = datum.label(); } trans_time += timer.MicroSeconds(); - // go to the next iter + timer.Start(); + // go to the next item. cursor_->Next(); if (!cursor_->valid()) { DLOG(INFO) << "Restarting data prefetching from start."; cursor_->SeekToFirst(); } } + timer.Stop(); batch_timer.Stop(); DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms."; DLOG(INFO) << " Read time: " << read_time / 1000 << " ms."; diff --git a/src/caffe/layers/deconv_layer.cpp b/src/caffe/layers/deconv_layer.cpp index b471043ebfd..91aabb315b2 100644 --- a/src/caffe/layers/deconv_layer.cpp +++ b/src/caffe/layers/deconv_layer.cpp @@ -46,13 +46,6 @@ void DeconvolutionLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->cpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_cpu_diff(); - if (this->param_propagate_down_[0]) { - caffe_set(this->blobs_[0]->count(), Dtype(0), weight_diff); - } - if (this->bias_term_ && this->param_propagate_down_[1]) { - caffe_set(this->blobs_[1]->count(), Dtype(0), - this->blobs_[1]->mutable_cpu_diff()); - } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->cpu_diff(); const Dtype* bottom_data = bottom[i]->cpu_data(); diff --git a/src/caffe/layers/deconv_layer.cu 
b/src/caffe/layers/deconv_layer.cu index d37e9f7ac09..5dbdcc3149f 100644 --- a/src/caffe/layers/deconv_layer.cu +++ b/src/caffe/layers/deconv_layer.cu @@ -31,13 +31,6 @@ void DeconvolutionLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = this->blobs_[0]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); - if (this->param_propagate_down_[0]) { - caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); - } - if (this->bias_term_ && this->param_propagate_down_[1]) { - caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), - this->blobs_[1]->mutable_gpu_diff()); - } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); const Dtype* bottom_data = bottom[i]->gpu_data(); diff --git a/src/caffe/layers/flatten_layer.cpp b/src/caffe/layers/flatten_layer.cpp index 745f271ea45..f7e5c9c2172 100644 --- a/src/caffe/layers/flatten_layer.cpp +++ b/src/caffe/layers/flatten_layer.cpp @@ -9,9 +9,19 @@ namespace caffe { template void FlattenLayer::Reshape(const vector*>& bottom, const vector*>& top) { - vector top_shape(2); - top_shape[0] = bottom[0]->num(); - top_shape[1] = bottom[0]->count() / bottom[0]->num(); + const int start_axis = bottom[0]->CanonicalAxisIndex( + this->layer_param_.flatten_param().axis()); + const int end_axis = bottom[0]->CanonicalAxisIndex( + this->layer_param_.flatten_param().end_axis()); + vector top_shape; + for (int i = 0; i < start_axis; ++i) { + top_shape.push_back(bottom[0]->shape(i)); + } + const int flattened_dim = bottom[0]->count(start_axis, end_axis + 1); + top_shape.push_back(flattened_dim); + for (int i = end_axis + 1; i < bottom[0]->num_axes(); ++i) { + top_shape.push_back(bottom[0]->shape(i)); + } top[0]->Reshape(top_shape); CHECK_EQ(top[0]->count(), bottom[0]->count()); } diff --git a/src/caffe/layers/image_data_layer.cpp b/src/caffe/layers/image_data_layer.cpp index c1bab09e10e..832597496b3 100644 --- 
a/src/caffe/layers/image_data_layer.cpp +++ b/src/caffe/layers/image_data_layer.cpp @@ -63,21 +63,15 @@ void ImageDataLayer::DataLayerSetUp(const vector*>& bottom, // Read an image, and use it to initialize the top blob. cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, new_height, new_width, is_color); - const int channels = cv_img.channels(); - const int height = cv_img.rows; - const int width = cv_img.cols; - // image - const int crop_size = this->layer_param_.transform_param().crop_size(); + // Use data_transformer to infer the expected blob shape from a cv_image. + vector top_shape = this->data_transformer_->InferBlobShape(cv_img); + this->transformed_data_.Reshape(top_shape); + // Reshape prefetch_data and top[0] according to the batch_size. const int batch_size = this->layer_param_.image_data_param().batch_size(); - if (crop_size > 0) { - top[0]->Reshape(batch_size, channels, crop_size, crop_size); - this->prefetch_data_.Reshape(batch_size, channels, crop_size, crop_size); - this->transformed_data_.Reshape(1, channels, crop_size, crop_size); - } else { - top[0]->Reshape(batch_size, channels, height, width); - this->prefetch_data_.Reshape(batch_size, channels, height, width); - this->transformed_data_.Reshape(1, channels, height, width); - } + top_shape[0] = batch_size; + this->prefetch_data_.Reshape(top_shape); + top[0]->ReshapeLike(this->prefetch_data_); + LOG(INFO) << "output data size: " << top[0]->num() << "," << top[0]->channels() << "," << top[0]->height() << "," << top[0]->width(); @@ -108,19 +102,19 @@ void ImageDataLayer::InternalThreadEntry() { const int batch_size = image_data_param.batch_size(); const int new_height = image_data_param.new_height(); const int new_width = image_data_param.new_width(); - const int crop_size = this->layer_param_.transform_param().crop_size(); const bool is_color = image_data_param.is_color(); string root_folder = image_data_param.root_folder(); - // Reshape on single input batches for inputs of 
varying dimension. - if (batch_size == 1 && crop_size == 0 && new_height == 0 && new_width == 0) { - cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, - 0, 0, is_color); - this->prefetch_data_.Reshape(1, cv_img.channels(), - cv_img.rows, cv_img.cols); - this->transformed_data_.Reshape(1, cv_img.channels(), - cv_img.rows, cv_img.cols); - } + // Reshape according to the first image of each batch + // on single input batches allows for inputs of varying dimension. + cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, + new_height, new_width, is_color); + // Use data_transformer to infer the expected blob shape from a cv_img. + vector top_shape = this->data_transformer_->InferBlobShape(cv_img); + this->transformed_data_.Reshape(top_shape); + // Reshape prefetch_data according to the batch_size. + top_shape[0] = batch_size; + this->prefetch_data_.Reshape(top_shape); Dtype* prefetch_data = this->prefetch_data_.mutable_cpu_data(); Dtype* prefetch_label = this->prefetch_label_.mutable_cpu_data(); diff --git a/src/caffe/layers/inner_product_layer.cpp b/src/caffe/layers/inner_product_layer.cpp index 89e0c8fbad7..83c3235eb71 100644 --- a/src/caffe/layers/inner_product_layer.cpp +++ b/src/caffe/layers/inner_product_layer.cpp @@ -101,13 +101,13 @@ void InnerProductLayer::Backward_cpu(const vector*>& top, const Dtype* bottom_data = bottom[0]->cpu_data(); // Gradient with respect to weight caffe_cpu_gemm(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., - top_diff, bottom_data, (Dtype)0., this->blobs_[0]->mutable_cpu_diff()); + top_diff, bottom_data, (Dtype)1., this->blobs_[0]->mutable_cpu_diff()); } if (bias_term_ && this->param_propagate_down_[1]) { const Dtype* top_diff = top[0]->cpu_diff(); // Gradient with respect to bias caffe_cpu_gemv(CblasTrans, M_, N_, (Dtype)1., top_diff, - bias_multiplier_.cpu_data(), (Dtype)0., + bias_multiplier_.cpu_data(), (Dtype)1., this->blobs_[1]->mutable_cpu_diff()); } if (propagate_down[0]) { diff 
--git a/src/caffe/layers/inner_product_layer.cu b/src/caffe/layers/inner_product_layer.cu index 50f76f4942c..c0ebd2c47da 100644 --- a/src/caffe/layers/inner_product_layer.cu +++ b/src/caffe/layers/inner_product_layer.cu @@ -40,13 +40,13 @@ void InnerProductLayer::Backward_gpu(const vector*>& top, const Dtype* bottom_data = bottom[0]->gpu_data(); // Gradient with respect to weight caffe_gpu_gemm(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., - top_diff, bottom_data, (Dtype)0., this->blobs_[0]->mutable_gpu_diff()); + top_diff, bottom_data, (Dtype)1., this->blobs_[0]->mutable_gpu_diff()); } if (bias_term_ && this->param_propagate_down_[1]) { const Dtype* top_diff = top[0]->gpu_diff(); // Gradient with respect to bias caffe_gpu_gemv(CblasTrans, M_, N_, (Dtype)1., top_diff, - bias_multiplier_.gpu_data(), (Dtype)0., + bias_multiplier_.gpu_data(), (Dtype)1., this->blobs_[1]->mutable_gpu_diff()); } if (propagate_down[0]) { diff --git a/src/caffe/layers/lrn_layer.cu b/src/caffe/layers/lrn_layer.cu index 24aa6a30130..001b3c34ac1 100644 --- a/src/caffe/layers/lrn_layer.cu +++ b/src/caffe/layers/lrn_layer.cu @@ -7,44 +7,46 @@ namespace caffe { template -__global__ void LRNFillScale(const int nthreads, const Dtype* in, +__global__ void LRNFillScale(const int nthreads, const Dtype* const in, const int num, const int channels, const int height, const int width, const int size, const Dtype alpha_over_size, - const Dtype k, Dtype* scale) { + const Dtype k, Dtype* const scale) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset - int w = index % width; - int h = (index / width) % height; - int n = index / width / height; - int offset = (n * channels * height + h) * width + w; - int step = height * width; - in += offset; - scale += offset; + const int w = index % width; + const int h = (index / width) % height; + const int n = index / width / height; + const int offset = (n * channels * height + h) * width + w; + const int step = height * width; + const Dtype* const 
in_off = in + offset; + Dtype* const scale_off = scale + offset; int head = 0; - int pre_pad = (size - 1) / 2; - int post_pad = size - pre_pad - 1; + const int pre_pad = (size - 1) / 2; + const int post_pad = size - pre_pad - 1; Dtype accum_scale = 0; // fill the scale at [n, :, h, w] // accumulate values while (head < post_pad && head < channels) { - accum_scale += in[head * step] * in[head * step]; + accum_scale += in_off[head * step] * in_off[head * step]; ++head; } // both add and subtract while (head < channels) { - accum_scale += in[head * step] * in[head * step]; + accum_scale += in_off[head * step] * in_off[head * step]; if (head - size >= 0) { - accum_scale -= in[(head - size) * step] * in[(head - size) * step]; + accum_scale -= in_off[(head - size) * step] + * in_off[(head - size) * step]; } - scale[(head - post_pad) * step] = k + accum_scale * alpha_over_size; + scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; ++head; } // subtract only while (head < channels + post_pad) { if (head - size >= 0) { - accum_scale -= in[(head - size) * step] * in[(head - size) * step]; + accum_scale -= in_off[(head - size) * step] + * in_off[(head - size) * step]; } - scale[(head - post_pad) * step] = k + accum_scale * alpha_over_size; + scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; ++head; } } @@ -68,8 +70,8 @@ void LRNLayer::Forward_gpu(const vector*>& bottom, // TODO: check if it would be faster to just put it into the previous kernel. 
template -__global__ void LRNComputeOutput(const int nthreads, const Dtype* in, - const Dtype* scale, const Dtype negative_beta, Dtype* out) { +__global__ void LRNComputeOutput(const int nthreads, const Dtype* const in, + const Dtype* const scale, const Dtype negative_beta, Dtype* const out) { CUDA_KERNEL_LOOP(index, nthreads) { out[index] = in[index] * pow(scale[index], negative_beta); } @@ -118,56 +120,58 @@ void LRNLayer::Backward_gpu(const vector*>& top, } template -__global__ void LRNComputeDiff(const int nthreads, const Dtype* bottom_data, - const Dtype* top_data, const Dtype* scale, const Dtype* top_diff, +__global__ void LRNComputeDiff(const int nthreads, + const Dtype* const bottom_data, const Dtype* const top_data, + const Dtype* const scale, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int size, const Dtype negative_beta, - const Dtype cache_ratio, - Dtype* bottom_diff) { + const Dtype cache_ratio, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset - int w = index % width; - int h = (index / width) % height; - int n = index / width / height; - int offset = (n * channels * height + h) * width + w; - int step = height * width; - bottom_data += offset; - top_data += offset; - scale += offset; - top_diff += offset; - bottom_diff += offset; + const int w = index % width; + const int h = (index / width) % height; + const int n = index / width / height; + const int offset = (n * channels * height + h) * width + w; + const int step = height * width; + const Dtype* const bottom_off = bottom_data + offset; + const Dtype* const top_off = top_data + offset; + const Dtype* const scale_off = scale + offset; + const Dtype* const top_diff_off = top_diff + offset; + Dtype* const bottom_diff_off = bottom_diff + offset; int head = 0; - int pre_pad = size - (size + 1) / 2; - int post_pad = size - pre_pad - 1; + const int pre_pad = size - (size + 1) / 2; + const int post_pad 
= size - pre_pad - 1; Dtype accum_ratio = 0; // accumulate values while (head < post_pad && head < channels) { - accum_ratio += top_diff[head * step] * top_data[head * step] / - scale[head * step]; + accum_ratio += top_diff_off[head * step] * top_off[head * step] / + scale_off[head * step]; ++head; } // both add and subtract while (head < channels) { - accum_ratio += top_diff[head * step] * top_data[head * step] / - scale[head * step]; + accum_ratio += top_diff_off[head * step] * top_off[head * step] / + scale_off[head * step]; if (head - size >= 0) { - accum_ratio -= top_diff[(head - size) * step] * - top_data[(head - size) * step] / scale[(head - size) * step]; + accum_ratio -= top_diff_off[(head - size) * step] * + top_off[(head - size) * step] / scale_off[(head - size) * step]; } - bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] - * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * - bottom_data[(head - post_pad) * step] * accum_ratio; + bottom_diff_off[(head - post_pad) * step] = + top_diff_off[(head - post_pad) * step] + * pow(scale_off[(head - post_pad) * step], negative_beta) + - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; ++head; } // subtract only while (head < channels + post_pad) { if (head - size >= 0) { - accum_ratio -= top_diff[(head - size) * step] * - top_data[(head - size) * step] / scale[(head - size) * step]; + accum_ratio -= top_diff_off[(head - size) * step] * + top_off[(head - size) * step] / scale_off[(head - size) * step]; } - bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] - * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * - bottom_data[(head - post_pad) * step] * accum_ratio; + bottom_diff_off[(head - post_pad) * step] = + top_diff_off[(head - post_pad) * step] + * pow(scale_off[(head - post_pad) * step], negative_beta) + - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; ++head; } } diff --git 
a/src/caffe/layers/mvn_layer.cpp b/src/caffe/layers/mvn_layer.cpp index 45994d8f7fd..325691b1875 100644 --- a/src/caffe/layers/mvn_layer.cpp +++ b/src/caffe/layers/mvn_layer.cpp @@ -26,6 +26,7 @@ void MVNLayer::Reshape(const vector*>& bottom, } Dtype* multiplier_data = sum_multiplier_.mutable_cpu_data(); caffe_set(sum_multiplier_.count(), Dtype(1), multiplier_data); + eps_ = this->layer_param_.mvn_param().eps(); } template @@ -40,7 +41,6 @@ void MVNLayer::Forward_cpu(const vector*>& bottom, num = bottom[0]->num() * bottom[0]->channels(); int dim = bottom[0]->count() / num; - Dtype eps = 1e-10; if (this->layer_param_.mvn_param().normalize_variance()) { // put the squares of bottom into temp_ @@ -70,7 +70,7 @@ void MVNLayer::Forward_cpu(const vector*>& bottom, caffe_powx(variance_.count(), variance_.cpu_data(), Dtype(0.5), variance_.mutable_cpu_data()); - caffe_add_scalar(variance_.count(), eps, variance_.mutable_cpu_data()); + caffe_add_scalar(variance_.count(), eps_, variance_.mutable_cpu_data()); caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., variance_.cpu_data(), sum_multiplier_.cpu_data(), 0., @@ -106,7 +106,6 @@ void MVNLayer::Backward_cpu(const vector*>& top, num = bottom[0]->num() * bottom[0]->channels(); int dim = bottom[0]->count() / num; - Dtype eps = 1e-10; if (this->layer_param_.mvn_param().normalize_variance()) { caffe_mul(temp_.count(), top_data, top_diff, bottom_diff); @@ -129,24 +128,6 @@ void MVNLayer::Backward_cpu(const vector*>& top, // put the squares of bottom into temp_ caffe_powx(temp_.count(), bottom_data, Dtype(2), temp_.mutable_cpu_data()); - - // computes variance using var(X) = E(X^2) - (EX)^2 - caffe_cpu_gemv(CblasNoTrans, num, dim, 1. / dim, bottom_data, - sum_multiplier_.cpu_data(), 0., mean_.mutable_cpu_data()); // EX - caffe_cpu_gemv(CblasNoTrans, num, dim, 1. 
/ dim, temp_.cpu_data(), - sum_multiplier_.cpu_data(), 0., - variance_.mutable_cpu_data()); // E(X^2) - caffe_powx(mean_.count(), mean_.cpu_data(), Dtype(2), - temp_.mutable_cpu_data()); // (EX)^2 - caffe_sub(mean_.count(), variance_.cpu_data(), temp_.cpu_data(), - variance_.mutable_cpu_data()); // variance - - // normalize variance - caffe_powx(variance_.count(), variance_.cpu_data(), Dtype(0.5), - variance_.mutable_cpu_data()); - - caffe_add_scalar(variance_.count(), eps, variance_.mutable_cpu_data()); - caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., variance_.cpu_data(), sum_multiplier_.cpu_data(), 0., temp_.mutable_cpu_data()); diff --git a/src/caffe/layers/mvn_layer.cu b/src/caffe/layers/mvn_layer.cu index 446d7327a5b..d86a2e73fc2 100644 --- a/src/caffe/layers/mvn_layer.cu +++ b/src/caffe/layers/mvn_layer.cu @@ -36,8 +36,6 @@ void MVNLayer::Forward_gpu(const vector*>& bottom, caffe_gpu_sub(mean_.count(), variance_.gpu_data(), temp_.gpu_data(), variance_.mutable_gpu_data()); // variance - Dtype eps = 1e-10; - // do mean and variance normalization // subtract mean caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, -1., @@ -50,7 +48,7 @@ void MVNLayer::Forward_gpu(const vector*>& bottom, caffe_gpu_powx(variance_.count(), variance_.gpu_data(), Dtype(0.5), variance_.mutable_gpu_data()); - caffe_gpu_add_scalar(variance_.count(), eps, variance_.mutable_gpu_data()); + caffe_gpu_add_scalar(variance_.count(), eps_, variance_.mutable_gpu_data()); caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., variance_.gpu_data(), sum_multiplier_.gpu_data(), 0., @@ -87,8 +85,6 @@ void MVNLayer::Backward_gpu(const vector*>& top, int dim = bottom[0]->count() / num; - Dtype eps = 1e-10; - if (this->layer_param_.mvn_param().normalize_variance()) { caffe_gpu_mul(temp_.count(), top_data, top_diff, bottom_diff); caffe_gpu_gemv(CblasNoTrans, num, dim, 1., bottom_diff, @@ -111,23 +107,6 @@ void MVNLayer::Backward_gpu(const vector*>& top, 
caffe_gpu_powx(temp_.count(), bottom_data, Dtype(2), temp_.mutable_gpu_data()); - // computes variance using var(X) = E(X^2) - (EX)^2 - caffe_gpu_gemv(CblasNoTrans, num, dim, 1. / dim, bottom_data, - sum_multiplier_.gpu_data(), 0., mean_.mutable_gpu_data()); // EX - caffe_gpu_gemv(CblasNoTrans, num, dim, 1. / dim, temp_.gpu_data(), - sum_multiplier_.gpu_data(), 0., - variance_.mutable_gpu_data()); // E(X^2) - caffe_gpu_powx(mean_.count(), mean_.gpu_data(), Dtype(2), - temp_.mutable_gpu_data()); // (EX)^2 - caffe_gpu_sub(mean_.count(), variance_.gpu_data(), temp_.gpu_data(), - variance_.mutable_gpu_data()); // variance - - // normalize variance - caffe_gpu_powx(variance_.count(), variance_.gpu_data(), Dtype(0.5), - variance_.mutable_gpu_data()); - - caffe_gpu_add_scalar(variance_.count(), eps, variance_.mutable_gpu_data()); - caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, 1., variance_.gpu_data(), sum_multiplier_.gpu_data(), 0., temp_.mutable_gpu_data()); diff --git a/src/caffe/layers/pooling_layer.cu b/src/caffe/layers/pooling_layer.cu index d1d48501af3..ca4b13f7c41 100644 --- a/src/caffe/layers/pooling_layer.cu +++ b/src/caffe/layers/pooling_layer.cu @@ -9,31 +9,32 @@ namespace caffe { template -__global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data, - const int num, const int channels, const int height, - const int width, const int pooled_height, const int pooled_width, - const int kernel_h, const int kernel_w, const int stride_h, - const int stride_w, const int pad_h, const int pad_w, Dtype* top_data, - int* mask, Dtype* top_mask) { +__global__ void MaxPoolForward(const int nthreads, + const Dtype* const bottom_data, const int num, const int channels, + const int height, const int width, const int pooled_height, + const int pooled_width, const int kernel_h, const int kernel_w, + const int stride_h, const int stride_w, const int pad_h, const int pad_w, + Dtype* const top_data, int* mask, Dtype* top_mask) { CUDA_KERNEL_LOOP(index, 
nthreads) { - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int c = (index / pooled_width / pooled_height) % channels; - int n = index / pooled_width / pooled_height / channels; + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; - int hend = min(hstart + kernel_h, height); - int wend = min(wstart + kernel_w, width); + const int hend = min(hstart + kernel_h, height); + const int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); Dtype maxval = -FLT_MAX; int maxidx = -1; - bottom_data += (n * channels + c) * height * width; + const Dtype* const bottom_slice = + bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - if (bottom_data[h * width + w] > maxval) { + if (bottom_slice[h * width + w] > maxval) { maxidx = h * width + w; - maxval = bottom_data[maxidx]; + maxval = bottom_slice[maxidx]; } } } @@ -47,30 +48,32 @@ __global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data, } template -__global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data, - const int num, const int channels, const int height, - const int width, const int pooled_height, const int pooled_width, - const int kernel_h, const int kernel_w, const int stride_h, - const int stride_w, const int pad_h, const int pad_w, Dtype* top_data) { +__global__ void AvePoolForward(const int nthreads, + const Dtype* const bottom_data, const int num, const int channels, + const int height, const int width, const int pooled_height, + const int pooled_width, const int kernel_h, const int kernel_w, + const int stride_h, const int stride_w, const int pad_h, const int pad_w, + Dtype* const top_data) 
{ CUDA_KERNEL_LOOP(index, nthreads) { - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int c = (index / pooled_width / pooled_height) % channels; - int n = index / pooled_width / pooled_height / channels; + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); - int pool_size = (hend - hstart) * (wend - wstart); + const int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); Dtype aveval = 0; - bottom_data += (n * channels + c) * height * width; + const Dtype* const bottom_slice = + bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - aveval += bottom_data[h * width + w]; + aveval += bottom_slice[h * width + w]; } } top_data[index] = aveval / pool_size; @@ -79,37 +82,38 @@ __global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data, template __global__ void StoPoolForwardTrain(const int nthreads, - const Dtype* bottom_data, + const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, - const int stride_w, Dtype* rand_idx, Dtype* top_data) { + const int stride_w, Dtype* const rand_idx, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int c = (index / pooled_width / pooled_height) % channels; - int n = index / pooled_width / pooled_height / channels; - int hstart 
= ph * stride_h; - int hend = min(hstart + kernel_h, height); - int wstart = pw * stride_w; - int wend = min(wstart + kernel_w, width); + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + const int hstart = ph * stride_h; + const int hend = min(hstart + kernel_h, height); + const int wstart = pw * stride_w; + const int wend = min(wstart + kernel_w, width); Dtype cumsum = 0.; - bottom_data += (n * channels + c) * height * width; + const Dtype* const bottom_slice = + bottom_data + (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - cumsum += bottom_data[h * width + w]; + cumsum += bottom_slice[h * width + w]; } } - float thres = rand_idx[index] * cumsum; + const float thres = rand_idx[index] * cumsum; // Second pass: get value, and set index. 
cumsum = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - cumsum += bottom_data[h * width + w]; + cumsum += bottom_slice[h * width + w]; if (cumsum >= thres) { rand_idx[index] = ((n * channels + c) * height + h) * width + w; - top_data[index] = bottom_data[h * width + w]; + top_data[index] = bottom_slice[h * width + w]; return; } } @@ -120,29 +124,30 @@ __global__ void StoPoolForwardTrain(const int nthreads, template __global__ void StoPoolForwardTest(const int nthreads, - const Dtype* bottom_data, + const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, - const int stride_w, Dtype* top_data) { + const int stride_w, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int c = (index / pooled_width / pooled_height) % channels; - int n = index / pooled_width / pooled_height / channels; - int hstart = ph * stride_h; - int hend = min(hstart + kernel_h, height); - int wstart = pw * stride_w; - int wend = min(wstart + kernel_w, width); + const int pw = index % pooled_width; + const int ph = (index / pooled_width) % pooled_height; + const int c = (index / pooled_width / pooled_height) % channels; + const int n = index / pooled_width / pooled_height / channels; + const int hstart = ph * stride_h; + const int hend = min(hstart + kernel_h, height); + const int wstart = pw * stride_w; + const int wend = min(wstart + kernel_w, width); // We set cumsum to be 0 to avoid divide-by-zero problems Dtype cumsum = FLT_MIN; Dtype cumvalues = 0.; - bottom_data += (n * channels + c) * height * width; + const Dtype* const bottom_slice = + bottom_data + (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - cumsum += bottom_data[h 
* width + w]; - cumvalues += bottom_data[h * width + w] * bottom_data[h * width + w]; + cumsum += bottom_slice[h * width + w]; + cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w]; } } top_data[index] = cumvalues / cumsum; @@ -210,43 +215,43 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, template -__global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, - const int* mask, const Dtype* top_mask, const int num, const int channels, - const int height, const int width, const int pooled_height, - const int pooled_width, const int kernel_h, const int kernel_w, - const int stride_h, const int stride_w, const int pad_h, const int pad_w, - Dtype* bottom_diff) { +__global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff, + const int* const mask, const Dtype* const top_mask, const int num, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset - int w = index % width; - int h = (index / width) % height; - int c = (index / width / height) % channels; - int n = index / width / height / channels; - int phstart = - (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; - int phend = min((h + pad_h) / stride_h + 1, pooled_height); - int pwstart = - (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; - int pwend = min((w + pad_w) / stride_w + 1, pooled_width); + const int w = index % width; + const int h = (index / width) % height; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = + (h + pad_h < kernel_h) ? 
0 : (h + pad_h - kernel_h) / stride_h + 1; + const int phend = min((h + pad_h) / stride_h + 1, pooled_height); + const int pwstart = + (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; + const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); Dtype gradient = 0; - int offset = (n * channels + c) * pooled_height * pooled_width; - top_diff += offset; + const int offset = (n * channels + c) * pooled_height * pooled_width; + const Dtype* const top_diff_slice = top_diff + offset; if (mask) { - mask += offset; + const int* const mask_slice = mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { - if (mask[ph * pooled_width + pw] == h * width + w) { - gradient += top_diff[ph * pooled_width + pw]; + if (mask_slice[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_slice[ph * pooled_width + pw]; } } } } else { - top_mask += offset; + const Dtype* const top_mask_slice = top_mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { - if (top_mask[ph * pooled_width + pw] == h * width + w) { - gradient += top_diff[ph * pooled_width + pw]; + if (top_mask_slice[ph * pooled_width + pw] == h * width + w) { + gradient += top_diff_slice[ph * pooled_width + pw]; } } } @@ -256,25 +261,26 @@ __global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff, } template -__global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff, +__global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, - Dtype* bottom_diff) { + Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset - int w = index % width + pad_w; - int h = (index / 
width) % height + pad_h; - int c = (index / width / height) % channels; - int n = index / width / height / channels; - int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; - int phend = min(h / stride_h + 1, pooled_height); - int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; - int pwend = min(w / stride_w + 1, pooled_width); + const int w = index % width + pad_w; + const int h = (index / width) % height + pad_h; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + const int phend = min(h / stride_h + 1, pooled_height); + const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; + const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; - top_diff += (n * channels + c) * pooled_height * pooled_width; + const Dtype* const top_diff_slice = + top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size @@ -283,7 +289,7 @@ __global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff, int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); - gradient += top_diff[ph * pooled_width + pw] / pool_size; + gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; } } bottom_diff[index] = gradient; @@ -293,29 +299,31 @@ __global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff, template __global__ void StoPoolBackward(const int nthreads, - const Dtype* rand_idx, const Dtype* top_diff, + const Dtype* const rand_idx, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, - const int stride_w, Dtype* 
bottom_diff) { + const int stride_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset - int w = index % width; - int h = (index / width) % height; - int c = (index / width / height) % channels; - int n = index / width / height / channels; - int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; - int phend = min(h / stride_h + 1, pooled_height); - int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; - int pwend = min(w / stride_w + 1, pooled_width); + const int w = index % width; + const int h = (index / width) % height; + const int c = (index / width / height) % channels; + const int n = index / width / height / channels; + const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; + const int phend = min(h / stride_h + 1, pooled_height); + const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; + const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; - rand_idx += (n * channels + c) * pooled_height * pooled_width; - top_diff += (n * channels + c) * pooled_height * pooled_width; + const Dtype* const rand_idx_slice = + rand_idx + (n * channels + c) * pooled_height * pooled_width; + const Dtype* const top_diff_slice = + top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { - gradient += top_diff[ph * pooled_width + pw] * - (index == static_cast(rand_idx[ph * pooled_width + pw])); + gradient += top_diff_slice[ph * pooled_width + pw] * + (index == static_cast(rand_idx_slice[ph * pooled_width + pw])); } } bottom_diff[index] = gradient; diff --git a/src/caffe/layers/prelu_layer.cpp b/src/caffe/layers/prelu_layer.cpp index 7119a274dd3..81831755512 100644 --- a/src/caffe/layers/prelu_layer.cpp +++ b/src/caffe/layers/prelu_layer.cpp @@ -45,7 +45,8 @@ void PReLULayer::LayerSetUp(const vector*>& bottom, // Propagate gradients to the 
parameters (as directed by backward pass). this->param_propagate_down_.resize(this->blobs_.size(), true); - multiplier_.Reshape(vector(1, bottom[0]->count() / bottom[0]->num())); + multiplier_.Reshape(vector(1, bottom[0]->count(1))); + backward_buff_.Reshape(vector(1, bottom[0]->count(1))); caffe_set(multiplier_.count(), Dtype(1), multiplier_.mutable_cpu_data()); } @@ -112,7 +113,6 @@ void PReLULayer::Backward_cpu(const vector*>& top, // keep top_diff unchanged. if (this->param_propagate_down_[0]) { Dtype* slope_diff = this->blobs_[0]->mutable_cpu_diff(); - caffe_set(this->blobs_[0]->count(), Dtype(0), slope_diff); for (int i = 0; i < count; ++i) { int c = (i / dim) % channels / div_factor; slope_diff[c] += top_diff[i] * bottom_data[i] * (bottom_data[i] <= 0); diff --git a/src/caffe/layers/prelu_layer.cu b/src/caffe/layers/prelu_layer.cu index fd0eda5d191..e1f20048f60 100644 --- a/src/caffe/layers/prelu_layer.cu +++ b/src/caffe/layers/prelu_layer.cu @@ -75,38 +75,36 @@ void PReLULayer::Backward_gpu(const vector*>& top, bottom_data = bottom_memory_.gpu_data(); } - // Propagte to param + // Propagate to param // Since to write bottom diff will affect top diff if top and bottom blobs // are identical (in-place computaion), we first compute param backward to // keep top_diff unchanged. 
if (this->param_propagate_down_[0]) { Dtype* slope_diff = this->blobs_[0]->mutable_gpu_diff(); - // slope_diff is set as 0, then accumulated over batches - caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), slope_diff); int cdim = channels * dim; Dtype dsum = 0.; for (int n = 0; n < bottom[0]->num(); ++n) { - Dtype* temp_buff = multiplier_.mutable_gpu_diff(); // compute element-wise diff // NOLINT_NEXT_LINE(whitespace/operators) - PReLUParamBackward<<<<>>( cdim, top_diff + top[0]->offset(n), - bottom_data + bottom[0]->offset(n), multiplier_.mutable_gpu_diff()); + bottom_data + bottom[0]->offset(n), + backward_buff_.mutable_gpu_diff()); CUDA_POST_KERNEL_CHECK; if (channel_shared_) { Dtype d; - caffe_gpu_dot(channels * dim, multiplier_.gpu_diff(), + caffe_gpu_dot(channels * dim, backward_buff_.gpu_diff(), multiplier_.gpu_data(), &d); dsum += d; } else { caffe_gpu_gemv(CblasNoTrans, channels, dim, 1., - multiplier_.gpu_diff(), multiplier_.gpu_data(), 1., + backward_buff_.gpu_diff(), multiplier_.gpu_data(), 1., slope_diff); } } if (channel_shared_) { - caffe_gpu_set(this->blobs_[0]->count(), Dtype(dsum), slope_diff); + caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum), slope_diff); } } // Propagate to bottom diff --git a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp index 077d949981c..cc236fe1e8e 100644 --- a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp +++ b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp @@ -71,7 +71,7 @@ void SigmoidCrossEntropyLossLayer::Backward_cpu( } #ifdef CPU_ONLY -STUB_GPU(SigmoidCrossEntropyLossLayer); +STUB_GPU_BACKWARD(SigmoidCrossEntropyLossLayer, Backward); #endif INSTANTIATE_CLASS(SigmoidCrossEntropyLossLayer); diff --git a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu index 08f7f492297..547fa80c72f 100644 --- a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu +++ 
b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu @@ -8,26 +8,6 @@ namespace caffe { -template -void SigmoidCrossEntropyLossLayer::Forward_gpu( - const vector*>& bottom, const vector*>& top) { - // The forward pass computes the sigmoid outputs. - sigmoid_bottom_vec_[0] = bottom[0]; - sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_); - // Compute the loss (negative log likelihood) - const int count = bottom[0]->count(); - const int num = bottom[0]->num(); - // Stable version of loss computation from input data - const Dtype* input_data = bottom[0]->cpu_data(); - const Dtype* target = bottom[1]->cpu_data(); - Dtype loss = 0; - for (int i = 0; i < count; ++i) { - loss -= input_data[i] * (target[i] - (input_data[i] >= 0)) - - log(1 + exp(input_data[i] - 2 * input_data[i] * (input_data[i] >= 0))); - } - top[0]->mutable_cpu_data()[0] = loss / num; -} - template void SigmoidCrossEntropyLossLayer::Backward_gpu( const vector*>& top, const vector& propagate_down, @@ -51,7 +31,7 @@ void SigmoidCrossEntropyLossLayer::Backward_gpu( } } -INSTANTIATE_LAYER_GPU_FUNCS(SigmoidCrossEntropyLossLayer); +INSTANTIATE_LAYER_GPU_BACKWARD(SigmoidCrossEntropyLossLayer); } // namespace caffe diff --git a/src/caffe/layers/slice_layer.cu b/src/caffe/layers/slice_layer.cu index a82b738f6f4..e8dc6cd98fc 100644 --- a/src/caffe/layers/slice_layer.cu +++ b/src/caffe/layers/slice_layer.cu @@ -6,6 +6,25 @@ namespace caffe { +template +__global__ void Slice(const int nthreads, const Dtype* in_data, + const bool forward, const int num_slices, const int slice_size, + const int bottom_slice_axis, const int top_slice_axis, + const int offset_slice_axis, Dtype* out_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int total_slice_size = slice_size * top_slice_axis; + const int slice_num = index / total_slice_size; + const int slice_index = index % total_slice_size; + const int bottom_index = slice_index + + (slice_num * bottom_slice_axis + offset_slice_axis) * slice_size; + if 
(forward) { + out_data[index] = in_data[bottom_index]; + } else { + out_data[bottom_index] = in_data[index]; + } + } +} + template void SliceLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { @@ -13,16 +32,16 @@ void SliceLayer::Forward_gpu(const vector*>& bottom, int offset_slice_axis = 0; const Dtype* bottom_data = bottom[0]->gpu_data(); const int bottom_slice_axis = bottom[0]->shape(slice_axis_); + const bool kForward = true; for (int i = 0; i < top.size(); ++i) { Dtype* top_data = top[i]->mutable_gpu_data(); const int top_slice_axis = top[i]->shape(slice_axis_); - for (int n = 0; n < num_slices_; ++n) { - const int top_offset = n * top_slice_axis * slice_size_; - const int bottom_offset = - (n * bottom_slice_axis + offset_slice_axis) * slice_size_; - caffe_copy(top_slice_axis * slice_size_, - bottom_data + bottom_offset, top_data + top_offset); - } + const int top_slice_size = top_slice_axis * slice_size_; + const int nthreads = top_slice_size * num_slices_; + Slice // NOLINT_NEXT_LINE(whitespace/operators) + <<>>( + nthreads, bottom_data, kForward, num_slices_, slice_size_, + bottom_slice_axis, top_slice_axis, offset_slice_axis, top_data); offset_slice_axis += top_slice_axis; } } @@ -34,16 +53,16 @@ void SliceLayer::Backward_gpu(const vector*>& top, int offset_slice_axis = 0; Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int bottom_slice_axis = bottom[0]->shape(slice_axis_); + const bool kForward = false; for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); const int top_slice_axis = top[i]->shape(slice_axis_); - for (int n = 0; n < num_slices_; ++n) { - const int top_offset = n * top_slice_axis * slice_size_; - const int bottom_offset = - (n * bottom_slice_axis + offset_slice_axis) * slice_size_; - caffe_copy(top_slice_axis * slice_size_, - top_diff + top_offset, bottom_diff + bottom_offset); - } + const int top_slice_size = top_slice_axis * slice_size_; + const int nthreads = top_slice_size * 
num_slices_; + Slice // NOLINT_NEXT_LINE(whitespace/operators) + <<>>( + nthreads, top_diff, kForward, num_slices_, slice_size_, + bottom_slice_axis, top_slice_axis, offset_slice_axis, bottom_diff); offset_slice_axis += top_slice_axis; } } diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp index 888eec1d501..a18ee63818e 100644 --- a/src/caffe/net.cpp +++ b/src/caffe/net.cpp @@ -79,10 +79,17 @@ void Net::Init(const NetParameter& in_param) { } // Setup layer. const LayerParameter& layer_param = param.layer(layer_id); + if (layer_param.propagate_down_size() > 0) { + CHECK_EQ(layer_param.propagate_down_size(), + layer_param.bottom_size()) + << "propagate_down param must be specified " + << "either 0 or bottom_size times "; + } layers_.push_back(LayerRegistry::CreateLayer(layer_param)); layer_names_.push_back(layer_param.name()); LOG(INFO) << "Creating Layer " << layer_param.name(); bool need_backward = false; + // Figure out this layer's input and output for (int bottom_id = 0; bottom_id < layer_param.bottom_size(); ++bottom_id) { @@ -151,15 +158,33 @@ void Net::Init(const NetParameter& in_param) { // Go through the net backwards to determine which blobs contribute to the // loss. We can skip backward computation for blobs that don't contribute // to the loss. 
+ // Also checks if all bottom blobs don't need backward computation (possible + // because the skip_propagate_down param) and so we can skip bacward + // computation for the entire layer set blobs_under_loss; + set blobs_skip_backp; for (int layer_id = layers_.size() - 1; layer_id >= 0; --layer_id) { bool layer_contributes_loss = false; + bool layer_skip_propagate_down = true; for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) { const string& blob_name = blob_names_[top_id_vecs_[layer_id][top_id]]; if (layers_[layer_id]->loss(top_id) || (blobs_under_loss.find(blob_name) != blobs_under_loss.end())) { layer_contributes_loss = true; + } + if (blobs_skip_backp.find(blob_name) == blobs_skip_backp.end()) { + layer_skip_propagate_down = false; + } + if (layer_contributes_loss && !layer_skip_propagate_down) break; + } + // If this layer can skip backward computation, also all his bottom blobs + // don't need backpropagation + if (layer_need_backward_[layer_id] && layer_skip_propagate_down) { + layer_need_backward_[layer_id] = false; + for (int bottom_id = 0; bottom_id < bottom_vecs_[layer_id].size(); + ++bottom_id) { + bottom_need_backward_[layer_id][bottom_id] = false; } } if (!layer_contributes_loss) { layer_need_backward_[layer_id] = false; } @@ -178,6 +203,11 @@ void Net::Init(const NetParameter& in_param) { } else { bottom_need_backward_[layer_id][bottom_id] = false; } + if (!bottom_need_backward_[layer_id][bottom_id]) { + const string& blob_name = + blob_names_[bottom_id_vecs_[layer_id][bottom_id]]; + blobs_skip_backp.insert(blob_name); + } } } // Handle force_backward if needed. @@ -367,9 +397,9 @@ void Net::AppendTop(const NetParameter& param, const int layer_id, // Helper for Net::Init: add a new bottom blob to the net. 
template -int Net::AppendBottom(const NetParameter& param, - const int layer_id, const int bottom_id, - set* available_blobs, map* blob_name_to_idx) { +int Net::AppendBottom(const NetParameter& param, const int layer_id, + const int bottom_id, set* available_blobs, + map* blob_name_to_idx) { const LayerParameter& layer_param = param.layer(layer_id); const string& blob_name = layer_param.bottom(bottom_id); if (available_blobs->find(blob_name) == available_blobs->end()) { @@ -381,7 +411,12 @@ int Net::AppendBottom(const NetParameter& param, bottom_vecs_[layer_id].push_back(blobs_[blob_id].get()); bottom_id_vecs_[layer_id].push_back(blob_id); available_blobs->erase(blob_name); - const bool need_backward = blob_need_backward_[blob_id]; + bool propagate_down = true; + // Check if the backpropagation on bottom_id should be skipped + if (layer_param.propagate_down_size() > 0) + propagate_down = layer_param.propagate_down(bottom_id); + const bool need_backward = blob_need_backward_[blob_id] && + propagate_down; bottom_need_backward_[layer_id].push_back(need_backward); return blob_id; } @@ -410,7 +445,7 @@ void Net::AppendParam(const NetParameter& param, const int layer_id, // (i.e., not given a param_name) or explicitly given a name that we // haven't already seen. 
param_owners_.push_back(-1); - if (param_size) { + if (param_name.size()) { param_names_index_[param_name] = net_param_id; } } else { @@ -470,7 +505,6 @@ Dtype Net::ForwardFromTo(int start, int end) { } for (int i = start; i <= end; ++i) { // LOG(ERROR) << "Forwarding " << layer_names_[i]; - layers_[i]->Reshape(bottom_vecs_[i], top_vecs_[i]); Dtype layer_loss = layers_[i]->Forward(bottom_vecs_[i], top_vecs_[i]); loss += layer_loss; if (debug_info_) { ForwardDebugInfo(i); } diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index 099f2158120..b4294777259 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -49,6 +49,14 @@ message FillerParameter { // The expected number of non-zero output weights for a given input in // Gaussian filler -- the default -1 means don't perform sparsification. optional int32 sparse = 7 [default = -1]; + // Normalize the filler variance by fan_in, fan_out, or their average. + // Applies to 'xavier' and 'msra' fillers. + enum VarianceNorm { + FAN_IN = 0; + FAN_OUT = 1; + AVERAGE = 2; + } + optional VarianceNorm variance_norm = 8 [default = FAN_IN]; } message NetParameter { @@ -88,7 +96,7 @@ message NetParameter { // NOTE // Update the next available ID when you add a new SolverParameter field. // -// SolverParameter next available ID: 36 (last added: clip_gradients) +// SolverParameter next available ID: 37 (last added: iter_size) message SolverParameter { ////////////////////////////////////////////////////////////////////////////// // Specifying the train and test networks @@ -141,6 +149,8 @@ message SolverParameter { // Display the loss averaged over the last average_loss iterations optional int32 average_loss = 33 [default = 1]; optional int32 max_iter = 7; // the maximum number of iterations + // accumulate gradients over `iter_size` x `batch_size` instances + optional int32 iter_size = 36 [default = 1]; optional string lr_policy = 8; // The learning rate decay policy. 
optional float gamma = 9; // The parameter to compute the learning rate. optional float power = 10; // The parameter to compute the learning rate. @@ -259,7 +269,7 @@ message ParamSpec { // NOTE // Update the next available ID when you add a new LayerParameter field. // -// LayerParameter next available layer-specific ID: 132 (last added: prelu_param) +// LayerParameter next available layer-specific ID: 137 (last added: reduction_param) message LayerParameter { optional string name = 1; // the layer name optional string type = 2; // the layer type @@ -280,6 +290,10 @@ message LayerParameter { // The blobs containing the numeric parameters of the layer. repeated BlobProto blobs = 7; + + // Specifies on which bottoms the backpropagation should be skipped. + // The size must be either 0 or equal to the number of bottoms. + repeated bool propagate_down = 11; // Rules controlling whether and when a layer is included in the network, // based on the current NetState. You may specify a non-zero number of rules @@ -312,12 +326,14 @@ message LayerParameter { optional DummyDataParameter dummy_data_param = 109; optional EltwiseParameter eltwise_param = 110; optional ExpParameter exp_param = 111; + optional FlattenParameter flatten_param = 135; optional HDF5DataParameter hdf5_data_param = 112; optional HDF5OutputParameter hdf5_output_param = 113; optional HingeLossParameter hinge_loss_param = 114; optional ImageDataParameter image_data_param = 115; optional InfogainLossParameter infogain_loss_param = 116; optional InnerProductParameter inner_product_param = 117; + optional LogParameter log_param = 134; optional LRNParameter lrn_param = 118; optional MemoryDataParameter memory_data_param = 119; optional MVNParameter mvn_param = 120; @@ -325,9 +341,12 @@ message LayerParameter { optional PowerParameter power_param = 122; optional PReLUParameter prelu_param = 131; optional PythonParameter python_param = 130; + optional ReductionParameter reduction_param = 136; optional 
ReLUParameter relu_param = 123; + optional ReshapeParameter reshape_param = 133; optional SigmoidParameter sigmoid_param = 124; optional SoftmaxParameter softmax_param = 125; + optional SPPParameter spp_param = 132; optional SliceParameter slice_param = 126; optional TanHParameter tanh_param = 127; optional ThresholdParameter threshold_param = 128; @@ -412,7 +431,7 @@ message ContrastiveLossParameter { // Hadsell paper. New models should probably use this version. // legacy_version = true uses (margin - d^2). This is kept to support / // reproduce existing models and results - optional bool legacy_version = 2 [default = false]; + optional bool legacy_version = 2 [default = false]; } message ConvolutionParameter { @@ -464,7 +483,6 @@ message ConvolutionParameter { optional bool force_nd_im2col = 17 [default = false]; } -// Message that stores parameters used by DataLayer message DataParameter { enum DB { LEVELDB = 0; @@ -495,12 +513,10 @@ message DataParameter { optional bool force_encoded_color = 9 [default = false]; } -// Message that stores parameters used by DropoutLayer message DropoutParameter { optional float dropout_ratio = 1 [default = 0.5]; // dropout ratio } -// Message that stores parameters used by DummyDataLayer. // DummyDataLayer fills any number of arbitrarily shaped blobs with random // (or constant) data generated by "Fillers" (see "message FillerParameter"). message DummyDataParameter { @@ -520,7 +536,6 @@ message DummyDataParameter { repeated uint32 width = 5; } -// Message that stores parameters used by EltwiseLayer message EltwiseParameter { enum EltwiseOp { PROD = 0; @@ -535,7 +550,6 @@ message EltwiseParameter { optional bool stable_prod_grad = 3 [default = true]; } -// Message that stores parameters used by ExpLayer message ExpParameter { // ExpLayer computes outputs y = base ^ (shift + scale * x), for base > 0. 
// Or if base is set to the default (-1), base is set to e, @@ -545,6 +559,18 @@ message ExpParameter { optional float shift = 3 [default = 0.0]; } +/// Message that stores parameters used by FlattenLayer +message FlattenParameter { + // The first axis to flatten: all preceding axes are retained in the output. + // May be negative to index from the end (e.g., -1 for the last axis). + optional int32 axis = 1 [default = 1]; + + // The last axis to flatten: all following axes are retained in the output. + // May be negative to index from the end (e.g., the default -1 for the last + // axis). + optional int32 end_axis = 2 [default = -1]; +} + // Message that stores parameters used by HDF5DataLayer message HDF5DataParameter { // Specify the data source. @@ -560,7 +586,6 @@ message HDF5DataParameter { optional bool shuffle = 3 [default = false]; } -// Message that stores parameters used by HDF5OutputLayer message HDF5OutputParameter { optional string file_name = 1; } @@ -574,7 +599,6 @@ message HingeLossParameter { optional Norm norm = 1 [default = L1]; } -// Message that stores parameters used by ImageDataLayer message ImageDataParameter { // Specify the data source. optional string source = 1; @@ -606,13 +630,11 @@ message ImageDataParameter { optional string root_folder = 12 [default = ""]; } -// Message that stores parameters InfogainLossLayer message InfogainLossParameter { // Specify the infogain matrix source. optional string source = 1; } -// Message that stores parameters used by InnerProductLayer message InnerProductParameter { optional uint32 num_output = 1; // The number of outputs for the layer optional bool bias_term = 2 [default = true]; // whether to have bias terms @@ -625,6 +647,16 @@ message InnerProductParameter { optional int32 axis = 5 [default = 1]; } +// Message that stores parameters used by LogLayer +message LogParameter { + // LogLayer computes outputs y = log_base(shift + scale * x), for base > 0. 
+ // Or if base is set to the default (-1), base is set to e, + // so y = ln(shift + scale * x) = log_e(shift + scale * x) + optional float base = 1 [default = -1.0]; + optional float scale = 2 [default = 1.0]; + optional float shift = 3 [default = 0.0]; +} + // Message that stores parameters used by LRNLayer message LRNParameter { optional uint32 local_size = 1 [default = 5]; @@ -638,7 +670,6 @@ message LRNParameter { optional float k = 5 [default = 1.]; } -// Message that stores parameters used by MemoryDataLayer message MemoryDataParameter { optional uint32 batch_size = 1; optional uint32 channels = 2; @@ -646,16 +677,17 @@ message MemoryDataParameter { optional uint32 width = 4; } -// Message that stores parameters used by MVNLayer message MVNParameter { // This parameter can be set to false to normalize mean only optional bool normalize_variance = 1 [default = true]; // This parameter can be set to true to perform DNN-like MVN optional bool across_channels = 2 [default = false]; + + // Epsilon for not dividing by zero while normalizing variance + optional float eps = 3 [default = 1e-9]; } -// Message that stores parameters used by PoolingLayer message PoolingParameter { enum PoolMethod { MAX = 0; @@ -685,7 +717,6 @@ message PoolingParameter { optional bool global_pooling = 12 [default = false]; } -// Message that stores parameters used by PowerLayer message PowerParameter { // PowerLayer computes outputs y = (shift + scale * x) ^ power. 
optional float power = 1 [default = 1.0]; @@ -693,12 +724,40 @@ message PowerParameter { optional float shift = 3 [default = 0.0]; } -// Message that stores parameters used by PythonLayer message PythonParameter { optional string module = 1; optional string layer = 2; } +// Message that stores parameters used by ReductionLayer +message ReductionParameter { + enum ReductionOp { + SUM = 1; + ASUM = 2; + SUMSQ = 3; + MEAN = 4; + } + + optional ReductionOp operation = 1 [default = SUM]; // reduction operation + + // The first axis to reduce to a scalar -- may be negative to index from the + // end (e.g., -1 for the last axis). + // (Currently, only reduction along ALL "tail" axes is supported; reduction + // of axis M through N, where N < num_axes - 1, is unsupported.) + // Suppose we have an n-axis bottom Blob with shape: + // (d0, d1, d2, ..., d(m-1), dm, d(m+1), ..., d(n-1)). + // If axis == m, the output Blob will have shape + // (d0, d1, d2, ..., d(m-1)), + // and the ReductionOp operation is performed (d0 * d1 * d2 * ... * d(m-1)) + // times, each including (dm * d(m+1) * ... * d(n-1)) individual data. + // If axis == 0 (the default), the output Blob always has the empty shape + // (count 1), performing reduction across the entire input -- + // often useful for creating new loss functions. + optional int32 axis = 2 [default = 0]; + + optional float coeff = 3 [default = 1.0]; // coefficient for output +} + // Message that stores parameters used by ReLULayer message ReLUParameter { // Allow non-zero slope for negative inputs to speed up optimization @@ -715,7 +774,70 @@ message ReLUParameter { optional Engine engine = 2 [default = DEFAULT]; } -// Message that stores parameters used by SigmoidLayer +message ReshapeParameter { + // Specify the output dimensions. If some of the dimensions are set to 0, + // the corresponding dimension from the bottom layer is used (unchanged). 
+ // Exactly one dimension may be set to -1, in which case its value is + // inferred from the count of the bottom blob and the remaining dimensions. + // For example, suppose we want to reshape a 2D blob "input" with shape 2 x 8: + // + // layer { + // type: "Reshape" bottom: "input" top: "output" + // reshape_param { ... } + // } + // + // If "input" is 2D with shape 2 x 8, then the following reshape_param + // specifications are all equivalent, producing a 3D blob "output" with shape + // 2 x 2 x 4: + // + // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } + // reshape_param { shape { dim: 0 dim: 2 dim: 4 } } + // reshape_param { shape { dim: 0 dim: 2 dim: -1 } } + // reshape_param { shape { dim: -1 dim: 0 dim: 2 } } + // + optional BlobShape shape = 1; + + // axis and num_axes control the portion of the bottom blob's shape that are + // replaced by (included in) the reshape. By default (axis == 0 and + // num_axes == -1), the entire bottom blob shape is included in the reshape, + // and hence the shape field must specify the entire output shape. + // + // axis may be non-zero to retain some portion of the beginning of the input + // shape (and may be negative to index from the end; e.g., -1 to begin the + // reshape after the last axis, including nothing in the reshape, + // -2 to include only the last axis, etc.). + // + // For example, suppose "input" is a 2D blob with shape 2 x 8. + // Then the following ReshapeLayer specifications are all equivalent, + // producing a blob "output" with shape 2 x 2 x 4: + // + // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } + // reshape_param { shape { dim: 2 dim: 4 } axis: 1 } + // reshape_param { shape { dim: 2 dim: 4 } axis: -3 } + // + // num_axes specifies the extent of the reshape. + // If num_axes >= 0 (and axis >= 0), the reshape will be performed only on + // input axes in the range [axis, axis+num_axes]. + // num_axes may also be -1, the default, to include all remaining axes + // (starting from axis). 
+ // + // For example, suppose "input" is a 2D blob with shape 2 x 8. + // Then the following ReshapeLayer specifications are equivalent, + // producing a blob "output" with shape 1 x 2 x 8. + // + // reshape_param { shape { dim: 1 dim: 2 dim: 8 } } + // reshape_param { shape { dim: 1 dim: 2 } num_axes: 1 } + // reshape_param { shape { dim: 1 } num_axes: 0 } + // + // On the other hand, these would produce output blob shape 2 x 1 x 8: + // + // reshape_param { shape { dim: 2 dim: 1 dim: 8 } } + // reshape_param { shape { dim: 1 } axis: 1 num_axes: 0 } + // + optional int32 axis = 2 [default = 0]; + optional int32 num_axes = 3 [default = -1]; +} + message SigmoidParameter { enum Engine { DEFAULT = 0; @@ -725,7 +847,6 @@ message SigmoidParameter { optional Engine engine = 1 [default = DEFAULT]; } -// Message that stores parameters used by SliceLayer message SliceParameter { // The axis along which to slice -- may be negative to index from the end // (e.g., -1 for the last axis). @@ -752,7 +873,6 @@ message SoftmaxParameter { optional int32 axis = 2 [default = 1]; } -// Message that stores parameters used by TanHLayer message TanHParameter { enum Engine { DEFAULT = 0; @@ -762,12 +882,10 @@ message TanHParameter { optional Engine engine = 1 [default = DEFAULT]; } -// Message that stores parameters used by ThresholdLayer message ThresholdParameter { optional float threshold = 1 [default = 0]; // Strictly positive values } -// Message that stores parameters used by WindowDataLayer message WindowDataParameter { // Specify the data source. 
optional string source = 1; @@ -801,6 +919,22 @@ message WindowDataParameter { optional string root_folder = 13 [default = ""]; } +message SPPParameter { + enum PoolMethod { + MAX = 0; + AVE = 1; + STOCHASTIC = 2; + } + optional uint32 pyramid_height = 1; + optional PoolMethod pool = 2 [default = MAX]; // The pooling method + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 6 [default = DEFAULT]; +} + // DEPRECATED: use LayerParameter. message V1LayerParameter { repeated string bottom = 2; @@ -849,6 +983,7 @@ message V1LayerParameter { TANH = 23; WINDOW_DATA = 24; THRESHOLD = 31; + } optional LayerType type = 5; repeated BlobProto blobs = 6; @@ -988,7 +1123,6 @@ message V0LayerParameter { optional HDF5OutputParameter hdf5_output_param = 1001; } -// Message that stores parameters used by PReLULayer message PReLUParameter { // Parametric ReLU described in K. He et al, Delving Deep into Rectifiers: // Surpassing Human-Level Performance on ImageNet Classification, 2015. diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp index 12c13dd8385..aabe0edec80 100644 --- a/src/caffe/solver.cpp +++ b/src/caffe/solver.cpp @@ -4,44 +4,24 @@ #include #include -#include "hdf5.h" -#include "hdf5_hl.h" - #include "caffe/net.hpp" #include "caffe/proto/caffe.pb.h" #include "caffe/solver.hpp" -#include "caffe/util/hdf5.hpp" #include "caffe/util/io.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/util/upgrade_proto.hpp" namespace caffe { -template -void Solver::SetActionFunction(ActionCallback func) { - action_request_function_ = func; -} - -template -SolverAction::Enum Solver::GetRequestedAction() { - if (action_request_function_) { - // If the external request function has been set, call it. 
- return action_request_function_(); - } - return SolverAction::NONE; -} - template -Solver::Solver(const SolverParameter& param, const Solver* root_solver) - : net_(), callbacks_(), root_solver_(root_solver), - requested_early_exit_(false) { +Solver::Solver(const SolverParameter& param) + : net_() { Init(param); } template -Solver::Solver(const string& param_file, const Solver* root_solver) - : net_(), callbacks_(), root_solver_(root_solver), - requested_early_exit_(false) { +Solver::Solver(const string& param_file) + : net_() { SolverParameter param; ReadProtoFromTextFileOrDie(param_file, ¶m); Init(param); @@ -49,22 +29,17 @@ Solver::Solver(const string& param_file, const Solver* root_solver) template void Solver::Init(const SolverParameter& param) { - CHECK(Caffe::root_solver() || root_solver_) - << "root_solver_ needs to be set for all non-root solvers"; - LOG_IF(INFO, Caffe::root_solver()) << "Initializing solver from parameters: " - << std::endl << param.DebugString(); + LOG(INFO) << "Initializing solver from parameters: " << std::endl + << param.DebugString(); param_ = param; CHECK_GE(param_.average_loss(), 1) << "average_loss should be non-negative."; - CheckSnapshotWritePermissions(); - if (Caffe::root_solver() && param_.random_seed() >= 0) { + if (param_.random_seed() >= 0) { Caffe::set_random_seed(param_.random_seed()); } // Scaffolding code InitTrainNet(); - if (Caffe::root_solver()) { - InitTestNets(); - LOG(INFO) << "Solver scaffolding done."; - } + InitTestNets(); + LOG(INFO) << "Solver scaffolding done."; iter_ = 0; current_step_ = 0; } @@ -80,22 +55,19 @@ void Solver::InitTrainNet() { << "one of these fields specifying a train_net: " << field_names; NetParameter net_param; if (param_.has_train_net_param()) { - LOG_IF(INFO, Caffe::root_solver()) - << "Creating training net specified in train_net_param."; + LOG(INFO) << "Creating training net specified in train_net_param."; net_param.CopyFrom(param_.train_net_param()); } else if 
(param_.has_train_net()) { - LOG_IF(INFO, Caffe::root_solver()) - << "Creating training net from train_net file: " << param_.train_net(); + LOG(INFO) << "Creating training net from train_net file: " + << param_.train_net(); ReadNetParamsFromTextFileOrDie(param_.train_net(), &net_param); } if (param_.has_net_param()) { - LOG_IF(INFO, Caffe::root_solver()) - << "Creating training net specified in net_param."; + LOG(INFO) << "Creating training net specified in net_param."; net_param.CopyFrom(param_.net_param()); } if (param_.has_net()) { - LOG_IF(INFO, Caffe::root_solver()) - << "Creating training net from net file: " << param_.net(); + LOG(INFO) << "Creating training net from net file: " << param_.net(); ReadNetParamsFromTextFileOrDie(param_.net(), &net_param); } // Set the correct NetState. We start with the solver defaults (lowest @@ -107,16 +79,11 @@ void Solver::InitTrainNet() { net_state.MergeFrom(net_param.state()); net_state.MergeFrom(param_.train_state()); net_param.mutable_state()->CopyFrom(net_state); - if (Caffe::root_solver()) { - net_.reset(new Net(net_param)); - } else { - net_.reset(new Net(net_param, root_solver_->net_.get())); - } + net_.reset(new Net(net_param)); } template void Solver::InitTestNets() { - CHECK(Caffe::root_solver()); const bool has_net_param = param_.has_net_param(); const bool has_net_file = param_.has_net(); const int num_generic_nets = has_net_param + has_net_file; @@ -186,12 +153,7 @@ void Solver::InitTestNets() { net_params[i].mutable_state()->CopyFrom(net_state); LOG(INFO) << "Creating test net (#" << i << ") specified by " << sources[i]; - if (Caffe::root_solver()) { - test_nets_[i].reset(new Net(net_params[i])); - } else { - test_nets_[i].reset(new Net(net_params[i], - root_solver_->test_nets_[i].get())); - } + test_nets_[i].reset(new Net(net_params[i])); test_nets_[i]->set_debug_info(param_.debug_info()); } } @@ -207,20 +169,29 @@ void Solver::Step(int iters) { while (iter_ < stop_iter) { // zero-init the params - 
net_->ClearParamDiffs(); - if (param_.test_interval() && iter_ % param_.test_interval() == 0 - && (iter_ > 0 || param_.test_initialization()) - && Caffe::root_solver()) { - TestAll(); - if (requested_early_exit_) { - // Break out of the while loop because stop was requested while testing. + for (int i = 0; i < net_->params().size(); ++i) { + shared_ptr > blob = net_->params()[i]; + switch (Caffe::mode()) { + case Caffe::CPU: + caffe_set(blob->count(), static_cast(0), + blob->mutable_cpu_diff()); + break; + case Caffe::GPU: +#ifndef CPU_ONLY + caffe_gpu_set(blob->count(), static_cast(0), + blob->mutable_gpu_diff()); +#else + NO_GPU; +#endif break; } } - for (int i = 0; i < callbacks_.size(); ++i) { - callbacks_[i]->on_start(); + if (param_.test_interval() && iter_ % param_.test_interval() == 0 + && (iter_ > 0 || param_.test_initialization())) { + TestAll(); } + const bool display = param_.display() && iter_ % param_.display() == 0; net_->set_debug_info(display && param_.debug_info()); // accumulate the loss and gradient @@ -240,8 +211,7 @@ void Solver::Step(int iters) { losses[idx] = loss; } if (display) { - LOG_IF(INFO, Caffe::root_solver()) << "Iteration " << iter_ - << ", loss = " << smoothed_loss; + LOG(INFO) << "Iteration " << iter_ << ", loss = " << smoothed_loss; const vector*>& result = net_->output_blobs(); int score_index = 0; for (int j = 0; j < result.size(); ++j) { @@ -256,47 +226,30 @@ void Solver::Step(int iters) { loss_msg_stream << " (* " << loss_weight << " = " << loss_weight * result_vec[k] << " loss)"; } - LOG_IF(INFO, Caffe::root_solver()) << " Train net output #" + LOG(INFO) << " Train net output #" << score_index++ << ": " << output_name << " = " << result_vec[k] << loss_msg_stream.str(); } } } - for (int i = 0; i < callbacks_.size(); ++i) { - callbacks_[i]->on_gradients_ready(); - } ApplyUpdate(); // Increment the internal iter_ counter -- its value should always indicate // the number of times the weights have been updated. 
++iter_; - SolverAction::Enum request = GetRequestedAction(); - // Save a snapshot if needed. - if ((param_.snapshot() - && iter_ % param_.snapshot() == 0 - && Caffe::root_solver()) || - (request == SolverAction::SNAPSHOT)) { + if (param_.snapshot() && iter_ % param_.snapshot() == 0) { Snapshot(); } - if (SolverAction::STOP == request) { - requested_early_exit_ = true; - // Break out of training loop. - break; - } } } template void Solver::Solve(const char* resume_file) { - CHECK(Caffe::root_solver()); LOG(INFO) << "Solving " << net_->name(); LOG(INFO) << "Learning Rate Policy: " << param_.lr_policy(); - // Initialize to false every time we start solving. - requested_early_exit_ = false; - if (resume_file) { LOG(INFO) << "Restoring previous solver status from " << resume_file; Restore(resume_file); @@ -311,10 +264,6 @@ void Solver::Solve(const char* resume_file) { && (!param_.snapshot() || iter_ % param_.snapshot() != 0)) { Snapshot(); } - if (requested_early_exit_) { - LOG(INFO) << "Optimization stopped early."; - return; - } // After the optimization is done, run an additional train and test pass to // display the train and test loss/outputs if appropriate (based on the // display and test_interval settings, respectively). 
Unlike in the rest of @@ -332,18 +281,16 @@ void Solver::Solve(const char* resume_file) { LOG(INFO) << "Optimization Done."; } + template void Solver::TestAll() { - for (int test_net_id = 0; - test_net_id < test_nets_.size() && !requested_early_exit_; - ++test_net_id) { + for (int test_net_id = 0; test_net_id < test_nets_.size(); ++test_net_id) { Test(test_net_id); } } template void Solver::Test(const int test_net_id) { - CHECK(Caffe::root_solver()); LOG(INFO) << "Iteration " << iter_ << ", Testing net (#" << test_net_id << ")"; CHECK_NOTNULL(test_nets_[test_net_id].get())-> @@ -354,21 +301,6 @@ void Solver::Test(const int test_net_id) { const shared_ptr >& test_net = test_nets_[test_net_id]; Dtype loss = 0; for (int i = 0; i < param_.test_iter(test_net_id); ++i) { - SolverAction::Enum request = GetRequestedAction(); - // Check to see if stoppage of testing/training has been requested. - while (request != SolverAction::NONE) { - if (SolverAction::SNAPSHOT == request) { - Snapshot(); - } else if (SolverAction::STOP == request) { - requested_early_exit_ = true; - } - request = GetRequestedAction(); - } - if (requested_early_exit_) { - // break out of test loop. 
- break; - } - Dtype iter_loss; const vector*>& result = test_net->Forward(bottom_vec, &iter_loss); @@ -393,10 +325,6 @@ void Solver::Test(const int test_net_id) { } } } - if (requested_early_exit_) { - LOG(INFO) << "Test interrupted."; - return; - } if (param_.test_compute_loss()) { loss /= param_.test_iter(test_net_id); LOG(INFO) << "Test loss: " << loss; @@ -413,85 +341,50 @@ void Solver::Test(const int test_net_id) { << " = " << loss_weight * mean_score << " loss)"; } LOG(INFO) << " Test net output #" << i << ": " << output_name << " = " - << mean_score << loss_msg_stream.str(); - } -} - -template -void Solver::Snapshot() { - CHECK(Caffe::root_solver()); - string model_filename; - switch (param_.snapshot_format()) { - case caffe::SolverParameter_SnapshotFormat_BINARYPROTO: - model_filename = SnapshotToBinaryProto(); - break; - case caffe::SolverParameter_SnapshotFormat_HDF5: - model_filename = SnapshotToHDF5(); - break; - default: - LOG(FATAL) << "Unsupported snapshot format."; + << mean_score << loss_msg_stream.str(); } - - SnapshotSolverState(model_filename); } -template -void Solver::CheckSnapshotWritePermissions() { - if (Caffe::root_solver() && param_.snapshot()) { - CHECK(param_.has_snapshot_prefix()) - << "In solver params, snapshot is specified but snapshot_prefix is not"; - string probe_filename = SnapshotFilename(".tempfile"); - std::ofstream probe_ofs(probe_filename.c_str()); - if (probe_ofs.good()) { - probe_ofs.close(); - std::remove(probe_filename.c_str()); - } else { - LOG(FATAL) << "Cannot write to snapshot prefix '" - << param_.snapshot_prefix() << "'. Make sure " - << "that the directory exists and is writeable."; - } - } -} template -string Solver::SnapshotFilename(const string extension) { +void Solver::Snapshot() { + NetParameter net_param; + // For intermediate results, we will also dump the gradient values. 
+ net_->ToProto(&net_param, param_.snapshot_diff()); string filename(param_.snapshot_prefix()); + string model_filename, snapshot_filename; const int kBufferSize = 20; char iter_str_buffer[kBufferSize]; snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_); - return filename + iter_str_buffer + extension; -} - -template -string Solver::SnapshotToBinaryProto() { - string model_filename = SnapshotFilename(".caffemodel"); - LOG(INFO) << "Snapshotting to binary proto file " << model_filename; - NetParameter net_param; - net_->ToProto(&net_param, param_.snapshot_diff()); - WriteProtoToBinaryFile(net_param, model_filename); - return model_filename; -} - -template -string Solver::SnapshotToHDF5() { - string model_filename = SnapshotFilename(".caffemodel.h5"); - LOG(INFO) << "Snapshotting to HDF5 file " << model_filename; - net_->ToHDF5(model_filename, param_.snapshot_diff()); - return model_filename; + filename += iter_str_buffer; + model_filename = filename + ".caffemodel"; + LOG(INFO) << "Snapshotting to " << model_filename; + WriteProtoToBinaryFile(net_param, model_filename.c_str()); + SolverState state; + SnapshotSolverState(&state); + state.set_iter(iter_); + state.set_learned_net(model_filename); + state.set_current_step(current_step_); + snapshot_filename = filename + ".solverstate"; + LOG(INFO) << "Snapshotting solver state to " << snapshot_filename; + WriteProtoToBinaryFile(state, snapshot_filename.c_str()); } template void Solver::Restore(const char* state_file) { - CHECK(Caffe::root_solver()); - string state_filename(state_file); - if (state_filename.size() >= 3 && - state_filename.compare(state_filename.size() - 3, 3, ".h5") == 0) { - RestoreSolverStateFromHDF5(state_filename); - } else { - RestoreSolverStateFromBinaryProto(state_filename); + SolverState state; + NetParameter net_param; + ReadProtoFromBinaryFile(state_file, &state); + if (state.has_learned_net()) { + ReadNetParamsFromBinaryFileOrDie(state.learned_net().c_str(), &net_param); + 
net_->CopyTrainedLayersFrom(net_param); } + iter_ = state.iter(); + current_step_ = state.current_step(); + RestoreSolverState(state); } + // Return the current learning rate. The currently implemented learning rate // policies are as follows: // - fixed: always return base_lr. @@ -549,7 +442,7 @@ Dtype SGDSolver::GetLearningRate() { template void SGDSolver::PreSolve() { // Initialize the history - const vector*>& net_params = this->net_->learnable_params(); + const vector > >& net_params = this->net_->params(); history_.clear(); update_.clear(); temp_.clear(); @@ -565,10 +458,12 @@ template void SGDSolver::ClipGradients() { const Dtype clip_gradients = this->param_.clip_gradients(); if (clip_gradients < 0) { return; } - const vector*>& net_params = this->net_->learnable_params(); + const vector > >& net_params = this->net_->params(); Dtype sumsq_diff = 0; for (int i = 0; i < net_params.size(); ++i) { - sumsq_diff += net_params[i]->sumsq_diff(); + if (this->net_->param_owners()[i] < 0) { + sumsq_diff += net_params[i]->sumsq_diff(); + } } const Dtype l2norm_diff = std::sqrt(sumsq_diff); if (l2norm_diff > clip_gradients) { @@ -577,21 +472,21 @@ void SGDSolver::ClipGradients() { << l2norm_diff << " > " << clip_gradients << ") " << "by scale factor " << scale_factor; for (int i = 0; i < net_params.size(); ++i) { - net_params[i]->scale_diff(scale_factor); + if (this->net_->param_owners()[i] < 0) { + net_params[i]->scale_diff(scale_factor); + } } } } template void SGDSolver::ApplyUpdate() { - CHECK(Caffe::root_solver()); Dtype rate = GetLearningRate(); if (this->param_.display() && this->iter_ % this->param_.display() == 0) { LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; } ClipGradients(); - for (int param_id = 0; param_id < this->net_->learnable_params().size(); - ++param_id) { + for (int param_id = 0; param_id < this->net_->params().size(); ++param_id) { Normalize(param_id); Regularize(param_id); ComputeUpdateValue(param_id, rate); @@ -603,7 +498,7 @@ 
template void SGDSolver::Normalize(int param_id) { if (this->param_.iter_size() == 1) { return; } // Scale gradient to counterbalance accumulation. - const vector*>& net_params = this->net_->learnable_params(); + const vector > >& net_params = this->net_->params(); const Dtype accum_normalization = Dtype(1.) / this->param_.iter_size(); switch (Caffe::mode()) { case Caffe::CPU: { @@ -627,7 +522,7 @@ void SGDSolver::Normalize(int param_id) { template void SGDSolver::Regularize(int param_id) { - const vector*>& net_params = this->net_->learnable_params(); + const vector > >& net_params = this->net_->params(); const vector& net_params_weight_decay = this->net_->params_weight_decay(); Dtype weight_decay = this->param_.weight_decay(); @@ -689,7 +584,7 @@ void SGDSolver::Regularize(int param_id) { template void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { - const vector*>& net_params = this->net_->learnable_params(); + const vector > >& net_params = this->net_->params(); const vector& net_params_lr = this->net_->params_lr(); Dtype momentum = this->param_.momentum(); Dtype local_rate = rate * net_params_lr[param_id]; @@ -723,76 +618,17 @@ void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { } template -void SGDSolver::SnapshotSolverState(const string& model_filename) { - switch (this->param_.snapshot_format()) { - case caffe::SolverParameter_SnapshotFormat_BINARYPROTO: - SnapshotSolverStateToBinaryProto(model_filename); - break; - case caffe::SolverParameter_SnapshotFormat_HDF5: - SnapshotSolverStateToHDF5(model_filename); - break; - default: - LOG(FATAL) << "Unsupported snapshot format."; - } -} - -template -void SGDSolver::SnapshotSolverStateToBinaryProto( - const string& model_filename) { - SolverState state; - state.set_iter(this->iter_); - state.set_learned_net(model_filename); - state.set_current_step(this->current_step_); - state.clear_history(); +void SGDSolver::SnapshotSolverState(SolverState* state) { + state->clear_history(); for (int i = 
0; i < history_.size(); ++i) { // Add history - BlobProto* history_blob = state.add_history(); + BlobProto* history_blob = state->add_history(); history_[i]->ToProto(history_blob); } - string snapshot_filename = Solver::SnapshotFilename(".solverstate"); - LOG(INFO) - << "Snapshotting solver state to binary proto file " << snapshot_filename; - WriteProtoToBinaryFile(state, snapshot_filename.c_str()); } template -void SGDSolver::SnapshotSolverStateToHDF5( - const string& model_filename) { - string snapshot_filename = - Solver::SnapshotFilename(".solverstate.h5"); - LOG(INFO) << "Snapshotting solver state to HDF5 file " << snapshot_filename; - hid_t file_hid = H5Fcreate(snapshot_filename.c_str(), H5F_ACC_TRUNC, - H5P_DEFAULT, H5P_DEFAULT); - CHECK_GE(file_hid, 0) - << "Couldn't open " << snapshot_filename << " to save solver state."; - hdf5_save_int(file_hid, "iter", this->iter_); - hdf5_save_string(file_hid, "learned_net", model_filename); - hdf5_save_int(file_hid, "current_step", this->current_step_); - hid_t history_hid = H5Gcreate2(file_hid, "history", H5P_DEFAULT, H5P_DEFAULT, - H5P_DEFAULT); - CHECK_GE(history_hid, 0) - << "Error saving solver state to " << snapshot_filename << "."; - for (int i = 0; i < history_.size(); ++i) { - ostringstream oss; - oss << i; - hdf5_save_nd_dataset(history_hid, oss.str(), *history_[i]); - } - H5Gclose(history_hid); - H5Fclose(file_hid); -} - -template -void SGDSolver::RestoreSolverStateFromBinaryProto( - const string& state_file) { - SolverState state; - ReadProtoFromBinaryFile(state_file, &state); - this->iter_ = state.iter(); - if (state.has_learned_net()) { - NetParameter net_param; - ReadNetParamsFromBinaryFileOrDie(state.learned_net().c_str(), &net_param); - this->net_->CopyTrainedLayersFrom(net_param); - } - this->current_step_ = state.current_step(); +void SGDSolver::RestoreSolverState(const SolverState& state) { CHECK_EQ(state.history_size(), history_.size()) << "Incorrect length of history blobs."; LOG(INFO) << 
"SGDSolver: restoring history"; @@ -801,35 +637,9 @@ void SGDSolver::RestoreSolverStateFromBinaryProto( } } -template -void SGDSolver::RestoreSolverStateFromHDF5(const string& state_file) { - hid_t file_hid = H5Fopen(state_file.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK_GE(file_hid, 0) << "Couldn't open solver state file " << state_file; - this->iter_ = hdf5_load_int(file_hid, "iter"); - if (H5LTfind_dataset(file_hid, "learned_net")) { - string learned_net = hdf5_load_string(file_hid, "learned_net"); - this->net_->CopyTrainedLayersFrom(learned_net); - } - this->current_step_ = hdf5_load_int(file_hid, "current_step"); - hid_t history_hid = H5Gopen2(file_hid, "history", H5P_DEFAULT); - CHECK_GE(history_hid, 0) << "Error reading history from " << state_file; - int state_history_size = hdf5_get_num_links(history_hid); - CHECK_EQ(state_history_size, history_.size()) - << "Incorrect length of history blobs."; - for (int i = 0; i < history_.size(); ++i) { - ostringstream oss; - oss << i; - hdf5_load_nd_dataset(history_hid, oss.str().c_str(), 0, - kMaxBlobAxes, history_[i].get()); - } - H5Gclose(history_hid); - H5Fclose(file_hid); -} - template void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { - CHECK(Caffe::root_solver()); - const vector*>& net_params = this->net_->learnable_params(); + const vector > >& net_params = this->net_->params(); const vector& net_params_lr = this->net_->params_lr(); Dtype momentum = this->param_.momentum(); Dtype local_rate = rate * net_params_lr[param_id]; @@ -889,8 +699,7 @@ void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { template void AdaGradSolver::ComputeUpdateValue(int param_id, Dtype rate) { - CHECK(Caffe::root_solver()); - const vector*>& net_params = this->net_->learnable_params(); + const vector > >& net_params = this->net_->params(); const vector& net_params_lr = this->net_->params_lr(); Dtype delta = this->param_.delta(); Dtype local_rate = rate * net_params_lr[param_id]; @@ -966,336 +775,9 
@@ void AdaGradSolver::ComputeUpdateValue(int param_id, Dtype rate) { } } -template -void RMSPropSolver::ComputeUpdateValue(int param_id, Dtype rate) { - const vector*>& net_params = this->net_->learnable_params(); - const vector& net_params_lr = this->net_->params_lr(); - - // get the learning rate - Dtype delta = this->param_.delta(); - Dtype rms_decay = this->param_.rms_decay(); - Dtype local_rate = rate * net_params_lr[param_id]; - - switch (Caffe::mode()) { - case Caffe::CPU: - // compute square of gradient in update - caffe_powx(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), Dtype(2), - this->update_[param_id]->mutable_cpu_data()); - - // update history - caffe_cpu_axpby(net_params[param_id] -> count(), - Dtype(1-rms_decay), this->update_[param_id]->cpu_data(), - rms_decay, this->history_[param_id]-> mutable_cpu_data()); - - // prepare update - caffe_powx(net_params[param_id]->count(), - this->history_[param_id]->cpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_cpu_data()); - - caffe_add_scalar(net_params[param_id]->count(), - delta, this->update_[param_id]->mutable_cpu_data()); - - caffe_div(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), this->update_[param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); - - // scale and copy - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->cpu_data(), Dtype(0), - net_params[param_id]->mutable_cpu_diff()); - break; - case Caffe::GPU: -#ifndef CPU_ONLY - // compute square of gradient in update - caffe_gpu_powx(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), Dtype(2), - this->update_[param_id]->mutable_gpu_data()); - - // update history - caffe_gpu_axpby(net_params[param_id] -> count(), - Dtype(1-rms_decay), this->update_[param_id]->gpu_data(), - rms_decay, this->history_[param_id]-> mutable_gpu_data()); - - // prepare update - caffe_gpu_powx(net_params[param_id]->count(), - 
this->history_[param_id]->gpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_add_scalar(net_params[param_id]->count(), - delta, this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_div(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), this->update_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->gpu_data(), Dtype(0), - net_params[param_id]->mutable_gpu_diff()); -#else - NO_GPU; -#endif - break; - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} - -template -void AdaDeltaSolver::AdaDeltaPreSolve() { - // Add the extra history entries for AdaDelta after those from - // SGDSolver::PreSolve - const vector*>& net_params = this->net_->learnable_params(); - for (int i = 0; i < net_params.size(); ++i) { - const vector& shape = net_params[i]->shape(); - this->history_.push_back( - shared_ptr >(new Blob(shape))); - } -} - -template -void AdaDeltaSolver::ComputeUpdateValue(int param_id, Dtype rate) { - const vector*>& net_params = this->net_->learnable_params(); - const vector& net_params_lr = this->net_->params_lr(); - Dtype delta = this->param_.delta(); - Dtype momentum = this->param_.momentum(); - Dtype local_rate = rate * net_params_lr[param_id]; - size_t update_history_offset = net_params.size(); - switch (Caffe::mode()) { - case Caffe::CPU: { - // compute square of gradient in update - caffe_powx(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), Dtype(2), - this->update_[param_id]->mutable_cpu_data()); - - // update history of gradients - caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, - this->update_[param_id]->cpu_data(), momentum, - this->history_[param_id]->mutable_cpu_data()); - - // add delta to history to guard against dividing by zero later - caffe_set(net_params[param_id]->count(), delta, - 
this->temp_[param_id]->mutable_cpu_data()); - - caffe_add(net_params[param_id]->count(), - this->temp_[param_id]->cpu_data(), - this->history_[update_history_offset + param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); - - caffe_add(net_params[param_id]->count(), - this->temp_[param_id]->cpu_data(), - this->history_[param_id]->cpu_data(), - this->temp_[param_id]->mutable_cpu_data()); - - // divide history of updates by history of gradients - caffe_div(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), - this->temp_[param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); - - // jointly compute the RMS of both for update and gradient history - caffe_powx(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_cpu_data()); - - // compute the update - caffe_mul(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), - this->update_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - - // compute square of update - caffe_powx(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), Dtype(2), - this->update_[param_id]->mutable_cpu_data()); - - // update history of updates - caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, - this->update_[param_id]->cpu_data(), momentum, - this->history_[update_history_offset + param_id]->mutable_cpu_data()); - - // apply learning rate - caffe_cpu_scale(net_params[param_id]->count(), local_rate, - net_params[param_id]->cpu_diff(), - net_params[param_id]->mutable_cpu_diff()); - break; - } - case Caffe::GPU: { -#ifndef CPU_ONLY - // compute square of gradient in update - caffe_gpu_powx(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), Dtype(2), - this->update_[param_id]->mutable_gpu_data()); - - // update history of gradients - caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, - this->update_[param_id]->gpu_data(), momentum, - 
this->history_[param_id]->mutable_gpu_data()); - - // add delta to history to guard against dividing by zero later - caffe_gpu_set(net_params[param_id]->count(), delta, - this->temp_[param_id]->mutable_gpu_data()); - - caffe_gpu_add(net_params[param_id]->count(), - this->temp_[param_id]->gpu_data(), - this->history_[update_history_offset + param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_add(net_params[param_id]->count(), - this->temp_[param_id]->gpu_data(), - this->history_[param_id]->gpu_data(), - this->temp_[param_id]->mutable_gpu_data()); - - // divide history of updates by history of gradients - caffe_gpu_div(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), - this->temp_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - // jointly compute the RMS of both for update and gradient history - caffe_gpu_powx(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_gpu_data()); - - // compute the update and copy to net_diff - caffe_gpu_mul(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), - this->update_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - - // compute square of update - caffe_gpu_powx(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), Dtype(2), - this->update_[param_id]->mutable_gpu_data()); - - // update history of updates - caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, - this->update_[param_id]->gpu_data(), momentum, - this->history_[update_history_offset + param_id]->mutable_gpu_data()); - - // apply learning rate - caffe_gpu_scale(net_params[param_id]->count(), local_rate, - net_params[param_id]->gpu_diff(), - net_params[param_id]->mutable_gpu_diff()); -#else - NO_GPU; -#endif - break; - } - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} - -template -void AdamSolver::AdamPreSolve() { - // Add the extra history 
entries for Adam after those from - // SGDSolver::PreSolve - const vector*>& net_params = this->net_->learnable_params(); - for (int i = 0; i < net_params.size(); ++i) { - const vector& shape = net_params[i]->shape(); - this->history_.push_back( - shared_ptr >(new Blob(shape))); - } -} - -template -void AdamSolver::ComputeUpdateValue(int param_id, Dtype rate) { - const vector*>& net_params = this->net_->learnable_params(); - const vector& net_params_lr = this->net_->params_lr(); - Dtype local_rate = rate * net_params_lr[param_id]; - const Dtype beta1 = this->param_.momentum(); - const Dtype beta2 = this->param_.momentum2(); - - // we create aliases for convenience - size_t update_history_offset = net_params.size(); - Blob* val_m = this->history_[param_id].get(); - Blob* val_v = this->history_[param_id + update_history_offset].get(); - Blob* val_t = this->temp_[param_id].get(); - - const int t = this->iter_ + 1; - const Dtype correction = std::sqrt(Dtype(1) - pow(beta2, t)) / - (Dtype(1.) 
- pow(beta1, t)); - const int N = net_params[param_id]->count(); - const Dtype eps_hat = this->param_.delta(); - - switch (Caffe::mode()) { - case Caffe::CPU: { - // update m <- \beta_1 m_{t-1} + (1-\beta_1)g_t - caffe_cpu_axpby(N, Dtype(1)-beta1, - net_params[param_id]->cpu_diff(), beta1, - val_m->mutable_cpu_data()); - - // update v <- \beta_2 m_{t-1} + (1-\beta_2)g_t^2 - caffe_mul(N, - net_params[param_id]->cpu_diff(), - net_params[param_id]->cpu_diff(), - val_t->mutable_cpu_data()); - caffe_cpu_axpby(N, Dtype(1)-beta2, - val_t->cpu_data(), beta2, - val_v->mutable_cpu_data()); - - // set update - caffe_powx(N, - val_v->cpu_data(), Dtype(0.5), - val_t->mutable_cpu_data()); - caffe_add_scalar(N, eps_hat, val_t->mutable_cpu_data()); - caffe_div(N, - val_m->cpu_data(), - val_t->cpu_data(), - val_t->mutable_cpu_data()); - - caffe_cpu_scale(N, local_rate*correction, - val_t->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - break; - } - case Caffe::GPU: { -#ifndef CPU_ONLY - // update m <- \beta_1 m_{t-1} + (1-\beta_1)g_t - caffe_gpu_axpby(N, Dtype(1)-beta1, - net_params[param_id]->gpu_diff(), beta1, - val_m->mutable_gpu_data()); - - // update v <- \beta_2 m_{t-1} + (1-\beta_2)g_t^2 - caffe_gpu_mul(N, - net_params[param_id]->gpu_diff(), - net_params[param_id]->gpu_diff(), - val_t->mutable_gpu_data()); - caffe_gpu_axpby(N, Dtype(1)-beta2, - val_t->gpu_data(), beta2, - val_v->mutable_gpu_data()); - - // set update - caffe_gpu_powx(N, - val_v->gpu_data(), Dtype(0.5), - val_t->mutable_gpu_data()); - caffe_gpu_add_scalar(N, eps_hat, - val_t->mutable_gpu_data()); - caffe_gpu_div(N, - val_m->gpu_data(), - val_t->gpu_data(), - val_t->mutable_gpu_data()); - - caffe_gpu_scale(N, local_rate*correction, - val_t->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); -#else - NO_GPU; -#endif - break; - } - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} - INSTANTIATE_CLASS(Solver); INSTANTIATE_CLASS(SGDSolver); INSTANTIATE_CLASS(NesterovSolver); 
INSTANTIATE_CLASS(AdaGradSolver); -INSTANTIATE_CLASS(RMSPropSolver); -INSTANTIATE_CLASS(AdaDeltaSolver); -INSTANTIATE_CLASS(AdamSolver); } // namespace caffe diff --git a/src/caffe/test/test_accuracy_layer.cpp b/src/caffe/test/test_accuracy_layer.cpp index b5083630217..ef0e57a37a1 100644 --- a/src/caffe/test/test_accuracy_layer.cpp +++ b/src/caffe/test/test_accuracy_layer.cpp @@ -16,7 +16,7 @@ namespace caffe { template -class AccuracyLayerTest : public ::testing::Test { +class AccuracyLayerTest : public CPUDeviceTest { protected: AccuracyLayerTest() : blob_bottom_data_(new Blob()), @@ -112,7 +112,6 @@ TYPED_TEST(AccuracyLayerTest, TestSetupOutputPerClass) { TYPED_TEST(AccuracyLayerTest, TestForwardCPU) { LayerParameter layer_param; - Caffe::set_mode(Caffe::CPU); AccuracyLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); @@ -138,7 +137,6 @@ TYPED_TEST(AccuracyLayerTest, TestForwardCPU) { } TYPED_TEST(AccuracyLayerTest, TestForwardWithSpatialAxes) { - Caffe::set_mode(Caffe::CPU); this->blob_bottom_data_->Reshape(2, 10, 4, 5); vector label_shape(3); label_shape[0] = 2; label_shape[1] = 4; label_shape[2] = 5; @@ -182,7 +180,6 @@ TYPED_TEST(AccuracyLayerTest, TestForwardWithSpatialAxes) { } TYPED_TEST(AccuracyLayerTest, TestForwardIgnoreLabel) { - Caffe::set_mode(Caffe::CPU); LayerParameter layer_param; const TypeParam kIgnoreLabelValue = -1; layer_param.mutable_accuracy_param()->set_ignore_label(kIgnoreLabelValue); diff --git a/src/caffe/test/test_argmax_layer.cpp b/src/caffe/test/test_argmax_layer.cpp index 3487d42f21e..895c3d372ff 100644 --- a/src/caffe/test/test_argmax_layer.cpp +++ b/src/caffe/test/test_argmax_layer.cpp @@ -13,13 +13,12 @@ namespace caffe { template -class ArgMaxLayerTest : public ::testing::Test { +class ArgMaxLayerTest : public CPUDeviceTest { protected: ArgMaxLayerTest() : blob_bottom_(new Blob(10, 20, 1, 1)), blob_top_(new Blob()), top_k_(5) { - 
Caffe::set_mode(Caffe::CPU); Caffe::set_random_seed(1701); // fill the values FillerParameter filler_param; diff --git a/src/caffe/test/test_contrastive_loss_layer.cpp b/src/caffe/test/test_contrastive_loss_layer.cpp index d269fbc26f2..1e9447cbc51 100644 --- a/src/caffe/test/test_contrastive_loss_layer.cpp +++ b/src/caffe/test/test_contrastive_loss_layer.cpp @@ -22,15 +22,15 @@ class ContrastiveLossLayerTest : public MultiDeviceTest { protected: ContrastiveLossLayerTest() - : blob_bottom_data_i_(new Blob(128, 10, 1, 1)), - blob_bottom_data_j_(new Blob(128, 10, 1, 1)), - blob_bottom_y_(new Blob(128, 1, 1, 1)), + : blob_bottom_data_i_(new Blob(512, 2, 1, 1)), + blob_bottom_data_j_(new Blob(512, 2, 1, 1)), + blob_bottom_y_(new Blob(512, 1, 1, 1)), blob_top_loss_(new Blob()) { // fill the values FillerParameter filler_param; - filler_param.set_mean(0.0); - filler_param.set_std(0.3); // distances~=1.0 to test both sides of margin - GaussianFiller filler(filler_param); + filler_param.set_min(-1.0); + filler_param.set_max(1.0); // distances~=1.0 to test both sides of margin + UniformFiller filler(filler_param); filler.Fill(this->blob_bottom_data_i_); blob_bottom_vec_.push_back(blob_bottom_data_i_); filler.Fill(this->blob_bottom_data_j_); @@ -79,7 +79,8 @@ TYPED_TEST(ContrastiveLossLayerTest, TestForward) { if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs loss += dist_sq; } else { - loss += std::max(margin-dist_sq, Dtype(0)); + Dtype dist = std::max(margin - sqrt(dist_sq), 0.0); + loss += dist*dist; } } loss /= static_cast(num) * Dtype(2); @@ -99,4 +100,47 @@ TYPED_TEST(ContrastiveLossLayerTest, TestGradient) { this->blob_top_vec_, 1); } +TYPED_TEST(ContrastiveLossLayerTest, TestForwardLegacy) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_contrastive_loss_param()->set_legacy_version(true); + ContrastiveLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + 
layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // manually compute to compare + const Dtype margin = layer_param.contrastive_loss_param().margin(); + const int num = this->blob_bottom_data_i_->num(); + const int channels = this->blob_bottom_data_i_->channels(); + Dtype loss(0); + for (int i = 0; i < num; ++i) { + Dtype dist_sq(0); + for (int j = 0; j < channels; ++j) { + Dtype diff = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - + this->blob_bottom_data_j_->cpu_data()[i*channels+j]; + dist_sq += diff*diff; + } + if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs + loss += dist_sq; + } else { + loss += std::max(margin - dist_sq, Dtype(0.0)); + } + } + loss /= static_cast(num) * Dtype(2); + EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); +} + +TYPED_TEST(ContrastiveLossLayerTest, TestGradientLegacy) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_contrastive_loss_param()->set_legacy_version(true); + ContrastiveLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + GradientChecker checker(1e-2, 1e-2, 1701); + // check the gradient for the first two bottom layers + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 1); +} + } // namespace caffe diff --git a/src/caffe/test/test_convolution_layer.cpp b/src/caffe/test/test_convolution_layer.cpp index a58b3e1fe74..9df979a2d27 100644 --- a/src/caffe/test/test_convolution_layer.cpp +++ b/src/caffe/test/test_convolution_layer.cpp @@ -692,7 +692,7 @@ TYPED_TEST(ConvolutionLayerTest, TestGradientGroup) { #ifdef USE_CUDNN template -class CuDNNConvolutionLayerTest : public ::testing::Test { +class CuDNNConvolutionLayerTest : public GPUDeviceTest { protected: CuDNNConvolutionLayerTest() : blob_bottom_(new Blob(2, 3, 6, 4)), @@ -735,7 +735,6 @@ class CuDNNConvolutionLayerTest 
: public ::testing::Test { TYPED_TEST_CASE(CuDNNConvolutionLayerTest, TestDtypes); TYPED_TEST(CuDNNConvolutionLayerTest, TestSetupCuDNN) { - Caffe::set_mode(Caffe::GPU); this->blob_bottom_vec_.push_back(this->blob_bottom_2_); this->blob_top_vec_.push_back(this->blob_top_2_); LayerParameter layer_param; @@ -773,7 +772,6 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSetupCuDNN) { } TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionCuDNN) { - Caffe::set_mode(Caffe::GPU); this->blob_bottom_vec_.push_back(this->blob_bottom_2_); this->blob_top_vec_.push_back(this->blob_top_2_); LayerParameter layer_param; @@ -809,7 +807,6 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionCuDNN) { } TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionGroupCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); @@ -840,7 +837,7 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSobelConvolutionCuDNN) { // Test separable convolution by computing the Sobel operator // as a single filter then comparing the result // as the convolution of two rectangular filters. - Caffe::set_mode(Caffe::GPU); + // Fill bottoms with identical Gaussian noise. 
shared_ptr > filler; FillerParameter filler_param; @@ -930,7 +927,6 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSobelConvolutionCuDNN) { } TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); @@ -948,7 +944,6 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientCuDNN) { } TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientGroupCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); diff --git a/src/caffe/test/test_data/generate_sample_data.py b/src/caffe/test/test_data/generate_sample_data.py index 3703b41823b..ab5572685cb 100644 --- a/src/caffe/test/test_data/generate_sample_data.py +++ b/src/caffe/test/test_data/generate_sample_data.py @@ -1,5 +1,5 @@ """ -Generate data used in the HDF5DataLayer and GradientBasedSolver tests. +Generate data used in the HDF5DataLayer test. 
""" import os import numpy as np @@ -7,8 +7,6 @@ script_dir = os.path.dirname(os.path.abspath(__file__)) -# Generate HDF5DataLayer sample_data.h5 - num_cols = 8 num_rows = 10 height = 6 @@ -53,27 +51,3 @@ with open(script_dir + '/sample_data_list.txt', 'w') as f: f.write(script_dir + '/sample_data.h5\n') f.write(script_dir + '/sample_data_2_gzip.h5\n') - -# Generate GradientBasedSolver solver_data.h5 - -num_cols = 3 -num_rows = 8 -height = 10 -width = 10 - -data = np.random.randn(num_rows, num_cols, height, width) -data = data.reshape(num_rows, num_cols, height, width) -data = data.astype('float32') - -targets = np.random.randn(num_rows, 1) -targets = targets.astype('float32') - -print data -print targets - -with h5py.File(script_dir + '/solver_data.h5', 'w') as f: - f['data'] = data - f['targets'] = targets - -with open(script_dir + '/solver_data_list.txt', 'w') as f: - f.write(script_dir + '/solver_data.h5\n') diff --git a/src/caffe/test/test_dummy_data_layer.cpp b/src/caffe/test/test_dummy_data_layer.cpp index 99548352746..c9ed38db3a5 100644 --- a/src/caffe/test/test_dummy_data_layer.cpp +++ b/src/caffe/test/test_dummy_data_layer.cpp @@ -13,7 +13,7 @@ namespace caffe { template -class DummyDataLayerTest : public ::testing::Test { +class DummyDataLayerTest : public CPUDeviceTest { protected: DummyDataLayerTest() : blob_top_a_(new Blob()), @@ -44,7 +44,6 @@ class DummyDataLayerTest : public ::testing::Test { TYPED_TEST_CASE(DummyDataLayerTest, TestDtypes); TYPED_TEST(DummyDataLayerTest, TestOneTopConstant) { - Caffe::set_mode(Caffe::CPU); LayerParameter param; DummyDataParameter* dummy_data_param = param.mutable_dummy_data_param(); dummy_data_param->add_num(5); @@ -74,7 +73,6 @@ TYPED_TEST(DummyDataLayerTest, TestOneTopConstant) { } TYPED_TEST(DummyDataLayerTest, TestTwoTopConstant) { - Caffe::set_mode(Caffe::CPU); LayerParameter param; DummyDataParameter* dummy_data_param = param.mutable_dummy_data_param(); dummy_data_param->add_num(5); @@ -113,7 +111,6 @@ 
TYPED_TEST(DummyDataLayerTest, TestTwoTopConstant) { } TYPED_TEST(DummyDataLayerTest, TestThreeTopConstantGaussianConstant) { - Caffe::set_mode(Caffe::CPU); LayerParameter param; DummyDataParameter* dummy_data_param = param.mutable_dummy_data_param(); dummy_data_param->add_num(5); diff --git a/src/caffe/test/test_filler.cpp b/src/caffe/test/test_filler.cpp index e04b0fd22af..728b8dc5f0d 100644 --- a/src/caffe/test/test_filler.cpp +++ b/src/caffe/test/test_filler.cpp @@ -142,4 +142,102 @@ TYPED_TEST(GaussianFillerTest, TestFill) { EXPECT_LE(var, target_var * 5.); } +template +class XavierFillerTest : public ::testing::Test { + protected: + XavierFillerTest() + : blob_(new Blob(1000, 2, 4, 5)), + filler_param_() { + } + virtual void test_params(FillerParameter_VarianceNorm variance_norm, + Dtype n) { + this->filler_param_.set_variance_norm(variance_norm); + this->filler_.reset(new XavierFiller(this->filler_param_)); + this->filler_->Fill(blob_); + EXPECT_TRUE(this->blob_); + const int count = this->blob_->count(); + const Dtype* data = this->blob_->cpu_data(); + Dtype mean = 0.; + Dtype ex2 = 0.; + for (int i = 0; i < count; ++i) { + mean += data[i]; + ex2 += data[i] * data[i]; + } + mean /= count; + ex2 /= count; + Dtype std = sqrt(ex2 - mean*mean); + Dtype target_std = sqrt(2.0 / n); + EXPECT_NEAR(mean, 0.0, 0.1); + EXPECT_NEAR(std, target_std, 0.1); + } + virtual ~XavierFillerTest() { delete blob_; } + Blob* const blob_; + FillerParameter filler_param_; + shared_ptr > filler_; +}; + +TYPED_TEST_CASE(XavierFillerTest, TestDtypes); + +TYPED_TEST(XavierFillerTest, TestFillFanIn) { + TypeParam n = 2*4*5; + this->test_params(FillerParameter_VarianceNorm_FAN_IN, n); +} +TYPED_TEST(XavierFillerTest, TestFillFanOut) { + TypeParam n = 1000*4*5; + this->test_params(FillerParameter_VarianceNorm_FAN_OUT, n); +} +TYPED_TEST(XavierFillerTest, TestFillAverage) { + TypeParam n = (2*4*5 + 1000*4*5) / 2.0; + this->test_params(FillerParameter_VarianceNorm_AVERAGE, n); +} + +template 
+class MSRAFillerTest : public ::testing::Test { + protected: + MSRAFillerTest() + : blob_(new Blob(1000, 2, 4, 5)), + filler_param_() { + } + virtual void test_params(FillerParameter_VarianceNorm variance_norm, + Dtype n) { + this->filler_param_.set_variance_norm(variance_norm); + this->filler_.reset(new MSRAFiller(this->filler_param_)); + this->filler_->Fill(blob_); + EXPECT_TRUE(this->blob_); + const int count = this->blob_->count(); + const Dtype* data = this->blob_->cpu_data(); + Dtype mean = 0.; + Dtype ex2 = 0.; + for (int i = 0; i < count; ++i) { + mean += data[i]; + ex2 += data[i] * data[i]; + } + mean /= count; + ex2 /= count; + Dtype std = sqrt(ex2 - mean*mean); + Dtype target_std = sqrt(2.0 / n); + EXPECT_NEAR(mean, 0.0, 0.1); + EXPECT_NEAR(std, target_std, 0.1); + } + virtual ~MSRAFillerTest() { delete blob_; } + Blob* const blob_; + FillerParameter filler_param_; + shared_ptr > filler_; +}; + +TYPED_TEST_CASE(MSRAFillerTest, TestDtypes); + +TYPED_TEST(MSRAFillerTest, TestFillFanIn) { + TypeParam n = 2*4*5; + this->test_params(FillerParameter_VarianceNorm_FAN_IN, n); +} +TYPED_TEST(MSRAFillerTest, TestFillFanOut) { + TypeParam n = 1000*4*5; + this->test_params(FillerParameter_VarianceNorm_FAN_OUT, n); +} +TYPED_TEST(MSRAFillerTest, TestFillAverage) { + TypeParam n = (2*4*5 + 1000*4*5) / 2.0; + this->test_params(FillerParameter_VarianceNorm_AVERAGE, n); +} + } // namespace caffe diff --git a/src/caffe/test/test_flatten_layer.cpp b/src/caffe/test/test_flatten_layer.cpp index 3042d293cf7..7b6757cba32 100644 --- a/src/caffe/test/test_flatten_layer.cpp +++ b/src/caffe/test/test_flatten_layer.cpp @@ -42,13 +42,48 @@ TYPED_TEST(FlattenLayerTest, TestSetup) { LayerParameter layer_param; FlattenLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - EXPECT_EQ(this->blob_top_->num(), 2); - EXPECT_EQ(this->blob_top_->channels(), 3 * 6 * 5); - EXPECT_EQ(this->blob_top_->height(), 1); - EXPECT_EQ(this->blob_top_->width(), 1); + 
ASSERT_EQ(this->blob_top_->num_axes(), 2); + EXPECT_EQ(this->blob_top_->shape(0), 2); + EXPECT_EQ(this->blob_top_->shape(1), 3 * 6 * 5); } -TYPED_TEST(FlattenLayerTest, Test) { +TYPED_TEST(FlattenLayerTest, TestSetupWithAxis) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_flatten_param()->set_axis(2); + FlattenLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + ASSERT_EQ(this->blob_top_->num_axes(), 3); + EXPECT_EQ(this->blob_top_->shape(0), 2); + EXPECT_EQ(this->blob_top_->shape(1), 3); + EXPECT_EQ(this->blob_top_->shape(2), 6 * 5); +} + +TYPED_TEST(FlattenLayerTest, TestSetupWithEndAxis) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_flatten_param()->set_end_axis(-2); + FlattenLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + ASSERT_EQ(this->blob_top_->num_axes(), 3); + EXPECT_EQ(this->blob_top_->shape(0), 2); + EXPECT_EQ(this->blob_top_->shape(1), 3 * 6); + EXPECT_EQ(this->blob_top_->shape(2), 5); +} + +TYPED_TEST(FlattenLayerTest, TestSetupWithStartAndEndAxis) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_flatten_param()->set_axis(0); + layer_param.mutable_flatten_param()->set_end_axis(-2); + FlattenLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + ASSERT_EQ(this->blob_top_->num_axes(), 2); + EXPECT_EQ(this->blob_top_->shape(0), 2 * 3 * 6); + EXPECT_EQ(this->blob_top_->shape(1), 5); +} + +TYPED_TEST(FlattenLayerTest, TestForward) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; FlattenLayer layer(layer_param); @@ -71,5 +106,4 @@ TYPED_TEST(FlattenLayerTest, TestGradient) { this->blob_top_vec_); } - } // namespace caffe diff --git a/src/caffe/test/test_gradient_based_solver.cpp b/src/caffe/test/test_gradient_based_solver.cpp index 2a7dd90f0f5..c9135d64e70 100644 --- 
a/src/caffe/test/test_gradient_based_solver.cpp +++ b/src/caffe/test/test_gradient_based_solver.cpp @@ -8,10 +8,8 @@ #include "gtest/gtest.h" #include "caffe/common.hpp" -#include "caffe/parallel.hpp" #include "caffe/proto/caffe.pb.h" #include "caffe/solver.hpp" -#include "caffe/util/io.hpp" #include "caffe/test/test_caffe_main.hpp" @@ -25,27 +23,12 @@ class GradientBasedSolverTest : public MultiDeviceTest { protected: GradientBasedSolverTest() : - seed_(1701), num_(4), channels_(3), height_(10), width_(10), - share_(false) { - input_file_ = new string( - CMAKE_SOURCE_DIR "caffe/test/test_data/solver_data_list.txt" CMAKE_EXT); - } - ~GradientBasedSolverTest() { - delete input_file_; - } + seed_(1701), num_(4), channels_(3), height_(10), width_(10) {} - string snapshot_prefix_; shared_ptr > solver_; - shared_ptr > sync_; int seed_; - // Dimensions are determined by generate_sample_data.py - // TODO this is brittle and the hdf5 file should be checked instead. int num_, channels_, height_, width_; - bool share_; - Dtype delta_; // Stability constant for RMSProp, AdaGrad, AdaDelta and Adam - - // Test data: check out generate_sample_data.py in the same directory. - string* input_file_; + Dtype delta_; // Stability constant for AdaGrad. virtual SolverParameter_SolverType solver_type() = 0; virtual void InitSolver(const SolverParameter& param) = 0; @@ -53,6 +36,9 @@ class GradientBasedSolverTest : public MultiDeviceTest { virtual void InitSolverFromProtoString(const string& proto) { SolverParameter param; CHECK(google::protobuf::TextFormat::ParseFromString(proto, ¶m)); + // Disable saving a final snapshot so the tests don't pollute the user's + // working directory with useless snapshots. + param.set_snapshot_after_train(false); // Set the solver_mode according to current Caffe::mode. 
switch (Caffe::mode()) { case Caffe::CPU: @@ -65,58 +51,47 @@ class GradientBasedSolverTest : public MultiDeviceTest { LOG(FATAL) << "Unknown Caffe mode: " << Caffe::mode(); } InitSolver(param); - delta_ = param.delta(); + delta_ = (solver_type() == SolverParameter_SolverType_ADAGRAD) ? + param.delta() : 0; } - string RunLeastSquaresSolver(const Dtype learning_rate, + void RunLeastSquaresSolver(const Dtype learning_rate, const Dtype weight_decay, const Dtype momentum, const int num_iters, - const int iter_size = 1, const int devices = 1, - const bool snapshot = false, const char* from_snapshot = NULL) { + const int iter_size = 1) { ostringstream proto; - int device_id = 0; -#ifndef CPU_ONLY - if (Caffe::mode() == Caffe::GPU) { - CUDA_CHECK(cudaGetDevice(&device_id)); - } -#endif proto << - "snapshot_after_train: " << snapshot << " " "max_iter: " << num_iters << " " "base_lr: " << learning_rate << " " "lr_policy: 'fixed' " "iter_size: " << iter_size << " " - "device_id: " << device_id << " " "net_param { " " name: 'TestNetwork' " " layer { " " name: 'data' " - " type: 'HDF5Data' " - " hdf5_data_param { " - " source: '" << *(this->input_file_) << "' " - " batch_size: " << num_ / iter_size << " " + " type: 'DummyData' " + " dummy_data_param { " + " num: " << num_ / iter_size << " " + " channels: " << channels_ << " " + " height: " << height_ << " " + " width: " << width_ << " " + " channels: 1 " + " height: 1 " + " width: 1 " + " data_filler { " + " type: 'constant' " + " value: 1.0 " + " } " + " data_filler { " + " type: 'gaussian' " + " std: 1.0 " + " } " " } " " top: 'data' " " top: 'targets' " - " } "; - if (share_) { - proto << - " layer { " - " name: 'slice' " - " type: 'Slice' " - " bottom: 'data' " - " top: 'data1' " - " top: 'data2' " - " slice_param { " - " axis: 0 " - " } " - " } "; - } - proto << + " } " " layer { " " name: 'innerprod' " " type: 'InnerProduct' " - " param { name: 'weights' } " - " param { name: 'bias' } " " inner_product_param { " " 
num_output: 1 " " weight_filler { " @@ -128,42 +103,9 @@ class GradientBasedSolverTest : public MultiDeviceTest { " std: 1.0 " " } " " } " - " bottom: '" << string(share_ ? "data1": "data") << "' " - " top: '" << string(share_ ? "innerprod1": "innerprod") << "' " - " } "; - if (share_) { - proto << - " layer { " - " name: 'innerprod2' " - " type: 'InnerProduct' " - " param { name: 'weights' } " - " param { name: 'bias' } " - " inner_product_param { " - " num_output: 1 " - " weight_filler { " - " type: 'gaussian' " - " std: 1.0 " - " } " - " bias_filler { " - " type: 'gaussian' " - " std: 1.0 " - " } " - " } " - " bottom: 'data2' " - " top: 'innerprod2' " - " } " - " layer { " - " name: 'concat' " - " type: 'Concat' " - " bottom: 'innerprod1' " - " bottom: 'innerprod2' " - " top: 'innerprod' " - " concat_param { " - " axis: 0 " - " } " - " } "; - } - proto << + " bottom: 'data' " + " top: 'innerprod' " + " } " " layer { " " name: 'loss' " " type: 'EuclideanLoss' " @@ -177,46 +119,9 @@ class GradientBasedSolverTest : public MultiDeviceTest { if (momentum != 0) { proto << "momentum: " << momentum << " "; } - MakeTempDir(&snapshot_prefix_); - proto << "snapshot_prefix: '" << snapshot_prefix_ << "/' "; - if (snapshot) { - proto << "snapshot: " << num_iters << " "; - } Caffe::set_random_seed(this->seed_); this->InitSolverFromProtoString(proto.str()); - if (from_snapshot != NULL) { - this->solver_->Restore(from_snapshot); - vector*> empty_bottom_vec; - for (int i = 0; i < this->solver_->iter(); ++i) { - this->solver_->net()->Forward(empty_bottom_vec); - } - } - if (devices == 1) { - this->solver_->Solve(); - } else { - LOG(INFO) << "Multi-GPU test on " << devices << " devices"; - vector gpus; - // put current device at the beginning - int device_id = solver_->param().device_id(); - gpus.push_back(device_id); - for (int i = 0; gpus.size() < devices; ++i) { - if (i != device_id) - gpus.push_back(i); - } - Caffe::set_solver_count(gpus.size()); - this->sync_.reset(new 
P2PSync( - this->solver_, NULL, this->solver_->param())); - this->sync_->run(gpus); - Caffe::set_solver_count(1); - } - if (snapshot) { - ostringstream resume_file; - resume_file << snapshot_prefix_ << "/_iter_" << num_iters - << ".solverstate"; - string resume_filename = resume_file.str(); - return resume_filename; - } - return string(); + this->solver_->Solve(); } // Compute an update value given the current state of the train net, @@ -224,7 +129,7 @@ class GradientBasedSolverTest : public MultiDeviceTest { // updated_params will store the updated weight and bias results, // using the blobs' diffs to hold the update values themselves. void ComputeLeastSquaresUpdate(const Dtype learning_rate, - const Dtype weight_decay, const Dtype momentum, const int num_iters, + const Dtype weight_decay, const Dtype momentum, vector > >* updated_params) { const int N = num_; const int D = channels_ * height_ * width_; @@ -290,12 +195,7 @@ class GradientBasedSolverTest : public MultiDeviceTest { ((i == D) ? bias.cpu_data()[0] : weights.cpu_data()[i]); // Finally, compute update. const vector > >& history = solver_->history(); - if (solver_type() != SolverParameter_SolverType_ADADELTA - && solver_type() != SolverParameter_SolverType_ADAM) { - ASSERT_EQ(2, history.size()); // 1 blob for weights, 1 for bias - } else { - ASSERT_EQ(4, history.size()); // additional blobs for update history - } + ASSERT_EQ(2, history.size()); // 1 blob for weights, 1 for bias Dtype update_value = learning_rate * grad; const Dtype history_value = (i == D) ? 
history[1]->cpu_data()[0] : history[0]->cpu_data()[i]; @@ -312,40 +212,6 @@ class GradientBasedSolverTest : public MultiDeviceTest { case SolverParameter_SolverType_ADAGRAD: update_value /= std::sqrt(history_value + grad * grad) + delta_; break; - case SolverParameter_SolverType_RMSPROP: { - const Dtype rms_decay = 0.95; - update_value /= std::sqrt(rms_decay*history_value - + grad * grad * (1 - rms_decay)) + delta_; - } - break; - case SolverParameter_SolverType_ADADELTA: - { - const Dtype update_history_value = (i == D) ? - history[1 + num_param_blobs]->cpu_data()[0] : - history[0 + num_param_blobs]->cpu_data()[i]; - const Dtype weighted_gradient_average = - momentum * history_value + (1 - momentum) * (grad * grad); - update_value = grad * std::sqrt((update_history_value + delta_) / - (weighted_gradient_average + delta_)) * learning_rate; - // not actually needed, just here for illustrative purposes - // const Dtype weighted_update_average = - // momentum * update_history_value + (1 - momentum) * (update_value); - break; - } - case SolverParameter_SolverType_ADAM: { - const Dtype momentum2 = 0.999; - const Dtype m = history_value; - const Dtype v = (i == D) ? - history[1 + num_param_blobs]->cpu_data()[0] : - history[0 + num_param_blobs]->cpu_data()[i]; - const Dtype val_m = (1 - momentum) * grad + momentum * m; - const Dtype val_v = (1 - momentum2) * grad * grad + momentum2 * v; - Dtype alpha_t = learning_rate * - std::sqrt(Dtype(1) - pow(momentum2, num_iters)) / - (Dtype(1.) 
- pow(momentum, num_iters)); - update_value = alpha_t * val_m / (std::sqrt(val_v) + delta_); - break; - } default: LOG(FATAL) << "Unknown solver type: " << solver_type(); } @@ -410,6 +276,45 @@ class GradientBasedSolverTest : public MultiDeviceTest { } } + void CheckAccumulation(const Dtype kLearningRate, const Dtype kWeightDecay, + const Dtype kMomentum, const int kNumIters, const int kIterSize) { + const double kPrecision = 1e-2; + const double kMinPrecision = 1e-7; + // Solve without accumulation and save parameters. + this->RunLeastSquaresSolver(kLearningRate, kWeightDecay, kMomentum, + kNumIters); + // Save parameters for comparison. + Net& net = *this->solver_->net(); + const vector > >& param_blobs = + net.layer_by_name("innerprod")->blobs(); + vector > > noaccum_params(param_blobs.size()); + for (int i = 0; i < param_blobs.size(); ++i) { + noaccum_params[i].reset(new Blob()); + noaccum_params[i]->CopyFrom(*param_blobs[i], false, true); + } + // Solve by equivalent accumulation of gradients over divided batches. + this->RunLeastSquaresSolver(kLearningRate, kWeightDecay, kMomentum, + kNumIters, kIterSize); + Net& net_accum = *this->solver_->net(); + const vector > >& accum_params = + net_accum.layer_by_name("innerprod")->blobs(); + // Compare accumulated parameters against no accumulation standard. 
+ const int D = this->channels_ * this->height_ * this->width_; + for (int i = 0; i < D; ++i) { + const Dtype expected_param = noaccum_params[0]->cpu_data()[i]; + const Dtype accum_param = accum_params[0]->cpu_data()[i]; + const Dtype error_margin = std::max(kMinPrecision, kPrecision * + std::min(fabs(expected_param), fabs(accum_param))); + EXPECT_NEAR(expected_param, accum_param, error_margin); + } + ASSERT_EQ(1, accum_params[1]->count()); + const Dtype expected_bias = noaccum_params[1]->cpu_data()[0]; + const Dtype accum_bias = accum_params[1]->cpu_data()[0]; + const Dtype error_margin = std::max(kMinPrecision, kPrecision * + std::min(fabs(expected_bias), fabs(accum_bias))); + EXPECT_NEAR(expected_bias, accum_bias, error_margin); + } + // Test that the correct update is computed for a regularized least squares // problem: // @@ -428,108 +333,20 @@ class GradientBasedSolverTest : public MultiDeviceTest { void TestLeastSquaresUpdate(const Dtype learning_rate = 1.0, const Dtype weight_decay = 0.0, const Dtype momentum = 0.0, const int iter_to_check = 0) { - const int kNum = num_; - const int kIterSize = 1; - // Test over all numbers of devices. - int available_devices = 1; -#ifndef CPU_ONLY - if (Caffe::mode() == Caffe::GPU) { - CUDA_CHECK(cudaGetDeviceCount(&available_devices)); - } -#endif - for (int devices = 1; devices <= available_devices; ++devices) { - // Configure batch size for single / multi device equivalence. - // Constant data is needed for multi device as for accumulation. - num_ = kNum * devices; - - // Initialize the solver and run K (= iter_to_check) solver iterations - // (on single device). - RunLeastSquaresSolver(learning_rate, weight_decay, momentum, - iter_to_check, kIterSize, 1); - - // Compute the (K+1)th update using the analytic least squares gradient. 
- vector > > updated_params; - ComputeLeastSquaresUpdate(learning_rate, weight_decay, momentum, - iter_to_check + 1, &updated_params); - - // Reinitialize the solver and run K+1 solver iterations. - num_ = kNum; - RunLeastSquaresSolver(learning_rate, weight_decay, momentum, - iter_to_check + 1, kIterSize, devices); - - // Check that the solver's solution matches ours. - CheckLeastSquaresUpdate(updated_params); - } - } - - void TestSnapshot(const Dtype learning_rate = 1.0, - const Dtype weight_decay = 0.0, const Dtype momentum = 0.0, - const int num_iters = 1) { - // Run the solver for num_iters * 2 iterations. - const int total_num_iters = num_iters * 2; - bool snapshot = false; - const int kIterSize = 1; - const int kDevices = 1; - RunLeastSquaresSolver(learning_rate, weight_decay, momentum, - total_num_iters, kIterSize, kDevices, snapshot); + // Initialize the solver and run K (= iter_to_check) solver iterations. + RunLeastSquaresSolver(learning_rate, weight_decay, momentum, iter_to_check); - // Save the resulting param values. - vector > > param_copies; - const vector*>& orig_params = - solver_->net()->learnable_params(); - param_copies.resize(orig_params.size()); - for (int i = 0; i < orig_params.size(); ++i) { - param_copies[i].reset(new Blob()); - const bool kReshape = true; - for (int copy_diff = false; copy_diff <= true; ++copy_diff) { - param_copies[i]->CopyFrom(*orig_params[i], copy_diff, kReshape); - } - } + // Compute the (K+1)th update using the analytic least squares gradient. 
+ vector > > updated_params; + ComputeLeastSquaresUpdate(learning_rate, weight_decay, momentum, + &updated_params); - // Save the solver history - vector > > history_copies; - const vector > >& orig_history = solver_->history(); - history_copies.resize(orig_history.size()); - for (int i = 0; i < orig_history.size(); ++i) { - history_copies[i].reset(new Blob()); - const bool kReshape = true; - for (int copy_diff = false; copy_diff <= true; ++copy_diff) { - history_copies[i]->CopyFrom(*orig_history[i], copy_diff, kReshape); - } - } - - // Run the solver for num_iters iterations and snapshot. - snapshot = true; - string snapshot_name = RunLeastSquaresSolver(learning_rate, weight_decay, - momentum, num_iters, kIterSize, kDevices, snapshot); - - // Reinitialize the solver and run for num_iters more iterations. - snapshot = false; + // Reinitialize the solver and run K+1 solver iterations. RunLeastSquaresSolver(learning_rate, weight_decay, momentum, - total_num_iters, kIterSize, kDevices, - snapshot, snapshot_name.c_str()); + iter_to_check + 1); - // Check that params now match. - const vector*>& params = solver_->net()->learnable_params(); - for (int i = 0; i < params.size(); ++i) { - for (int j = 0; j < params[i]->count(); ++j) { - EXPECT_EQ(param_copies[i]->cpu_data()[j], params[i]->cpu_data()[j]) - << "param " << i << " data differed at dim " << j; - EXPECT_EQ(param_copies[i]->cpu_diff()[j], params[i]->cpu_diff()[j]) - << "param " << i << " diff differed at dim " << j; - } - } - - // Check that history now matches. 
- const vector > >& history = solver_->history(); - for (int i = 0; i < history.size(); ++i) { - for (int j = 0; j < history[i]->count(); ++j) { - EXPECT_EQ(history_copies[i]->cpu_data()[j], history[i]->cpu_data()[j]) - << "history blob " << i << " data differed at dim " << j; - EXPECT_EQ(history_copies[i]->cpu_diff()[j], history[i]->cpu_diff()[j]) - << "history blob " << i << " diff differed at dim " << j; - } - } + // Check that the solver's solution matches ours. + CheckLeastSquaresUpdate(updated_params); } }; @@ -554,38 +371,23 @@ TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdate) { this->TestLeastSquaresUpdate(); } -TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateLROneHundredth) { +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateLROneTenth) { typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; + const Dtype kLearningRate = 0.1; this->TestLeastSquaresUpdate(kLearningRate); } TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithWeightDecay) { typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 1; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithWeightDecayMultiIter) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; + const Dtype kLearningRate = 1.0; const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay); } TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithMomentum) { typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0; + const Dtype kLearningRate = 1.0; + const Dtype kWeightDecay = 0.0; const Dtype 
kMomentum = 0.5; const int kNumIters = 1; for (int i = 0; i <= kNumIters; ++i) { @@ -595,8 +397,8 @@ TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithMomentum) { TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) { typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0; + const Dtype kLearningRate = 1.0; + const Dtype kWeightDecay = 0.0; const Dtype kMomentum = 0.5; const int kNumIters = 4; for (int i = 0; i <= kNumIters; ++i) { @@ -607,21 +409,9 @@ TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) { TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverything) { typedef typename TypeParam::Dtype Dtype; const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.5; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.5; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.9; const int kNumIters = 4; - this->share_ = true; for (int i = 0; i <= kNumIters; ++i) { this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); } @@ -630,50 +420,14 @@ TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingShare) { TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { typedef typename TypeParam::Dtype Dtype; const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - const int kIterSize = 2; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} - -TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype 
kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; + const Dtype kWeightDecay = 0.1; const Dtype kMomentum = 0.9; const int kNumIters = 4; const int kIterSize = 2; - this->share_ = true; this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, kIterSize); } -TYPED_TEST(SGDSolverTest, TestSnapshot) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(SGDSolverTest, TestSnapshotShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - this->share_ = true; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } -} - - template class AdaGradSolverTest : public GradientBasedSolverTest { typedef typename TypeParam::Dtype Dtype; @@ -693,15 +447,15 @@ TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdate) { this->TestLeastSquaresUpdate(); } -TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateLROneHundredth) { +TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateLROneTenth) { typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; + const Dtype kLearningRate = 0.1; this->TestLeastSquaresUpdate(kLearningRate); } TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateWithWeightDecay) { typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; + const Dtype kLearningRate = 1.0; const Dtype kWeightDecay = 0.5; this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay); } @@ -709,22 +463,9 @@ TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateWithWeightDecay) { TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateWithEverything) { typedef typename 
TypeParam::Dtype Dtype; const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(AdaGradSolverTest, - TestAdaGradLeastSquaresUpdateWithEverythingShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.0; const int kNumIters = 4; - this->share_ = true; for (int i = 0; i <= kNumIters; ++i) { this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); } @@ -733,50 +474,14 @@ TYPED_TEST(AdaGradSolverTest, TYPED_TEST(AdaGradSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { typedef typename TypeParam::Dtype Dtype; const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - const int kIterSize = 2; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} - -TYPED_TEST(AdaGradSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.0; const int kNumIters = 4; const int kIterSize = 2; - this->share_ = true; this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, kIterSize); } -TYPED_TEST(AdaGradSolverTest, TestSnapshot) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(AdaGradSolverTest, TestSnapshotShare) { - typedef typename 
TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - this->share_ = true; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } -} - - template class NesterovSolverTest : public GradientBasedSolverTest { typedef typename TypeParam::Dtype Dtype; @@ -796,35 +501,23 @@ TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdate) { this->TestLeastSquaresUpdate(); } -TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateLROneHundredth) { +TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateLROneTenth) { typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; + const Dtype kLearningRate = 0.1; this->TestLeastSquaresUpdate(kLearningRate); } TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithWeightDecay) { typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; + const Dtype kLearningRate = 1.0; const Dtype kWeightDecay = 0.5; this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay); } -TYPED_TEST(NesterovSolverTest, - TestNesterovLeastSquaresUpdateWithWeightDecayMultiIter) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithMomentum) { typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0; + const Dtype kLearningRate = 1.0; + const Dtype kWeightDecay = 0.0; const Dtype kMomentum = 0.5; const int kNumIters = 1; for (int i = 0; i <= kNumIters; ++i) { @@ -834,8 +527,8 @@ TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithMomentum) { TYPED_TEST(NesterovSolverTest, 
TestLeastSquaresUpdateWithMomentumMultiIter) { typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0; + const Dtype kLearningRate = 1.0; + const Dtype kWeightDecay = 0.0; const Dtype kMomentum = 0.5; const int kNumIters = 4; for (int i = 0; i <= kNumIters; ++i) { @@ -846,22 +539,9 @@ TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) { TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithEverything) { typedef typename TypeParam::Dtype Dtype; const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(NesterovSolverTest, - TestNesterovLeastSquaresUpdateWithEverythingShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; + const Dtype kWeightDecay = 0.1; const Dtype kMomentum = 0.9; const int kNumIters = 4; - this->share_ = true; for (int i = 0; i <= kNumIters; ++i) { this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); } @@ -870,245 +550,7 @@ TYPED_TEST(NesterovSolverTest, TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { typedef typename TypeParam::Dtype Dtype; const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - const int kIterSize = 2; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} - -TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - const int kIterSize = 2; - this->share_ = true; - this->CheckAccumulation(kLearningRate, kWeightDecay, 
kMomentum, kNumIters, - kIterSize); -} - -TYPED_TEST(NesterovSolverTest, TestSnapshot) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(NesterovSolverTest, TestSnapshotShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - this->share_ = true; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -template -class AdaDeltaSolverTest : public GradientBasedSolverTest { - typedef typename TypeParam::Dtype Dtype; - - protected: - virtual void InitSolver(const SolverParameter& param) { - this->solver_.reset(new AdaDeltaSolver(param)); - } - - virtual SolverParameter_SolverType solver_type() { - return SolverParameter_SolverType_ADADELTA; - } -}; - -TYPED_TEST_CASE(AdaDeltaSolverTest, TestDtypesAndDevices); - -TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdate) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - this->TestLeastSquaresUpdate(kLearningRate); -} - -TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithWeightDecay) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.95; - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); -} - -TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithHalfMomentum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - const Dtype kWeightDecay = 0.0; - const Dtype kMomentum = 0.5; - const int kNumIters = 1; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, 
kWeightDecay, kMomentum); - } -} - -TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithMomentum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - const Dtype kWeightDecay = 0.0; - const Dtype kMomentum = 0.95; - const int kNumIters = 1; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); - } -} - -TYPED_TEST(AdaDeltaSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - const Dtype kWeightDecay = 0.0; - const Dtype kMomentum = 0.95; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithEverything) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.95; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(AdaDeltaSolverTest, - TestAdaDeltaLeastSquaresUpdateWithEverythingShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.95; - const int kNumIters = 4; - this->share_ = true; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(AdaDeltaSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.95; - const int kNumIters = 4; - const int kIterSize = 2; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} - -TYPED_TEST(AdaDeltaSolverTest, 
TestLeastSquaresUpdateWithEverythingAccumShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.95; - const int kNumIters = 4; - const int kIterSize = 2; - this->share_ = true; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} - -TYPED_TEST(AdaDeltaSolverTest, TestSnapshot) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.95; - const int kNumIters = 4; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(AdaDeltaSolverTest, TestSnapshotShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.95; - const int kNumIters = 4; - this->share_ = true; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -template -class AdamSolverTest : public GradientBasedSolverTest { - typedef typename TypeParam::Dtype Dtype; - - protected: - virtual void InitSolver(const SolverParameter& param) { - SolverParameter new_param = param; - const Dtype momentum = 0.9; - new_param.set_momentum(momentum); - const Dtype momentum2 = 0.999; - new_param.set_momentum2(momentum2); - this->solver_.reset(new AdamSolver(new_param)); - } - virtual SolverParameter_SolverType solver_type() { - return SolverParameter_SolverType_ADAM; - } -}; - -TYPED_TEST_CASE(AdamSolverTest, TestDtypesAndDevices); - -TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdate) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0; - const Dtype kMomentum = 0.9; - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); -} - -TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithWeightDecay) { - typedef 
typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); -} - -TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithEverything) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithEverythingShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - this->share_ = true; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(AdamSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; const Dtype kMomentum = 0.9; const int kNumIters = 4; const int kIterSize = 2; @@ -1116,145 +558,4 @@ TYPED_TEST(AdamSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { kIterSize); } -TYPED_TEST(AdamSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - const int kIterSize = 2; - this->share_ = true; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} - -TYPED_TEST(AdamSolverTest, TestSnapshot) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - for (int i = 1; i <= kNumIters; ++i) { 
- this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(AdamSolverTest, TestSnapshotShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - this->share_ = true; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -template -class RMSPropSolverTest : public GradientBasedSolverTest { - typedef typename TypeParam::Dtype Dtype; - - protected: - virtual void InitSolver(const SolverParameter& param) { - const Dtype rms_decay = 0.95; - SolverParameter new_param = param; - new_param.set_rms_decay(rms_decay); - this->solver_.reset(new RMSPropSolver(new_param)); - } - virtual SolverParameter_SolverType solver_type() { - return SolverParameter_SolverType_RMSPROP; - } -}; - -TYPED_TEST_CASE(RMSPropSolverTest, TestDtypesAndDevices); - -TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithWeightDecay) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 1.0; - const Dtype kWeightDecay = 0.5; - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay); -} - -TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithRmsDecay) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.0; - const Dtype kMomentum = 0.0; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithEverything) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.0; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(RMSPropSolverTest, - 
TestRMSPropLeastSquaresUpdateWithEverythingShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.0; - const int kNumIters = 4; - this->share_ = true; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(RMSPropSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.0; - const int kNumIters = 4; - const int kIterSize = 2; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} - -TYPED_TEST(RMSPropSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.0; - const int kNumIters = 4; - const int kIterSize = 2; - this->share_ = true; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} - -TYPED_TEST(RMSPropSolverTest, TestSnapshot) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(RMSPropSolverTest, TestSnapshotShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - this->share_ = true; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } -} - } // namespace caffe diff --git a/src/caffe/test/test_im2col_kernel.cu b/src/caffe/test/test_im2col_kernel.cu index ee684c00255..0017ac23e69 100644 --- 
a/src/caffe/test/test_im2col_kernel.cu +++ b/src/caffe/test/test_im2col_kernel.cu @@ -25,7 +25,7 @@ __global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; template -class Im2colKernelTest : public ::testing::Test { +class Im2colKernelTest : public GPUDeviceTest { protected: Im2colKernelTest() // big so launches > 1024 threads @@ -68,8 +68,6 @@ class Im2colKernelTest : public ::testing::Test { TYPED_TEST_CASE(Im2colKernelTest, TestDtypes); TYPED_TEST(Im2colKernelTest, TestGPU) { - Caffe::set_mode(Caffe::GPU); - // Reshape the blobs to correct size for im2col output this->blob_top_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, diff --git a/src/caffe/test/test_math_functions.cpp b/src/caffe/test/test_math_functions.cpp index 667f744bdd7..a095b544e17 100644 --- a/src/caffe/test/test_math_functions.cpp +++ b/src/caffe/test/test_math_functions.cpp @@ -15,8 +15,10 @@ namespace caffe { -template -class MathFunctionsTest : public ::testing::Test { +template +class MathFunctionsTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + protected: MathFunctionsTest() : blob_bottom_(new Blob()), @@ -64,14 +66,19 @@ class MathFunctionsTest : public ::testing::Test { Blob* const blob_top_; }; -TYPED_TEST_CASE(MathFunctionsTest, TestDtypes); +template +class CPUMathFunctionsTest + : public MathFunctionsTest > { +}; + +TYPED_TEST_CASE(CPUMathFunctionsTest, TestDtypes); -TYPED_TEST(MathFunctionsTest, TestNothing) { +TYPED_TEST(CPUMathFunctionsTest, TestNothing) { // The first test case of a test suite takes the longest time // due to the set up overhead. 
} -TYPED_TEST(MathFunctionsTest, TestHammingDistanceCPU) { +TYPED_TEST(CPUMathFunctionsTest, TestHammingDistance) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); const TypeParam* y = this->blob_top_->cpu_data(); @@ -79,7 +86,7 @@ TYPED_TEST(MathFunctionsTest, TestHammingDistanceCPU) { caffe_cpu_hamming_distance(n, x, y)); } -TYPED_TEST(MathFunctionsTest, TestAsumCPU) { +TYPED_TEST(CPUMathFunctionsTest, TestAsum) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); TypeParam std_asum = 0; @@ -90,7 +97,7 @@ TYPED_TEST(MathFunctionsTest, TestAsumCPU) { EXPECT_LT((cpu_asum - std_asum) / std_asum, 1e-2); } -TYPED_TEST(MathFunctionsTest, TestSignCPU) { +TYPED_TEST(CPUMathFunctionsTest, TestSign) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); caffe_cpu_sign(n, x, this->blob_bottom_->mutable_cpu_diff()); @@ -100,7 +107,7 @@ TYPED_TEST(MathFunctionsTest, TestSignCPU) { } } -TYPED_TEST(MathFunctionsTest, TestSgnbitCPU) { +TYPED_TEST(CPUMathFunctionsTest, TestSgnbit) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); caffe_cpu_sgnbit(n, x, this->blob_bottom_->mutable_cpu_diff()); @@ -110,7 +117,7 @@ TYPED_TEST(MathFunctionsTest, TestSgnbitCPU) { } } -TYPED_TEST(MathFunctionsTest, TestFabsCPU) { +TYPED_TEST(CPUMathFunctionsTest, TestFabs) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); caffe_abs(n, x, this->blob_bottom_->mutable_cpu_diff()); @@ -120,7 +127,7 @@ TYPED_TEST(MathFunctionsTest, TestFabsCPU) { } } -TYPED_TEST(MathFunctionsTest, TestScaleCPU) { +TYPED_TEST(CPUMathFunctionsTest, TestScale) { int n = this->blob_bottom_->count(); TypeParam alpha = this->blob_bottom_->cpu_diff()[caffe_rng_rand() % this->blob_bottom_->count()]; @@ -133,11 +140,10 @@ TYPED_TEST(MathFunctionsTest, TestScaleCPU) { } } -TYPED_TEST(MathFunctionsTest, TestCopyCPU) { 
+TYPED_TEST(CPUMathFunctionsTest, TestCopy) { const int n = this->blob_bottom_->count(); const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); TypeParam* top_data = this->blob_top_->mutable_cpu_data(); - Caffe::set_mode(Caffe::CPU); caffe_copy(n, bottom_data, top_data); for (int i = 0; i < n; ++i) { EXPECT_EQ(bottom_data[i], top_data[i]); @@ -146,8 +152,14 @@ TYPED_TEST(MathFunctionsTest, TestCopyCPU) { #ifndef CPU_ONLY +template +class GPUMathFunctionsTest : public MathFunctionsTest > { +}; + +TYPED_TEST_CASE(GPUMathFunctionsTest, TestDtypes); + // TODO: Fix caffe_gpu_hamming_distance and re-enable this test. -TYPED_TEST(MathFunctionsTest, DISABLED_TestHammingDistanceGPU) { +TYPED_TEST(GPUMathFunctionsTest, DISABLED_TestHammingDistance) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); const TypeParam* y = this->blob_top_->cpu_data(); @@ -158,7 +170,7 @@ TYPED_TEST(MathFunctionsTest, DISABLED_TestHammingDistanceGPU) { EXPECT_EQ(reference_distance, computed_distance); } -TYPED_TEST(MathFunctionsTest, TestAsumGPU) { +TYPED_TEST(GPUMathFunctionsTest, TestAsum) { int n = this->blob_bottom_->count(); const TypeParam* x = this->blob_bottom_->cpu_data(); TypeParam std_asum = 0; @@ -170,7 +182,7 @@ TYPED_TEST(MathFunctionsTest, TestAsumGPU) { EXPECT_LT((gpu_asum - std_asum) / std_asum, 1e-2); } -TYPED_TEST(MathFunctionsTest, TestSignGPU) { +TYPED_TEST(GPUMathFunctionsTest, TestSign) { int n = this->blob_bottom_->count(); caffe_gpu_sign(n, this->blob_bottom_->gpu_data(), this->blob_bottom_->mutable_gpu_diff()); @@ -181,7 +193,7 @@ TYPED_TEST(MathFunctionsTest, TestSignGPU) { } } -TYPED_TEST(MathFunctionsTest, TestSgnbitGPU) { +TYPED_TEST(GPUMathFunctionsTest, TestSgnbit) { int n = this->blob_bottom_->count(); caffe_gpu_sgnbit(n, this->blob_bottom_->gpu_data(), this->blob_bottom_->mutable_gpu_diff()); @@ -192,7 +204,7 @@ TYPED_TEST(MathFunctionsTest, TestSgnbitGPU) { } } -TYPED_TEST(MathFunctionsTest, TestFabsGPU) { 
+TYPED_TEST(GPUMathFunctionsTest, TestFabs) { int n = this->blob_bottom_->count(); caffe_gpu_abs(n, this->blob_bottom_->gpu_data(), this->blob_bottom_->mutable_gpu_diff()); @@ -203,7 +215,7 @@ TYPED_TEST(MathFunctionsTest, TestFabsGPU) { } } -TYPED_TEST(MathFunctionsTest, TestScaleGPU) { +TYPED_TEST(GPUMathFunctionsTest, TestScale) { int n = this->blob_bottom_->count(); TypeParam alpha = this->blob_bottom_->cpu_diff()[caffe_rng_rand() % this->blob_bottom_->count()]; @@ -216,11 +228,10 @@ TYPED_TEST(MathFunctionsTest, TestScaleGPU) { } } -TYPED_TEST(MathFunctionsTest, TestCopyGPU) { +TYPED_TEST(GPUMathFunctionsTest, TestCopy) { const int n = this->blob_bottom_->count(); const TypeParam* bottom_data = this->blob_bottom_->gpu_data(); TypeParam* top_data = this->blob_top_->mutable_gpu_data(); - Caffe::set_mode(Caffe::GPU); caffe_copy(n, bottom_data, top_data); bottom_data = this->blob_bottom_->cpu_data(); top_data = this->blob_top_->mutable_cpu_data(); diff --git a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp index 9038017e3e2..b2db984feb1 100644 --- a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp +++ b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp @@ -16,7 +16,7 @@ namespace caffe { template -class MultinomialLogisticLossLayerTest : public ::testing::Test { +class MultinomialLogisticLossLayerTest : public CPUDeviceTest { protected: MultinomialLogisticLossLayerTest() : blob_bottom_data_(new Blob(10, 5, 1, 1)), @@ -51,7 +51,6 @@ TYPED_TEST_CASE(MultinomialLogisticLossLayerTest, TestDtypes); TYPED_TEST(MultinomialLogisticLossLayerTest, TestGradientCPU) { LayerParameter layer_param; - Caffe::set_mode(Caffe::CPU); MultinomialLogisticLossLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); GradientChecker checker(1e-2, 2*1e-2, 1701, 0, 0.05); diff --git a/src/caffe/test/test_net.cpp b/src/caffe/test/test_net.cpp index b42b2c8ac45..32abc893559 100644 --- 
a/src/caffe/test/test_net.cpp +++ b/src/caffe/test/test_net.cpp @@ -617,8 +617,11 @@ class NetTest : public MultiDeviceTest { <<<<<<< HEAD <<<<<<< HEAD <<<<<<< HEAD +<<<<<<< HEAD ======= >>>>>>> 011aef0... restore +======= +>>>>>>> 80a07dd... macro define in upgrade_proto virtual void InitSkipPropNet(bool test_skip_true) { string proto = "name: 'SkipPropTestNetwork' " @@ -709,10 +712,13 @@ class NetTest : public MultiDeviceTest { <<<<<<< HEAD <<<<<<< HEAD <<<<<<< HEAD +<<<<<<< HEAD ======= >>>>>>> 00341b2... triplet data generation and network update ======= >>>>>>> 1882ac9... add initiate class name of triplet loss layer +======= +>>>>>>> 08d5d6d... macro define in upgrade_proto proto += " propagate_down: true " " propagate_down: false "; else @@ -735,10 +741,18 @@ class NetTest : public MultiDeviceTest { " propagate_down: true "; >>>>>>> 98fb438... fixed two bugs with prototext format <<<<<<< HEAD +<<<<<<< HEAD ======= >>>>>>> 00341b2... triplet data generation and network update ======= >>>>>>> 1882ac9... add initiate class name of triplet loss layer +======= +======= + proto += " propagate_down: [true, false] "; + else + proto += " propagate_down: [true, true] "; +>>>>>>> 80a07dd... macro define in upgrade_proto +>>>>>>> 08d5d6d... macro define in upgrade_proto proto += " top: 'cross_entropy_loss' " " type: 'SigmoidCrossEntropyLoss' " @@ -747,6 +761,7 @@ class NetTest : public MultiDeviceTest { InitNetFromProtoString(proto); } +<<<<<<< HEAD <<<<<<< HEAD ======= >>>>>>> 083f61b... New triplet loss layer added(beta1 version-no test source files) @@ -754,6 +769,8 @@ class NetTest : public MultiDeviceTest { >>>>>>> 011aef0... restore ======= >>>>>>> 4d8130b... New triplet loss layer added(beta1 version-no test source files) +======= +>>>>>>> 80a07dd... 
macro define in upgrade_proto int seed_; shared_ptr > net_; }; @@ -2358,4 +2375,52 @@ TYPED_TEST(NetTest, TestReshape) { } } +TYPED_TEST(NetTest, TestSkipPropagateDown) { + // check bottom_need_backward if propagate_down is true + this->InitSkipPropNet(false); + vector vec_layer_need_backward = this->net_->layer_need_backward(); + for (int layer_id = 0; layer_id < this->net_->layers().size(); ++layer_id) { + string layer_name = this->net_->layer_names()[layer_id]; + if (layer_name == "loss") { + // access to bottom_need_backward coresponding to label's blob + bool need_back = this->net_->bottom_need_backward()[layer_id][1]; + // if propagate_down is true, the loss layer will try to + // backpropagate on labels + EXPECT_TRUE(need_back) << "bottom_need_backward should be True"; + } + // layer_need_backward should be True except for data and silence layers + if (layer_name.find("data") != std::string::npos || + layer_name == "silence") { + EXPECT_FALSE(vec_layer_need_backward[layer_id]) + << "layer_need_backward for " << layer_name << " should be False"; + } else { + EXPECT_TRUE(vec_layer_need_backward[layer_id]) + << "layer_need_backward for " << layer_name << " should be True"; + } + } + // check bottom_need_backward if propagat_down is false + this->InitSkipPropNet(true); + vec_layer_need_backward.clear(); + vec_layer_need_backward = this->net_->layer_need_backward(); + for (int layer_id = 0; layer_id < this->net_->layers().size(); ++layer_id) { + string layer_name = this->net_->layer_names()[layer_id]; + if (layer_name == "loss") { + // access to bottom_need_backward coresponding to label's blob + bool need_back = this->net_->bottom_need_backward()[layer_id][1]; + // if propagate_down is false, the loss layer will not try to + // backpropagate on labels + EXPECT_FALSE(need_back) << "bottom_need_backward should be False"; + } + // layer_need_backward should be False except for innerproduct and + // loss layers + if (layer_name == "innerproduct" || layer_name == 
"loss") { + EXPECT_TRUE(vec_layer_need_backward[layer_id]) + << "layer_need_backward for " << layer_name << " should be True"; + } else { + EXPECT_FALSE(vec_layer_need_backward[layer_id]) + << "layer_need_backward for " << layer_name << " should be False"; + } + } +} + } // namespace caffe diff --git a/src/caffe/test/test_neuron_layer.cpp b/src/caffe/test/test_neuron_layer.cpp index c9d52f247a6..c6e4d27b903 100644 --- a/src/caffe/test/test_neuron_layer.cpp +++ b/src/caffe/test/test_neuron_layer.cpp @@ -117,6 +117,49 @@ class NeuronLayerTest : public MultiDeviceTest { + slope_data[c] * std::min(bottom_data[i], (Dtype)(0))); } } + + void LogBottomInit() { + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + Dtype* bottom_data = this->blob_bottom_->mutable_cpu_data(); + caffe_exp(this->blob_bottom_->count(), bottom_data, bottom_data); + } + + void TestLogForward(const float base, const float scale, const float shift) { + LogBottomInit(); + LayerParameter layer_param; + layer_param.mutable_log_param()->set_base(base); + layer_param.mutable_log_param()->set_scale(scale); + layer_param.mutable_log_param()->set_shift(shift); + LogLayer layer(layer_param); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); + layer.Forward(blob_bottom_vec_, blob_top_vec_); + const Dtype kDelta = 2e-4; + const Dtype* bottom_data = blob_bottom_->cpu_data(); + const Dtype* top_data = blob_top_->cpu_data(); + for (int i = 0; i < blob_bottom_->count(); ++i) { + const Dtype bottom_val = bottom_data[i]; + const Dtype top_val = top_data[i]; + if (base == -1) { + EXPECT_NEAR(top_val, log(shift + scale * bottom_val), kDelta); + } else { + EXPECT_NEAR(top_val, log(shift + scale * bottom_val) / log(base), + kDelta); + } + } + } + + void TestLogGradient(const float base, const float scale, const float shift) { + LogBottomInit(); + LayerParameter layer_param; + layer_param.mutable_log_param()->set_base(base); + 
layer_param.mutable_log_param()->set_scale(scale); + layer_param.mutable_log_param()->set_shift(shift); + LogLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientEltwise(&layer, blob_bottom_vec_, blob_top_vec_); + } }; TYPED_TEST_CASE(NeuronLayerTest, TestDtypesAndDevices); @@ -339,6 +382,88 @@ TYPED_TEST(NeuronLayerTest, TestExpGradientBase2Shift1Scale3) { this->TestExpGradient(kBase, kScale, kShift); } +TYPED_TEST(NeuronLayerTest, TestLogLayer) { + typedef typename TypeParam::Dtype Dtype; + // Test default base of "-1" -- should actually set base := e. + const Dtype kBase = -1; + const Dtype kScale = 1; + const Dtype kShift = 0; + this->TestLogForward(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogGradient) { + typedef typename TypeParam::Dtype Dtype; + // Test default base of "-1" -- should actually set base := e. + const Dtype kBase = -1; + const Dtype kScale = 1; + const Dtype kShift = 0; + this->TestLogGradient(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogLayerBase2) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 1; + const Dtype kShift = 0; + this->TestLogForward(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogGradientBase2) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 1; + const Dtype kShift = 0; + this->TestLogGradient(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogLayerBase2Shift1) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 1; + const Dtype kShift = 1; + this->TestLogForward(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogGradientBase2Shift1) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 1; + const Dtype kShift = 1; + this->TestLogGradient(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogLayerBase2Scale3) { + typedef typename 
TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 3; + const Dtype kShift = 0; + this->TestLogForward(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogGradientBase2Scale3) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 3; + const Dtype kShift = 0; + this->TestLogGradient(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogLayerBase2Shift1Scale3) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 3; + const Dtype kShift = 1; + this->TestLogForward(kBase, kScale, kShift); +} + +TYPED_TEST(NeuronLayerTest, TestLogGradientBase2Shift1Scale3) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kBase = 2; + const Dtype kScale = 3; + const Dtype kShift = 1; + this->TestLogGradient(kBase, kScale, kShift); +} + TYPED_TEST(NeuronLayerTest, TestDropoutHalf) { const float kDropoutRatio = 0.5; this->TestDropoutForward(kDropoutRatio); @@ -541,14 +666,10 @@ TYPED_TEST(NeuronLayerTest, TestPReLUInPlace) { caffe_copy(ip2.blobs()[0]->count(), ip.blobs()[0]->cpu_data(), ip2.blobs()[0]->mutable_cpu_data()); // Forward in-place - ip.Reshape(this->blob_bottom_vec_, this->blob_top_vec_); ip.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - prelu.Reshape(this->blob_top_vec_, this->blob_top_vec_); prelu.Forward(this->blob_top_vec_, this->blob_top_vec_); // Forward non-in-place - ip2.Reshape(blob_bottom_vec_2, blob_middle_vec_2); ip2.Forward(blob_bottom_vec_2, blob_middle_vec_2); - prelu2.Reshape(blob_middle_vec_2, blob_top_vec_2); prelu2.Forward(blob_middle_vec_2, blob_top_vec_2); // Check numbers for (int s = 0; s < blob_top_2->count(); ++s) { @@ -590,7 +711,7 @@ TYPED_TEST(NeuronLayerTest, TestPReLUInPlace) { #ifdef USE_CUDNN template -class CuDNNNeuronLayerTest : public ::testing::Test { +class CuDNNNeuronLayerTest : public GPUDeviceTest { protected: CuDNNNeuronLayerTest() : blob_bottom_(new Blob(2, 3, 4, 5)), @@ -613,7 +734,6 @@ class 
CuDNNNeuronLayerTest : public ::testing::Test { TYPED_TEST_CASE(CuDNNNeuronLayerTest, TestDtypes); TYPED_TEST(CuDNNNeuronLayerTest, TestReLUCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNReLULayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -628,7 +748,6 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNReLULayer layer(layer_param); GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); @@ -637,7 +756,6 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestReLUWithNegativeSlopeCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CHECK(google::protobuf::TextFormat::ParseFromString( "relu_param { negative_slope: 0.01 }", &layer_param)); @@ -657,7 +775,6 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUWithNegativeSlopeCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientWithNegativeSlopeCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CHECK(google::protobuf::TextFormat::ParseFromString( "relu_param { negative_slope: 0.01 }", &layer_param)); @@ -668,7 +785,6 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientWithNegativeSlopeCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNSigmoidLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -685,7 +801,6 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidGradientCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNSigmoidLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); @@ -694,7 +809,6 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidGradientCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestTanHCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter 
layer_param; CuDNNTanHLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -717,7 +831,6 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestTanHCuDNN) { } TYPED_TEST(CuDNNNeuronLayerTest, TestTanHGradientCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNTanHLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); diff --git a/src/caffe/test/test_pooling_layer.cpp b/src/caffe/test/test_pooling_layer.cpp index e9964e7f0b7..69f2d5c1135 100644 --- a/src/caffe/test/test_pooling_layer.cpp +++ b/src/caffe/test/test_pooling_layer.cpp @@ -608,7 +608,7 @@ TYPED_TEST(PoolingLayerTest, TestGradientAvePadded) { #ifdef USE_CUDNN template -class CuDNNPoolingLayerTest : public ::testing::Test { +class CuDNNPoolingLayerTest : public GPUDeviceTest { protected: CuDNNPoolingLayerTest() : blob_bottom_(new Blob()), @@ -963,7 +963,6 @@ class CuDNNPoolingLayerTest : public ::testing::Test { TYPED_TEST_CASE(CuDNNPoolingLayerTest, TestDtypes); TYPED_TEST(CuDNNPoolingLayerTest, TestSetupCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -977,7 +976,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestSetupCuDNN) { } TYPED_TEST(CuDNNPoolingLayerTest, TestSetupPaddedCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -994,7 +992,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestSetupPaddedCuDNN) { /* TYPED_TEST(CuDNNPoolingLayerTest, PrintBackwardCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; layer_param.set_kernelsize(3); layer_param.set_stride(2); @@ -1020,7 +1017,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, PrintBackwardCuDNN) { */ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxCuDNN) { - Caffe::set_mode(Caffe::GPU); this->TestForwardSquare(); this->TestForwardRectHigh(); 
this->TestForwardRectWide(); @@ -1030,7 +1026,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxCuDNN) { // the corresponding backward test. /* TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxTopMaskCuDNN) { - Caffe::set_mode(Caffe::GPU); this->blob_top_vec_.push_back(this->blob_top_mask_); this->TestForwardSquare(); this->TestForwardRectHigh(); @@ -1039,7 +1034,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxTopMaskCuDNN) { */ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxCuDNN) { - Caffe::set_mode(Caffe::GPU); for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; @@ -1059,7 +1053,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxCuDNN) { } TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxPaddedCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -1105,7 +1098,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxPaddedCuDNN) { /* TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxTopMaskCuDNN) { - Caffe::set_mode(Caffe::GPU); for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; @@ -1126,7 +1118,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxTopMaskCuDNN) { */ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardAveCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -1152,7 +1143,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardAveCuDNN) { } TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAveCuDNN) { - Caffe::set_mode(Caffe::GPU); for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; @@ -1170,7 +1160,6 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAveCuDNN) { } 
TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAvePaddedCuDNN) { - Caffe::set_mode(Caffe::GPU); for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { LayerParameter layer_param; diff --git a/src/caffe/test/test_softmax_layer.cpp b/src/caffe/test/test_softmax_layer.cpp index f6674422e56..996da4b8f7c 100644 --- a/src/caffe/test/test_softmax_layer.cpp +++ b/src/caffe/test/test_softmax_layer.cpp @@ -82,7 +82,7 @@ TYPED_TEST(SoftmaxLayerTest, TestGradient) { #ifdef USE_CUDNN template -class CuDNNSoftmaxLayerTest : public ::testing::Test { +class CuDNNSoftmaxLayerTest : public GPUDeviceTest { protected: CuDNNSoftmaxLayerTest() : blob_bottom_(new Blob(2, 10, 2, 3)), @@ -104,7 +104,6 @@ class CuDNNSoftmaxLayerTest : public ::testing::Test { TYPED_TEST_CASE(CuDNNSoftmaxLayerTest, TestDtypes); TYPED_TEST(CuDNNSoftmaxLayerTest, TestForwardCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNSoftmaxLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); @@ -138,7 +137,6 @@ TYPED_TEST(CuDNNSoftmaxLayerTest, TestForwardCuDNN) { } TYPED_TEST(CuDNNSoftmaxLayerTest, TestGradientCuDNN) { - Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNSoftmaxLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); diff --git a/src/caffe/test/test_stochastic_pooling.cpp b/src/caffe/test/test_stochastic_pooling.cpp index 12962c65d85..f84464c322c 100644 --- a/src/caffe/test/test_stochastic_pooling.cpp +++ b/src/caffe/test/test_stochastic_pooling.cpp @@ -16,8 +16,10 @@ using std::min; namespace caffe { -template -class StochasticPoolingLayerTest : public ::testing::Test { +template +class StochasticPoolingLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + protected: StochasticPoolingLayerTest() : blob_bottom_(new Blob()), @@ -45,9 +47,14 @@ class StochasticPoolingLayerTest : public ::testing::Test { vector*> blob_top_vec_; }; 
-TYPED_TEST_CASE(StochasticPoolingLayerTest, TestDtypes); +template +class CPUStochasticPoolingLayerTest + : public StochasticPoolingLayerTest > { +}; + +TYPED_TEST_CASE(CPUStochasticPoolingLayerTest, TestDtypes); -TYPED_TEST(StochasticPoolingLayerTest, TestSetup) { +TYPED_TEST(CPUStochasticPoolingLayerTest, TestSetup) { LayerParameter layer_param; PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); pooling_param->set_kernel_size(3); @@ -60,8 +67,16 @@ TYPED_TEST(StochasticPoolingLayerTest, TestSetup) { EXPECT_EQ(this->blob_top_->width(), 2); } -TYPED_TEST(StochasticPoolingLayerTest, TestStochasticGPU) { - Caffe::set_mode(Caffe::GPU); +#ifndef CPU_ONLY + +template +class GPUStochasticPoolingLayerTest + : public StochasticPoolingLayerTest > { +}; + +TYPED_TEST_CASE(GPUStochasticPoolingLayerTest, TestDtypes); + +TYPED_TEST(GPUStochasticPoolingLayerTest, TestStochastic) { LayerParameter layer_param; layer_param.set_phase(TRAIN); PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); @@ -104,8 +119,7 @@ TYPED_TEST(StochasticPoolingLayerTest, TestStochasticGPU) { EXPECT_GE(total / this->blob_top_->count(), 0.55); } -TYPED_TEST(StochasticPoolingLayerTest, TestStochasticGPUTestPhase) { - Caffe::set_mode(Caffe::GPU); +TYPED_TEST(GPUStochasticPoolingLayerTest, TestStochasticTestPhase) { LayerParameter layer_param; layer_param.set_phase(TEST); PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); @@ -142,8 +156,7 @@ TYPED_TEST(StochasticPoolingLayerTest, TestStochasticGPUTestPhase) { } } -TYPED_TEST(StochasticPoolingLayerTest, TestGradientGPU) { - Caffe::set_mode(Caffe::GPU); +TYPED_TEST(GPUStochasticPoolingLayerTest, TestGradient) { LayerParameter layer_param; layer_param.set_phase(TRAIN); PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); @@ -158,6 +171,6 @@ TYPED_TEST(StochasticPoolingLayerTest, TestGradientGPU) { this->blob_top_vec_); } - +#endif } // namespace caffe diff --git 
a/src/caffe/test/test_triplet_loss_layer.cpp b/src/caffe/test/test_triplet_loss_layer.cpp deleted file mode 100644 index c8d9377fa23..00000000000 --- a/src/caffe/test/test_triplet_loss_layer.cpp +++ /dev/null @@ -1,107 +0,0 @@ -#include -#include -#include -#include -#include - -#include "gtest/gtest.h" - -#include "caffe/blob.hpp" -#include "caffe/common.hpp" -#include "caffe/filler.hpp" -#include "caffe/vision_layers.hpp" - -#include "caffe/test/test_caffe_main.hpp" -#include "caffe/test/test_gradient_check_util.hpp" - -namespace caffe { - -template -class TripletLossLayerTest : public MultiDeviceTest { - typedef typename TypeParam::Dtype Dtype; - - protected: - TripletLossLayerTest() - : blob_bottom_data_i_(new Blob(128, 10, 1, 1)), - blob_bottom_data_j_(new Blob(128, 10, 1, 1)), - blob_bottom_data_k_(new Blob(128, 10, 1, 1)), - blob_bottom_y_(new Blob(128, 1, 1, 1)), - blob_top_loss_(new Blob()) { - // fill the values - FillerParameter filler_param; - filler_param.set_mean(0.0); - filler_param.set_std(0.3); // distances~=1.0 to test both sides of margin - GaussianFiller filler(filler_param); - filler.Fill(this->blob_bottom_data_i_); - blob_bottom_vec_.push_back(blob_bottom_data_i_); - filler.Fill(this->blob_bottom_data_j_); - blob_bottom_vec_.push_back(blob_bottom_data_j_); - filler.Fill(this->blob_bottom_data_k_); - blob_bottom_vec_.push_back(blob_bottom_data_k_); - for (int i = 0; i < blob_bottom_y_->count(); ++i) { - blob_bottom_y_->mutable_cpu_data()[i] = caffe_rng_rand() % 2; // 0 or 1 - } - blob_bottom_vec_.push_back(blob_bottom_y_); - blob_top_vec_.push_back(blob_top_loss_); - } - virtual ~TripletLossLayerTest() { - delete blob_bottom_data_i_; - delete blob_bottom_data_j_; - delete blob_bottom_data_k_; - delete blob_bottom_y_; - delete blob_top_loss_; - } - - Blob* const blob_bottom_data_i_; - Blob* const blob_bottom_data_j_; - Blob* const blob_bottom_data_k_; - Blob* const blob_bottom_y_; - Blob* const blob_top_loss_; - vector*> blob_bottom_vec_; - 
vector*> blob_top_vec_; -}; - -TYPED_TEST_CASE(TripletLossLayerTest, TestDtypesAndDevices); - -TYPED_TEST(TripletLossLayerTest, TestForward) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - TripletLossLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - // manually compute to compare - const Dtype margin = layer_param.contrastive_loss_param().margin(); - const int num = this->blob_bottom_data_i_->num(); - const int channels = this->blob_bottom_data_i_->channels(); - Dtype loss(0); - for (int i = 0; i < num; ++i) { - Dtype dist_sq(0); - for (int j = 0; j < channels; ++j) { - Dtype diff = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - - this->blob_bottom_data_j_->cpu_data()[i*channels+j]; - dist_sq += diff*diff; - } - if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs - loss += dist_sq; - } else { - loss += std::max(margin-dist_sq, Dtype(0)); - } - } - loss /= static_cast(num) * Dtype(2); - EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); -} - -TYPED_TEST(TripletLossLayerTest, TestGradient) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - TripletLossLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - GradientChecker checker(1e-2, 1e-2, 1701); - // check the gradient for the first two bottom layers - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_, 0); - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_, 1); -} - -} // namespace caffe diff --git a/src/caffe/util/math_functions.cpp b/src/caffe/util/math_functions.cpp index 13e17be582b..0aab6b17b85 100644 --- a/src/caffe/util/math_functions.cpp +++ b/src/caffe/util/math_functions.cpp @@ -206,6 +206,16 @@ void caffe_exp(const int n, const double* a, double* y) { vdExp(n, a, y); } +template <> +void caffe_log(const int n, const float* 
a, float* y) { + vsLn(n, a, y); +} + +template <> +void caffe_log(const int n, const double* a, double* y) { + vdLn(n, a, y); +} + template <> void caffe_abs(const int n, const float* a, float* y) { vsAbs(n, a, y); diff --git a/src/caffe/util/math_functions.cu b/src/caffe/util/math_functions.cu index 43e65eb9a69..2631a0740d6 100644 --- a/src/caffe/util/math_functions.cu +++ b/src/caffe/util/math_functions.cu @@ -324,6 +324,27 @@ void caffe_gpu_exp(const int N, const double* a, double* y) { N, a, y); } +template +__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { + CUDA_KERNEL_LOOP(index, n) { + y[index] = log(a[index]); + } +} + +template <> +void caffe_gpu_log(const int N, const float* a, float* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + log_kernel<<>>( + N, a, y); +} + +template <> +void caffe_gpu_log(const int N, const double* a, double* y) { + // NOLINT_NEXT_LINE(whitespace/operators) + log_kernel<<>>( + N, a, y); +} + template __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { From 898ce4ce3d72c07f34ade76e653eeff3c81c547c Mon Sep 17 00:00:00 2001 From: Wangyida Date: Mon, 15 Jun 2015 15:35:57 +0800 Subject: [PATCH 60/82] suit for opencv3.0.0 delete spaces delete libopenccodecsv triplet tranining data code fix --- examples/siamese/convert_mnist_siamese_data.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/examples/siamese/convert_mnist_siamese_data.cpp b/examples/siamese/convert_mnist_siamese_data.cpp index e9403d7ab94..2a563b6bbf8 100644 --- a/examples/siamese/convert_mnist_siamese_data.cpp +++ b/examples/siamese/convert_mnist_siamese_data.cpp @@ -99,7 +99,6 @@ void convert_dataset(const char* image_filename, const char* label_filename, datum.set_data(pixels, 3*rows*cols); if (label_i == label_j && label_i != label_k) { datum.set_label(1); - datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", itemid); db->Put(leveldb::WriteOptions(), std::string(key), value); From 
0553e2ec28a9fa50841debc1792d722b41a3f4b1 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Mon, 15 Jun 2015 20:43:15 +0800 Subject: [PATCH 61/82] cpu only cpu only cpu only bug fixes on triplet_loss_layer.cpp and the test on mnist works well --- Makefile.config.example | 2 +- examples/siamese/mnist_siamese_solver.prototxt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile.config.example b/Makefile.config.example index a20bad2f5ce..d870f965737 100644 --- a/Makefile.config.example +++ b/Makefile.config.example @@ -5,7 +5,7 @@ # USE_CUDNN := 1 # CPU-only switch (uncomment to build without GPU support). -# CPU_ONLY := 1 +CPU_ONLY := 1 # uncomment to disable IO dependencies and corresponding data layers # USE_LEVELDB := 0 diff --git a/examples/siamese/mnist_siamese_solver.prototxt b/examples/siamese/mnist_siamese_solver.prototxt index 07ac88de057..d4d994d1389 100644 --- a/examples/siamese/mnist_siamese_solver.prototxt +++ b/examples/siamese/mnist_siamese_solver.prototxt @@ -7,7 +7,7 @@ test_iter: 100 # Carry out testing every 500 training iterations. test_interval: 500 # The base learning rate, momentum and the weight decay of the network. 
-base_lr: 0.001 +base_lr: 0.01 momentum: 0.9 weight_decay: 0.0000 # The learning rate policy From e93feb53b2c2cfc865701fc177cb2e6b0a707e33 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Tue, 30 Jun 2015 21:29:31 +0800 Subject: [PATCH 62/82] lfw test added in examples of triplet delete unuseful codes --- examples/siamese/convert_lfw_siamese_data.cpp | 121 +++++ examples/siamese/create_lfw_siamese.sh | 21 + examples/siamese/lfw_siamese.prototxt | 113 ++++ examples/siamese/lfw_siamese_solver.prototxt | 25 + .../siamese/lfw_siamese_train_test.prototxt | 349 ++++++++++++ examples/siamese/train_lfw_siamese.sh | 5 + examples/triplet/convert_lfw_triplet_data.cpp | 126 +++++ examples/triplet/create_lfw_triplet.sh | 21 + examples/triplet/lfw_triplet.prototxt | 113 ++++ examples/triplet/lfw_triplet_solver.prototxt | 25 + .../triplet/lfw_triplet_train_test.prototxt | 500 ++++++++++++++++++ examples/triplet/train_lfw_triplet.sh | 5 + 12 files changed, 1424 insertions(+) create mode 100644 examples/siamese/convert_lfw_siamese_data.cpp create mode 100755 examples/siamese/create_lfw_siamese.sh create mode 100644 examples/siamese/lfw_siamese.prototxt create mode 100644 examples/siamese/lfw_siamese_solver.prototxt create mode 100644 examples/siamese/lfw_siamese_train_test.prototxt create mode 100755 examples/siamese/train_lfw_siamese.sh create mode 100644 examples/triplet/convert_lfw_triplet_data.cpp create mode 100755 examples/triplet/create_lfw_triplet.sh create mode 100644 examples/triplet/lfw_triplet.prototxt create mode 100644 examples/triplet/lfw_triplet_solver.prototxt create mode 100644 examples/triplet/lfw_triplet_train_test.prototxt create mode 100755 examples/triplet/train_lfw_triplet.sh diff --git a/examples/siamese/convert_lfw_siamese_data.cpp b/examples/siamese/convert_lfw_siamese_data.cpp new file mode 100644 index 00000000000..fe134ca9b4e --- /dev/null +++ b/examples/siamese/convert_lfw_siamese_data.cpp @@ -0,0 +1,121 @@ +// +// This script converts the MNIST dataset 
to the leveldb format used +// by caffe to train siamese network. +// Usage: +// convert_mnist_data input_image_file input_label_file output_db_file +// The MNIST dataset could be downloaded at +// http://yann.lecun.com/exdb/mnist/ +#include // NOLINT(readability/streams) +#include + +#include "glog/logging.h" +#include "google/protobuf/text_format.h" +#include "leveldb/db.h" +#include "stdint.h" + +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/math_functions.hpp" + +uint32_t swap_endian(uint32_t val) { + val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); + return (val << 16) | (val >> 16); +} + +void read_image(std::ifstream* image_file, std::ifstream* label_file, + uint32_t index, uint32_t rows, uint32_t cols, + char* pixels, char* label) { + image_file->seekg(index * rows * cols + 16); + image_file->read(pixels, rows * cols); + label_file->seekg(index + 8); + label_file->read(label, 1); +} + +void convert_dataset(const char* image_filename, const char* label_filename, + const char* db_filename) { + // Open files + std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); + std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); + CHECK(image_file) << "Unable to open file " << image_filename; + CHECK(label_file) << "Unable to open file " << label_filename; + // Read the magic and the meta data + uint32_t magic; + uint32_t num_items; + uint32_t num_labels; + uint32_t rows; + uint32_t cols; + + image_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + label_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + image_file.read(reinterpret_cast(&num_items), 4); + num_items = swap_endian(num_items); + label_file.read(reinterpret_cast(&num_labels), 4); + num_labels = swap_endian(num_labels); + CHECK_EQ(num_items, num_labels); + image_file.read(reinterpret_cast(&rows), 4); + rows = swap_endian(rows); + image_file.read(reinterpret_cast(&cols), 4); + cols = swap_endian(cols); + + 
// Open leveldb + leveldb::DB* db; + leveldb::Options options; + options.create_if_missing = true; + options.error_if_exists = true; + leveldb::Status status = leveldb::DB::Open( + options, db_filename, &db); + CHECK(status.ok()) << "Failed to open leveldb " << db_filename + << ". Is it already existing?"; + + char label_i; + char label_j; + char* pixels = new char[2 * rows * cols]; + const int kMaxKeyLength = 10; + char key[kMaxKeyLength]; + std::string value; + + caffe::Datum datum; + datum.set_channels(2); // one channel for each image in the pair + datum.set_height(rows); + datum.set_width(cols); + LOG(INFO) << "A total of " << num_items << " items."; + LOG(INFO) << "Rows: " << rows << " Cols: " << cols; + for (int itemid = 0; itemid < num_items; ++itemid) { + int i = caffe::caffe_rng_rand() % num_items; // pick a random pair + int j = caffe::caffe_rng_rand() % num_items; + read_image(&image_file, &label_file, i, rows, cols, + pixels, &label_i); + read_image(&image_file, &label_file, j, rows, cols, + pixels + (rows * cols), &label_j); + datum.set_data(pixels, 2*rows*cols); + if (label_i == label_j) { + datum.set_label(1); + } else { + datum.set_label(0); + } + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", itemid); + db->Put(leveldb::WriteOptions(), std::string(key), value); + } + + delete db; + delete pixels; +} + +int main(int argc, char** argv) { + if (argc != 4) { + printf("This script converts the MNIST dataset to the leveldb format used\n" + "by caffe to train a siamese network.\n" + "Usage:\n" + " convert_mnist_data input_image_file input_label_file " + "output_db_file\n" + "The MNIST dataset could be downloaded at\n" + " http://yann.lecun.com/exdb/mnist/\n" + "You should gunzip them after downloading.\n"); + } else { + google::InitGoogleLogging(argv[0]); + convert_dataset(argv[1], argv[2], argv[3]); + } + return 0; +} diff --git a/examples/siamese/create_lfw_siamese.sh b/examples/siamese/create_lfw_siamese.sh new file mode 
100755 index 00000000000..3790b9eb2a0 --- /dev/null +++ b/examples/siamese/create_lfw_siamese.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env sh +# This script converts the mnist data into leveldb format. + +EXAMPLES=./build/examples/siamese +DATA=./data/lfw + +echo "Creating leveldb..." + +rm -rf ./examples/siamese/lfw_siamese_train_leveldb +rm -rf ./examples/siamese/lfw_siamese_test_leveldb + +$EXAMPLES/convert_lfw_siamese_data.bin \ + $DATA/traindata \ + $DATA/trainlabel \ + ./examples/siamese/lfw_siamese_train_leveldb +$EXAMPLES/convert_mnist_siamese_data.bin \ + $DATA/testdata \ + $DATA/testlabel \ + ./examples/siamese/lfw_siamese_test_leveldb + +echo "Done." diff --git a/examples/siamese/lfw_siamese.prototxt b/examples/siamese/lfw_siamese.prototxt new file mode 100644 index 00000000000..106d9aa76f4 --- /dev/null +++ b/examples/siamese/lfw_siamese.prototxt @@ -0,0 +1,113 @@ +name: "lfw_siamese" +input: "data" +input_dim: 10000 +input_dim: 1 +input_dim: 150 +input_dim: 80 +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 500 + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "ip1" + 
top: "ip2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 10 + } +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip2" + top: "feat" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 2 + } +} diff --git a/examples/siamese/lfw_siamese_solver.prototxt b/examples/siamese/lfw_siamese_solver.prototxt new file mode 100644 index 00000000000..2aaafb63c1f --- /dev/null +++ b/examples/siamese/lfw_siamese_solver.prototxt @@ -0,0 +1,25 @@ +# The train/test net protocol buffer definition +net: "examples/siamese/lfw_siamese_train_test.prototxt" +# test_iter specifies how many forward passes the test should carry out. +# In the case of MNIST, we have test batch size 100 and 100 test iterations, +# covering the full 10,000 testing images. +test_iter: 100 +# Carry out testing every 500 training iterations. +test_interval: 500 +# The base learning rate, momentum and the weight decay of the network. +base_lr: 0.01 +momentum: 0.9 +weight_decay: 0.0000 +# The learning rate policy +lr_policy: "inv" +gamma: 0.0001 +power: 0.75 +# Display every 100 iterations +display: 100 +# The maximum number of iterations +max_iter: 50000 +# snapshot intermediate results +snapshot: 5000 +snapshot_prefix: "examples/siamese/lfw_siamese" +# solver mode: CPU or GPU +solver_mode: CPU diff --git a/examples/siamese/lfw_siamese_train_test.prototxt b/examples/siamese/lfw_siamese_train_test.prototxt new file mode 100644 index 00000000000..049187bf3d4 --- /dev/null +++ b/examples/siamese/lfw_siamese_train_test.prototxt @@ -0,0 +1,349 @@ +name: "lfw_siamese_train_test" +layer { + name: "pair_data" + type: "Data" + top: "pair_data" + top: "sim" + include { + phase: TRAIN + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/siamese/lfw_siamese_train_leveldb" + batch_size: 64 + } +} +layer { + name: "pair_data" + type: "Data" + top: "pair_data" + top: "sim" + include { + phase: TEST + } + 
transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/siamese/lfw_siamese_test_leveldb" + batch_size: 100 + } +} +layer { + name: "slice_pair" + type: "Slice" + bottom: "pair_data" + top: "data" + top: "data_p" + slice_param { + slice_dim: 1 + slice_point: 1 + } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "ip1" + top: "ip2" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip2" + top: "feat" + param { + name: "feat_w" + lr_mult: 1 + } + param { + 
name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "conv1_p" + type: "Convolution" + bottom: "data_p" + top: "conv1_p" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1_p" + type: "Pooling" + bottom: "conv1_p" + top: "pool1_p" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2_p" + type: "Convolution" + bottom: "pool1_p" + top: "conv2_p" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2_p" + type: "Pooling" + bottom: "conv2_p" + top: "pool2_p" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1_p" + type: "InnerProduct" + bottom: "pool2_p" + top: "ip1_p" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1_p" + type: "ReLU" + bottom: "ip1_p" + top: "ip1_p" +} +layer { + name: "ip2_p" + type: "InnerProduct" + bottom: "ip1_p" + top: "ip2_p" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat_p" + type: "InnerProduct" + bottom: "ip2_p" + top: "feat_p" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + 
weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "loss" + type: "ContrastiveLoss" + bottom: "feat" + bottom: "feat_p" + bottom: "sim" + top: "loss" + contrastive_loss_param { + margin: 1 + } +} diff --git a/examples/siamese/train_lfw_siamese.sh b/examples/siamese/train_lfw_siamese.sh new file mode 100755 index 00000000000..0a879a65419 --- /dev/null +++ b/examples/siamese/train_lfw_siamese.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env sh + +TOOLS=./build/tools + +$TOOLS/caffe train --solver=examples/siamese/lfw_siamese_solver.prototxt diff --git a/examples/triplet/convert_lfw_triplet_data.cpp b/examples/triplet/convert_lfw_triplet_data.cpp new file mode 100644 index 00000000000..9f65fab76b4 --- /dev/null +++ b/examples/triplet/convert_lfw_triplet_data.cpp @@ -0,0 +1,126 @@ +// This script converts the MNIST dataset to the leveldb format used +// by caffe to train siamese network. +// Usage: +// convert_mnist_data input_image_file input_label_file output_db_file +// The MNIST dataset could be downloaded at +// http://yann.lecun.com/exdb/mnist/ +#include // NOLINT(readability/streams) +#include + +#include "glog/logging.h" +#include "google/protobuf/text_format.h" +#include "leveldb/db.h" +#include "stdint.h" + +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/math_functions.hpp" + +uint32_t swap_endian(uint32_t val) { + val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); + return (val << 16) | (val >> 16); +} + +void read_image(std::ifstream* image_file, std::ifstream* label_file, + uint32_t index, uint32_t rows, uint32_t cols, + char* pixels, char* label) { + image_file->seekg(index * rows * cols + 16); + image_file->read(pixels, rows * cols); + label_file->seekg(index + 8); + label_file->read(label, 1); +} + +void convert_dataset(const char* image_filename, const char* label_filename, + const char* db_filename) { + // Open files + std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); + 
std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); + CHECK(image_file) << "Unable to open file " << image_filename; + CHECK(label_file) << "Unable to open file " << label_filename; + // Read the magic and the meta data + uint32_t magic; + uint32_t num_items; + uint32_t num_labels; + uint32_t rows; + uint32_t cols; + + image_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + label_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + image_file.read(reinterpret_cast(&num_items), 4); + num_items = swap_endian(num_items); + label_file.read(reinterpret_cast(&num_labels), 4); + num_labels = swap_endian(num_labels); + CHECK_EQ(num_items, num_labels); + image_file.read(reinterpret_cast(&rows), 4); + rows = swap_endian(rows); + image_file.read(reinterpret_cast(&cols), 4); + cols = swap_endian(cols); + + // Open leveldb + leveldb::DB* db; + leveldb::Options options; + options.create_if_missing = true; + options.error_if_exists = true; + leveldb::Status status = leveldb::DB::Open( + options, db_filename, &db); + CHECK(status.ok()) << "Failed to open leveldb " << db_filename + << ". 
Is it already existing?"; + + char label_i; + char label_j; + char label_k; + char* pixels = new char[3 * rows * cols]; + const int kMaxKeyLength = 10; + char key[kMaxKeyLength]; + std::string value; + + caffe::Datum datum; + datum.set_channels(3); // one channel for each image in the pair + datum.set_height(rows); + datum.set_width(cols); + LOG(INFO) << "A total of " << num_items << " items."; + LOG(INFO) << "Rows: " << rows << " Cols: " << cols; + for (int itemid = 0; itemid < num_items; ++itemid) { + int i = caffe::caffe_rng_rand() % num_items; // pick triplet groups + int j = caffe::caffe_rng_rand() % num_items; + int k = caffe::caffe_rng_rand() % num_items; + read_image(&image_file, &label_file, i, rows, cols, + pixels, &label_i); + read_image(&image_file, &label_file, j, rows, cols, + pixels + (rows * cols), &label_j); + read_image(&image_file, &label_file, k, rows, cols, + pixels + (2 * rows * cols), &label_k); + + datum.set_data(pixels, 3*rows*cols); + if (label_i == label_j && label_i != label_k) { + datum.set_label(1); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", itemid); + db->Put(leveldb::WriteOptions(), std::string(key), value); + } else { + itemid--; + datum.set_label(0); + } + } + + delete db; + delete pixels; +} + +int main(int argc, char** argv) { + if (argc != 4) { + printf("This script converts the MNIST dataset to the leveldb format used\n" + "by caffe to train a siamese network.\n" + "Usage:\n" + " convert_mnist_data input_image_file input_label_file " + "output_db_file\n" + "The MNIST dataset could be downloaded at\n" + " http://yann.lecun.com/exdb/mnist/\n" + "You should gunzip them after downloading.\n"); + } else { + google::InitGoogleLogging(argv[0]); + convert_dataset(argv[1], argv[2], argv[3]); + } + return 0; +} diff --git a/examples/triplet/create_lfw_triplet.sh b/examples/triplet/create_lfw_triplet.sh new file mode 100755 index 00000000000..382a9021f10 --- /dev/null +++ 
b/examples/triplet/create_lfw_triplet.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env sh +# This script converts the lfw data into leveldb format. + +EXAMPLES=./build/examples/triplet +DATA=./data/lfw + +echo "Creating leveldb..." + +rm -rf ./examples/triplet/lfw_triplet_train_leveldb +rm -rf ./examples/triplet/lfw_triplet_test_leveldb + +$EXAMPLES/convert_lfw_triplet_data.bin \ + $DATA/traindata \ + $DATA/trainlabel \ + ./examples/triplet/lfw_triplet_train_leveldb +$EXAMPLES/convert_lfw_triplet_data.bin \ + $DATA/testdata \ + $DATA/testlabel \ + ./examples/triplet/lfw_triplet_test_leveldb + +echo "Done." diff --git a/examples/triplet/lfw_triplet.prototxt b/examples/triplet/lfw_triplet.prototxt new file mode 100644 index 00000000000..9537d1feb8b --- /dev/null +++ b/examples/triplet/lfw_triplet.prototxt @@ -0,0 +1,113 @@ +name: "lfw_siamese" +input: "data" +input_dim: 10000 +input_dim: 1 +input_dim: 150 +input_dim: 130 +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 500 + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "ip1" + top: "ip2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + 
} + inner_product_param { + num_output: 10 + } +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip2" + top: "feat" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 2 + } +} diff --git a/examples/triplet/lfw_triplet_solver.prototxt b/examples/triplet/lfw_triplet_solver.prototxt new file mode 100644 index 00000000000..eb4c2c369e9 --- /dev/null +++ b/examples/triplet/lfw_triplet_solver.prototxt @@ -0,0 +1,25 @@ +# The train/test net protocol buffer definition +net: "examples/triplet/lfw_triplet_train_test.prototxt" +# test_iter specifies how many forward passes the test should carry out. +# In the case of lfw, we have test batch size 100 and 100 test iterations, +# covering the full 10,000 testing images. +test_iter: 100 +# Carry out testing every 500 training iterations. +test_interval: 500 +# The base learning rate, momentum and the weight decay of the network. +base_lr: 0.01 +momentum: 0.9 +weight_decay: 0.0000 +# The learning rate policy +lr_policy: "inv" +gamma: 0.0001 +power: 0.75 +# Display every 100 iterations +display: 100 +# The maximum number of iterations +max_iter: 50000 +# snapshot intermediate results +snapshot: 5000 +snapshot_prefix: "examples/triplet/lfw_triplet" +# solver mode: CPU or GPU +solver_mode: CPU diff --git a/examples/triplet/lfw_triplet_train_test.prototxt b/examples/triplet/lfw_triplet_train_test.prototxt new file mode 100644 index 00000000000..59ef26e90a4 --- /dev/null +++ b/examples/triplet/lfw_triplet_train_test.prototxt @@ -0,0 +1,500 @@ +name: "lfw_triplet_train_test" +layer { + name: "triplet_data" + type: "Data" + top: "triplet_data" + top: "sim" + include { + phase: TRAIN + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/triplet/lfw_triplet_train_leveldb" + batch_size: 64 + } +} +layer { + name: "triplet_data" + type: "Data" + top: "triplet_data" + top: "sim" + include { + phase: TEST + } + transform_param { + scale: 0.00390625 + } + data_param 
{ + source: "examples/triplet/lfw_triplet_test_leveldb" + batch_size: 100 + } +} +layer { + name: "slice_triplet" + type: "Slice" + bottom: "triplet_data" + top: "data" + top: "data_true" + top: "data_false" + slice_param { + slice_dim: 1 + slice_point: 1 + slice_point: 2 + } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "ip1" + top: "ip2" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip2" + top: "feat" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: 
"feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "conv1_true" + type: "Convolution" + bottom: "data_true" + top: "conv1_true" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1_true" + type: "Pooling" + bottom: "conv1_true" + top: "pool1_true" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2_true" + type: "Convolution" + bottom: "pool1_true" + top: "conv2_true" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2_true" + type: "Pooling" + bottom: "conv2_true" + top: "pool2_true" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1_true" + type: "InnerProduct" + bottom: "pool2_true" + top: "ip1_true" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1_true" + type: "ReLU" + bottom: "ip1_true" + top: "ip1_true" +} +layer { + name: "ip2_true" + type: "InnerProduct" + bottom: "ip1_true" + top: "ip2_true" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat_true" + type: "InnerProduct" + bottom: "ip2_true" + top: "feat_true" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + 
lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "conv1_false" + type: "Convolution" + bottom: "data_false" + top: "conv1_false" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1_false" + type: "Pooling" + bottom: "conv1_false" + top: "pool1_false" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2_false" + type: "Convolution" + bottom: "pool1_false" + top: "conv2_false" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2_false" + type: "Pooling" + bottom: "conv2_false" + top: "pool2_false" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1_false" + type: "InnerProduct" + bottom: "pool2_false" + top: "ip1_false" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1_false" + type: "ReLU" + bottom: "ip1_false" + top: "ip1_false" +} +layer { + name: "ip2_false" + type: "InnerProduct" + bottom: "ip1_false" + top: "ip2_false" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat_false" + type: "InnerProduct" + bottom: "ip2_false" + top: "feat_false" + param { + name: "feat_w" + lr_mult: 1 + } + param { + 
name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "loss" + type: "TripletLoss" + bottom: "feat" + bottom: "feat_true" + bottom: "feat_false" + bottom: "sim" + top: "loss" + triplet_loss_param { + margin: 0.2 + } +} + diff --git a/examples/triplet/train_lfw_triplet.sh b/examples/triplet/train_lfw_triplet.sh new file mode 100755 index 00000000000..076738a5e63 --- /dev/null +++ b/examples/triplet/train_lfw_triplet.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env sh + +TOOLS=./build/tools + +$TOOLS/caffe train --solver=examples/triplet/lfw_triplet_solver.prototxt From 98f595ea7e1d94fe1d86179aa54fa5248773c453 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Fri, 24 Jul 2015 11:36:02 +0800 Subject: [PATCH 63/82] ready for a review on triplet training using the 3D data, net work definition is the same as described in paper delete spaces --- examples/triplet/convert_lfw_triplet_data.cpp | 26 ++++++++++++++----- 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/examples/triplet/convert_lfw_triplet_data.cpp b/examples/triplet/convert_lfw_triplet_data.cpp index 9f65fab76b4..0fb87a17b4b 100644 --- a/examples/triplet/convert_lfw_triplet_data.cpp +++ b/examples/triplet/convert_lfw_triplet_data.cpp @@ -45,8 +45,10 @@ void convert_dataset(const char* image_filename, const char* label_filename, image_file.read(reinterpret_cast(&magic), 4); magic = swap_endian(magic); + CHECK_EQ(magic, 2051) << "Incorrect image file magic."; label_file.read(reinterpret_cast(&magic), 4); magic = swap_endian(magic); + CHECK_EQ(magic, 2049) << "Incorrect label file magic."; image_file.read(reinterpret_cast(&num_items), 4); num_items = swap_endian(num_items); label_file.read(reinterpret_cast(&num_labels), 4); @@ -67,33 +69,45 @@ void convert_dataset(const char* image_filename, const char* label_filename, CHECK(status.ok()) << "Failed to open leveldb " << db_filename << ". 
Is it already existing?"; - char label_i; + char label_i; // label for triplet char label_j; char label_k; - char* pixels = new char[3 * rows * cols]; + char label_l; // label for pair wise + char label_m; + char* pixels = new char[5 * rows * cols]; const int kMaxKeyLength = 10; char key[kMaxKeyLength]; std::string value; caffe::Datum datum; - datum.set_channels(3); // one channel for each image in the pair + datum.set_channels(5); // one channel for each image in the triplet and pair datum.set_height(rows); datum.set_width(cols); LOG(INFO) << "A total of " << num_items << " items."; LOG(INFO) << "Rows: " << rows << " Cols: " << cols; for (int itemid = 0; itemid < num_items; ++itemid) { - int i = caffe::caffe_rng_rand() % num_items; // pick triplet groups + // pick triplet groups + int i = caffe::caffe_rng_rand() % num_items; int j = caffe::caffe_rng_rand() % num_items; int k = caffe::caffe_rng_rand() % num_items; + // pick pair wise groups + int l = caffe::caffe_rng_rand() % num_items; + int m = caffe::caffe_rng_rand() % num_items; + // read triplet groups read_image(&image_file, &label_file, i, rows, cols, pixels, &label_i); read_image(&image_file, &label_file, j, rows, cols, pixels + (rows * cols), &label_j); read_image(&image_file, &label_file, k, rows, cols, pixels + (2 * rows * cols), &label_k); + // read pair wise groups + read_image(&image_file, &label_file, l, rows, cols, + pixels + (3 * rows * cols), &label_l); + read_image(&image_file, &label_file, m, rows, cols, + pixels + (4 * rows * cols), &label_m); - datum.set_data(pixels, 3*rows*cols); - if (label_i == label_j && label_i != label_k) { + datum.set_data(pixels, 5*rows*cols); // set data + if ((label_i == label_j && label_i != label_k) && (label_l == label_m)) { datum.set_label(1); datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", itemid); From db14ebfe72158f3559c0a8b82d1aa736489561c2 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Thu, 30 Jul 2015 15:16:19 +0800 Subject: [PATCH 
64/82] add pose information for training data arrangement in triplet training delete macro --- examples/triplet/convert_lfw_triplet_data.cpp | 72 ++++++++----------- 1 file changed, 30 insertions(+), 42 deletions(-) diff --git a/examples/triplet/convert_lfw_triplet_data.cpp b/examples/triplet/convert_lfw_triplet_data.cpp index 0fb87a17b4b..94414887995 100644 --- a/examples/triplet/convert_lfw_triplet_data.cpp +++ b/examples/triplet/convert_lfw_triplet_data.cpp @@ -1,17 +1,13 @@ -// This script converts the MNIST dataset to the leveldb format used -// by caffe to train siamese network. // Usage: // convert_mnist_data input_image_file input_label_file output_db_file // The MNIST dataset could be downloaded at // http://yann.lecun.com/exdb/mnist/ #include // NOLINT(readability/streams) #include - #include "glog/logging.h" #include "google/protobuf/text_format.h" #include "leveldb/db.h" #include "stdint.h" - #include "caffe/proto/caffe.pb.h" #include "caffe/util/math_functions.hpp" @@ -22,11 +18,13 @@ uint32_t swap_endian(uint32_t val) { void read_image(std::ifstream* image_file, std::ifstream* label_file, uint32_t index, uint32_t rows, uint32_t cols, - char* pixels, char* label) { + char* pixels, char* label_temp, signed char* label) { image_file->seekg(index * rows * cols + 16); image_file->read(pixels, rows * cols); - label_file->seekg(index + 8); - label_file->read(label, 1); + label_file->seekg(index * 4 + 8); + label_file->read(label_temp, 4); + for (int i = 0; i < 4; i++) + *(label+i) = (signed char)*(label_temp+i); } void convert_dataset(const char* image_filename, const char* label_filename, @@ -48,7 +46,7 @@ void convert_dataset(const char* image_filename, const char* label_filename, CHECK_EQ(magic, 2051) << "Incorrect image file magic."; label_file.read(reinterpret_cast(&magic), 4); magic = swap_endian(magic); - CHECK_EQ(magic, 2049) << "Incorrect label file magic."; + CHECK_EQ(magic, 2050) << "Incorrect label file magic."; 
image_file.read(reinterpret_cast(&num_items), 4); num_items = swap_endian(num_items); label_file.read(reinterpret_cast(&num_labels), 4); @@ -69,11 +67,12 @@ void convert_dataset(const char* image_filename, const char* label_filename, CHECK(status.ok()) << "Failed to open leveldb " << db_filename << ". Is it already existing?"; - char label_i; // label for triplet - char label_j; - char label_k; - char label_l; // label for pair wise - char label_m; + char* label_temp = new char[4]; // label for unsigned char* + signed char* label_i = new signed char[4]; // label for triplet + signed char* label_j = new signed char[4]; + signed char* label_k = new signed char[4]; + signed char* label_l = new signed char[4]; // label for pair wise + signed char* label_m = new signed char[4]; char* pixels = new char[5 * rows * cols]; const int kMaxKeyLength = 10; char key[kMaxKeyLength]; @@ -85,29 +84,30 @@ void convert_dataset(const char* image_filename, const char* label_filename, datum.set_width(cols); LOG(INFO) << "A total of " << num_items << " items."; LOG(INFO) << "Rows: " << rows << " Cols: " << cols; - for (int itemid = 0; itemid < num_items; ++itemid) { - // pick triplet groups - int i = caffe::caffe_rng_rand() % num_items; + for (unsigned int itemid = 0; itemid < 10 * num_items; ++itemid) { + int i = caffe::caffe_rng_rand() % num_items; // pick triplet groups int j = caffe::caffe_rng_rand() % num_items; int k = caffe::caffe_rng_rand() % num_items; - // pick pair wise groups - int l = caffe::caffe_rng_rand() % num_items; + int l = caffe::caffe_rng_rand() % num_items; // pick pair wise groups int m = caffe::caffe_rng_rand() % num_items; - // read triplet groups - read_image(&image_file, &label_file, i, rows, cols, - pixels, &label_i); + read_image(&image_file, &label_file, i, rows, cols, // read triplet groups + pixels, label_temp, label_i); read_image(&image_file, &label_file, j, rows, cols, - pixels + (rows * cols), &label_j); + pixels + (rows * cols), label_temp, label_j); 
read_image(&image_file, &label_file, k, rows, cols, - pixels + (2 * rows * cols), &label_k); - // read pair wise groups - read_image(&image_file, &label_file, l, rows, cols, - pixels + (3 * rows * cols), &label_l); + pixels + (2 * rows * cols), label_temp, label_k); + read_image(&image_file, &label_file, l, rows, cols, // read pair wise groups + pixels + (3 * rows * cols), label_temp, label_l); read_image(&image_file, &label_file, m, rows, cols, - pixels + (4 * rows * cols), &label_m); + pixels + (4 * rows * cols), label_temp, label_m); datum.set_data(pixels, 5*rows*cols); // set data - if ((label_i == label_j && label_i != label_k) && (label_l == label_m)) { + bool pose_pass; + int dist_ij = (int)((*(label_i+1)-*(label_j+1))*(*(label_i+1)-*(label_j+1)) + (*(label_i+2)-*(label_j+2))*(*(label_i+2)-*(label_j+2)) + (*(label_i+3)-*(label_j+3))*(*(label_i+3)-*(label_j+3))); + int dist_ik = (int)((*(label_i+1)-*(label_k+1))*(*(label_i+1)-*(label_k+1)) + (*(label_i+2)-*(label_k+2))*(*(label_i+2)-*(label_k+2)) + (*(label_i+3)-*(label_k+3))*(*(label_i+3)-*(label_k+3))); + if (dist_ij < dist_ik ) + pose_pass = true; + if (((*label_i == *label_j && *label_i != *label_k) || ((*label_i == *label_j && *label_i == *label_k) && pose_pass)) && (*label_l == *label_m)) { datum.set_label(1); datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", itemid); @@ -117,24 +117,12 @@ void convert_dataset(const char* image_filename, const char* label_filename, datum.set_label(0); } } - delete db; delete pixels; } int main(int argc, char** argv) { - if (argc != 4) { - printf("This script converts the MNIST dataset to the leveldb format used\n" - "by caffe to train a siamese network.\n" - "Usage:\n" - " convert_mnist_data input_image_file input_label_file " - "output_db_file\n" - "The MNIST dataset could be downloaded at\n" - " http://yann.lecun.com/exdb/mnist/\n" - "You should gunzip them after downloading.\n"); - } else { - google::InitGoogleLogging(argv[0]); - 
convert_dataset(argv[1], argv[2], argv[3]); - } + convert_dataset("/home/wangyida/Desktop/caffe/data/linemod/binary_image_train", "/home/wangyida/Desktop/caffe/data/linemod/binary_label_train", "/home/wangyida/Desktop/caffe/data/linemod/leveldb"); return 0; } + From 9245f64309d84533b588bc6fc2c0fc4061aa00e1 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Sun, 9 Aug 2015 15:11:32 +0800 Subject: [PATCH 65/82] No sclice layer version which could forward a set of triplets together with 1 pair wise delete file --- examples/mnist/lenet_solver.prototxt | 2 +- examples/siamese/convert_lfw_siamese_data.cpp | 121 ----- examples/siamese/create_lfw_siamese.sh | 21 - examples/siamese/lfw_siamese.prototxt | 113 ---- examples/siamese/lfw_siamese_solver.prototxt | 25 - .../siamese/lfw_siamese_train_test.prototxt | 349 ------------ examples/siamese/train_lfw_siamese.sh | 5 - .../triplet/3d_triplet_train_test.prototxt | 181 +++++++ examples/triplet/convert_lfw_triplet_data.cpp | 128 ----- examples/triplet/create_lfw_triplet.sh | 21 - examples/triplet/lfw_triplet.prototxt | 113 ---- examples/triplet/lfw_triplet_solver.prototxt | 25 - .../triplet/lfw_triplet_train_test.prototxt | 500 ------------------ examples/triplet/train_lfw_triplet.sh | 5 - 14 files changed, 182 insertions(+), 1427 deletions(-) delete mode 100644 examples/siamese/convert_lfw_siamese_data.cpp delete mode 100755 examples/siamese/create_lfw_siamese.sh delete mode 100644 examples/siamese/lfw_siamese.prototxt delete mode 100644 examples/siamese/lfw_siamese_solver.prototxt delete mode 100644 examples/siamese/lfw_siamese_train_test.prototxt delete mode 100755 examples/siamese/train_lfw_siamese.sh create mode 100644 examples/triplet/3d_triplet_train_test.prototxt delete mode 100644 examples/triplet/convert_lfw_triplet_data.cpp delete mode 100755 examples/triplet/create_lfw_triplet.sh delete mode 100644 examples/triplet/lfw_triplet.prototxt delete mode 100644 examples/triplet/lfw_triplet_solver.prototxt delete mode 100644 
examples/triplet/lfw_triplet_train_test.prototxt delete mode 100755 examples/triplet/train_lfw_triplet.sh diff --git a/examples/mnist/lenet_solver.prototxt b/examples/mnist/lenet_solver.prototxt index 2dfbc834f41..c4c7d34c478 100644 --- a/examples/mnist/lenet_solver.prototxt +++ b/examples/mnist/lenet_solver.prototxt @@ -7,7 +7,7 @@ test_iter: 100 # Carry out testing every 500 training iterations. test_interval: 500 # The base learning rate, momentum and the weight decay of the network. -base_lr: 0.01 +base_lr: 0.001 momentum: 0.9 weight_decay: 0.0005 # The learning rate policy diff --git a/examples/siamese/convert_lfw_siamese_data.cpp b/examples/siamese/convert_lfw_siamese_data.cpp deleted file mode 100644 index fe134ca9b4e..00000000000 --- a/examples/siamese/convert_lfw_siamese_data.cpp +++ /dev/null @@ -1,121 +0,0 @@ -// -// This script converts the MNIST dataset to the leveldb format used -// by caffe to train siamese network. -// Usage: -// convert_mnist_data input_image_file input_label_file output_db_file -// The MNIST dataset could be downloaded at -// http://yann.lecun.com/exdb/mnist/ -#include // NOLINT(readability/streams) -#include - -#include "glog/logging.h" -#include "google/protobuf/text_format.h" -#include "leveldb/db.h" -#include "stdint.h" - -#include "caffe/proto/caffe.pb.h" -#include "caffe/util/math_functions.hpp" - -uint32_t swap_endian(uint32_t val) { - val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); - return (val << 16) | (val >> 16); -} - -void read_image(std::ifstream* image_file, std::ifstream* label_file, - uint32_t index, uint32_t rows, uint32_t cols, - char* pixels, char* label) { - image_file->seekg(index * rows * cols + 16); - image_file->read(pixels, rows * cols); - label_file->seekg(index + 8); - label_file->read(label, 1); -} - -void convert_dataset(const char* image_filename, const char* label_filename, - const char* db_filename) { - // Open files - std::ifstream image_file(image_filename, std::ios::in | 
std::ios::binary); - std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); - CHECK(image_file) << "Unable to open file " << image_filename; - CHECK(label_file) << "Unable to open file " << label_filename; - // Read the magic and the meta data - uint32_t magic; - uint32_t num_items; - uint32_t num_labels; - uint32_t rows; - uint32_t cols; - - image_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - label_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - image_file.read(reinterpret_cast(&num_items), 4); - num_items = swap_endian(num_items); - label_file.read(reinterpret_cast(&num_labels), 4); - num_labels = swap_endian(num_labels); - CHECK_EQ(num_items, num_labels); - image_file.read(reinterpret_cast(&rows), 4); - rows = swap_endian(rows); - image_file.read(reinterpret_cast(&cols), 4); - cols = swap_endian(cols); - - // Open leveldb - leveldb::DB* db; - leveldb::Options options; - options.create_if_missing = true; - options.error_if_exists = true; - leveldb::Status status = leveldb::DB::Open( - options, db_filename, &db); - CHECK(status.ok()) << "Failed to open leveldb " << db_filename - << ". 
Is it already existing?"; - - char label_i; - char label_j; - char* pixels = new char[2 * rows * cols]; - const int kMaxKeyLength = 10; - char key[kMaxKeyLength]; - std::string value; - - caffe::Datum datum; - datum.set_channels(2); // one channel for each image in the pair - datum.set_height(rows); - datum.set_width(cols); - LOG(INFO) << "A total of " << num_items << " items."; - LOG(INFO) << "Rows: " << rows << " Cols: " << cols; - for (int itemid = 0; itemid < num_items; ++itemid) { - int i = caffe::caffe_rng_rand() % num_items; // pick a random pair - int j = caffe::caffe_rng_rand() % num_items; - read_image(&image_file, &label_file, i, rows, cols, - pixels, &label_i); - read_image(&image_file, &label_file, j, rows, cols, - pixels + (rows * cols), &label_j); - datum.set_data(pixels, 2*rows*cols); - if (label_i == label_j) { - datum.set_label(1); - } else { - datum.set_label(0); - } - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", itemid); - db->Put(leveldb::WriteOptions(), std::string(key), value); - } - - delete db; - delete pixels; -} - -int main(int argc, char** argv) { - if (argc != 4) { - printf("This script converts the MNIST dataset to the leveldb format used\n" - "by caffe to train a siamese network.\n" - "Usage:\n" - " convert_mnist_data input_image_file input_label_file " - "output_db_file\n" - "The MNIST dataset could be downloaded at\n" - " http://yann.lecun.com/exdb/mnist/\n" - "You should gunzip them after downloading.\n"); - } else { - google::InitGoogleLogging(argv[0]); - convert_dataset(argv[1], argv[2], argv[3]); - } - return 0; -} diff --git a/examples/siamese/create_lfw_siamese.sh b/examples/siamese/create_lfw_siamese.sh deleted file mode 100755 index 3790b9eb2a0..00000000000 --- a/examples/siamese/create_lfw_siamese.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env sh -# This script converts the mnist data into leveldb format. - -EXAMPLES=./build/examples/siamese -DATA=./data/lfw - -echo "Creating leveldb..." 
- -rm -rf ./examples/siamese/lfw_siamese_train_leveldb -rm -rf ./examples/siamese/lfw_siamese_test_leveldb - -$EXAMPLES/convert_lfw_siamese_data.bin \ - $DATA/traindata \ - $DATA/trainlabel \ - ./examples/siamese/lfw_siamese_train_leveldb -$EXAMPLES/convert_mnist_siamese_data.bin \ - $DATA/testdata \ - $DATA/testlabel \ - ./examples/siamese/lfw_siamese_test_leveldb - -echo "Done." diff --git a/examples/siamese/lfw_siamese.prototxt b/examples/siamese/lfw_siamese.prototxt deleted file mode 100644 index 106d9aa76f4..00000000000 --- a/examples/siamese/lfw_siamese.prototxt +++ /dev/null @@ -1,113 +0,0 @@ -name: "lfw_siamese" -input: "data" -input_dim: 10000 -input_dim: 1 -input_dim: 150 -input_dim: 80 -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 500 - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "ip2" - type: "InnerProduct" - bottom: "ip1" - top: "ip2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 10 - } -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip2" - top: "feat" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - 
num_output: 2 - } -} diff --git a/examples/siamese/lfw_siamese_solver.prototxt b/examples/siamese/lfw_siamese_solver.prototxt deleted file mode 100644 index 2aaafb63c1f..00000000000 --- a/examples/siamese/lfw_siamese_solver.prototxt +++ /dev/null @@ -1,25 +0,0 @@ -# The train/test net protocol buffer definition -net: "examples/siamese/lfw_siamese_train_test.prototxt" -# test_iter specifies how many forward passes the test should carry out. -# In the case of MNIST, we have test batch size 100 and 100 test iterations, -# covering the full 10,000 testing images. -test_iter: 100 -# Carry out testing every 500 training iterations. -test_interval: 500 -# The base learning rate, momentum and the weight decay of the network. -base_lr: 0.01 -momentum: 0.9 -weight_decay: 0.0000 -# The learning rate policy -lr_policy: "inv" -gamma: 0.0001 -power: 0.75 -# Display every 100 iterations -display: 100 -# The maximum number of iterations -max_iter: 50000 -# snapshot intermediate results -snapshot: 5000 -snapshot_prefix: "examples/siamese/lfw_siamese" -# solver mode: CPU or GPU -solver_mode: CPU diff --git a/examples/siamese/lfw_siamese_train_test.prototxt b/examples/siamese/lfw_siamese_train_test.prototxt deleted file mode 100644 index 049187bf3d4..00000000000 --- a/examples/siamese/lfw_siamese_train_test.prototxt +++ /dev/null @@ -1,349 +0,0 @@ -name: "lfw_siamese_train_test" -layer { - name: "pair_data" - type: "Data" - top: "pair_data" - top: "sim" - include { - phase: TRAIN - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/siamese/lfw_siamese_train_leveldb" - batch_size: 64 - } -} -layer { - name: "pair_data" - type: "Data" - top: "pair_data" - top: "sim" - include { - phase: TEST - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/siamese/lfw_siamese_test_leveldb" - batch_size: 100 - } -} -layer { - name: "slice_pair" - type: "Slice" - bottom: "pair_data" - top: "data" - top: "data_p" - slice_param { - slice_dim: 
1 - slice_point: 1 - } -} -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "ip2" - type: "InnerProduct" - bottom: "ip1" - top: "ip2" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip2" - top: "feat" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "conv1_p" - type: "Convolution" - bottom: "data_p" - top: "conv1_p" - param { - name: 
"conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1_p" - type: "Pooling" - bottom: "conv1_p" - top: "pool1_p" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2_p" - type: "Convolution" - bottom: "pool1_p" - top: "conv2_p" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2_p" - type: "Pooling" - bottom: "conv2_p" - top: "pool2_p" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1_p" - type: "InnerProduct" - bottom: "pool2_p" - top: "ip1_p" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1_p" - type: "ReLU" - bottom: "ip1_p" - top: "ip1_p" -} -layer { - name: "ip2_p" - type: "InnerProduct" - bottom: "ip1_p" - top: "ip2_p" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat_p" - type: "InnerProduct" - bottom: "ip2_p" - top: "feat_p" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "loss" - type: "ContrastiveLoss" - bottom: "feat" - bottom: "feat_p" - bottom: "sim" - top: "loss" - contrastive_loss_param { - margin: 1 - } -} diff --git 
a/examples/siamese/train_lfw_siamese.sh b/examples/siamese/train_lfw_siamese.sh deleted file mode 100755 index 0a879a65419..00000000000 --- a/examples/siamese/train_lfw_siamese.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env sh - -TOOLS=./build/tools - -$TOOLS/caffe train --solver=examples/siamese/lfw_siamese_solver.prototxt diff --git a/examples/triplet/3d_triplet_train_test.prototxt b/examples/triplet/3d_triplet_train_test.prototxt new file mode 100644 index 00000000000..1ac185aa2cc --- /dev/null +++ b/examples/triplet/3d_triplet_train_test.prototxt @@ -0,0 +1,181 @@ +name: "3d_triplet_train_test" +layer { + name: "data" + type: "Data" + top: "data" + top: "sim" + include { + phase: TRAIN + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/triplet/3d_triplet_train_leveldb" + batch_size: 250 + } +} +layer { + name: "data" + type: "Data" + top: "data" + top: "sim" + include { + phase: TEST + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/triplet/3d_triplet_test_leveldb" + batch_size: 250 + } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 16 + kernel_size: 8 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "pool1" + top: "pool1" +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 7 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" 
+ top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu2" + type: "ReLU" + bottom: "pool2" + top: "pool2" +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 256 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu3" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip1" + top: "feat" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 4 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "loss" + type: "TripletLoss" + bottom: "feat" + bottom: "sim" + top: "loss" + triplet_loss_param { + margin: 1 + losstype: 0 + num_triplets: 3 + } +} diff --git a/examples/triplet/convert_lfw_triplet_data.cpp b/examples/triplet/convert_lfw_triplet_data.cpp deleted file mode 100644 index 94414887995..00000000000 --- a/examples/triplet/convert_lfw_triplet_data.cpp +++ /dev/null @@ -1,128 +0,0 @@ -// Usage: -// convert_mnist_data input_image_file input_label_file output_db_file -// The MNIST dataset could be downloaded at -// http://yann.lecun.com/exdb/mnist/ -#include // NOLINT(readability/streams) -#include -#include "glog/logging.h" -#include "google/protobuf/text_format.h" -#include "leveldb/db.h" -#include "stdint.h" -#include "caffe/proto/caffe.pb.h" -#include "caffe/util/math_functions.hpp" - -uint32_t swap_endian(uint32_t val) { - val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); - return (val << 16) | (val >> 16); -} - -void read_image(std::ifstream* image_file, std::ifstream* label_file, - uint32_t index, uint32_t rows, uint32_t cols, - char* pixels, char* label_temp, signed char* label) { - image_file->seekg(index * rows * cols + 16); - 
image_file->read(pixels, rows * cols); - label_file->seekg(index * 4 + 8); - label_file->read(label_temp, 4); - for (int i = 0; i < 4; i++) - *(label+i) = (signed char)*(label_temp+i); -} - -void convert_dataset(const char* image_filename, const char* label_filename, - const char* db_filename) { - // Open files - std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); - std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); - CHECK(image_file) << "Unable to open file " << image_filename; - CHECK(label_file) << "Unable to open file " << label_filename; - // Read the magic and the meta data - uint32_t magic; - uint32_t num_items; - uint32_t num_labels; - uint32_t rows; - uint32_t cols; - - image_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - CHECK_EQ(magic, 2051) << "Incorrect image file magic."; - label_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - CHECK_EQ(magic, 2050) << "Incorrect label file magic."; - image_file.read(reinterpret_cast(&num_items), 4); - num_items = swap_endian(num_items); - label_file.read(reinterpret_cast(&num_labels), 4); - num_labels = swap_endian(num_labels); - CHECK_EQ(num_items, num_labels); - image_file.read(reinterpret_cast(&rows), 4); - rows = swap_endian(rows); - image_file.read(reinterpret_cast(&cols), 4); - cols = swap_endian(cols); - - // Open leveldb - leveldb::DB* db; - leveldb::Options options; - options.create_if_missing = true; - options.error_if_exists = true; - leveldb::Status status = leveldb::DB::Open( - options, db_filename, &db); - CHECK(status.ok()) << "Failed to open leveldb " << db_filename - << ". 
Is it already existing?"; - - char* label_temp = new char[4]; // label for unsigned char* - signed char* label_i = new signed char[4]; // label for triplet - signed char* label_j = new signed char[4]; - signed char* label_k = new signed char[4]; - signed char* label_l = new signed char[4]; // label for pair wise - signed char* label_m = new signed char[4]; - char* pixels = new char[5 * rows * cols]; - const int kMaxKeyLength = 10; - char key[kMaxKeyLength]; - std::string value; - - caffe::Datum datum; - datum.set_channels(5); // one channel for each image in the triplet and pair - datum.set_height(rows); - datum.set_width(cols); - LOG(INFO) << "A total of " << num_items << " items."; - LOG(INFO) << "Rows: " << rows << " Cols: " << cols; - for (unsigned int itemid = 0; itemid < 10 * num_items; ++itemid) { - int i = caffe::caffe_rng_rand() % num_items; // pick triplet groups - int j = caffe::caffe_rng_rand() % num_items; - int k = caffe::caffe_rng_rand() % num_items; - int l = caffe::caffe_rng_rand() % num_items; // pick pair wise groups - int m = caffe::caffe_rng_rand() % num_items; - read_image(&image_file, &label_file, i, rows, cols, // read triplet groups - pixels, label_temp, label_i); - read_image(&image_file, &label_file, j, rows, cols, - pixels + (rows * cols), label_temp, label_j); - read_image(&image_file, &label_file, k, rows, cols, - pixels + (2 * rows * cols), label_temp, label_k); - read_image(&image_file, &label_file, l, rows, cols, // read pair wise groups - pixels + (3 * rows * cols), label_temp, label_l); - read_image(&image_file, &label_file, m, rows, cols, - pixels + (4 * rows * cols), label_temp, label_m); - - datum.set_data(pixels, 5*rows*cols); // set data - bool pose_pass; - int dist_ij = (int)((*(label_i+1)-*(label_j+1))*(*(label_i+1)-*(label_j+1)) + (*(label_i+2)-*(label_j+2))*(*(label_i+2)-*(label_j+2)) + (*(label_i+3)-*(label_j+3))*(*(label_i+3)-*(label_j+3))); - int dist_ik = (int)((*(label_i+1)-*(label_k+1))*(*(label_i+1)-*(label_k+1)) + 
(*(label_i+2)-*(label_k+2))*(*(label_i+2)-*(label_k+2)) + (*(label_i+3)-*(label_k+3))*(*(label_i+3)-*(label_k+3))); - if (dist_ij < dist_ik ) - pose_pass = true; - if (((*label_i == *label_j && *label_i != *label_k) || ((*label_i == *label_j && *label_i == *label_k) && pose_pass)) && (*label_l == *label_m)) { - datum.set_label(1); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", itemid); - db->Put(leveldb::WriteOptions(), std::string(key), value); - } else { - itemid--; - datum.set_label(0); - } - } - delete db; - delete pixels; -} - -int main(int argc, char** argv) { - convert_dataset("/home/wangyida/Desktop/caffe/data/linemod/binary_image_train", "/home/wangyida/Desktop/caffe/data/linemod/binary_label_train", "/home/wangyida/Desktop/caffe/data/linemod/leveldb"); - return 0; -} - diff --git a/examples/triplet/create_lfw_triplet.sh b/examples/triplet/create_lfw_triplet.sh deleted file mode 100755 index 382a9021f10..00000000000 --- a/examples/triplet/create_lfw_triplet.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env sh -# This script converts the lfw data into leveldb format. - -EXAMPLES=./build/examples/triplet -DATA=./data/lfw - -echo "Creating leveldb..." - -rm -rf ./examples/triplet/lfw_triplet_train_leveldb -rm -rf ./examples/triplet/lfw_triplet_test_leveldb - -$EXAMPLES/convert_lfw_triplet_data.bin \ - $DATA/traindata \ - $DATA/trainlabel \ - ./examples/triplet/lfw_triplet_train_leveldb -$EXAMPLES/convert_lfw_triplet_data.bin \ - $DATA/testdata \ - $DATA/testlabel \ - ./examples/triplet/lfw_triplet_test_leveldb - -echo "Done." 
diff --git a/examples/triplet/lfw_triplet.prototxt b/examples/triplet/lfw_triplet.prototxt deleted file mode 100644 index 9537d1feb8b..00000000000 --- a/examples/triplet/lfw_triplet.prototxt +++ /dev/null @@ -1,113 +0,0 @@ -name: "lfw_siamese" -input: "data" -input_dim: 10000 -input_dim: 1 -input_dim: 150 -input_dim: 130 -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 500 - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "ip2" - type: "InnerProduct" - bottom: "ip1" - top: "ip2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 10 - } -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip2" - top: "feat" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 2 - } -} diff --git a/examples/triplet/lfw_triplet_solver.prototxt b/examples/triplet/lfw_triplet_solver.prototxt deleted file mode 100644 index eb4c2c369e9..00000000000 --- a/examples/triplet/lfw_triplet_solver.prototxt +++ /dev/null @@ -1,25 +0,0 @@ -# The train/test net protocol buffer definition -net: "examples/triplet/lfw_triplet_train_test.prototxt" -# test_iter 
specifies how many forward passes the test should carry out. -# In the case of lfw, we have test batch size 100 and 100 test iterations, -# covering the full 10,000 testing images. -test_iter: 100 -# Carry out testing every 500 training iterations. -test_interval: 500 -# The base learning rate, momentum and the weight decay of the network. -base_lr: 0.01 -momentum: 0.9 -weight_decay: 0.0000 -# The learning rate policy -lr_policy: "inv" -gamma: 0.0001 -power: 0.75 -# Display every 100 iterations -display: 100 -# The maximum number of iterations -max_iter: 50000 -# snapshot intermediate results -snapshot: 5000 -snapshot_prefix: "examples/triplet/lfw_triplet" -# solver mode: CPU or GPU -solver_mode: CPU diff --git a/examples/triplet/lfw_triplet_train_test.prototxt b/examples/triplet/lfw_triplet_train_test.prototxt deleted file mode 100644 index 59ef26e90a4..00000000000 --- a/examples/triplet/lfw_triplet_train_test.prototxt +++ /dev/null @@ -1,500 +0,0 @@ -name: "lfw_triplet_train_test" -layer { - name: "triplet_data" - type: "Data" - top: "triplet_data" - top: "sim" - include { - phase: TRAIN - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/triplet/lfw_triplet_train_leveldb" - batch_size: 64 - } -} -layer { - name: "triplet_data" - type: "Data" - top: "triplet_data" - top: "sim" - include { - phase: TEST - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/triplet/lfw_triplet_test_leveldb" - batch_size: 100 - } -} -layer { - name: "slice_triplet" - type: "Slice" - bottom: "triplet_data" - top: "data" - top: "data_true" - top: "data_false" - slice_param { - slice_dim: 1 - slice_point: 1 - slice_point: 2 - } -} -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: 
"constant" - } - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "ip2" - type: "InnerProduct" - bottom: "ip1" - top: "ip2" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip2" - top: "feat" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "conv1_true" - type: "Convolution" - bottom: "data_true" - top: "conv1_true" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1_true" - type: "Pooling" - bottom: "conv1_true" - top: "pool1_true" - 
pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2_true" - type: "Convolution" - bottom: "pool1_true" - top: "conv2_true" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2_true" - type: "Pooling" - bottom: "conv2_true" - top: "pool2_true" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1_true" - type: "InnerProduct" - bottom: "pool2_true" - top: "ip1_true" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1_true" - type: "ReLU" - bottom: "ip1_true" - top: "ip1_true" -} -layer { - name: "ip2_true" - type: "InnerProduct" - bottom: "ip1_true" - top: "ip2_true" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat_true" - type: "InnerProduct" - bottom: "ip2_true" - top: "feat_true" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "conv1_false" - type: "Convolution" - bottom: "data_false" - top: "conv1_false" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1_false" - type: "Pooling" - bottom: "conv1_false" - top: "pool1_false" - 
pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2_false" - type: "Convolution" - bottom: "pool1_false" - top: "conv2_false" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2_false" - type: "Pooling" - bottom: "conv2_false" - top: "pool2_false" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1_false" - type: "InnerProduct" - bottom: "pool2_false" - top: "ip1_false" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1_false" - type: "ReLU" - bottom: "ip1_false" - top: "ip1_false" -} -layer { - name: "ip2_false" - type: "InnerProduct" - bottom: "ip1_false" - top: "ip2_false" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat_false" - type: "InnerProduct" - bottom: "ip2_false" - top: "feat_false" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "loss" - type: "TripletLoss" - bottom: "feat" - bottom: "feat_true" - bottom: "feat_false" - bottom: "sim" - top: "loss" - triplet_loss_param { - margin: 0.2 - } -} - diff --git a/examples/triplet/train_lfw_triplet.sh b/examples/triplet/train_lfw_triplet.sh deleted file mode 100755 index 076738a5e63..00000000000 --- a/examples/triplet/train_lfw_triplet.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env sh 
- -TOOLS=./build/tools - -$TOOLS/caffe train --solver=examples/triplet/lfw_triplet_solver.prototxt From 4d0104b5691a4e3271023e65199672d5f878dcfc Mon Sep 17 00:00:00 2001 From: Wangyida Date: Mon, 10 Aug 2015 19:54:16 +0800 Subject: [PATCH 66/82] add initiate class name of triplet loss layer add initiate class name of triplet loss layer --- src/caffe/solver.cpp.orig | 803 ++++++++++++++++++++++ src/caffe/test/test_im2col_kernel.cu.orig | 210 ++++++ 2 files changed, 1013 insertions(+) create mode 100644 src/caffe/solver.cpp.orig create mode 100644 src/caffe/test/test_im2col_kernel.cu.orig diff --git a/src/caffe/solver.cpp.orig b/src/caffe/solver.cpp.orig new file mode 100644 index 00000000000..5b9ac36e2d7 --- /dev/null +++ b/src/caffe/solver.cpp.orig @@ -0,0 +1,803 @@ +#include + +#include +#include +#include + +#include "caffe/net.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/solver.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/util/upgrade_proto.hpp" + +namespace caffe { + +template +Solver::Solver(const SolverParameter& param) + : net_() { + Init(param); +} + +template +Solver::Solver(const string& param_file) + : net_() { + SolverParameter param; + ReadProtoFromTextFileOrDie(param_file, ¶m); + Init(param); +} + +template +void Solver::Init(const SolverParameter& param) { + LOG(INFO) << "Initializing solver from parameters: " << std::endl + << param.DebugString(); + param_ = param; + CHECK_GE(param_.average_loss(), 1) << "average_loss should be non-negative."; + if (param_.random_seed() >= 0) { + Caffe::set_random_seed(param_.random_seed()); + } + // Scaffolding code + InitTrainNet(); + InitTestNets(); + LOG(INFO) << "Solver scaffolding done."; + iter_ = 0; + current_step_ = 0; +} + +template +void Solver::InitTrainNet() { + const int num_train_nets = param_.has_net() + param_.has_net_param() + + param_.has_train_net() + param_.has_train_net_param(); + const string& field_names = "net, net_param, 
train_net, train_net_param"; + CHECK_GE(num_train_nets, 1) << "SolverParameter must specify a train net " + << "using one of these fields: " << field_names; + CHECK_LE(num_train_nets, 1) << "SolverParameter must not contain more than " + << "one of these fields specifying a train_net: " << field_names; + NetParameter net_param; + if (param_.has_train_net_param()) { + LOG(INFO) << "Creating training net specified in train_net_param."; + net_param.CopyFrom(param_.train_net_param()); + } else if (param_.has_train_net()) { + LOG(INFO) << "Creating training net from train_net file: " + << param_.train_net(); + ReadNetParamsFromTextFileOrDie(param_.train_net(), &net_param); + } + if (param_.has_net_param()) { + LOG(INFO) << "Creating training net specified in net_param."; + net_param.CopyFrom(param_.net_param()); + } + if (param_.has_net()) { + LOG(INFO) << "Creating training net from net file: " << param_.net(); + ReadNetParamsFromTextFileOrDie(param_.net(), &net_param); + } + // Set the correct NetState. We start with the solver defaults (lowest + // precedence); then, merge in any NetState specified by the net_param itself; + // finally, merge in any NetState specified by the train_state (highest + // precedence). 
+ NetState net_state; + net_state.set_phase(TRAIN); + net_state.MergeFrom(net_param.state()); + net_state.MergeFrom(param_.train_state()); + net_param.mutable_state()->CopyFrom(net_state); + net_.reset(new Net(net_param)); +} + +template +void Solver::InitTestNets() { + const bool has_net_param = param_.has_net_param(); + const bool has_net_file = param_.has_net(); + const int num_generic_nets = has_net_param + has_net_file; + CHECK_LE(num_generic_nets, 1) + << "Both net_param and net_file may not be specified."; + const int num_test_net_params = param_.test_net_param_size(); + const int num_test_net_files = param_.test_net_size(); + const int num_test_nets = num_test_net_params + num_test_net_files; + if (num_generic_nets) { + CHECK_GE(param_.test_iter_size(), num_test_nets) + << "test_iter must be specified for each test network."; + } else { + CHECK_EQ(param_.test_iter_size(), num_test_nets) + << "test_iter must be specified for each test network."; + } + // If we have a generic net (specified by net or net_param, rather than + // test_net or test_net_param), we may have an unlimited number of actual + // test networks -- the actual number is given by the number of remaining + // test_iters after any test nets specified by test_net_param and/or test_net + // are evaluated. 
+ const int num_generic_net_instances = param_.test_iter_size() - num_test_nets; + const int num_test_net_instances = num_test_nets + num_generic_net_instances; + if (param_.test_state_size()) { + CHECK_EQ(param_.test_state_size(), num_test_net_instances) + << "test_state must be unspecified or specified once per test net."; + } + if (num_test_net_instances) { + CHECK_GT(param_.test_interval(), 0); + } + int test_net_id = 0; + vector sources(num_test_net_instances); + vector net_params(num_test_net_instances); + for (int i = 0; i < num_test_net_params; ++i, ++test_net_id) { + sources[test_net_id] = "test_net_param"; + net_params[test_net_id].CopyFrom(param_.test_net_param(i)); + } + for (int i = 0; i < num_test_net_files; ++i, ++test_net_id) { + sources[test_net_id] = "test_net file: " + param_.test_net(i); + ReadNetParamsFromTextFileOrDie(param_.test_net(i), + &net_params[test_net_id]); + } + const int remaining_test_nets = param_.test_iter_size() - test_net_id; + if (has_net_param) { + for (int i = 0; i < remaining_test_nets; ++i, ++test_net_id) { + sources[test_net_id] = "net_param"; + net_params[test_net_id].CopyFrom(param_.net_param()); + } + } + if (has_net_file) { + for (int i = 0; i < remaining_test_nets; ++i, ++test_net_id) { + sources[test_net_id] = "net file: " + param_.net(); + ReadNetParamsFromTextFileOrDie(param_.net(), &net_params[test_net_id]); + } + } + test_nets_.resize(num_test_net_instances); + for (int i = 0; i < num_test_net_instances; ++i) { + // Set the correct NetState. We start with the solver defaults (lowest + // precedence); then, merge in any NetState specified by the net_param + // itself; finally, merge in any NetState specified by the test_state + // (highest precedence). 
+ NetState net_state; + net_state.set_phase(TEST); + net_state.MergeFrom(net_params[i].state()); + if (param_.test_state_size()) { + net_state.MergeFrom(param_.test_state(i)); + } + net_params[i].mutable_state()->CopyFrom(net_state); + LOG(INFO) + << "Creating test net (#" << i << ") specified by " << sources[i]; + test_nets_[i].reset(new Net(net_params[i])); + test_nets_[i]->set_debug_info(param_.debug_info()); + } +} + +template +void Solver::Step(int iters) { + vector*> bottom_vec; + const int start_iter = iter_; + const int stop_iter = iter_ + iters; + int average_loss = this->param_.average_loss(); + vector losses; + Dtype smoothed_loss = 0; + + while (iter_ < stop_iter) { + // zero-init the params + for (int i = 0; i < net_->params().size(); ++i) { + shared_ptr > blob = net_->params()[i]; + switch (Caffe::mode()) { + case Caffe::CPU: + caffe_set(blob->count(), static_cast(0), + blob->mutable_cpu_diff()); + break; + case Caffe::GPU: +#ifndef CPU_ONLY + caffe_gpu_set(blob->count(), static_cast(0), + blob->mutable_gpu_diff()); +#else + NO_GPU; +#endif + break; + } + } + + if (param_.test_interval() && iter_ % param_.test_interval() == 0 + && (iter_ > 0 || param_.test_initialization())) { + TestAll(); + } + + const bool display = param_.display() && iter_ % param_.display() == 0; + net_->set_debug_info(display && param_.debug_info()); + // accumulate the loss and gradient + Dtype loss = 0; + for (int i = 0; i < param_.iter_size(); ++i) { + loss += net_->ForwardBackward(bottom_vec); + } + loss /= param_.iter_size(); + // average the loss across iterations for smoothed reporting + if (losses.size() < average_loss) { + losses.push_back(loss); + int size = losses.size(); + smoothed_loss = (smoothed_loss * (size - 1) + loss) / size; + } else { + int idx = (iter_ - start_iter) % average_loss; + smoothed_loss += (loss - losses[idx]) / average_loss; + losses[idx] = loss; + } + if (display) { + LOG(INFO) << "Iteration " << iter_ << ", loss = " << smoothed_loss; + const 
vector*>& result = net_->output_blobs(); + int score_index = 0; + for (int j = 0; j < result.size(); ++j) { + const Dtype* result_vec = result[j]->cpu_data(); + const string& output_name = + net_->blob_names()[net_->output_blob_indices()[j]]; + const Dtype loss_weight = + net_->blob_loss_weights()[net_->output_blob_indices()[j]]; + for (int k = 0; k < result[j]->count(); ++k) { + ostringstream loss_msg_stream; + if (loss_weight) { + loss_msg_stream << " (* " << loss_weight + << " = " << loss_weight * result_vec[k] << " loss)"; + } + LOG(INFO) << " Train net output #" + << score_index++ << ": " << output_name << " = " + << result_vec[k] << loss_msg_stream.str(); + } + } + } + ApplyUpdate(); + + // Increment the internal iter_ counter -- its value should always indicate + // the number of times the weights have been updated. + ++iter_; + + // Save a snapshot if needed. + if (param_.snapshot() && iter_ % param_.snapshot() == 0) { + Snapshot(); + } + } +} + +template +void Solver::Solve(const char* resume_file) { + LOG(INFO) << "Solving " << net_->name(); + LOG(INFO) << "Learning Rate Policy: " << param_.lr_policy(); + + if (resume_file) { + LOG(INFO) << "Restoring previous solver status from " << resume_file; + Restore(resume_file); + } + + // For a network that is trained by the solver, no bottom or top vecs + // should be given, and we will just provide dummy vecs. + Step(param_.max_iter() - iter_); + // If we haven't already, save a snapshot after optimization, unless + // overridden by setting snapshot_after_train := false + if (param_.snapshot_after_train() + && (!param_.snapshot() || iter_ % param_.snapshot() != 0)) { + Snapshot(); + } + // After the optimization is done, run an additional train and test pass to + // display the train and test loss/outputs if appropriate (based on the + // display and test_interval settings, respectively). 
Unlike in the rest of + // training, for the train net we only run a forward pass as we've already + // updated the parameters "max_iter" times -- this final pass is only done to + // display the loss, which is computed in the forward pass. + if (param_.display() && iter_ % param_.display() == 0) { + Dtype loss; + net_->ForwardPrefilled(&loss); + LOG(INFO) << "Iteration " << iter_ << ", loss = " << loss; + } + if (param_.test_interval() && iter_ % param_.test_interval() == 0) { + TestAll(); + } + LOG(INFO) << "Optimization Done."; +} + + +template +void Solver::TestAll() { + for (int test_net_id = 0; test_net_id < test_nets_.size(); ++test_net_id) { + Test(test_net_id); + } +} + +template +void Solver::Test(const int test_net_id) { + LOG(INFO) << "Iteration " << iter_ + << ", Testing net (#" << test_net_id << ")"; + CHECK_NOTNULL(test_nets_[test_net_id].get())-> + ShareTrainedLayersWith(net_.get()); + vector test_score; + vector test_score_output_id; + vector*> bottom_vec; + const shared_ptr >& test_net = test_nets_[test_net_id]; + Dtype loss = 0; + for (int i = 0; i < param_.test_iter(test_net_id); ++i) { + Dtype iter_loss; + const vector*>& result = + test_net->Forward(bottom_vec, &iter_loss); + if (param_.test_compute_loss()) { + loss += iter_loss; + } + if (i == 0) { + for (int j = 0; j < result.size(); ++j) { + const Dtype* result_vec = result[j]->cpu_data(); + for (int k = 0; k < result[j]->count(); ++k) { + test_score.push_back(result_vec[k]); + test_score_output_id.push_back(j); + } + } + } else { + int idx = 0; + for (int j = 0; j < result.size(); ++j) { + const Dtype* result_vec = result[j]->cpu_data(); + for (int k = 0; k < result[j]->count(); ++k) { + test_score[idx++] += result_vec[k]; + } + } + } + } + if (param_.test_compute_loss()) { + loss /= param_.test_iter(test_net_id); + LOG(INFO) << "Test loss: " << loss; + } + for (int i = 0; i < test_score.size(); ++i) { + const int output_blob_index = + 
test_net->output_blob_indices()[test_score_output_id[i]]; + const string& output_name = test_net->blob_names()[output_blob_index]; + const Dtype loss_weight = test_net->blob_loss_weights()[output_blob_index]; + ostringstream loss_msg_stream; + const Dtype mean_score = test_score[i] / param_.test_iter(test_net_id); + if (loss_weight) { + loss_msg_stream << " (* " << loss_weight + << " = " << loss_weight * mean_score << " loss)"; + } + LOG(INFO) << " Test net output #" << i << ": " << output_name << " = " + << mean_score << loss_msg_stream.str(); + } +} + +<<<<<<< e29f9656158cb307d3fb4a78c63aa2247c5ad57a +template +void Solver::Snapshot() { + CHECK(Caffe::root_solver()); + string model_filename; + switch (param_.snapshot_format()) { + case caffe::SolverParameter_SnapshotFormat_BINARYPROTO: + model_filename = SnapshotToBinaryProto(); + break; + case caffe::SolverParameter_SnapshotFormat_HDF5: + model_filename = SnapshotToHDF5(); + break; + default: + LOG(FATAL) << "Unsupported snapshot format."; + } + + SnapshotSolverState(model_filename); +} +======= +>>>>>>> macro define in upgrade_proto + +template +void Solver::Snapshot() { + NetParameter net_param; + // For intermediate results, we will also dump the gradient values. 
+ net_->ToProto(&net_param, param_.snapshot_diff()); + string filename(param_.snapshot_prefix()); + string model_filename, snapshot_filename; + const int kBufferSize = 20; + char iter_str_buffer[kBufferSize]; + snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_); + filename += iter_str_buffer; + model_filename = filename + ".caffemodel"; + LOG(INFO) << "Snapshotting to " << model_filename; + WriteProtoToBinaryFile(net_param, model_filename.c_str()); + SolverState state; + SnapshotSolverState(&state); + state.set_iter(iter_); + state.set_learned_net(model_filename); + state.set_current_step(current_step_); + snapshot_filename = filename + ".solverstate"; + LOG(INFO) << "Snapshotting solver state to " << snapshot_filename; + WriteProtoToBinaryFile(state, snapshot_filename.c_str()); +} + +template +void Solver::Restore(const char* state_file) { + SolverState state; + NetParameter net_param; + ReadProtoFromBinaryFile(state_file, &state); + if (state.has_learned_net()) { + ReadNetParamsFromBinaryFileOrDie(state.learned_net().c_str(), &net_param); + net_->CopyTrainedLayersFrom(net_param); + } + iter_ = state.iter(); + current_step_ = state.current_step(); + RestoreSolverState(state); +} + + +// Return the current learning rate. The currently implemented learning rate +// policies are as follows: +// - fixed: always return base_lr. +// - step: return base_lr * gamma ^ (floor(iter / step)) +// - exp: return base_lr * gamma ^ iter +// - inv: return base_lr * (1 + gamma * iter) ^ (- power) +// - multistep: similar to step but it allows non uniform steps defined by +// stepvalue +// - poly: the effective learning rate follows a polynomial decay, to be +// zero by the max_iter. 
return base_lr (1 - iter/max_iter) ^ (power) +// - sigmoid: the effective learning rate follows a sigmod decay +// return base_lr ( 1/(1 + exp(-gamma * (iter - stepsize)))) +// +// where base_lr, max_iter, gamma, step, stepvalue and power are defined +// in the solver parameter protocol buffer, and iter is the current iteration. +template +Dtype SGDSolver::GetLearningRate() { + Dtype rate; + const string& lr_policy = this->param_.lr_policy(); + if (lr_policy == "fixed") { + rate = this->param_.base_lr(); + } else if (lr_policy == "step") { + this->current_step_ = this->iter_ / this->param_.stepsize(); + rate = this->param_.base_lr() * + pow(this->param_.gamma(), this->current_step_); + } else if (lr_policy == "exp") { + rate = this->param_.base_lr() * pow(this->param_.gamma(), this->iter_); + } else if (lr_policy == "inv") { + rate = this->param_.base_lr() * + pow(Dtype(1) + this->param_.gamma() * this->iter_, + - this->param_.power()); + } else if (lr_policy == "multistep") { + if (this->current_step_ < this->param_.stepvalue_size() && + this->iter_ >= this->param_.stepvalue(this->current_step_)) { + this->current_step_++; + LOG(INFO) << "MultiStep Status: Iteration " << + this->iter_ << ", step = " << this->current_step_; + } + rate = this->param_.base_lr() * + pow(this->param_.gamma(), this->current_step_); + } else if (lr_policy == "poly") { + rate = this->param_.base_lr() * pow(Dtype(1.) - + (Dtype(this->iter_) / Dtype(this->param_.max_iter())), + this->param_.power()); + } else if (lr_policy == "sigmoid") { + rate = this->param_.base_lr() * (Dtype(1.) / + (Dtype(1.) 
+ exp(-this->param_.gamma() * (Dtype(this->iter_) - + Dtype(this->param_.stepsize()))))); + } else { + LOG(FATAL) << "Unknown learning rate policy: " << lr_policy; + } + return rate; +} + +template +void SGDSolver::PreSolve() { + // Initialize the history + const vector > >& net_params = this->net_->params(); + history_.clear(); + update_.clear(); + temp_.clear(); + for (int i = 0; i < net_params.size(); ++i) { + const vector& shape = net_params[i]->shape(); + history_.push_back(shared_ptr >(new Blob(shape))); + update_.push_back(shared_ptr >(new Blob(shape))); + temp_.push_back(shared_ptr >(new Blob(shape))); + } +} + +template +void SGDSolver::ClipGradients() { + const Dtype clip_gradients = this->param_.clip_gradients(); + if (clip_gradients < 0) { return; } + const vector > >& net_params = this->net_->params(); + Dtype sumsq_diff = 0; + for (int i = 0; i < net_params.size(); ++i) { + if (this->net_->param_owners()[i] < 0) { + sumsq_diff += net_params[i]->sumsq_diff(); + } + } + const Dtype l2norm_diff = std::sqrt(sumsq_diff); + if (l2norm_diff > clip_gradients) { + Dtype scale_factor = clip_gradients / l2norm_diff; + LOG(INFO) << "Gradient clipping: scaling down gradients (L2 norm " + << l2norm_diff << " > " << clip_gradients << ") " + << "by scale factor " << scale_factor; + for (int i = 0; i < net_params.size(); ++i) { + if (this->net_->param_owners()[i] < 0) { + net_params[i]->scale_diff(scale_factor); + } + } + } +} + +template +void SGDSolver::ApplyUpdate() { + Dtype rate = GetLearningRate(); + if (this->param_.display() && this->iter_ % this->param_.display() == 0) { + LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; + } + ClipGradients(); + for (int param_id = 0; param_id < this->net_->params().size(); ++param_id) { + Normalize(param_id); + Regularize(param_id); + ComputeUpdateValue(param_id, rate); + } + this->net_->Update(); +} + +template +void SGDSolver::Normalize(int param_id) { + if (this->param_.iter_size() == 1) { return; } + // 
Scale gradient to counterbalance accumulation. + const vector > >& net_params = this->net_->params(); + const Dtype accum_normalization = Dtype(1.) / this->param_.iter_size(); + switch (Caffe::mode()) { + case Caffe::CPU: { + caffe_scal(net_params[param_id]->count(), accum_normalization, + net_params[param_id]->mutable_cpu_diff()); + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + caffe_gpu_scal(net_params[param_id]->count(), accum_normalization, + net_params[param_id]->mutable_gpu_diff()); +#else + NO_GPU; +#endif + break; + } + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +void SGDSolver::Regularize(int param_id) { + const vector > >& net_params = this->net_->params(); + const vector& net_params_weight_decay = + this->net_->params_weight_decay(); + Dtype weight_decay = this->param_.weight_decay(); + string regularization_type = this->param_.regularization_type(); + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + switch (Caffe::mode()) { + case Caffe::CPU: { + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else if (regularization_type == "L1") { + caffe_cpu_sign(net_params[param_id]->count(), + net_params[param_id]->cpu_data(), + temp_[param_id]->mutable_cpu_data()); + caffe_axpy(net_params[param_id]->count(), + local_decay, + temp_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } + } + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else if (regularization_type == "L1") { + 
caffe_gpu_sign(net_params[param_id]->count(), + net_params[param_id]->gpu_data(), + temp_[param_id]->mutable_gpu_data()); + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + temp_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } + } +#else + NO_GPU; +#endif + break; + } + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { + const vector > >& net_params = this->net_->params(); + const vector& net_params_lr = this->net_->params_lr(); + Dtype momentum = this->param_.momentum(); + Dtype local_rate = rate * net_params_lr[param_id]; + // Compute the update to history, then copy it to the parameter diff. + switch (Caffe::mode()) { + case Caffe::CPU: { + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), momentum, + history_[param_id]->mutable_cpu_data()); + caffe_copy(net_params[param_id]->count(), + history_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), momentum, + history_[param_id]->mutable_gpu_data()); + caffe_copy(net_params[param_id]->count(), + history_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); +#else + NO_GPU; +#endif + break; + } + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +void SGDSolver::SnapshotSolverState(SolverState* state) { + state->clear_history(); + for (int i = 0; i < history_.size(); ++i) { + // Add history + BlobProto* history_blob = state->add_history(); + history_[i]->ToProto(history_blob); + } +} + +template +void SGDSolver::RestoreSolverState(const SolverState& state) { + CHECK_EQ(state.history_size(), history_.size()) + << "Incorrect length of history 
blobs."; + LOG(INFO) << "SGDSolver: restoring history"; + for (int i = 0; i < history_.size(); ++i) { + history_[i]->FromProto(state.history(i)); + } +} + +template +void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { + const vector > >& net_params = this->net_->params(); + const vector& net_params_lr = this->net_->params_lr(); + Dtype momentum = this->param_.momentum(); + Dtype local_rate = rate * net_params_lr[param_id]; + switch (Caffe::mode()) { + case Caffe::CPU: { + // save history momentum for stepping back + caffe_copy(net_params[param_id]->count(), + this->history_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + // update history + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), momentum, + this->history_[param_id]->mutable_cpu_data()); + + // compute update: step back then over step + caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, + this->history_[param_id]->cpu_data(), -momentum, + this->update_[param_id]->mutable_cpu_data()); + + // copy + caffe_copy(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + // save history momentum for stepping back + caffe_copy(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + // update history + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), momentum, + this->history_[param_id]->mutable_gpu_data()); + + // compute update: step back then over step + caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, + this->history_[param_id]->gpu_data(), -momentum, + this->update_[param_id]->mutable_gpu_data()); + + // copy + caffe_copy(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); +#else + NO_GPU; +#endif + 
break; + } + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +void AdaGradSolver::ComputeUpdateValue(int param_id, Dtype rate) { + const vector > >& net_params = this->net_->params(); + const vector& net_params_lr = this->net_->params_lr(); + Dtype delta = this->param_.delta(); + Dtype local_rate = rate * net_params_lr[param_id]; + switch (Caffe::mode()) { + case Caffe::CPU: { + // compute square of gradient in update + caffe_powx(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), Dtype(2), + this->update_[param_id]->mutable_cpu_data()); + + // update history + caffe_add(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), + this->history_[param_id]->cpu_data(), + this->history_[param_id]->mutable_cpu_data()); + + // prepare update + caffe_powx(net_params[param_id]->count(), + this->history_[param_id]->cpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_cpu_data()); + + caffe_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_cpu_data()); + + caffe_div(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), + this->update_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + // scale and copy + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->cpu_data(), Dtype(0), + net_params[param_id]->mutable_cpu_diff()); + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + // compute square of gradient in update + caffe_gpu_powx(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), Dtype(2), + this->update_[param_id]->mutable_gpu_data()); + + // update history + caffe_gpu_add(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + this->history_[param_id]->gpu_data(), + this->history_[param_id]->mutable_gpu_data()); + + // prepare update + caffe_gpu_powx(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), Dtype(0.5), + 
this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_div(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), + this->update_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + // scale and copy + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->gpu_data(), Dtype(0), + net_params[param_id]->mutable_gpu_diff()); +#else + NO_GPU; +#endif + break; + } + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +INSTANTIATE_CLASS(Solver); +INSTANTIATE_CLASS(SGDSolver); +INSTANTIATE_CLASS(NesterovSolver); +INSTANTIATE_CLASS(AdaGradSolver); + +} // namespace caffe diff --git a/src/caffe/test/test_im2col_kernel.cu.orig b/src/caffe/test/test_im2col_kernel.cu.orig new file mode 100644 index 00000000000..e26f4af50d1 --- /dev/null +++ b/src/caffe/test/test_im2col_kernel.cu.orig @@ -0,0 +1,210 @@ +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/util/im2col.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +// Forward declare kernel functions +template +__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int height_col, const int width_col, + Dtype* data_col); + +template +__global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im, + const int* im_shape, const int* col_shape, + const int* kernel_shape, const int* pad, const int* stride, + Dtype* data_col); + +extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; + +template +class Im2colKernelTest : public ::testing::Test { + protected: + Im2colKernelTest() + // big so launches > 1024 
threads + : blob_bottom_(new Blob(5, 500, 10, 10)), + blob_kernel_shape_(new Blob()), + blob_stride_(new Blob()), + blob_pad_(new Blob()), + blob_top_(new Blob()), + blob_top_cpu_(new Blob()) { + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + vector dim_blob_shape(1, 2); + blob_kernel_shape_->Reshape(dim_blob_shape); + blob_stride_->Reshape(dim_blob_shape); + blob_pad_->Reshape(dim_blob_shape); + + height_ = blob_bottom_->height(); + width_ = blob_bottom_->width(); + channels_ = blob_bottom_->channels(); + pad_ = 0; + stride_ = 2; + kernel_size_ = 3; + height_col_ = (height_ + 2 * pad_ - kernel_size_) / stride_ + 1; + width_col_ = (width_ + 2 * pad_ - kernel_size_) / stride_ + 1; + + for (int i = 0; i < 2; ++i) { + blob_kernel_shape_->mutable_cpu_data()[i] = kernel_size_; + blob_stride_->mutable_cpu_data()[i] = stride_; + blob_pad_->mutable_cpu_data()[i] = pad_; + } + } + + virtual ~Im2colKernelTest() { + delete blob_bottom_; + delete blob_top_; + delete blob_top_cpu_; + delete blob_kernel_shape_; + delete blob_stride_; + delete blob_pad_; + } + + Blob* const blob_kernel_shape_; + Blob* const blob_stride_; + Blob* const blob_pad_; + Blob* const blob_bottom_; + Blob* const blob_top_; + Blob* const blob_top_cpu_; + int height_; + int width_; + int channels_; + int pad_; + int stride_; + int kernel_size_; + int height_col_; + int width_col_; +}; + +TYPED_TEST_CASE(Im2colKernelTest, TestDtypes); + +<<<<<<< 6e71b00a8a0000eeb969029ee5665674f8fd7802 +TYPED_TEST(Im2colKernelTest, Test2D) { +======= +TYPED_TEST(Im2colKernelTest, TestGPU) { + Caffe::set_mode(Caffe::GPU); + +>>>>>>> New triplet loss layer added(beta1 version-no test source files) + // Reshape the blobs to correct size for im2col output + this->blob_top_->Reshape(this->blob_bottom_->num(), + this->channels_ * this->kernel_size_ * this->kernel_size_, + this->height_col_, + this->width_col_); + + this->blob_top_cpu_->Reshape(this->blob_bottom_->num(), + 
this->channels_ * this->kernel_size_ * this->kernel_size_, + this->height_col_, + this->width_col_); + + const TypeParam* bottom_data = this->blob_bottom_->gpu_data(); + TypeParam* top_data = this->blob_top_->mutable_gpu_data(); + TypeParam* cpu_data = this->blob_top_cpu_->mutable_cpu_data(); + + // CPU Version + for (int n = 0; n < this->blob_bottom_->num(); ++n) { + im2col_cpu(this->blob_bottom_->cpu_data() + this->blob_bottom_->offset(n), + this->channels_, this->height_, this->width_, + this->kernel_size_, this->kernel_size_, this->pad_, this->pad_, + this->stride_, this->stride_, + cpu_data + this->blob_top_cpu_->offset(n)); + } + + // GPU version + int num_kernels = this->channels_ * this->height_col_ * this->width_col_; + int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); + + // Launch with different grid sizes + for (int grid_div = 2; grid_div <= 8; grid_div++) { + for (int n = 0; n < this->blob_bottom_->num(); ++n) { + int grid_dim = default_grid_dim/grid_div; + // NOLINT_NEXT_LINE(whitespace/operators) + im2col_gpu_kernel<<>>( + num_kernels, bottom_data + this->blob_bottom_->offset(n), + this->height_, this->width_, this->kernel_size_, this->kernel_size_, + this->pad_, this->pad_, this->stride_, this->stride_, + this->height_col_, this->width_col_, + top_data + this->blob_top_->offset(n)); + CUDA_POST_KERNEL_CHECK; + } + + // Compare results against CPU version + for (int i = 0; i < this->blob_top_->count(); ++i) { + TypeParam cpuval = cpu_data[i]; + TypeParam gpuval = this->blob_top_->cpu_data()[i]; + EXPECT_EQ(cpuval, gpuval); + if (cpuval != gpuval) { + break; + } + } + } +} + +TYPED_TEST(Im2colKernelTest, TestND) { + // Reshape the blobs to correct size for im2col output + this->blob_top_->Reshape(this->blob_bottom_->num(), + this->channels_ * this->kernel_size_ * this->kernel_size_, + this->height_col_, + this->width_col_); + + this->blob_top_cpu_->ReshapeLike(*this->blob_top_); + + const TypeParam* bottom_data_cpu = 
this->blob_bottom_->cpu_data(); + TypeParam* top_data_cpu = this->blob_top_cpu_->mutable_cpu_data(); + + // CPU Version + for (int n = 0; n < this->blob_bottom_->num(); ++n) { + im2col_nd_cpu(bottom_data_cpu + this->blob_bottom_->offset(n), 2, + this->blob_bottom_->shape().data() + 1, + this->blob_top_cpu_->shape().data() + 1, + this->blob_kernel_shape_->cpu_data(), + this->blob_pad_->cpu_data(), this->blob_stride_->cpu_data(), + top_data_cpu + this->blob_top_cpu_->offset(n)); + } + + // GPU version + int num_kernels = this->channels_ * this->height_col_ * this->width_col_; + int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); + const TypeParam* bottom_data_gpu = this->blob_bottom_->gpu_data(); + + // Launch with different grid sizes + for (int grid_div = 2; grid_div <= 8; grid_div++) { + for (int n = 0; n < this->blob_bottom_->num(); ++n) { + const int grid_dim = default_grid_dim / grid_div; + TypeParam* top_data_gpu = this->blob_top_->mutable_gpu_data(); + // NOLINT_NEXT_LINE(whitespace/operators) + im2col_nd_gpu_kernel<<>>( + num_kernels, bottom_data_gpu + this->blob_bottom_->offset(n), + this->blob_bottom_->gpu_shape() + 1, this->blob_top_->gpu_shape() + 1, + this->blob_kernel_shape_->gpu_data(), this->blob_pad_->gpu_data(), + this->blob_stride_->gpu_data(), + top_data_gpu + this->blob_top_->offset(n)); + CUDA_POST_KERNEL_CHECK; + } + + // Compare results against CPU version + for (int i = 0; i < this->blob_top_->count(); ++i) { + TypeParam cpuval = top_data_cpu[i]; + TypeParam gpuval = this->blob_top_->cpu_data()[i]; + EXPECT_EQ(cpuval, gpuval); + if (cpuval != gpuval) { + break; + } + } + } +} + +} // namespace caffe From a0204f0ef3f8997911ff4603c793b2b2ccca1778 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Wed, 12 Aug 2015 12:50:56 +0800 Subject: [PATCH 67/82] debug GPU triplet loss codes for loss type 0 closest sample fix --- examples/triplet/convert_3d_triplet_data.cpp | 2 +- src/caffe/layers/triplet_loss_layer.cu.orig | 168 ++++--------------- 2 files 
changed, 33 insertions(+), 137 deletions(-) diff --git a/examples/triplet/convert_3d_triplet_data.cpp b/examples/triplet/convert_3d_triplet_data.cpp index ce1981d90da..aa61a164fa9 100644 --- a/examples/triplet/convert_3d_triplet_data.cpp +++ b/examples/triplet/convert_3d_triplet_data.cpp @@ -132,7 +132,7 @@ void convert_dataset(const char* image_filename, const char* label_filename, float dist_ij = std::sqrt(ij_x + ij_y + ij_z); float dist_im = std::sqrt(im_x + im_y + im_z); - if (*label_i == *label_j && dist_ij < 100/2) + if (*label_i == *label_j && dist_ij < 100/2 && dist_ij != 0) pair_pass = true; if (pair_pass && (*label_i != *label_k)) triplet1_pass = true; diff --git a/src/caffe/layers/triplet_loss_layer.cu.orig b/src/caffe/layers/triplet_loss_layer.cu.orig index 1415944a03e..717e0b87e8c 100644 --- a/src/caffe/layers/triplet_loss_layer.cu.orig +++ b/src/caffe/layers/triplet_loss_layer.cu.orig @@ -30,7 +30,11 @@ void TripletLossLayer::Forward_gpu( dim, diff_pos.gpu_data() + i*dim, diff_pos.gpu_data() + i*dim, +<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_pos.mutable_cpu_data() + i); +======= + dist_sq_pos.mutable_gpu_data() + i); +>>>>>>> debug GPU triplet loss codes for loss type 0 // a b is a similar pair for pair wise // loss accumulated by the pair wise part loss += dist_sq_pos.gpu_data()[i]; @@ -44,29 +48,15 @@ void TripletLossLayer::Forward_gpu( bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + i*dim); // reference-negative -<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 caffe_gpu_dot( dim, diff_neg.gpu_data() + i*dim, diff_neg.gpu_data() + i*dim, +<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_neg.mutable_cpu_data() + i); ======= - caffe_gpu_powx( - dim, - diff_neg.mutable_gpu_data() + i*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated 
from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + i); // \Sum(reference-pose_close)^2 ->>>>>>> add initiate class name of triplet loss layer + dist_sq_neg.mutable_gpu_data() + i); +>>>>>>> debug GPU triplet loss codes for loss type 0 // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[i] -= dist_sq_neg.gpu_data()[i]; // loss accumulated accumulated by the triplet part @@ -87,7 +77,11 @@ void TripletLossLayer::Forward_gpu( dim, diff_pos.gpu_data() + i*dim, diff_pos.gpu_data() + i*dim, +<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_pos.mutable_cpu_data() + i); +======= + dist_sq_pos.mutable_gpu_data() + i); +>>>>>>> debug GPU triplet loss codes for loss type 0 // a b is a similar pair for pair wise // loss accumulated by the pair wise part loss += dist_sq_pos.gpu_data()[i]; @@ -100,29 +94,15 @@ void TripletLossLayer::Forward_gpu( bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + i*dim); // reference-negative -<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 caffe_gpu_dot( dim, diff_neg.gpu_data() + i*dim, diff_neg.gpu_data() + i*dim, +<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_neg.mutable_cpu_data() + i); ======= - caffe_gpu_powx( - dim, - diff_neg.mutable_gpu_data() + i*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + i); // \Sum(reference-pose_close)^2 ->>>>>>> add initiate class name of 
triplet loss layer + dist_sq_neg.mutable_gpu_data() + i); +>>>>>>> debug GPU triplet loss codes for loss type 0 // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[i] = 1 - \ dist_sq_neg.gpu_data()[i] / dist_sq_.mutable_gpu_data()[i]; @@ -166,29 +146,15 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + j*dim); // reference-negative -<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 caffe_gpu_dot( dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, +<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_neg.mutable_cpu_data() + j); ======= - caffe_gpu_powx( - dim, - diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum(ref-close)^2 ->>>>>>> add initiate class name of triplet loss layer + dist_sq_neg.mutable_gpu_data() + j); +>>>>>>> debug GPU triplet loss codes for loss type 0 // Triplet loss accumulation // a and negative[triplet] is a similar pair for triplet dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; @@ -240,29 +206,15 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, // Triplet loss accumulation // a and negative[triplet] is a similar pair for triplet dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; -<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 caffe_gpu_dot( dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, +<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_neg.mutable_cpu_data() + j); ======= - caffe_gpu_powx( - dim, - 
diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum (ref-close)^2 ->>>>>>> add initiate class name of triplet loss layer + dist_sq_neg.mutable_gpu_data() + j); +>>>>>>> debug GPU triplet loss codes for loss type 0 // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { @@ -294,29 +246,15 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, // Triplet loss accumulation // a and negative[triplet] is a similar pair for triplet dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; -<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 caffe_gpu_dot( dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, +<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_neg.mutable_cpu_data() + j); ======= - caffe_gpu_powx( - dim, - diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum (ref-close)^2 ->>>>>>> add initiate class name of triplet loss layer + dist_sq_neg.mutable_gpu_data() + j); +>>>>>>> debug GPU triplet loss codes for loss type 0 // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { @@ 
-359,29 +297,15 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + j*dim); // reference-negative -<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 caffe_gpu_dot( dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, +<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_neg.mutable_cpu_data() + j); ======= - caffe_gpu_powx( - dim, - diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum (ref-close)^2 ->>>>>>> add initiate class name of triplet loss layer + dist_sq_neg.mutable_gpu_data() + j); +>>>>>>> debug GPU triplet loss codes for loss type 0 // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] = 1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; @@ -431,29 +355,15 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + j*dim); // reference-negative -<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 caffe_gpu_dot( dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, +<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_neg.mutable_cpu_data() + j); ======= - caffe_gpu_powx( - dim, - diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - 
CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum (ref-close)^2 ->>>>>>> add initiate class name of triplet loss layer + dist_sq_neg.mutable_gpu_data() + j); +>>>>>>> debug GPU triplet loss codes for loss type 0 // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] = 1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; @@ -488,29 +398,15 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + i)*dim, diff_neg.mutable_gpu_data() + j*dim); // reference-negative -<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 caffe_gpu_dot( dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, +<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_neg.mutable_cpu_data() + j); ======= - caffe_gpu_powx( - dim, - diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum (ref-close)^2 ->>>>>>> add initiate class name of triplet loss layer + dist_sq_neg.mutable_gpu_data() + j); +>>>>>>> debug GPU triplet loss codes for loss type 0 // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] = 1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; From 6af90386551ae96a1468f15dc0bfc8ac566ea6d9 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Sun, 16 Aug 2015 21:43:45 +0800 Subject: [PATCH 68/82] add RGB data training as an option in triplet training format fix --- 
.../layers/triplet_loss_layer.cu.orig.orig | 464 ++++++++++++++++++ 1 file changed, 464 insertions(+) create mode 100644 src/caffe/layers/triplet_loss_layer.cu.orig.orig diff --git a/src/caffe/layers/triplet_loss_layer.cu.orig.orig b/src/caffe/layers/triplet_loss_layer.cu.orig.orig new file mode 100644 index 00000000000..d893949e5fc --- /dev/null +++ b/src/caffe/layers/triplet_loss_layer.cu.orig.orig @@ -0,0 +1,464 @@ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +void TripletLossLayer::Forward_gpu( + const vector*>& bottom, + const vector*>& top) { + Dtype margin = this->layer_param_.triplet_loss_param().margin(); + Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); + int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); + CHECK_EQ(bottom[0]->num()%(2 + num_triplets), 0); + Dtype loss(0.0); + int dim = bottom[0]->count()/bottom[0]->num(); + int num_set = bottom[0]->num()/(2 + num_triplets); + if (losstype == 0) { + for (int i = 0; i < num_set; ++i) { + caffe_gpu_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive + diff_pos.mutable_gpu_data() + i*dim); // reference-pose_close + caffe_gpu_dot( + dim, + diff_pos.gpu_data() + i*dim, + diff_pos.gpu_data() + i*dim, +<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 + dist_sq_pos.mutable_cpu_data() + i); +======= + dist_sq_pos.mutable_gpu_data() + i); +>>>>>>> debug GPU triplet loss codes for loss type 0 + // a b is a similar pair for pair wise + // loss accumulated by the pair wise part + loss += dist_sq_pos.gpu_data()[i]; + for (int triplet = 0; triplet < num_triplets; ++triplet) { + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.gpu_data()[i]; + // Loss 
component calculated from negative part + caffe_gpu_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, + diff_neg.mutable_gpu_data() + i*dim); // reference-negative +<<<<<<< e7a31554fff6f6e13d1acc02201044edf0b2e322 +<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 +======= +>>>>>>> debug GPU triplet loss codes for loss type 0 + caffe_gpu_dot( + dim, + diff_neg.gpu_data() + i*dim, + diff_neg.gpu_data() + i*dim, +<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 + dist_sq_neg.mutable_cpu_data() + i); +======= + dist_sq_neg.mutable_gpu_data() + i); +>>>>>>> debug GPU triplet loss codes for loss type 0 + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[i] -= dist_sq_neg.gpu_data()[i]; + // loss accumulated accumulated by the triplet part + loss += std::max(margin + dist_sq_.gpu_data()[i], Dtype(0.0)); + } + } + loss = loss / static_cast(num_set) / Dtype(2); + top[0]->mutable_gpu_data()[0] = loss; + } else { + for (int i = 0; i < num_set; ++i) { + caffe_gpu_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive + diff_pos.mutable_gpu_data() + i*dim); // reference-pose_close + // Loss component calculated from reference and close one + caffe_gpu_dot( + dim, + diff_pos.gpu_data() + i*dim, + diff_pos.gpu_data() + i*dim, +<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 + dist_sq_pos.mutable_cpu_data() + i); +======= + dist_sq_pos.mutable_gpu_data() + i); +>>>>>>> debug GPU triplet loss codes for loss type 0 + // a b is a similar pair for pair wise + // loss accumulated by the pair wise part + loss += dist_sq_pos.gpu_data()[i]; + for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.mutable_gpu_data()[i]; + dist_sq_.mutable_gpu_data()[i] += margin; + // Loss component calculated from negative part + 
caffe_gpu_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, + diff_neg.mutable_gpu_data() + i*dim); // reference-negative +<<<<<<< e7a31554fff6f6e13d1acc02201044edf0b2e322 +<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 +======= +>>>>>>> debug GPU triplet loss codes for loss type 0 + caffe_gpu_dot( + dim, + diff_neg.gpu_data() + i*dim, + diff_neg.gpu_data() + i*dim, +<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 + dist_sq_neg.mutable_cpu_data() + i); +======= + dist_sq_neg.mutable_gpu_data() + i); +>>>>>>> debug GPU triplet loss codes for loss type 0 + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[i] = 1 - \ + dist_sq_neg.gpu_data()[i] / dist_sq_.mutable_gpu_data()[i]; + // loss accumulated accumulated by the triplet part + loss += std::max(dist_sq_.gpu_data()[i], Dtype(0.0)); + } + } + loss = loss / static_cast(num_set) / Dtype(2); + top[0]->mutable_gpu_data()[0] = loss; + } +} + +template +void TripletLossLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + Dtype margin = this->layer_param_.triplet_loss_param().margin(); + Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); + int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); + int dim = bottom[0]->count()/bottom[0]->num(); + int num_set = bottom[0]->num()/(2 + num_triplets); + if (losstype == 0) { + // BP for feat1(extracted from reference) + for (int i = 0; i < 1; ++i) { + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->gpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_gpu_diff(); + // the pair part + caffe_gpu_axpby( + dim, + alpha, + diff_pos.gpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; 
triplet < num_triplets; ++triplet) { + caffe_gpu_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_gpu_data() + j*dim); // reference-negative +<<<<<<< e7a31554fff6f6e13d1acc02201044edf0b2e322 +<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 +======= +>>>>>>> debug GPU triplet loss codes for loss type 0 + caffe_gpu_dot( + dim, + diff_neg.gpu_data() + j*dim, + diff_neg.gpu_data() + j*dim, +<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 + dist_sq_neg.mutable_cpu_data() + j); +======= + dist_sq_neg.mutable_gpu_data() + j); +>>>>>>> debug GPU triplet loss codes for loss type 0 + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; + // Loss component calculated from negative part + if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { + // similar pair in triplet + caffe_gpu_axpby( + dim, + alpha, + diff_pos.gpu_data() + (j*dim), + Dtype(1.0), + bout + (2 + num_triplets)*j*dim); + // dissimilar pair in triplet + caffe_gpu_axpby( + dim, + -alpha, + diff_neg.gpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + // BP for feat2(extracted from the closest sample) + for (int i = 1; i < 2; ++i) { + if (propagate_down[0]) { + const Dtype sign = -1; + const Dtype alpha = sign * top[0]->gpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_gpu_diff(); + // the pair part + caffe_gpu_axpby( + dim, + alpha, + diff_pos.gpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + caffe_gpu_sub( + dim, + bottom[0]->gpu_data() + (2 + 
num_triplets)*j*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_gpu_data() + j*dim); // reference-negative + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; +<<<<<<< e7a31554fff6f6e13d1acc02201044edf0b2e322 +<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 +======= +>>>>>>> debug GPU triplet loss codes for loss type 0 + caffe_gpu_dot( + dim, + diff_neg.gpu_data() + j*dim, + diff_neg.gpu_data() + j*dim, +<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 + dist_sq_neg.mutable_cpu_data() + j); +======= + dist_sq_neg.mutable_gpu_data() + j); +>>>>>>> debug GPU triplet loss codes for loss type 0 + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; + if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { + // similar pair in triplet + caffe_gpu_axpby( + dim, + alpha, + diff_pos.gpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + // BP for negative feature used in the num_triplets triplet part + for (int i = 2; i < 2 + num_triplets; ++i) { + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->gpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_gpu_diff(); + caffe_gpu_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*j + i)*dim, + diff_neg.mutable_gpu_data() + j*dim); // reference-negative + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; +<<<<<<< e7a31554fff6f6e13d1acc02201044edf0b2e322 +<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 +======= +>>>>>>> debug GPU triplet loss codes for loss type 0 + caffe_gpu_dot( + dim, + 
diff_neg.gpu_data() + j*dim, + diff_neg.gpu_data() + j*dim, +<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 + dist_sq_neg.mutable_cpu_data() + j); +======= + dist_sq_neg.mutable_gpu_data() + j); +>>>>>>> debug GPU triplet loss codes for loss type 0 + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; + if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { + // dissimilar pairs + caffe_gpu_axpby( + dim, + alpha, + diff_neg.gpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + } else { + caffe_gpu_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } else { + for (int i = 0; i < 1; ++i) { + // BP for data1(feat1) + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->gpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_gpu_diff(); + // the pair part + caffe_gpu_axpby( + dim, + alpha, + diff_pos.gpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; + dist_sq_.mutable_gpu_data()[j] += margin; + // Loss component calculated from negative part + caffe_gpu_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_gpu_data() + j*dim); // reference-negative +<<<<<<< e7a31554fff6f6e13d1acc02201044edf0b2e322 +<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 +======= +>>>>>>> debug GPU triplet loss codes for loss type 0 + caffe_gpu_dot( + dim, + diff_neg.gpu_data() + j*dim, + diff_neg.gpu_data() + j*dim, +<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 + dist_sq_neg.mutable_cpu_data() + j); +======= + dist_sq_neg.mutable_gpu_data() + j); +>>>>>>> debug GPU triplet loss 
codes for loss type 0 + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[j] = 1 - \ + dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.gpu_data()[j]) > Dtype(0.0)) { + caffe_gpu_axpby( + dim, + alpha*dist_sq_neg.mutable_gpu_data()[j]\ + /((dist_sq_pos.mutable_gpu_data()[j]+margin)\ + *(dist_sq_pos.mutable_gpu_data()[j]+margin)), + diff_pos.gpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + caffe_gpu_axpby( + dim, + -alpha/(dist_sq_pos.mutable_gpu_data()[j] + margin), + diff_neg.gpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + for (int i = 1; i < 2; ++i) { + // BP for positive data(feat2) + if (propagate_down[0]) { + const Dtype sign = -1; + const Dtype alpha = sign * top[0]->gpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_gpu_diff(); + // the pair part + caffe_gpu_axpby( + dim, + alpha, + diff_pos.gpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; + dist_sq_.mutable_gpu_data()[j] += margin; + // Loss component calculated from negative part + caffe_gpu_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_gpu_data() + j*dim); // reference-negative +<<<<<<< e7a31554fff6f6e13d1acc02201044edf0b2e322 +<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 +======= +>>>>>>> debug GPU triplet loss codes for loss type 0 + caffe_gpu_dot( + dim, + diff_neg.gpu_data() + j*dim, + diff_neg.gpu_data() + j*dim, +<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 + dist_sq_neg.mutable_cpu_data() + j); +======= + 
dist_sq_neg.mutable_gpu_data() + j); +>>>>>>> debug GPU triplet loss codes for loss type 0 + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[j] = 1 - \ + dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.gpu_data()[j]) > Dtype(0.0)) { + caffe_gpu_axpby( + dim, + alpha*dist_sq_neg.mutable_gpu_data()[j]\ + /((dist_sq_pos.mutable_gpu_data()[j]+margin)\ + *(dist_sq_pos.mutable_gpu_data()[j]+margin)), + diff_pos.gpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + for (int i = 2; i < 2 + num_triplets; ++i) { + // BP for negative data(feat3) + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->gpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_gpu_diff(); + dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; + dist_sq_.mutable_gpu_data()[j] += margin; + // Loss component calculated from negative part + caffe_gpu_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*j + i)*dim, + diff_neg.mutable_gpu_data() + j*dim); // reference-negative +<<<<<<< e7a31554fff6f6e13d1acc02201044edf0b2e322 +<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 +======= +>>>>>>> debug GPU triplet loss codes for loss type 0 + caffe_gpu_dot( + dim, + diff_neg.gpu_data() + j*dim, + diff_neg.gpu_data() + j*dim, +<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 + dist_sq_neg.mutable_cpu_data() + j); +======= + dist_sq_neg.mutable_gpu_data() + j); +>>>>>>> debug GPU triplet loss codes for loss type 0 + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[j] = 1 - \ + dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.gpu_data()[j]) > 
Dtype(0.0)) { + caffe_gpu_axpby( + dim, + alpha/(dist_sq_pos.mutable_gpu_data()[j] + margin), + diff_neg.gpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + } else { + caffe_gpu_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(TripletLossLayer); + +} // namespace caffe From e98d8e5be6db3053f7ee87e29eb1440bc7f48da5 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Thu, 4 Jun 2015 22:31:06 +0800 Subject: [PATCH 69/82] triplet data generation and network update restore --- .../triplet/convert_mnist_triplet_data.cpp | 128 ++ examples/triplet/create_mnist_triplet.sh | 21 + examples/triplet/mnist_triplet.prototxt | 113 ++ .../triplet/mnist_triplet_solver.prototxt | 25 + .../triplet/mnist_triplet_train_test.prototxt | 500 ++++++ examples/triplet/readme.md | 167 +- examples/triplet/readme.md.orig | 243 +++ examples/triplet/train_mnist_triplet.sh | 5 + include/caffe/data_layers.hpp.orig | 99 ++ include/caffe/data_layers.hpp.orig.orig | 359 ++++ include/caffe/filler.hpp.orig | 291 ++++ include/caffe/loss_layers.hpp | 70 + include/caffe/loss_layers.hpp.orig | 6 +- include/caffe/python_layer.hpp | 5 +- include/caffe/vision_layers.hpp.orig | 549 ++++++ src/caffe/data_transformer.cpp.orig | 34 + src/caffe/layers/concat_layer.cu.orig | 96 ++ .../layers/triplet_loss_layer.cpp.orig.orig | 459 +++++ src/caffe/net.cpp | 472 ++++-- src/caffe/proto/caffe.proto | 58 +- src/caffe/solver.cpp | 575 ++++++- src/caffe/solver.cpp.orig | 1053 +++++++++--- src/caffe/solver.cpp.orig.orig | 1484 +++++++++++++++++ .../test/test_data/generate_sample_data.py | 34 + src/caffe/test/test_gradient_based_solver.cpp | 674 +++++++- src/caffe/test/test_net.cpp.orig | 136 +- .../test/test_triplet_loss_layer.orig.orig | 317 ++++ ...r~5308d9998ae0b1f97b7b99b33fac968421447f3a | 125 ++ 28 files changed, 7549 insertions(+), 549 deletions(-) create mode 100644 examples/triplet/convert_mnist_triplet_data.cpp create mode 100755 
examples/triplet/create_mnist_triplet.sh create mode 100644 examples/triplet/mnist_triplet.prototxt create mode 100644 examples/triplet/mnist_triplet_solver.prototxt create mode 100644 examples/triplet/mnist_triplet_train_test.prototxt create mode 100644 examples/triplet/readme.md.orig create mode 100755 examples/triplet/train_mnist_triplet.sh create mode 100644 include/caffe/data_layers.hpp.orig.orig create mode 100644 include/caffe/filler.hpp.orig create mode 100644 include/caffe/vision_layers.hpp.orig create mode 100644 src/caffe/layers/concat_layer.cu.orig create mode 100644 src/caffe/layers/triplet_loss_layer.cpp.orig.orig create mode 100644 src/caffe/solver.cpp.orig.orig create mode 100644 src/caffe/test/test_triplet_loss_layer.orig.orig create mode 100644 src/caffe/test/test_triplet_loss_layer~5308d9998ae0b1f97b7b99b33fac968421447f3a diff --git a/examples/triplet/convert_mnist_triplet_data.cpp b/examples/triplet/convert_mnist_triplet_data.cpp new file mode 100644 index 00000000000..0cbab642b7c --- /dev/null +++ b/examples/triplet/convert_mnist_triplet_data.cpp @@ -0,0 +1,128 @@ +// This script converts the MNIST dataset to the leveldb format used +// by caffe to train siamese network. 
+// Usage: +// convert_mnist_data input_image_file input_label_file output_db_file +// The MNIST dataset could be downloaded at +// http://yann.lecun.com/exdb/mnist/ +#include // NOLINT(readability/streams) +#include + +#include "glog/logging.h" +#include "google/protobuf/text_format.h" +#include "leveldb/db.h" +#include "stdint.h" + +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/math_functions.hpp" + +uint32_t swap_endian(uint32_t val) { + val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); + return (val << 16) | (val >> 16); +} + +void read_image(std::ifstream* image_file, std::ifstream* label_file, + uint32_t index, uint32_t rows, uint32_t cols, + char* pixels, char* label) { + image_file->seekg(index * rows * cols + 16); + image_file->read(pixels, rows * cols); + label_file->seekg(index + 8); + label_file->read(label, 1); +} + +void convert_dataset(const char* image_filename, const char* label_filename, + const char* db_filename) { + // Open files + std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); + std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); + CHECK(image_file) << "Unable to open file " << image_filename; + CHECK(label_file) << "Unable to open file " << label_filename; + // Read the magic and the meta data + uint32_t magic; + uint32_t num_items; + uint32_t num_labels; + uint32_t rows; + uint32_t cols; + + image_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + CHECK_EQ(magic, 2051) << "Incorrect image file magic."; + label_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + CHECK_EQ(magic, 2049) << "Incorrect label file magic."; + image_file.read(reinterpret_cast(&num_items), 4); + num_items = swap_endian(num_items); + label_file.read(reinterpret_cast(&num_labels), 4); + num_labels = swap_endian(num_labels); + CHECK_EQ(num_items, num_labels); + image_file.read(reinterpret_cast(&rows), 4); + rows = swap_endian(rows); + 
image_file.read(reinterpret_cast(&cols), 4); + cols = swap_endian(cols); + + // Open leveldb + leveldb::DB* db; + leveldb::Options options; + options.create_if_missing = true; + options.error_if_exists = true; + leveldb::Status status = leveldb::DB::Open( + options, db_filename, &db); + CHECK(status.ok()) << "Failed to open leveldb " << db_filename + << ". Is it already existing?"; + + char label_i; + char label_j; + char label_k; + char* pixels = new char[3 * rows * cols]; + const int kMaxKeyLength = 10; + char key[kMaxKeyLength]; + std::string value; + + caffe::Datum datum; + datum.set_channels(3); // one channel for each image in the pair + datum.set_height(rows); + datum.set_width(cols); + LOG(INFO) << "A total of " << num_items << " items."; + LOG(INFO) << "Rows: " << rows << " Cols: " << cols; + for (int itemid = 0; itemid < num_items; ++itemid) { + int i = caffe::caffe_rng_rand() % num_items; // pick triplet groups + int j = caffe::caffe_rng_rand() % num_items; + int k = caffe::caffe_rng_rand() % num_items; + read_image(&image_file, &label_file, i, rows, cols, + pixels, &label_i); + read_image(&image_file, &label_file, j, rows, cols, + pixels + (rows * cols), &label_j); + read_image(&image_file, &label_file, k, rows, cols, + pixels + (2 * rows * cols), &label_k); + + datum.set_data(pixels, 3*rows*cols); + if (label_i == label_j && label_i != label_k) { + datum.set_label(1); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", itemid); + db->Put(leveldb::WriteOptions(), std::string(key), value); + } else { + itemid--; + datum.set_label(0); + } + } + + delete db; + delete pixels; +} + +int main(int argc, char** argv) { + if (argc != 4) { + printf("This script converts the MNIST dataset to the leveldb format used\n" + "by caffe to train a siamese network.\n" + "Usage:\n" + " convert_mnist_data input_image_file input_label_file " + "output_db_file\n" + "The MNIST dataset could be downloaded at\n" + " http://yann.lecun.com/exdb/mnist/\n" + 
"You should gunzip them after downloading.\n"); + } else { + google::InitGoogleLogging(argv[0]); + convert_dataset(argv[1], argv[2], argv[3]); + } + return 0; +} diff --git a/examples/triplet/create_mnist_triplet.sh b/examples/triplet/create_mnist_triplet.sh new file mode 100755 index 00000000000..f404f2aa255 --- /dev/null +++ b/examples/triplet/create_mnist_triplet.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env sh +# This script converts the mnist data into leveldb format. + +EXAMPLES=./build/examples/triplet +DATA=./data/mnist + +echo "Creating leveldb..." + +rm -rf ./examples/triplet/mnist_siamese_train_leveldb +rm -rf ./examples/triplet/mnist_siamese_test_leveldb + +$EXAMPLES/convert_mnist_triplet_data.bin \ + $DATA/train-images-idx3-ubyte \ + $DATA/train-labels-idx1-ubyte \ + ./examples/triplet/mnist_triplet_train_leveldb +$EXAMPLES/convert_mnist_triplet_data.bin \ + $DATA/t10k-images-idx3-ubyte \ + $DATA/t10k-labels-idx1-ubyte \ + ./examples/triplet/mnist_triplet_test_leveldb + +echo "Done." diff --git a/examples/triplet/mnist_triplet.prototxt b/examples/triplet/mnist_triplet.prototxt new file mode 100644 index 00000000000..0e903f85909 --- /dev/null +++ b/examples/triplet/mnist_triplet.prototxt @@ -0,0 +1,113 @@ +name: "mnist_siamese" +input: "data" +input_dim: 10000 +input_dim: 1 +input_dim: 28 +input_dim: 28 +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + 
kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 500 + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "ip1" + top: "ip2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 10 + } +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip2" + top: "feat" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 2 + } +} diff --git a/examples/triplet/mnist_triplet_solver.prototxt b/examples/triplet/mnist_triplet_solver.prototxt new file mode 100644 index 00000000000..edd8e1e0338 --- /dev/null +++ b/examples/triplet/mnist_triplet_solver.prototxt @@ -0,0 +1,25 @@ +# The train/test net protocol buffer definition +net: "examples/triplet/mnist_triplet_train_test.prototxt" +# test_iter specifies how many forward passes the test should carry out. +# In the case of MNIST, we have test batch size 100 and 100 test iterations, +# covering the full 10,000 testing images. +test_iter: 100 +# Carry out testing every 500 training iterations. +test_interval: 500 +# The base learning rate, momentum and the weight decay of the network. 
+base_lr: 0.001 +momentum: 0.9 +weight_decay: 0.0000 +# The learning rate policy +lr_policy: "inv" +gamma: 0.0001 +power: 0.75 +# Display every 100 iterations +display: 100 +# The maximum number of iterations +max_iter: 50000 +# snapshot intermediate results +snapshot: 5000 +snapshot_prefix: "examples/triplet/mnist_triplet" +# solver mode: CPU or GPU +solver_mode: CPU diff --git a/examples/triplet/mnist_triplet_train_test.prototxt b/examples/triplet/mnist_triplet_train_test.prototxt new file mode 100644 index 00000000000..da25dec31de --- /dev/null +++ b/examples/triplet/mnist_triplet_train_test.prototxt @@ -0,0 +1,500 @@ +name: "mnist_triplet_train_test" +layer { + name: "triplet_data" + type: "Data" + top: "triplet_data" + top: "sim" + include { + phase: TRAIN + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/triplet/mnist_triplet_train_leveldb" + batch_size: 64 + } +} +layer { + name: "triplet_data" + type: "Data" + top: "triplet_data" + top: "sim" + include { + phase: TEST + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/triplet/mnist_triplet_test_leveldb" + batch_size: 100 + } +} +layer { + name: "slice_triplet" + type: "Slice" + bottom: "triplet_data" + top: "data" + top: "data_true" + top: "data_false" + slice_param { + slice_dim: 1 + slice_point: 1 + slice_point: 2 + } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + 
} + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "ip1" + top: "ip2" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip2" + top: "feat" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "conv1_true" + type: "Convolution" + bottom: "data_true" + top: "conv1_true" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1_true" + type: "Pooling" + bottom: "conv1_true" + top: "pool1_true" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2_true" + type: "Convolution" + bottom: "pool1_true" + top: "conv2_true" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler 
{ + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2_true" + type: "Pooling" + bottom: "conv2_true" + top: "pool2_true" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1_true" + type: "InnerProduct" + bottom: "pool2_true" + top: "ip1_true" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1_true" + type: "ReLU" + bottom: "ip1_true" + top: "ip1_true" +} +layer { + name: "ip2_true" + type: "InnerProduct" + bottom: "ip1_true" + top: "ip2_true" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat_true" + type: "InnerProduct" + bottom: "ip2_true" + top: "feat_true" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "conv1_false" + type: "Convolution" + bottom: "data_false" + top: "conv1_false" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1_false" + type: "Pooling" + bottom: "conv1_false" + top: "pool1_false" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2_false" + type: "Convolution" + bottom: "pool1_false" + top: "conv2_false" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { 
+ type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2_false" + type: "Pooling" + bottom: "conv2_false" + top: "pool2_false" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1_false" + type: "InnerProduct" + bottom: "pool2_false" + top: "ip1_false" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1_false" + type: "ReLU" + bottom: "ip1_false" + top: "ip1_false" +} +layer { + name: "ip2_false" + type: "InnerProduct" + bottom: "ip1_false" + top: "ip2_false" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat_false" + type: "InnerProduct" + bottom: "ip2_false" + top: "feat_false" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "loss" + type: "TripletLoss" + bottom: "feat" + bottom: "feat_true" + bottom: "feat_false" + bottom: "sim" + top: "loss" + triplet_loss_param { + margin: 1 + } +} + diff --git a/examples/triplet/readme.md b/examples/triplet/readme.md index 524718ce2db..6636808691a 100644 --- a/examples/triplet/readme.md +++ b/examples/triplet/readme.md @@ -1,6 +1,6 @@ --- title: Triplet Network Tutorial -description: Train and test a triplet network on MNIST data. +description: Train and test a triplet network on data generated by 3D model. category: example include_in_docs: true layout: default @@ -21,87 +21,92 @@ root caffe directory* ## Prepare Datasets -You will first need to download and convert the data from the MNIST -website. 
To do this, simply run the following commands: +You will first need to convert the data from the some .ply models using +opencv_contrib cnn_3donj module. After construcing the binary files including images and labels and put them in ./data/linemod folder, just run: - ./data/mnist/get_mnist.sh - ./examples/triplet/create_mnist_triplet.sh + ./examples/triplet/create_3d_triplet.sh After running the script there should be two datasets, -`./examples/triplet/mnist_triplet_train_leveldb`, and -`./examples/triplet/mnist_triplet_test_leveldb`. +`./examples/triplet/3d_triplet_train_leveldb`, and +`./examples/triplet/3d_triplet_test_leveldb`. ## The Model First, we will define the model that we want to train using the triplet network. We will use the convolutional net defined in -`./examples/triplet/mnist_triplet.prototxt`. This model is almost -exactly the same as the [LeNet model](mnist.html), the only difference is that -we have replaced the top layers that produced probabilities over the 10 digit -classes with a linear "feature" layer that produces a 2 dimensional vector. - - layers { - name: "feat" - type: INNER_PRODUCT - bottom: "ip2" - top: "feat" - blobs_lr: 1 - blobs_lr: 2 - inner_product_param { - num_output: 2 - } - } +`./examples/triplet/3d_triplet.prototxt`. + +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip1" + top: "feat" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 4 + } +} ## Define the triplet Network In this section we will define the triplet network used for training. The resulting network is defined in -`./examples/triplet/mnist_triplet_train_test.prototxt`. +`./examples/triplet/3d_triplet_train_test.prototxt`. -### Reading in the Pair Data +### Reading in the Triplet Data We start with a data layer that reads from the LevelDB database we created -earlier. 
Each entry in this database contains the image data for a pair of -images (`pair_data`) and a binary label saying if they belong to the same class -or different classes (`sim`). +earlier. Each entry in this database contains the image data for a triplet of +images (`triplet_data`) and the label (`sim`) is not nessesary in our method. layers { - name: "pair_data" + name: "triplet_data" type: DATA - top: "pair_data" + top: "triplet_data" top: "sim" data_param { - source: "examples/triplet/mnist-triplet-train-leveldb" + source: "examples/triplet/3d-triplet-train-leveldb" scale: 0.00390625 - batch_size: 64 + batch_size: 250 } include: { phase: TRAIN } } -In order to pack a pair of images into the same blob in the database we pack one -image per channel. We want to be able to work with these two images separately, -so we add a slice layer after the data layer. This takes the `pair_data` and +In order to pack a triplet of images into the same blob in the database we pack one +image per channel. We want to be able to work with these three images separately, +so we add a slice layer after the data layer. This takes the `triplet_data` and slices it along the channel dimension so that we have a single image in `data` -and its paired image in `data_p.` - - layers { - name: "slice_pair" - type: SLICE - bottom: "pair_data" - top: "data" - top: "data_p" - slice_param { - slice_dim: 1 - slice_point: 1 - } - } - -### Building the First Side of the triplet Net +and its positive image in `data_pos.` & its negative image in `data_neg.`, as described in paper for 3D object classification and pose estimation, a pair wise term is also need alone with the triplet part. 
+ +layer { + name: "slice_triplet" + type: "Slice" + bottom: "triplet_data" + top: "data" + top: "data_true" + top: "data_false" + top: "data_p1" + top: "data_p2" + slice_param { + slice_dim: 1 + slice_point: 1 + slice_point: 2 + slice_point: 3 + slice_point: 4 + } +} + +### Building the First part of the triplet Net Now we can specify the first side of the triplet net. This side operates on `data` and produces `feat`. Starting from the net in -`./examples/triplet/mnist_triplet.prototxt` we add default weight fillers. Then +`./examples/triplet/3d_triplet.prototxt` we add default weight fillers. Then we name the parameters of the convolutional and inner product layers. Naming the -parameters allows Caffe to share the parameters between layers on both sides of +parameters allows Caffe to share the parameters between layers on three channels of the triplet net. In the definition this looks like: ... @@ -132,32 +137,32 @@ Now we need to create the second path that operates on `data_neg` and produces paste it. Then we change the name of each layer, input, and output by appending `_neg` to differentiate the "paired" layers from the originals. -### Adding the Contrastive Loss Function - -To train the network we will optimize a contrastive loss function proposed in: -Raia Hadsell, Sumit Chopra, and Yann LeCun "Dimensionality Reduction by Learning -an Invariant Mapping". This loss function encourages matching pairs to be close -together in feature space while pushing non-matching pairs apart. 
This cost -function is implemented with the `TRIPLET_LOSS` layer: - - layers { - name: "loss" - type: TRIPLET_LOSS - triplet_loss_param { - margin: 1.0 - } - bottom: "feat" - bottom: "feat_pos" - bottom: "feat_neg" - bottom: "sim" - top: "loss" - } +### Adding the Triplet Loss Function + +To train the network we will optimize a triplet loss function proposed in: +This cost function is implemented with the `TRIPLET_LOSS` layer: + +layer { + name: "loss" + type: "TripletLoss" + bottom: "feat" + bottom: "feat_true" + bottom: "feat_false" + bottom: "feat_p1" + bottom: "feat_p2" + bottom: "sim" + top: "loss" + triplet_loss_param { + margin: 1 + losstype: 1 + } +} ## Define the Solver Nothing special needs to be done to the solver besides pointing it at the correct model file. The solver is defined in -`./examples/triplet/mnist_triplet_solver.prototxt`. +`./examples/triplet/3d_triplet_solver.prototxt`. ## Training and Testing the Model @@ -165,7 +170,7 @@ Training the model is simple after you have written the network definition protobuf and solver protobuf files. 
Simply run `./examples/triplet/train_mnist_triplet.sh`: - ./examples/triplet/train_mnist_triplet.sh + ./examples/triplet/train_3d_triplet.sh # Plotting the results @@ -173,15 +178,9 @@ First, we can draw the model and triplet networks by running the following commands that draw the DAGs defined in the .prototxt files: ./python/draw_net.py \ - ./examples/triplet/mnist_triplet.prototxt \ - ./examples/triplet/mnist_triplet.png + ./examples/triplet/3d_triplet.prototxt \ + ./examples/triplet/3d_triplet.png ./python/draw_net.py \ - ./examples/triplet/mnist_triplet_train_test.prototxt \ - ./examples/triplet/mnist_triplet_train_test.png - -Second, we can load the learned model and plot the features using the iPython -notebook: - - ipython notebook ./examples/triplet/mnist_triplet.ipynb - + ./examples/triplet/3d_triplet_train_test.prototxt \ + ./examples/triplet/3d_triplet_train_test.png \ No newline at end of file diff --git a/examples/triplet/readme.md.orig b/examples/triplet/readme.md.orig new file mode 100644 index 00000000000..b08d30d07f0 --- /dev/null +++ b/examples/triplet/readme.md.orig @@ -0,0 +1,243 @@ +--- +title: Triplet Network Tutorial +description: Train and test a triplet network on MNIST data. +category: example +include_in_docs: true +layout: default +priority: 100 +--- + +# Triplet Network Training with Caffe +This example shows how you can use weight sharing and a contrastive loss +function to learn a model using a triplet network in Caffe. + +We will assume that you have caffe successfully compiled. If not, please refer +to the [Installation page](../../installation.html). This example builds on the +[MNIST tutorial](mnist.html) so it would be a good idea to read that before +continuing. + +*The guide specifies all paths and assumes all commands are executed from the +root caffe directory* + +## Prepare Datasets + +You will first need to download and convert the data from the MNIST +website. 
To do this, simply run the following commands: + + ./data/mnist/get_mnist.sh + ./examples/triplet/create_mnist_triplet.sh + +After running the script there should be two datasets, +`./examples/triplet/mnist_triplet_train_leveldb`, and +`./examples/triplet/mnist_triplet_test_leveldb`. + +## The Model +First, we will define the model that we want to train using the triplet network. +We will use the convolutional net defined in +`./examples/triplet/mnist_triplet.prototxt`. This model is almost +exactly the same as the [LeNet model](mnist.html), the only difference is that +we have replaced the top layers that produced probabilities over the 10 digit +classes with a linear "feature" layer that produces a 2 dimensional vector. + + layers { + name: "feat" + type: INNER_PRODUCT + bottom: "ip2" + top: "feat" + blobs_lr: 1 + blobs_lr: 2 + inner_product_param { + num_output: 2 + } + } + +## Define the triplet Network + +In this section we will define the triplet network used for training. The +resulting network is defined in +`./examples/triplet/mnist_triplet_train_test.prototxt`. + +<<<<<<< f6d5f83312a008c4a1e252b2495b64b12cbe93b7 +### Reading in the Triplet Data + +We start with a data layer that reads from the LevelDB database we created +earlier. Each entry in this database contains the image data for a triplet of +images (`triplet_data`) and the label (`sim`) is not nessesary in our method. + + layers { + name: "triplet_data" + type: DATA + top: "triplet_data" +======= +### Reading in the Pair Data + +We start with a data layer that reads from the LevelDB database we created +earlier. Each entry in this database contains the image data for a pair of +images (`pair_data`) and a binary label saying if they belong to the same class +or different classes (`sim`). 
+ + layers { + name: "pair_data" + type: DATA + top: "pair_data" +>>>>>>> triplet data generation and network update + top: "sim" + data_param { + source: "examples/triplet/mnist-triplet-train-leveldb" + scale: 0.00390625 + batch_size: 64 + } + include: { phase: TRAIN } + } + +<<<<<<< f6d5f83312a008c4a1e252b2495b64b12cbe93b7 +In order to pack a triplet of images into the same blob in the database we pack one +image per channel. We want to be able to work with these three images separately, +so we add a slice layer after the data layer. This takes the `triplet_data` and +slices it along the channel dimension so that we have a single image in `data` +and its positive image in `data_pos.` & its negative image in `data_neg.` + + layers { + name: "slice_triplet" + type: SLICE + bottom: "triplet_data" + top: "data" + top: "data_pos" + top: "data_neg" + slice_param { + slice_dim: 1 + slice_point: 1 + slice_point: 2 + } + } + +### Building the First part of the triplet Net +======= +In order to pack a pair of images into the same blob in the database we pack one +image per channel. We want to be able to work with these two images separately, +so we add a slice layer after the data layer. This takes the `pair_data` and +slices it along the channel dimension so that we have a single image in `data` +and its paired image in `data_p.` + + layers { + name: "slice_pair" + type: SLICE + bottom: "pair_data" + top: "data" + top: "data_p" + slice_param { + slice_dim: 1 + slice_point: 1 + } + } + +### Building the First Side of the triplet Net +>>>>>>> triplet data generation and network update + +Now we can specify the first side of the triplet net. This side operates on +`data` and produces `feat`. Starting from the net in +`./examples/triplet/mnist_triplet.prototxt` we add default weight fillers. Then +we name the parameters of the convolutional and inner product layers. 
Naming the +<<<<<<< f6d5f83312a008c4a1e252b2495b64b12cbe93b7 +parameters allows Caffe to share the parameters between layers on three channels of +======= +parameters allows Caffe to share the parameters between layers on both sides of +>>>>>>> triplet data generation and network update +the triplet net. In the definition this looks like: + + ... + param: "conv1_w" + param: "conv1_b" + ... + param: "conv2_w" + param: "conv2_b" + ... + param: "ip1_w" + param: "ip1_b" + ... + param: "ip2_w" + param: "ip2_b" + ... + +### Building the Second Side of the triplet Net + +Now we need to create the second path that operates on `data_pos` and produces +`feat_pos`. This path is exactly the same as the first. So we can just copy and +paste it. Then we change the name of each layer, input, and output by appending +`_pos` to differentiate the "paired" layers from the originals. + +### Building the Third Side of the triplet Net + +Now we need to create the second path that operates on `data_neg` and produces +`feat_neg`. This path is exactly the same as the first. So we can just copy and +paste it. Then we change the name of each layer, input, and output by appending +`_neg` to differentiate the "paired" layers from the originals. + +<<<<<<< f6d5f83312a008c4a1e252b2495b64b12cbe93b7 +### Adding the Triplet Loss Function + +To train the network we will optimize a triplet loss function proposed in: +This cost function is implemented with the `TRIPLET_LOSS` layer: +======= +### Adding the Contrastive Loss Function + +To train the network we will optimize a contrastive loss function proposed in: +Raia Hadsell, Sumit Chopra, and Yann LeCun "Dimensionality Reduction by Learning +an Invariant Mapping". This loss function encourages matching pairs to be close +together in feature space while pushing non-matching pairs apart. 
This cost +function is implemented with the `TRIPLET_LOSS` layer: +>>>>>>> triplet data generation and network update + + layers { + name: "loss" + type: TRIPLET_LOSS + triplet_loss_param { +<<<<<<< f6d5f83312a008c4a1e252b2495b64b12cbe93b7 + margin: 0.2 + } + bottom: "feat" + bottom: "feat_pos" + bottom: "feat_neg" +======= + margin: 1.0 + } + bottom: "feat" + bottom: "feat_pos" + bottom: "feat_neg" +>>>>>>> triplet data generation and network update + bottom: "sim" + top: "loss" + } + +## Define the Solver + +Nothing special needs to be done to the solver besides pointing it at the +correct model file. The solver is defined in +`./examples/triplet/mnist_triplet_solver.prototxt`. + +## Training and Testing the Model + +Training the model is simple after you have written the network definition +protobuf and solver protobuf files. Simply run +`./examples/triplet/train_mnist_triplet.sh`: + + ./examples/triplet/train_mnist_triplet.sh + +# Plotting the results + +First, we can draw the model and triplet networks by running the following +commands that draw the DAGs defined in the .prototxt files: + + ./python/draw_net.py \ + ./examples/triplet/mnist_triplet.prototxt \ + ./examples/triplet/mnist_triplet.png + + ./python/draw_net.py \ + ./examples/triplet/mnist_triplet_train_test.prototxt \ + ./examples/triplet/mnist_triplet_train_test.png + +Second, we can load the learned model and plot the features using the iPython +notebook: + + ipython notebook ./examples/triplet/mnist_triplet.ipynb + diff --git a/examples/triplet/train_mnist_triplet.sh b/examples/triplet/train_mnist_triplet.sh new file mode 100755 index 00000000000..683cda2963b --- /dev/null +++ b/examples/triplet/train_mnist_triplet.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env sh + +TOOLS=./release/tools + +$TOOLS/caffe train --solver=examples/triplet/mnist_triplet_solver.prototxt diff --git a/include/caffe/data_layers.hpp.orig b/include/caffe/data_layers.hpp.orig index f213c7f1afe..362bbcde53d 100644 --- 
a/include/caffe/data_layers.hpp.orig +++ b/include/caffe/data_layers.hpp.orig @@ -4,20 +4,33 @@ #include #include #include +<<<<<<< c90df3f9e2c00643425be1c4caf1b62cdf7bc398 <<<<<<< ca976a2b520c8024cc7f41a048d2c1abe73f6022 ======= #include "boost/scoped_ptr.hpp" >>>>>>> macro define in upgrade_proto +======= + +>>>>>>> add initiate class name of triplet loss layer #include "hdf5.h" #include "caffe/blob.hpp" #include "caffe/common.hpp" +<<<<<<< c90df3f9e2c00643425be1c4caf1b62cdf7bc398 +======= +#include "caffe/data_reader.hpp" +>>>>>>> add initiate class name of triplet loss layer #include "caffe/data_transformer.hpp" #include "caffe/filler.hpp" #include "caffe/internal_thread.hpp" #include "caffe/layer.hpp" +#include "caffe/net.hpp" #include "caffe/proto/caffe.pb.h" +<<<<<<< c90df3f9e2c00643425be1c4caf1b62cdf7bc398 +======= +#include "caffe/util/blocking_queue.hpp" +>>>>>>> add initiate class name of triplet loss layer #include "caffe/util/db.hpp" namespace caffe { @@ -31,11 +44,17 @@ template class BaseDataLayer : public Layer { public: explicit BaseDataLayer(const LayerParameter& param); + virtual ~BaseDataLayer() {} // LayerSetUp: implements common data layer setup functionality, and calls // DataLayerSetUp to do special data layer setup for individual layer types. // This method may not be overridden except by the BasePrefetchingDataLayer. virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); +<<<<<<< c90df3f9e2c00643425be1c4caf1b62cdf7bc398 +======= + // Data layers should be shared by multiple solvers in parallel + virtual inline bool ShareInParallel() const { return true; } +>>>>>>> add initiate class name of triplet loss layer virtual void DataLayerSetUp(const vector*>& bottom, const vector*>& top) {} // Data layers have no bottoms, so reshaping is trivial. 
@@ -54,11 +73,34 @@ class BaseDataLayer : public Layer { }; template +<<<<<<< c90df3f9e2c00643425be1c4caf1b62cdf7bc398 class BasePrefetchingDataLayer : public BaseDataLayer, public InternalThread { public: explicit BasePrefetchingDataLayer(const LayerParameter& param) : BaseDataLayer(param) {} +======= +class Batch { + public: + Blob data_, label_; +}; + +template +class BasePrefetchingDataLayer : + public BaseDataLayer, public InternalThread { + public: +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 + explicit BasePrefetchingDataLayer(const LayerParameter& param); +======= + explicit BasePrefetchingDataLayer(const LayerParameter& param) + : BaseDataLayer(param) {} +<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da +>>>>>>> macro define in upgrade_proto +>>>>>>> add initiate class name of triplet loss layer +======= + virtual ~BasePrefetchingDataLayer() {} +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update // LayerSetUp: implements common data layer setup functionality, and calls // DataLayerSetUp to do special data layer setup for individual layer types. // This method may not be overridden. 
@@ -70,6 +112,7 @@ class BasePrefetchingDataLayer : virtual void Forward_gpu(const vector*>& bottom, const vector*>& top); +<<<<<<< c90df3f9e2c00643425be1c4caf1b62cdf7bc398 virtual void CreatePrefetchThread(); virtual void JoinPrefetchThread(); // The thread's function @@ -78,28 +121,56 @@ class BasePrefetchingDataLayer : protected: Blob prefetch_data_; Blob prefetch_label_; +======= + // Prefetches batches (asynchronously if to GPU memory) + static const int PREFETCH_COUNT = 3; + + protected: + virtual void InternalThreadEntry(); + virtual void load_batch(Batch* batch) = 0; + + Batch prefetch_[PREFETCH_COUNT]; + BlockingQueue*> prefetch_free_; + BlockingQueue*> prefetch_full_; + +>>>>>>> add initiate class name of triplet loss layer Blob transformed_data_; }; template class DataLayer : public BasePrefetchingDataLayer { public: +<<<<<<< c90df3f9e2c00643425be1c4caf1b62cdf7bc398 explicit DataLayer(const LayerParameter& param) : BasePrefetchingDataLayer(param) {} virtual ~DataLayer(); virtual void DataLayerSetUp(const vector*>& bottom, const vector*>& top); +======= + explicit DataLayer(const LayerParameter& param); + virtual ~DataLayer(); + virtual void DataLayerSetUp(const vector*>& bottom, + const vector*>& top); + // DataLayer uses DataReader instead for sharing for parallelism + virtual inline bool ShareInParallel() const { return false; } +>>>>>>> add initiate class name of triplet loss layer virtual inline const char* type() const { return "Data"; } virtual inline int ExactNumBottomBlobs() const { return 0; } virtual inline int MinTopBlobs() const { return 1; } virtual inline int MaxTopBlobs() const { return 2; } protected: +<<<<<<< c90df3f9e2c00643425be1c4caf1b62cdf7bc398 virtual void InternalThreadEntry(); shared_ptr db_; shared_ptr cursor_; +======= + virtual void load_batch(Batch* batch); + + DataReader reader_; +>>>>>>> add initiate class name of triplet loss layer }; /** @@ -114,6 +185,11 @@ class DummyDataLayer : public Layer { : Layer(param) {} virtual 
void LayerSetUp(const vector*>& bottom, const vector*>& top); +<<<<<<< c90df3f9e2c00643425be1c4caf1b62cdf7bc398 +======= + // Data layers should be shared by multiple solvers in parallel + virtual inline bool ShareInParallel() const { return true; } +>>>>>>> add initiate class name of triplet loss layer // Data layers have no bottoms, so reshaping is trivial. virtual void Reshape(const vector*>& bottom, const vector*>& top) {} @@ -147,6 +223,11 @@ class HDF5DataLayer : public Layer { virtual ~HDF5DataLayer(); virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); +<<<<<<< c90df3f9e2c00643425be1c4caf1b62cdf7bc398 +======= + // Data layers should be shared by multiple solvers in parallel + virtual inline bool ShareInParallel() const { return true; } +>>>>>>> add initiate class name of triplet loss layer // Data layers have no bottoms, so reshaping is trivial. virtual void Reshape(const vector*>& bottom, const vector*>& top) {} @@ -188,6 +269,11 @@ class HDF5OutputLayer : public Layer { virtual ~HDF5OutputLayer(); virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); +<<<<<<< c90df3f9e2c00643425be1c4caf1b62cdf7bc398 +======= + // Data layers should be shared by multiple solvers in parallel + virtual inline bool ShareInParallel() const { return true; } +>>>>>>> add initiate class name of triplet loss layer // Data layers have no bottoms, so reshaping is trivial. 
virtual void Reshape(const vector*>& bottom, const vector*>& top) {} @@ -238,7 +324,11 @@ class ImageDataLayer : public BasePrefetchingDataLayer { protected: shared_ptr prefetch_rng_; virtual void ShuffleImages(); +<<<<<<< c90df3f9e2c00643425be1c4caf1b62cdf7bc398 virtual void InternalThreadEntry(); +======= + virtual void load_batch(Batch* batch); +>>>>>>> add initiate class name of triplet loss layer vector > lines_; int lines_id_; @@ -262,10 +352,15 @@ class MemoryDataLayer : public BaseDataLayer { virtual inline int ExactNumTopBlobs() const { return 2; } virtual void AddDatumVector(const vector& datum_vector); +<<<<<<< c90df3f9e2c00643425be1c4caf1b62cdf7bc398 #ifdef USE_OPENCV virtual void AddMatVector(const vector& mat_vector, const vector& labels); #endif // USE_OPENCV +======= + virtual void AddMatVector(const vector& mat_vector, + const vector& labels); +>>>>>>> add initiate class name of triplet loss layer // Reset should accept const pointers, but can't, because the memory // will be given to Blob, which is mutable @@ -312,7 +407,11 @@ class WindowDataLayer : public BasePrefetchingDataLayer { protected: virtual unsigned int PrefetchRand(); +<<<<<<< c90df3f9e2c00643425be1c4caf1b62cdf7bc398 virtual void InternalThreadEntry(); +======= + virtual void load_batch(Batch* batch); +>>>>>>> add initiate class name of triplet loss layer shared_ptr prefetch_rng_; vector > > image_database_; diff --git a/include/caffe/data_layers.hpp.orig.orig b/include/caffe/data_layers.hpp.orig.orig new file mode 100644 index 00000000000..43dca80bd73 --- /dev/null +++ b/include/caffe/data_layers.hpp.orig.orig @@ -0,0 +1,359 @@ +#ifndef CAFFE_DATA_LAYERS_HPP_ +#define CAFFE_DATA_LAYERS_HPP_ + +#include +#include +#include +<<<<<<< ca976a2b520c8024cc7f41a048d2c1abe73f6022 +======= + +#include "boost/scoped_ptr.hpp" +>>>>>>> macro define in upgrade_proto +#include "hdf5.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/data_transformer.hpp" +#include 
"caffe/filler.hpp" +#include "caffe/internal_thread.hpp" +#include "caffe/layer.hpp" +#include "caffe/net.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/db.hpp" + +namespace caffe { + +/** + * @brief Provides base for data layers that feed blobs to the Net. + * + * TODO(dox): thorough documentation for Forward and proto params. + */ +template +class BaseDataLayer : public Layer { + public: + explicit BaseDataLayer(const LayerParameter& param); + virtual ~BaseDataLayer() {} + // LayerSetUp: implements common data layer setup functionality, and calls + // DataLayerSetUp to do special data layer setup for individual layer types. + // This method may not be overridden except by the BasePrefetchingDataLayer. + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void DataLayerSetUp(const vector*>& bottom, + const vector*>& top) {} + // Data layers have no bottoms, so reshaping is trivial. + virtual void Reshape(const vector*>& bottom, + const vector*>& top) {} + + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + + protected: + TransformationParameter transform_param_; + shared_ptr > data_transformer_; + bool output_labels_; +}; + +template +class BasePrefetchingDataLayer : + public BaseDataLayer, public InternalThread { + public: + explicit BasePrefetchingDataLayer(const LayerParameter& param) + : BaseDataLayer(param) {} +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +======= +======= +class Batch { + public: + Blob data_, label_; +}; + +template +class BasePrefetchingDataLayer : + public BaseDataLayer, public InternalThread { + public: +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 + explicit BasePrefetchingDataLayer(const LayerParameter& param); +======= + explicit BasePrefetchingDataLayer(const LayerParameter& param) + : BaseDataLayer(param) {} +<<<<<<< 
c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da +>>>>>>> macro define in upgrade_proto +>>>>>>> add initiate class name of triplet loss layer +======= + virtual ~BasePrefetchingDataLayer() {} +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update + // LayerSetUp: implements common data layer setup functionality, and calls + // DataLayerSetUp to do special data layer setup for individual layer types. + // This method may not be overridden. + void LayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + virtual void CreatePrefetchThread(); + virtual void JoinPrefetchThread(); + // The thread's function + virtual void InternalThreadEntry() {} + + protected: + Blob prefetch_data_; + Blob prefetch_label_; + Blob transformed_data_; +}; + +template +class DataLayer : public BasePrefetchingDataLayer { + public: + explicit DataLayer(const LayerParameter& param) + : BasePrefetchingDataLayer(param) {} + virtual ~DataLayer(); + virtual void DataLayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Data"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int MinTopBlobs() const { return 1; } + virtual inline int MaxTopBlobs() const { return 2; } + + protected: + virtual void InternalThreadEntry(); + + shared_ptr db_; + shared_ptr cursor_; +}; + +/** + * @brief Provides data to the Net generated by a Filler. + * + * TODO(dox): thorough documentation for Forward and proto params. 
+ */ +template +class DummyDataLayer : public Layer { + public: + explicit DummyDataLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + // Data layers have no bottoms, so reshaping is trivial. + virtual void Reshape(const vector*>& bottom, + const vector*>& top) {} + + virtual inline const char* type() const { return "DummyData"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int MinTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + + vector > > fillers_; + vector refill_; +}; + +/** + * @brief Provides data to the Net from HDF5 files. + * + * TODO(dox): thorough documentation for Forward and proto params. + */ +template +class HDF5DataLayer : public Layer { + public: + explicit HDF5DataLayer(const LayerParameter& param) + : Layer(param) {} + virtual ~HDF5DataLayer(); + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + // Data layers have no bottoms, so reshaping is trivial. 
+ virtual void Reshape(const vector*>& bottom, + const vector*>& top) {} + + virtual inline const char* type() const { return "HDF5Data"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int MinTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + virtual void LoadHDF5FileData(const char* filename); + + std::vector hdf_filenames_; + unsigned int num_files_; + unsigned int current_file_; + hsize_t current_row_; + std::vector > > hdf_blobs_; + std::vector data_permutation_; + std::vector file_permutation_; +}; + +/** + * @brief Write blobs to disk as HDF5 files. + * + * TODO(dox): thorough documentation for Forward and proto params. + */ +template +class HDF5OutputLayer : public Layer { + public: + explicit HDF5OutputLayer(const LayerParameter& param) + : Layer(param), file_opened_(false) {} + virtual ~HDF5OutputLayer(); + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + // Data layers have no bottoms, so reshaping is trivial. 
+ virtual void Reshape(const vector*>& bottom, + const vector*>& top) {} + + virtual inline const char* type() const { return "HDF5Output"; } + // TODO: no limit on the number of blobs + virtual inline int ExactNumBottomBlobs() const { return 2; } + virtual inline int ExactNumTopBlobs() const { return 0; } + + inline std::string file_name() const { return file_name_; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void SaveBlobs(); + + bool file_opened_; + std::string file_name_; + hid_t file_id_; + Blob data_blob_; + Blob label_blob_; +}; + +/** + * @brief Provides data to the Net from image files. + * + * TODO(dox): thorough documentation for Forward and proto params. + */ +template +class ImageDataLayer : public BasePrefetchingDataLayer { + public: + explicit ImageDataLayer(const LayerParameter& param) + : BasePrefetchingDataLayer(param) {} + virtual ~ImageDataLayer(); + virtual void DataLayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "ImageData"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int ExactNumTopBlobs() const { return 2; } + + protected: + shared_ptr prefetch_rng_; + virtual void ShuffleImages(); + virtual void InternalThreadEntry(); + + vector > lines_; + int lines_id_; +}; + +/** + * @brief Provides data to the Net from memory. + * + * TODO(dox): thorough documentation for Forward and proto params. 
+ */ +template +class MemoryDataLayer : public BaseDataLayer { + public: + explicit MemoryDataLayer(const LayerParameter& param) + : BaseDataLayer(param), has_new_data_(false) {} + virtual void DataLayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "MemoryData"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int ExactNumTopBlobs() const { return 2; } + + virtual void AddDatumVector(const vector& datum_vector); +#ifdef USE_OPENCV + virtual void AddMatVector(const vector& mat_vector, + const vector& labels); +#endif // USE_OPENCV + + // Reset should accept const pointers, but can't, because the memory + // will be given to Blob, which is mutable + void Reset(Dtype* data, Dtype* label, int n); + void set_batch_size(int new_size); + + int batch_size() { return batch_size_; } + int channels() { return channels_; } + int height() { return height_; } + int width() { return width_; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + + int batch_size_, channels_, height_, width_, size_; + Dtype* data_; + Dtype* labels_; + int n_; + size_t pos_; + Blob added_data_; + Blob added_label_; + bool has_new_data_; +}; + +/** + * @brief Provides data to the Net from windows of images files, specified + * by a window data file. + * + * TODO(dox): thorough documentation for Forward and proto params. 
+ */ +template +class WindowDataLayer : public BasePrefetchingDataLayer { + public: + explicit WindowDataLayer(const LayerParameter& param) + : BasePrefetchingDataLayer(param) {} + virtual ~WindowDataLayer(); + virtual void DataLayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "WindowData"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int ExactNumTopBlobs() const { return 2; } + + protected: + virtual unsigned int PrefetchRand(); + virtual void InternalThreadEntry(); + + shared_ptr prefetch_rng_; + vector > > image_database_; + enum WindowField { IMAGE_INDEX, LABEL, OVERLAP, X1, Y1, X2, Y2, NUM }; + vector > fg_windows_; + vector > bg_windows_; + Blob data_mean_; + vector mean_values_; + bool has_mean_file_; + bool has_mean_values_; + bool cache_images_; + vector > image_database_cache_; +}; + +} // namespace caffe + +#endif // CAFFE_DATA_LAYERS_HPP_ diff --git a/include/caffe/filler.hpp.orig b/include/caffe/filler.hpp.orig new file mode 100644 index 00000000000..006d994f75f --- /dev/null +++ b/include/caffe/filler.hpp.orig @@ -0,0 +1,291 @@ +// Fillers are random number generators that fills a blob using the specified +// algorithm. The expectation is that they are only going to be used during +// initialization time and will not involve any GPUs. + +#ifndef CAFFE_FILLER_HPP +#define CAFFE_FILLER_HPP + +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/syncedmem.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +/// @brief Fills a Blob with constant or randomly-generated data. +template +class Filler { + public: + explicit Filler(const FillerParameter& param) : filler_param_(param) {} + virtual ~Filler() {} + virtual void Fill(Blob* blob) = 0; + protected: + FillerParameter filler_param_; +}; // class Filler + + +/// @brief Fills a Blob with constant values @f$ x = 0 @f$. 
+template +class ConstantFiller : public Filler { + public: + explicit ConstantFiller(const FillerParameter& param) + : Filler(param) {} + virtual void Fill(Blob* blob) { + Dtype* data = blob->mutable_cpu_data(); + const int count = blob->count(); + const Dtype value = this->filler_param_.value(); + CHECK(count); + for (int i = 0; i < count; ++i) { + data[i] = value; + } + CHECK_EQ(this->filler_param_.sparse(), -1) + << "Sparsity not supported by this Filler."; + } +}; + +/// @brief Fills a Blob with uniformly distributed values @f$ x\sim U(a, b) @f$. +template +class UniformFiller : public Filler { + public: + explicit UniformFiller(const FillerParameter& param) + : Filler(param) {} + virtual void Fill(Blob* blob) { + CHECK(blob->count()); + caffe_rng_uniform(blob->count(), Dtype(this->filler_param_.min()), + Dtype(this->filler_param_.max()), blob->mutable_cpu_data()); + CHECK_EQ(this->filler_param_.sparse(), -1) + << "Sparsity not supported by this Filler."; + } +}; + +/// @brief Fills a Blob with Gaussian-distributed values @f$ x = a @f$. +template +class GaussianFiller : public Filler { + public: + explicit GaussianFiller(const FillerParameter& param) + : Filler(param) {} + virtual void Fill(Blob* blob) { + Dtype* data = blob->mutable_cpu_data(); + CHECK(blob->count()); + caffe_rng_gaussian(blob->count(), Dtype(this->filler_param_.mean()), + Dtype(this->filler_param_.std()), blob->mutable_cpu_data()); + int sparse = this->filler_param_.sparse(); + CHECK_GE(sparse, -1); + if (sparse >= 0) { + // Sparse initialization is implemented for "weight" blobs; i.e. matrices. + // These have num == channels == 1; width is number of inputs; height is + // number of outputs. The 'sparse' variable specifies the mean number + // of non-zero input weights for a given output. 
+ CHECK_GE(blob->num_axes(), 1); + const int num_outputs = blob->shape(0); + Dtype non_zero_probability = Dtype(sparse) / Dtype(num_outputs); + rand_vec_.reset(new SyncedMemory(blob->count() * sizeof(int))); + int* mask = reinterpret_cast(rand_vec_->mutable_cpu_data()); + caffe_rng_bernoulli(blob->count(), non_zero_probability, mask); + for (int i = 0; i < blob->count(); ++i) { + data[i] *= mask[i]; + } + } + } + + protected: + shared_ptr rand_vec_; +}; + +/** @brief Fills a Blob with values @f$ x \in [0, 1] @f$ + * such that @f$ \forall i \sum_j x_{ij} = 1 @f$. + */ +template +class PositiveUnitballFiller : public Filler { + public: + explicit PositiveUnitballFiller(const FillerParameter& param) + : Filler(param) {} + virtual void Fill(Blob* blob) { + Dtype* data = blob->mutable_cpu_data(); + DCHECK(blob->count()); + caffe_rng_uniform(blob->count(), 0, 1, blob->mutable_cpu_data()); + // We expect the filler to not be called very frequently, so we will + // just use a simple implementation + int dim = blob->count() / blob->num(); + CHECK(dim); + for (int i = 0; i < blob->num(); ++i) { + Dtype sum = 0; + for (int j = 0; j < dim; ++j) { + sum += data[i * dim + j]; + } + for (int j = 0; j < dim; ++j) { + data[i * dim + j] /= sum; + } + } + CHECK_EQ(this->filler_param_.sparse(), -1) + << "Sparsity not supported by this Filler."; + } +}; + +/** + * @brief Fills a Blob with values @f$ x \sim U(-a, +a) @f$ where @f$ a @f$ + * is set inversely proportional to the number of incoming nodes. + * + * A Filler based on the paper [Bengio and Glorot 2010]: Understanding + * the difficulty of training deep feedforward neuralnetworks, but does not + * use the fan_out value. + * + * It fills the incoming matrix by randomly sampling uniform data from + * [-scale, scale] where scale = sqrt(3 / fan_in) where fan_in is the number + * of input nodes. You should make sure the input blob has shape (num, a, b, c) + * where a * b * c = fan_in. 
+ * + * TODO(dox): make notation in above comment consistent with rest & use LaTeX. + */ +template +class XavierFiller : public Filler { + public: + explicit XavierFiller(const FillerParameter& param) + : Filler(param) {} + virtual void Fill(Blob* blob) { + CHECK(blob->count()); + int fan_in = blob->count() / blob->num(); + Dtype scale = sqrt(Dtype(3) / fan_in); + caffe_rng_uniform(blob->count(), -scale, scale, + blob->mutable_cpu_data()); + CHECK_EQ(this->filler_param_.sparse(), -1) + << "Sparsity not supported by this Filler."; + } +}; + +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46:include/caffe/filler.hpp +/** + * @brief Fills a Blob with values @f$ x \sim N(0, \sigma^2) @f$ where + * @f$ \sigma^2 @f$ is set inversely proportional to number of incoming + * nodes, outgoing nodes, or their average. + * + * A Filler based on the paper [He, Zhang, Ren and Sun 2015]: Specifically + * accounts for ReLU nonlinearities. + * + * Aside: for another perspective on the scaling factor, see the derivation of + * [Saxe, McClelland, and Ganguli 2013 (v3)]. + * + * It fills the incoming matrix by randomly sampling Gaussian data with std = + * sqrt(2 / n) where n is the fan_in, fan_out, or their average, depending on + * the variance_norm option. You should make sure the input blob has shape (num, + * a, b, c) where a * b * c = fan_in and num * b * c = fan_out. Note that this + * is currently not the case for inner product layers. 
+ */ +template +class MSRAFiller : public Filler { + public: + explicit MSRAFiller(const FillerParameter& param) + : Filler(param) {} + virtual void Fill(Blob* blob) { + CHECK(blob->count()); + int fan_in = blob->count() / blob->num(); + int fan_out = blob->count() / blob->channels(); + Dtype n = fan_in; // default to fan_in + if (this->filler_param_.variance_norm() == + FillerParameter_VarianceNorm_AVERAGE) { + n = (fan_in + fan_out) / Dtype(2); + } else if (this->filler_param_.variance_norm() == + FillerParameter_VarianceNorm_FAN_OUT) { + n = fan_out; + } + Dtype std = sqrt(Dtype(2) / n); + caffe_rng_gaussian(blob->count(), Dtype(0), std, + blob->mutable_cpu_data()); +======= + +/*! +@brief Fills a Blob with coefficients for bilinear interpolation. + +A common use case is with the DeconvolutionLayer acting as upsampling. +You can upsample a feature map with shape of (B, C, H, W) by any integer factor +using the following proto. +\code +layer { + name: "upsample", type: "Deconvolution" + bottom: "{{bottom_name}}" top: "{{top_name}}" + convolution_param { + kernel_size: {{2 * factor - factor % 2}} stride: {{factor}} + num_output: {{C}} group: {{C}} + pad: {{ceil((factor - 1) / 2.)}} + weight_filler: { type: "bilinear" } bias_term: false + } + param { lr_mult: 0 decay_mult: 0 } +} +\endcode +Please use this by replacing `{{}}` with your values. By specifying +`num_output: {{C}} group: {{C}}`, it behaves as +channel-wise convolution. The filter shape of this deconvolution layer will be +(C, 1, K, K) where K is `kernel_size`, and this filler will set a (K, K) +interpolation kernel for every channel of the filter identically. The resulting +shape of the top feature map will be (B, C, factor * H, factor * W). +Note that the learning rate and the +weight decay are set to 0 in order to keep coefficient values of bilinear +interpolation unchanged during training. If you apply this to an image, this +operation is equivalent to the following call in Python with Scikit.Image. 
+\code{.py} +out = skimage.transform.rescale(img, factor, mode='constant', cval=0) +\endcode + */ +template +class BilinearFiller : public Filler { + public: + explicit BilinearFiller(const FillerParameter& param) + : Filler(param) {} + virtual void Fill(Blob* blob) { + CHECK_EQ(blob->num_axes(), 4) << "Blob must be 4 dim."; + CHECK_EQ(blob->width(), blob->height()) << "Filter must be square"; + Dtype* data = blob->mutable_cpu_data(); + int f = ceil(blob->width() / 2.); + float c = (2 * f - 1 - f % 2) / (2. * f); + for (int i = 0; i < blob->count(); ++i) { + float x = i % blob->width(); + float y = (i / blob->width()) % blob->height(); + data[i] = (1 - fabs(x / f - c)) * (1 - fabs(y / f - c)); + } +>>>>>>> triplet data generation and network update:include/caffe/filler.hpp.orig + CHECK_EQ(this->filler_param_.sparse(), -1) + << "Sparsity not supported by this Filler."; + } +}; + +/** + * @brief Get a specific filler from the specification given in FillerParameter. + * + * Ideally this would be replaced by a factory pattern, but we will leave it + * this way for now. 
+ */ +template +Filler* GetFiller(const FillerParameter& param) { + const std::string& type = param.type(); + if (type == "constant") { + return new ConstantFiller(param); + } else if (type == "gaussian") { + return new GaussianFiller(param); + } else if (type == "positive_unitball") { + return new PositiveUnitballFiller(param); + } else if (type == "uniform") { + return new UniformFiller(param); + } else if (type == "xavier") { + return new XavierFiller(param); +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 + } else if (type == "msra") { + return new MSRAFiller(param); +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46:include/caffe/filler.hpp +======= + } else if (type == "bilinear") { + return new BilinearFiller(param); +======= +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update:include/caffe/filler.hpp.orig + } else { + CHECK(false) << "Unknown filler name: " << param.type(); + } + return (Filler*)(NULL); +} + +} // namespace caffe + +#endif // CAFFE_FILLER_HPP_ diff --git a/include/caffe/loss_layers.hpp b/include/caffe/loss_layers.hpp index 8d41af34e88..646a6b9640f 100644 --- a/include/caffe/loss_layers.hpp +++ b/include/caffe/loss_layers.hpp @@ -216,6 +216,76 @@ class ContrastiveLossLayer : public LossLayer { Blob summer_vec_; // tmp storage for gpu forward pass }; +<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da +======= +template +class TripletLossLayer : public LossLayer { + public: + explicit TripletLossLayer(const LayerParameter& param) + : LossLayer(param), diff_() {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline int ExactNumBottomBlobs() const { return 4; } + virtual inline const char* type() const { return "TripletLoss"; } + /** + * Unlike most loss layers, in the TripletLossLayer we can backpropagate + * to the first three inputs. 
+ */ + virtual inline bool AllowForceBackward(const int bottom_index) const { + return bottom_index != 3; + } + + protected: + /// @copydoc TripletLossLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the Triplet error gradient w.r.t. the inputs. + * + * Computes the gradients with respect to the two input vectors (bottom[0] and + * bottom[1]), but not the similarity label (bottom[2]). + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) + * @param propagate_down see Layer::Backward. 
+ * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times 1 \times 1) @f$ + * the features @f$a@f$; Backward fills their diff with + * gradients if propagate_down[0] + * -# @f$ (N \times C \times 1 \times 1) @f$ + * the features @f$b@f$; Backward fills their diff with gradients if + * propagate_down[1] + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + Blob diff_; // cached for backward pass + Blob diff_pos; + Blob diff_neg; + Blob dist_sq_; // cached for backward pass + Blob dist_sq_pos; + Blob dist_sq_neg; + Blob diff_sq_; // tmp storage for gpu forward pass + Blob diff_sq_pos; + Blob diff_sq_neg; + Blob summer_vec_; // tmp storage for gpu forward pass +}; + +>>>>>>> triplet data generation and network update /** * @brief Computes the Euclidean (L2) loss @f$ * E = \frac{1}{2N} \sum\limits_{n=1}^N \left| \left| \hat{y}_n - y_n diff --git a/include/caffe/loss_layers.hpp.orig b/include/caffe/loss_layers.hpp.orig index 11d86403ad6..dbc25b1e7ab 100644 --- a/include/caffe/loss_layers.hpp.orig +++ b/include/caffe/loss_layers.hpp.orig @@ -224,7 +224,7 @@ class TripletLossLayer : public LossLayer { virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); -<<<<<<< 9a40f059e6e96f739ad502aae165c4bfdbf1f369 +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 virtual inline int ExactNumBottomBlobs() const { return 2; } ======= virtual inline int ExactNumBottomBlobs() const { return 4; } @@ -235,7 +235,7 @@ class TripletLossLayer : public LossLayer { * to the first three inputs. 
*/ virtual inline bool AllowForceBackward(const int bottom_index) const { -<<<<<<< 9a40f059e6e96f739ad502aae165c4bfdbf1f369 +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 return bottom_index != 1; ======= return bottom_index != 3; @@ -686,6 +686,8 @@ class SigmoidCrossEntropyLossLayer : public LossLayer { /// @copydoc SigmoidCrossEntropyLossLayer virtual void Forward_cpu(const vector*>& bottom, const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); /** * @brief Computes the sigmoid cross-entropy loss error gradient w.r.t. the diff --git a/include/caffe/python_layer.hpp b/include/caffe/python_layer.hpp index 19cf18c9742..7449c40b8c6 100644 --- a/include/caffe/python_layer.hpp +++ b/include/caffe/python_layer.hpp @@ -25,7 +25,6 @@ class PythonLayer : public Layer { throw; } } - virtual void Reshape(const vector*>& bottom, const vector*>& top) { try { @@ -36,6 +35,10 @@ class PythonLayer : public Layer { } } + virtual inline bool ShareInParallel() const { + return this->layer_param_.python_param().share_in_parallel(); + } + virtual inline const char* type() const { return "Python"; } protected: diff --git a/include/caffe/vision_layers.hpp.orig b/include/caffe/vision_layers.hpp.orig new file mode 100644 index 00000000000..014ebc22dc7 --- /dev/null +++ b/include/caffe/vision_layers.hpp.orig @@ -0,0 +1,549 @@ +#ifndef CAFFE_VISION_LAYERS_HPP_ +#define CAFFE_VISION_LAYERS_HPP_ + +#include +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/common_layers.hpp" +#include "caffe/data_layers.hpp" +#include "caffe/layer.hpp" +#include "caffe/loss_layers.hpp" +#include "caffe/neuron_layers.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Abstract base class that factors out the BLAS code common to + * ConvolutionLayer and DeconvolutionLayer. 
+ */ +template +class BaseConvolutionLayer : public Layer { + public: + explicit BaseConvolutionLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline int MinBottomBlobs() const { return 1; } + virtual inline int MinTopBlobs() const { return 1; } + virtual inline bool EqualNumBottomTopBlobs() const { return true; } + + protected: + // Helper functions that abstract away the column buffer and gemm arguments. + // The last argument in forward_cpu_gemm is so that we can skip the im2col if + // we just called weight_cpu_gemm with the same input. + void forward_cpu_gemm(const Dtype* input, const Dtype* weights, + Dtype* output, bool skip_im2col = false); + void forward_cpu_bias(Dtype* output, const Dtype* bias); + void backward_cpu_gemm(const Dtype* input, const Dtype* weights, + Dtype* output); + void weight_cpu_gemm(const Dtype* input, const Dtype* output, Dtype* + weights); + void backward_cpu_bias(Dtype* bias, const Dtype* input); + +#ifndef CPU_ONLY + void forward_gpu_gemm(const Dtype* col_input, const Dtype* weights, + Dtype* output, bool skip_im2col = false); + void forward_gpu_bias(Dtype* output, const Dtype* bias); + void backward_gpu_gemm(const Dtype* input, const Dtype* weights, + Dtype* col_output); + void weight_gpu_gemm(const Dtype* col_input, const Dtype* output, Dtype* + weights); + void backward_gpu_bias(Dtype* bias, const Dtype* input); +#endif + + /// @brief The spatial dimensions of the input. + inline int input_shape(int i) { + return (*bottom_shape_)[channel_axis_ + i]; + } + // reverse_dimensions should return true iff we are implementing deconv, so + // that conv helpers know which dimensions are which. + virtual bool reverse_dimensions() = 0; + // Compute height_out_ and width_out_ from other parameters. 
+ virtual void compute_output_shape() = 0; + +<<<<<<< 6af90386551ae96a1468f15dc0bfc8ac566ea6d9 + /// @brief The spatial dimensions of a filter kernel. + Blob kernel_shape_; + /// @brief The spatial dimensions of the stride. + Blob stride_; + /// @brief The spatial dimensions of the padding. + Blob pad_; + /// @brief The spatial dimensions of the convolution input. + Blob conv_input_shape_; + /// @brief The spatial dimensions of the col_buffer. + vector col_buffer_shape_; + /// @brief The spatial dimensions of the output. + vector output_shape_; + const vector* bottom_shape_; + + int num_spatial_axes_; + int bottom_dim_; + int top_dim_; + + int channel_axis_; +======= + int kernel_h_, kernel_w_; + int stride_h_, stride_w_; +>>>>>>> triplet data generation and network update + int num_; + int channels_; + int pad_h_, pad_w_; + int height_, width_; + int group_; + int num_output_; + int height_out_, width_out_; + bool bias_term_; + bool is_1x1_; + + private: + // wrap im2col/col2im so we don't have to remember the (long) argument lists + inline void conv_im2col_cpu(const Dtype* data, Dtype* col_buff) { + im2col_cpu(data, conv_in_channels_, conv_in_height_, conv_in_width_, + kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, col_buff); + } + inline void conv_col2im_cpu(const Dtype* col_buff, Dtype* data) { + col2im_cpu(col_buff, conv_in_channels_, conv_in_height_, conv_in_width_, + kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, data); + } +#ifndef CPU_ONLY + inline void conv_im2col_gpu(const Dtype* data, Dtype* col_buff) { + im2col_gpu(data, conv_in_channels_, conv_in_height_, conv_in_width_, + kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, col_buff); + } + inline void conv_col2im_gpu(const Dtype* col_buff, Dtype* data) { + col2im_gpu(col_buff, conv_in_channels_, conv_in_height_, conv_in_width_, + kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, data); + } +#endif + + int conv_out_channels_; + int conv_in_channels_; + int 
conv_out_spatial_dim_; + int conv_in_height_; + int conv_in_width_; + int kernel_dim_; + int weight_offset_; + int col_offset_; + int output_offset_; + + Blob col_buffer_; + Blob bias_multiplier_; +}; + +/** + * @brief Convolves the input image with a bank of learned filters, + * and (optionally) adds biases. + * + * Caffe convolves by reduction to matrix multiplication. This achieves + * high-throughput and generality of input and filter dimensions but comes at + * the cost of memory for matrices. This makes use of efficiency in BLAS. + * + * The input is "im2col" transformed to a channel K' x H x W data matrix + * for multiplication with the N x K' x H x W filter matrix to yield a + * N' x H x W output matrix that is then "col2im" restored. K' is the + * input channel * kernel height * kernel width dimension of the unrolled + * inputs so that the im2col matrix has a column for each input region to + * be filtered. col2im restores the output spatial structure by rolling up + * the output channel N' columns of the output matrix. + */ +template +class ConvolutionLayer : public BaseConvolutionLayer { + public: + /** + * @param param provides ConvolutionParameter convolution_param, + * with ConvolutionLayer options: + * - num_output. The number of filters. + * - kernel_size / kernel_h / kernel_w. The filter dimensions, given by + * kernel_size for square filters or kernel_h and kernel_w for rectangular + * filters. + * - stride / stride_h / stride_w (\b optional, default 1). The filter + * stride, given by stride_size for equal dimensions or stride_h and stride_w + * for different strides. By default the convolution is dense with stride 1. + * - pad / pad_h / pad_w (\b optional, default 0). The zero-padding for + * convolution, given by pad for equal dimensions or pad_h and pad_w for + * different padding. Input padding is computed implicitly instead of + * actually padding. + * - group (\b optional, default 1). The number of filter groups. 
Group + * convolution is a method for reducing parameterization by selectively + * connecting input and output channels. The input and output channel dimensions must be divisible + * by the number of groups. For group @f$ \geq 1 @f$, the + * convolutional filters' input and output channels are separated s.t. each + * group takes 1 / group of the input channels and makes 1 / group of the + * output channels. Concretely 4 input channels, 8 output channels, and + * 2 groups separate input channels 1-2 and output channels 1-4 into the + * first group and input channels 3-4 and output channels 5-8 into the second + * group. + * - bias_term (\b optional, default true). Whether to have a bias. + * - engine: convolution has CAFFE (matrix multiplication) and CUDNN (library + * kernels + stream parallelism) engines. + */ + explicit ConvolutionLayer(const LayerParameter& param) + : BaseConvolutionLayer(param) {} + + virtual inline const char* type() const { return "Convolution"; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual inline bool reverse_dimensions() { return false; } + virtual void compute_output_shape(); +}; + +/** + * @brief Convolve the input with a bank of learned filters, and (optionally) + * add biases, treating filters and convolution parameters in the + * opposite sense as ConvolutionLayer. + * + * ConvolutionLayer computes each output value by dotting an input window with + * a filter; DeconvolutionLayer multiplies each input value by a filter + * elementwise, and sums over the resulting output windows. In other words, + * DeconvolutionLayer is ConvolutionLayer with the forward and backward passes + * reversed. 
DeconvolutionLayer reuses ConvolutionParameter for its + * parameters, but they take the opposite sense as in ConvolutionLayer (so + * padding is removed from the output rather than added to the input, and + * stride results in upsampling rather than downsampling). + */ +template +class DeconvolutionLayer : public BaseConvolutionLayer { + public: + explicit DeconvolutionLayer(const LayerParameter& param) + : BaseConvolutionLayer(param) {} + + virtual inline const char* type() const { return "Deconvolution"; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual inline bool reverse_dimensions() { return true; } + virtual void compute_output_shape(); +}; + +#ifdef USE_CUDNN +/* + * @brief cuDNN implementation of ConvolutionLayer. + * Fallback to ConvolutionLayer for CPU mode. + * + * cuDNN accelerates convolution through forward kernels for filtering and bias + * plus backward kernels for the gradient w.r.t. the filters, biases, and + * inputs. Caffe + cuDNN further speeds up the computation through forward + * parallelism across groups and backward parallelism across gradients. + * + * The CUDNN engine does not have memory overhead for matrix buffers. For many + * input and filter regimes the CUDNN engine is faster than the CAFFE engine, + * but for fully-convolutional models and large inputs the CAFFE engine can be + * faster as long as it fits in memory. 
+*/ +template +class CuDNNConvolutionLayer : public ConvolutionLayer { + public: + explicit CuDNNConvolutionLayer(const LayerParameter& param) + : ConvolutionLayer(param), handles_setup_(false) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + virtual ~CuDNNConvolutionLayer(); + + protected: + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + bool handles_setup_; + cudnnHandle_t* handle_; + cudaStream_t* stream_; + vector bottom_descs_, top_descs_; + cudnnTensorDescriptor_t bias_desc_; + cudnnFilterDescriptor_t filter_desc_; + vector conv_descs_; + int bottom_offset_, top_offset_, weight_offset_, bias_offset_; + size_t workspaceSizeInBytes; + void *workspace; +}; +#endif + +/** + * @brief A helper for image operations that rearranges image regions into + * column vectors. Used by ConvolutionLayer to perform convolution + * by matrix multiplication. + * + * TODO(dox): thorough documentation for Forward, Backward, and proto params. 
+ */ +template +class Im2colLayer : public Layer { + public: + explicit Im2colLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Im2col"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + int kernel_h_, kernel_w_; + int stride_h_, stride_w_; + int channels_; + int height_, width_; + int pad_h_, pad_w_; +}; + +// Forward declare PoolingLayer and SplitLayer for use in LRNLayer. +template class PoolingLayer; +template class SplitLayer; + +/** + * @brief Normalize the input in a local region across or within feature maps. + * + * TODO(dox): thorough documentation for Forward, Backward, and proto params. 
+ */ +template +class LRNLayer : public Layer { + public: + explicit LRNLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "LRN"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + virtual void CrossChannelForward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void CrossChannelForward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void WithinChannelForward(const vector*>& bottom, + const vector*>& top); + virtual void CrossChannelBackward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void CrossChannelBackward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void WithinChannelBackward(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + int size_; + int pre_pad_; + Dtype alpha_; + Dtype beta_; + Dtype k_; + int num_; + int channels_; + int height_; + int width_; + + // Fields used for normalization ACROSS_CHANNELS + // scale_ stores the intermediate summing results + Blob scale_; + + // Fields used for normalization WITHIN_CHANNEL + shared_ptr > split_layer_; + vector*> split_top_vec_; + shared_ptr > square_layer_; + Blob square_input_; + Blob square_output_; + vector*> square_bottom_vec_; + vector*> square_top_vec_; + shared_ptr > pool_layer_; + Blob pool_output_; + vector*> 
pool_top_vec_; + shared_ptr > power_layer_; + Blob power_output_; + vector*> power_top_vec_; + shared_ptr > product_layer_; + Blob product_input_; + vector*> product_bottom_vec_; +}; + + +/** + * @brief Pools the input image by taking the max, average, etc. within regions. + * + * TODO(dox): thorough documentation for Forward, Backward, and proto params. + */ +template +class PoolingLayer : public Layer { + public: + explicit PoolingLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Pooling"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int MinTopBlobs() const { return 1; } + // MAX POOL layers can output an extra top blob for the mask; + // others can only output the pooled inputs. + virtual inline int MaxTopBlobs() const { + return (this->layer_param_.pooling_param().pool() == + PoolingParameter_PoolMethod_MAX) ? 2 : 1; + } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + int kernel_h_, kernel_w_; + int stride_h_, stride_w_; + int pad_h_, pad_w_; + int channels_; + int height_, width_; + int pooled_height_, pooled_width_; + bool global_pooling_; + Blob rand_idx_; + Blob max_idx_; +}; + +#ifdef USE_CUDNN +/* + * @brief cuDNN implementation of PoolingLayer. + * Fallback to PoolingLayer for CPU mode. 
+*/ +template +class CuDNNPoolingLayer : public PoolingLayer { + public: + explicit CuDNNPoolingLayer(const LayerParameter& param) + : PoolingLayer(param), handles_setup_(false) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + virtual ~CuDNNPoolingLayer(); + // Currently, cuDNN does not support the extra top blob. + virtual inline int MinTopBlobs() const { return -1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + bool handles_setup_; + cudnnHandle_t handle_; + cudnnTensorDescriptor_t bottom_desc_, top_desc_; + cudnnPoolingDescriptor_t pooling_desc_; + cudnnPoolingMode_t mode_; +}; +#endif + +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +/** + * @brief Does spatial pyramid pooling on the input image + * by taking the max, average, etc. within regions + * so that the result vector of different sized + * images are of the same size. 
+ */ +template +class SPPLayer : public Layer { + public: + explicit SPPLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "SPP"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + // calculates the kernel and stride dimensions for the pooling layer, + // returns a correctly configured LayerParameter for a PoolingLayer + virtual LayerParameter GetPoolingParam(const int pyramid_level, + const int bottom_h, const int bottom_w, const SPPParameter spp_param); + + int pyramid_height_; + int bottom_h_, bottom_w_; + int num_; + int channels_; + int kernel_h_, kernel_w_; + int pad_h_, pad_w_; + bool reshaped_first_time_; + + /// the internal Split layer that feeds the pooling layers + shared_ptr > split_layer_; + /// top vector holder used in call to the underlying SplitLayer::Forward + vector*> split_top_vec_; + /// bottom vector holder used in call to the underlying PoolingLayer::Forward + vector*>*> pooling_bottom_vecs_; + /// the internal Pooling layers of different kernel sizes + vector > > pooling_layers_; + /// top vector holders used in call to the underlying PoolingLayer::Forward + vector*>*> pooling_top_vecs_; + /// pooling_outputs stores the outputs of the PoolingLayers + vector*> pooling_outputs_; + /// the internal Flatten layers that the Pooling layers feed into + vector*> flatten_layers_; + /// top vector holders used in call to the underlying FlattenLayer::Forward + vector*>*> flatten_top_vecs_; + /// flatten_outputs stores the outputs of the FlattenLayers + vector*> flatten_outputs_; + /// 
bottom vector holder used in call to the underlying ConcatLayer::Forward + vector*> concat_bottom_vec_; + /// the internal Concat layers that the Flatten layers feed into + shared_ptr > concat_layer_; +}; + +======= +>>>>>>> triplet data generation and network update +} // namespace caffe + +#endif // CAFFE_VISION_LAYERS_HPP_ diff --git a/src/caffe/data_transformer.cpp.orig b/src/caffe/data_transformer.cpp.orig index 56f97c0ae04..d621d704c8e 100644 --- a/src/caffe/data_transformer.cpp.orig +++ b/src/caffe/data_transformer.cpp.orig @@ -21,9 +21,19 @@ DataTransformer::DataTransformer(const TransformationParameter& param, CHECK_EQ(param_.mean_value_size(), 0) << "Cannot specify mean_file and mean_value at the same time"; const string& mean_file = param.mean_file(); +<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da if (Caffe::root_solver()) { LOG(INFO) << "Loading mean file from: " << mean_file; } +======= +<<<<<<< HEAD + LOG(INFO) << "Loading mean file from: " << mean_file; +======= + if (Caffe::root_solver()) { + LOG(INFO) << "Loading mean file from: " << mean_file; + } +>>>>>>> 0dbadac... triplet data generation and network update +>>>>>>> triplet data generation and network update BlobProto blob_proto; ReadProtoFromBinaryFileOrDie(mean_file.c_str(), &blob_proto); data_mean_.FromProto(blob_proto); @@ -132,8 +142,14 @@ void DataTransformer::Transform(const Datum& datum, Blob* transformed_blob) { <<<<<<< HEAD <<<<<<< HEAD +<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da <<<<<<< HEAD <<<<<<< HEAD +======= +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +>>>>>>> 0dbadac... triplet data generation and network update +>>>>>>> triplet data generation and network update // If datum is encoded, decoded and transform the cv::image. if (datum.encoded()) { <<<<<<< fa941056111876e03082cdc5695d75339ed24ed9 @@ -201,13 +217,19 @@ void DataTransformer::Transform(const Datum& datum, <<<<<<< HEAD <<<<<<< HEAD ======= +<<<<<<< HEAD >>>>>>> 083f61b... 
New triplet loss layer added(beta1 version-no test source files) ======= +<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da >>>>>>> 011aef0... restore ======= >>>>>>> 4d8130b... New triplet loss layer added(beta1 version-no test source files) ======= >>>>>>> 80a07dd... macro define in upgrade_proto +======= +>>>>>>> triplet data generation and network update +>>>>>>> 0dbadac... triplet data generation and network update +>>>>>>> triplet data generation and network update const int datum_channels = datum.channels(); const int datum_height = datum.height(); const int datum_width = datum.width(); @@ -494,8 +516,14 @@ void DataTransformer::Transform(Blob* input_blob, <<<<<<< HEAD <<<<<<< HEAD +<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da <<<<<<< HEAD <<<<<<< HEAD +======= +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +>>>>>>> 0dbadac... triplet data generation and network update +>>>>>>> triplet data generation and network update template vector DataTransformer::InferBlobShape(const Datum& datum) { if (datum.encoded()) { @@ -620,13 +648,19 @@ vector DataTransformer::InferBlobShape( <<<<<<< HEAD <<<<<<< HEAD ======= +<<<<<<< HEAD >>>>>>> 083f61b... New triplet loss layer added(beta1 version-no test source files) ======= +<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da >>>>>>> 011aef0... restore ======= >>>>>>> 4d8130b... New triplet loss layer added(beta1 version-no test source files) ======= >>>>>>> 80a07dd... macro define in upgrade_proto +======= +>>>>>>> triplet data generation and network update +>>>>>>> 0dbadac... 
triplet data generation and network update +>>>>>>> triplet data generation and network update template void DataTransformer::InitRand() { const bool needs_rand = param_.mirror() || diff --git a/src/caffe/layers/concat_layer.cu.orig b/src/caffe/layers/concat_layer.cu.orig new file mode 100644 index 00000000000..d3ce34709ec --- /dev/null +++ b/src/caffe/layers/concat_layer.cu.orig @@ -0,0 +1,96 @@ +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +__global__ void Concat(const int nthreads, const Dtype* in_data, + const bool forward, const int num_concats, const int concat_size, + const int top_concat_axis, const int bottom_concat_axis, + const int offset_concat_axis, Dtype* out_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int total_concat_size = concat_size * bottom_concat_axis; + const int concat_num = index / total_concat_size; + const int concat_index = index % total_concat_size; + const int top_index = concat_index + + (concat_num * top_concat_axis + offset_concat_axis) * concat_size; + if (forward) { + out_data[top_index] = in_data[index]; + } else { + out_data[index] = in_data[top_index]; + } + } +} + +template +void ConcatLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + Dtype* top_data = top[0]->mutable_gpu_data(); + int offset_concat_axis = 0; + const int top_concat_axis = top[0]->shape(concat_axis_); + const bool kForward = true; + for (int i = 0; i < bottom.size(); ++i) { + const Dtype* bottom_data = bottom[i]->gpu_data(); + const int bottom_concat_axis = bottom[i]->shape(concat_axis_); + const int bottom_concat_size = bottom_concat_axis * concat_input_size_; + const int nthreads = bottom_concat_size * num_concats_; + Concat // NOLINT_NEXT_LINE(whitespace/operators) + <<>>( + nthreads, bottom_data, kForward, num_concats_, concat_input_size_, + top_concat_axis, bottom_concat_axis, offset_concat_axis, top_data); + 
offset_concat_axis += bottom_concat_axis; + } +} + +template +void ConcatLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + const Dtype* top_diff = top[0]->gpu_diff(); + int offset_concat_axis = 0; + const int top_concat_axis = top[0]->shape(concat_axis_); + const bool kForward = false; + for (int i = 0; i < bottom.size(); ++i) { + if (!propagate_down[i]) { continue; } + Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); + const int bottom_concat_axis = bottom[i]->shape(concat_axis_); +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46:src/caffe/layers/concat_layer.cu +======= +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 + if (propagate_down[i]) { + Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); + const int bottom_concat_size = bottom_concat_axis * concat_input_size_; + const int nthreads = bottom_concat_size * num_concats_; + Concat // NOLINT_NEXT_LINE(whitespace/operators) + <<>>( + nthreads, top_diff, kForward, num_concats_, concat_input_size_, + top_concat_axis, bottom_concat_axis, offset_concat_axis, bottom_diff); +======= + for (int n = 0; n < num_concats_; ++n) { + caffe_copy(bottom_concat_axis * concat_input_size_, top_diff + + (n * top_concat_axis + offset_concat_axis) * concat_input_size_, + bottom_diff + n * bottom_concat_axis * concat_input_size_); +>>>>>>> triplet data generation and network update + } +======= +>>>>>>> triplet data generation and network update:src/caffe/layers/concat_layer.cu.orig + const int bottom_concat_size = bottom_concat_axis * concat_input_size_; + const int nthreads = bottom_concat_size * num_concats_; + Concat // NOLINT_NEXT_LINE(whitespace/operators) + <<>>( + nthreads, top_diff, kForward, num_concats_, concat_input_size_, + top_concat_axis, bottom_concat_axis, offset_concat_axis, bottom_diff); +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46:src/caffe/layers/concat_layer.cu +======= +>>>>>>> restore +>>>>>>> 
triplet data generation and network update:src/caffe/layers/concat_layer.cu.orig + offset_concat_axis += bottom_concat_axis; + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(ConcatLayer); + +} // namespace caffe diff --git a/src/caffe/layers/triplet_loss_layer.cpp.orig.orig b/src/caffe/layers/triplet_loss_layer.cpp.orig.orig new file mode 100644 index 00000000000..972e6eb988c --- /dev/null +++ b/src/caffe/layers/triplet_loss_layer.cpp.orig.orig @@ -0,0 +1,459 @@ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/loss_layers.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +void TripletLossLayer::LayerSetUp( + const vector*>& bottom, const vector*>& top) { + LossLayer::LayerSetUp(bottom, top); + // number of triplet in a batch + int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); + // dimension of each descriptor + int dim = bottom[0]->count()/bottom[0]->num(); + CHECK_EQ(bottom[0]->channels(), dim); + CHECK_EQ(bottom[0]->height(), 1); + CHECK_EQ(bottom[0]->width(), 1); + CHECK_EQ(bottom[1]->channels(), 1); + CHECK_EQ(bottom[1]->height(), 1); + CHECK_EQ(bottom[1]->width(), 1); + // In each set, we have: + // the descriptor of reference sample, closest sample, and negative samples + // number of sets in the whole batch + int num_set = bottom[0]->num()/(2 + num_triplets); + dist_sq_.Reshape(num_set, 1, 1, 1); + diff_pos.Reshape(num_set, dim, 1, 1); + dist_sq_pos.Reshape(num_set, 1, 1, 1); + diff_neg.Reshape(num_set, dim, 1, 1); + dist_sq_neg.Reshape(num_set, 1, 1, 1); + // vector of ones used to sum along channels + summer_vec_.Reshape(bottom[0]->channels(), 1, 1, 1); + for (int i = 0; i < bottom[0]->channels(); ++i) + summer_vec_.mutable_cpu_data()[i] = Dtype(1); +} + +template +void TripletLossLayer::Forward_cpu( + const vector*>& bottom, + const vector*>& top) { + Dtype margin = this->layer_param_.triplet_loss_param().margin(); + Dtype losstype = 
this->layer_param_.triplet_loss_param().losstype(); + int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); + CHECK_EQ(bottom[0]->num()%(2 + num_triplets), 0); + Dtype loss(0.0); +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a + int dim = bottom[0]->count()/bottom[0]->num(); + int num_set = bottom[0]->num()/(2 + num_triplets); + if (losstype == 0) { + for (int i = 0; i < num_set; ++i) { + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive + diff_pos.mutable_cpu_data() + i*dim); // reference-pose_close + // Loss component calculated from reference and close one + dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(dim, + diff_pos.cpu_data() + i*dim, diff_pos.cpu_data() + i*dim); + // a b is a similar pair for pair wise + // loss accumulated by the pair wise part + loss += dist_sq_pos.cpu_data()[i]; + for (int triplet = 0; triplet < num_triplets; ++triplet) { + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i]; + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + i*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + i*dim, diff_neg.cpu_data() + i*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg.cpu_data()[i]; + // loss accumulated accumulated by the triplet part + loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); + } +======= + + // Loss component calculated from ab + for (int i = 0; i < bottom[0]->num(); ++i) { + dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(channels, + diff_pos.cpu_data() + (i*channels), diff_pos.cpu_data() + 
(i*channels)); + // ab is a similar pair + dist_sq_.mutable_cpu_data()[i] += dist_sq_pos.cpu_data()[i]; + // Loss component calculated from ac + dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(channels, + diff_neg.cpu_data() + (i*channels), diff_neg.cpu_data() + (i*channels)); + // ac is a dissimilar pair + dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg.cpu_data()[i]; + loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); +>>>>>>> restore + } + loss = loss / static_cast(num_set) / Dtype(2); + top[0]->mutable_cpu_data()[0] = loss; + } else { + for (int i = 0; i < num_set; ++i) { + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive + diff_pos.mutable_cpu_data() + i*dim); // reference-pose_close + // Loss component calculated from reference and close one + dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(dim, + diff_pos.cpu_data() + i*dim, diff_pos.cpu_data() + i*dim); + // a b is a similar pair for pair wise + // loss accumulated by the pair wise part + loss += dist_sq_pos.cpu_data()[i]; +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 + dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; + dist_sq_.mutable_cpu_data()[i] += margin; + for (int triplet = 0; triplet < num_triplets; ++triplet) { +======= + for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; + dist_sq_.mutable_cpu_data()[i] += margin; +>>>>>>> triplet data generation and network update + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + i*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + i*dim, diff_neg.cpu_data() + i*dim); + // a and negative[triplet] is a 
dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[i] = 1 - \ + dist_sq_neg.cpu_data()[i] / dist_sq_.mutable_cpu_data()[i]; + // loss accumulated accumulated by the triplet part + loss += std::max(dist_sq_.cpu_data()[i], Dtype(0.0)); + } + } + loss = loss / static_cast(num_set) / Dtype(2); + top[0]->mutable_cpu_data()[0] = loss; + } +} + +template +void TripletLossLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + Dtype margin = this->layer_param_.triplet_loss_param().margin(); +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a + Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); + int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); + int dim = bottom[0]->count()/bottom[0]->num(); + int num_set = bottom[0]->num()/(2 + num_triplets); + if (losstype == 0) { + // BP for feat1(extracted from reference) + for (int i = 0; i < 1; ++i) { + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + // the pair part + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] -= 
dist_sq_neg.cpu_data()[j]; + // Loss component calculated from negative part + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // similar pair in triplet + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(1.0), + bout + (2 + num_triplets)*j*dim); + // dissimilar pair in triplet + caffe_cpu_axpby( + dim, + -alpha, + diff_neg.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + // BP for feat2(extracted from the closest sample) + for (int i = 1; i < 2; ++i) { + if (propagate_down[0]) { + const Dtype sign = -1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + // the pair part + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // similar pair in triplet + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + // BP for negative feature used in the num_triplets triplet part + for (int i = 2; i < 2 + num_triplets; ++i) { + if (propagate_down[0]) { + const Dtype sign = 1; 
+ const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + i)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // dissimilar pairs +======= + for (int i = 1; i < 3; ++i) { +// there must be further check to ensure the gradient calc + if (propagate_down[i]) { + const Dtype sign = (i == 2) ? 1 : -1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(bottom[i]->num()); + int num = bottom[i]->num(); + int channels = bottom[i]->channels(); + for (int j = 0; j < num; ++j) { + Dtype* bout = bottom[i]->mutable_cpu_diff(); + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // similar pairs +>>>>>>> restore + caffe_cpu_axpby( + dim, + alpha, + diff_neg.cpu_data() + (j*dim), + Dtype(0.0), +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a + bout + ((2 + num_triplets)*j + i)*dim); + } else { + caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } else { + for (int i = 0; i < 1; ++i) { + // BP for data1(feat1) + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + // the pair part + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 
+ num_triplets)*j + i)*dim); +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { +======= + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; +>>>>>>> triplet data generation and network update + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] = 1 - \ + dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + caffe_cpu_axpby( + dim, + alpha*dist_sq_neg.mutable_cpu_data()[j]\ + /((dist_sq_pos.mutable_cpu_data()[j]+margin)\ + *(dist_sq_pos.mutable_cpu_data()[j]+margin)), + diff_pos.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + caffe_cpu_axpby( + dim, + -alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), + diff_neg.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + for (int i = 1; i < 2; ++i) { + // BP for positive data(feat2) + if (propagate_down[0]) { + const Dtype sign = -1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + // the pair part + caffe_cpu_axpby( + dim, + alpha, + 
diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { +======= + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; +>>>>>>> triplet data generation and network update + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] = 1 - \ + dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + caffe_cpu_axpby( + dim, + alpha*dist_sq_neg.mutable_cpu_data()[j]\ + /((dist_sq_pos.mutable_cpu_data()[j]+margin)\ + *(dist_sq_pos.mutable_cpu_data()[j]+margin)), + diff_pos.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + for (int i = 2; i < 2 + num_triplets; ++i) { + // BP for negative data(feat3) + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; + // Loss component calculated from negative part + 
caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + i)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] = 1 - \ + dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { +======= + bout + (j*channels)); + // dissimilar pairs +>>>>>>> restore + caffe_cpu_axpby( + dim, + alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), + diff_neg.cpu_data() + (j*dim), + Dtype(0.0), +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a + bout + ((2 + num_triplets)*j + i)*dim); + } else { + caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); +======= + bout + (j*channels)); + } else { + caffe_set(channels, Dtype(0), bout + (j*channels)); +>>>>>>> restore + } + } + } + } + } +} + +#ifdef CPU_ONLY +STUB_GPU(TripletLossLayer); +#endif + +INSTANTIATE_CLASS(TripletLossLayer); +REGISTER_LAYER_CLASS(TripletLoss); + +} // namespace caffe diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp index a18ee63818e..00eb1d01ab4 100644 --- a/src/caffe/net.cpp +++ b/src/caffe/net.cpp @@ -5,12 +5,15 @@ #include #include +#include "hdf5.h" + #include "caffe/common.hpp" #include "caffe/layer.hpp" #include "caffe/net.hpp" +#include "caffe/parallel.hpp" #include "caffe/proto/caffe.pb.h" +#include "caffe/util/hdf5.hpp" #include "caffe/util/insert_splits.hpp" -#include "caffe/util/io.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/util/upgrade_proto.hpp" @@ -19,12 +22,14 @@ namespace caffe { template -Net::Net(const NetParameter& param) { +Net::Net(const NetParameter& param, const Net* root_net) + : root_net_(root_net) { Init(param); } template -Net::Net(const string& 
param_file, Phase phase) { +Net::Net(const string& param_file, Phase phase, const Net* root_net) + : root_net_(root_net) { NetParameter param; ReadNetParamsFromTextFileOrDie(param_file, ¶m); param.mutable_state()->set_phase(phase); @@ -33,14 +38,18 @@ Net::Net(const string& param_file, Phase phase) { template void Net::Init(const NetParameter& in_param) { + CHECK(Caffe::root_solver() || root_net_) + << "root_net_ needs to be set for all non-root solvers"; // Set phase from the state. phase_ = in_param.state().phase(); // Filter layers based on their include/exclude rules and // the current NetState. NetParameter filtered_param; FilterNet(in_param, &filtered_param); - LOG(INFO) << "Initializing net from parameters: " << std::endl - << filtered_param.DebugString(); + if (Caffe::root_solver()) { + LOG(INFO) << "Initializing net from parameters: " << std::endl + << filtered_param.DebugString(); + } // Create a copy of filtered_param with splits added where necessary. NetParameter param; InsertSplits(filtered_param, ¶m); @@ -64,7 +73,8 @@ void Net::Init(const NetParameter& in_param) { const int layer_id = -1; // inputs have fake layer ID -1 AppendTop(param, layer_id, input_id, &available_blobs, &blob_name_to_idx); } - DLOG(INFO) << "Memory required for data: " << memory_used_ * sizeof(Dtype); + DLOG_IF(INFO, Caffe::root_solver()) + << "Memory required for data: " << memory_used_ * sizeof(Dtype); // For each layer, set up its input and output bottom_vecs_.resize(param.layer_size()); top_vecs_.resize(param.layer_size()); @@ -73,6 +83,9 @@ void Net::Init(const NetParameter& in_param) { top_id_vecs_.resize(param.layer_size()); bottom_need_backward_.resize(param.layer_size()); for (int layer_id = 0; layer_id < param.layer_size(); ++layer_id) { + // For non-root solvers, whether this layer is shared from root_net_. + bool share_from_root = !Caffe::root_solver() + && root_net_->layers_[layer_id]->ShareInParallel(); // Inherit phase from net if unset. 
if (!param.layer(layer_id).has_phase()) { param.mutable_layer(layer_id)->set_phase(phase_); @@ -86,8 +99,11 @@ void Net::Init(const NetParameter& in_param) { << "either 0 or bottom_size times "; } layers_.push_back(LayerRegistry::CreateLayer(layer_param)); +>>>>>>> triplet data generation and network update layer_names_.push_back(layer_param.name()); - LOG(INFO) << "Creating Layer " << layer_param.name(); + if (Caffe::root_solver()) { + LOG(INFO) << "Creating Layer " << layer_param.name(); + } bool need_backward = false; // Figure out this layer's input and output @@ -117,20 +133,42 @@ void Net::Init(const NetParameter& in_param) { } } // After this layer is connected, set it up. - LOG(INFO) << "Setting up " << layer_names_[layer_id]; - layers_[layer_id]->SetUp(bottom_vecs_[layer_id], top_vecs_[layer_id]); + if (share_from_root) { + // Set up size of top blobs using root_net_ + const vector*>& base_top = root_net_->top_vecs_[layer_id]; + const vector*>& this_top = this->top_vecs_[layer_id]; + for (int top_id = 0; top_id < base_top.size(); ++top_id) { + this_top[top_id]->ReshapeLike(*base_top[top_id]); + LOG(INFO) << "Created top blob " << top_id << " (shape: " + << this_top[top_id]->shape_string() << ") for shared layer " + << layer_param.name(); + } + } else { + layers_[layer_id]->SetUp(bottom_vecs_[layer_id], top_vecs_[layer_id]); + } + if (Caffe::root_solver()) { + LOG(INFO) << "Setting up " << layer_names_[layer_id]; + } for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) { if (blob_loss_weights_.size() <= top_id_vecs_[layer_id][top_id]) { blob_loss_weights_.resize(top_id_vecs_[layer_id][top_id] + 1, Dtype(0)); } blob_loss_weights_[top_id_vecs_[layer_id][top_id]] = layer->loss(top_id); - LOG(INFO) << "Top shape: " << top_vecs_[layer_id][top_id]->shape_string(); + if (Caffe::root_solver()) { + LOG(INFO) << "Top shape: " + << top_vecs_[layer_id][top_id]->shape_string(); + } if (layer->loss(top_id)) { - LOG(INFO) << " with loss weight " << 
layer->loss(top_id); + if (Caffe::root_solver()) { + LOG(INFO) << " with loss weight " << layer->loss(top_id); + } } memory_used_ += top_vecs_[layer_id][top_id]->count(); } - DLOG(INFO) << "Memory required for data: " << memory_used_ * sizeof(Dtype); + if (Caffe::root_solver()) { + DLOG(INFO) << "Memory required for data: " + << memory_used_ * sizeof(Dtype); + } const int param_size = layer_param.param_size(); const int num_param_blobs = layers_[layer_id]->blobs().size(); CHECK_LE(param_size, num_param_blobs) @@ -139,7 +177,7 @@ void Net::Init(const NetParameter& in_param) { for (int param_id = 0; param_id < num_param_blobs; ++param_id) { const ParamSpec* param_spec = (param_id < param_size) ? &layer_param.param(param_id) : &default_param_spec; - const bool param_need_backward = param_spec->lr_mult() > 0; + const bool param_need_backward = param_spec->lr_mult() != 0; need_backward |= param_need_backward; layers_[layer_id]->set_param_propagate_down(param_id, param_need_backward); @@ -189,10 +227,14 @@ void Net::Init(const NetParameter& in_param) { } if (!layer_contributes_loss) { layer_need_backward_[layer_id] = false; } if (layer_need_backward_[layer_id]) { - LOG(INFO) << layer_names_[layer_id] << " needs backward computation."; + if (Caffe::root_solver()) { + LOG(INFO) << layer_names_[layer_id] << " needs backward computation."; + } } else { - LOG(INFO) << layer_names_[layer_id] - << " does not need backward computation."; + if (Caffe::root_solver()) { + LOG(INFO) << layer_names_[layer_id] + << " does not need backward computation."; + } } for (int bottom_id = 0; bottom_id < bottom_vecs_[layer_id].size(); ++bottom_id) { @@ -232,7 +274,9 @@ void Net::Init(const NetParameter& in_param) { // In the end, all remaining blobs are considered output blobs. 
for (set::iterator it = available_blobs.begin(); it != available_blobs.end(); ++it) { - LOG(INFO) << "This network produces output " << *it; + if (Caffe::root_solver()) { + LOG(INFO) << "This network produces output " << *it; + } net_output_blobs_.push_back(blobs_[blob_name_to_idx[*it]].get()); net_output_blob_indices_.push_back(blob_name_to_idx[*it]); } @@ -242,10 +286,12 @@ void Net::Init(const NetParameter& in_param) { for (size_t layer_id = 0; layer_id < layer_names_.size(); ++layer_id) { layer_names_index_[layer_names_[layer_id]] = layer_id; } - GetLearningRateAndWeightDecay(); + ShareWeights(); debug_info_ = param.debug_info(); - LOG(INFO) << "Network initialization done."; - LOG(INFO) << "Memory required for data: " << memory_used_ * sizeof(Dtype); + if (Caffe::root_solver()) { + LOG(INFO) << "Network initialization done."; + LOG(INFO) << "Memory required for data: " << memory_used_ * sizeof(Dtype); + } } template @@ -284,27 +330,33 @@ bool Net::StateMeetsRule(const NetState& state, // Check whether the rule is broken due to phase. if (rule.has_phase()) { if (rule.phase() != state.phase()) { - LOG(INFO) << "The NetState phase (" << state.phase() - << ") differed from the phase (" << rule.phase() - << ") specified by a rule in layer " << layer_name; + if (Caffe::root_solver()) { + LOG(INFO) << "The NetState phase (" << state.phase() + << ") differed from the phase (" << rule.phase() + << ") specified by a rule in layer " << layer_name; + } return false; } } // Check whether the rule is broken due to min level. 
if (rule.has_min_level()) { if (state.level() < rule.min_level()) { - LOG(INFO) << "The NetState level (" << state.level() - << ") is above the min_level (" << rule.min_level() - << ") specified by a rule in layer " << layer_name; + if (Caffe::root_solver()) { + LOG(INFO) << "The NetState level (" << state.level() + << ") is above the min_level (" << rule.min_level() + << ") specified by a rule in layer " << layer_name; + } return false; } } // Check whether the rule is broken due to max level. if (rule.has_max_level()) { if (state.level() > rule.max_level()) { - LOG(INFO) << "The NetState level (" << state.level() - << ") is above the max_level (" << rule.max_level() - << ") specified by a rule in layer " << layer_name; + if (Caffe::root_solver()) { + LOG(INFO) << "The NetState level (" << state.level() + << ") is above the max_level (" << rule.max_level() + << ") specified by a rule in layer " << layer_name; + } return false; } } @@ -317,8 +369,10 @@ bool Net::StateMeetsRule(const NetState& state, if (rule.stage(i) == state.stage(j)) { has_stage = true; } } if (!has_stage) { - LOG(INFO) << "The NetState did not contain stage '" << rule.stage(i) - << "' specified by a rule in layer " << layer_name; + if (Caffe::root_solver()) { + LOG(INFO) << "The NetState did not contain stage '" << rule.stage(i) + << "' specified by a rule in layer " << layer_name; + } return false; } } @@ -331,8 +385,10 @@ bool Net::StateMeetsRule(const NetState& state, if (rule.not_stage(i) == state.stage(j)) { has_stage = true; } } if (has_stage) { - LOG(INFO) << "The NetState contained a not_stage '" << rule.not_stage(i) - << "' specified by a rule in layer " << layer_name; + if (Caffe::root_solver()) { + LOG(INFO) << "The NetState contained a not_stage '" << rule.not_stage(i) + << "' specified by a rule in layer " << layer_name; + } return false; } } @@ -354,20 +410,25 @@ void Net::AppendTop(const NetParameter& param, const int layer_id, if (blob_name_to_idx && layer_param && 
layer_param->bottom_size() > top_id && blob_name == layer_param->bottom(top_id)) { // In-place computation - LOG(INFO) << layer_param->name() << " -> " << blob_name << " (in-place)"; + if (Caffe::root_solver()) { + LOG(INFO) << layer_param->name() << " -> " << blob_name << " (in-place)"; + } top_vecs_[layer_id].push_back(blobs_[(*blob_name_to_idx)[blob_name]].get()); top_id_vecs_[layer_id].push_back((*blob_name_to_idx)[blob_name]); } else if (blob_name_to_idx && blob_name_to_idx->find(blob_name) != blob_name_to_idx->end()) { // If we are not doing in-place computation but have duplicated blobs, // raise an error. - LOG(FATAL) << "Duplicate blobs produced by multiple sources."; + LOG(FATAL) << "Top blob '" << blob_name + << "' produced by multiple sources."; } else { // Normal output. - if (layer_param) { - LOG(INFO) << layer_param->name() << " -> " << blob_name; - } else { - LOG(INFO) << "Input " << top_id << " -> " << blob_name; + if (Caffe::root_solver()) { + if (layer_param) { + LOG(INFO) << layer_param->name() << " -> " << blob_name; + } else { + LOG(INFO) << "Input " << top_id << " -> " << blob_name; + } } shared_ptr > blob_pointer(new Blob()); const int blob_id = blobs_.size(); @@ -403,11 +464,13 @@ int Net::AppendBottom(const NetParameter& param, const int layer_id, const LayerParameter& layer_param = param.layer(layer_id); const string& blob_name = layer_param.bottom(bottom_id); if (available_blobs->find(blob_name) == available_blobs->end()) { - LOG(FATAL) << "Unknown blob input " << blob_name - << " (at index " << bottom_id << ") to layer " << layer_id; + LOG(FATAL) << "Unknown bottom blob '" << blob_name << "' (layer '" + << layer_param.name() << "', bottom index " << bottom_id << ")"; } const int blob_id = (*blob_name_to_idx)[blob_name]; - LOG(INFO) << layer_names_[layer_id] << " <- " << blob_name; + if (Caffe::root_solver()) { + LOG(INFO) << layer_names_[layer_id] << " <- " << blob_name; + } bottom_vecs_[layer_id].push_back(blobs_[blob_id].get()); 
bottom_id_vecs_[layer_id].push_back(blob_id); available_blobs->erase(blob_name); @@ -439,6 +502,9 @@ void Net::AppendParam(const NetParameter& param, const int layer_id, params_.push_back(layers_[layer_id]->blobs()[param_id]); param_id_vecs_[layer_id].push_back(net_param_id); param_layer_indices_.push_back(make_pair(layer_id, param_id)); + ParamSpec default_param_spec; + const ParamSpec* param_spec = (layer_param.param_size() > param_id) ? + &layer_param.param(param_id) : &default_param_spec; if (!param_size || !param_name.size() || (param_name.size() && param_names_index_.find(param_name) == param_names_index_.end())) { // This layer "owns" this parameter blob -- it is either anonymous @@ -448,6 +514,13 @@ void Net::AppendParam(const NetParameter& param, const int layer_id, if (param_name.size()) { param_names_index_[param_name] = net_param_id; } + const int learnable_param_id = learnable_params_.size(); + learnable_params_.push_back(params_[net_param_id].get()); + learnable_param_ids_.push_back(learnable_param_id); + has_params_lr_.push_back(param_spec->has_lr_mult()); + has_params_decay_.push_back(param_spec->has_decay_mult()); + params_lr_.push_back(param_spec->lr_mult()); + params_weight_decay_.push_back(param_spec->decay_mult()); } else { // Named param blob with name we've seen before: share params const int owner_net_param_id = param_names_index_[param_name]; @@ -456,9 +529,10 @@ void Net::AppendParam(const NetParameter& param, const int layer_id, param_layer_indices_[owner_net_param_id]; const int owner_layer_id = owner_index.first; const int owner_param_id = owner_index.second; - LOG(INFO) << "Sharing parameters '" << param_name << "' owned by " - << "layer '" << layer_names_[owner_layer_id] << "', param " - << "index " << owner_param_id; + LOG_IF(INFO, Caffe::root_solver()) << "Sharing parameters '" << param_name + << "' owned by " + << "layer '" << layer_names_[owner_layer_id] << "', param " + << "index " << owner_param_id; Blob* this_blob = 
layers_[layer_id]->blobs()[param_id].get(); Blob* owner_blob = layers_[owner_layer_id]->blobs()[owner_param_id].get(); @@ -467,28 +541,40 @@ void Net::AppendParam(const NetParameter& param, const int layer_id, ParamSpec_DimCheckMode_PERMISSIVE)) { // Permissive dimension checking -- only check counts are the same. CHECK_EQ(this_blob->count(), owner_blob->count()) - << "Shared parameter blobs must have the same count."; + << "Cannot share param '" << param_name << "' owned by layer '" + << layer_names_[owner_layer_id] << "' with layer '" + << layer_names_[layer_id] << "'; count mismatch. Owner layer param " + << "shape is " << owner_blob->shape_string() << "; sharing layer " + << "shape is " << this_blob->shape_string(); } else { // Strict dimension checking -- all dims must be the same. - CHECK(this_blob->shape() == owner_blob->shape()); + CHECK(this_blob->shape() == owner_blob->shape()) + << "Cannot share param '" << param_name << "' owned by layer '" + << layer_names_[owner_layer_id] << "' with layer '" + << layer_names_[layer_id] << "'; shape mismatch. Owner layer param " + << "shape is " << owner_blob->shape_string() << "; sharing layer " + << "expects shape " << this_blob->shape_string(); } - layers_[layer_id]->blobs()[param_id]->ShareData( - *layers_[owner_layer_id]->blobs()[owner_param_id]); - } -} - -template -void Net::GetLearningRateAndWeightDecay() { - LOG(INFO) << "Collecting Learning Rate and Weight Decay."; - ParamSpec default_param_spec; - for (int i = 0; i < layers_.size(); ++i) { - vector > >& layer_blobs = layers_[i]->blobs(); - for (int j = 0; j < layer_blobs.size(); ++j) { - const ParamSpec* param_spec = - (layers_[i]->layer_param().param_size() > j) ? 
- &layers_[i]->layer_param().param(j) : &default_param_spec; - params_lr_.push_back(param_spec->lr_mult()); - params_weight_decay_.push_back(param_spec->decay_mult()); + const int learnable_param_id = learnable_param_ids_[owner_net_param_id]; + learnable_param_ids_.push_back(learnable_param_id); + if (param_spec->has_lr_mult()) { + if (has_params_lr_[learnable_param_id]) { + CHECK_EQ(param_spec->lr_mult(), params_lr_[learnable_param_id]) + << "Shared param '" << param_name << "' has mismatched lr_mult."; + } else { + has_params_lr_[learnable_param_id] = true; + params_lr_[learnable_param_id] = param_spec->lr_mult(); + } + } + if (param_spec->has_decay_mult()) { + if (has_params_decay_[learnable_param_id]) { + CHECK_EQ(param_spec->decay_mult(), + params_weight_decay_[learnable_param_id]) + << "Shared param '" << param_name << "' has mismatched decay_mult."; + } else { + has_params_decay_[learnable_param_id] = true; + params_weight_decay_[learnable_param_id] = param_spec->decay_mult(); + } } } } @@ -581,8 +667,10 @@ void Net::InputDebugInfo(const int input_id) { const Blob& blob = *net_input_blobs_[input_id]; const string& blob_name = blob_names_[net_input_blob_indices_[input_id]]; const Dtype data_abs_val_mean = blob.asum_data() / blob.count(); - LOG(INFO) << " [Forward] " - << "Input " << blob_name << " data: " << data_abs_val_mean; + if (Caffe::root_solver()) { + LOG(INFO) << " [Forward] " + << "Input " << blob_name << " data: " << data_abs_val_mean; + } } template @@ -591,9 +679,12 @@ void Net::ForwardDebugInfo(const int layer_id) { const Blob& blob = *top_vecs_[layer_id][top_id]; const string& blob_name = blob_names_[top_id_vecs_[layer_id][top_id]]; const Dtype data_abs_val_mean = blob.asum_data() / blob.count(); - LOG(INFO) << " [Forward] " - << "Layer " << layer_names_[layer_id] << ", top blob " << blob_name - << " data: " << data_abs_val_mean; + if (Caffe::root_solver()) { + LOG(INFO) << " [Forward] " + << "Layer " << layer_names_[layer_id] + << ", top blob " 
<< blob_name + << " data: " << data_abs_val_mean; + } } for (int param_id = 0; param_id < layers_[layer_id]->blobs().size(); ++param_id) { @@ -601,9 +692,12 @@ void Net::ForwardDebugInfo(const int layer_id) { const int net_param_id = param_id_vecs_[layer_id][param_id]; const string& blob_name = param_display_names_[net_param_id]; const Dtype data_abs_val_mean = blob.asum_data() / blob.count(); - LOG(INFO) << " [Forward] " - << "Layer " << layer_names_[layer_id] << ", param blob " << blob_name - << " data: " << data_abs_val_mean; + if (Caffe::root_solver()) { + LOG(INFO) << " [Forward] " + << "Layer " << layer_names_[layer_id] + << ", param blob " << blob_name + << " data: " << data_abs_val_mean; + } } } @@ -615,18 +709,24 @@ void Net::BackwardDebugInfo(const int layer_id) { const Blob& blob = *bottom_vec[bottom_id]; const string& blob_name = blob_names_[bottom_id_vecs_[layer_id][bottom_id]]; const Dtype diff_abs_val_mean = blob.asum_diff() / blob.count(); - LOG(INFO) << " [Backward] " - << "Layer " << layer_names_[layer_id] << ", bottom blob " << blob_name - << " diff: " << diff_abs_val_mean; + if (Caffe::root_solver()) { + LOG(INFO) << " [Backward] " + << "Layer " << layer_names_[layer_id] + << ", bottom blob " << blob_name + << " diff: " << diff_abs_val_mean; + } } for (int param_id = 0; param_id < layers_[layer_id]->blobs().size(); ++param_id) { if (!layers_[layer_id]->param_propagate_down(param_id)) { continue; } const Blob& blob = *layers_[layer_id]->blobs()[param_id]; const Dtype diff_abs_val_mean = blob.asum_diff() / blob.count(); - LOG(INFO) << " [Backward] " - << "Layer " << layer_names_[layer_id] << ", param blob " << param_id - << " diff: " << diff_abs_val_mean; + if (Caffe::root_solver()) { + LOG(INFO) << " [Backward] " + << "Layer " << layer_names_[layer_id] + << ", param blob " << param_id + << " diff: " << diff_abs_val_mean; + } } } @@ -639,17 +739,22 @@ void Net::UpdateDebugInfo(const int param_id) { const Dtype diff_abs_val_mean = blob.asum_diff() 
/ blob.count(); if (param_owner < 0) { const Dtype data_abs_val_mean = blob.asum_data() / blob.count(); - LOG(INFO) << " [Update] Layer " << layer_name - << ", param " << param_display_name - << " data: " << data_abs_val_mean << "; diff: " << diff_abs_val_mean; + if (Caffe::root_solver()) { + LOG(INFO) << " [Update] Layer " << layer_name + << ", param " << param_display_name + << " data: " << data_abs_val_mean + << "; diff: " << diff_abs_val_mean; + } } else { const string& owner_layer_name = layer_names_[param_layer_indices_[param_owner].first]; - LOG(INFO) << " [Update] Layer " << layer_name - << ", param blob " << param_display_name - << " (owned by layer " << owner_layer_name << ", " - << "param " << param_display_names_[param_owners_[param_id]] << ")" - << " diff: " << diff_abs_val_mean; + if (Caffe::root_solver()) { + LOG(INFO) << " [Update] Layer " << layer_name + << ", param blob " << param_display_name + << " (owned by layer " << owner_layer_name << ", " << "param " + << param_display_names_[param_owners_[param_id]] << ")" + << " diff: " << diff_abs_val_mean; + } } } @@ -675,7 +780,11 @@ void Net::ShareTrainedLayersWith(const Net* other) { << "Incompatible number of blobs for layer " << source_layer_name; for (int j = 0; j < target_blobs.size(); ++j) { Blob* source_blob = source_layer->blobs()[j].get(); - CHECK(target_blobs[j]->shape() == source_blob->shape()); + CHECK(target_blobs[j]->shape() == source_blob->shape()) + << "Cannot share param " << j << " weights from layer '" + << source_layer_name << "'; shape mismatch. 
Source param shape is " + << source_blob->shape_string() << "; target param shape is " + << target_blobs[j]->shape_string(); target_blobs[j]->ShareData(*source_blob); } } @@ -696,18 +805,17 @@ void Net::Backward() { BackwardFromTo(layers_.size() - 1, 0); if (debug_info_) { Dtype asum_data = 0, asum_diff = 0, sumsq_data = 0, sumsq_diff = 0; - for (int i = 0; i < params_.size(); ++i) { - if (param_owners_[i] >= 0) { continue; } - asum_data += params_[i]->asum_data(); - asum_diff += params_[i]->asum_diff(); - sumsq_data += params_[i]->sumsq_data(); - sumsq_diff += params_[i]->sumsq_diff(); + for (int i = 0; i < learnable_params_.size(); ++i) { + asum_data += learnable_params_[i]->asum_data(); + asum_diff += learnable_params_[i]->asum_diff(); + sumsq_data += learnable_params_[i]->sumsq_data(); + sumsq_diff += learnable_params_[i]->sumsq_diff(); } const Dtype l2norm_data = std::sqrt(sumsq_data); const Dtype l2norm_diff = std::sqrt(sumsq_diff); LOG(ERROR) << " [Backward] All net params (data, diff): " - << "L1 norm = (" << asum_data << ", " << asum_diff << "); " - << "L2 norm = (" << l2norm_data << ", " << l2norm_diff << ")"; + << "L1 norm = (" << asum_data << ", " << asum_diff << "); " + << "L2 norm = (" << l2norm_data << ", " << l2norm_diff << ")"; } } @@ -739,6 +847,17 @@ void Net::CopyTrainedLayersFrom(const NetParameter& param) { CHECK_EQ(target_blobs.size(), source_layer.blobs_size()) << "Incompatible number of blobs for layer " << source_layer_name; for (int j = 0; j < target_blobs.size(); ++j) { + if (!target_blobs[j]->ShapeEquals(source_layer.blobs(j))) { + Blob source_blob; + const bool kReshape = true; + source_blob.FromProto(source_layer.blobs(j), kReshape); + LOG(FATAL) << "Cannot copy param " << j << " weights from layer '" + << source_layer_name << "'; shape mismatch. Source param shape is " + << source_blob.shape_string() << "; target param shape is " + << target_blobs[j]->shape_string() << ". 
" + << "To learn this layer's parameters from scratch rather than " + << "copying from a saved net, rename the layer."; + } const bool kReshape = false; target_blobs[j]->FromProto(source_layer.blobs(j), kReshape); } @@ -747,11 +866,72 @@ void Net::CopyTrainedLayersFrom(const NetParameter& param) { template void Net::CopyTrainedLayersFrom(const string trained_filename) { + if (trained_filename.size() >= 3 && + trained_filename.compare(trained_filename.size() - 3, 3, ".h5") == 0) { + CopyTrainedLayersFromHDF5(trained_filename); + } else { + CopyTrainedLayersFromBinaryProto(trained_filename); + } +} + +template +void Net::CopyTrainedLayersFromBinaryProto( + const string trained_filename) { NetParameter param; ReadNetParamsFromBinaryFileOrDie(trained_filename, ¶m); CopyTrainedLayersFrom(param); } +template +void Net::CopyTrainedLayersFromHDF5(const string trained_filename) { + hid_t file_hid = H5Fopen(trained_filename.c_str(), H5F_ACC_RDONLY, + H5P_DEFAULT); + CHECK_GE(file_hid, 0) << "Couldn't open " << trained_filename; + hid_t data_hid = H5Gopen2(file_hid, "data", H5P_DEFAULT); + CHECK_GE(data_hid, 0) << "Error reading weights from " << trained_filename; + int num_layers = hdf5_get_num_links(data_hid); + for (int i = 0; i < num_layers; ++i) { + string source_layer_name = hdf5_get_name_by_idx(data_hid, i); + if (!layer_names_index_.count(source_layer_name)) { + DLOG(INFO) << "Ignoring source layer " << source_layer_name; + continue; + } + int target_layer_id = layer_names_index_[source_layer_name]; + DLOG(INFO) << "Copying source layer " << source_layer_name; + vector > >& target_blobs = + layers_[target_layer_id]->blobs(); + hid_t layer_hid = H5Gopen2(data_hid, source_layer_name.c_str(), + H5P_DEFAULT); + CHECK_GE(layer_hid, 0) + << "Error reading weights from " << trained_filename; + // Check that source layer doesn't have more params than target layer + int num_source_params = hdf5_get_num_links(layer_hid); + CHECK_LE(num_source_params, target_blobs.size()) + << 
"Incompatible number of blobs for layer " << source_layer_name; + for (int j = 0; j < target_blobs.size(); ++j) { + ostringstream oss; + oss << j; + string dataset_name = oss.str(); + int target_net_param_id = param_id_vecs_[target_layer_id][j]; + if (!H5Lexists(layer_hid, dataset_name.c_str(), H5P_DEFAULT)) { + // Target param doesn't exist in source weights... + if (param_owners_[target_net_param_id] != -1) { + // ...but it's weight-shared in target, so that's fine. + continue; + } else { + LOG(FATAL) << "Incompatible number of blobs for layer " + << source_layer_name; + } + } + hdf5_load_nd_dataset(layer_hid, dataset_name.c_str(), 0, kMaxBlobAxes, + target_blobs[j].get()); + } + H5Gclose(layer_hid); + } + H5Gclose(data_hid); + H5Fclose(file_hid); +} + template void Net::ToProto(NetParameter* param, bool write_diff) const { param->Clear(); @@ -763,51 +943,101 @@ void Net::ToProto(NetParameter* param, bool write_diff) const { DLOG(INFO) << "Serializing " << layers_.size() << " layers"; for (int i = 0; i < layers_.size(); ++i) { LayerParameter* layer_param = param->add_layer(); - for (int j = 0; j < bottom_id_vecs_[i].size(); ++j) { - layer_param->add_bottom(blob_names_[bottom_id_vecs_[i][j]]); + layers_[i]->ToProto(layer_param, write_diff); + } +} + +template +void Net::ToHDF5(const string& filename, bool write_diff) const { + hid_t file_hid = H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, + H5P_DEFAULT); + CHECK_GE(file_hid, 0) + << "Couldn't open " << filename << " to save weights."; + hid_t data_hid = H5Gcreate2(file_hid, "data", H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT); + CHECK_GE(data_hid, 0) << "Error saving weights to " << filename << "."; + hid_t diff_hid = -1; + if (write_diff) { + diff_hid = H5Gcreate2(file_hid, "diff", H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT); + CHECK_GE(diff_hid, 0) << "Error saving weights to " << filename << "."; + } + for (int layer_id = 0; layer_id < layers_.size(); ++layer_id) { + const LayerParameter& layer_param = 
layers_[layer_id]->layer_param(); + string layer_name = layer_param.name(); + hid_t layer_data_hid = H5Gcreate2(data_hid, layer_name.c_str(), + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK_GE(layer_data_hid, 0) + << "Error saving weights to " << filename << "."; + hid_t layer_diff_hid = -1; + if (write_diff) { + layer_diff_hid = H5Gcreate2(diff_hid, layer_name.c_str(), + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + CHECK_GE(layer_diff_hid, 0) + << "Error saving weights to " << filename << "."; } - for (int j = 0; j < top_id_vecs_[i].size(); ++j) { - layer_param->add_top(blob_names_[top_id_vecs_[i][j]]); + int num_params = layers_[layer_id]->blobs().size(); + for (int param_id = 0; param_id < num_params; ++param_id) { + ostringstream dataset_name; + dataset_name << param_id; + const int net_param_id = param_id_vecs_[layer_id][param_id]; + if (param_owners_[net_param_id] == -1) { + // Only save params that own themselves + hdf5_save_nd_dataset(layer_data_hid, dataset_name.str(), + *params_[net_param_id]); + } + if (write_diff) { + // Write diffs regardless of weight-sharing + hdf5_save_nd_dataset(layer_diff_hid, dataset_name.str(), + *params_[net_param_id], true); + } } - layers_[i]->ToProto(layer_param, write_diff); + H5Gclose(layer_data_hid); + if (write_diff) { + H5Gclose(layer_diff_hid); + } + } + H5Gclose(data_hid); + if (write_diff) { + H5Gclose(diff_hid); } + H5Fclose(file_hid); } template void Net::Update() { - // First, accumulate the diffs of any shared parameters into their owner's - // diff. (Assumes that the learning rate, weight decay, etc. have already been - // accounted for in the current diff.) 
- for (int i = 0; i < params_.size(); ++i) { - if (param_owners_[i] < 0) { continue; } - if (debug_info_) { UpdateDebugInfo(i); } - const int count = params_[i]->count(); - const Dtype* this_diff; - Dtype* owner_diff; + for (int i = 0; i < learnable_params_.size(); ++i) { + learnable_params_[i]->Update(); + } +} + +template +void Net::ClearParamDiffs() { + for (int i = 0; i < learnable_params_.size(); ++i) { + Blob* blob = learnable_params_[i]; switch (Caffe::mode()) { case Caffe::CPU: - this_diff = params_[i]->cpu_diff(); - owner_diff = params_[param_owners_[i]]->mutable_cpu_diff(); - caffe_add(count, this_diff, owner_diff, owner_diff); + caffe_set(blob->count(), static_cast(0), + blob->mutable_cpu_diff()); break; case Caffe::GPU: #ifndef CPU_ONLY - this_diff = params_[i]->gpu_diff(); - owner_diff = params_[param_owners_[i]]->mutable_gpu_diff(); - caffe_gpu_add(count, this_diff, owner_diff, owner_diff); + caffe_gpu_set(blob->count(), static_cast(0), + blob->mutable_gpu_diff()); #else NO_GPU; #endif break; - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); } } - // Now, update the owned parameters. 
+} + +template +void Net::ShareWeights() { for (int i = 0; i < params_.size(); ++i) { - if (param_owners_[i] >= 0) { continue; } - if (debug_info_) { UpdateDebugInfo(i); } - params_[i]->Update(); + if (param_owners_[i] < 0) { continue; } + params_[i]->ShareData(*params_[param_owners_[i]]); + params_[i]->ShareDiff(*params_[param_owners_[i]]); } } diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index b4294777259..bd344870d8e 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -351,6 +351,10 @@ message LayerParameter { optional TanHParameter tanh_param = 127; optional ThresholdParameter threshold_param = 128; optional WindowDataParameter window_data_param = 129; +<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da +======= + optional TripletLossParameter triplet_loss_param = 137; +>>>>>>> triplet data generation and network update } // Message that stores parameters used to apply transformation @@ -434,27 +438,29 @@ message ContrastiveLossParameter { optional bool legacy_version = 2 [default = false]; } +<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da +======= +message TripletLossParameter { + //margin for negative triplet + optional float margin = 1 [default = 1.0]; +} + +>>>>>>> triplet data generation and network update message ConvolutionParameter { optional uint32 num_output = 1; // The number of outputs for the layer optional bool bias_term = 2 [default = true]; // whether to have bias terms - // Pad, kernel size, and stride are all given as a single value for equal - // dimensions in all spatial dimensions, or once per spatial dimension. - repeated uint32 pad = 3; // The padding size; defaults to 0 - repeated uint32 kernel_size = 4; // The kernel size - repeated uint32 stride = 6; // The stride; defaults to 1 - - // For 2D convolution only, the *_h and *_w versions may also be used to - // specify both spatial dimensions. 
- optional uint32 pad_h = 9 [default = 0]; // The padding height (2D only) - optional uint32 pad_w = 10 [default = 0]; // The padding width (2D only) - optional uint32 kernel_h = 11; // The kernel height (2D only) - optional uint32 kernel_w = 12; // The kernel width (2D only) - optional uint32 stride_h = 13; // The stride height (2D only) - optional uint32 stride_w = 14; // The stride width (2D only) - + // dimensions in height and width or as Y, X pairs. + optional uint32 pad = 3 [default = 0]; // The padding size (equal in Y, X) + optional uint32 pad_h = 9 [default = 0]; // The padding height + optional uint32 pad_w = 10 [default = 0]; // The padding width + optional uint32 kernel_size = 4; // The kernel size (square) + optional uint32 kernel_h = 11; // The kernel height + optional uint32 kernel_w = 12; // The kernel width optional uint32 group = 5 [default = 1]; // The group size for group conv - + optional uint32 stride = 6 [default = 1]; // The stride (equal in Y, X) + optional uint32 stride_h = 13; // The stride height + optional uint32 stride_w = 14; // The stride width optional FillerParameter weight_filler = 7; // The filler for the weight optional FillerParameter bias_filler = 8; // The filler for the bias enum Engine { @@ -463,24 +469,6 @@ message ConvolutionParameter { CUDNN = 2; } optional Engine engine = 15 [default = DEFAULT]; - - // The axis to interpret as "channels" when performing convolution. - // Preceding dimensions are treated as independent inputs; - // succeeding dimensions are treated as "spatial". - // With (N, C, H, W) inputs, and axis == 1 (the default), we perform - // N independent 2D convolutions, sliding C-channel (or (C/g)-channels, for - // groups g>1) filters across the spatial axes (H, W) of the input. - // With (N, C, D, H, W) inputs, and axis == 1, we perform - // N independent 3D convolutions, sliding (C/g)-channels - // filters across the spatial axes (D, H, W) of the input. 
- optional int32 axis = 16 [default = 1]; - - // Whether to force use of the general ND convolution, even if a specific - // implementation for blobs of the appropriate number of spatial dimensions - // is available. (Currently, there is only a 2D-specific convolution - // implementation; for input blobs with num_axes != 2, this option is - // ignored and the ND implementation will be used.) - optional bool force_nd_im2col = 17 [default = false]; } message DataParameter { @@ -603,7 +591,7 @@ message ImageDataParameter { // Specify the data source. optional string source = 1; // Specify the batch size. - optional uint32 batch_size = 4; + optional uint32 batch_size = 4 [default = 1]; // The rand_skip variable is for the data layer to skip a few data points // to avoid all asynchronous sgd clients to start at the same point. The skip // point would be set as rand_skip * rand(0,1). Note that rand_skip should not diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp index aabe0edec80..b6ebf2736ac 100644 --- a/src/caffe/solver.cpp +++ b/src/caffe/solver.cpp @@ -4,24 +4,44 @@ #include #include +#include "hdf5.h" +#include "hdf5_hl.h" + #include "caffe/net.hpp" #include "caffe/proto/caffe.pb.h" #include "caffe/solver.hpp" +#include "caffe/util/hdf5.hpp" #include "caffe/util/io.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/util/upgrade_proto.hpp" namespace caffe { +template +void Solver::SetActionFunction(ActionCallback func) { + action_request_function_ = func; +} + +template +SolverAction::Enum Solver::GetRequestedAction() { + if (action_request_function_) { + // If the external request function has been set, call it. 
+ return action_request_function_(); + } + return SolverAction::NONE; +} + template -Solver::Solver(const SolverParameter& param) - : net_() { +Solver::Solver(const SolverParameter& param, const Solver* root_solver) + : net_(), callbacks_(), root_solver_(root_solver), + requested_early_exit_(false) { Init(param); } template -Solver::Solver(const string& param_file) - : net_() { +Solver::Solver(const string& param_file, const Solver* root_solver) + : net_(), callbacks_(), root_solver_(root_solver), + requested_early_exit_(false) { SolverParameter param; ReadProtoFromTextFileOrDie(param_file, ¶m); Init(param); @@ -29,17 +49,22 @@ Solver::Solver(const string& param_file) template void Solver::Init(const SolverParameter& param) { - LOG(INFO) << "Initializing solver from parameters: " << std::endl - << param.DebugString(); + CHECK(Caffe::root_solver() || root_solver_) + << "root_solver_ needs to be set for all non-root solvers"; + LOG_IF(INFO, Caffe::root_solver()) << "Initializing solver from parameters: " + << std::endl << param.DebugString(); param_ = param; CHECK_GE(param_.average_loss(), 1) << "average_loss should be non-negative."; - if (param_.random_seed() >= 0) { + CheckSnapshotWritePermissions(); + if (Caffe::root_solver() && param_.random_seed() >= 0) { Caffe::set_random_seed(param_.random_seed()); } // Scaffolding code InitTrainNet(); - InitTestNets(); - LOG(INFO) << "Solver scaffolding done."; + if (Caffe::root_solver()) { + InitTestNets(); + LOG(INFO) << "Solver scaffolding done."; + } iter_ = 0; current_step_ = 0; } @@ -55,19 +80,22 @@ void Solver::InitTrainNet() { << "one of these fields specifying a train_net: " << field_names; NetParameter net_param; if (param_.has_train_net_param()) { - LOG(INFO) << "Creating training net specified in train_net_param."; + LOG_IF(INFO, Caffe::root_solver()) + << "Creating training net specified in train_net_param."; net_param.CopyFrom(param_.train_net_param()); } else if (param_.has_train_net()) { - LOG(INFO) << 
"Creating training net from train_net file: " - << param_.train_net(); + LOG_IF(INFO, Caffe::root_solver()) + << "Creating training net from train_net file: " << param_.train_net(); ReadNetParamsFromTextFileOrDie(param_.train_net(), &net_param); } if (param_.has_net_param()) { - LOG(INFO) << "Creating training net specified in net_param."; + LOG_IF(INFO, Caffe::root_solver()) + << "Creating training net specified in net_param."; net_param.CopyFrom(param_.net_param()); } if (param_.has_net()) { - LOG(INFO) << "Creating training net from net file: " << param_.net(); + LOG_IF(INFO, Caffe::root_solver()) + << "Creating training net from net file: " << param_.net(); ReadNetParamsFromTextFileOrDie(param_.net(), &net_param); } // Set the correct NetState. We start with the solver defaults (lowest @@ -79,11 +107,16 @@ void Solver::InitTrainNet() { net_state.MergeFrom(net_param.state()); net_state.MergeFrom(param_.train_state()); net_param.mutable_state()->CopyFrom(net_state); - net_.reset(new Net(net_param)); + if (Caffe::root_solver()) { + net_.reset(new Net(net_param)); + } else { + net_.reset(new Net(net_param, root_solver_->net_.get())); + } } template void Solver::InitTestNets() { + CHECK(Caffe::root_solver()); const bool has_net_param = param_.has_net_param(); const bool has_net_file = param_.has_net(); const int num_generic_nets = has_net_param + has_net_file; @@ -153,7 +186,12 @@ void Solver::InitTestNets() { net_params[i].mutable_state()->CopyFrom(net_state); LOG(INFO) << "Creating test net (#" << i << ") specified by " << sources[i]; - test_nets_[i].reset(new Net(net_params[i])); + if (Caffe::root_solver()) { + test_nets_[i].reset(new Net(net_params[i])); + } else { + test_nets_[i].reset(new Net(net_params[i], + root_solver_->test_nets_[i].get())); + } test_nets_[i]->set_debug_info(param_.debug_info()); } } @@ -191,7 +229,6 @@ void Solver::Step(int iters) { && (iter_ > 0 || param_.test_initialization())) { TestAll(); } - const bool display = param_.display() && 
iter_ % param_.display() == 0; net_->set_debug_info(display && param_.debug_info()); // accumulate the loss and gradient @@ -211,7 +248,8 @@ void Solver::Step(int iters) { losses[idx] = loss; } if (display) { - LOG(INFO) << "Iteration " << iter_ << ", loss = " << smoothed_loss; + LOG_IF(INFO, Caffe::root_solver()) << "Iteration " << iter_ + << ", loss = " << smoothed_loss; const vector*>& result = net_->output_blobs(); int score_index = 0; for (int j = 0; j < result.size(); ++j) { @@ -226,7 +264,7 @@ void Solver::Step(int iters) { loss_msg_stream << " (* " << loss_weight << " = " << loss_weight * result_vec[k] << " loss)"; } - LOG(INFO) << " Train net output #" + LOG_IF(INFO, Caffe::root_solver()) << " Train net output #" << score_index++ << ": " << output_name << " = " << result_vec[k] << loss_msg_stream.str(); } @@ -242,14 +280,23 @@ void Solver::Step(int iters) { if (param_.snapshot() && iter_ % param_.snapshot() == 0) { Snapshot(); } + if (SolverAction::STOP == request) { + requested_early_exit_ = true; + // Break out of training loop. + break; + } } } template void Solver::Solve(const char* resume_file) { + CHECK(Caffe::root_solver()); LOG(INFO) << "Solving " << net_->name(); LOG(INFO) << "Learning Rate Policy: " << param_.lr_policy(); + // Initialize to false every time we start solving. + requested_early_exit_ = false; + if (resume_file) { LOG(INFO) << "Restoring previous solver status from " << resume_file; Restore(resume_file); @@ -264,6 +311,10 @@ void Solver::Solve(const char* resume_file) { && (!param_.snapshot() || iter_ % param_.snapshot() != 0)) { Snapshot(); } + if (requested_early_exit_) { + LOG(INFO) << "Optimization stopped early."; + return; + } // After the optimization is done, run an additional train and test pass to // display the train and test loss/outputs if appropriate (based on the // display and test_interval settings, respectively). 
Unlike in the rest of @@ -281,16 +332,18 @@ void Solver::Solve(const char* resume_file) { LOG(INFO) << "Optimization Done."; } - template void Solver::TestAll() { - for (int test_net_id = 0; test_net_id < test_nets_.size(); ++test_net_id) { + for (int test_net_id = 0; + test_net_id < test_nets_.size() && !requested_early_exit_; + ++test_net_id) { Test(test_net_id); } } template void Solver::Test(const int test_net_id) { + CHECK(Caffe::root_solver()); LOG(INFO) << "Iteration " << iter_ << ", Testing net (#" << test_net_id << ")"; CHECK_NOTNULL(test_nets_[test_net_id].get())-> @@ -301,6 +354,21 @@ void Solver::Test(const int test_net_id) { const shared_ptr >& test_net = test_nets_[test_net_id]; Dtype loss = 0; for (int i = 0; i < param_.test_iter(test_net_id); ++i) { + SolverAction::Enum request = GetRequestedAction(); + // Check to see if stoppage of testing/training has been requested. + while (request != SolverAction::NONE) { + if (SolverAction::SNAPSHOT == request) { + Snapshot(); + } else if (SolverAction::STOP == request) { + requested_early_exit_ = true; + } + request = GetRequestedAction(); + } + if (requested_early_exit_) { + // break out of test loop. + break; + } + Dtype iter_loss; const vector*>& result = test_net->Forward(bottom_vec, &iter_loss); @@ -325,6 +393,10 @@ void Solver::Test(const int test_net_id) { } } } + if (requested_early_exit_) { + LOG(INFO) << "Test interrupted."; + return; + } if (param_.test_compute_loss()) { loss /= param_.test_iter(test_net_id); LOG(INFO) << "Test loss: " << loss; @@ -341,18 +413,49 @@ void Solver::Test(const int test_net_id) { << " = " << loss_weight * mean_score << " loss)"; } LOG(INFO) << " Test net output #" << i << ": " << output_name << " = " - << mean_score << loss_msg_stream.str(); + << mean_score << loss_msg_stream.str(); } } - template void Solver::Snapshot() { - NetParameter net_param; - // For intermediate results, we will also dump the gradient values. 
- net_->ToProto(&net_param, param_.snapshot_diff()); + CHECK(Caffe::root_solver()); + string model_filename; + switch (param_.snapshot_format()) { + case caffe::SolverParameter_SnapshotFormat_BINARYPROTO: + model_filename = SnapshotToBinaryProto(); + break; + case caffe::SolverParameter_SnapshotFormat_HDF5: + model_filename = SnapshotToHDF5(); + break; + default: + LOG(FATAL) << "Unsupported snapshot format."; + } + + SnapshotSolverState(model_filename); +} + +template +void Solver::CheckSnapshotWritePermissions() { + if (Caffe::root_solver() && param_.snapshot()) { + CHECK(param_.has_snapshot_prefix()) + << "In solver params, snapshot is specified but snapshot_prefix is not"; + string probe_filename = SnapshotFilename(".tempfile"); + std::ofstream probe_ofs(probe_filename.c_str()); + if (probe_ofs.good()) { + probe_ofs.close(); + std::remove(probe_filename.c_str()); + } else { + LOG(FATAL) << "Cannot write to snapshot prefix '" + << param_.snapshot_prefix() << "'. Make sure " + << "that the directory exists and is writeable."; + } + } +} + +template +string Solver::SnapshotFilename(const string extension) { string filename(param_.snapshot_prefix()); - string model_filename, snapshot_filename; const int kBufferSize = 20; char iter_str_buffer[kBufferSize]; snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_); @@ -371,19 +474,34 @@ void Solver::Snapshot() { } template -void Solver::Restore(const char* state_file) { - SolverState state; +string Solver::SnapshotToBinaryProto() { + string model_filename = SnapshotFilename(".caffemodel"); + LOG(INFO) << "Snapshotting to binary proto file " << model_filename; NetParameter net_param; - ReadProtoFromBinaryFile(state_file, &state); - if (state.has_learned_net()) { - ReadNetParamsFromBinaryFileOrDie(state.learned_net().c_str(), &net_param); - net_->CopyTrainedLayersFrom(net_param); - } - iter_ = state.iter(); - current_step_ = state.current_step(); - RestoreSolverState(state); + net_->ToProto(&net_param, 
param_.snapshot_diff()); + WriteProtoToBinaryFile(net_param, model_filename); + return model_filename; } +template +string Solver::SnapshotToHDF5() { + string model_filename = SnapshotFilename(".caffemodel.h5"); + LOG(INFO) << "Snapshotting to HDF5 file " << model_filename; + net_->ToHDF5(model_filename, param_.snapshot_diff()); + return model_filename; +} + +template +void Solver::Restore(const char* state_file) { + CHECK(Caffe::root_solver()); + string state_filename(state_file); + if (state_filename.size() >= 3 && + state_filename.compare(state_filename.size() - 3, 3, ".h5") == 0) { + RestoreSolverStateFromHDF5(state_filename); + } else { + RestoreSolverStateFromBinaryProto(state_filename); + } +} // Return the current learning rate. The currently implemented learning rate // policies are as follows: @@ -442,7 +560,7 @@ Dtype SGDSolver::GetLearningRate() { template void SGDSolver::PreSolve() { // Initialize the history - const vector > >& net_params = this->net_->params(); + const vector*>& net_params = this->net_->learnable_params(); history_.clear(); update_.clear(); temp_.clear(); @@ -458,12 +576,10 @@ template void SGDSolver::ClipGradients() { const Dtype clip_gradients = this->param_.clip_gradients(); if (clip_gradients < 0) { return; } - const vector > >& net_params = this->net_->params(); + const vector*>& net_params = this->net_->learnable_params(); Dtype sumsq_diff = 0; for (int i = 0; i < net_params.size(); ++i) { - if (this->net_->param_owners()[i] < 0) { - sumsq_diff += net_params[i]->sumsq_diff(); - } + sumsq_diff += net_params[i]->sumsq_diff(); } const Dtype l2norm_diff = std::sqrt(sumsq_diff); if (l2norm_diff > clip_gradients) { @@ -472,9 +588,7 @@ void SGDSolver::ClipGradients() { << l2norm_diff << " > " << clip_gradients << ") " << "by scale factor " << scale_factor; for (int i = 0; i < net_params.size(); ++i) { - if (this->net_->param_owners()[i] < 0) { - net_params[i]->scale_diff(scale_factor); - } + net_params[i]->scale_diff(scale_factor); } 
} } @@ -622,13 +736,53 @@ void SGDSolver::SnapshotSolverState(SolverState* state) { state->clear_history(); for (int i = 0; i < history_.size(); ++i) { // Add history - BlobProto* history_blob = state->add_history(); + BlobProto* history_blob = state.add_history(); history_[i]->ToProto(history_blob); } + string snapshot_filename = Solver::SnapshotFilename(".solverstate"); + LOG(INFO) + << "Snapshotting solver state to binary proto file " << snapshot_filename; + WriteProtoToBinaryFile(state, snapshot_filename.c_str()); } template -void SGDSolver::RestoreSolverState(const SolverState& state) { +void SGDSolver::SnapshotSolverStateToHDF5( + const string& model_filename) { + string snapshot_filename = + Solver::SnapshotFilename(".solverstate.h5"); + LOG(INFO) << "Snapshotting solver state to HDF5 file " << snapshot_filename; + hid_t file_hid = H5Fcreate(snapshot_filename.c_str(), H5F_ACC_TRUNC, + H5P_DEFAULT, H5P_DEFAULT); + CHECK_GE(file_hid, 0) + << "Couldn't open " << snapshot_filename << " to save solver state."; + hdf5_save_int(file_hid, "iter", this->iter_); + hdf5_save_string(file_hid, "learned_net", model_filename); + hdf5_save_int(file_hid, "current_step", this->current_step_); + hid_t history_hid = H5Gcreate2(file_hid, "history", H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT); + CHECK_GE(history_hid, 0) + << "Error saving solver state to " << snapshot_filename << "."; + for (int i = 0; i < history_.size(); ++i) { + ostringstream oss; + oss << i; + hdf5_save_nd_dataset(history_hid, oss.str(), *history_[i]); + } + H5Gclose(history_hid); + H5Fclose(file_hid); +} + +template +void SGDSolver::RestoreSolverStateFromBinaryProto( + const string& state_file) { + SolverState state; + ReadProtoFromBinaryFile(state_file, &state); + this->iter_ = state.iter(); + if (state.has_learned_net()) { + NetParameter net_param; + ReadNetParamsFromBinaryFileOrDie(state.learned_net().c_str(), &net_param); + this->net_->CopyTrainedLayersFrom(net_param); + } + this->current_step_ = 
state.current_step(); CHECK_EQ(state.history_size(), history_.size()) << "Incorrect length of history blobs."; LOG(INFO) << "SGDSolver: restoring history"; @@ -775,9 +929,336 @@ void AdaGradSolver::ComputeUpdateValue(int param_id, Dtype rate) { } } +template +void RMSPropSolver::ComputeUpdateValue(int param_id, Dtype rate) { + const vector*>& net_params = this->net_->learnable_params(); + const vector& net_params_lr = this->net_->params_lr(); + + // get the learning rate + Dtype delta = this->param_.delta(); + Dtype rms_decay = this->param_.rms_decay(); + Dtype local_rate = rate * net_params_lr[param_id]; + + switch (Caffe::mode()) { + case Caffe::CPU: + // compute square of gradient in update + caffe_powx(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), Dtype(2), + this->update_[param_id]->mutable_cpu_data()); + + // update history + caffe_cpu_axpby(net_params[param_id] -> count(), + Dtype(1-rms_decay), this->update_[param_id]->cpu_data(), + rms_decay, this->history_[param_id]-> mutable_cpu_data()); + + // prepare update + caffe_powx(net_params[param_id]->count(), + this->history_[param_id]->cpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_cpu_data()); + + caffe_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_cpu_data()); + + caffe_div(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), this->update_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + // scale and copy + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->cpu_data(), Dtype(0), + net_params[param_id]->mutable_cpu_diff()); + break; + case Caffe::GPU: +#ifndef CPU_ONLY + // compute square of gradient in update + caffe_gpu_powx(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), Dtype(2), + this->update_[param_id]->mutable_gpu_data()); + + // update history + caffe_gpu_axpby(net_params[param_id] -> count(), + Dtype(1-rms_decay), 
this->update_[param_id]->gpu_data(), + rms_decay, this->history_[param_id]-> mutable_gpu_data()); + + // prepare update + caffe_gpu_powx(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_div(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), this->update_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->gpu_data(), Dtype(0), + net_params[param_id]->mutable_gpu_diff()); +#else + NO_GPU; +#endif + break; + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +void AdaDeltaSolver::AdaDeltaPreSolve() { + // Add the extra history entries for AdaDelta after those from + // SGDSolver::PreSolve + const vector*>& net_params = this->net_->learnable_params(); + for (int i = 0; i < net_params.size(); ++i) { + const vector& shape = net_params[i]->shape(); + this->history_.push_back( + shared_ptr >(new Blob(shape))); + } +} + +template +void AdaDeltaSolver::ComputeUpdateValue(int param_id, Dtype rate) { + const vector*>& net_params = this->net_->learnable_params(); + const vector& net_params_lr = this->net_->params_lr(); + Dtype delta = this->param_.delta(); + Dtype momentum = this->param_.momentum(); + Dtype local_rate = rate * net_params_lr[param_id]; + size_t update_history_offset = net_params.size(); + switch (Caffe::mode()) { + case Caffe::CPU: { + // compute square of gradient in update + caffe_powx(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), Dtype(2), + this->update_[param_id]->mutable_cpu_data()); + + // update history of gradients + caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, + this->update_[param_id]->cpu_data(), momentum, + 
this->history_[param_id]->mutable_cpu_data()); + + // add delta to history to guard against dividing by zero later + caffe_set(net_params[param_id]->count(), delta, + this->temp_[param_id]->mutable_cpu_data()); + + caffe_add(net_params[param_id]->count(), + this->temp_[param_id]->cpu_data(), + this->history_[update_history_offset + param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + caffe_add(net_params[param_id]->count(), + this->temp_[param_id]->cpu_data(), + this->history_[param_id]->cpu_data(), + this->temp_[param_id]->mutable_cpu_data()); + + // divide history of updates by history of gradients + caffe_div(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), + this->temp_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + // jointly compute the RMS of both for update and gradient history + caffe_powx(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_cpu_data()); + + // compute the update + caffe_mul(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), + this->update_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + + // compute square of update + caffe_powx(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), Dtype(2), + this->update_[param_id]->mutable_cpu_data()); + + // update history of updates + caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, + this->update_[param_id]->cpu_data(), momentum, + this->history_[update_history_offset + param_id]->mutable_cpu_data()); + + // apply learning rate + caffe_cpu_scale(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), + net_params[param_id]->mutable_cpu_diff()); + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + // compute square of gradient in update + caffe_gpu_powx(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), Dtype(2), + this->update_[param_id]->mutable_gpu_data()); 
+ + // update history of gradients + caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, + this->update_[param_id]->gpu_data(), momentum, + this->history_[param_id]->mutable_gpu_data()); + + // add delta to history to guard against dividing by zero later + caffe_gpu_set(net_params[param_id]->count(), delta, + this->temp_[param_id]->mutable_gpu_data()); + + caffe_gpu_add(net_params[param_id]->count(), + this->temp_[param_id]->gpu_data(), + this->history_[update_history_offset + param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_add(net_params[param_id]->count(), + this->temp_[param_id]->gpu_data(), + this->history_[param_id]->gpu_data(), + this->temp_[param_id]->mutable_gpu_data()); + + // divide history of updates by history of gradients + caffe_gpu_div(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + this->temp_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + // jointly compute the RMS of both for update and gradient history + caffe_gpu_powx(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_gpu_data()); + + // compute the update and copy to net_diff + caffe_gpu_mul(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), + this->update_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + + // compute square of update + caffe_gpu_powx(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), Dtype(2), + this->update_[param_id]->mutable_gpu_data()); + + // update history of updates + caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, + this->update_[param_id]->gpu_data(), momentum, + this->history_[update_history_offset + param_id]->mutable_gpu_data()); + + // apply learning rate + caffe_gpu_scale(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), + net_params[param_id]->mutable_gpu_diff()); +#else + NO_GPU; +#endif + 
break; + } + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +void AdamSolver::AdamPreSolve() { + // Add the extra history entries for Adam after those from + // SGDSolver::PreSolve + const vector*>& net_params = this->net_->learnable_params(); + for (int i = 0; i < net_params.size(); ++i) { + const vector& shape = net_params[i]->shape(); + this->history_.push_back( + shared_ptr >(new Blob(shape))); + } +} + +template +void AdamSolver::ComputeUpdateValue(int param_id, Dtype rate) { + const vector*>& net_params = this->net_->learnable_params(); + const vector& net_params_lr = this->net_->params_lr(); + Dtype local_rate = rate * net_params_lr[param_id]; + const Dtype beta1 = this->param_.momentum(); + const Dtype beta2 = this->param_.momentum2(); + + // we create aliases for convenience + size_t update_history_offset = net_params.size(); + Blob* val_m = this->history_[param_id].get(); + Blob* val_v = this->history_[param_id + update_history_offset].get(); + Blob* val_t = this->temp_[param_id].get(); + + const int t = this->iter_ + 1; + const Dtype correction = std::sqrt(Dtype(1) - pow(beta2, t)) / + (Dtype(1.) 
- pow(beta1, t)); + const int N = net_params[param_id]->count(); + const Dtype eps_hat = this->param_.delta(); + + switch (Caffe::mode()) { + case Caffe::CPU: { + // update m <- \beta_1 m_{t-1} + (1-\beta_1)g_t + caffe_cpu_axpby(N, Dtype(1)-beta1, + net_params[param_id]->cpu_diff(), beta1, + val_m->mutable_cpu_data()); + + // update v <- \beta_2 m_{t-1} + (1-\beta_2)g_t^2 + caffe_mul(N, + net_params[param_id]->cpu_diff(), + net_params[param_id]->cpu_diff(), + val_t->mutable_cpu_data()); + caffe_cpu_axpby(N, Dtype(1)-beta2, + val_t->cpu_data(), beta2, + val_v->mutable_cpu_data()); + + // set update + caffe_powx(N, + val_v->cpu_data(), Dtype(0.5), + val_t->mutable_cpu_data()); + caffe_add_scalar(N, eps_hat, val_t->mutable_cpu_data()); + caffe_div(N, + val_m->cpu_data(), + val_t->cpu_data(), + val_t->mutable_cpu_data()); + + caffe_cpu_scale(N, local_rate*correction, + val_t->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + // update m <- \beta_1 m_{t-1} + (1-\beta_1)g_t + caffe_gpu_axpby(N, Dtype(1)-beta1, + net_params[param_id]->gpu_diff(), beta1, + val_m->mutable_gpu_data()); + + // update v <- \beta_2 m_{t-1} + (1-\beta_2)g_t^2 + caffe_gpu_mul(N, + net_params[param_id]->gpu_diff(), + net_params[param_id]->gpu_diff(), + val_t->mutable_gpu_data()); + caffe_gpu_axpby(N, Dtype(1)-beta2, + val_t->gpu_data(), beta2, + val_v->mutable_gpu_data()); + + // set update + caffe_gpu_powx(N, + val_v->gpu_data(), Dtype(0.5), + val_t->mutable_gpu_data()); + caffe_gpu_add_scalar(N, eps_hat, + val_t->mutable_gpu_data()); + caffe_gpu_div(N, + val_m->gpu_data(), + val_t->gpu_data(), + val_t->mutable_gpu_data()); + + caffe_gpu_scale(N, local_rate*correction, + val_t->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); +#else + NO_GPU; +#endif + break; + } + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + INSTANTIATE_CLASS(Solver); INSTANTIATE_CLASS(SGDSolver); INSTANTIATE_CLASS(NesterovSolver); 
INSTANTIATE_CLASS(AdaGradSolver); +INSTANTIATE_CLASS(RMSPropSolver); +INSTANTIATE_CLASS(AdaDeltaSolver); +INSTANTIATE_CLASS(AdamSolver); } // namespace caffe diff --git a/src/caffe/solver.cpp.orig b/src/caffe/solver.cpp.orig index 5b9ac36e2d7..78355cd2812 100644 --- a/src/caffe/solver.cpp.orig +++ b/src/caffe/solver.cpp.orig @@ -4,24 +4,44 @@ #include #include +#include "hdf5.h" +#include "hdf5_hl.h" + #include "caffe/net.hpp" #include "caffe/proto/caffe.pb.h" #include "caffe/solver.hpp" +#include "caffe/util/hdf5.hpp" #include "caffe/util/io.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/util/upgrade_proto.hpp" namespace caffe { +template +void Solver::SetActionFunction(ActionCallback func) { + action_request_function_ = func; +} + +template +SolverAction::Enum Solver::GetRequestedAction() { + if (action_request_function_) { + // If the external request function has been set, call it. + return action_request_function_(); + } + return SolverAction::NONE; +} + template -Solver::Solver(const SolverParameter& param) - : net_() { +Solver::Solver(const SolverParameter& param, const Solver* root_solver) + : net_(), callbacks_(), root_solver_(root_solver), + requested_early_exit_(false) { Init(param); } template -Solver::Solver(const string& param_file) - : net_() { +Solver::Solver(const string& param_file, const Solver* root_solver) + : net_(), callbacks_(), root_solver_(root_solver), + requested_early_exit_(false) { SolverParameter param; ReadProtoFromTextFileOrDie(param_file, ¶m); Init(param); @@ -29,17 +49,21 @@ Solver::Solver(const string& param_file) template void Solver::Init(const SolverParameter& param) { - LOG(INFO) << "Initializing solver from parameters: " << std::endl - << param.DebugString(); + CHECK(Caffe::root_solver() || root_solver_) + << "root_solver_ needs to be set for all non-root solvers"; + LOG_IF(INFO, Caffe::root_solver()) << "Initializing solver from parameters: " + << std::endl << param.DebugString(); param_ = param; 
CHECK_GE(param_.average_loss(), 1) << "average_loss should be non-negative."; - if (param_.random_seed() >= 0) { + if (Caffe::root_solver() && param_.random_seed() >= 0) { Caffe::set_random_seed(param_.random_seed()); } // Scaffolding code InitTrainNet(); - InitTestNets(); - LOG(INFO) << "Solver scaffolding done."; + if (Caffe::root_solver()) { + InitTestNets(); + LOG(INFO) << "Solver scaffolding done."; + } iter_ = 0; current_step_ = 0; } @@ -55,19 +79,22 @@ void Solver::InitTrainNet() { << "one of these fields specifying a train_net: " << field_names; NetParameter net_param; if (param_.has_train_net_param()) { - LOG(INFO) << "Creating training net specified in train_net_param."; + LOG_IF(INFO, Caffe::root_solver()) + << "Creating training net specified in train_net_param."; net_param.CopyFrom(param_.train_net_param()); } else if (param_.has_train_net()) { - LOG(INFO) << "Creating training net from train_net file: " - << param_.train_net(); + LOG_IF(INFO, Caffe::root_solver()) + << "Creating training net from train_net file: " << param_.train_net(); ReadNetParamsFromTextFileOrDie(param_.train_net(), &net_param); } if (param_.has_net_param()) { - LOG(INFO) << "Creating training net specified in net_param."; + LOG_IF(INFO, Caffe::root_solver()) + << "Creating training net specified in net_param."; net_param.CopyFrom(param_.net_param()); } if (param_.has_net()) { - LOG(INFO) << "Creating training net from net file: " << param_.net(); + LOG_IF(INFO, Caffe::root_solver()) + << "Creating training net from net file: " << param_.net(); ReadNetParamsFromTextFileOrDie(param_.net(), &net_param); } // Set the correct NetState. 
We start with the solver defaults (lowest @@ -79,11 +106,16 @@ void Solver::InitTrainNet() { net_state.MergeFrom(net_param.state()); net_state.MergeFrom(param_.train_state()); net_param.mutable_state()->CopyFrom(net_state); - net_.reset(new Net(net_param)); + if (Caffe::root_solver()) { + net_.reset(new Net(net_param)); + } else { + net_.reset(new Net(net_param, root_solver_->net_.get())); + } } template void Solver::InitTestNets() { + CHECK(Caffe::root_solver()); const bool has_net_param = param_.has_net_param(); const bool has_net_file = param_.has_net(); const int num_generic_nets = has_net_param + has_net_file; @@ -153,7 +185,12 @@ void Solver::InitTestNets() { net_params[i].mutable_state()->CopyFrom(net_state); LOG(INFO) << "Creating test net (#" << i << ") specified by " << sources[i]; - test_nets_[i].reset(new Net(net_params[i])); + if (Caffe::root_solver()) { + test_nets_[i].reset(new Net(net_params[i])); + } else { + test_nets_[i].reset(new Net(net_params[i], + root_solver_->test_nets_[i].get())); + } test_nets_[i]->set_debug_info(param_.debug_info()); } } @@ -167,40 +204,32 @@ void Solver::Step(int iters) { vector losses; Dtype smoothed_loss = 0; +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 while (iter_ < stop_iter) { // zero-init the params - for (int i = 0; i < net_->params().size(); ++i) { - shared_ptr > blob = net_->params()[i]; - switch (Caffe::mode()) { - case Caffe::CPU: - caffe_set(blob->count(), static_cast(0), - blob->mutable_cpu_diff()); - break; - case Caffe::GPU: -#ifndef CPU_ONLY - caffe_gpu_set(blob->count(), static_cast(0), - blob->mutable_gpu_diff()); -#else - NO_GPU; -#endif + net_->ClearParamDiffs(); + if (param_.test_interval() && iter_ % param_.test_interval() == 0 + && (iter_ > 0 || param_.test_initialization()) + && Caffe::root_solver()) { + TestAll(); + if (requested_early_exit_) { + // Break out of the while loop because stop was requested while testing. 
break; } } + for (int i = 0; i < callbacks_.size(); ++i) { + callbacks_[i]->on_start(); +======= + for (; iter_ < stop_iter; ++iter_) { if (param_.test_interval() && iter_ % param_.test_interval() == 0 && (iter_ > 0 || param_.test_initialization())) { TestAll(); +>>>>>>> triplet data generation and network update } - const bool display = param_.display() && iter_ % param_.display() == 0; net_->set_debug_info(display && param_.debug_info()); - // accumulate the loss and gradient - Dtype loss = 0; - for (int i = 0; i < param_.iter_size(); ++i) { - loss += net_->ForwardBackward(bottom_vec); - } - loss /= param_.iter_size(); - // average the loss across iterations for smoothed reporting + Dtype loss = net_->ForwardBackward(bottom_vec); if (losses.size() < average_loss) { losses.push_back(loss); int size = losses.size(); @@ -211,7 +240,8 @@ void Solver::Step(int iters) { losses[idx] = loss; } if (display) { - LOG(INFO) << "Iteration " << iter_ << ", loss = " << smoothed_loss; + LOG_IF(INFO, Caffe::root_solver()) << "Iteration " << iter_ + << ", loss = " << smoothed_loss; const vector*>& result = net_->output_blobs(); int score_index = 0; for (int j = 0; j < result.size(); ++j) { @@ -226,30 +256,56 @@ void Solver::Step(int iters) { loss_msg_stream << " (* " << loss_weight << " = " << loss_weight * result_vec[k] << " loss)"; } - LOG(INFO) << " Train net output #" + LOG_IF(INFO, Caffe::root_solver()) << " Train net output #" << score_index++ << ": " << output_name << " = " << result_vec[k] << loss_msg_stream.str(); } } } +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 + for (int i = 0; i < callbacks_.size(); ++i) { + callbacks_[i]->on_gradients_ready(); + } ApplyUpdate(); // Increment the internal iter_ counter -- its value should always indicate // the number of times the weights have been updated. 
++iter_; +======= + ComputeUpdateValue(); + net_->Update(); +>>>>>>> triplet data generation and network update + + SolverAction::Enum request = GetRequestedAction(); // Save a snapshot if needed. - if (param_.snapshot() && iter_ % param_.snapshot() == 0) { +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 + if ((param_.snapshot() + && iter_ % param_.snapshot() == 0 + && Caffe::root_solver()) || + (request == SolverAction::SNAPSHOT)) { +======= + if (param_.snapshot() && (iter_ + 1) % param_.snapshot() == 0) { +>>>>>>> triplet data generation and network update Snapshot(); } + if (SolverAction::STOP == request) { + requested_early_exit_ = true; + // Break out of training loop. + break; + } } } template void Solver::Solve(const char* resume_file) { + CHECK(Caffe::root_solver()); LOG(INFO) << "Solving " << net_->name(); LOG(INFO) << "Learning Rate Policy: " << param_.lr_policy(); + // Initialize to false every time we start solving. + requested_early_exit_ = false; + if (resume_file) { LOG(INFO) << "Restoring previous solver status from " << resume_file; Restore(resume_file); @@ -264,6 +320,10 @@ void Solver::Solve(const char* resume_file) { && (!param_.snapshot() || iter_ % param_.snapshot() != 0)) { Snapshot(); } + if (requested_early_exit_) { + LOG(INFO) << "Optimization stopped early."; + return; + } // After the optimization is done, run an additional train and test pass to // display the train and test loss/outputs if appropriate (based on the // display and test_interval settings, respectively). 
Unlike in the rest of @@ -281,16 +341,18 @@ void Solver::Solve(const char* resume_file) { LOG(INFO) << "Optimization Done."; } - template void Solver::TestAll() { - for (int test_net_id = 0; test_net_id < test_nets_.size(); ++test_net_id) { + for (int test_net_id = 0; + test_net_id < test_nets_.size() && !requested_early_exit_; + ++test_net_id) { Test(test_net_id); } } template void Solver::Test(const int test_net_id) { + CHECK(Caffe::root_solver()); LOG(INFO) << "Iteration " << iter_ << ", Testing net (#" << test_net_id << ")"; CHECK_NOTNULL(test_nets_[test_net_id].get())-> @@ -301,6 +363,21 @@ void Solver::Test(const int test_net_id) { const shared_ptr >& test_net = test_nets_[test_net_id]; Dtype loss = 0; for (int i = 0; i < param_.test_iter(test_net_id); ++i) { + SolverAction::Enum request = GetRequestedAction(); + // Check to see if stoppage of testing/training has been requested. + while (request != SolverAction::NONE) { + if (SolverAction::SNAPSHOT == request) { + Snapshot(); + } else if (SolverAction::STOP == request) { + requested_early_exit_ = true; + } + request = GetRequestedAction(); + } + if (requested_early_exit_) { + // break out of test loop. 
+ break; + } + Dtype iter_loss; const vector*>& result = test_net->Forward(bottom_vec, &iter_loss); @@ -325,6 +402,10 @@ void Solver::Test(const int test_net_id) { } } } + if (requested_early_exit_) { + LOG(INFO) << "Test interrupted."; + return; + } if (param_.test_compute_loss()) { loss /= param_.test_iter(test_net_id); LOG(INFO) << "Test loss: " << loss; @@ -341,69 +422,83 @@ void Solver::Test(const int test_net_id) { << " = " << loss_weight * mean_score << " loss)"; } LOG(INFO) << " Test net output #" << i << ": " << output_name << " = " - << mean_score << loss_msg_stream.str(); + << mean_score << loss_msg_stream.str(); } } -<<<<<<< e29f9656158cb307d3fb4a78c63aa2247c5ad57a template void Solver::Snapshot() { CHECK(Caffe::root_solver()); string model_filename; switch (param_.snapshot_format()) { - case caffe::SolverParameter_SnapshotFormat_BINARYPROTO: - model_filename = SnapshotToBinaryProto(); - break; - case caffe::SolverParameter_SnapshotFormat_HDF5: - model_filename = SnapshotToHDF5(); - break; - default: - LOG(FATAL) << "Unsupported snapshot format."; + case caffe::SolverParameter_SnapshotFormat_BINARYPROTO: + model_filename = SnapshotToBinaryProto(); + break; + case caffe::SolverParameter_SnapshotFormat_HDF5: + model_filename = SnapshotToHDF5(); + break; + default: + LOG(FATAL) << "Unsupported snapshot format."; } SnapshotSolverState(model_filename); } -======= ->>>>>>> macro define in upgrade_proto template -void Solver::Snapshot() { - NetParameter net_param; - // For intermediate results, we will also dump the gradient values. 
- net_->ToProto(&net_param, param_.snapshot_diff()); +string Solver::SnapshotFilename(const string extension) { string filename(param_.snapshot_prefix()); - string model_filename, snapshot_filename; const int kBufferSize = 20; char iter_str_buffer[kBufferSize]; +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_); + return filename + iter_str_buffer + extension; +======= + // Add one to iter_ to get the number of iterations that have completed. + snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_ + 1); filename += iter_str_buffer; model_filename = filename + ".caffemodel"; LOG(INFO) << "Snapshotting to " << model_filename; WriteProtoToBinaryFile(net_param, model_filename.c_str()); SolverState state; SnapshotSolverState(&state); - state.set_iter(iter_); + state.set_iter(iter_ + 1); state.set_learned_net(model_filename); state.set_current_step(current_step_); snapshot_filename = filename + ".solverstate"; LOG(INFO) << "Snapshotting solver state to " << snapshot_filename; WriteProtoToBinaryFile(state, snapshot_filename.c_str()); +>>>>>>> triplet data generation and network update } template -void Solver::Restore(const char* state_file) { - SolverState state; +string Solver::SnapshotToBinaryProto() { + string model_filename = SnapshotFilename(".caffemodel"); + LOG(INFO) << "Snapshotting to binary proto file " << model_filename; NetParameter net_param; - ReadProtoFromBinaryFile(state_file, &state); - if (state.has_learned_net()) { - ReadNetParamsFromBinaryFileOrDie(state.learned_net().c_str(), &net_param); - net_->CopyTrainedLayersFrom(net_param); - } - iter_ = state.iter(); - current_step_ = state.current_step(); - RestoreSolverState(state); + net_->ToProto(&net_param, param_.snapshot_diff()); + WriteProtoToBinaryFile(net_param, model_filename); + return model_filename; +} + +template +string Solver::SnapshotToHDF5() { + string model_filename = SnapshotFilename(".caffemodel.h5"); + LOG(INFO) << "Snapshotting 
to HDF5 file " << model_filename; + net_->ToHDF5(model_filename, param_.snapshot_diff()); + return model_filename; } +template +void Solver::Restore(const char* state_file) { + CHECK(Caffe::root_solver()); + string state_filename(state_file); + if (state_filename.size() >= 3 && + state_filename.compare(state_filename.size() - 3, 3, ".h5") == 0) { + RestoreSolverStateFromHDF5(state_filename); + } else { + RestoreSolverStateFromBinaryProto(state_filename); + } +} // Return the current learning rate. The currently implemented learning rate // policies are as follows: @@ -462,7 +557,7 @@ Dtype SGDSolver::GetLearningRate() { template void SGDSolver::PreSolve() { // Initialize the history - const vector > >& net_params = this->net_->params(); + const vector*>& net_params = this->net_->learnable_params(); history_.clear(); update_.clear(); temp_.clear(); @@ -478,12 +573,10 @@ template void SGDSolver::ClipGradients() { const Dtype clip_gradients = this->param_.clip_gradients(); if (clip_gradients < 0) { return; } - const vector > >& net_params = this->net_->params(); + const vector*>& net_params = this->net_->learnable_params(); Dtype sumsq_diff = 0; for (int i = 0; i < net_params.size(); ++i) { - if (this->net_->param_owners()[i] < 0) { - sumsq_diff += net_params[i]->sumsq_diff(); - } + sumsq_diff += net_params[i]->sumsq_diff(); } const Dtype l2norm_diff = std::sqrt(sumsq_diff); if (l2norm_diff > clip_gradients) { @@ -492,21 +585,31 @@ void SGDSolver::ClipGradients() { << l2norm_diff << " > " << clip_gradients << ") " << "by scale factor " << scale_factor; for (int i = 0; i < net_params.size(); ++i) { - if (this->net_->param_owners()[i] < 0) { - net_params[i]->scale_diff(scale_factor); - } + net_params[i]->scale_diff(scale_factor); } } } template +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 void SGDSolver::ApplyUpdate() { + CHECK(Caffe::root_solver()); +======= +void SGDSolver::ComputeUpdateValue() { + const vector > >& net_params = this->net_->params(); + const 
vector& net_params_lr = this->net_->params_lr(); + const vector& net_params_weight_decay = + this->net_->params_weight_decay(); + // get the learning rate +>>>>>>> triplet data generation and network update Dtype rate = GetLearningRate(); if (this->param_.display() && this->iter_ % this->param_.display() == 0) { LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; } ClipGradients(); - for (int param_id = 0; param_id < this->net_->params().size(); ++param_id) { +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 + for (int param_id = 0; param_id < this->net_->learnable_params().size(); + ++param_id) { Normalize(param_id); Regularize(param_id); ComputeUpdateValue(param_id, rate); @@ -518,7 +621,7 @@ template void SGDSolver::Normalize(int param_id) { if (this->param_.iter_size() == 1) { return; } // Scale gradient to counterbalance accumulation. - const vector > >& net_params = this->net_->params(); + const vector*>& net_params = this->net_->learnable_params(); const Dtype accum_normalization = Dtype(1.) 
/ this->param_.iter_size(); switch (Caffe::mode()) { case Caffe::CPU: { @@ -542,69 +645,81 @@ void SGDSolver::Normalize(int param_id) { template void SGDSolver::Regularize(int param_id) { - const vector > >& net_params = this->net_->params(); + const vector*>& net_params = this->net_->learnable_params(); const vector& net_params_weight_decay = this->net_->params_weight_decay(); +======= + Dtype momentum = this->param_.momentum(); +>>>>>>> triplet data generation and network update Dtype weight_decay = this->param_.weight_decay(); string regularization_type = this->param_.regularization_type(); - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; switch (Caffe::mode()) { - case Caffe::CPU: { - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else if (regularization_type == "L1") { - caffe_cpu_sign(net_params[param_id]->count(), - net_params[param_id]->cpu_data(), - temp_[param_id]->mutable_cpu_data()); - caffe_axpy(net_params[param_id]->count(), - local_decay, - temp_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; + case Caffe::CPU: + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + // Compute the value to history, and then copy them to the blob's diff. 
+ Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else if (regularization_type == "L1") { + caffe_cpu_sign(net_params[param_id]->count(), + net_params[param_id]->cpu_data(), + temp_[param_id]->mutable_cpu_data()); + caffe_axpy(net_params[param_id]->count(), + local_decay, + temp_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } } + + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), momentum, + history_[param_id]->mutable_cpu_data()); + // copy + caffe_copy(net_params[param_id]->count(), + history_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); } break; - } - case Caffe::GPU: { + case Caffe::GPU: #ifndef CPU_ONLY - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else if (regularization_type == "L1") { - caffe_gpu_sign(net_params[param_id]->count(), - net_params[param_id]->gpu_data(), - temp_[param_id]->mutable_gpu_data()); - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - temp_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + // Compute the value to history, and then copy them to the blob's diff. 
+ Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else if (regularization_type == "L1") { + caffe_gpu_sign(net_params[param_id]->count(), + net_params[param_id]->gpu_data(), + temp_[param_id]->mutable_gpu_data()); + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + temp_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } } - } -#else - NO_GPU; -#endif - break; - } - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 template void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { - const vector > >& net_params = this->net_->params(); + const vector*>& net_params = this->net_->learnable_params(); const vector& net_params_lr = this->net_->params_lr(); Dtype momentum = this->param_.momentum(); Dtype local_rate = rate * net_params_lr[param_id]; @@ -627,28 +742,96 @@ void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { caffe_copy(net_params[param_id]->count(), history_[param_id]->gpu_data(), net_params[param_id]->mutable_gpu_diff()); +======= + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), momentum, + history_[param_id]->mutable_gpu_data()); + // copy + caffe_copy(net_params[param_id]->count(), + history_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } +>>>>>>> triplet data generation and network update #else NO_GPU; #endif break; - } default: LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); } } template -void SGDSolver::SnapshotSolverState(SolverState* state) { - state->clear_history(); 
+void SGDSolver::SnapshotSolverState(const string& model_filename) { + switch (this->param_.snapshot_format()) { + case caffe::SolverParameter_SnapshotFormat_BINARYPROTO: + SnapshotSolverStateToBinaryProto(model_filename); + break; + case caffe::SolverParameter_SnapshotFormat_HDF5: + SnapshotSolverStateToHDF5(model_filename); + break; + default: + LOG(FATAL) << "Unsupported snapshot format."; + } +} + +template +void SGDSolver::SnapshotSolverStateToBinaryProto( + const string& model_filename) { + SolverState state; + state.set_iter(this->iter_); + state.set_learned_net(model_filename); + state.set_current_step(this->current_step_); + state.clear_history(); for (int i = 0; i < history_.size(); ++i) { // Add history - BlobProto* history_blob = state->add_history(); + BlobProto* history_blob = state.add_history(); history_[i]->ToProto(history_blob); } + string snapshot_filename = Solver::SnapshotFilename(".solverstate"); + LOG(INFO) + << "Snapshotting solver state to binary proto file " << snapshot_filename; + WriteProtoToBinaryFile(state, snapshot_filename.c_str()); +} + +template +void SGDSolver::SnapshotSolverStateToHDF5( + const string& model_filename) { + string snapshot_filename = + Solver::SnapshotFilename(".solverstate.h5"); + LOG(INFO) << "Snapshotting solver state to HDF5 file " << snapshot_filename; + hid_t file_hid = H5Fcreate(snapshot_filename.c_str(), H5F_ACC_TRUNC, + H5P_DEFAULT, H5P_DEFAULT); + CHECK_GE(file_hid, 0) + << "Couldn't open " << snapshot_filename << " to save solver state."; + hdf5_save_int(file_hid, "iter", this->iter_); + hdf5_save_string(file_hid, "learned_net", model_filename); + hdf5_save_int(file_hid, "current_step", this->current_step_); + hid_t history_hid = H5Gcreate2(file_hid, "history", H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT); + CHECK_GE(history_hid, 0) + << "Error saving solver state to " << snapshot_filename << "."; + for (int i = 0; i < history_.size(); ++i) { + ostringstream oss; + oss << i; + 
hdf5_save_nd_dataset(history_hid, oss.str(), *history_[i]); + } + H5Gclose(history_hid); + H5Fclose(file_hid); } template -void SGDSolver::RestoreSolverState(const SolverState& state) { +void SGDSolver::RestoreSolverStateFromBinaryProto( + const string& state_file) { + SolverState state; + ReadProtoFromBinaryFile(state_file, &state); + this->iter_ = state.iter(); + if (state.has_learned_net()) { + NetParameter net_param; + ReadNetParamsFromBinaryFileOrDie(state.learned_net().c_str(), &net_param); + this->net_->CopyTrainedLayersFrom(net_param); + } + this->current_step_ = state.current_step(); CHECK_EQ(state.history_size(), history_.size()) << "Incorrect length of history blobs."; LOG(INFO) << "SGDSolver: restoring history"; @@ -657,72 +840,390 @@ void SGDSolver::RestoreSolverState(const SolverState& state) { } } +template +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +void SGDSolver::RestoreSolverStateFromHDF5(const string& state_file) { + hid_t file_hid = H5Fopen(state_file.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK_GE(file_hid, 0) << "Couldn't open solver state file " << state_file; + this->iter_ = hdf5_load_int(file_hid, "iter"); + if (H5LTfind_dataset(file_hid, "learned_net")) { + string learned_net = hdf5_load_string(file_hid, "learned_net"); + this->net_->CopyTrainedLayersFrom(learned_net); + } + this->current_step_ = hdf5_load_int(file_hid, "current_step"); + hid_t history_hid = H5Gopen2(file_hid, "history", H5P_DEFAULT); + CHECK_GE(history_hid, 0) << "Error reading history from " << state_file; + int state_history_size = hdf5_get_num_links(history_hid); + CHECK_EQ(state_history_size, history_.size()) + << "Incorrect length of history blobs."; + for (int i = 0; i < history_.size(); ++i) { + ostringstream oss; + oss << i; + hdf5_load_nd_dataset(history_hid, oss.str().c_str(), 0, + kMaxBlobAxes, history_[i].get()); + } + H5Gclose(history_hid); + H5Fclose(file_hid); +} + template void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { + 
CHECK(Caffe::root_solver()); + const vector*>& net_params = this->net_->learnable_params(); +======= +void NesterovSolver::ComputeUpdateValue() { const vector > >& net_params = this->net_->params(); +>>>>>>> triplet data generation and network update const vector& net_params_lr = this->net_->params_lr(); + const vector& net_params_weight_decay = + this->net_->params_weight_decay(); + // get the learning rate + Dtype rate = this->GetLearningRate(); + if (this->param_.display() && this->iter_ % this->param_.display() == 0) { + LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; + } + SGDSolver::ClipGradients(); Dtype momentum = this->param_.momentum(); + Dtype weight_decay = this->param_.weight_decay(); + string regularization_type = this->param_.regularization_type(); + switch (Caffe::mode()) { + case Caffe::CPU: + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + // save history momentum for stepping back + caffe_copy(net_params[param_id]->count(), + this->history_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else if (regularization_type == "L1") { + caffe_cpu_sign(net_params[param_id]->count(), + net_params[param_id]->cpu_data(), + this->temp_[param_id]->mutable_cpu_data()); + caffe_axpy(net_params[param_id]->count(), + local_decay, + this->temp_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } + } + + // update history + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), momentum, + 
this->history_[param_id]->mutable_cpu_data()); + + // compute udpate: step back then over step + caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, + this->history_[param_id]->cpu_data(), -momentum, + this->update_[param_id]->mutable_cpu_data()); + + // copy + caffe_copy(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } + break; + case Caffe::GPU: +#ifndef CPU_ONLY + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + // save history momentum for stepping back + caffe_copy(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else if (regularization_type == "L1") { + caffe_gpu_sign(net_params[param_id]->count(), + net_params[param_id]->gpu_data(), + this->temp_[param_id]->mutable_gpu_data()); + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + this->temp_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } + } + + // update history + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), momentum, + this->history_[param_id]->mutable_gpu_data()); + + // compute udpate: step back then over step + caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, + this->history_[param_id]->gpu_data(), -momentum, + this->update_[param_id]->mutable_gpu_data()); + + // copy + caffe_copy(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + 
} +#else + NO_GPU; +#endif + break; + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +void AdaGradSolver::ComputeUpdateValue(int param_id, Dtype rate) { + CHECK(Caffe::root_solver()); + const vector*>& net_params = this->net_->learnable_params(); +======= +void AdaGradSolver::ComputeUpdateValue() { + const vector > >& net_params = this->net_->params(); +>>>>>>> triplet data generation and network update + const vector& net_params_lr = this->net_->params_lr(); + const vector& net_params_weight_decay = + this->net_->params_weight_decay(); + // get the learning rate + Dtype rate = this->GetLearningRate(); + Dtype delta = this->param_.delta(); + if (this->param_.display() && this->iter_ % this->param_.display() == 0) { + LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; + } + SGDSolver::ClipGradients(); + Dtype weight_decay = this->param_.weight_decay(); + string regularization_type = this->param_.regularization_type(); + switch (Caffe::mode()) { + case Caffe::CPU: + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else if (regularization_type == "L1") { + caffe_cpu_sign(net_params[param_id]->count(), + net_params[param_id]->cpu_data(), + this->temp_[param_id]->mutable_cpu_data()); + caffe_axpy(net_params[param_id]->count(), + local_decay, + this->temp_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } + } + + // compute square of gradient in update + caffe_powx(net_params[param_id]->count(), + 
net_params[param_id]->cpu_diff(), Dtype(2), + this->update_[param_id]->mutable_cpu_data()); + + // update history + caffe_add(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), + this->history_[param_id]->cpu_data(), + this->history_[param_id]->mutable_cpu_data()); + + // prepare update + caffe_powx(net_params[param_id]->count(), + this->history_[param_id]->cpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_cpu_data()); + + caffe_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_cpu_data()); + + caffe_div(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), + this->update_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + // scale and copy + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->cpu_data(), Dtype(0), + net_params[param_id]->mutable_cpu_diff()); + } + break; + case Caffe::GPU: +#ifndef CPU_ONLY + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else if (regularization_type == "L1") { + caffe_gpu_sign(net_params[param_id]->count(), + net_params[param_id]->gpu_data(), + this->temp_[param_id]->mutable_gpu_data()); + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + this->temp_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } + } + + // compute square of gradient in update + caffe_gpu_powx(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), Dtype(2), + this->update_[param_id]->mutable_gpu_data()); + + // update history + 
caffe_gpu_add(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + this->history_[param_id]->gpu_data(), + this->history_[param_id]->mutable_gpu_data()); + + // prepare update + caffe_gpu_powx(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_div(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), + this->update_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + // scale and copy + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->gpu_data(), Dtype(0), + net_params[param_id]->mutable_gpu_diff()); + } +#else + NO_GPU; +#endif + break; + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +void RMSPropSolver::ComputeUpdateValue(int param_id, Dtype rate) { + const vector*>& net_params = this->net_->learnable_params(); + const vector& net_params_lr = this->net_->params_lr(); + + // get the learning rate + Dtype delta = this->param_.delta(); + Dtype rms_decay = this->param_.rms_decay(); Dtype local_rate = rate * net_params_lr[param_id]; + switch (Caffe::mode()) { - case Caffe::CPU: { - // save history momentum for stepping back - caffe_copy(net_params[param_id]->count(), - this->history_[param_id]->cpu_data(), + case Caffe::CPU: + // compute square of gradient in update + caffe_powx(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), Dtype(2), this->update_[param_id]->mutable_cpu_data()); // update history - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->cpu_diff(), momentum, - this->history_[param_id]->mutable_cpu_data()); + caffe_cpu_axpby(net_params[param_id] -> count(), + Dtype(1-rms_decay), this->update_[param_id]->cpu_data(), + rms_decay, this->history_[param_id]-> mutable_cpu_data()); 
- // compute update: step back then over step - caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, - this->history_[param_id]->cpu_data(), -momentum, + // prepare update + caffe_powx(net_params[param_id]->count(), + this->history_[param_id]->cpu_data(), Dtype(0.5), this->update_[param_id]->mutable_cpu_data()); - // copy - caffe_copy(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), + caffe_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_cpu_data()); + + caffe_div(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), this->update_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + // scale and copy + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->cpu_data(), Dtype(0), net_params[param_id]->mutable_cpu_diff()); break; - } - case Caffe::GPU: { + case Caffe::GPU: #ifndef CPU_ONLY - // save history momentum for stepping back - caffe_copy(net_params[param_id]->count(), - this->history_[param_id]->gpu_data(), + // compute square of gradient in update + caffe_gpu_powx(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), Dtype(2), this->update_[param_id]->mutable_gpu_data()); // update history - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->gpu_diff(), momentum, - this->history_[param_id]->mutable_gpu_data()); + caffe_gpu_axpby(net_params[param_id] -> count(), + Dtype(1-rms_decay), this->update_[param_id]->gpu_data(), + rms_decay, this->history_[param_id]-> mutable_gpu_data()); - // compute update: step back then over step - caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, - this->history_[param_id]->gpu_data(), -momentum, + // prepare update + caffe_gpu_powx(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), Dtype(0.5), this->update_[param_id]->mutable_gpu_data()); - // copy - caffe_copy(net_params[param_id]->count(), - 
this->update_[param_id]->gpu_data(), + caffe_gpu_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_div(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), this->update_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->gpu_data(), Dtype(0), net_params[param_id]->mutable_gpu_diff()); #else NO_GPU; #endif break; - } default: LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); } } template -void AdaGradSolver::ComputeUpdateValue(int param_id, Dtype rate) { - const vector > >& net_params = this->net_->params(); +void AdaDeltaSolver::AdaDeltaPreSolve() { + // Add the extra history entries for AdaDelta after those from + // SGDSolver::PreSolve + const vector*>& net_params = this->net_->learnable_params(); + for (int i = 0; i < net_params.size(); ++i) { + const vector& shape = net_params[i]->shape(); + this->history_.push_back( + shared_ptr >(new Blob(shape))); + } +} + +template +void AdaDeltaSolver::ComputeUpdateValue(int param_id, Dtype rate) { + const vector*>& net_params = this->net_->learnable_params(); const vector& net_params_lr = this->net_->params_lr(); Dtype delta = this->param_.delta(); + Dtype momentum = this->param_.momentum(); Dtype local_rate = rate * net_params_lr[param_id]; + size_t update_history_offset = net_params.size(); switch (Caffe::mode()) { case Caffe::CPU: { // compute square of gradient in update @@ -730,28 +1231,55 @@ void AdaGradSolver::ComputeUpdateValue(int param_id, Dtype rate) { net_params[param_id]->cpu_diff(), Dtype(2), this->update_[param_id]->mutable_cpu_data()); - // update history + // update history of gradients + caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, + this->update_[param_id]->cpu_data(), momentum, + this->history_[param_id]->mutable_cpu_data()); + + // add delta to history to guard against dividing by zero later 
+ caffe_set(net_params[param_id]->count(), delta, + this->temp_[param_id]->mutable_cpu_data()); + caffe_add(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), + this->temp_[param_id]->cpu_data(), + this->history_[update_history_offset + param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + caffe_add(net_params[param_id]->count(), + this->temp_[param_id]->cpu_data(), this->history_[param_id]->cpu_data(), - this->history_[param_id]->mutable_cpu_data()); + this->temp_[param_id]->mutable_cpu_data()); - // prepare update + // divide history of updates by history of gradients + caffe_div(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), + this->temp_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + // jointly compute the RMS of both for update and gradient history caffe_powx(net_params[param_id]->count(), - this->history_[param_id]->cpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_cpu_data()); + this->update_[param_id]->cpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_cpu_data()); - caffe_add_scalar(net_params[param_id]->count(), - delta, this->update_[param_id]->mutable_cpu_data()); + // compute the update + caffe_mul(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), + this->update_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); - caffe_div(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), - this->update_[param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); + // compute square of update + caffe_powx(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), Dtype(2), + this->update_[param_id]->mutable_cpu_data()); - // scale and copy - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->cpu_data(), Dtype(0), + // update history of updates + caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, + this->update_[param_id]->cpu_data(), 
momentum, + this->history_[update_history_offset + param_id]->mutable_cpu_data()); + + // apply learning rate + caffe_cpu_scale(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), net_params[param_id]->mutable_cpu_diff()); break; } @@ -762,28 +1290,158 @@ void AdaGradSolver::ComputeUpdateValue(int param_id, Dtype rate) { net_params[param_id]->gpu_diff(), Dtype(2), this->update_[param_id]->mutable_gpu_data()); - // update history + // update history of gradients + caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, + this->update_[param_id]->gpu_data(), momentum, + this->history_[param_id]->mutable_gpu_data()); + + // add delta to history to guard against dividing by zero later + caffe_gpu_set(net_params[param_id]->count(), delta, + this->temp_[param_id]->mutable_gpu_data()); + caffe_gpu_add(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), + this->temp_[param_id]->gpu_data(), + this->history_[update_history_offset + param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_add(net_params[param_id]->count(), + this->temp_[param_id]->gpu_data(), this->history_[param_id]->gpu_data(), - this->history_[param_id]->mutable_gpu_data()); + this->temp_[param_id]->mutable_gpu_data()); - // prepare update + // divide history of updates by history of gradients + caffe_gpu_div(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + this->temp_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + // jointly compute the RMS of both for update and gradient history caffe_gpu_powx(net_params[param_id]->count(), - this->history_[param_id]->gpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_gpu_data()); + this->update_[param_id]->gpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_gpu_data()); - caffe_gpu_add_scalar(net_params[param_id]->count(), - delta, this->update_[param_id]->mutable_gpu_data()); + // compute the update and copy to net_diff 
+ caffe_gpu_mul(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), + this->update_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); - caffe_gpu_div(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), - this->update_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); + // compute square of update + caffe_gpu_powx(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), Dtype(2), + this->update_[param_id]->mutable_gpu_data()); - // scale and copy - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->gpu_data(), Dtype(0), + // update history of updates + caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, + this->update_[param_id]->gpu_data(), momentum, + this->history_[update_history_offset + param_id]->mutable_gpu_data()); + + // apply learning rate + caffe_gpu_scale(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), + net_params[param_id]->mutable_gpu_diff()); +#else + NO_GPU; +#endif + break; + } + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +void AdamSolver::AdamPreSolve() { + // Add the extra history entries for Adam after those from + // SGDSolver::PreSolve + const vector*>& net_params = this->net_->learnable_params(); + for (int i = 0; i < net_params.size(); ++i) { + const vector& shape = net_params[i]->shape(); + this->history_.push_back( + shared_ptr >(new Blob(shape))); + } +} + +template +void AdamSolver::ComputeUpdateValue(int param_id, Dtype rate) { + const vector*>& net_params = this->net_->learnable_params(); + const vector& net_params_lr = this->net_->params_lr(); + Dtype local_rate = rate * net_params_lr[param_id]; + const Dtype beta1 = this->param_.momentum(); + const Dtype beta2 = this->param_.momentum2(); + + // we create aliases for convenience + size_t update_history_offset = net_params.size(); + Blob* val_m = this->history_[param_id].get(); + Blob* 
val_v = this->history_[param_id + update_history_offset].get(); + Blob* val_t = this->temp_[param_id].get(); + + const int t = this->iter_ + 1; + const Dtype correction = std::sqrt(Dtype(1) - pow(beta2, t)) / + (Dtype(1.) - pow(beta1, t)); + const int N = net_params[param_id]->count(); + const Dtype eps_hat = this->param_.delta(); + + switch (Caffe::mode()) { + case Caffe::CPU: { + // update m <- \beta_1 m_{t-1} + (1-\beta_1)g_t + caffe_cpu_axpby(N, Dtype(1)-beta1, + net_params[param_id]->cpu_diff(), beta1, + val_m->mutable_cpu_data()); + + // update v <- \beta_2 m_{t-1} + (1-\beta_2)g_t^2 + caffe_mul(N, + net_params[param_id]->cpu_diff(), + net_params[param_id]->cpu_diff(), + val_t->mutable_cpu_data()); + caffe_cpu_axpby(N, Dtype(1)-beta2, + val_t->cpu_data(), beta2, + val_v->mutable_cpu_data()); + + // set update + caffe_powx(N, + val_v->cpu_data(), Dtype(0.5), + val_t->mutable_cpu_data()); + caffe_add_scalar(N, eps_hat, val_t->mutable_cpu_data()); + caffe_div(N, + val_m->cpu_data(), + val_t->cpu_data(), + val_t->mutable_cpu_data()); + + caffe_cpu_scale(N, local_rate*correction, + val_t->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + // update m <- \beta_1 m_{t-1} + (1-\beta_1)g_t + caffe_gpu_axpby(N, Dtype(1)-beta1, + net_params[param_id]->gpu_diff(), beta1, + val_m->mutable_gpu_data()); + + // update v <- \beta_2 m_{t-1} + (1-\beta_2)g_t^2 + caffe_gpu_mul(N, + net_params[param_id]->gpu_diff(), + net_params[param_id]->gpu_diff(), + val_t->mutable_gpu_data()); + caffe_gpu_axpby(N, Dtype(1)-beta2, + val_t->gpu_data(), beta2, + val_v->mutable_gpu_data()); + + // set update + caffe_gpu_powx(N, + val_v->gpu_data(), Dtype(0.5), + val_t->mutable_gpu_data()); + caffe_gpu_add_scalar(N, eps_hat, + val_t->mutable_gpu_data()); + caffe_gpu_div(N, + val_m->gpu_data(), + val_t->gpu_data(), + val_t->mutable_gpu_data()); + + caffe_gpu_scale(N, local_rate*correction, + val_t->gpu_data(), 
net_params[param_id]->mutable_gpu_diff()); #else NO_GPU; @@ -799,5 +1457,8 @@ INSTANTIATE_CLASS(Solver); INSTANTIATE_CLASS(SGDSolver); INSTANTIATE_CLASS(NesterovSolver); INSTANTIATE_CLASS(AdaGradSolver); +INSTANTIATE_CLASS(RMSPropSolver); +INSTANTIATE_CLASS(AdaDeltaSolver); +INSTANTIATE_CLASS(AdamSolver); } // namespace caffe diff --git a/src/caffe/solver.cpp.orig.orig b/src/caffe/solver.cpp.orig.orig new file mode 100644 index 00000000000..dd5e506ea0d --- /dev/null +++ b/src/caffe/solver.cpp.orig.orig @@ -0,0 +1,1484 @@ +#include + +#include +#include +#include + +#include "caffe/net.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/solver.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/util/upgrade_proto.hpp" + +namespace caffe { + +template +Solver::Solver(const SolverParameter& param) + : net_() { + Init(param); +} + +template +Solver::Solver(const string& param_file) + : net_() { + SolverParameter param; + ReadProtoFromTextFileOrDie(param_file, ¶m); + Init(param); +} + +template +void Solver::Init(const SolverParameter& param) { + LOG(INFO) << "Initializing solver from parameters: " << std::endl + << param.DebugString(); + param_ = param; + CHECK_GE(param_.average_loss(), 1) << "average_loss should be non-negative."; +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 + if (param_.random_seed() >= 0) { +======= + if (Caffe::root_solver() && param_.random_seed() >= 0) { +>>>>>>> triplet data generation and network update + Caffe::set_random_seed(param_.random_seed()); + } + // Scaffolding code + InitTrainNet(); + InitTestNets(); + LOG(INFO) << "Solver scaffolding done."; + iter_ = 0; + current_step_ = 0; +} + +template +void Solver::InitTrainNet() { + const int num_train_nets = param_.has_net() + param_.has_net_param() + + param_.has_train_net() + param_.has_train_net_param(); + const string& field_names = "net, net_param, train_net, train_net_param"; + CHECK_GE(num_train_nets, 1) << "SolverParameter must 
specify a train net " + << "using one of these fields: " << field_names; + CHECK_LE(num_train_nets, 1) << "SolverParameter must not contain more than " + << "one of these fields specifying a train_net: " << field_names; + NetParameter net_param; + if (param_.has_train_net_param()) { + LOG(INFO) << "Creating training net specified in train_net_param."; + net_param.CopyFrom(param_.train_net_param()); + } else if (param_.has_train_net()) { + LOG(INFO) << "Creating training net from train_net file: " + << param_.train_net(); + ReadNetParamsFromTextFileOrDie(param_.train_net(), &net_param); + } + if (param_.has_net_param()) { + LOG(INFO) << "Creating training net specified in net_param."; + net_param.CopyFrom(param_.net_param()); + } + if (param_.has_net()) { + LOG(INFO) << "Creating training net from net file: " << param_.net(); + ReadNetParamsFromTextFileOrDie(param_.net(), &net_param); + } + // Set the correct NetState. We start with the solver defaults (lowest + // precedence); then, merge in any NetState specified by the net_param itself; + // finally, merge in any NetState specified by the train_state (highest + // precedence). 
+ NetState net_state; + net_state.set_phase(TRAIN); + net_state.MergeFrom(net_param.state()); + net_state.MergeFrom(param_.train_state()); + net_param.mutable_state()->CopyFrom(net_state); + net_.reset(new Net(net_param)); +} + +template +void Solver::InitTestNets() { + const bool has_net_param = param_.has_net_param(); + const bool has_net_file = param_.has_net(); + const int num_generic_nets = has_net_param + has_net_file; + CHECK_LE(num_generic_nets, 1) + << "Both net_param and net_file may not be specified."; + const int num_test_net_params = param_.test_net_param_size(); + const int num_test_net_files = param_.test_net_size(); + const int num_test_nets = num_test_net_params + num_test_net_files; + if (num_generic_nets) { + CHECK_GE(param_.test_iter_size(), num_test_nets) + << "test_iter must be specified for each test network."; + } else { + CHECK_EQ(param_.test_iter_size(), num_test_nets) + << "test_iter must be specified for each test network."; + } + // If we have a generic net (specified by net or net_param, rather than + // test_net or test_net_param), we may have an unlimited number of actual + // test networks -- the actual number is given by the number of remaining + // test_iters after any test nets specified by test_net_param and/or test_net + // are evaluated. 
+ const int num_generic_net_instances = param_.test_iter_size() - num_test_nets; + const int num_test_net_instances = num_test_nets + num_generic_net_instances; + if (param_.test_state_size()) { + CHECK_EQ(param_.test_state_size(), num_test_net_instances) + << "test_state must be unspecified or specified once per test net."; + } + if (num_test_net_instances) { + CHECK_GT(param_.test_interval(), 0); + } + int test_net_id = 0; + vector sources(num_test_net_instances); + vector net_params(num_test_net_instances); + for (int i = 0; i < num_test_net_params; ++i, ++test_net_id) { + sources[test_net_id] = "test_net_param"; + net_params[test_net_id].CopyFrom(param_.test_net_param(i)); + } + for (int i = 0; i < num_test_net_files; ++i, ++test_net_id) { + sources[test_net_id] = "test_net file: " + param_.test_net(i); + ReadNetParamsFromTextFileOrDie(param_.test_net(i), + &net_params[test_net_id]); + } + const int remaining_test_nets = param_.test_iter_size() - test_net_id; + if (has_net_param) { + for (int i = 0; i < remaining_test_nets; ++i, ++test_net_id) { + sources[test_net_id] = "net_param"; + net_params[test_net_id].CopyFrom(param_.net_param()); + } + } + if (has_net_file) { + for (int i = 0; i < remaining_test_nets; ++i, ++test_net_id) { + sources[test_net_id] = "net file: " + param_.net(); + ReadNetParamsFromTextFileOrDie(param_.net(), &net_params[test_net_id]); + } + } + test_nets_.resize(num_test_net_instances); + for (int i = 0; i < num_test_net_instances; ++i) { + // Set the correct NetState. We start with the solver defaults (lowest + // precedence); then, merge in any NetState specified by the net_param + // itself; finally, merge in any NetState specified by the test_state + // (highest precedence). 
+ NetState net_state; + net_state.set_phase(TEST); + net_state.MergeFrom(net_params[i].state()); + if (param_.test_state_size()) { + net_state.MergeFrom(param_.test_state(i)); + } + net_params[i].mutable_state()->CopyFrom(net_state); + LOG(INFO) + << "Creating test net (#" << i << ") specified by " << sources[i]; + test_nets_[i].reset(new Net(net_params[i])); + test_nets_[i]->set_debug_info(param_.debug_info()); + } +} + +template +void Solver::Step(int iters) { + vector*> bottom_vec; + const int start_iter = iter_; + const int stop_iter = iter_ + iters; + int average_loss = this->param_.average_loss(); + vector losses; + Dtype smoothed_loss = 0; + +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 + while (iter_ < stop_iter) { + // zero-init the params +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 + for (int i = 0; i < net_->params().size(); ++i) { + shared_ptr > blob = net_->params()[i]; + switch (Caffe::mode()) { + case Caffe::CPU: + caffe_set(blob->count(), static_cast(0), + blob->mutable_cpu_diff()); + break; + case Caffe::GPU: +#ifndef CPU_ONLY + caffe_gpu_set(blob->count(), static_cast(0), + blob->mutable_gpu_diff()); +#else + NO_GPU; +#endif + break; + } + } + +======= + net_->ClearParamDiffs(); +>>>>>>> triplet data generation and network update + if (param_.test_interval() && iter_ % param_.test_interval() == 0 + && (iter_ > 0 || param_.test_initialization())) { + TestAll(); + } + +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +======= + for (int i = 0; i < callbacks_.size(); ++i) { + callbacks_[i]->on_start(); +======= + for (; iter_ < stop_iter; ++iter_) { + if (param_.test_interval() && iter_ % param_.test_interval() == 0 + && (iter_ > 0 || param_.test_initialization())) { + TestAll(); +>>>>>>> triplet data generation and network update + } +>>>>>>> triplet data generation and network update + const bool display = param_.display() && iter_ % param_.display() == 0; + net_->set_debug_info(display && param_.debug_info()); + Dtype loss = 
net_->ForwardBackward(bottom_vec); + if (losses.size() < average_loss) { + losses.push_back(loss); + int size = losses.size(); + smoothed_loss = (smoothed_loss * (size - 1) + loss) / size; + } else { + int idx = (iter_ - start_iter) % average_loss; + smoothed_loss += (loss - losses[idx]) / average_loss; + losses[idx] = loss; + } + if (display) { + LOG(INFO) << "Iteration " << iter_ << ", loss = " << smoothed_loss; + const vector*>& result = net_->output_blobs(); + int score_index = 0; + for (int j = 0; j < result.size(); ++j) { + const Dtype* result_vec = result[j]->cpu_data(); + const string& output_name = + net_->blob_names()[net_->output_blob_indices()[j]]; + const Dtype loss_weight = + net_->blob_loss_weights()[net_->output_blob_indices()[j]]; + for (int k = 0; k < result[j]->count(); ++k) { + ostringstream loss_msg_stream; + if (loss_weight) { + loss_msg_stream << " (* " << loss_weight + << " = " << loss_weight * result_vec[k] << " loss)"; + } + LOG(INFO) << " Train net output #" + << score_index++ << ": " << output_name << " = " + << result_vec[k] << loss_msg_stream.str(); + } + } + } +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 + for (int i = 0; i < callbacks_.size(); ++i) { + callbacks_[i]->on_gradients_ready(); + } +>>>>>>> triplet data generation and network update + ApplyUpdate(); + + // Increment the internal iter_ counter -- its value should always indicate + // the number of times the weights have been updated. + ++iter_; +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 + + // Save a snapshot if needed. + if (param_.snapshot() && iter_ % param_.snapshot() == 0) { +======= +======= + ComputeUpdateValue(); + net_->Update(); +>>>>>>> triplet data generation and network update + + SolverAction::Enum request = GetRequestedAction(); + + // Save a snapshot if needed. 
+<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 + if ((param_.snapshot() + && iter_ % param_.snapshot() == 0 + && Caffe::root_solver()) || + (request == SolverAction::SNAPSHOT)) { +======= + if (param_.snapshot() && (iter_ + 1) % param_.snapshot() == 0) { +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update + Snapshot(); + } + } +} + +template +void Solver::Solve(const char* resume_file) { + LOG(INFO) << "Solving " << net_->name(); + LOG(INFO) << "Learning Rate Policy: " << param_.lr_policy(); + + if (resume_file) { + LOG(INFO) << "Restoring previous solver status from " << resume_file; + Restore(resume_file); + } + + // For a network that is trained by the solver, no bottom or top vecs + // should be given, and we will just provide dummy vecs. + Step(param_.max_iter() - iter_); + // If we haven't already, save a snapshot after optimization, unless + // overridden by setting snapshot_after_train := false + if (param_.snapshot_after_train() + && (!param_.snapshot() || iter_ % param_.snapshot() != 0)) { + Snapshot(); + } + // After the optimization is done, run an additional train and test pass to + // display the train and test loss/outputs if appropriate (based on the + // display and test_interval settings, respectively). Unlike in the rest of + // training, for the train net we only run a forward pass as we've already + // updated the parameters "max_iter" times -- this final pass is only done to + // display the loss, which is computed in the forward pass. 
+ if (param_.display() && iter_ % param_.display() == 0) { + Dtype loss; + net_->ForwardPrefilled(&loss); + LOG(INFO) << "Iteration " << iter_ << ", loss = " << loss; + } + if (param_.test_interval() && iter_ % param_.test_interval() == 0) { + TestAll(); + } + LOG(INFO) << "Optimization Done."; +} + + +template +void Solver::TestAll() { + for (int test_net_id = 0; test_net_id < test_nets_.size(); ++test_net_id) { + Test(test_net_id); + } +} + +template +void Solver::Test(const int test_net_id) { + LOG(INFO) << "Iteration " << iter_ + << ", Testing net (#" << test_net_id << ")"; + CHECK_NOTNULL(test_nets_[test_net_id].get())-> + ShareTrainedLayersWith(net_.get()); + vector test_score; + vector test_score_output_id; + vector*> bottom_vec; + const shared_ptr >& test_net = test_nets_[test_net_id]; + Dtype loss = 0; + for (int i = 0; i < param_.test_iter(test_net_id); ++i) { + Dtype iter_loss; + const vector*>& result = + test_net->Forward(bottom_vec, &iter_loss); + if (param_.test_compute_loss()) { + loss += iter_loss; + } + if (i == 0) { + for (int j = 0; j < result.size(); ++j) { + const Dtype* result_vec = result[j]->cpu_data(); + for (int k = 0; k < result[j]->count(); ++k) { + test_score.push_back(result_vec[k]); + test_score_output_id.push_back(j); + } + } + } else { + int idx = 0; + for (int j = 0; j < result.size(); ++j) { + const Dtype* result_vec = result[j]->cpu_data(); + for (int k = 0; k < result[j]->count(); ++k) { + test_score[idx++] += result_vec[k]; + } + } + } + } + if (param_.test_compute_loss()) { + loss /= param_.test_iter(test_net_id); + LOG(INFO) << "Test loss: " << loss; + } + for (int i = 0; i < test_score.size(); ++i) { + const int output_blob_index = + test_net->output_blob_indices()[test_score_output_id[i]]; + const string& output_name = test_net->blob_names()[output_blob_index]; + const Dtype loss_weight = test_net->blob_loss_weights()[output_blob_index]; + ostringstream loss_msg_stream; + const Dtype mean_score = test_score[i] / 
param_.test_iter(test_net_id); + if (loss_weight) { + loss_msg_stream << " (* " << loss_weight + << " = " << loss_weight * mean_score << " loss)"; + } + LOG(INFO) << " Test net output #" << i << ": " << output_name << " = " + << mean_score << loss_msg_stream.str(); + } +} + +<<<<<<< e29f9656158cb307d3fb4a78c63aa2247c5ad57a +template +void Solver::Snapshot() { + CHECK(Caffe::root_solver()); + string model_filename; + switch (param_.snapshot_format()) { + case caffe::SolverParameter_SnapshotFormat_BINARYPROTO: + model_filename = SnapshotToBinaryProto(); + break; + case caffe::SolverParameter_SnapshotFormat_HDF5: + model_filename = SnapshotToHDF5(); + break; + default: + LOG(FATAL) << "Unsupported snapshot format."; + } + + SnapshotSolverState(model_filename); +} +======= +>>>>>>> macro define in upgrade_proto + +template +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +void Solver::Snapshot() { + NetParameter net_param; + // For intermediate results, we will also dump the gradient values. + net_->ToProto(&net_param, param_.snapshot_diff()); +======= +string Solver::SnapshotFilename(const string extension) { +>>>>>>> triplet data generation and network update + string filename(param_.snapshot_prefix()); + string model_filename, snapshot_filename; + const int kBufferSize = 20; + char iter_str_buffer[kBufferSize]; +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 + snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_); +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +======= + return filename + iter_str_buffer + extension; +======= + // Add one to iter_ to get the number of iterations that have completed. 
+ snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_ + 1); +>>>>>>> triplet data generation and network update + filename += iter_str_buffer; + model_filename = filename + ".caffemodel"; + LOG(INFO) << "Snapshotting to " << model_filename; + WriteProtoToBinaryFile(net_param, model_filename.c_str()); + SolverState state; + SnapshotSolverState(&state); + state.set_iter(iter_ + 1); + state.set_learned_net(model_filename); + state.set_current_step(current_step_); + snapshot_filename = filename + ".solverstate"; + LOG(INFO) << "Snapshotting solver state to " << snapshot_filename; + WriteProtoToBinaryFile(state, snapshot_filename.c_str()); +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +======= +>>>>>>> triplet data generation and network update +} + +template +string Solver::SnapshotToBinaryProto() { + string model_filename = SnapshotFilename(".caffemodel"); + LOG(INFO) << "Snapshotting to binary proto file " << model_filename; + NetParameter net_param; + net_->ToProto(&net_param, param_.snapshot_diff()); + WriteProtoToBinaryFile(net_param, model_filename); + return model_filename; +} + +template +string Solver::SnapshotToHDF5() { + string model_filename = SnapshotFilename(".caffemodel.h5"); + LOG(INFO) << "Snapshotting to HDF5 file " << model_filename; + net_->ToHDF5(model_filename, param_.snapshot_diff()); + return model_filename; +>>>>>>> triplet data generation and network update +} + +template +void Solver::Restore(const char* state_file) { + SolverState state; + NetParameter net_param; + ReadProtoFromBinaryFile(state_file, &state); + if (state.has_learned_net()) { + ReadNetParamsFromBinaryFileOrDie(state.learned_net().c_str(), &net_param); + net_->CopyTrainedLayersFrom(net_param); + } + iter_ = state.iter(); + current_step_ = state.current_step(); + RestoreSolverState(state); +} + + +// Return the current learning rate. The currently implemented learning rate +// policies are as follows: +// - fixed: always return base_lr. 
+// - step: return base_lr * gamma ^ (floor(iter / step)) +// - exp: return base_lr * gamma ^ iter +// - inv: return base_lr * (1 + gamma * iter) ^ (- power) +// - multistep: similar to step but it allows non uniform steps defined by +// stepvalue +// - poly: the effective learning rate follows a polynomial decay, to be +// zero by the max_iter. return base_lr (1 - iter/max_iter) ^ (power) +// - sigmoid: the effective learning rate follows a sigmod decay +// return base_lr ( 1/(1 + exp(-gamma * (iter - stepsize)))) +// +// where base_lr, max_iter, gamma, step, stepvalue and power are defined +// in the solver parameter protocol buffer, and iter is the current iteration. +template +Dtype SGDSolver::GetLearningRate() { + Dtype rate; + const string& lr_policy = this->param_.lr_policy(); + if (lr_policy == "fixed") { + rate = this->param_.base_lr(); + } else if (lr_policy == "step") { + this->current_step_ = this->iter_ / this->param_.stepsize(); + rate = this->param_.base_lr() * + pow(this->param_.gamma(), this->current_step_); + } else if (lr_policy == "exp") { + rate = this->param_.base_lr() * pow(this->param_.gamma(), this->iter_); + } else if (lr_policy == "inv") { + rate = this->param_.base_lr() * + pow(Dtype(1) + this->param_.gamma() * this->iter_, + - this->param_.power()); + } else if (lr_policy == "multistep") { + if (this->current_step_ < this->param_.stepvalue_size() && + this->iter_ >= this->param_.stepvalue(this->current_step_)) { + this->current_step_++; + LOG(INFO) << "MultiStep Status: Iteration " << + this->iter_ << ", step = " << this->current_step_; + } + rate = this->param_.base_lr() * + pow(this->param_.gamma(), this->current_step_); + } else if (lr_policy == "poly") { + rate = this->param_.base_lr() * pow(Dtype(1.) - + (Dtype(this->iter_) / Dtype(this->param_.max_iter())), + this->param_.power()); + } else if (lr_policy == "sigmoid") { + rate = this->param_.base_lr() * (Dtype(1.) / + (Dtype(1.) 
+ exp(-this->param_.gamma() * (Dtype(this->iter_) - + Dtype(this->param_.stepsize()))))); + } else { + LOG(FATAL) << "Unknown learning rate policy: " << lr_policy; + } + return rate; +} + +template +void SGDSolver::PreSolve() { + // Initialize the history + const vector > >& net_params = this->net_->params(); + history_.clear(); + update_.clear(); + temp_.clear(); + for (int i = 0; i < net_params.size(); ++i) { + const vector& shape = net_params[i]->shape(); + history_.push_back(shared_ptr >(new Blob(shape))); + update_.push_back(shared_ptr >(new Blob(shape))); + temp_.push_back(shared_ptr >(new Blob(shape))); + } +} + +template +void SGDSolver::ClipGradients() { + const Dtype clip_gradients = this->param_.clip_gradients(); + if (clip_gradients < 0) { return; } + const vector > >& net_params = this->net_->params(); + Dtype sumsq_diff = 0; + for (int i = 0; i < net_params.size(); ++i) { + if (this->net_->param_owners()[i] < 0) { + sumsq_diff += net_params[i]->sumsq_diff(); + } + } + const Dtype l2norm_diff = std::sqrt(sumsq_diff); + if (l2norm_diff > clip_gradients) { + Dtype scale_factor = clip_gradients / l2norm_diff; + LOG(INFO) << "Gradient clipping: scaling down gradients (L2 norm " + << l2norm_diff << " > " << clip_gradients << ") " + << "by scale factor " << scale_factor; + for (int i = 0; i < net_params.size(); ++i) { + if (this->net_->param_owners()[i] < 0) { + net_params[i]->scale_diff(scale_factor); + } + } + } +} + +template +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +void SGDSolver::ApplyUpdate() { +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +======= + CHECK(Caffe::root_solver()); +======= +void SGDSolver::ComputeUpdateValue() { + const vector > >& net_params = this->net_->params(); + const vector& net_params_lr = this->net_->params_lr(); + const vector& net_params_weight_decay = + this->net_->params_weight_decay(); + // get the learning rate +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network 
update + Dtype rate = GetLearningRate(); + if (this->param_.display() && this->iter_ % this->param_.display() == 0) { + LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; + } + ClipGradients(); +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 + for (int param_id = 0; param_id < this->net_->params().size(); ++param_id) { +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 + for (int param_id = 0; param_id < this->net_->learnable_params().size(); + ++param_id) { +>>>>>>> triplet data generation and network update + Normalize(param_id); + Regularize(param_id); + ComputeUpdateValue(param_id, rate); + } + this->net_->Update(); +} + +template +void SGDSolver::Normalize(int param_id) { + if (this->param_.iter_size() == 1) { return; } + // Scale gradient to counterbalance accumulation. +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 + const vector > >& net_params = this->net_->params(); +======= + const vector*>& net_params = this->net_->learnable_params(); +>>>>>>> triplet data generation and network update + const Dtype accum_normalization = Dtype(1.) 
/ this->param_.iter_size(); + switch (Caffe::mode()) { + case Caffe::CPU: { + caffe_scal(net_params[param_id]->count(), accum_normalization, + net_params[param_id]->mutable_cpu_diff()); + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + caffe_gpu_scal(net_params[param_id]->count(), accum_normalization, + net_params[param_id]->mutable_gpu_diff()); +#else + NO_GPU; +#endif + break; + } + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +void SGDSolver::Regularize(int param_id) { +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 + const vector > >& net_params = this->net_->params(); +======= + const vector*>& net_params = this->net_->learnable_params(); +>>>>>>> triplet data generation and network update + const vector& net_params_weight_decay = + this->net_->params_weight_decay(); +======= + Dtype momentum = this->param_.momentum(); +>>>>>>> triplet data generation and network update + Dtype weight_decay = this->param_.weight_decay(); + string regularization_type = this->param_.regularization_type(); + switch (Caffe::mode()) { + case Caffe::CPU: + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + // Compute the value to history, and then copy them to the blob's diff. 
+ Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else if (regularization_type == "L1") { + caffe_cpu_sign(net_params[param_id]->count(), + net_params[param_id]->cpu_data(), + temp_[param_id]->mutable_cpu_data()); + caffe_axpy(net_params[param_id]->count(), + local_decay, + temp_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } + } + + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), momentum, + history_[param_id]->mutable_cpu_data()); + // copy + caffe_copy(net_params[param_id]->count(), + history_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } + break; + case Caffe::GPU: +#ifndef CPU_ONLY + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + // Compute the value to history, and then copy them to the blob's diff. 
+ Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else if (regularization_type == "L1") { + caffe_gpu_sign(net_params[param_id]->count(), + net_params[param_id]->gpu_data(), + temp_[param_id]->mutable_gpu_data()); + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + temp_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } + } +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 + } +#else + NO_GPU; +#endif + break; + } + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { + const vector > >& net_params = this->net_->params(); +======= + +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +template +void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { + const vector*>& net_params = this->net_->learnable_params(); +>>>>>>> triplet data generation and network update + const vector& net_params_lr = this->net_->params_lr(); + Dtype momentum = this->param_.momentum(); + Dtype local_rate = rate * net_params_lr[param_id]; + // Compute the update to history, then copy it to the parameter diff. 
+ switch (Caffe::mode()) { + case Caffe::CPU: { + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), momentum, + history_[param_id]->mutable_cpu_data()); + caffe_copy(net_params[param_id]->count(), + history_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), momentum, + history_[param_id]->mutable_gpu_data()); + caffe_copy(net_params[param_id]->count(), + history_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +======= +======= + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), momentum, + history_[param_id]->mutable_gpu_data()); + // copy + caffe_copy(net_params[param_id]->count(), + history_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update +#else + NO_GPU; +#endif + break; +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 + } +======= +>>>>>>> triplet data generation and network update + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +void SGDSolver::SnapshotSolverState(SolverState* state) { + state->clear_history(); + for (int i = 0; i < history_.size(); ++i) { + // Add history + BlobProto* history_blob = state->add_history(); + history_[i]->ToProto(history_blob); + } +} + +template +void SGDSolver::RestoreSolverState(const SolverState& state) { + CHECK_EQ(state.history_size(), history_.size()) + << "Incorrect length of history blobs."; + LOG(INFO) << "SGDSolver: restoring history"; + for (int i = 0; i < history_.size(); ++i) { + history_[i]->FromProto(state.history(i)); + } +} + +template +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +void 
NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { + const vector > >& net_params = this->net_->params(); +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +void SGDSolver::RestoreSolverStateFromHDF5(const string& state_file) { + hid_t file_hid = H5Fopen(state_file.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK_GE(file_hid, 0) << "Couldn't open solver state file " << state_file; + this->iter_ = hdf5_load_int(file_hid, "iter"); + if (H5LTfind_dataset(file_hid, "learned_net")) { + string learned_net = hdf5_load_string(file_hid, "learned_net"); + this->net_->CopyTrainedLayersFrom(learned_net); + } + this->current_step_ = hdf5_load_int(file_hid, "current_step"); + hid_t history_hid = H5Gopen2(file_hid, "history", H5P_DEFAULT); + CHECK_GE(history_hid, 0) << "Error reading history from " << state_file; + int state_history_size = hdf5_get_num_links(history_hid); + CHECK_EQ(state_history_size, history_.size()) + << "Incorrect length of history blobs."; + for (int i = 0; i < history_.size(); ++i) { + ostringstream oss; + oss << i; + hdf5_load_nd_dataset(history_hid, oss.str().c_str(), 0, + kMaxBlobAxes, history_[i].get()); + } + H5Gclose(history_hid); + H5Fclose(file_hid); +} + +template +void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { + CHECK(Caffe::root_solver()); + const vector*>& net_params = this->net_->learnable_params(); +======= +void NesterovSolver::ComputeUpdateValue() { + const vector > >& net_params = this->net_->params(); +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update + const vector& net_params_lr = this->net_->params_lr(); + const vector& net_params_weight_decay = + this->net_->params_weight_decay(); + // get the learning rate + Dtype rate = this->GetLearningRate(); + if (this->param_.display() && this->iter_ % this->param_.display() == 0) { + LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; + } + SGDSolver::ClipGradients(); + Dtype momentum = 
this->param_.momentum(); + Dtype weight_decay = this->param_.weight_decay(); + string regularization_type = this->param_.regularization_type(); + switch (Caffe::mode()) { + case Caffe::CPU: + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + // save history momentum for stepping back + caffe_copy(net_params[param_id]->count(), + this->history_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else if (regularization_type == "L1") { + caffe_cpu_sign(net_params[param_id]->count(), + net_params[param_id]->cpu_data(), + this->temp_[param_id]->mutable_cpu_data()); + caffe_axpy(net_params[param_id]->count(), + local_decay, + this->temp_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } + } + + // update history + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), momentum, + this->history_[param_id]->mutable_cpu_data()); + + // compute udpate: step back then over step + caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, + this->history_[param_id]->cpu_data(), -momentum, + this->update_[param_id]->mutable_cpu_data()); + + // copy + caffe_copy(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } + break; + case Caffe::GPU: +#ifndef CPU_ONLY + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + // save history momentum for stepping back + caffe_copy(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), + 
this->update_[param_id]->mutable_gpu_data()); + + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else if (regularization_type == "L1") { + caffe_gpu_sign(net_params[param_id]->count(), + net_params[param_id]->gpu_data(), + this->temp_[param_id]->mutable_gpu_data()); + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + this->temp_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } + } + + // update history + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), momentum, + this->history_[param_id]->mutable_gpu_data()); + + // compute udpate: step back then over step + caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, + this->history_[param_id]->gpu_data(), -momentum, + this->update_[param_id]->mutable_gpu_data()); + + // copy + caffe_copy(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } +#else + NO_GPU; +#endif + break; + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +void AdaGradSolver::ComputeUpdateValue(int param_id, Dtype rate) { +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 + const vector > >& net_params = this->net_->params(); + const vector& net_params_lr = this->net_->params_lr(); + Dtype delta = this->param_.delta(); + Dtype local_rate = rate * net_params_lr[param_id]; + switch (Caffe::mode()) { + case Caffe::CPU: { +======= + CHECK(Caffe::root_solver()); + const vector*>& net_params = this->net_->learnable_params(); +======= 
+void AdaGradSolver::ComputeUpdateValue() { + const vector > >& net_params = this->net_->params(); +>>>>>>> triplet data generation and network update + const vector& net_params_lr = this->net_->params_lr(); + const vector& net_params_weight_decay = + this->net_->params_weight_decay(); + // get the learning rate + Dtype rate = this->GetLearningRate(); + Dtype delta = this->param_.delta(); + if (this->param_.display() && this->iter_ % this->param_.display() == 0) { + LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; + } + SGDSolver::ClipGradients(); + Dtype weight_decay = this->param_.weight_decay(); + string regularization_type = this->param_.regularization_type(); + switch (Caffe::mode()) { + case Caffe::CPU: + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else if (regularization_type == "L1") { + caffe_cpu_sign(net_params[param_id]->count(), + net_params[param_id]->cpu_data(), + this->temp_[param_id]->mutable_cpu_data()); + caffe_axpy(net_params[param_id]->count(), + local_decay, + this->temp_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } + } + + // compute square of gradient in update + caffe_powx(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), Dtype(2), + this->update_[param_id]->mutable_cpu_data()); + + // update history + caffe_add(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), + this->history_[param_id]->cpu_data(), + this->history_[param_id]->mutable_cpu_data()); + + // prepare update + caffe_powx(net_params[param_id]->count(), + 
this->history_[param_id]->cpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_cpu_data()); + + caffe_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_cpu_data()); + + caffe_div(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), + this->update_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + // scale and copy + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->cpu_data(), Dtype(0), + net_params[param_id]->mutable_cpu_diff()); + } + break; + case Caffe::GPU: +#ifndef CPU_ONLY + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else if (regularization_type == "L1") { + caffe_gpu_sign(net_params[param_id]->count(), + net_params[param_id]->gpu_data(), + this->temp_[param_id]->mutable_gpu_data()); + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + this->temp_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } + } + + // compute square of gradient in update + caffe_gpu_powx(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), Dtype(2), + this->update_[param_id]->mutable_gpu_data()); + + // update history + caffe_gpu_add(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + this->history_[param_id]->gpu_data(), + this->history_[param_id]->mutable_gpu_data()); + + // prepare update + caffe_gpu_powx(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_gpu_data()); + + 
caffe_gpu_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_div(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), + this->update_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + // scale and copy + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->gpu_data(), Dtype(0), + net_params[param_id]->mutable_gpu_diff()); + } +#else + NO_GPU; +#endif + break; + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +void RMSPropSolver::ComputeUpdateValue(int param_id, Dtype rate) { + const vector*>& net_params = this->net_->learnable_params(); + const vector& net_params_lr = this->net_->params_lr(); + + // get the learning rate + Dtype delta = this->param_.delta(); + Dtype rms_decay = this->param_.rms_decay(); + Dtype local_rate = rate * net_params_lr[param_id]; + + switch (Caffe::mode()) { + case Caffe::CPU: +>>>>>>> triplet data generation and network update + // compute square of gradient in update + caffe_powx(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), Dtype(2), + this->update_[param_id]->mutable_cpu_data()); + + // update history +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 + caffe_add(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), + this->history_[param_id]->cpu_data(), + this->history_[param_id]->mutable_cpu_data()); + + // prepare update + caffe_powx(net_params[param_id]->count(), + this->history_[param_id]->cpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_cpu_data()); + + caffe_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_cpu_data()); + + caffe_div(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), + this->update_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); +======= + caffe_cpu_axpby(net_params[param_id] -> count(), + Dtype(1-rms_decay), 
this->update_[param_id]->cpu_data(), + rms_decay, this->history_[param_id]-> mutable_cpu_data()); + + // prepare update + caffe_powx(net_params[param_id]->count(), + this->history_[param_id]->cpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_cpu_data()); + + caffe_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_cpu_data()); + + caffe_div(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), this->update_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); +>>>>>>> triplet data generation and network update + + // scale and copy + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->cpu_data(), Dtype(0), + net_params[param_id]->mutable_cpu_diff()); + break; + case Caffe::GPU: +#ifndef CPU_ONLY + // compute square of gradient in update + caffe_gpu_powx(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), Dtype(2), + this->update_[param_id]->mutable_gpu_data()); + + // update history +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 + caffe_gpu_add(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + this->history_[param_id]->gpu_data(), + this->history_[param_id]->mutable_gpu_data()); + + // prepare update + caffe_gpu_powx(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_div(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), + this->update_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + // scale and copy + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->gpu_data(), Dtype(0), + net_params[param_id]->mutable_gpu_diff()); +======= + caffe_gpu_axpby(net_params[param_id] -> count(), + Dtype(1-rms_decay), this->update_[param_id]->gpu_data(), + 
rms_decay, this->history_[param_id]-> mutable_gpu_data()); + + // prepare update + caffe_gpu_powx(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_div(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), this->update_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->gpu_data(), Dtype(0), + net_params[param_id]->mutable_gpu_diff()); +#else + NO_GPU; +#endif + break; + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +void AdaDeltaSolver::AdaDeltaPreSolve() { + // Add the extra history entries for AdaDelta after those from + // SGDSolver::PreSolve + const vector*>& net_params = this->net_->learnable_params(); + for (int i = 0; i < net_params.size(); ++i) { + const vector& shape = net_params[i]->shape(); + this->history_.push_back( + shared_ptr >(new Blob(shape))); + } +} + +template +void AdaDeltaSolver::ComputeUpdateValue(int param_id, Dtype rate) { + const vector*>& net_params = this->net_->learnable_params(); + const vector& net_params_lr = this->net_->params_lr(); + Dtype delta = this->param_.delta(); + Dtype momentum = this->param_.momentum(); + Dtype local_rate = rate * net_params_lr[param_id]; + size_t update_history_offset = net_params.size(); + switch (Caffe::mode()) { + case Caffe::CPU: { + // compute square of gradient in update + caffe_powx(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), Dtype(2), + this->update_[param_id]->mutable_cpu_data()); + + // update history of gradients + caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, + this->update_[param_id]->cpu_data(), momentum, + this->history_[param_id]->mutable_cpu_data()); + + // add delta to history to 
guard against dividing by zero later + caffe_set(net_params[param_id]->count(), delta, + this->temp_[param_id]->mutable_cpu_data()); + + caffe_add(net_params[param_id]->count(), + this->temp_[param_id]->cpu_data(), + this->history_[update_history_offset + param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + caffe_add(net_params[param_id]->count(), + this->temp_[param_id]->cpu_data(), + this->history_[param_id]->cpu_data(), + this->temp_[param_id]->mutable_cpu_data()); + + // divide history of updates by history of gradients + caffe_div(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), + this->temp_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + // jointly compute the RMS of both for update and gradient history + caffe_powx(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_cpu_data()); + + // compute the update + caffe_mul(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), + this->update_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + + // compute square of update + caffe_powx(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), Dtype(2), + this->update_[param_id]->mutable_cpu_data()); + + // update history of updates + caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, + this->update_[param_id]->cpu_data(), momentum, + this->history_[update_history_offset + param_id]->mutable_cpu_data()); + + // apply learning rate + caffe_cpu_scale(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), + net_params[param_id]->mutable_cpu_diff()); + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + // compute square of gradient in update + caffe_gpu_powx(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), Dtype(2), + this->update_[param_id]->mutable_gpu_data()); + + // update history of gradients + 
caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, + this->update_[param_id]->gpu_data(), momentum, + this->history_[param_id]->mutable_gpu_data()); + + // add delta to history to guard against dividing by zero later + caffe_gpu_set(net_params[param_id]->count(), delta, + this->temp_[param_id]->mutable_gpu_data()); + + caffe_gpu_add(net_params[param_id]->count(), + this->temp_[param_id]->gpu_data(), + this->history_[update_history_offset + param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_add(net_params[param_id]->count(), + this->temp_[param_id]->gpu_data(), + this->history_[param_id]->gpu_data(), + this->temp_[param_id]->mutable_gpu_data()); + + // divide history of updates by history of gradients + caffe_gpu_div(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + this->temp_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + // jointly compute the RMS of both for update and gradient history + caffe_gpu_powx(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_gpu_data()); + + // compute the update and copy to net_diff + caffe_gpu_mul(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), + this->update_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + + // compute square of update + caffe_gpu_powx(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), Dtype(2), + this->update_[param_id]->mutable_gpu_data()); + + // update history of updates + caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, + this->update_[param_id]->gpu_data(), momentum, + this->history_[update_history_offset + param_id]->mutable_gpu_data()); + + // apply learning rate + caffe_gpu_scale(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), + net_params[param_id]->mutable_gpu_diff()); +#else + NO_GPU; +#endif + break; + } + default: + LOG(FATAL) << 
"Unknown caffe mode: " << Caffe::mode(); + } +} + +template +void AdamSolver::AdamPreSolve() { + // Add the extra history entries for Adam after those from + // SGDSolver::PreSolve + const vector*>& net_params = this->net_->learnable_params(); + for (int i = 0; i < net_params.size(); ++i) { + const vector& shape = net_params[i]->shape(); + this->history_.push_back( + shared_ptr >(new Blob(shape))); + } +} + +template +void AdamSolver::ComputeUpdateValue(int param_id, Dtype rate) { + const vector*>& net_params = this->net_->learnable_params(); + const vector& net_params_lr = this->net_->params_lr(); + Dtype local_rate = rate * net_params_lr[param_id]; + const Dtype beta1 = this->param_.momentum(); + const Dtype beta2 = this->param_.momentum2(); + + // we create aliases for convenience + size_t update_history_offset = net_params.size(); + Blob* val_m = this->history_[param_id].get(); + Blob* val_v = this->history_[param_id + update_history_offset].get(); + Blob* val_t = this->temp_[param_id].get(); + + const int t = this->iter_ + 1; + const Dtype correction = std::sqrt(Dtype(1) - pow(beta2, t)) / + (Dtype(1.) 
- pow(beta1, t)); + const int N = net_params[param_id]->count(); + const Dtype eps_hat = this->param_.delta(); + + switch (Caffe::mode()) { + case Caffe::CPU: { + // update m <- \beta_1 m_{t-1} + (1-\beta_1)g_t + caffe_cpu_axpby(N, Dtype(1)-beta1, + net_params[param_id]->cpu_diff(), beta1, + val_m->mutable_cpu_data()); + + // update v <- \beta_2 m_{t-1} + (1-\beta_2)g_t^2 + caffe_mul(N, + net_params[param_id]->cpu_diff(), + net_params[param_id]->cpu_diff(), + val_t->mutable_cpu_data()); + caffe_cpu_axpby(N, Dtype(1)-beta2, + val_t->cpu_data(), beta2, + val_v->mutable_cpu_data()); + + // set update + caffe_powx(N, + val_v->cpu_data(), Dtype(0.5), + val_t->mutable_cpu_data()); + caffe_add_scalar(N, eps_hat, val_t->mutable_cpu_data()); + caffe_div(N, + val_m->cpu_data(), + val_t->cpu_data(), + val_t->mutable_cpu_data()); + + caffe_cpu_scale(N, local_rate*correction, + val_t->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + // update m <- \beta_1 m_{t-1} + (1-\beta_1)g_t + caffe_gpu_axpby(N, Dtype(1)-beta1, + net_params[param_id]->gpu_diff(), beta1, + val_m->mutable_gpu_data()); + + // update v <- \beta_2 m_{t-1} + (1-\beta_2)g_t^2 + caffe_gpu_mul(N, + net_params[param_id]->gpu_diff(), + net_params[param_id]->gpu_diff(), + val_t->mutable_gpu_data()); + caffe_gpu_axpby(N, Dtype(1)-beta2, + val_t->gpu_data(), beta2, + val_v->mutable_gpu_data()); + + // set update + caffe_gpu_powx(N, + val_v->gpu_data(), Dtype(0.5), + val_t->mutable_gpu_data()); + caffe_gpu_add_scalar(N, eps_hat, + val_t->mutable_gpu_data()); + caffe_gpu_div(N, + val_m->gpu_data(), + val_t->gpu_data(), + val_t->mutable_gpu_data()); + + caffe_gpu_scale(N, local_rate*correction, + val_t->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); +>>>>>>> triplet data generation and network update +#else + NO_GPU; +#endif + break; + } + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +INSTANTIATE_CLASS(Solver); 
+INSTANTIATE_CLASS(SGDSolver); +INSTANTIATE_CLASS(NesterovSolver); +INSTANTIATE_CLASS(AdaGradSolver); + +} // namespace caffe diff --git a/src/caffe/test/test_data/generate_sample_data.py b/src/caffe/test/test_data/generate_sample_data.py index ab5572685cb..a9b3c866d6c 100644 --- a/src/caffe/test/test_data/generate_sample_data.py +++ b/src/caffe/test/test_data/generate_sample_data.py @@ -51,3 +51,37 @@ with open(script_dir + '/sample_data_list.txt', 'w') as f: f.write(script_dir + '/sample_data.h5\n') f.write(script_dir + '/sample_data_2_gzip.h5\n') + +# Generate GradientBasedSolver solver_data.h5 + +num_cols = 3 +num_rows = 8 +height = 10 +width = 10 + +data = np.random.randn(num_rows, num_cols, height, width) +data = data.reshape(num_rows, num_cols, height, width) +data = data.astype('float32') + +targets = np.random.randn(num_rows, 1) +targets = targets.astype('float32') + +print data +print targets + +with h5py.File(script_dir + '/solver_data.h5', 'w') as f: + f['data'] = data + f['targets'] = targets + +with open(script_dir + '/solver_data_list.txt', 'w') as f: + f.write(script_dir + '/solver_data.h5\n') +======= +with open(os.path.dirname(__file__) + '/sample_data_list.txt', 'w') as f: + f.write(os.path.dirname(__file__) + '/sample_data.h5\n') + f.write(os.path.dirname(__file__) + '/sample_data_2_gzip.h5\n') +>>>>>>> triplet data generation and network update +======= +with open(script_dir + '/sample_data_list.txt', 'w') as f: + f.write(script_dir + '/sample_data.h5\n') + f.write(script_dir + '/sample_data_2_gzip.h5\n') +>>>>>>> restore diff --git a/src/caffe/test/test_gradient_based_solver.cpp b/src/caffe/test/test_gradient_based_solver.cpp index c9135d64e70..809122404a7 100644 --- a/src/caffe/test/test_gradient_based_solver.cpp +++ b/src/caffe/test/test_gradient_based_solver.cpp @@ -8,8 +8,10 @@ #include "gtest/gtest.h" #include "caffe/common.hpp" +#include "caffe/parallel.hpp" #include "caffe/proto/caffe.pb.h" #include "caffe/solver.hpp" +#include 
"caffe/util/io.hpp" #include "caffe/test/test_caffe_main.hpp" @@ -25,10 +27,18 @@ class GradientBasedSolverTest : public MultiDeviceTest { GradientBasedSolverTest() : seed_(1701), num_(4), channels_(3), height_(10), width_(10) {} + string snapshot_prefix_; shared_ptr > solver_; + shared_ptr > sync_; int seed_; + // Dimensions are determined by generate_sample_data.py + // TODO this is brittle and the hdf5 file should be checked instead. int num_, channels_, height_, width_; - Dtype delta_; // Stability constant for AdaGrad. + bool share_; + Dtype delta_; // Stability constant for RMSProp, AdaGrad, AdaDelta and Adam + + // Test data: check out generate_sample_data.py in the same directory. + string* input_file_; virtual SolverParameter_SolverType solver_type() = 0; virtual void InitSolver(const SolverParameter& param) = 0; @@ -36,9 +46,6 @@ class GradientBasedSolverTest : public MultiDeviceTest { virtual void InitSolverFromProtoString(const string& proto) { SolverParameter param; CHECK(google::protobuf::TextFormat::ParseFromString(proto, ¶m)); - // Disable saving a final snapshot so the tests don't pollute the user's - // working directory with useless snapshots. - param.set_snapshot_after_train(false); // Set the solver_mode according to current Caffe::mode. switch (Caffe::mode()) { case Caffe::CPU: @@ -51,15 +58,21 @@ class GradientBasedSolverTest : public MultiDeviceTest { LOG(FATAL) << "Unknown Caffe mode: " << Caffe::mode(); } InitSolver(param); - delta_ = (solver_type() == SolverParameter_SolverType_ADAGRAD) ? 
- param.delta() : 0; + delta_ = param.delta(); } void RunLeastSquaresSolver(const Dtype learning_rate, const Dtype weight_decay, const Dtype momentum, const int num_iters, const int iter_size = 1) { ostringstream proto; + int device_id = 0; +#ifndef CPU_ONLY + if (Caffe::mode() == Caffe::GPU) { + CUDA_CHECK(cudaGetDevice(&device_id)); + } +#endif proto << + "snapshot_after_train: " << snapshot << " " "max_iter: " << num_iters << " " "base_lr: " << learning_rate << " " "lr_policy: 'fixed' " @@ -88,10 +101,26 @@ class GradientBasedSolverTest : public MultiDeviceTest { " } " " top: 'data' " " top: 'targets' " - " } " + " } "; + if (share_) { + proto << + " layer { " + " name: 'slice' " + " type: 'Slice' " + " bottom: 'data' " + " top: 'data1' " + " top: 'data2' " + " slice_param { " + " axis: 0 " + " } " + " } "; + } + proto << " layer { " " name: 'innerprod' " " type: 'InnerProduct' " + " param { name: 'weights' } " + " param { name: 'bias' } " " inner_product_param { " " num_output: 1 " " weight_filler { " @@ -103,9 +132,42 @@ class GradientBasedSolverTest : public MultiDeviceTest { " std: 1.0 " " } " " } " - " bottom: 'data' " - " top: 'innerprod' " - " } " + " bottom: '" << string(share_ ? "data1": "data") << "' " + " top: '" << string(share_ ? 
"innerprod1": "innerprod") << "' " + " } "; + if (share_) { + proto << + " layer { " + " name: 'innerprod2' " + " type: 'InnerProduct' " + " param { name: 'weights' } " + " param { name: 'bias' } " + " inner_product_param { " + " num_output: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 1.0 " + " } " + " bias_filler { " + " type: 'gaussian' " + " std: 1.0 " + " } " + " } " + " bottom: 'data2' " + " top: 'innerprod2' " + " } " + " layer { " + " name: 'concat' " + " type: 'Concat' " + " bottom: 'innerprod1' " + " bottom: 'innerprod2' " + " top: 'innerprod' " + " concat_param { " + " axis: 0 " + " } " + " } "; + } + proto << " layer { " " name: 'loss' " " type: 'EuclideanLoss' " @@ -119,9 +181,46 @@ class GradientBasedSolverTest : public MultiDeviceTest { if (momentum != 0) { proto << "momentum: " << momentum << " "; } + MakeTempDir(&snapshot_prefix_); + proto << "snapshot_prefix: '" << snapshot_prefix_ << "/' "; + if (snapshot) { + proto << "snapshot: " << num_iters << " "; + } Caffe::set_random_seed(this->seed_); this->InitSolverFromProtoString(proto.str()); - this->solver_->Solve(); + if (from_snapshot != NULL) { + this->solver_->Restore(from_snapshot); + vector*> empty_bottom_vec; + for (int i = 0; i < this->solver_->iter(); ++i) { + this->solver_->net()->Forward(empty_bottom_vec); + } + } + if (devices == 1) { + this->solver_->Solve(); + } else { + LOG(INFO) << "Multi-GPU test on " << devices << " devices"; + vector gpus; + // put current device at the beginning + int device_id = solver_->param().device_id(); + gpus.push_back(device_id); + for (int i = 0; gpus.size() < devices; ++i) { + if (i != device_id) + gpus.push_back(i); + } + Caffe::set_solver_count(gpus.size()); + this->sync_.reset(new P2PSync( + this->solver_, NULL, this->solver_->param())); + this->sync_->run(gpus); + Caffe::set_solver_count(1); + } + if (snapshot) { + ostringstream resume_file; + resume_file << snapshot_prefix_ << "/_iter_" << num_iters + << ".solverstate"; + string 
resume_filename = resume_file.str(); + return resume_filename; + } + return string(); } // Compute an update value given the current state of the train net, @@ -129,7 +228,7 @@ class GradientBasedSolverTest : public MultiDeviceTest { // updated_params will store the updated weight and bias results, // using the blobs' diffs to hold the update values themselves. void ComputeLeastSquaresUpdate(const Dtype learning_rate, - const Dtype weight_decay, const Dtype momentum, + const Dtype weight_decay, const Dtype momentum, const int num_iters, vector > >* updated_params) { const int N = num_; const int D = channels_ * height_ * width_; @@ -195,7 +294,12 @@ class GradientBasedSolverTest : public MultiDeviceTest { ((i == D) ? bias.cpu_data()[0] : weights.cpu_data()[i]); // Finally, compute update. const vector > >& history = solver_->history(); - ASSERT_EQ(2, history.size()); // 1 blob for weights, 1 for bias + if (solver_type() != SolverParameter_SolverType_ADADELTA + && solver_type() != SolverParameter_SolverType_ADAM) { + ASSERT_EQ(2, history.size()); // 1 blob for weights, 1 for bias + } else { + ASSERT_EQ(4, history.size()); // additional blobs for update history + } Dtype update_value = learning_rate * grad; const Dtype history_value = (i == D) ? history[1]->cpu_data()[0] : history[0]->cpu_data()[i]; @@ -212,6 +316,40 @@ class GradientBasedSolverTest : public MultiDeviceTest { case SolverParameter_SolverType_ADAGRAD: update_value /= std::sqrt(history_value + grad * grad) + delta_; break; + case SolverParameter_SolverType_RMSPROP: { + const Dtype rms_decay = 0.95; + update_value /= std::sqrt(rms_decay*history_value + + grad * grad * (1 - rms_decay)) + delta_; + } + break; + case SolverParameter_SolverType_ADADELTA: + { + const Dtype update_history_value = (i == D) ? 
+ history[1 + num_param_blobs]->cpu_data()[0] : + history[0 + num_param_blobs]->cpu_data()[i]; + const Dtype weighted_gradient_average = + momentum * history_value + (1 - momentum) * (grad * grad); + update_value = grad * std::sqrt((update_history_value + delta_) / + (weighted_gradient_average + delta_)) * learning_rate; + // not actually needed, just here for illustrative purposes + // const Dtype weighted_update_average = + // momentum * update_history_value + (1 - momentum) * (update_value); + break; + } + case SolverParameter_SolverType_ADAM: { + const Dtype momentum2 = 0.999; + const Dtype m = history_value; + const Dtype v = (i == D) ? + history[1 + num_param_blobs]->cpu_data()[0] : + history[0 + num_param_blobs]->cpu_data()[i]; + const Dtype val_m = (1 - momentum) * grad + momentum * m; + const Dtype val_v = (1 - momentum2) * grad * grad + momentum2 * v; + Dtype alpha_t = learning_rate * + std::sqrt(Dtype(1) - pow(momentum2, num_iters)) / + (Dtype(1.) - pow(momentum, num_iters)); + update_value = alpha_t * val_m / (std::sqrt(val_v) + delta_); + break; + } default: LOG(FATAL) << "Unknown solver type: " << solver_type(); } @@ -333,20 +471,108 @@ class GradientBasedSolverTest : public MultiDeviceTest { void TestLeastSquaresUpdate(const Dtype learning_rate = 1.0, const Dtype weight_decay = 0.0, const Dtype momentum = 0.0, const int iter_to_check = 0) { - // Initialize the solver and run K (= iter_to_check) solver iterations. - RunLeastSquaresSolver(learning_rate, weight_decay, momentum, iter_to_check); + const int kNum = num_; + const int kIterSize = 1; + // Test over all numbers of devices. + int available_devices = 1; +#ifndef CPU_ONLY + if (Caffe::mode() == Caffe::GPU) { + CUDA_CHECK(cudaGetDeviceCount(&available_devices)); + } +#endif + for (int devices = 1; devices <= available_devices; ++devices) { + // Configure batch size for single / multi device equivalence. + // Constant data is needed for multi device as for accumulation. 
+ num_ = kNum * devices; - // Compute the (K+1)th update using the analytic least squares gradient. - vector > > updated_params; - ComputeLeastSquaresUpdate(learning_rate, weight_decay, momentum, - &updated_params); + // Initialize the solver and run K (= iter_to_check) solver iterations + // (on single device). + RunLeastSquaresSolver(learning_rate, weight_decay, momentum, + iter_to_check, kIterSize, 1); - // Reinitialize the solver and run K+1 solver iterations. + // Compute the (K+1)th update using the analytic least squares gradient. + vector > > updated_params; + ComputeLeastSquaresUpdate(learning_rate, weight_decay, momentum, + iter_to_check + 1, &updated_params); + + // Reinitialize the solver and run K+1 solver iterations. + num_ = kNum; + RunLeastSquaresSolver(learning_rate, weight_decay, momentum, + iter_to_check + 1, kIterSize, devices); + + // Check that the solver's solution matches ours. + CheckLeastSquaresUpdate(updated_params); + } + } + + void TestSnapshot(const Dtype learning_rate = 1.0, + const Dtype weight_decay = 0.0, const Dtype momentum = 0.0, + const int num_iters = 1) { + // Run the solver for num_iters * 2 iterations. + const int total_num_iters = num_iters * 2; + bool snapshot = false; + const int kIterSize = 1; + const int kDevices = 1; RunLeastSquaresSolver(learning_rate, weight_decay, momentum, - iter_to_check + 1); + total_num_iters, kIterSize, kDevices, snapshot); + + // Save the resulting param values. 
+ vector > > param_copies; + const vector*>& orig_params = + solver_->net()->learnable_params(); + param_copies.resize(orig_params.size()); + for (int i = 0; i < orig_params.size(); ++i) { + param_copies[i].reset(new Blob()); + const bool kReshape = true; + for (int copy_diff = false; copy_diff <= true; ++copy_diff) { + param_copies[i]->CopyFrom(*orig_params[i], copy_diff, kReshape); + } + } + + // Save the solver history + vector > > history_copies; + const vector > >& orig_history = solver_->history(); + history_copies.resize(orig_history.size()); + for (int i = 0; i < orig_history.size(); ++i) { + history_copies[i].reset(new Blob()); + const bool kReshape = true; + for (int copy_diff = false; copy_diff <= true; ++copy_diff) { + history_copies[i]->CopyFrom(*orig_history[i], copy_diff, kReshape); + } + } + + // Run the solver for num_iters iterations and snapshot. + snapshot = true; + string snapshot_name = RunLeastSquaresSolver(learning_rate, weight_decay, + momentum, num_iters, kIterSize, kDevices, snapshot); + + // Reinitialize the solver and run for num_iters more iterations. + snapshot = false; + RunLeastSquaresSolver(learning_rate, weight_decay, momentum, + total_num_iters, kIterSize, kDevices, + snapshot, snapshot_name.c_str()); + + // Check that params now match. + const vector*>& params = solver_->net()->learnable_params(); + for (int i = 0; i < params.size(); ++i) { + for (int j = 0; j < params[i]->count(); ++j) { + EXPECT_EQ(param_copies[i]->cpu_data()[j], params[i]->cpu_data()[j]) + << "param " << i << " data differed at dim " << j; + EXPECT_EQ(param_copies[i]->cpu_diff()[j], params[i]->cpu_diff()[j]) + << "param " << i << " diff differed at dim " << j; + } + } - // Check that the solver's solution matches ours. - CheckLeastSquaresUpdate(updated_params); + // Check that history now matches. 
+ const vector > >& history = solver_->history(); + for (int i = 0; i < history.size(); ++i) { + for (int j = 0; j < history[i]->count(); ++j) { + EXPECT_EQ(history_copies[i]->cpu_data()[j], history[i]->cpu_data()[j]) + << "history blob " << i << " data differed at dim " << j; + EXPECT_EQ(history_copies[i]->cpu_diff()[j], history[i]->cpu_diff()[j]) + << "history blob " << i << " diff differed at dim " << j; + } + } } }; @@ -371,23 +597,38 @@ TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdate) { this->TestLeastSquaresUpdate(); } -TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateLROneTenth) { +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateLROneHundredth) { typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; + const Dtype kLearningRate = 0.01; this->TestLeastSquaresUpdate(kLearningRate); } TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithWeightDecay) { typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 1.0; + const Dtype kLearningRate = 0.01; const Dtype kWeightDecay = 0.5; - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay); + const Dtype kMomentum = 0; + const int kNumIters = 1; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithWeightDecayMultiIter) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } } TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithMomentum) { typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 1.0; - const Dtype kWeightDecay = 0.0; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0; const Dtype kMomentum = 0.5; const int kNumIters = 1; for (int i = 0; i <= kNumIters; ++i) { @@ -397,8 +638,8 @@ 
TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithMomentum) { TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) { typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 1.0; - const Dtype kWeightDecay = 0.0; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0; const Dtype kMomentum = 0.5; const int kNumIters = 4; for (int i = 0; i <= kNumIters; ++i) { @@ -409,14 +650,61 @@ TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) { TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverything) { typedef typename TypeParam::Dtype Dtype; const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.9; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.5; const int kNumIters = 4; for (int i = 0; i <= kNumIters; ++i) { this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); } } +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + const int kIterSize = 2; + this->share_ = true; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(SGDSolverTest, TestSnapshot) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } 
+} + +TYPED_TEST(SGDSolverTest, TestSnapshotShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + this->share_ = true; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +======= TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { typedef typename TypeParam::Dtype Dtype; const Dtype kLearningRate = 0.01; @@ -427,6 +715,7 @@ TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, kIterSize); } +>>>>>>> restore template class AdaGradSolverTest : public GradientBasedSolverTest { @@ -447,15 +736,15 @@ TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdate) { this->TestLeastSquaresUpdate(); } -TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateLROneTenth) { +TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateLROneHundredth) { typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; + const Dtype kLearningRate = 0.01; this->TestLeastSquaresUpdate(kLearningRate); } TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateWithWeightDecay) { typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 1.0; + const Dtype kLearningRate = 0.01; const Dtype kWeightDecay = 0.5; this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay); } @@ -463,8 +752,8 @@ TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateWithWeightDecay) { TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateWithEverything) { typedef typename TypeParam::Dtype Dtype; const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.0; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; const int kNumIters = 4; for (int i = 0; i <= kNumIters; ++i) { this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, 
kMomentum, i); @@ -482,6 +771,44 @@ TYPED_TEST(AdaGradSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { kIterSize); } +TYPED_TEST(AdaGradSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + const int kIterSize = 2; + this->share_ = true; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(AdaGradSolverTest, TestSnapshot) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(AdaGradSolverTest, TestSnapshotShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + this->share_ = true; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +======= +>>>>>>> restore + template class NesterovSolverTest : public GradientBasedSolverTest { typedef typename TypeParam::Dtype Dtype; @@ -501,23 +828,35 @@ TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdate) { this->TestLeastSquaresUpdate(); } -TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateLROneTenth) { +TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateLROneHundredth) { typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; + const Dtype kLearningRate = 0.01; this->TestLeastSquaresUpdate(kLearningRate); } TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithWeightDecay) { typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 1.0; + const Dtype kLearningRate = 0.01; const Dtype 
kWeightDecay = 0.5; this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay); } +TYPED_TEST(NesterovSolverTest, + TestNesterovLeastSquaresUpdateWithWeightDecayMultiIter) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithMomentum) { typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 1.0; - const Dtype kWeightDecay = 0.0; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0; const Dtype kMomentum = 0.5; const int kNumIters = 1; for (int i = 0; i <= kNumIters; ++i) { @@ -527,8 +866,8 @@ TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithMomentum) { TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) { typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 1.0; - const Dtype kWeightDecay = 0.0; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0; const Dtype kMomentum = 0.5; const int kNumIters = 4; for (int i = 0; i <= kNumIters; ++i) { @@ -539,18 +878,263 @@ TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) { TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithEverything) { typedef typename TypeParam::Dtype Dtype; const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + const int 
kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + const int kIterSize = 2; + this->share_ = true; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(NesterovSolverTest, TestSnapshot) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(NesterovSolverTest, TestSnapshotShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + this->share_ = true; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +template +class AdaDeltaSolverTest : public GradientBasedSolverTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + virtual void InitSolver(const SolverParameter& param) { + this->solver_.reset(new AdaDeltaSolver(param)); + } + + virtual SolverParameter_SolverType solver_type() { + return SolverParameter_SolverType_ADADELTA; + } +}; + +TYPED_TEST_CASE(AdaDeltaSolverTest, TestDtypesAndDevices); + +TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdate) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + this->TestLeastSquaresUpdate(kLearningRate); +} + +TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithWeightDecay) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype 
kWeightDecay = 0.5; + const Dtype kMomentum = 0.95; + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); +} + +TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithHalfMomentum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.0; + const Dtype kMomentum = 0.5; + const int kNumIters = 1; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); + } +} + +TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithMomentum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.0; + const Dtype kMomentum = 0.95; + const int kNumIters = 1; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); + } +} + +TYPED_TEST(AdaDeltaSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.0; + const Dtype kMomentum = 0.95; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(AdaDeltaSolverTest, TestAdaDeltaLeastSquaresUpdateWithEverything) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.95; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(AdaDeltaSolverTest, + TestAdaDeltaLeastSquaresUpdateWithEverythingShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.95; + const int kNumIters = 4; + this->share_ = true; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, 
kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(AdaDeltaSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.95; + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(AdaDeltaSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.95; + const int kNumIters = 4; + const int kIterSize = 2; + this->share_ = true; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(AdaDeltaSolverTest, TestSnapshot) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.95; + const int kNumIters = 4; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(AdaDeltaSolverTest, TestSnapshotShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.95; + const int kNumIters = 4; + this->share_ = true; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +template +class AdamSolverTest : public GradientBasedSolverTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + virtual void InitSolver(const SolverParameter& param) { + SolverParameter new_param = param; + const Dtype momentum = 0.9; + new_param.set_momentum(momentum); + const Dtype momentum2 = 0.999; + new_param.set_momentum2(momentum2); + this->solver_.reset(new AdamSolver(new_param)); + } + virtual SolverParameter_SolverType solver_type() { + return 
SolverParameter_SolverType_ADAM; + } +}; + +TYPED_TEST_CASE(AdamSolverTest, TestDtypesAndDevices); + +TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdate) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0; + const Dtype kMomentum = 0.9; + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); +} + +TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithWeightDecay) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum); +} + +TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithEverything) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithEverythingShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; const Dtype kMomentum = 0.9; const int kNumIters = 4; + this->share_ = true; for (int i = 0; i <= kNumIters; ++i) { this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); } } +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a +TYPED_TEST(AdamSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; +======= TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { typedef typename TypeParam::Dtype Dtype; const Dtype kLearningRate = 0.01; const Dtype kWeightDecay = 0.1; +>>>>>>> restore const Dtype kMomentum = 0.9; const int kNumIters = 4; const int kIterSize = 2; diff --git a/src/caffe/test/test_net.cpp.orig 
b/src/caffe/test/test_net.cpp.orig index 56959f4793b..6183c38d3fd 100644 --- a/src/caffe/test/test_net.cpp.orig +++ b/src/caffe/test/test_net.cpp.orig @@ -614,6 +614,21 @@ class NetTest : public MultiDeviceTest { InitNetFromProtoString(proto); } +<<<<<<< HEAD +<<<<<<< HEAD +<<<<<<< HEAD +<<<<<<< HEAD +<<<<<<< HEAD +======= +>>>>>>> 011aef0... restore +======= +>>>>>>> 80a07dd... macro define in upgrade_proto +======= +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +======= +>>>>>>> restore +>>>>>>> 0a85215... triplet data generation and network update virtual void InitSkipPropNet(bool test_skip_true) { string proto = "name: 'SkipPropTestNetwork' " @@ -700,11 +715,85 @@ class NetTest : public MultiDeviceTest { " bottom: 'innerproduct' " " bottom: 'label_argmax' "; if (test_skip_true) +<<<<<<< HEAD +<<<<<<< HEAD +<<<<<<< HEAD +<<<<<<< HEAD +<<<<<<< HEAD +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +======= +<<<<<<< HEAD +<<<<<<< HEAD +>>>>>>> triplet data generation and network update +======= +>>>>>>> 00341b2... triplet data generation and network update +======= +>>>>>>> 1882ac9... add initiate class name of triplet loss layer +======= +>>>>>>> 08d5d6d... macro define in upgrade_proto +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +======= +======= +>>>>>>> 1f7ef32... add RGB data training as an option in triplet training +======= +======= +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a +>>>>>>> 0a85215... triplet data generation and network update +>>>>>>> 0dbadac... triplet data generation and network update +>>>>>>> triplet data generation and network update proto += " propagate_down: true " " propagate_down: false "; else proto += " propagate_down: true " " propagate_down: true "; +======= + proto += " propagate_down: [true, false] "; + else + proto += " propagate_down: [true, true] "; +<<<<<<< HEAD +>>>>>>> 011aef0... restore +<<<<<<< HEAD +<<<<<<< HEAD +======= +>>>>>>> 1882ac9... 
add initiate class name of triplet loss layer +======= + proto += " propagate_down: true " + " propagate_down: false "; + else + proto += " propagate_down: true " + " propagate_down: true "; +>>>>>>> 98fb438... fixed two bugs with prototext format +<<<<<<< HEAD +<<<<<<< HEAD +======= +>>>>>>> 00341b2... triplet data generation and network update +======= +>>>>>>> 1882ac9... add initiate class name of triplet loss layer +======= +======= + proto += " propagate_down: [true, false] "; + else + proto += " propagate_down: [true, true] "; +>>>>>>> 80a07dd... macro define in upgrade_proto +>>>>>>> 08d5d6d... macro define in upgrade_proto +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +======= +======= +======= + proto += " propagate_down: true " + " propagate_down: false "; + else + proto += " propagate_down: true " + " propagate_down: true "; +>>>>>>> b266250... fixed two bugs with prototext format +<<<<<<< HEAD +>>>>>>> 1f7ef32... add RGB data training as an option in triplet training +======= +======= +>>>>>>> restore +>>>>>>> 0a85215... triplet data generation and network update +>>>>>>> 0dbadac... triplet data generation and network update +>>>>>>> triplet data generation and network update proto += " top: 'cross_entropy_loss' " " type: 'SigmoidCrossEntropyLoss' " @@ -713,6 +802,24 @@ class NetTest : public MultiDeviceTest { InitNetFromProtoString(proto); } +<<<<<<< HEAD +<<<<<<< HEAD +<<<<<<< HEAD +======= +>>>>>>> 083f61b... New triplet loss layer added(beta1 version-no test source files) +======= +>>>>>>> 011aef0... restore +======= +>>>>>>> 4d8130b... New triplet loss layer added(beta1 version-no test source files) +======= +>>>>>>> 80a07dd... macro define in upgrade_proto +======= +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a +======= +>>>>>>> triplet data generation and network update +======= +>>>>>>> restore +>>>>>>> 0a85215... 
triplet data generation and network update int seed_; shared_ptr > net_; }; @@ -1107,11 +1214,10 @@ TYPED_TEST(NetTest, TestSharedWeightsUpdate) { EXPECT_EQ(this->net_->layer_names()[2], "innerproduct2"); Blob* ip1_weights = this->net_->layers()[1]->blobs()[0].get(); Blob* ip2_weights = this->net_->layers()[2]->blobs()[0].get(); - // Check that data blobs of shared weights share the same location in memory. + // Check that data and diff blobs of shared weights share the same memory + // locations. EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); - // Check that diff blobs of shared weights are at different locations in - // memory. (The diffs should be accumulated at update time.) - EXPECT_NE(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); + EXPECT_EQ(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); this->net_->Forward(bottom); this->net_->Backward(); // Compute the expected update as the data minus the two diffs. @@ -1124,11 +1230,7 @@ TYPED_TEST(NetTest, TestSharedWeightsUpdate) { // Make sure the diffs are non-trivial. 
for (int i = 0; i < count; ++i) { EXPECT_NE(0, ip1_weights->cpu_diff()[i]); - EXPECT_NE(0, ip2_weights->cpu_diff()[i]); - EXPECT_NE(ip1_weights->cpu_diff()[i], ip2_weights->cpu_diff()[i]); } - caffe_axpy(count, Dtype(1), ip2_weights->cpu_diff(), - shared_params.mutable_cpu_diff()); caffe_axpy(count, Dtype(-1), shared_params.cpu_diff(), shared_params.mutable_cpu_data()); const Dtype* expected_updated_params = shared_params.cpu_data(); @@ -1165,8 +1267,8 @@ TYPED_TEST(NetTest, TestSharedWeightsUpdate) { EXPECT_NE(0, ip1_weights->cpu_diff()[i]); EXPECT_NE(0, ip2_weights->cpu_diff()[i]); EXPECT_NE(ip1_weights->cpu_diff()[i], ip2_weights->cpu_diff()[i]); - EXPECT_EQ(ip1_weights->cpu_diff()[i] + ip2_weights->cpu_diff()[i], - shared_params.cpu_diff()[i]); + EXPECT_FLOAT_EQ(ip1_weights->cpu_diff()[i] + ip2_weights->cpu_diff()[i], + shared_params.cpu_diff()[i]); } caffe_axpy(count, Dtype(-1), ip1_weights->cpu_diff(), unshared_params1.mutable_cpu_data()); @@ -1196,11 +1298,10 @@ TYPED_TEST(NetTest, TestSharedWeightsResume) { EXPECT_EQ(this->net_->layer_names()[2], "innerproduct2"); Blob* ip1_weights = this->net_->layers()[1]->blobs()[0].get(); Blob* ip2_weights = this->net_->layers()[2]->blobs()[0].get(); - // Check that data blobs of shared weights share the same location in memory. + // Check that data and diff blobs of shared weights share the same memory + // locations. EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); - // Check that diff blobs of shared weights are at different locations in - // memory. (The diffs should be accumulated at update time.) 
- EXPECT_NE(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); + EXPECT_EQ(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); this->net_->ForwardBackward(bottom); this->net_->Update(); Blob shared_params; @@ -1223,14 +1324,13 @@ TYPED_TEST(NetTest, TestSharedWeightsResume) { ASSERT_FALSE(NULL == ip1_weights); ASSERT_FALSE(NULL == ip2_weights); EXPECT_NE(ip1_weights, ip2_weights); - // Check that data blobs of shared weights share the same location in memory. + // Check that data and diff blobs of shared weights share the same memory + // locations. EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); + EXPECT_EQ(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); for (int i = 0; i < count; ++i) { EXPECT_FLOAT_EQ(shared_params.cpu_data()[i], ip1_weights->cpu_data()[i]); } - // Check that diff blobs of shared weights are at different locations in - // memory. (The diffs should be accumulated at update time.) - EXPECT_NE(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); } TYPED_TEST(NetTest, TestParamPropagateDown) { diff --git a/src/caffe/test/test_triplet_loss_layer.orig.orig b/src/caffe/test/test_triplet_loss_layer.orig.orig new file mode 100644 index 00000000000..4f1764d2e33 --- /dev/null +++ b/src/caffe/test/test_triplet_loss_layer.orig.orig @@ -0,0 +1,317 @@ +#include +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class TripletLossLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + TripletLossLayerTest() +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +<<<<<<< 22f42667487341b9aa1d7d1f9854410111b1601c +======= +<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da:src/caffe/test/test_triplet_loss_layer.orig +<<<<<<< 
80a07dd611590aa6547f3a3b2af35c791f32dfcf +>>>>>>> triplet data generation and network update + : blob_bottom_data_(new Blob(50, 1, 1, 1)), + blob_bottom_y_(new Blob(50, 1, 1, 1)), +======= +======= +>>>>>>> triplet data generation and network update:src/caffe/test/test_triplet_loss_layer + : blob_bottom_data_i_(new Blob(512, 2, 1, 1)), + blob_bottom_data_j_(new Blob(512, 2, 1, 1)), + blob_bottom_data_k_(new Blob(512, 2, 1, 1)), + blob_bottom_y_(new Blob(512, 1, 1, 1)), +<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da:src/caffe/test/test_triplet_loss_layer.orig +>>>>>>> suit for opencv3.0.0 +======= +>>>>>>> triplet data generation and network update:src/caffe/test/test_triplet_loss_layer + blob_top_loss_(new Blob()) { + // fill the values + FillerParameter filler_param; + filler_param.set_min(-1.0); + filler_param.set_max(1.0); // distances~=1.0 to test both sides of margin + UniformFiller filler(filler_param); +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +<<<<<<< 22f42667487341b9aa1d7d1f9854410111b1601c +======= +<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da:src/caffe/test/test_triplet_loss_layer.orig +<<<<<<< 80a07dd611590aa6547f3a3b2af35c791f32dfcf +>>>>>>> triplet data generation and network update + filler.Fill(this->blob_bottom_data_); + blob_bottom_vec_.push_back(blob_bottom_data_); +======= +======= +>>>>>>> triplet data generation and network update:src/caffe/test/test_triplet_loss_layer + filler.Fill(this->blob_bottom_data_i_); + blob_bottom_vec_.push_back(blob_bottom_data_i_); + filler.Fill(this->blob_bottom_data_j_); + blob_bottom_vec_.push_back(blob_bottom_data_j_); + filler.Fill(this->blob_bottom_data_k_); + blob_bottom_vec_.push_back(blob_bottom_data_k_); +<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da:src/caffe/test/test_triplet_loss_layer.orig +>>>>>>> suit for opencv3.0.0 +======= +>>>>>>> triplet data generation and network update:src/caffe/test/test_triplet_loss_layer + for (int i = 0; i < blob_bottom_y_->count(); ++i) { + 
blob_bottom_y_->mutable_cpu_data()[i] = caffe_rng_rand() % 2; // 0 or 1 + } + blob_bottom_vec_.push_back(blob_bottom_y_); + blob_top_vec_.push_back(blob_top_loss_); + } + virtual ~TripletLossLayerTest() { +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +<<<<<<< 22f42667487341b9aa1d7d1f9854410111b1601c +======= +<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da:src/caffe/test/test_triplet_loss_layer.orig +<<<<<<< 80a07dd611590aa6547f3a3b2af35c791f32dfcf +>>>>>>> triplet data generation and network update + delete blob_bottom_data_; +======= + delete blob_bottom_data_i_; + delete blob_bottom_data_j_; + delete blob_bottom_data_k_; +>>>>>>> suit for opencv3.0.0 +======= + delete blob_bottom_data_i_; + delete blob_bottom_data_j_; + delete blob_bottom_data_k_; +>>>>>>> triplet data generation and network update:src/caffe/test/test_triplet_loss_layer + delete blob_bottom_y_; + delete blob_top_loss_; + } + +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +<<<<<<< 22f42667487341b9aa1d7d1f9854410111b1601c +======= +<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da:src/caffe/test/test_triplet_loss_layer.orig +<<<<<<< 80a07dd611590aa6547f3a3b2af35c791f32dfcf +>>>>>>> triplet data generation and network update + Blob* const blob_bottom_data_; +======= + Blob* const blob_bottom_data_i_; + Blob* const blob_bottom_data_j_; + Blob* const blob_bottom_data_k_; +>>>>>>> suit for opencv3.0.0 +======= + Blob* const blob_bottom_data_i_; + Blob* const blob_bottom_data_j_; + Blob* const blob_bottom_data_k_; +>>>>>>> triplet data generation and network update:src/caffe/test/test_triplet_loss_layer + Blob* const blob_bottom_y_; + Blob* const blob_top_loss_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(TripletLossLayerTest, TestDtypesAndDevices); + +TYPED_TEST(TripletLossLayerTest, TestForward) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + TripletLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, 
this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // manually compute to compare + const Dtype margin = layer_param.triplet_loss_param().margin(); +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +<<<<<<< 22f42667487341b9aa1d7d1f9854410111b1601c +======= +<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da:src/caffe/test/test_triplet_loss_layer.orig +<<<<<<< 80a07dd611590aa6547f3a3b2af35c791f32dfcf +>>>>>>> triplet data generation and network update + const Dtype losstype = 0;//layer_param.triplet_loss_param().losstype(); + const int num_triplets = 3; + const int num_set = this->blob_bottom_data_->num()/(2 + num_triplets); + const int channels = this->blob_bottom_data_->channels(); +======= + const int num = this->blob_bottom_data_i_->num(); + const int channels = this->blob_bottom_data_i_->channels(); +>>>>>>> triplet data generation and network update:src/caffe/test/test_triplet_loss_layer + Dtype loss(0); + for (int i = 0; i < num; ++i) { + Dtype dist_sq(0); +<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da:src/caffe/test/test_triplet_loss_layer.orig + Dtype dist_par(0); +======= + const int num = this->blob_bottom_data_i_->num(); + const int channels = this->blob_bottom_data_i_->channels(); + Dtype loss(0); + for (int i = 0; i < num; ++i) { + Dtype dist_sq(0); +>>>>>>> suit for opencv3.0.0 +======= +>>>>>>> triplet data generation and network update:src/caffe/test/test_triplet_loss_layer + for (int j = 0; j < channels; ++j) { + Dtype diff_pos = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - + this->blob_bottom_data_j_->cpu_data()[i*channels+j]; + dist_sq += diff_pos*diff_pos; +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +<<<<<<< 22f42667487341b9aa1d7d1f9854410111b1601c +======= +<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da:src/caffe/test/test_triplet_loss_layer.orig +<<<<<<< 80a07dd611590aa6547f3a3b2af35c791f32dfcf +>>>>>>> triplet data generation and network update + dist_sq += margin; +======= +>>>>>>> triplet 
data generation and network update:src/caffe/test/test_triplet_loss_layer + Dtype diff_neg = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - + this->blob_bottom_data_j_->cpu_data()[i*channels+j]; + dist_sq -= diff_neg*diff_neg; + } + loss += std::max(margin + dist_sq, 0.0); + /*if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs + loss += dist_sq; + } else { + Dtype dist = std::max(margin - sqrt(dist_sq), 0.0); + loss += dist*dist; + }*/ + } +<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da:src/caffe/test/test_triplet_loss_layer.orig + }*/ + loss /= static_cast(num_set) * Dtype(2); +======= + Dtype diff_neg = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - + this->blob_bottom_data_j_->cpu_data()[i*channels+j]; + dist_sq -= diff_neg*diff_neg; + } + loss += std::max(margin + dist_sq, 0.0); + /*if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs + loss += dist_sq; + } else { + Dtype dist = std::max(margin - sqrt(dist_sq), 0.0); + loss += dist*dist; + }*/ + } + loss /= static_cast(num) * Dtype(2); +>>>>>>> suit for opencv3.0.0 +======= + loss /= static_cast(num) * Dtype(2); +>>>>>>> triplet data generation and network update:src/caffe/test/test_triplet_loss_layer + EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); +} + +TYPED_TEST(TripletLossLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + TripletLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + GradientChecker checker(1e-2, 1e-2, 1701); +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +<<<<<<< 22f42667487341b9aa1d7d1f9854410111b1601c +======= +<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da:src/caffe/test/test_triplet_loss_layer.orig +<<<<<<< 80a07dd611590aa6547f3a3b2af35c791f32dfcf +>>>>>>> triplet data generation and network update + // check the gradient for the first 5 bottom layers +======= + // check the gradient for the first two bottom layers +>>>>>>> triplet data generation 
and network update:src/caffe/test/test_triplet_loss_layer + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 1); +} + +/*TYPED_TEST(TripletLossLayerTest, TestForwardLegacy) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_triplet_loss_param()->set_legacy_version(true); + TripletLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // manually compute to compare + const Dtype margin = layer_param.triplet_loss_param().margin(); + const int num = this->blob_bottom_data_i_->num(); + const int channels = this->blob_bottom_data_i_->channels(); + Dtype loss(0); + for (int i = 0; i < num; ++i) { + Dtype dist_sq(0); + for (int j = 0; j < channels; ++j) { + Dtype diff = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - + this->blob_bottom_data_j_->cpu_data()[i*channels+j]; + dist_sq += diff*diff; + } + if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs + loss += dist_sq; + } else { + loss += std::max(margin - dist_sq, Dtype(0.0)); + } + } + loss /= static_cast(num) * Dtype(2); + EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); +} +<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da:src/caffe/test/test_triplet_loss_layer.orig +======= + // check the gradient for the first two bottom layers + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 1); +} + +/*TYPED_TEST(TripletLossLayerTest, TestForwardLegacy) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_triplet_loss_param()->set_legacy_version(true); + TripletLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + 
layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // manually compute to compare + const Dtype margin = layer_param.triplet_loss_param().margin(); + const int num = this->blob_bottom_data_i_->num(); + const int channels = this->blob_bottom_data_i_->channels(); + Dtype loss(0); + for (int i = 0; i < num; ++i) { + Dtype dist_sq(0); + for (int j = 0; j < channels; ++j) { + Dtype diff = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - + this->blob_bottom_data_j_->cpu_data()[i*channels+j]; + dist_sq += diff*diff; + } + if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs + loss += dist_sq; + } else { + loss += std::max(margin - dist_sq, Dtype(0.0)); + } + } + loss /= static_cast(num) * Dtype(2); + EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); +} +======= +>>>>>>> triplet data generation and network update:src/caffe/test/test_triplet_loss_layer + +TYPED_TEST(TripletLossLayerTest, TestGradientLegacy) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_triplet_loss_param()->set_legacy_version(true); + TripletLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + GradientChecker checker(1e-2, 1e-2, 1701); + // check the gradient for the first two bottom layers + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 1); +}*/ + +<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da:src/caffe/test/test_triplet_loss_layer.orig +>>>>>>> suit for opencv3.0.0 +======= +>>>>>>> triplet data generation and network update:src/caffe/test/test_triplet_loss_layer +} // namespace caffe diff --git a/src/caffe/test/test_triplet_loss_layer~5308d9998ae0b1f97b7b99b33fac968421447f3a b/src/caffe/test/test_triplet_loss_layer~5308d9998ae0b1f97b7b99b33fac968421447f3a new file mode 100644 index 00000000000..6c25ce9bd4b --- /dev/null +++ 
b/src/caffe/test/test_triplet_loss_layer~5308d9998ae0b1f97b7b99b33fac968421447f3a @@ -0,0 +1,125 @@ +#include +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class TripletLossLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + TripletLossLayerTest() + : blob_bottom_data_(new Blob(50, 1, 1, 1)), + blob_bottom_y_(new Blob(50, 1, 1, 1)), + blob_top_loss_(new Blob()) { + // fill the values + FillerParameter filler_param; + filler_param.set_min(-1.0); + filler_param.set_max(1.0); // distances~=1.0 to test both sides of margin + UniformFiller filler(filler_param); + filler.Fill(this->blob_bottom_data_); + blob_bottom_vec_.push_back(blob_bottom_data_); + for (int i = 0; i < blob_bottom_y_->count(); ++i) { + blob_bottom_y_->mutable_cpu_data()[i] = caffe_rng_rand() % 2; // 0 or 1 + } + blob_bottom_vec_.push_back(blob_bottom_y_); + blob_top_vec_.push_back(blob_top_loss_); + } + virtual ~TripletLossLayerTest() { + delete blob_bottom_data_; + delete blob_bottom_y_; + delete blob_top_loss_; + } + + Blob* const blob_bottom_data_; + Blob* const blob_bottom_y_; + Blob* const blob_top_loss_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(TripletLossLayerTest, TestDtypesAndDevices); + +TYPED_TEST(TripletLossLayerTest, TestForward) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + TripletLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // manually compute to compare + const Dtype margin = layer_param.triplet_loss_param().margin(); + const Dtype losstype = 0;//layer_param.triplet_loss_param().losstype(); + const 
int num_triplets = 3; + const int num_set = this->blob_bottom_data_->num()/(2 + num_triplets); + const int channels = this->blob_bottom_data_->channels(); + Dtype loss(0); + if (losstype == 0) { + for (int i = 0; i < num_set; ++i) { + Dtype dist_par(0); + for (int j = 0; j < channels; ++j) { + Dtype diff_pos = this->blob_bottom_data_->cpu_data()[(2+num_triplets)*i*channels+j] - + this->blob_bottom_data_->cpu_data()[((2+num_triplets)*i+1)*channels+j]; + dist_par = diff_pos*diff_pos; + loss += dist_par; + } + for (int triplet = 0; triplet < num_triplets; ++triplet) { + Dtype dist_sq(0); + for (int j = 0; j < channels; ++j) { + Dtype diff_pos = this->blob_bottom_data_->cpu_data()[(2+num_triplets)*i*channels+j] - + this->blob_bottom_data_->cpu_data()[((2+num_triplets)*i+1)*channels+j]; + dist_sq += diff_pos*diff_pos; + Dtype diff_neg = this->blob_bottom_data_->cpu_data()[(2+num_triplets)*i*channels+j] - + this->blob_bottom_data_->cpu_data()[((2+num_triplets)*i+2+triplet)*channels+j]; + dist_sq -= diff_neg*diff_neg; + } + loss += std::max(margin + dist_sq, Dtype(0.0)); + } + } + } /*else { + for (int i = 0; i < num; ++i) { + Dtype dist_sq(0); + Dtype dist_par(0); + for (int j = 0; j < channels; ++j) { + Dtype diff_pos = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - + this->blob_bottom_data_j_->cpu_data()[i*channels+j]; + dist_sq += diff_pos*diff_pos; + dist_sq += margin; + Dtype diff_neg = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - + this->blob_bottom_data_k_->cpu_data()[i*channels+j]; + dist_sq = 1 - diff_neg*diff_neg/dist_sq; + Dtype diff_par = this->blob_bottom_data_l_->cpu_data()[i*channels+j] - + this->blob_bottom_data_m_->cpu_data()[i*channels+j]; + dist_par = diff_par*diff_par; + } + loss += std::max(dist_sq, Dtype(0.0)); + loss += dist_par; + } + }*/ + loss /= static_cast(num_set) * Dtype(2); + EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); +} + +TYPED_TEST(TripletLossLayerTest, TestGradient) { + typedef typename 
TypeParam::Dtype Dtype; + LayerParameter layer_param; + TripletLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + GradientChecker checker(1e-2, 1e-2, 1701); + // check the gradient for the first 5 bottom layers + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); +} +} // namespace caffe From 9f2d6af2b31639307e1cce32ff0d4f404bba2f9e Mon Sep 17 00:00:00 2001 From: Wangyida Date: Tue, 16 Jun 2015 11:40:05 +0800 Subject: [PATCH 70/82] GPU version added --- Makefile | 10 +- Makefile.orig | 16 ++ .../layers/triplet_loss_layer.cpp.orig.orig | 59 +------ src/caffe/layers/triplet_loss_layer.cu.orig | 154 +++++++++++++----- 4 files changed, 143 insertions(+), 96 deletions(-) diff --git a/Makefile b/Makefile index 5fb6394e947..e445ebed568 100644 --- a/Makefile +++ b/Makefile @@ -242,7 +242,7 @@ ifeq ($(LINUX), 1) CXX ?= /usr/bin/g++ GCCVERSION := $(shell $(CXX) -dumpversion | cut -f1,2 -d.) # older versions of gcc are too dumb to build boost with -Wuninitalized - ifeq ($(shell echo | awk '{exit $(GCCVERSION) < 4.6;}'), 1) + ifeq ($(shell echo $(GCCVERSION) \< 4.6 | bc), 1) WARNINGS += -Wno-uninitialized endif # boost::thread is reasonably called boost_thread (compare OS X) @@ -257,7 +257,7 @@ ifeq ($(OSX), 1) CXX := /usr/bin/clang++ ifneq ($(CPU_ONLY), 1) CUDA_VERSION := $(shell $(CUDA_DIR)/bin/nvcc -V | grep -o 'release \d' | grep -o '\d') - ifeq ($(shell echo | awk '{exit $(CUDA_VERSION) < 7.0;}'), 1) + ifeq ($(shell echo $(CUDA_VERSION) \< 7.0 | bc), 1) CXXFLAGS += -stdlib=libstdc++ LINKFLAGS += -stdlib=libstdc++ endif @@ -412,13 +412,11 @@ endif ############################## # Define build targets ############################## -.PHONY: all lib test clean docs linecount lint lintclean tools examples $(DIST_ALIASES) \ +.PHONY: all test clean docs linecount lint lintclean tools examples $(DIST_ALIASES) \ py mat py$(PROJECT) mat$(PROJECT) proto runtest \ superclean supercleanlist 
supercleanfiles warn everything -all: lib tools examples - -lib: $(STATIC_NAME) $(DYNAMIC_NAME) +all: $(STATIC_NAME) $(DYNAMIC_NAME) tools examples everything: $(EVERYTHING_TARGETS) diff --git a/Makefile.orig b/Makefile.orig index 287fa4ea52f..7653a57b6e1 100644 --- a/Makefile.orig +++ b/Makefile.orig @@ -228,7 +228,11 @@ ifeq ($(LINUX), 1) CXX ?= /usr/bin/g++ GCCVERSION := $(shell $(CXX) -dumpversion | cut -f1,2 -d.) # older versions of gcc are too dumb to build boost with -Wuninitalized +<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c ifeq ($(shell echo | awk '{exit $(GCCVERSION) < 4.6;}'), 1) +======= + ifeq ($(shell echo $(GCCVERSION) \< 4.6 | bc), 1) +>>>>>>> GPU version added WARNINGS += -Wno-uninitialized endif # boost::thread is reasonably called boost_thread (compare OS X) @@ -243,7 +247,11 @@ ifeq ($(OSX), 1) CXX := /usr/bin/clang++ ifneq ($(CPU_ONLY), 1) CUDA_VERSION := $(shell $(CUDA_DIR)/bin/nvcc -V | grep -o 'release \d' | grep -o '\d') +<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c ifeq ($(shell echo | awk '{exit $(CUDA_VERSION) < 7.0;}'), 1) +======= + ifeq ($(shell echo $(CUDA_VERSION) \< 7.0 | bc), 1) +>>>>>>> GPU version added CXXFLAGS += -stdlib=libstdc++ LINKFLAGS += -stdlib=libstdc++ endif @@ -386,6 +394,7 @@ endif ############################## # Define build targets ############################## +<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c .PHONY: all lib test clean docs linecount lint lintclean tools examples $(DIST_ALIASES) \ py mat py$(PROJECT) mat$(PROJECT) proto runtest \ superclean supercleanlist supercleanfiles warn everything @@ -393,6 +402,13 @@ endif all: lib tools examples lib: $(STATIC_NAME) $(DYNAMIC_NAME) +======= +.PHONY: all test clean docs linecount lint lintclean tools examples $(DIST_ALIASES) \ + py mat py$(PROJECT) mat$(PROJECT) proto runtest \ + superclean supercleanlist supercleanfiles warn everything + +all: $(STATIC_NAME) $(DYNAMIC_NAME) tools examples +>>>>>>> GPU version added everything: 
$(EVERYTHING_TARGETS) diff --git a/src/caffe/layers/triplet_loss_layer.cpp.orig.orig b/src/caffe/layers/triplet_loss_layer.cpp.orig.orig index 972e6eb988c..e65f1fdc29b 100644 --- a/src/caffe/layers/triplet_loss_layer.cpp.orig.orig +++ b/src/caffe/layers/triplet_loss_layer.cpp.orig.orig @@ -46,7 +46,10 @@ void TripletLossLayer::Forward_cpu( int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); CHECK_EQ(bottom[0]->num()%(2 + num_triplets), 0); Dtype loss(0.0); -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a +<<<<<<< 46f6a4f657c9a5f4ffb83b7c8540b4fd2b8208bb +======= +<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c +>>>>>>> GPU version added int dim = bottom[0]->count()/bottom[0]->num(); int num_set = bottom[0]->num()/(2 + num_triplets); if (losstype == 0) { @@ -79,21 +82,23 @@ void TripletLossLayer::Forward_cpu( // loss accumulated accumulated by the triplet part loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); } +<<<<<<< 46f6a4f657c9a5f4ffb83b7c8540b4fd2b8208bb +======= ======= - // Loss component calculated from ab for (int i = 0; i < bottom[0]->num(); ++i) { dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(channels, diff_pos.cpu_data() + (i*channels), diff_pos.cpu_data() + (i*channels)); // ab is a similar pair - dist_sq_.mutable_cpu_data()[i] += dist_sq_pos.cpu_data()[i]; + dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i]; // Loss component calculated from ac dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(channels, diff_neg.cpu_data() + (i*channels), diff_neg.cpu_data() + (i*channels)); // ac is a dissimilar pair dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg.cpu_data()[i]; loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); ->>>>>>> restore +>>>>>>> GPU version added +>>>>>>> GPU version added } loss = loss / static_cast(num_set) / Dtype(2); top[0]->mutable_cpu_data()[0] = loss; @@ -110,15 +115,9 @@ void TripletLossLayer::Forward_cpu( // a b is a similar pair for pair wise // loss accumulated by the 
pair wise part loss += dist_sq_pos.cpu_data()[i]; -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; dist_sq_.mutable_cpu_data()[i] += margin; for (int triplet = 0; triplet < num_triplets; ++triplet) { -======= - for (int triplet = 0; triplet < num_triplets; ++triplet) { - dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; - dist_sq_.mutable_cpu_data()[i] += margin; ->>>>>>> triplet data generation and network update // Loss component calculated from negative part caffe_sub( dim, @@ -143,7 +142,6 @@ template void TripletLossLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { Dtype margin = this->layer_param_.triplet_loss_param().margin(); -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); int dim = bottom[0]->count()/bottom[0]->num(); @@ -263,26 +261,11 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { // dissimilar pairs -======= - for (int i = 1; i < 3; ++i) { -// there must be further check to ensure the gradient calc - if (propagate_down[i]) { - const Dtype sign = (i == 2) ? 
1 : -1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(bottom[i]->num()); - int num = bottom[i]->num(); - int channels = bottom[i]->channels(); - for (int j = 0; j < num; ++j) { - Dtype* bout = bottom[i]->mutable_cpu_diff(); - if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - // similar pairs ->>>>>>> restore caffe_cpu_axpby( dim, alpha, diff_neg.cpu_data() + (j*dim), Dtype(0.0), -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a bout + ((2 + num_triplets)*j + i)*dim); } else { caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); @@ -306,17 +289,10 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, diff_pos.cpu_data() + (j*dim), Dtype(0.0), bout + ((2 + num_triplets)*j + i)*dim); -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; dist_sq_.mutable_cpu_data()[j] += margin; // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { -======= - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; ->>>>>>> triplet data generation and network update // Loss component calculated from negative part caffe_sub( dim, @@ -364,17 +340,10 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, diff_pos.cpu_data() + (j*dim), Dtype(0.0), bout + ((2 + num_triplets)*j + i)*dim); -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; dist_sq_.mutable_cpu_data()[j] += margin; // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { -======= - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; ->>>>>>> triplet data generation and network 
update // Loss component calculated from negative part caffe_sub( dim, @@ -424,24 +393,14 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; // loss accumulated accumulated by the triplet part if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { -======= - bout + (j*channels)); - // dissimilar pairs ->>>>>>> restore caffe_cpu_axpby( dim, alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), diff_neg.cpu_data() + (j*dim), Dtype(0.0), -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a bout + ((2 + num_triplets)*j + i)*dim); } else { caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); -======= - bout + (j*channels)); - } else { - caffe_set(channels, Dtype(0), bout + (j*channels)); ->>>>>>> restore } } } diff --git a/src/caffe/layers/triplet_loss_layer.cu.orig b/src/caffe/layers/triplet_loss_layer.cu.orig index 717e0b87e8c..11ef82e17ac 100644 --- a/src/caffe/layers/triplet_loss_layer.cu.orig +++ b/src/caffe/layers/triplet_loss_layer.cu.orig @@ -2,9 +2,15 @@ #include #include "caffe/layer.hpp" +<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c #include "caffe/util/io.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +======= +#include "caffe/vision_layers.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" +>>>>>>> GPU version added namespace caffe { @@ -12,6 +18,7 @@ template void TripletLossLayer::Forward_gpu( const vector*>& bottom, const vector*>& top) { +<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c Dtype margin = this->layer_param_.triplet_loss_param().margin(); Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); @@ -30,11 +37,7 @@ void TripletLossLayer::Forward_gpu( dim, diff_pos.gpu_data() + i*dim, diff_pos.gpu_data() + i*dim, -<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_pos.mutable_cpu_data() + i); -======= - 
dist_sq_pos.mutable_gpu_data() + i); ->>>>>>> debug GPU triplet loss codes for loss type 0 // a b is a similar pair for pair wise // loss accumulated by the pair wise part loss += dist_sq_pos.gpu_data()[i]; @@ -52,11 +55,7 @@ void TripletLossLayer::Forward_gpu( dim, diff_neg.gpu_data() + i*dim, diff_neg.gpu_data() + i*dim, -<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_neg.mutable_cpu_data() + i); -======= - dist_sq_neg.mutable_gpu_data() + i); ->>>>>>> debug GPU triplet loss codes for loss type 0 // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[i] -= dist_sq_neg.gpu_data()[i]; // loss accumulated accumulated by the triplet part @@ -77,11 +76,7 @@ void TripletLossLayer::Forward_gpu( dim, diff_pos.gpu_data() + i*dim, diff_pos.gpu_data() + i*dim, -<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_pos.mutable_cpu_data() + i); -======= - dist_sq_pos.mutable_gpu_data() + i); ->>>>>>> debug GPU triplet loss codes for loss type 0 // a b is a similar pair for pair wise // loss accumulated by the pair wise part loss += dist_sq_pos.gpu_data()[i]; @@ -98,11 +93,7 @@ void TripletLossLayer::Forward_gpu( dim, diff_neg.gpu_data() + i*dim, diff_neg.gpu_data() + i*dim, -<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_neg.mutable_cpu_data() + i); -======= - dist_sq_neg.mutable_gpu_data() + i); ->>>>>>> debug GPU triplet loss codes for loss type 0 // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[i] = 1 - \ dist_sq_neg.gpu_data()[i] / dist_sq_.mutable_gpu_data()[i]; @@ -113,12 +104,54 @@ void TripletLossLayer::Forward_gpu( loss = loss / static_cast(num_set) / Dtype(2); top[0]->mutable_gpu_data()[0] = loss; } +======= + int count = bottom[0]->count(); + caffe_gpu_sub( + count, + bottom[0]->gpu_data(), // a + bottom[1]->gpu_data(), // b + diff_pos.mutable_gpu_data()); // a_i-b_i + caffe_gpu_sub( + count, + bottom[0]->gpu_data(), // a + bottom[2]->gpu_data(), // c + 
diff_neg.mutable_gpu_data()); // a_i-c_i + caffe_gpu_powx( + count, + diff_pos.mutable_gpu_data(), // a_i-b_i + Dtype(2), + diff_sq_pos.mutable_gpu_data()); // (a_i-b_i)^2 + caffe_gpu_powx( + count, + diff_neg.mutable_gpu_data(), // a_i-c_i + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (a_i-c_i)^2 + const int channels = bottom[0]->channels(); + Dtype margin = this->layer_param_.triplet_loss_param().margin(); + Dtype loss(0.0); + // Loss component calculated from ab + for (int i = 0; i < bottom[0]->num(); ++i) { + /*dist_sq_pos.mutable_gpu_data()[i] = caffe_gpu_dot(channels, + diff_pos.gpu_data() + (i*channels), diff_pos.gpu_data() + (i*channels));*/ + // ab is a similar pair + dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.gpu_data()[i]; + // Loss component calculated from ac + /*dist_sq_neg.mutable_gpu_data()[i] = caffe_gpu_dot(channels, + diff_neg.gpu_data() + (i*channels), diff_neg.gpu_data() + (i*channels));*/ + // ac is a dissimilar pair + dist_sq_.mutable_gpu_data()[i] -= dist_sq_neg.gpu_data()[i]; + loss += std::max(margin + dist_sq_.gpu_data()[i], Dtype(0.0)); + } + loss = loss / static_cast(bottom[0]->num()) / Dtype(2); + top[0]->mutable_gpu_data()[0] = loss; +>>>>>>> GPU version added } template void TripletLossLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { Dtype margin = this->layer_param_.triplet_loss_param().margin(); +<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); int dim = bottom[0]->count()/bottom[0]->num(); @@ -150,11 +183,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, -<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_neg.mutable_cpu_data() + j); -======= - dist_sq_neg.mutable_gpu_data() + j); ->>>>>>> debug GPU triplet loss codes for loss type 0 // Triplet loss 
accumulation // a and negative[triplet] is a similar pair for triplet dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; @@ -210,11 +239,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, -<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_neg.mutable_cpu_data() + j); -======= - dist_sq_neg.mutable_gpu_data() + j); ->>>>>>> debug GPU triplet loss codes for loss type 0 // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { @@ -250,11 +275,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, -<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_neg.mutable_cpu_data() + j); -======= - dist_sq_neg.mutable_gpu_data() + j); ->>>>>>> debug GPU triplet loss codes for loss type 0 // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { @@ -301,11 +322,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, -<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_neg.mutable_cpu_data() + j); -======= - dist_sq_neg.mutable_gpu_data() + j); ->>>>>>> debug GPU triplet loss codes for loss type 0 // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] = 1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; @@ -359,11 +376,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, -<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_neg.mutable_cpu_data() + j); -======= - dist_sq_neg.mutable_gpu_data() + j); ->>>>>>> debug GPU triplet loss codes for loss type 0 // a and negative[triplet] is a 
dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] = 1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; @@ -402,11 +415,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, -<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_neg.mutable_cpu_data() + j); -======= - dist_sq_neg.mutable_gpu_data() + j); ->>>>>>> debug GPU triplet loss codes for loss type 0 // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] = 1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; @@ -420,11 +429,76 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, bout + ((2 + num_triplets)*j + i)*dim); } else { caffe_gpu_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); +======= +// there must be further check to ensure the gradient calc + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->gpu_diff()[0] / + static_cast(bottom[0]->num()); + int num = bottom[0]->num(); + int channels = bottom[0]->channels(); + for (int j = 0; j < num; ++j) { + Dtype* bout = bottom[0]->mutable_gpu_diff(); + if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { + // similar pairs + caffe_gpu_axpby( + channels, + alpha, + diff_pos.gpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + // dissimilar pairs + caffe_gpu_axpby( + channels, + -alpha, + diff_neg.gpu_data() + (j*channels), + Dtype(1.0), + bout + (j*channels)); + } else { + caffe_set(channels, Dtype(0), bout + (j*channels)); + } + } + } + for (int i = 1; i < 3; ++i) { +// there must be further check to ensure the gradient calc + if (propagate_down[i]) { + const Dtype sign = (i == 1) ? 
-1 : 1; + const Dtype alpha = sign * top[0]->gpu_diff()[0] / + static_cast(bottom[i]->num()); + int num = bottom[i]->num(); + int channels = bottom[i]->channels(); + for (int j = 0; j < num; ++j) { + Dtype* bout = bottom[i]->mutable_gpu_diff(); + if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { + if (i == 1) { + // similar pairs + caffe_gpu_axpby( + channels, + alpha, + diff_pos.gpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + } + else { + // dissimilar pairs + caffe_gpu_axpby( + channels, + alpha, + diff_neg.gpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + } + } else { + caffe_set(channels, Dtype(0), bout + (j*channels)); +>>>>>>> GPU version added } } } } +<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c } +======= +>>>>>>> GPU version added } INSTANTIATE_LAYER_GPU_FUNCS(TripletLossLayer); From 93b9bfa24a36bd76c3207de2b834750f8db45d3a Mon Sep 17 00:00:00 2001 From: Wangyida Date: Tue, 30 Jun 2015 21:29:31 +0800 Subject: [PATCH 71/82] lfw test added in examples of triplet --- examples/siamese/convert_lfw_siamese_data.cpp | 121 +++++ examples/siamese/create_lfw_siamese.sh | 21 + examples/siamese/lfw_siamese.prototxt | 113 ++++ examples/siamese/lfw_siamese_solver.prototxt | 25 + .../siamese/lfw_siamese_train_test.prototxt | 349 ++++++++++++ examples/siamese/train_lfw_siamese.sh | 5 + examples/triplet/convert_lfw_triplet_data.cpp | 126 +++++ examples/triplet/create_lfw_triplet.sh | 21 + examples/triplet/lfw_triplet.prototxt | 113 ++++ examples/triplet/lfw_triplet_solver.prototxt | 25 + .../triplet/lfw_triplet_train_test.prototxt | 500 ++++++++++++++++++ examples/triplet/train_lfw_triplet.sh | 5 + 12 files changed, 1424 insertions(+) create mode 100644 examples/siamese/convert_lfw_siamese_data.cpp create mode 100755 examples/siamese/create_lfw_siamese.sh create mode 100644 examples/siamese/lfw_siamese.prototxt create mode 100644 examples/siamese/lfw_siamese_solver.prototxt create mode 100644 
examples/siamese/lfw_siamese_train_test.prototxt create mode 100755 examples/siamese/train_lfw_siamese.sh create mode 100644 examples/triplet/convert_lfw_triplet_data.cpp create mode 100755 examples/triplet/create_lfw_triplet.sh create mode 100644 examples/triplet/lfw_triplet.prototxt create mode 100644 examples/triplet/lfw_triplet_solver.prototxt create mode 100644 examples/triplet/lfw_triplet_train_test.prototxt create mode 100755 examples/triplet/train_lfw_triplet.sh diff --git a/examples/siamese/convert_lfw_siamese_data.cpp b/examples/siamese/convert_lfw_siamese_data.cpp new file mode 100644 index 00000000000..fe134ca9b4e --- /dev/null +++ b/examples/siamese/convert_lfw_siamese_data.cpp @@ -0,0 +1,121 @@ +// +// This script converts the MNIST dataset to the leveldb format used +// by caffe to train siamese network. +// Usage: +// convert_mnist_data input_image_file input_label_file output_db_file +// The MNIST dataset could be downloaded at +// http://yann.lecun.com/exdb/mnist/ +#include // NOLINT(readability/streams) +#include + +#include "glog/logging.h" +#include "google/protobuf/text_format.h" +#include "leveldb/db.h" +#include "stdint.h" + +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/math_functions.hpp" + +uint32_t swap_endian(uint32_t val) { + val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); + return (val << 16) | (val >> 16); +} + +void read_image(std::ifstream* image_file, std::ifstream* label_file, + uint32_t index, uint32_t rows, uint32_t cols, + char* pixels, char* label) { + image_file->seekg(index * rows * cols + 16); + image_file->read(pixels, rows * cols); + label_file->seekg(index + 8); + label_file->read(label, 1); +} + +void convert_dataset(const char* image_filename, const char* label_filename, + const char* db_filename) { + // Open files + std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); + std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); + CHECK(image_file) << "Unable 
to open file " << image_filename; + CHECK(label_file) << "Unable to open file " << label_filename; + // Read the magic and the meta data + uint32_t magic; + uint32_t num_items; + uint32_t num_labels; + uint32_t rows; + uint32_t cols; + + image_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + label_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + image_file.read(reinterpret_cast(&num_items), 4); + num_items = swap_endian(num_items); + label_file.read(reinterpret_cast(&num_labels), 4); + num_labels = swap_endian(num_labels); + CHECK_EQ(num_items, num_labels); + image_file.read(reinterpret_cast(&rows), 4); + rows = swap_endian(rows); + image_file.read(reinterpret_cast(&cols), 4); + cols = swap_endian(cols); + + // Open leveldb + leveldb::DB* db; + leveldb::Options options; + options.create_if_missing = true; + options.error_if_exists = true; + leveldb::Status status = leveldb::DB::Open( + options, db_filename, &db); + CHECK(status.ok()) << "Failed to open leveldb " << db_filename + << ". 
Is it already existing?"; + + char label_i; + char label_j; + char* pixels = new char[2 * rows * cols]; + const int kMaxKeyLength = 10; + char key[kMaxKeyLength]; + std::string value; + + caffe::Datum datum; + datum.set_channels(2); // one channel for each image in the pair + datum.set_height(rows); + datum.set_width(cols); + LOG(INFO) << "A total of " << num_items << " items."; + LOG(INFO) << "Rows: " << rows << " Cols: " << cols; + for (int itemid = 0; itemid < num_items; ++itemid) { + int i = caffe::caffe_rng_rand() % num_items; // pick a random pair + int j = caffe::caffe_rng_rand() % num_items; + read_image(&image_file, &label_file, i, rows, cols, + pixels, &label_i); + read_image(&image_file, &label_file, j, rows, cols, + pixels + (rows * cols), &label_j); + datum.set_data(pixels, 2*rows*cols); + if (label_i == label_j) { + datum.set_label(1); + } else { + datum.set_label(0); + } + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", itemid); + db->Put(leveldb::WriteOptions(), std::string(key), value); + } + + delete db; + delete pixels; +} + +int main(int argc, char** argv) { + if (argc != 4) { + printf("This script converts the MNIST dataset to the leveldb format used\n" + "by caffe to train a siamese network.\n" + "Usage:\n" + " convert_mnist_data input_image_file input_label_file " + "output_db_file\n" + "The MNIST dataset could be downloaded at\n" + " http://yann.lecun.com/exdb/mnist/\n" + "You should gunzip them after downloading.\n"); + } else { + google::InitGoogleLogging(argv[0]); + convert_dataset(argv[1], argv[2], argv[3]); + } + return 0; +} diff --git a/examples/siamese/create_lfw_siamese.sh b/examples/siamese/create_lfw_siamese.sh new file mode 100755 index 00000000000..3790b9eb2a0 --- /dev/null +++ b/examples/siamese/create_lfw_siamese.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env sh +# This script converts the mnist data into leveldb format. + +EXAMPLES=./build/examples/siamese +DATA=./data/lfw + +echo "Creating leveldb..." 
+ +rm -rf ./examples/siamese/lfw_siamese_train_leveldb +rm -rf ./examples/siamese/lfw_siamese_test_leveldb + +$EXAMPLES/convert_lfw_siamese_data.bin \ + $DATA/traindata \ + $DATA/trainlabel \ + ./examples/siamese/lfw_siamese_train_leveldb +$EXAMPLES/convert_mnist_siamese_data.bin \ + $DATA/testdata \ + $DATA/testlabel \ + ./examples/siamese/lfw_siamese_test_leveldb + +echo "Done." diff --git a/examples/siamese/lfw_siamese.prototxt b/examples/siamese/lfw_siamese.prototxt new file mode 100644 index 00000000000..106d9aa76f4 --- /dev/null +++ b/examples/siamese/lfw_siamese.prototxt @@ -0,0 +1,113 @@ +name: "lfw_siamese" +input: "data" +input_dim: 10000 +input_dim: 1 +input_dim: 150 +input_dim: 80 +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 500 + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "ip1" + top: "ip2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 10 + } +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip2" + top: "feat" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + 
num_output: 2 + } +} diff --git a/examples/siamese/lfw_siamese_solver.prototxt b/examples/siamese/lfw_siamese_solver.prototxt new file mode 100644 index 00000000000..2aaafb63c1f --- /dev/null +++ b/examples/siamese/lfw_siamese_solver.prototxt @@ -0,0 +1,25 @@ +# The train/test net protocol buffer definition +net: "examples/siamese/lfw_siamese_train_test.prototxt" +# test_iter specifies how many forward passes the test should carry out. +# In the case of MNIST, we have test batch size 100 and 100 test iterations, +# covering the full 10,000 testing images. +test_iter: 100 +# Carry out testing every 500 training iterations. +test_interval: 500 +# The base learning rate, momentum and the weight decay of the network. +base_lr: 0.01 +momentum: 0.9 +weight_decay: 0.0000 +# The learning rate policy +lr_policy: "inv" +gamma: 0.0001 +power: 0.75 +# Display every 100 iterations +display: 100 +# The maximum number of iterations +max_iter: 50000 +# snapshot intermediate results +snapshot: 5000 +snapshot_prefix: "examples/siamese/lfw_siamese" +# solver mode: CPU or GPU +solver_mode: CPU diff --git a/examples/siamese/lfw_siamese_train_test.prototxt b/examples/siamese/lfw_siamese_train_test.prototxt new file mode 100644 index 00000000000..049187bf3d4 --- /dev/null +++ b/examples/siamese/lfw_siamese_train_test.prototxt @@ -0,0 +1,349 @@ +name: "lfw_siamese_train_test" +layer { + name: "pair_data" + type: "Data" + top: "pair_data" + top: "sim" + include { + phase: TRAIN + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/siamese/lfw_siamese_train_leveldb" + batch_size: 64 + } +} +layer { + name: "pair_data" + type: "Data" + top: "pair_data" + top: "sim" + include { + phase: TEST + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/siamese/lfw_siamese_test_leveldb" + batch_size: 100 + } +} +layer { + name: "slice_pair" + type: "Slice" + bottom: "pair_data" + top: "data" + top: "data_p" + slice_param { + slice_dim: 1 + 
slice_point: 1 + } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "ip1" + top: "ip2" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip2" + top: "feat" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "conv1_p" + type: "Convolution" + bottom: "data_p" + top: "conv1_p" + param { + name: 
"conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1_p" + type: "Pooling" + bottom: "conv1_p" + top: "pool1_p" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2_p" + type: "Convolution" + bottom: "pool1_p" + top: "conv2_p" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2_p" + type: "Pooling" + bottom: "conv2_p" + top: "pool2_p" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1_p" + type: "InnerProduct" + bottom: "pool2_p" + top: "ip1_p" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1_p" + type: "ReLU" + bottom: "ip1_p" + top: "ip1_p" +} +layer { + name: "ip2_p" + type: "InnerProduct" + bottom: "ip1_p" + top: "ip2_p" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat_p" + type: "InnerProduct" + bottom: "ip2_p" + top: "feat_p" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "loss" + type: "ContrastiveLoss" + bottom: "feat" + bottom: "feat_p" + bottom: "sim" + top: "loss" + contrastive_loss_param { + margin: 1 + } +} diff --git 
a/examples/siamese/train_lfw_siamese.sh b/examples/siamese/train_lfw_siamese.sh new file mode 100755 index 00000000000..0a879a65419 --- /dev/null +++ b/examples/siamese/train_lfw_siamese.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env sh + +TOOLS=./build/tools + +$TOOLS/caffe train --solver=examples/siamese/lfw_siamese_solver.prototxt diff --git a/examples/triplet/convert_lfw_triplet_data.cpp b/examples/triplet/convert_lfw_triplet_data.cpp new file mode 100644 index 00000000000..9f65fab76b4 --- /dev/null +++ b/examples/triplet/convert_lfw_triplet_data.cpp @@ -0,0 +1,126 @@ +// This script converts the MNIST dataset to the leveldb format used +// by caffe to train siamese network. +// Usage: +// convert_mnist_data input_image_file input_label_file output_db_file +// The MNIST dataset could be downloaded at +// http://yann.lecun.com/exdb/mnist/ +#include // NOLINT(readability/streams) +#include + +#include "glog/logging.h" +#include "google/protobuf/text_format.h" +#include "leveldb/db.h" +#include "stdint.h" + +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/math_functions.hpp" + +uint32_t swap_endian(uint32_t val) { + val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); + return (val << 16) | (val >> 16); +} + +void read_image(std::ifstream* image_file, std::ifstream* label_file, + uint32_t index, uint32_t rows, uint32_t cols, + char* pixels, char* label) { + image_file->seekg(index * rows * cols + 16); + image_file->read(pixels, rows * cols); + label_file->seekg(index + 8); + label_file->read(label, 1); +} + +void convert_dataset(const char* image_filename, const char* label_filename, + const char* db_filename) { + // Open files + std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); + std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); + CHECK(image_file) << "Unable to open file " << image_filename; + CHECK(label_file) << "Unable to open file " << label_filename; + // Read the magic and the meta data + uint32_t magic; 
+ uint32_t num_items; + uint32_t num_labels; + uint32_t rows; + uint32_t cols; + + image_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + label_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + image_file.read(reinterpret_cast(&num_items), 4); + num_items = swap_endian(num_items); + label_file.read(reinterpret_cast(&num_labels), 4); + num_labels = swap_endian(num_labels); + CHECK_EQ(num_items, num_labels); + image_file.read(reinterpret_cast(&rows), 4); + rows = swap_endian(rows); + image_file.read(reinterpret_cast(&cols), 4); + cols = swap_endian(cols); + + // Open leveldb + leveldb::DB* db; + leveldb::Options options; + options.create_if_missing = true; + options.error_if_exists = true; + leveldb::Status status = leveldb::DB::Open( + options, db_filename, &db); + CHECK(status.ok()) << "Failed to open leveldb " << db_filename + << ". Is it already existing?"; + + char label_i; + char label_j; + char label_k; + char* pixels = new char[3 * rows * cols]; + const int kMaxKeyLength = 10; + char key[kMaxKeyLength]; + std::string value; + + caffe::Datum datum; + datum.set_channels(3); // one channel for each image in the pair + datum.set_height(rows); + datum.set_width(cols); + LOG(INFO) << "A total of " << num_items << " items."; + LOG(INFO) << "Rows: " << rows << " Cols: " << cols; + for (int itemid = 0; itemid < num_items; ++itemid) { + int i = caffe::caffe_rng_rand() % num_items; // pick triplet groups + int j = caffe::caffe_rng_rand() % num_items; + int k = caffe::caffe_rng_rand() % num_items; + read_image(&image_file, &label_file, i, rows, cols, + pixels, &label_i); + read_image(&image_file, &label_file, j, rows, cols, + pixels + (rows * cols), &label_j); + read_image(&image_file, &label_file, k, rows, cols, + pixels + (2 * rows * cols), &label_k); + + datum.set_data(pixels, 3*rows*cols); + if (label_i == label_j && label_i != label_k) { + datum.set_label(1); + datum.SerializeToString(&value); + snprintf(key, 
kMaxKeyLength, "%08d", itemid); + db->Put(leveldb::WriteOptions(), std::string(key), value); + } else { + itemid--; + datum.set_label(0); + } + } + + delete db; + delete pixels; +} + +int main(int argc, char** argv) { + if (argc != 4) { + printf("This script converts the MNIST dataset to the leveldb format used\n" + "by caffe to train a siamese network.\n" + "Usage:\n" + " convert_mnist_data input_image_file input_label_file " + "output_db_file\n" + "The MNIST dataset could be downloaded at\n" + " http://yann.lecun.com/exdb/mnist/\n" + "You should gunzip them after downloading.\n"); + } else { + google::InitGoogleLogging(argv[0]); + convert_dataset(argv[1], argv[2], argv[3]); + } + return 0; +} diff --git a/examples/triplet/create_lfw_triplet.sh b/examples/triplet/create_lfw_triplet.sh new file mode 100755 index 00000000000..382a9021f10 --- /dev/null +++ b/examples/triplet/create_lfw_triplet.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env sh +# This script converts the lfw data into leveldb format. + +EXAMPLES=./build/examples/triplet +DATA=./data/lfw + +echo "Creating leveldb..." + +rm -rf ./examples/triplet/lfw_triplet_train_leveldb +rm -rf ./examples/triplet/lfw_triplet_test_leveldb + +$EXAMPLES/convert_lfw_triplet_data.bin \ + $DATA/traindata \ + $DATA/trainlabel \ + ./examples/triplet/lfw_triplet_train_leveldb +$EXAMPLES/convert_lfw_triplet_data.bin \ + $DATA/testdata \ + $DATA/testlabel \ + ./examples/triplet/lfw_triplet_test_leveldb + +echo "Done." 
diff --git a/examples/triplet/lfw_triplet.prototxt b/examples/triplet/lfw_triplet.prototxt new file mode 100644 index 00000000000..9537d1feb8b --- /dev/null +++ b/examples/triplet/lfw_triplet.prototxt @@ -0,0 +1,113 @@ +name: "lfw_siamese" +input: "data" +input_dim: 10000 +input_dim: 1 +input_dim: 150 +input_dim: 130 +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 500 + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "ip1" + top: "ip2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 10 + } +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip2" + top: "feat" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 2 + } +} diff --git a/examples/triplet/lfw_triplet_solver.prototxt b/examples/triplet/lfw_triplet_solver.prototxt new file mode 100644 index 00000000000..eb4c2c369e9 --- /dev/null +++ b/examples/triplet/lfw_triplet_solver.prototxt @@ -0,0 +1,25 @@ +# The train/test net protocol buffer definition +net: "examples/triplet/lfw_triplet_train_test.prototxt" +# test_iter specifies how 
many forward passes the test should carry out. +# In the case of lfw, we have test batch size 100 and 100 test iterations, +# covering the full 10,000 testing images. +test_iter: 100 +# Carry out testing every 500 training iterations. +test_interval: 500 +# The base learning rate, momentum and the weight decay of the network. +base_lr: 0.01 +momentum: 0.9 +weight_decay: 0.0000 +# The learning rate policy +lr_policy: "inv" +gamma: 0.0001 +power: 0.75 +# Display every 100 iterations +display: 100 +# The maximum number of iterations +max_iter: 50000 +# snapshot intermediate results +snapshot: 5000 +snapshot_prefix: "examples/triplet/lfw_triplet" +# solver mode: CPU or GPU +solver_mode: CPU diff --git a/examples/triplet/lfw_triplet_train_test.prototxt b/examples/triplet/lfw_triplet_train_test.prototxt new file mode 100644 index 00000000000..59ef26e90a4 --- /dev/null +++ b/examples/triplet/lfw_triplet_train_test.prototxt @@ -0,0 +1,500 @@ +name: "lfw_triplet_train_test" +layer { + name: "triplet_data" + type: "Data" + top: "triplet_data" + top: "sim" + include { + phase: TRAIN + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/triplet/lfw_triplet_train_leveldb" + batch_size: 64 + } +} +layer { + name: "triplet_data" + type: "Data" + top: "triplet_data" + top: "sim" + include { + phase: TEST + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/triplet/lfw_triplet_test_leveldb" + batch_size: 100 + } +} +layer { + name: "slice_triplet" + type: "Slice" + bottom: "triplet_data" + top: "data" + top: "data_true" + top: "data_false" + slice_param { + slice_dim: 1 + slice_point: 1 + slice_point: 2 + } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } 
+} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "ip1" + top: "ip2" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip2" + top: "feat" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "conv1_true" + type: "Convolution" + bottom: "data_true" + top: "conv1_true" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1_true" + type: "Pooling" + bottom: "conv1_true" + top: "pool1_true" + pooling_param { + 
pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2_true" + type: "Convolution" + bottom: "pool1_true" + top: "conv2_true" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2_true" + type: "Pooling" + bottom: "conv2_true" + top: "pool2_true" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1_true" + type: "InnerProduct" + bottom: "pool2_true" + top: "ip1_true" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1_true" + type: "ReLU" + bottom: "ip1_true" + top: "ip1_true" +} +layer { + name: "ip2_true" + type: "InnerProduct" + bottom: "ip1_true" + top: "ip2_true" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat_true" + type: "InnerProduct" + bottom: "ip2_true" + top: "feat_true" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "conv1_false" + type: "Convolution" + bottom: "data_false" + top: "conv1_false" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1_false" + type: "Pooling" + bottom: "conv1_false" + top: "pool1_false" + pooling_param { + pool: 
MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2_false" + type: "Convolution" + bottom: "pool1_false" + top: "conv2_false" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2_false" + type: "Pooling" + bottom: "conv2_false" + top: "pool2_false" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1_false" + type: "InnerProduct" + bottom: "pool2_false" + top: "ip1_false" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1_false" + type: "ReLU" + bottom: "ip1_false" + top: "ip1_false" +} +layer { + name: "ip2_false" + type: "InnerProduct" + bottom: "ip1_false" + top: "ip2_false" + param { + name: "ip2_w" + lr_mult: 1 + } + param { + name: "ip2_b" + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "feat_false" + type: "InnerProduct" + bottom: "ip2_false" + top: "feat_false" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "loss" + type: "TripletLoss" + bottom: "feat" + bottom: "feat_true" + bottom: "feat_false" + bottom: "sim" + top: "loss" + triplet_loss_param { + margin: 0.2 + } +} + diff --git a/examples/triplet/train_lfw_triplet.sh b/examples/triplet/train_lfw_triplet.sh new file mode 100755 index 00000000000..076738a5e63 --- /dev/null +++ b/examples/triplet/train_lfw_triplet.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env sh + +TOOLS=./build/tools + 
+$TOOLS/caffe train --solver=examples/triplet/lfw_triplet_solver.prototxt From ed4e4774a4efe0837a2affdb7decbdc3853eb25a Mon Sep 17 00:00:00 2001 From: Wangyida Date: Wed, 15 Jul 2015 10:42:04 +0800 Subject: [PATCH 72/82] add 3d network training param --- Makefile.orig.orig | 663 ++++++++++++ examples/triplet/3d_triplet.prototxt | 110 ++ examples/triplet/3d_triplet_solver.prototxt | 25 + .../triplet/3d_triplet_train_test.prototxt | 580 ++++++++++- ...t~165e1d595232eb2a908f62887bcf2d5e1743ed10 | 181 ++++ .../triplet/convert_3d_triplet_data.cpp.orig | 182 +++- .../triplet/convert_mnist_triplet_data.cpp | 16 +- examples/triplet/create_3d_triplet.sh.orig | 12 + .../triplet/mnist_triplet_train_test.prototxt | 500 --------- examples/triplet/train_3d_triplet.sh | 5 + include/caffe/data_layers.hpp.orig.orig | 68 +- include/caffe/filler.hpp.orig.orig | 336 ++++++ include/caffe/loss_layers.hpp.orig | 16 + include/caffe/python_layer.hpp.orig.orig | 74 ++ include/caffe/vision_layers.hpp.orig.orig | 555 ++++++++++ .../layers/base_data_layer.cpp.orig.orig | 179 ++++ src/caffe/layers/base_data_layer.cu.orig.orig | 60 ++ src/caffe/layers/concat_layer.cu.orig.orig | 101 ++ src/caffe/layers/data_layer.cpp.orig.orig | 234 +++++ .../layers/image_data_layer.cpp.orig.orig | 213 ++++ .../layers/triplet_loss_layer.cpp.orig.orig | 167 ++- .../layers/triplet_loss_layer.cu.orig.orig | 208 +++- src/caffe/net.cpp.orig.orig | 856 ++++++++++++++++ src/caffe/proto/caffe.proto.orig | 233 ++++- src/caffe/solver.cpp.orig.orig | 852 ++------------- .../generate_sample_data.py.orig.orig | 105 ++ .../test_gradient_based_solver.cpp.orig.orig | 967 ++++++++++++++++++ tools/extract_features.cpp | 189 ---- 28 files changed, 6108 insertions(+), 1579 deletions(-) create mode 100644 Makefile.orig.orig create mode 100644 examples/triplet/3d_triplet.prototxt create mode 100644 examples/triplet/3d_triplet_solver.prototxt create mode 100644 
examples/triplet/3d_triplet_train_test.prototxt~165e1d595232eb2a908f62887bcf2d5e1743ed10 delete mode 100644 examples/triplet/mnist_triplet_train_test.prototxt create mode 100755 examples/triplet/train_3d_triplet.sh create mode 100644 include/caffe/filler.hpp.orig.orig create mode 100644 include/caffe/python_layer.hpp.orig.orig create mode 100644 include/caffe/vision_layers.hpp.orig.orig create mode 100644 src/caffe/layers/base_data_layer.cpp.orig.orig create mode 100644 src/caffe/layers/base_data_layer.cu.orig.orig create mode 100644 src/caffe/layers/concat_layer.cu.orig.orig create mode 100644 src/caffe/layers/data_layer.cpp.orig.orig create mode 100644 src/caffe/layers/image_data_layer.cpp.orig.orig create mode 100644 src/caffe/net.cpp.orig.orig create mode 100644 src/caffe/test/test_data/generate_sample_data.py.orig.orig create mode 100644 src/caffe/test/test_gradient_based_solver.cpp.orig.orig delete mode 100644 tools/extract_features.cpp diff --git a/Makefile.orig.orig b/Makefile.orig.orig new file mode 100644 index 00000000000..65078e2ef46 --- /dev/null +++ b/Makefile.orig.orig @@ -0,0 +1,663 @@ +PROJECT := caffe + +CONFIG_FILE := Makefile.config +# Explicitly check for the config file, otherwise make -k will proceed anyway. +ifeq ($(wildcard $(CONFIG_FILE)),) +$(error $(CONFIG_FILE) not found. See $(CONFIG_FILE).example.) +endif +include $(CONFIG_FILE) + +BUILD_DIR_LINK := $(BUILD_DIR) +ifeq ($(RELEASE_BUILD_DIR),) + RELEASE_BUILD_DIR := .$(BUILD_DIR)_release +endif +ifeq ($(DEBUG_BUILD_DIR),) + DEBUG_BUILD_DIR := .$(BUILD_DIR)_debug +endif + +DEBUG ?= 0 +ifeq ($(DEBUG), 1) + BUILD_DIR := $(DEBUG_BUILD_DIR) + OTHER_BUILD_DIR := $(RELEASE_BUILD_DIR) +else + BUILD_DIR := $(RELEASE_BUILD_DIR) + OTHER_BUILD_DIR := $(DEBUG_BUILD_DIR) +endif + +# All of the directories containing code. +SRC_DIRS := $(shell find * -type d -exec bash -c "find {} -maxdepth 1 \ + \( -name '*.cpp' -o -name '*.proto' \) | grep -q ." 
\; -print) + +# The target shared library name +LIB_BUILD_DIR := $(BUILD_DIR)/lib +STATIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).a +DYNAMIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).so + +############################## +# Get all source files +############################## +# CXX_SRCS are the source files excluding the test ones. +CXX_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cpp" -name "*.cpp") +# CU_SRCS are the cuda source files +CU_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cu" -name "*.cu") +# TEST_SRCS are the test source files +TEST_MAIN_SRC := src/$(PROJECT)/test/test_caffe_main.cpp +TEST_SRCS := $(shell find src/$(PROJECT) -name "test_*.cpp") +TEST_SRCS := $(filter-out $(TEST_MAIN_SRC), $(TEST_SRCS)) +TEST_CU_SRCS := $(shell find src/$(PROJECT) -name "test_*.cu") +GTEST_SRC := src/gtest/gtest-all.cpp +# TOOL_SRCS are the source files for the tool binaries +TOOL_SRCS := $(shell find tools -name "*.cpp") +# EXAMPLE_SRCS are the source files for the example binaries +EXAMPLE_SRCS := $(shell find examples -name "*.cpp") +# BUILD_INCLUDE_DIR contains any generated header files we want to include. +BUILD_INCLUDE_DIR := $(BUILD_DIR)/src +# PROTO_SRCS are the protocol buffer definitions +PROTO_SRC_DIR := src/$(PROJECT)/proto +PROTO_SRCS := $(wildcard $(PROTO_SRC_DIR)/*.proto) +# PROTO_BUILD_DIR will contain the .cc and obj files generated from +# PROTO_SRCS; PROTO_BUILD_INCLUDE_DIR will contain the .h header files +PROTO_BUILD_DIR := $(BUILD_DIR)/$(PROTO_SRC_DIR) +PROTO_BUILD_INCLUDE_DIR := $(BUILD_INCLUDE_DIR)/$(PROJECT)/proto +# NONGEN_CXX_SRCS includes all source/header files except those generated +# automatically (e.g., by proto). 
+NONGEN_CXX_SRCS := $(shell find \ + src/$(PROJECT) \ + include/$(PROJECT) \ + python/$(PROJECT) \ + matlab/+$(PROJECT)/private \ + examples \ + tools \ + -name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh") +LINT_SCRIPT := scripts/cpp_lint.py +LINT_OUTPUT_DIR := $(BUILD_DIR)/.lint +LINT_EXT := lint.txt +LINT_OUTPUTS := $(addsuffix .$(LINT_EXT), $(addprefix $(LINT_OUTPUT_DIR)/, $(NONGEN_CXX_SRCS))) +EMPTY_LINT_REPORT := $(BUILD_DIR)/.$(LINT_EXT) +NONEMPTY_LINT_REPORT := $(BUILD_DIR)/$(LINT_EXT) +# PY$(PROJECT)_SRC is the python wrapper for $(PROJECT) +PY$(PROJECT)_SRC := python/$(PROJECT)/_$(PROJECT).cpp +PY$(PROJECT)_SO := python/$(PROJECT)/_$(PROJECT).so +PY$(PROJECT)_HXX := include/$(PROJECT)/python_layer.hpp +# MAT$(PROJECT)_SRC is the mex entrance point of matlab package for $(PROJECT) +MAT$(PROJECT)_SRC := matlab/+$(PROJECT)/private/$(PROJECT)_.cpp +ifneq ($(MATLAB_DIR),) + MAT_SO_EXT := $(shell $(MATLAB_DIR)/bin/mexext) +endif +MAT$(PROJECT)_SO := matlab/+$(PROJECT)/private/$(PROJECT)_.$(MAT_SO_EXT) + +############################## +# Derive generated files +############################## +# The generated files for protocol buffers +PROTO_GEN_HEADER_SRCS := $(addprefix $(PROTO_BUILD_DIR)/, \ + $(notdir ${PROTO_SRCS:.proto=.pb.h})) +PROTO_GEN_HEADER := $(addprefix $(PROTO_BUILD_INCLUDE_DIR)/, \ + $(notdir ${PROTO_SRCS:.proto=.pb.h})) +PROTO_GEN_CC := $(addprefix $(BUILD_DIR)/, ${PROTO_SRCS:.proto=.pb.cc}) +PY_PROTO_BUILD_DIR := python/$(PROJECT)/proto +PY_PROTO_INIT := python/$(PROJECT)/proto/__init__.py +PROTO_GEN_PY := $(foreach file,${PROTO_SRCS:.proto=_pb2.py}, \ + $(PY_PROTO_BUILD_DIR)/$(notdir $(file))) +# The objects corresponding to the source files +# These objects will be linked into the final shared library, so we +# exclude the tool, example, and test objects. 
+CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o}) +CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o}) +PROTO_OBJS := ${PROTO_GEN_CC:.cc=.o} +OBJS := $(PROTO_OBJS) $(CXX_OBJS) $(CU_OBJS) +# tool, example, and test objects +TOOL_OBJS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o}) +TOOL_BUILD_DIR := $(BUILD_DIR)/tools +TEST_CXX_BUILD_DIR := $(BUILD_DIR)/src/$(PROJECT)/test +TEST_CU_BUILD_DIR := $(BUILD_DIR)/cuda/src/$(PROJECT)/test +TEST_CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o}) +TEST_CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o}) +TEST_OBJS := $(TEST_CXX_OBJS) $(TEST_CU_OBJS) +GTEST_OBJ := $(addprefix $(BUILD_DIR)/, ${GTEST_SRC:.cpp=.o}) +EXAMPLE_OBJS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o}) +# Output files for automatic dependency generation +DEPS := ${CXX_OBJS:.o=.d} ${CU_OBJS:.o=.d} ${TEST_CXX_OBJS:.o=.d} \ + ${TEST_CU_OBJS:.o=.d} $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d} +# tool, example, and test bins +TOOL_BINS := ${TOOL_OBJS:.o=.bin} +EXAMPLE_BINS := ${EXAMPLE_OBJS:.o=.bin} +# symlinks to tool bins without the ".bin" extension +TOOL_BIN_LINKS := ${TOOL_BINS:.bin=} +# Put the test binaries in build/test for convenience. +TEST_BIN_DIR := $(BUILD_DIR)/test +TEST_CU_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \ + $(foreach obj,$(TEST_CU_OBJS),$(basename $(notdir $(obj)))))) +TEST_CXX_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \ + $(foreach obj,$(TEST_CXX_OBJS),$(basename $(notdir $(obj)))))) +TEST_BINS := $(TEST_CXX_BINS) $(TEST_CU_BINS) +# TEST_ALL_BIN is the test binary that links caffe dynamically. 
+TEST_ALL_BIN := $(TEST_BIN_DIR)/test_all.testbin + +############################## +# Derive compiler warning dump locations +############################## +WARNS_EXT := warnings.txt +CXX_WARNS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o.$(WARNS_EXT)}) +CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o.$(WARNS_EXT)}) +TOOL_WARNS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o.$(WARNS_EXT)}) +EXAMPLE_WARNS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o.$(WARNS_EXT)}) +TEST_WARNS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o.$(WARNS_EXT)}) +TEST_CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o.$(WARNS_EXT)}) +ALL_CXX_WARNS := $(CXX_WARNS) $(TOOL_WARNS) $(EXAMPLE_WARNS) $(TEST_WARNS) +ALL_CU_WARNS := $(CU_WARNS) $(TEST_CU_WARNS) +ALL_WARNS := $(ALL_CXX_WARNS) $(ALL_CU_WARNS) + +EMPTY_WARN_REPORT := $(BUILD_DIR)/.$(WARNS_EXT) +NONEMPTY_WARN_REPORT := $(BUILD_DIR)/$(WARNS_EXT) + +############################## +# Derive include and lib directories +############################## +CUDA_INCLUDE_DIR := $(CUDA_DIR)/include + +CUDA_LIB_DIR := +# add /lib64 only if it exists +ifneq ("$(wildcard $(CUDA_DIR)/lib64)","") + CUDA_LIB_DIR += $(CUDA_DIR)/lib64 +endif +CUDA_LIB_DIR += $(CUDA_DIR)/lib + +INCLUDE_DIRS += $(BUILD_INCLUDE_DIR) ./src ./include +ifneq ($(CPU_ONLY), 1) + INCLUDE_DIRS += $(CUDA_INCLUDE_DIR) + LIBRARY_DIRS += $(CUDA_LIB_DIR) + LIBRARIES := cudart cublas curand +endif +LIBRARIES += glog gflags protobuf leveldb snappy \ + lmdb boost_system hdf5_hl hdf5 m \ + opencv_core opencv_highgui opencv_imgproc #opencv_imgcodecs +PYTHON_LIBRARIES := boost_python python2.7 +WARNINGS := -Wall -Wno-sign-compare + +############################## +# Set build directories +############################## + +DISTRIBUTE_DIR ?= distribute +DISTRIBUTE_SUBDIRS := $(DISTRIBUTE_DIR)/bin $(DISTRIBUTE_DIR)/lib +DIST_ALIASES := dist +ifneq ($(strip $(DISTRIBUTE_DIR)),distribute) + DIST_ALIASES += distribute +endif + +ALL_BUILD_DIRS := $(sort 
$(BUILD_DIR) $(addprefix $(BUILD_DIR)/, $(SRC_DIRS)) \ + $(addprefix $(BUILD_DIR)/cuda/, $(SRC_DIRS)) \ + $(LIB_BUILD_DIR) $(TEST_BIN_DIR) $(PY_PROTO_BUILD_DIR) $(LINT_OUTPUT_DIR) \ + $(DISTRIBUTE_SUBDIRS) $(PROTO_BUILD_INCLUDE_DIR)) + +############################## +# Set directory for Doxygen-generated documentation +############################## +DOXYGEN_CONFIG_FILE ?= ./.Doxyfile +# should be the same as OUTPUT_DIRECTORY in the .Doxyfile +DOXYGEN_OUTPUT_DIR ?= ./doxygen +DOXYGEN_COMMAND ?= doxygen +# All the files that might have Doxygen documentation. +DOXYGEN_SOURCES := $(shell find \ + src/$(PROJECT) \ + include/$(PROJECT) \ + python/ \ + matlab/ \ + examples \ + tools \ + -name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh" -or \ + -name "*.py" -or -name "*.m") +DOXYGEN_SOURCES += $(DOXYGEN_CONFIG_FILE) + + +############################## +# Configure build +############################## + +# Determine platform +UNAME := $(shell uname -s) +ifeq ($(UNAME), Linux) + LINUX := 1 +else ifeq ($(UNAME), Darwin) + OSX := 1 +endif + +# Linux +ifeq ($(LINUX), 1) + CXX ?= /usr/bin/g++ + GCCVERSION := $(shell $(CXX) -dumpversion | cut -f1,2 -d.) + # older versions of gcc are too dumb to build boost with -Wuninitalized +<<<<<<< 0a8521567403409d70ece475762c203e38274530 + ifeq ($(shell echo | awk '{exit $(GCCVERSION) < 4.6;}'), 1) +======= +<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c + ifeq ($(shell echo | awk '{exit $(GCCVERSION) < 4.6;}'), 1) +======= + ifeq ($(shell echo $(GCCVERSION) \< 4.6 | bc), 1) +>>>>>>> GPU version added +>>>>>>> GPU version added + WARNINGS += -Wno-uninitialized + endif + # boost::thread is reasonably called boost_thread (compare OS X) + # We will also explicitly add stdc++ to the link target. 
+ LIBRARIES += boost_thread stdc++ +endif + +# OS X: +# clang++ instead of g++ +# libstdc++ for NVCC compatibility on OS X >= 10.9 with CUDA < 7.0 +ifeq ($(OSX), 1) + CXX := /usr/bin/clang++ + ifneq ($(CPU_ONLY), 1) + CUDA_VERSION := $(shell $(CUDA_DIR)/bin/nvcc -V | grep -o 'release \d' | grep -o '\d') +<<<<<<< 0a8521567403409d70ece475762c203e38274530 + ifeq ($(shell echo | awk '{exit $(CUDA_VERSION) < 7.0;}'), 1) +======= +<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c + ifeq ($(shell echo | awk '{exit $(CUDA_VERSION) < 7.0;}'), 1) +======= + ifeq ($(shell echo $(CUDA_VERSION) \< 7.0 | bc), 1) +>>>>>>> GPU version added +>>>>>>> GPU version added + CXXFLAGS += -stdlib=libstdc++ + LINKFLAGS += -stdlib=libstdc++ + endif + # clang throws this warning for cuda headers + WARNINGS += -Wno-unneeded-internal-declaration + endif + # gtest needs to use its own tuple to not conflict with clang + COMMON_FLAGS += -DGTEST_USE_OWN_TR1_TUPLE=1 + # boost::thread is called boost_thread-mt to mark multithreading on OS X + LIBRARIES += boost_thread-mt + # we need to explicitly ask for the rpath to be obeyed + DYNAMIC_FLAGS := -install_name @rpath/libcaffe.so + ORIGIN := @loader_path +else + ORIGIN := \$$ORIGIN +endif + +# Custom compiler +ifdef CUSTOM_CXX + CXX := $(CUSTOM_CXX) +endif + +# Static linking +ifneq (,$(findstring clang++,$(CXX))) + STATIC_LINK_COMMAND := -Wl,-force_load $(STATIC_NAME) +else ifneq (,$(findstring g++,$(CXX))) + STATIC_LINK_COMMAND := -Wl,--whole-archive $(STATIC_NAME) -Wl,--no-whole-archive +else + # The following line must not be indented with a tab, since we are not inside a target + $(error Cannot static link with the $(CXX) compiler) +endif + +# Debugging +ifeq ($(DEBUG), 1) + COMMON_FLAGS += -DDEBUG -g -O0 + NVCCFLAGS += -G +else + COMMON_FLAGS += -DNDEBUG -O2 +endif + +# cuDNN acceleration configuration. 
+ifeq ($(USE_CUDNN), 1) + LIBRARIES += cudnn + COMMON_FLAGS += -DUSE_CUDNN +endif + +# CPU-only configuration +ifeq ($(CPU_ONLY), 1) + OBJS := $(PROTO_OBJS) $(CXX_OBJS) + TEST_OBJS := $(TEST_CXX_OBJS) + TEST_BINS := $(TEST_CXX_BINS) + ALL_WARNS := $(ALL_CXX_WARNS) + TEST_FILTER := --gtest_filter="-*GPU*" + COMMON_FLAGS += -DCPU_ONLY +endif + +# Python layer support +ifeq ($(WITH_PYTHON_LAYER), 1) + COMMON_FLAGS += -DWITH_PYTHON_LAYER + LIBRARIES += $(PYTHON_LIBRARIES) +endif + +# BLAS configuration (default = ATLAS) +BLAS ?= atlas +ifeq ($(BLAS), mkl) + # MKL + LIBRARIES += mkl_rt + COMMON_FLAGS += -DUSE_MKL + MKL_DIR ?= /opt/intel/mkl + BLAS_INCLUDE ?= $(MKL_DIR)/include + BLAS_LIB ?= $(MKL_DIR)/lib $(MKL_DIR)/lib/intel64 +else ifeq ($(BLAS), open) + # OpenBLAS + LIBRARIES += openblas +else + # ATLAS + ifeq ($(LINUX), 1) + ifeq ($(BLAS), atlas) + # Linux simply has cblas and atlas + LIBRARIES += cblas atlas + endif + else ifeq ($(OSX), 1) + # OS X packages atlas as the vecLib framework + LIBRARIES += cblas + # 10.10 has accelerate while 10.9 has veclib + XCODE_CLT_VER := $(shell pkgutil --pkg-info=com.apple.pkg.CLTools_Executables | grep -o 'version: 6') + ifneq (,$(findstring version: 6,$(XCODE_CLT_VER))) + BLAS_INCLUDE ?= /System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/ + LDFLAGS += -framework Accelerate + else + BLAS_INCLUDE ?= /System/Library/Frameworks/vecLib.framework/Versions/Current/Headers/ + LDFLAGS += -framework vecLib + endif + endif +endif +INCLUDE_DIRS += $(BLAS_INCLUDE) +LIBRARY_DIRS += $(BLAS_LIB) + +LIBRARY_DIRS += $(LIB_BUILD_DIR) + +# Automatic dependency generation (nvcc is handled separately) +CXXFLAGS += -MMD -MP + +# Complete build flags. 
+COMMON_FLAGS += $(foreach includedir,$(INCLUDE_DIRS),-I$(includedir)) +CXXFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) +NVCCFLAGS += -ccbin=$(CXX) -Xcompiler -fPIC $(COMMON_FLAGS) +# mex may invoke an older gcc that is too liberal with -Wuninitalized +MATLAB_CXXFLAGS := $(CXXFLAGS) -Wno-uninitialized +LINKFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) + +USE_PKG_CONFIG ?= 0 +ifeq ($(USE_PKG_CONFIG), 1) + PKG_CONFIG := $(shell pkg-config opencv --libs) +else + PKG_CONFIG := +endif +LDFLAGS += $(foreach librarydir,$(LIBRARY_DIRS),-L$(librarydir)) $(PKG_CONFIG) \ + $(foreach library,$(LIBRARIES),-l$(library)) +PYTHON_LDFLAGS := $(LDFLAGS) $(foreach library,$(PYTHON_LIBRARIES),-l$(library)) + +# 'superclean' target recursively* deletes all files ending with an extension +# in $(SUPERCLEAN_EXTS) below. This may be useful if you've built older +# versions of Caffe that do not place all generated files in a location known +# to the 'clean' target. +# +# 'supercleanlist' will list the files to be deleted by make superclean. +# +# * Recursive with the exception that symbolic links are never followed, per the +# default behavior of 'find'. +SUPERCLEAN_EXTS := .so .a .o .bin .testbin .pb.cc .pb.h _pb2.py .cuo + +# Set the sub-targets of the 'everything' target. +EVERYTHING_TARGETS := all py$(PROJECT) test warn lint +# Only build matcaffe as part of "everything" if MATLAB_DIR is specified. 
+ifneq ($(MATLAB_DIR),) + EVERYTHING_TARGETS += mat$(PROJECT) +endif + +############################## +# Define build targets +############################## +<<<<<<< 0a8521567403409d70ece475762c203e38274530 +======= +<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c +>>>>>>> GPU version added +.PHONY: all lib test clean docs linecount lint lintclean tools examples $(DIST_ALIASES) \ + py mat py$(PROJECT) mat$(PROJECT) proto runtest \ + superclean supercleanlist supercleanfiles warn everything + +all: lib tools examples + +lib: $(STATIC_NAME) $(DYNAMIC_NAME) +<<<<<<< 0a8521567403409d70ece475762c203e38274530 +======= +======= +.PHONY: all test clean docs linecount lint lintclean tools examples $(DIST_ALIASES) \ + py mat py$(PROJECT) mat$(PROJECT) proto runtest \ + superclean supercleanlist supercleanfiles warn everything + +all: $(STATIC_NAME) $(DYNAMIC_NAME) tools examples +>>>>>>> GPU version added +>>>>>>> GPU version added + +everything: $(EVERYTHING_TARGETS) + +linecount: + cloc --read-lang-def=$(PROJECT).cloc \ + src/$(PROJECT) include/$(PROJECT) tools examples \ + python matlab + +lint: $(EMPTY_LINT_REPORT) + +lintclean: + @ $(RM) -r $(LINT_OUTPUT_DIR) $(EMPTY_LINT_REPORT) $(NONEMPTY_LINT_REPORT) + +docs: $(DOXYGEN_OUTPUT_DIR) + @ cd ./docs ; ln -sfn ../$(DOXYGEN_OUTPUT_DIR)/html doxygen + +$(DOXYGEN_OUTPUT_DIR): $(DOXYGEN_CONFIG_FILE) $(DOXYGEN_SOURCES) + $(DOXYGEN_COMMAND) $(DOXYGEN_CONFIG_FILE) + +$(EMPTY_LINT_REPORT): $(LINT_OUTPUTS) | $(BUILD_DIR) + @ cat $(LINT_OUTPUTS) > $@ + @ if [ -s "$@" ]; then \ + cat $@; \ + mv $@ $(NONEMPTY_LINT_REPORT); \ + echo "Found one or more lint errors."; \ + exit 1; \ + fi; \ + $(RM) $(NONEMPTY_LINT_REPORT); \ + echo "No lint errors!"; + +$(LINT_OUTPUTS): $(LINT_OUTPUT_DIR)/%.lint.txt : % $(LINT_SCRIPT) | $(LINT_OUTPUT_DIR) + @ mkdir -p $(dir $@) + @ python $(LINT_SCRIPT) $< 2>&1 \ + | grep -v "^Done processing " \ + | grep -v "^Total errors found: 0" \ + > $@ \ + || true + +test: $(TEST_ALL_BIN) $(TEST_ALL_DYNLINK_BIN) 
$(TEST_BINS) + +tools: $(TOOL_BINS) $(TOOL_BIN_LINKS) + +examples: $(EXAMPLE_BINS) + +py$(PROJECT): py + +py: $(PY$(PROJECT)_SO) $(PROTO_GEN_PY) + +$(PY$(PROJECT)_SO): $(PY$(PROJECT)_SRC) $(PY$(PROJECT)_HXX) | $(DYNAMIC_NAME) + @ echo CXX/LD -o $@ $< + $(Q)$(CXX) -shared -o $@ $(PY$(PROJECT)_SRC) \ + -o $@ $(LINKFLAGS) -l$(PROJECT) $(PYTHON_LDFLAGS) \ + -Wl,-rpath,$(ORIGIN)/../../build/lib + +mat$(PROJECT): mat + +mat: $(MAT$(PROJECT)_SO) + +$(MAT$(PROJECT)_SO): $(MAT$(PROJECT)_SRC) $(STATIC_NAME) + @ if [ -z "$(MATLAB_DIR)" ]; then \ + echo "MATLAB_DIR must be specified in $(CONFIG_FILE)" \ + "to build mat$(PROJECT)."; \ + exit 1; \ + fi + @ echo MEX $< + $(Q)$(MATLAB_DIR)/bin/mex $(MAT$(PROJECT)_SRC) \ + CXX="$(CXX)" \ + CXXFLAGS="\$$CXXFLAGS $(MATLAB_CXXFLAGS)" \ + CXXLIBS="\$$CXXLIBS $(STATIC_LINK_COMMAND) $(LDFLAGS)" -output $@ + @ if [ -f "$(PROJECT)_.d" ]; then \ + mv -f $(PROJECT)_.d $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d}; \ + fi + +runtest: $(TEST_ALL_BIN) + $(TOOL_BUILD_DIR)/caffe + $(TEST_ALL_BIN) $(TEST_GPUID) --gtest_shuffle $(TEST_FILTER) + +pytest: py + cd python; python -m unittest discover -s caffe/test + +mattest: mat + cd matlab; $(MATLAB_DIR)/bin/matlab -nodisplay -r 'caffe.run_tests(), exit()' + +warn: $(EMPTY_WARN_REPORT) + +$(EMPTY_WARN_REPORT): $(ALL_WARNS) | $(BUILD_DIR) + @ cat $(ALL_WARNS) > $@ + @ if [ -s "$@" ]; then \ + cat $@; \ + mv $@ $(NONEMPTY_WARN_REPORT); \ + echo "Compiler produced one or more warnings."; \ + exit 1; \ + fi; \ + $(RM) $(NONEMPTY_WARN_REPORT); \ + echo "No compiler warnings!"; + +$(ALL_WARNS): %.o.$(WARNS_EXT) : %.o + +$(BUILD_DIR_LINK): $(BUILD_DIR)/.linked + +# Create a target ".linked" in this BUILD_DIR to tell Make that the "build" link +# is currently correct, then delete the one in the OTHER_BUILD_DIR in case it +# exists and $(DEBUG) is toggled later. 
+$(BUILD_DIR)/.linked: + @ mkdir -p $(BUILD_DIR) + @ $(RM) $(OTHER_BUILD_DIR)/.linked + @ $(RM) -r $(BUILD_DIR_LINK) + @ ln -s $(BUILD_DIR) $(BUILD_DIR_LINK) + @ touch $@ + +$(ALL_BUILD_DIRS): | $(BUILD_DIR_LINK) + @ mkdir -p $@ + +$(DYNAMIC_NAME): $(OBJS) | $(LIB_BUILD_DIR) + @ echo LD -o $@ + $(Q)$(CXX) -shared -o $@ $(OBJS) $(LINKFLAGS) $(LDFLAGS) $(DYNAMIC_FLAGS) + +$(STATIC_NAME): $(OBJS) | $(LIB_BUILD_DIR) + @ echo AR -o $@ + $(Q)ar rcs $@ $(OBJS) + +$(BUILD_DIR)/%.o: %.cpp | $(ALL_BUILD_DIRS) + @ echo CXX $< + $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ + || (cat $@.$(WARNS_EXT); exit 1) + @ cat $@.$(WARNS_EXT) + +$(PROTO_BUILD_DIR)/%.pb.o: $(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_GEN_HEADER) \ + | $(PROTO_BUILD_DIR) + @ echo CXX $< + $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ + || (cat $@.$(WARNS_EXT); exit 1) + @ cat $@.$(WARNS_EXT) + +$(BUILD_DIR)/cuda/%.o: %.cu | $(ALL_BUILD_DIRS) + @ echo NVCC $< + $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -M $< -o ${@:.o=.d} \ + -odir $(@D) + $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -c $< -o $@ 2> $@.$(WARNS_EXT) \ + || (cat $@.$(WARNS_EXT); exit 1) + @ cat $@.$(WARNS_EXT) + +$(TEST_ALL_BIN): $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \ + | $(DYNAMIC_NAME) $(TEST_BIN_DIR) + @ echo CXX/LD -o $@ $< + $(Q)$(CXX) $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \ + -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib + +$(TEST_CU_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CU_BUILD_DIR)/%.o \ + $(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR) + @ echo LD $< + $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \ + -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib + +$(TEST_CXX_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CXX_BUILD_DIR)/%.o \ + $(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR) + @ echo LD $< + $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \ + -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib + +# Target for extension-less symlinks to tool 
binaries with extension '*.bin'. +$(TOOL_BUILD_DIR)/%: $(TOOL_BUILD_DIR)/%.bin | $(TOOL_BUILD_DIR) + @ $(RM) $@ + @ ln -s $(abspath $<) $@ + +$(TOOL_BINS): %.bin : %.o | $(DYNAMIC_NAME) + @ echo CXX/LD -o $@ + $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \ + -Wl,-rpath,$(ORIGIN)/../lib + +$(EXAMPLE_BINS): %.bin : %.o | $(DYNAMIC_NAME) + @ echo CXX/LD -o $@ + $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \ + -Wl,-rpath,$(ORIGIN)/../../lib + +proto: $(PROTO_GEN_CC) $(PROTO_GEN_HEADER) + +$(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_BUILD_DIR)/%.pb.h : \ + $(PROTO_SRC_DIR)/%.proto | $(PROTO_BUILD_DIR) + @ echo PROTOC $< + $(Q)protoc --proto_path=$(PROTO_SRC_DIR) --cpp_out=$(PROTO_BUILD_DIR) $< + +$(PY_PROTO_BUILD_DIR)/%_pb2.py : $(PROTO_SRC_DIR)/%.proto \ + $(PY_PROTO_INIT) | $(PY_PROTO_BUILD_DIR) + @ echo PROTOC \(python\) $< + $(Q)protoc --proto_path=$(PROTO_SRC_DIR) --python_out=$(PY_PROTO_BUILD_DIR) $< + +$(PY_PROTO_INIT): | $(PY_PROTO_BUILD_DIR) + touch $(PY_PROTO_INIT) + +clean: + @- $(RM) -rf $(ALL_BUILD_DIRS) + @- $(RM) -rf $(OTHER_BUILD_DIR) + @- $(RM) -rf $(BUILD_DIR_LINK) + @- $(RM) -rf $(DISTRIBUTE_DIR) + @- $(RM) $(PY$(PROJECT)_SO) + @- $(RM) $(MAT$(PROJECT)_SO) + +supercleanfiles: + $(eval SUPERCLEAN_FILES := $(strip \ + $(foreach ext,$(SUPERCLEAN_EXTS), $(shell find . 
-name '*$(ext)' \ + -not -path './data/*')))) + +supercleanlist: supercleanfiles + @ \ + if [ -z "$(SUPERCLEAN_FILES)" ]; then \ + echo "No generated files found."; \ + else \ + echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \ + fi + +superclean: clean supercleanfiles + @ \ + if [ -z "$(SUPERCLEAN_FILES)" ]; then \ + echo "No generated files found."; \ + else \ + echo "Deleting the following generated files:"; \ + echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \ + $(RM) $(SUPERCLEAN_FILES); \ + fi + +$(DIST_ALIASES): $(DISTRIBUTE_DIR) + +$(DISTRIBUTE_DIR): all py | $(DISTRIBUTE_SUBDIRS) + # add include + cp -r include $(DISTRIBUTE_DIR)/ + mkdir -p $(DISTRIBUTE_DIR)/include/caffe/proto + cp $(PROTO_GEN_HEADER_SRCS) $(DISTRIBUTE_DIR)/include/caffe/proto + # add tool and example binaries + cp $(TOOL_BINS) $(DISTRIBUTE_DIR)/bin + cp $(EXAMPLE_BINS) $(DISTRIBUTE_DIR)/bin + # add libraries + cp $(STATIC_NAME) $(DISTRIBUTE_DIR)/lib + cp $(DYNAMIC_NAME) $(DISTRIBUTE_DIR)/lib + # add python - it's not the standard way, indeed... 
+ cp -r python $(DISTRIBUTE_DIR)/python + +-include $(DEPS) diff --git a/examples/triplet/3d_triplet.prototxt b/examples/triplet/3d_triplet.prototxt new file mode 100644 index 00000000000..6865d67a6c3 --- /dev/null +++ b/examples/triplet/3d_triplet.prototxt @@ -0,0 +1,110 @@ +name: "3d_triplet" +input: "data" +input_dim: 276 +input_dim: 1 +input_dim: 64 +input_dim: 64 +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 16 + kernel_size: 8 + stride: 1 + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "pool1" + top: "pool1" +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 7 + kernel_size: 5 + stride: 1 + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu2" + type: "ReLU" + bottom: "pool2" + top: "pool2" +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 256 + } +} +layer { + name: "relu3" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip1" + top: "feat" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 4 + } +} diff --git a/examples/triplet/3d_triplet_solver.prototxt b/examples/triplet/3d_triplet_solver.prototxt new file mode 100644 index 00000000000..d61a6c42f4b --- /dev/null +++ b/examples/triplet/3d_triplet_solver.prototxt @@ -0,0 +1,25 @@ +# The train/test net protocol buffer definition +net: "examples/triplet/3d_triplet_train_test.prototxt" +# 
test_iter specifies how many forward passes the test should carry out. +# In the case of 3d database, we have test batch size 23 and 23 test iterations, +# covering the full 276 testing images. +test_iter: 23 +# Carry out testing every 500 training iterations. +test_interval: 23 +# The base learning rate, momentum and the weight decay of the network. +base_lr: 0.01 +momentum: 0.9 +weight_decay: 0.0000 +# The learning rate policy +lr_policy: "inv" +gamma: 0.0001 +power: 0.75 +# Display every 100 iterations +display: 100 +# The maximum number of iterations +max_iter: 1000 +# snapshot intermediate results +snapshot: 200 +snapshot_prefix: "examples/triplet/3d_triplet" +# solver mode: CPU or GPU +solver_mode: CPU diff --git a/examples/triplet/3d_triplet_train_test.prototxt b/examples/triplet/3d_triplet_train_test.prototxt index 1ac185aa2cc..bff19047ab8 100644 --- a/examples/triplet/3d_triplet_train_test.prototxt +++ b/examples/triplet/3d_triplet_train_test.prototxt @@ -1,8 +1,8 @@ name: "3d_triplet_train_test" layer { - name: "data" + name: "triplet_data" type: "Data" - top: "data" + top: "triplet_data" top: "sim" include { phase: TRAIN @@ -12,13 +12,13 @@ layer { } data_param { source: "examples/triplet/3d_triplet_train_leveldb" - batch_size: 250 + batch_size: 23 } } layer { - name: "data" + name: "triplet_data" type: "Data" - top: "data" + top: "triplet_data" top: "sim" include { phase: TEST @@ -28,7 +28,24 @@ layer { } data_param { source: "examples/triplet/3d_triplet_test_leveldb" - batch_size: 250 + batch_size: 23 + } +} +layer { + name: "slice_triplet" + type: "Slice" + bottom: "triplet_data" + top: "data" + top: "data_true" + top: "data_false" + top: "data_p1" + top: "data_p2" + slice_param { + slice_dim: 1 + slice_point: 1 + slice_point: 2 + slice_point: 3 + slice_point: 4 } } layer { @@ -167,15 +184,562 @@ layer { } } } +layer { + name: "conv1_true" + type: "Convolution" + bottom: "data_true" + top: "conv1_true" + param { + name: "conv1_w" + lr_mult: 1 + } + 
param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 16 + kernel_size: 8 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1_true" + type: "Pooling" + bottom: "conv1_true" + top: "pool1_true" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu1_true" + type: "ReLU" + bottom: "pool1_true" + top: "pool1_true" +} +layer { + name: "conv2_true" + type: "Convolution" + bottom: "pool1_true" + top: "conv2_true" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 7 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2_true" + type: "Pooling" + bottom: "conv2_true" + top: "pool2_true" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu2_true" + type: "ReLU" + bottom: "pool2_true" + top: "pool2_true" +} +layer { + name: "ip1_true" + type: "InnerProduct" + bottom: "pool2_true" + top: "ip1_true" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 256 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu3_true" + type: "ReLU" + bottom: "ip1_true" + top: "ip1_true" +} +layer { + name: "feat_true" + type: "InnerProduct" + bottom: "ip1_true" + top: "feat_true" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 4 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "conv1_false" + type: "Convolution" + bottom: "data_false" + top: "conv1_false" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 16 + kernel_size: 8 + stride: 1 + weight_filler { 
+ type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1_false" + type: "Pooling" + bottom: "conv1_false" + top: "pool1_false" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu1_false" + type: "ReLU" + bottom: "pool1_false" + top: "pool1_false" +} +layer { + name: "conv2_false" + type: "Convolution" + bottom: "pool1_false" + top: "conv2_false" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 7 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2_false" + type: "Pooling" + bottom: "conv2_false" + top: "pool2_false" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu2_false" + type: "ReLU" + bottom: "pool2_false" + top: "pool2_false" +} +layer { + name: "ip1_false" + type: "InnerProduct" + bottom: "pool2_false" + top: "ip1_false" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 256 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu3_false" + type: "ReLU" + bottom: "ip1_false" + top: "ip1_false" +} +layer { + name: "feat_false" + type: "InnerProduct" + bottom: "ip1_false" + top: "feat_false" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 4 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "conv1_p1" + type: "Convolution" + bottom: "data_p1" + top: "conv1_p1" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 16 + kernel_size: 8 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1_p1" + type: "Pooling" + 
bottom: "conv1_p1" + top: "pool1_p1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu1_p1" + type: "ReLU" + bottom: "pool1_p1" + top: "pool1_p1" +} +layer { + name: "conv2_p1" + type: "Convolution" + bottom: "pool1_p1" + top: "conv2_p1" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 7 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2_p1" + type: "Pooling" + bottom: "conv2_p1" + top: "pool2_p1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu2_p1" + type: "ReLU" + bottom: "pool2_p1" + top: "pool2_p1" +} +layer { + name: "ip1_p1" + type: "InnerProduct" + bottom: "pool2_p1" + top: "ip1_p1" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 256 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu3_p1" + type: "ReLU" + bottom: "ip1_p1" + top: "ip1_p1" +} +layer { + name: "feat_p1" + type: "InnerProduct" + bottom: "ip1_p1" + top: "feat_p1" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 4 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "conv1_p2" + type: "Convolution" + bottom: "data_p2" + top: "conv1_p2" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 16 + kernel_size: 8 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1_p2" + type: "Pooling" + bottom: "conv1_p2" + top: "pool1_p2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu1_p2" + type: "ReLU" + bottom: "pool1_p2" + top: "pool1_p2" 
+} +layer { + name: "conv2_p2" + type: "Convolution" + bottom: "pool1_p2" + top: "conv2_p2" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 7 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2_p2" + type: "Pooling" + bottom: "conv2_p2" + top: "pool2_p2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu2_p2" + type: "ReLU" + bottom: "pool2_p2" + top: "pool2_p2" +} +layer { + name: "ip1_p2" + type: "InnerProduct" + bottom: "pool2_p2" + top: "ip1_p2" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 256 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu3_p2" + type: "ReLU" + bottom: "ip1_p2" + top: "ip1_p2" +} +layer { + name: "feat_p2" + type: "InnerProduct" + bottom: "ip1_p2" + top: "feat_p2" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 4 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} layer { name: "loss" type: "TripletLoss" bottom: "feat" + bottom: "feat_true" + bottom: "feat_false" + bottom: "feat_p1" + bottom: "feat_p2" bottom: "sim" top: "loss" triplet_loss_param { margin: 1 - losstype: 0 - num_triplets: 3 } } + diff --git a/examples/triplet/3d_triplet_train_test.prototxt~165e1d595232eb2a908f62887bcf2d5e1743ed10 b/examples/triplet/3d_triplet_train_test.prototxt~165e1d595232eb2a908f62887bcf2d5e1743ed10 new file mode 100644 index 00000000000..1ac185aa2cc --- /dev/null +++ b/examples/triplet/3d_triplet_train_test.prototxt~165e1d595232eb2a908f62887bcf2d5e1743ed10 @@ -0,0 +1,181 @@ +name: "3d_triplet_train_test" +layer { + name: "data" + type: "Data" + top: "data" + top: "sim" + include { + phase: TRAIN + } + 
transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/triplet/3d_triplet_train_leveldb" + batch_size: 250 + } +} +layer { + name: "data" + type: "Data" + top: "data" + top: "sim" + include { + phase: TEST + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/triplet/3d_triplet_test_leveldb" + batch_size: 250 + } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 16 + kernel_size: 8 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "pool1" + top: "pool1" +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 7 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu2" + type: "ReLU" + bottom: "pool2" + top: "pool2" +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 256 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu3" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip1" + top: "feat" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { 
+ num_output: 4 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "loss" + type: "TripletLoss" + bottom: "feat" + bottom: "sim" + top: "loss" + triplet_loss_param { + margin: 1 + losstype: 0 + num_triplets: 3 + } +} diff --git a/examples/triplet/convert_3d_triplet_data.cpp.orig b/examples/triplet/convert_3d_triplet_data.cpp.orig index ce1981d90da..38d4a296d0d 100644 --- a/examples/triplet/convert_3d_triplet_data.cpp.orig +++ b/examples/triplet/convert_3d_triplet_data.cpp.orig @@ -1,15 +1,35 @@ +<<<<<<< 165e1d595232eb2a908f62887bcf2d5e1743ed10 // Usage: // convert_3d_data input_image_file input_label_file output_db_file #include // NOLINT(readability/streams) -#include #include #include "caffe/proto/caffe.pb.h" #include "caffe/util/math_functions.hpp" #include "glog/logging.h" #include "google/protobuf/text_format.h" #include "leveldb/db.h" +#include "math.h" #include "stdint.h" +======= +// This script converts the MNIST dataset to the leveldb format used +// by caffe to train siamese network. 
+// Usage: +// convert_mnist_data input_image_file input_label_file output_db_file +// The MNIST dataset could be downloaded at +// http://yann.lecun.com/exdb/mnist/ +#include // NOLINT(readability/streams) +#include + +#include "glog/logging.h" +#include "google/protobuf/text_format.h" +#include "leveldb/db.h" +#include "stdint.h" + +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/math_functions.hpp" + +>>>>>>> add 3d network training param uint32_t swap_endian(uint32_t val) { val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); return (val << 16) | (val >> 16); @@ -17,18 +37,41 @@ uint32_t swap_endian(uint32_t val) { void read_image(std::ifstream* image_file, std::ifstream* label_file, uint32_t index, uint32_t rows, uint32_t cols, - char* pixels, char* label_temp, signed char* label) { +<<<<<<< 165e1d595232eb2a908f62887bcf2d5e1743ed10 + char* pixels, char* label_temp, signed char* label, int rgb_use) { + if (rgb_use == 0) { + image_file->seekg(index * rows * cols + 16); + image_file->read(pixels, rows * cols); + label_file->seekg(index * 4 + 8); + label_file->read(label_temp, 4); + for (int i = 0; i < 4; i++) + *(label+i) = (signed char)*(label_temp+i); + } else { + image_file->seekg(3 * index * rows * cols + 16); + image_file->read(pixels, 3 * rows * cols); + label_file->seekg(index * 4 + 8); + label_file->read(label_temp, 4); + for (int i = 0; i < 4; i++) + *(label+i) = (signed char)*(label_temp+i); + } +} + +void convert_dataset(const char* image_filename, const char* label_filename, + const char* db_filename, const char* class_number, + const char* rgb_use) { + int rgb_use1 = atoi(rgb_use); + int class_num = atoi(class_number); +======= + char* pixels, char* label) { image_file->seekg(index * rows * cols + 16); image_file->read(pixels, rows * cols); - label_file->seekg(index * 4 + 8); - label_file->read(label_temp, 4); - for (int i = 0; i < 4; i++) - *(label+i) = (signed char)*(label_temp+i); + label_file->seekg(index + 8); + 
label_file->read(label, 1); } void convert_dataset(const char* image_filename, const char* label_filename, - const char* db_filename, const char* class_number) { - int class_num = atoi(class_number); + const char* db_filename) { +>>>>>>> add 3d network training param // Open files std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); @@ -46,7 +89,11 @@ void convert_dataset(const char* image_filename, const char* label_filename, CHECK_EQ(magic, 2051) << "Incorrect image file magic."; label_file.read(reinterpret_cast(&magic), 4); magic = swap_endian(magic); +<<<<<<< 165e1d595232eb2a908f62887bcf2d5e1743ed10 CHECK_EQ(magic, 2050) << "Incorrect label file magic."; +======= + CHECK_EQ(magic, 2049) << "Incorrect label file magic."; +>>>>>>> add 3d network training param image_file.read(reinterpret_cast(&num_items), 4); num_items = swap_endian(num_items); label_file.read(reinterpret_cast(&num_labels), 4); @@ -67,26 +114,47 @@ void convert_dataset(const char* image_filename, const char* label_filename, CHECK(status.ok()) << "Failed to open leveldb " << db_filename << ". 
Is it already existing?"; +<<<<<<< 165e1d595232eb2a908f62887bcf2d5e1743ed10 char* label_temp = new char[4]; // label for unsigned char* signed char* label_i = new signed char[4]; // label for triplet signed char* label_j = new signed char[4]; signed char* label_k = new signed char[4]; signed char* label_l = new signed char[4]; // label for pair wise signed char* label_m = new signed char[4]; - char* pixels1 = new char[rows * cols]; - char* pixels2 = new char[rows * cols]; - char* pixels3 = new char[rows * cols]; - char* pixels4 = new char[rows * cols]; - char* pixels5 = new char[rows * cols]; + int db_size; + if (rgb_use1 == 0) + db_size = rows * cols; + else + db_size = 3 * rows * cols; + char* pixels1 = new char[db_size]; + char* pixels2 = new char[db_size]; + char* pixels3 = new char[db_size]; + char* pixels4 = new char[db_size]; + char* pixels5 = new char[db_size]; const int kMaxKeyLength = 10; char key[kMaxKeyLength]; std::string value; caffe::Datum datum; datum.set_channels(1); +======= + char label_i; // label for triplet + char label_j; + char label_k; + char label_l; // label for pair wise + char label_m; + char* pixels = new char[5 * rows * cols]; + const int kMaxKeyLength = 10; + char key[kMaxKeyLength]; + std::string value; + + caffe::Datum datum; + datum.set_channels(5); // one channel for each image in the triplet and pair +>>>>>>> add 3d network training param datum.set_height(rows); datum.set_width(cols); LOG(INFO) << "A total of " << num_items << " items."; LOG(INFO) << "Rows: " << rows << " Cols: " << cols; +<<<<<<< 165e1d595232eb2a908f62887bcf2d5e1743ed10 int counter = 0; for (unsigned int times = 0; times < 5; ++times) { // iteration in the samples of all class @@ -100,15 +168,15 @@ void convert_dataset(const char* image_filename, const char* label_filename, int l = caffe::caffe_rng_rand() % num_items; // pick pair wise groups int m = caffe::caffe_rng_rand() % num_items; read_image(&image_file, &label_file, i, rows, cols, // read triplet - 
pixels1, label_temp, label_i); + pixels1, label_temp, label_i, rgb_use1); read_image(&image_file, &label_file, j, rows, cols, - pixels2, label_temp, label_j); + pixels2, label_temp, label_j, rgb_use1); read_image(&image_file, &label_file, k, rows, cols, - pixels3, label_temp, label_k); + pixels3, label_temp, label_k, rgb_use1); read_image(&image_file, &label_file, l, rows, cols, // read pair wise - pixels4, label_temp, label_l); + pixels4, label_temp, label_l, rgb_use1); read_image(&image_file, &label_file, m, rows, cols, - pixels5, label_temp, label_m); + pixels5, label_temp, label_m, rgb_use1); bool pair_pass = false; bool triplet1_pass = false; @@ -132,7 +200,7 @@ void convert_dataset(const char* image_filename, const char* label_filename, float dist_ij = std::sqrt(ij_x + ij_y + ij_z); float dist_im = std::sqrt(im_x + im_y + im_z); - if (*label_i == *label_j && dist_ij < 100/2) + if (*label_i == *label_j && dist_ij < 100/2 && dist_ij != 0) pair_pass = true; if (pair_pass && (*label_i != *label_k)) triplet1_pass = true; @@ -144,31 +212,31 @@ void convert_dataset(const char* image_filename, const char* label_filename, triplet3_pass = true; if (pair_pass && triplet1_pass && triplet2_pass && triplet3_pass) { datum.set_data(pixels1, rows*cols); // set data - datum.set_label(int(*label_i)); + datum.set_label(static_cast(*label_i)); datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", counter); db->Put(leveldb::WriteOptions(), std::string(key), value); counter++; datum.set_data(pixels2, rows*cols); // set data - datum.set_label(int(*label_j)); + datum.set_label(static_cast(*label_j)); datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", counter); db->Put(leveldb::WriteOptions(), std::string(key), value); counter++; datum.set_data(pixels3, rows*cols); // set data - datum.set_label(int(*label_k)); + datum.set_label(static_cast(*label_k)); datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", counter); 
db->Put(leveldb::WriteOptions(), std::string(key), value); counter++; datum.set_data(pixels4, rows*cols); // set data - datum.set_label(int(*label_l)); + datum.set_label(static_cast(*label_l)); datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", counter); db->Put(leveldb::WriteOptions(), std::string(key), value); counter++; datum.set_data(pixels5, rows*cols); // set data - datum.set_label(int(*label_m)); + datum.set_label(static_cast(*label_m)); datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", counter); db->Put(leveldb::WriteOptions(), std::string(key), value); @@ -176,9 +244,9 @@ void convert_dataset(const char* image_filename, const char* label_filename, } else { class_ind--; } - } // iteration in the samples of all class - } // iteration in the samples in one class - } // iteration in times + } // iteration in the samples of all class + } // iteration in the samples in one class + } // iteration in times delete db; delete pixels1; delete pixels2; @@ -188,15 +256,67 @@ void convert_dataset(const char* image_filename, const char* label_filename, } int main(int argc, char** argv) { - if (argc != 5) { - printf("This script converts the images dataset to the leveldb format used\n" + if (argc != 6) { + printf("This script converts the dataset to the leveldb format used\n" "by caffe to train a triplet network.\n" "Usage:\n" " convert_3d_data input_image_file input_label_file " - "output_db_file class_number\n"); + "output_db_file class_number rgb_use \n"); + } else { + google::InitGoogleLogging(argv[0]); + convert_dataset(argv[1], argv[2], argv[3], argv[4], argv[5]); +======= + for (int itemid = 0; itemid < num_items; ++itemid) { + // pick triplet groups + int i = caffe::caffe_rng_rand() % num_items; + int j = caffe::caffe_rng_rand() % num_items; + int k = caffe::caffe_rng_rand() % num_items; + // pick pair wise groups + int l = caffe::caffe_rng_rand() % num_items; + int m = caffe::caffe_rng_rand() % num_items; + // read triplet 
groups + read_image(&image_file, &label_file, i, rows, cols, + pixels, &label_i); + read_image(&image_file, &label_file, j, rows, cols, + pixels + (rows * cols), &label_j); + read_image(&image_file, &label_file, k, rows, cols, + pixels + (2 * rows * cols), &label_k); + // read pair wise groups + read_image(&image_file, &label_file, l, rows, cols, + pixels + (3 * rows * cols), &label_l); + read_image(&image_file, &label_file, m, rows, cols, + pixels + (4 * rows * cols), &label_m); + + datum.set_data(pixels, 5*rows*cols); // set data + if ((label_i == label_j && label_i != label_k) && (label_l == label_m)) { + datum.set_label(1); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", itemid); + db->Put(leveldb::WriteOptions(), std::string(key), value); + } else { + itemid--; + datum.set_label(0); + } + } + + delete db; + delete pixels; +} + +int main(int argc, char** argv) { + if (argc != 4) { + printf("This script converts the MNIST dataset to the leveldb format used\n" + "by caffe to train a siamese network.\n" + "Usage:\n" + " convert_mnist_data input_image_file input_label_file " + "output_db_file\n" + "The MNIST dataset could be downloaded at\n" + " http://yann.lecun.com/exdb/mnist/\n" + "You should gunzip them after downloading.\n"); } else { google::InitGoogleLogging(argv[0]); - convert_dataset(argv[1], argv[2], argv[3], argv[4]); + convert_dataset(argv[1], argv[2], argv[3]); +>>>>>>> add 3d network training param } return 0; } diff --git a/examples/triplet/convert_mnist_triplet_data.cpp b/examples/triplet/convert_mnist_triplet_data.cpp index 0cbab642b7c..97e27bc4db8 100644 --- a/examples/triplet/convert_mnist_triplet_data.cpp +++ b/examples/triplet/convert_mnist_triplet_data.cpp @@ -72,7 +72,9 @@ void convert_dataset(const char* image_filename, const char* label_filename, char label_i; char label_j; char label_k; - char* pixels = new char[3 * rows * cols]; + char label_l; // label for pair wise + char label_m; + char* pixels = new char[5 * 
rows * cols]; const int kMaxKeyLength = 10; char key[kMaxKeyLength]; std::string value; @@ -84,15 +86,25 @@ void convert_dataset(const char* image_filename, const char* label_filename, LOG(INFO) << "A total of " << num_items << " items."; LOG(INFO) << "Rows: " << rows << " Cols: " << cols; for (int itemid = 0; itemid < num_items; ++itemid) { - int i = caffe::caffe_rng_rand() % num_items; // pick triplet groups + // pick triplet groups + int i = caffe::caffe_rng_rand() % num_items; int j = caffe::caffe_rng_rand() % num_items; int k = caffe::caffe_rng_rand() % num_items; + // pick pair wise groups + int l = caffe::caffe_rng_rand() % num_items; + int m = caffe::caffe_rng_rand() % num_items; + // read triplet groups read_image(&image_file, &label_file, i, rows, cols, pixels, &label_i); read_image(&image_file, &label_file, j, rows, cols, pixels + (rows * cols), &label_j); read_image(&image_file, &label_file, k, rows, cols, pixels + (2 * rows * cols), &label_k); + // pair wise groups + read_image(&image_file, &label_file, l, rows, cols, + pixels + (3 * rows * cols), &label_l); + read_image(&image_file, &label_file, m, rows, cols, + pixels + (4 * rows * cols), &label_m); datum.set_data(pixels, 3*rows*cols); if (label_i == label_j && label_i != label_k) { diff --git a/examples/triplet/create_3d_triplet.sh.orig b/examples/triplet/create_3d_triplet.sh.orig index 3cd8ee469ce..662684c6c87 100755 --- a/examples/triplet/create_3d_triplet.sh.orig +++ b/examples/triplet/create_3d_triplet.sh.orig @@ -12,10 +12,22 @@ rm -rf ./examples/triplet/3d_triplet_test_leveldb $EXAMPLES/convert_3d_triplet_data.bin \ $DATA/binary_image_train \ $DATA/binary_label_train \ +<<<<<<< 165e1d595232eb2a908f62887bcf2d5e1743ed10 + ./examples/triplet/3d_triplet_train_leveldb \ + 4 \ + 0 +$EXAMPLES/convert_3d_triplet_data.bin \ + $DATA/binary_image_test \ + $DATA/binary_label_test \ + ./examples/triplet/3d_triplet_test_leveldb \ + 4 \ + 0 +======= ./examples/triplet/3d_triplet_train_leveldb 
$EXAMPLES/convert_3d_triplet_data.bin \ $DATA/binary_image_test \ $DATA/binary_label_test \ ./examples/triplet/3d_triplet_test_leveldb +>>>>>>> add 3d network training param echo "Done." diff --git a/examples/triplet/mnist_triplet_train_test.prototxt b/examples/triplet/mnist_triplet_train_test.prototxt deleted file mode 100644 index da25dec31de..00000000000 --- a/examples/triplet/mnist_triplet_train_test.prototxt +++ /dev/null @@ -1,500 +0,0 @@ -name: "mnist_triplet_train_test" -layer { - name: "triplet_data" - type: "Data" - top: "triplet_data" - top: "sim" - include { - phase: TRAIN - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/triplet/mnist_triplet_train_leveldb" - batch_size: 64 - } -} -layer { - name: "triplet_data" - type: "Data" - top: "triplet_data" - top: "sim" - include { - phase: TEST - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/triplet/mnist_triplet_test_leveldb" - batch_size: 100 - } -} -layer { - name: "slice_triplet" - type: "Slice" - bottom: "triplet_data" - top: "data" - top: "data_true" - top: "data_false" - slice_param { - slice_dim: 1 - slice_point: 1 - slice_point: 2 - } -} -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2" 
- type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "ip2" - type: "InnerProduct" - bottom: "ip1" - top: "ip2" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip2" - top: "feat" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "conv1_true" - type: "Convolution" - bottom: "data_true" - top: "conv1_true" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1_true" - type: "Pooling" - bottom: "conv1_true" - top: "pool1_true" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2_true" - type: "Convolution" - bottom: "pool1_true" - top: "conv2_true" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2_true" - type: "Pooling" - bottom: "conv2_true" - top: "pool2_true" - pooling_param { - 
pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1_true" - type: "InnerProduct" - bottom: "pool2_true" - top: "ip1_true" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1_true" - type: "ReLU" - bottom: "ip1_true" - top: "ip1_true" -} -layer { - name: "ip2_true" - type: "InnerProduct" - bottom: "ip1_true" - top: "ip2_true" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat_true" - type: "InnerProduct" - bottom: "ip2_true" - top: "feat_true" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "conv1_false" - type: "Convolution" - bottom: "data_false" - top: "conv1_false" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1_false" - type: "Pooling" - bottom: "conv1_false" - top: "pool1_false" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2_false" - type: "Convolution" - bottom: "pool1_false" - top: "conv2_false" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2_false" - type: "Pooling" - bottom: "conv2_false" - top: "pool2_false" - pooling_param { - 
pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1_false" - type: "InnerProduct" - bottom: "pool2_false" - top: "ip1_false" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1_false" - type: "ReLU" - bottom: "ip1_false" - top: "ip1_false" -} -layer { - name: "ip2_false" - type: "InnerProduct" - bottom: "ip1_false" - top: "ip2_false" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat_false" - type: "InnerProduct" - bottom: "ip2_false" - top: "feat_false" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "loss" - type: "TripletLoss" - bottom: "feat" - bottom: "feat_true" - bottom: "feat_false" - bottom: "sim" - top: "loss" - triplet_loss_param { - margin: 1 - } -} - diff --git a/examples/triplet/train_3d_triplet.sh b/examples/triplet/train_3d_triplet.sh new file mode 100755 index 00000000000..e421af54493 --- /dev/null +++ b/examples/triplet/train_3d_triplet.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env sh + +TOOLS=./build/tools + +$TOOLS/caffe train --solver=examples/triplet/3d_triplet_solver.prototxt diff --git a/include/caffe/data_layers.hpp.orig.orig b/include/caffe/data_layers.hpp.orig.orig index 43dca80bd73..da0118d2f9b 100644 --- a/include/caffe/data_layers.hpp.orig.orig +++ b/include/caffe/data_layers.hpp.orig.orig @@ -4,21 +4,22 @@ #include #include #include -<<<<<<< ca976a2b520c8024cc7f41a048d2c1abe73f6022 -======= -#include "boost/scoped_ptr.hpp" ->>>>>>> macro define in upgrade_proto #include "hdf5.h" #include 
"caffe/blob.hpp" #include "caffe/common.hpp" +#include "caffe/data_reader.hpp" #include "caffe/data_transformer.hpp" #include "caffe/filler.hpp" #include "caffe/internal_thread.hpp" #include "caffe/layer.hpp" +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= #include "caffe/net.hpp" +>>>>>>> triplet data generation and network update #include "caffe/proto/caffe.pb.h" +#include "caffe/util/blocking_queue.hpp" #include "caffe/util/db.hpp" namespace caffe { @@ -32,12 +33,17 @@ template class BaseDataLayer : public Layer { public: explicit BaseDataLayer(const LayerParameter& param); +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= virtual ~BaseDataLayer() {} +>>>>>>> triplet data generation and network update // LayerSetUp: implements common data layer setup functionality, and calls // DataLayerSetUp to do special data layer setup for individual layer types. // This method may not be overridden except by the BasePrefetchingDataLayer. virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); + // Data layers should be shared by multiple solvers in parallel + virtual inline bool ShareInParallel() const { return true; } virtual void DataLayerSetUp(const vector*>& bottom, const vector*>& top) {} // Data layers have no bottoms, so reshaping is trivial. 
@@ -56,14 +62,6 @@ class BaseDataLayer : public Layer { }; template -class BasePrefetchingDataLayer : - public BaseDataLayer, public InternalThread { - public: - explicit BasePrefetchingDataLayer(const LayerParameter& param) - : BaseDataLayer(param) {} -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 -======= -======= class Batch { public: Blob data_, label_; @@ -73,18 +71,20 @@ template class BasePrefetchingDataLayer : public BaseDataLayer, public InternalThread { public: +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< 4d8130bab0c7311e241e40f504e5fdbb4999bd84 +======= <<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +>>>>>>> triplet data generation and network update explicit BasePrefetchingDataLayer(const LayerParameter& param); ======= explicit BasePrefetchingDataLayer(const LayerParameter& param) : BaseDataLayer(param) {} -<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 >>>>>>> macro define in upgrade_proto ->>>>>>> add initiate class name of triplet loss layer ======= virtual ~BasePrefetchingDataLayer() {} >>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update >>>>>>> triplet data generation and network update // LayerSetUp: implements common data layer setup functionality, and calls // DataLayerSetUp to do special data layer setup for individual layer types. 
@@ -97,36 +97,38 @@ class BasePrefetchingDataLayer : virtual void Forward_gpu(const vector*>& bottom, const vector*>& top); - virtual void CreatePrefetchThread(); - virtual void JoinPrefetchThread(); - // The thread's function - virtual void InternalThreadEntry() {} + // Prefetches batches (asynchronously if to GPU memory) + static const int PREFETCH_COUNT = 3; protected: - Blob prefetch_data_; - Blob prefetch_label_; + virtual void InternalThreadEntry(); + virtual void load_batch(Batch* batch) = 0; + + Batch prefetch_[PREFETCH_COUNT]; + BlockingQueue*> prefetch_free_; + BlockingQueue*> prefetch_full_; + Blob transformed_data_; }; template class DataLayer : public BasePrefetchingDataLayer { public: - explicit DataLayer(const LayerParameter& param) - : BasePrefetchingDataLayer(param) {} + explicit DataLayer(const LayerParameter& param); virtual ~DataLayer(); virtual void DataLayerSetUp(const vector*>& bottom, const vector*>& top); - + // DataLayer uses DataReader instead for sharing for parallelism + virtual inline bool ShareInParallel() const { return false; } virtual inline const char* type() const { return "Data"; } virtual inline int ExactNumBottomBlobs() const { return 0; } virtual inline int MinTopBlobs() const { return 1; } virtual inline int MaxTopBlobs() const { return 2; } protected: - virtual void InternalThreadEntry(); + virtual void load_batch(Batch* batch); - shared_ptr db_; - shared_ptr cursor_; + DataReader reader_; }; /** @@ -141,6 +143,8 @@ class DummyDataLayer : public Layer { : Layer(param) {} virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); + // Data layers should be shared by multiple solvers in parallel + virtual inline bool ShareInParallel() const { return true; } // Data layers have no bottoms, so reshaping is trivial. 
virtual void Reshape(const vector*>& bottom, const vector*>& top) {} @@ -174,6 +178,8 @@ class HDF5DataLayer : public Layer { virtual ~HDF5DataLayer(); virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); + // Data layers should be shared by multiple solvers in parallel + virtual inline bool ShareInParallel() const { return true; } // Data layers have no bottoms, so reshaping is trivial. virtual void Reshape(const vector*>& bottom, const vector*>& top) {} @@ -215,6 +221,8 @@ class HDF5OutputLayer : public Layer { virtual ~HDF5OutputLayer(); virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); + // Data layers should be shared by multiple solvers in parallel + virtual inline bool ShareInParallel() const { return true; } // Data layers have no bottoms, so reshaping is trivial. virtual void Reshape(const vector*>& bottom, const vector*>& top) {} @@ -265,7 +273,7 @@ class ImageDataLayer : public BasePrefetchingDataLayer { protected: shared_ptr prefetch_rng_; virtual void ShuffleImages(); - virtual void InternalThreadEntry(); + virtual void load_batch(Batch* batch); vector > lines_; int lines_id_; @@ -289,10 +297,8 @@ class MemoryDataLayer : public BaseDataLayer { virtual inline int ExactNumTopBlobs() const { return 2; } virtual void AddDatumVector(const vector& datum_vector); -#ifdef USE_OPENCV virtual void AddMatVector(const vector& mat_vector, const vector& labels); -#endif // USE_OPENCV // Reset should accept const pointers, but can't, because the memory // will be given to Blob, which is mutable @@ -339,7 +345,7 @@ class WindowDataLayer : public BasePrefetchingDataLayer { protected: virtual unsigned int PrefetchRand(); - virtual void InternalThreadEntry(); + virtual void load_batch(Batch* batch); shared_ptr prefetch_rng_; vector > > image_database_; diff --git a/include/caffe/filler.hpp.orig.orig b/include/caffe/filler.hpp.orig.orig new file mode 100644 index 00000000000..3b69e97f81c --- /dev/null +++ 
b/include/caffe/filler.hpp.orig.orig @@ -0,0 +1,336 @@ +// Fillers are random number generators that fills a blob using the specified +// algorithm. The expectation is that they are only going to be used during +// initialization time and will not involve any GPUs. + +#ifndef CAFFE_FILLER_HPP +#define CAFFE_FILLER_HPP + +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/syncedmem.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +/// @brief Fills a Blob with constant or randomly-generated data. +template +class Filler { + public: + explicit Filler(const FillerParameter& param) : filler_param_(param) {} + virtual ~Filler() {} + virtual void Fill(Blob* blob) = 0; + protected: + FillerParameter filler_param_; +}; // class Filler + + +/// @brief Fills a Blob with constant values @f$ x = 0 @f$. +template +class ConstantFiller : public Filler { + public: + explicit ConstantFiller(const FillerParameter& param) + : Filler(param) {} + virtual void Fill(Blob* blob) { + Dtype* data = blob->mutable_cpu_data(); + const int count = blob->count(); + const Dtype value = this->filler_param_.value(); + CHECK(count); + for (int i = 0; i < count; ++i) { + data[i] = value; + } + CHECK_EQ(this->filler_param_.sparse(), -1) + << "Sparsity not supported by this Filler."; + } +}; + +/// @brief Fills a Blob with uniformly distributed values @f$ x\sim U(a, b) @f$. +template +class UniformFiller : public Filler { + public: + explicit UniformFiller(const FillerParameter& param) + : Filler(param) {} + virtual void Fill(Blob* blob) { + CHECK(blob->count()); + caffe_rng_uniform(blob->count(), Dtype(this->filler_param_.min()), + Dtype(this->filler_param_.max()), blob->mutable_cpu_data()); + CHECK_EQ(this->filler_param_.sparse(), -1) + << "Sparsity not supported by this Filler."; + } +}; + +/// @brief Fills a Blob with Gaussian-distributed values @f$ x = a @f$. 
+template +class GaussianFiller : public Filler { + public: + explicit GaussianFiller(const FillerParameter& param) + : Filler(param) {} + virtual void Fill(Blob* blob) { + Dtype* data = blob->mutable_cpu_data(); + CHECK(blob->count()); + caffe_rng_gaussian(blob->count(), Dtype(this->filler_param_.mean()), + Dtype(this->filler_param_.std()), blob->mutable_cpu_data()); + int sparse = this->filler_param_.sparse(); + CHECK_GE(sparse, -1); + if (sparse >= 0) { + // Sparse initialization is implemented for "weight" blobs; i.e. matrices. + // These have num == channels == 1; width is number of inputs; height is + // number of outputs. The 'sparse' variable specifies the mean number + // of non-zero input weights for a given output. + CHECK_GE(blob->num_axes(), 1); + const int num_outputs = blob->shape(0); + Dtype non_zero_probability = Dtype(sparse) / Dtype(num_outputs); + rand_vec_.reset(new SyncedMemory(blob->count() * sizeof(int))); + int* mask = reinterpret_cast(rand_vec_->mutable_cpu_data()); + caffe_rng_bernoulli(blob->count(), non_zero_probability, mask); + for (int i = 0; i < blob->count(); ++i) { + data[i] *= mask[i]; + } + } + } + + protected: + shared_ptr rand_vec_; +}; + +/** @brief Fills a Blob with values @f$ x \in [0, 1] @f$ + * such that @f$ \forall i \sum_j x_{ij} = 1 @f$. 
+ */ +template +class PositiveUnitballFiller : public Filler { + public: + explicit PositiveUnitballFiller(const FillerParameter& param) + : Filler(param) {} + virtual void Fill(Blob* blob) { + Dtype* data = blob->mutable_cpu_data(); + DCHECK(blob->count()); + caffe_rng_uniform(blob->count(), 0, 1, blob->mutable_cpu_data()); + // We expect the filler to not be called very frequently, so we will + // just use a simple implementation + int dim = blob->count() / blob->num(); + CHECK(dim); + for (int i = 0; i < blob->num(); ++i) { + Dtype sum = 0; + for (int j = 0; j < dim; ++j) { + sum += data[i * dim + j]; + } + for (int j = 0; j < dim; ++j) { + data[i * dim + j] /= sum; + } + } + CHECK_EQ(this->filler_param_.sparse(), -1) + << "Sparsity not supported by this Filler."; + } +}; + +/** +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 + * @brief Fills a Blob with values @f$ x \sim U(-a, +a) @f$ where @f$ a @f$ is + * set inversely proportional to number of incoming nodes, outgoing + * nodes, or their average. + * + * A Filler based on the paper [Bengio and Glorot 2010]: Understanding + * the difficulty of training deep feedforward neuralnetworks. + * + * It fills the incoming matrix by randomly sampling uniform data from [-scale, + * scale] where scale = sqrt(3 / n) where n is the fan_in, fan_out, or their + * average, depending on the variance_norm option. You should make sure the + * input blob has shape (num, a, b, c) where a * b * c = fan_in and num * b * c + * = fan_out. Note that this is currently not the case for inner product layers. +======= + * @brief Fills a Blob with values @f$ x \sim U(-a, +a) @f$ where @f$ a @f$ + * is set inversely proportional to the number of incoming nodes. + * + * A Filler based on the paper [Bengio and Glorot 2010]: Understanding + * the difficulty of training deep feedforward neuralnetworks, but does not + * use the fan_out value. 
+ * + * It fills the incoming matrix by randomly sampling uniform data from + * [-scale, scale] where scale = sqrt(3 / fan_in) where fan_in is the number + * of input nodes. You should make sure the input blob has shape (num, a, b, c) + * where a * b * c = fan_in. +>>>>>>> triplet data generation and network update + * + * TODO(dox): make notation in above comment consistent with rest & use LaTeX. + */ +template +class XavierFiller : public Filler { + public: + explicit XavierFiller(const FillerParameter& param) + : Filler(param) {} + virtual void Fill(Blob* blob) { + CHECK(blob->count()); + int fan_in = blob->count() / blob->num(); +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 + int fan_out = blob->count() / blob->channels(); + Dtype n = fan_in; // default to fan_in + if (this->filler_param_.variance_norm() == + FillerParameter_VarianceNorm_AVERAGE) { + n = (fan_in + fan_out) / Dtype(2); + } else if (this->filler_param_.variance_norm() == + FillerParameter_VarianceNorm_FAN_OUT) { + n = fan_out; + } + Dtype scale = sqrt(Dtype(3) / n); +======= + Dtype scale = sqrt(Dtype(3) / fan_in); +>>>>>>> triplet data generation and network update + caffe_rng_uniform(blob->count(), -scale, scale, + blob->mutable_cpu_data()); + CHECK_EQ(this->filler_param_.sparse(), -1) + << "Sparsity not supported by this Filler."; + } +}; + +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +/** + * @brief Fills a Blob with values @f$ x \sim N(0, \sigma^2) @f$ where + * @f$ \sigma^2 @f$ is set inversely proportional to number of incoming + * nodes, outgoing nodes, or their average. + * + * A Filler based on the paper [He, Zhang, Ren and Sun 2015]: Specifically + * accounts for ReLU nonlinearities. + * + * Aside: for another perspective on the scaling factor, see the derivation of + * [Saxe, McClelland, and Ganguli 2013 (v3)]. 
+ * + * It fills the incoming matrix by randomly sampling Gaussian data with std = + * sqrt(2 / n) where n is the fan_in, fan_out, or their average, depending on + * the variance_norm option. You should make sure the input blob has shape (num, + * a, b, c) where a * b * c = fan_in and num * b * c = fan_out. Note that this + * is currently not the case for inner product layers. + */ +template +class MSRAFiller : public Filler { + public: + explicit MSRAFiller(const FillerParameter& param) + : Filler(param) {} + virtual void Fill(Blob* blob) { + CHECK(blob->count()); + int fan_in = blob->count() / blob->num(); + int fan_out = blob->count() / blob->channels(); + Dtype n = fan_in; // default to fan_in + if (this->filler_param_.variance_norm() == + FillerParameter_VarianceNorm_AVERAGE) { + n = (fan_in + fan_out) / Dtype(2); + } else if (this->filler_param_.variance_norm() == + FillerParameter_VarianceNorm_FAN_OUT) { + n = fan_out; + } + Dtype std = sqrt(Dtype(2) / n); + caffe_rng_gaussian(blob->count(), Dtype(0), std, + blob->mutable_cpu_data()); + CHECK_EQ(this->filler_param_.sparse(), -1) + << "Sparsity not supported by this Filler."; + } +}; +<<<<<<< 4d8130bab0c7311e241e40f504e5fdbb4999bd84 +======= +>>>>>>> triplet data generation and network update + +/*! +@brief Fills a Blob with coefficients for bilinear interpolation. + +A common use case is with the DeconvolutionLayer acting as upsampling. +You can upsample a feature map with shape of (B, C, H, W) by any integer factor +using the following proto. +\code +layer { + name: "upsample", type: "Deconvolution" + bottom: "{{bottom_name}}" top: "{{top_name}}" + convolution_param { + kernel_size: {{2 * factor - factor % 2}} stride: {{factor}} + num_output: {{C}} group: {{C}} + pad: {{ceil((factor - 1) / 2.)}} + weight_filler: { type: "bilinear" } bias_term: false + } + param { lr_mult: 0 decay_mult: 0 } +} +\endcode +Please use this by replacing `{{}}` with your values. 
By specifying +`num_output: {{C}} group: {{C}}`, it behaves as +channel-wise convolution. The filter shape of this deconvolution layer will be +(C, 1, K, K) where K is `kernel_size`, and this filler will set a (K, K) +interpolation kernel for every channel of the filter identically. The resulting +shape of the top feature map will be (B, C, factor * H, factor * W). +Note that the learning rate and the +weight decay are set to 0 in order to keep coefficient values of bilinear +interpolation unchanged during training. If you apply this to an image, this +operation is equivalent to the following call in Python with Scikit.Image. +\code{.py} +out = skimage.transform.rescale(img, factor, mode='constant', cval=0) +\endcode + */ +template +class BilinearFiller : public Filler { + public: + explicit BilinearFiller(const FillerParameter& param) + : Filler(param) {} + virtual void Fill(Blob* blob) { + CHECK_EQ(blob->num_axes(), 4) << "Blob must be 4 dim."; + CHECK_EQ(blob->width(), blob->height()) << "Filter must be square"; + Dtype* data = blob->mutable_cpu_data(); + int f = ceil(blob->width() / 2.); + float c = (2 * f - 1 - f % 2) / (2. * f); + for (int i = 0; i < blob->count(); ++i) { + float x = i % blob->width(); + float y = (i / blob->width()) % blob->height(); + data[i] = (1 - fabs(x / f - c)) * (1 - fabs(y / f - c)); + } + CHECK_EQ(this->filler_param_.sparse(), -1) + << "Sparsity not supported by this Filler."; + } +}; +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +>>>>>>> macro define in upgrade_proto +======= +>>>>>>> triplet data generation and network update + +/** + * @brief Get a specific filler from the specification given in FillerParameter. + * + * Ideally this would be replaced by a factory pattern, but we will leave it + * this way for now. 
+ */ +template +Filler* GetFiller(const FillerParameter& param) { + const std::string& type = param.type(); + if (type == "constant") { + return new ConstantFiller(param); + } else if (type == "gaussian") { + return new GaussianFiller(param); + } else if (type == "positive_unitball") { + return new PositiveUnitballFiller(param); + } else if (type == "uniform") { + return new UniformFiller(param); + } else if (type == "xavier") { + return new XavierFiller(param); +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 + } else if (type == "msra") { + return new MSRAFiller(param); +<<<<<<< 4d8130bab0c7311e241e40f504e5fdbb4999bd84 + } else if (type == "bilinear") { + return new BilinearFiller(param); +======= +>>>>>>> macro define in upgrade_proto +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 + } else if (type == "msra") { + return new MSRAFiller(param); + } else if (type == "bilinear") { + return new BilinearFiller(param); +======= +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update + } else { + CHECK(false) << "Unknown filler name: " << param.type(); + } + return (Filler*)(NULL); +} + +} // namespace caffe + +#endif // CAFFE_FILLER_HPP_ diff --git a/include/caffe/loss_layers.hpp.orig b/include/caffe/loss_layers.hpp.orig index dbc25b1e7ab..b0c8469611a 100644 --- a/include/caffe/loss_layers.hpp.orig +++ b/include/caffe/loss_layers.hpp.orig @@ -224,10 +224,18 @@ class TripletLossLayer : public LossLayer { virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< 9a40f059e6e96f739ad502aae165c4bfdbf1f369 + virtual inline int ExactNumBottomBlobs() const { return 2; } +======= + virtual inline int ExactNumBottomBlobs() const { return 4; } +>>>>>>> New triplet loss layer added(beta1 version-no test source files) +======= <<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 virtual inline int ExactNumBottomBlobs() const { return 2; } ======= virtual 
inline int ExactNumBottomBlobs() const { return 4; } +>>>>>>> triplet data generation and network update >>>>>>> triplet data generation and network update virtual inline const char* type() const { return "TripletLoss"; } /** @@ -235,10 +243,18 @@ class TripletLossLayer : public LossLayer { * to the first three inputs. */ virtual inline bool AllowForceBackward(const int bottom_index) const { +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< 9a40f059e6e96f739ad502aae165c4bfdbf1f369 + return bottom_index != 1; +======= + return bottom_index != 3; +>>>>>>> New triplet loss layer added(beta1 version-no test source files) +======= <<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 return bottom_index != 1; ======= return bottom_index != 3; +>>>>>>> triplet data generation and network update >>>>>>> triplet data generation and network update } diff --git a/include/caffe/python_layer.hpp.orig.orig b/include/caffe/python_layer.hpp.orig.orig new file mode 100644 index 00000000000..11d27f9740c --- /dev/null +++ b/include/caffe/python_layer.hpp.orig.orig @@ -0,0 +1,74 @@ +#ifndef CAFFE_PYTHON_LAYER_HPP_ +#define CAFFE_PYTHON_LAYER_HPP_ + +#include +#include + +#include "caffe/layer.hpp" + +namespace bp = boost::python; + +namespace caffe { + +template +class PythonLayer : public Layer { + public: + PythonLayer(PyObject* self, const LayerParameter& param) + : Layer(param), self_(bp::handle<>(bp::borrowed(self))) { } + + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top) { + try { + self_.attr("setup")(bottom, top); + } catch (bp::error_already_set) { + PyErr_Print(); + throw; + } + } + + virtual void Reshape(const vector*>& bottom, + const vector*>& top) { + try { + self_.attr("reshape")(bottom, top); + } catch (bp::error_already_set) { + PyErr_Print(); + throw; + } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= + } + + virtual inline bool ShareInParallel() const { + return this->layer_param_.python_param().share_in_parallel(); +>>>>>>> 
triplet data generation and network update + } + + virtual inline const char* type() const { return "Python"; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top) { + try { + self_.attr("forward")(bottom, top); + } catch (bp::error_already_set) { + PyErr_Print(); + throw; + } + } + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + try { + self_.attr("backward")(top, propagate_down, bottom); + } catch (bp::error_already_set) { + PyErr_Print(); + throw; + } + } + + private: + bp::object self_; +}; + +} // namespace caffe + +#endif diff --git a/include/caffe/vision_layers.hpp.orig.orig b/include/caffe/vision_layers.hpp.orig.orig new file mode 100644 index 00000000000..800a71a26f9 --- /dev/null +++ b/include/caffe/vision_layers.hpp.orig.orig @@ -0,0 +1,555 @@ +#ifndef CAFFE_VISION_LAYERS_HPP_ +#define CAFFE_VISION_LAYERS_HPP_ + +#include +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/common_layers.hpp" +#include "caffe/data_layers.hpp" +#include "caffe/layer.hpp" +#include "caffe/loss_layers.hpp" +#include "caffe/neuron_layers.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Abstract base class that factors out the BLAS code common to + * ConvolutionLayer and DeconvolutionLayer. + */ +template +class BaseConvolutionLayer : public Layer { + public: + explicit BaseConvolutionLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline int MinBottomBlobs() const { return 1; } + virtual inline int MinTopBlobs() const { return 1; } + virtual inline bool EqualNumBottomTopBlobs() const { return true; } + + protected: + // Helper functions that abstract away the column buffer and gemm arguments. 
+ // The last argument in forward_cpu_gemm is so that we can skip the im2col if + // we just called weight_cpu_gemm with the same input. + void forward_cpu_gemm(const Dtype* input, const Dtype* weights, + Dtype* output, bool skip_im2col = false); + void forward_cpu_bias(Dtype* output, const Dtype* bias); + void backward_cpu_gemm(const Dtype* input, const Dtype* weights, + Dtype* output); + void weight_cpu_gemm(const Dtype* input, const Dtype* output, Dtype* + weights); + void backward_cpu_bias(Dtype* bias, const Dtype* input); + +#ifndef CPU_ONLY + void forward_gpu_gemm(const Dtype* col_input, const Dtype* weights, + Dtype* output, bool skip_im2col = false); + void forward_gpu_bias(Dtype* output, const Dtype* bias); + void backward_gpu_gemm(const Dtype* input, const Dtype* weights, + Dtype* col_output); + void weight_gpu_gemm(const Dtype* col_input, const Dtype* output, Dtype* + weights); + void backward_gpu_bias(Dtype* bias, const Dtype* input); +#endif + + // reverse_dimensions should return true iff we are implementing deconv, so + // that conv helpers know which dimensions are which. + virtual bool reverse_dimensions() = 0; + // Compute height_out_ and width_out_ from other parameters. 
+ virtual void compute_output_shape() = 0; + + int kernel_h_, kernel_w_; + int stride_h_, stride_w_; + int num_; + int channels_; + int pad_h_, pad_w_; + int height_, width_; + int group_; + int num_output_; + int height_out_, width_out_; + bool bias_term_; + bool is_1x1_; + + private: + // wrap im2col/col2im so we don't have to remember the (long) argument lists + inline void conv_im2col_cpu(const Dtype* data, Dtype* col_buff) { + im2col_cpu(data, conv_in_channels_, conv_in_height_, conv_in_width_, + kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, col_buff); + } + inline void conv_col2im_cpu(const Dtype* col_buff, Dtype* data) { + col2im_cpu(col_buff, conv_in_channels_, conv_in_height_, conv_in_width_, + kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, data); + } +#ifndef CPU_ONLY + inline void conv_im2col_gpu(const Dtype* data, Dtype* col_buff) { + im2col_gpu(data, conv_in_channels_, conv_in_height_, conv_in_width_, + kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, col_buff); + } + inline void conv_col2im_gpu(const Dtype* col_buff, Dtype* data) { + col2im_gpu(col_buff, conv_in_channels_, conv_in_height_, conv_in_width_, + kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, data); + } +#endif + + int conv_out_channels_; + int conv_in_channels_; + int conv_out_spatial_dim_; + int conv_in_height_; + int conv_in_width_; + int kernel_dim_; + int weight_offset_; + int col_offset_; + int output_offset_; + + Blob col_buffer_; + Blob bias_multiplier_; +}; + +/** + * @brief Convolves the input image with a bank of learned filters, + * and (optionally) adds biases. + * + * Caffe convolves by reduction to matrix multiplication. This achieves + * high-throughput and generality of input and filter dimensions but comes at + * the cost of memory for matrices. This makes use of efficiency in BLAS. 
+ * + * The input is "im2col" transformed to a channel K' x H x W data matrix + * for multiplication with the N x K' x H x W filter matrix to yield a + * N' x H x W output matrix that is then "col2im" restored. K' is the + * input channel * kernel height * kernel width dimension of the unrolled + * inputs so that the im2col matrix has a column for each input region to + * be filtered. col2im restores the output spatial structure by rolling up + * the output channel N' columns of the output matrix. + */ +template +class ConvolutionLayer : public BaseConvolutionLayer { + public: + /** + * @param param provides ConvolutionParameter convolution_param, + * with ConvolutionLayer options: + * - num_output. The number of filters. + * - kernel_size / kernel_h / kernel_w. The filter dimensions, given by + * kernel_size for square filters or kernel_h and kernel_w for rectangular + * filters. + * - stride / stride_h / stride_w (\b optional, default 1). The filter + * stride, given by stride_size for equal dimensions or stride_h and stride_w + * for different strides. By default the convolution is dense with stride 1. + * - pad / pad_h / pad_w (\b optional, default 0). The zero-padding for + * convolution, given by pad for equal dimensions or pad_h and pad_w for + * different padding. Input padding is computed implicitly instead of + * actually padding. + * - group (\b optional, default 1). The number of filter groups. Group + * convolution is a method for reducing parameterization by selectively + * connecting input and output channels. The input and output channel dimensions must be divisible + * by the number of groups. For group @f$ \geq 1 @f$, the + * convolutional filters' input and output channels are separated s.t. each + * group takes 1 / group of the input channels and makes 1 / group of the + * output channels. 
Concretely 4 input channels, 8 output channels, and + * 2 groups separate input channels 1-2 and output channels 1-4 into the + * first group and input channels 3-4 and output channels 5-8 into the second + * group. + * - bias_term (\b optional, default true). Whether to have a bias. + * - engine: convolution has CAFFE (matrix multiplication) and CUDNN (library + * kernels + stream parallelism) engines. + */ + explicit ConvolutionLayer(const LayerParameter& param) + : BaseConvolutionLayer(param) {} + + virtual inline const char* type() const { return "Convolution"; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual inline bool reverse_dimensions() { return false; } + virtual void compute_output_shape(); +}; + +/** + * @brief Convolve the input with a bank of learned filters, and (optionally) + * add biases, treating filters and convolution parameters in the + * opposite sense as ConvolutionLayer. + * + * ConvolutionLayer computes each output value by dotting an input window with + * a filter; DeconvolutionLayer multiplies each input value by a filter + * elementwise, and sums over the resulting output windows. In other words, + * DeconvolutionLayer is ConvolutionLayer with the forward and backward passes + * reversed. DeconvolutionLayer reuses ConvolutionParameter for its + * parameters, but they take the opposite sense as in ConvolutionLayer (so + * padding is removed from the output rather than added to the input, and + * stride results in upsampling rather than downsampling). 
+ */ +template +class DeconvolutionLayer : public BaseConvolutionLayer { + public: + explicit DeconvolutionLayer(const LayerParameter& param) + : BaseConvolutionLayer(param) {} + + virtual inline const char* type() const { return "Deconvolution"; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual inline bool reverse_dimensions() { return true; } + virtual void compute_output_shape(); +}; + +#ifdef USE_CUDNN +/* + * @brief cuDNN implementation of ConvolutionLayer. + * Fallback to ConvolutionLayer for CPU mode. + * + * cuDNN accelerates convolution through forward kernels for filtering and bias + * plus backward kernels for the gradient w.r.t. the filters, biases, and + * inputs. Caffe + cuDNN further speeds up the computation through forward + * parallelism across groups and backward parallelism across gradients. + * + * The CUDNN engine does not have memory overhead for matrix buffers. For many + * input and filter regimes the CUDNN engine is faster than the CAFFE engine, + * but for fully-convolutional models and large inputs the CAFFE engine can be + * faster as long as it fits in memory. 
+*/ +template +class CuDNNConvolutionLayer : public ConvolutionLayer { + public: + explicit CuDNNConvolutionLayer(const LayerParameter& param) + : ConvolutionLayer(param), handles_setup_(false) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + virtual ~CuDNNConvolutionLayer(); + + protected: + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + bool handles_setup_; + cudnnHandle_t* handle_; + cudaStream_t* stream_; + vector bottom_descs_, top_descs_; + cudnnTensorDescriptor_t bias_desc_; + cudnnFilterDescriptor_t filter_desc_; + vector conv_descs_; + int bottom_offset_, top_offset_, weight_offset_, bias_offset_; + size_t workspaceSizeInBytes; + void *workspace; +}; +#endif + +/** + * @brief A helper for image operations that rearranges image regions into + * column vectors. Used by ConvolutionLayer to perform convolution + * by matrix multiplication. + * + * TODO(dox): thorough documentation for Forward, Backward, and proto params. 
+ */ +template +class Im2colLayer : public Layer { + public: + explicit Im2colLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Im2col"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + int kernel_h_, kernel_w_; + int stride_h_, stride_w_; + int channels_; + int height_, width_; + int pad_h_, pad_w_; +}; + +// Forward declare PoolingLayer and SplitLayer for use in LRNLayer. +template class PoolingLayer; +template class SplitLayer; + +/** + * @brief Normalize the input in a local region across or within feature maps. + * + * TODO(dox): thorough documentation for Forward, Backward, and proto params. 
+ */ +template +class LRNLayer : public Layer { + public: + explicit LRNLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "LRN"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + virtual void CrossChannelForward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void CrossChannelForward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void WithinChannelForward(const vector*>& bottom, + const vector*>& top); + virtual void CrossChannelBackward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void CrossChannelBackward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void WithinChannelBackward(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + int size_; + int pre_pad_; + Dtype alpha_; + Dtype beta_; + Dtype k_; + int num_; + int channels_; + int height_; + int width_; + + // Fields used for normalization ACROSS_CHANNELS + // scale_ stores the intermediate summing results + Blob scale_; + + // Fields used for normalization WITHIN_CHANNEL + shared_ptr > split_layer_; + vector*> split_top_vec_; + shared_ptr > square_layer_; + Blob square_input_; + Blob square_output_; + vector*> square_bottom_vec_; + vector*> square_top_vec_; + shared_ptr > pool_layer_; + Blob pool_output_; + vector*> 
pool_top_vec_; + shared_ptr > power_layer_; + Blob power_output_; + vector*> power_top_vec_; + shared_ptr > product_layer_; + Blob product_input_; + vector*> product_bottom_vec_; +}; + + +/** + * @brief Pools the input image by taking the max, average, etc. within regions. + * + * TODO(dox): thorough documentation for Forward, Backward, and proto params. + */ +template +class PoolingLayer : public Layer { + public: + explicit PoolingLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Pooling"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int MinTopBlobs() const { return 1; } + // MAX POOL layers can output an extra top blob for the mask; + // others can only output the pooled inputs. + virtual inline int MaxTopBlobs() const { + return (this->layer_param_.pooling_param().pool() == + PoolingParameter_PoolMethod_MAX) ? 2 : 1; + } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + int kernel_h_, kernel_w_; + int stride_h_, stride_w_; + int pad_h_, pad_w_; + int channels_; + int height_, width_; + int pooled_height_, pooled_width_; + bool global_pooling_; + Blob rand_idx_; + Blob max_idx_; +}; + +#ifdef USE_CUDNN +/* + * @brief cuDNN implementation of PoolingLayer. + * Fallback to PoolingLayer for CPU mode. 
+*/ +template +class CuDNNPoolingLayer : public PoolingLayer { + public: + explicit CuDNNPoolingLayer(const LayerParameter& param) + : PoolingLayer(param), handles_setup_(false) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + virtual ~CuDNNPoolingLayer(); + // Currently, cuDNN does not support the extra top blob. + virtual inline int MinTopBlobs() const { return -1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + bool handles_setup_; + cudnnHandle_t handle_; + cudnnTensorDescriptor_t bottom_desc_, top_desc_; + cudnnPoolingDescriptor_t pooling_desc_; + cudnnPoolingMode_t mode_; +}; +#endif + +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +>>>>>>> triplet data generation and network update +/** + * @brief Does spatial pyramid pooling on the input image + * by taking the max, average, etc. within regions + * so that the result vector of different sized + * images are of the same size. 
+ */ +template +class SPPLayer : public Layer { + public: + explicit SPPLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "SPP"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< 4d8130bab0c7311e241e40f504e5fdbb4999bd84 + virtual inline int ExactNumTopBlobs() const { return 1; } +======= + virtual inline int MinTopBlobs() const { return 1; } + // MAX POOL layers can output an extra top blob for the mask; + // others can only output the pooled inputs. + virtual inline int MaxTopBlobs() const { + return (this->layer_param_.pooling_param().pool() == + PoolingParameter_PoolMethod_MAX) ? 2 : 1; + } +>>>>>>> macro define in upgrade_proto +======= + virtual inline int ExactNumTopBlobs() const { return 1; } +>>>>>>> triplet data generation and network update + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + // calculates the kernel and stride dimensions for the pooling layer, + // returns a correctly configured LayerParameter for a PoolingLayer + virtual LayerParameter GetPoolingParam(const int pyramid_level, + const int bottom_h, const int bottom_w, const SPPParameter spp_param); + + int pyramid_height_; + int bottom_h_, bottom_w_; +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< 4d8130bab0c7311e241e40f504e5fdbb4999bd84 +======= +>>>>>>> triplet data generation and network update + int num_; + int channels_; + int kernel_h_, kernel_w_; + int pad_h_, pad_w_; + bool reshaped_first_time_; +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= + int channels_; + int kernel_h_, kernel_w_; + int pad_h_, pad_w_; +>>>>>>> macro define in 
upgrade_proto +======= +>>>>>>> triplet data generation and network update + + /// the internal Split layer that feeds the pooling layers + shared_ptr > split_layer_; + /// top vector holder used in call to the underlying SplitLayer::Forward + vector*> split_top_vec_; + /// bottom vector holder used in call to the underlying PoolingLayer::Forward + vector*>*> pooling_bottom_vecs_; + /// the internal Pooling layers of different kernel sizes + vector > > pooling_layers_; + /// top vector holders used in call to the underlying PoolingLayer::Forward + vector*>*> pooling_top_vecs_; + /// pooling_outputs stores the outputs of the PoolingLayers + vector*> pooling_outputs_; + /// the internal Flatten layers that the Pooling layers feed into + vector*> flatten_layers_; + /// top vector holders used in call to the underlying FlattenLayer::Forward + vector*>*> flatten_top_vecs_; + /// flatten_outputs stores the outputs of the FlattenLayers + vector*> flatten_outputs_; + /// bottom vector holder used in call to the underlying ConcatLayer::Forward + vector*> concat_bottom_vec_; + /// the internal Concat layers that the Flatten layers feed into + shared_ptr > concat_layer_; +}; + +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +======= +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update +} // namespace caffe + +#endif // CAFFE_VISION_LAYERS_HPP_ diff --git a/src/caffe/layers/base_data_layer.cpp.orig.orig b/src/caffe/layers/base_data_layer.cpp.orig.orig new file mode 100644 index 00000000000..72e4909aafc --- /dev/null +++ b/src/caffe/layers/base_data_layer.cpp.orig.orig @@ -0,0 +1,179 @@ +#include +#include +#include + +#include "caffe/data_layers.hpp" +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +#include "caffe/net.hpp" +>>>>>>> triplet data generation and network update +#include "caffe/util/io.hpp" + +namespace caffe { + +template +BaseDataLayer::BaseDataLayer(const LayerParameter& param) + : 
Layer(param), + transform_param_(param.transform_param()) { +} + +template +void BaseDataLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + if (top.size() == 1) { + output_labels_ = false; + } else { + output_labels_ = true; + } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 + data_transformer_.reset( + new DataTransformer(transform_param_, this->phase_)); + data_transformer_->InitRand(); + // The subclasses should setup the size of bottom and top + DataLayerSetUp(bottom, top); +<<<<<<< 4d8130bab0c7311e241e40f504e5fdbb4999bd84 +======= + // The subclasses should setup the size of bottom and top + DataLayerSetUp(bottom, top); + data_transformer_.reset( + new DataTransformer(transform_param_, this->phase_)); + data_transformer_->InitRand(); +>>>>>>> triplet data generation and network update +} + +template +BasePrefetchingDataLayer::BasePrefetchingDataLayer( + const LayerParameter& param) + : BaseDataLayer(param), + prefetch_free_(), prefetch_full_() { + for (int i = 0; i < PREFETCH_COUNT; ++i) { + prefetch_free_.push(&prefetch_[i]); + } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +>>>>>>> macro define in upgrade_proto +======= +>>>>>>> triplet data generation and network update +} + +template +void BasePrefetchingDataLayer::LayerSetUp( + const vector*>& bottom, const vector*>& top) { + BaseDataLayer::LayerSetUp(bottom, top); + // Before starting the prefetch thread, we make cpu_data and gpu_data + // calls so that the prefetch thread does not accidentally make simultaneous + // cudaMalloc calls when the main thread is running. In some GPUs this + // seems to cause failures if we do not so. 
+ for (int i = 0; i < PREFETCH_COUNT; ++i) { + prefetch_[i].data_.mutable_cpu_data(); + if (this->output_labels_) { + prefetch_[i].label_.mutable_cpu_data(); + } + } +#ifndef CPU_ONLY + if (Caffe::mode() == Caffe::GPU) { + for (int i = 0; i < PREFETCH_COUNT; ++i) { + prefetch_[i].data_.mutable_gpu_data(); + if (this->output_labels_) { + prefetch_[i].label_.mutable_gpu_data(); + } + } + } +#endif + DLOG(INFO) << "Initializing prefetch"; + this->data_transformer_->InitRand(); + StartInternalThread(); + DLOG(INFO) << "Prefetch initialized."; +} + +template +void BasePrefetchingDataLayer::InternalThreadEntry() { +#ifndef CPU_ONLY + cudaStream_t stream; + if (Caffe::mode() == Caffe::GPU) { + CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); + } +#endif + + try { + while (!must_stop()) { + Batch* batch = prefetch_free_.pop(); + load_batch(batch); +#ifndef CPU_ONLY + if (Caffe::mode() == Caffe::GPU) { + batch->data_.data().get()->async_gpu_push(stream); + CUDA_CHECK(cudaStreamSynchronize(stream)); + } +#endif + prefetch_full_.push(batch); + } + } catch (boost::thread_interrupted&) { + // Interrupted exception is expected on shutdown + } +#ifndef CPU_ONLY + if (Caffe::mode() == Caffe::GPU) { + CUDA_CHECK(cudaStreamDestroy(stream)); + } +#endif +} + +template +void BasePrefetchingDataLayer::Forward_cpu( + const vector*>& bottom, const vector*>& top) { + Batch* batch = prefetch_full_.pop("Data layer prefetch queue empty"); + // Reshape to loaded data. 
+<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< 4d8130bab0c7311e241e40f504e5fdbb4999bd84 + top[0]->ReshapeLike(batch->data_); +======= + top[0]->ReshapeLike(prefetch_data_); +>>>>>>> macro define in upgrade_proto +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 + top[0]->ReshapeLike(batch->data_); +======= + top[0]->Reshape(this->prefetch_data_.num(), this->prefetch_data_.channels(), + this->prefetch_data_.height(), this->prefetch_data_.width()); +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update + // Copy the data + caffe_copy(batch->data_.count(), batch->data_.cpu_data(), + top[0]->mutable_cpu_data()); + DLOG(INFO) << "Prefetch copied"; + if (this->output_labels_) { +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 + // Reshape to loaded labels. +<<<<<<< 4d8130bab0c7311e241e40f504e5fdbb4999bd84 +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 + // Reshape to loaded labels. +>>>>>>> triplet data generation and network update + top[1]->ReshapeLike(batch->label_); + // Copy the labels. + caffe_copy(batch->label_.count(), batch->label_.cpu_data(), + top[1]->mutable_cpu_data()); +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 + top[1]->ReshapeLike(prefetch_label_); + // Copy the labels. 
+ caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(), + top[1]->mutable_cpu_data()); +>>>>>>> macro define in upgrade_proto +======= + caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(), + top[1]->mutable_cpu_data()); +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update + } + + prefetch_free_.push(batch); +} + +#ifdef CPU_ONLY +STUB_GPU_FORWARD(BasePrefetchingDataLayer, Forward); +#endif + +INSTANTIATE_CLASS(BaseDataLayer); +INSTANTIATE_CLASS(BasePrefetchingDataLayer); + +} // namespace caffe diff --git a/src/caffe/layers/base_data_layer.cu.orig.orig b/src/caffe/layers/base_data_layer.cu.orig.orig new file mode 100644 index 00000000000..ef504680e82 --- /dev/null +++ b/src/caffe/layers/base_data_layer.cu.orig.orig @@ -0,0 +1,60 @@ +#include + +#include "caffe/data_layers.hpp" + +namespace caffe { + +template +void BasePrefetchingDataLayer::Forward_gpu( + const vector*>& bottom, const vector*>& top) { + Batch* batch = prefetch_full_.pop("Data layer prefetch queue empty"); + // Reshape to loaded data. +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< 4d8130bab0c7311e241e40f504e5fdbb4999bd84 + top[0]->ReshapeLike(batch->data_); +======= + top[0]->ReshapeLike(this->prefetch_data_); +>>>>>>> macro define in upgrade_proto +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 + top[0]->ReshapeLike(batch->data_); +======= + top[0]->Reshape(this->prefetch_data_.num(), this->prefetch_data_.channels(), + this->prefetch_data_.height(), this->prefetch_data_.width()); +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update + // Copy the data + caffe_copy(batch->data_.count(), batch->data_.gpu_data(), + top[0]->mutable_gpu_data()); + if (this->output_labels_) { +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 + // Reshape to loaded labels. 
+<<<<<<< 4d8130bab0c7311e241e40f504e5fdbb4999bd84 +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 + // Reshape to loaded labels. +>>>>>>> triplet data generation and network update + top[1]->ReshapeLike(batch->label_); + // Copy the labels. + caffe_copy(batch->label_.count(), batch->label_.gpu_data(), +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 + top[1]->ReshapeLike(prefetch_label_); + // Copy the labels. + caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(), +>>>>>>> macro define in upgrade_proto +======= + caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(), +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update + top[1]->mutable_gpu_data()); + } + // Ensure the copy is synchronous wrt the host, so that the next batch isn't + // copied in meanwhile. + CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault)); + prefetch_free_.push(batch); +} + +INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer); + +} // namespace caffe diff --git a/src/caffe/layers/concat_layer.cu.orig.orig b/src/caffe/layers/concat_layer.cu.orig.orig new file mode 100644 index 00000000000..35acd1f833a --- /dev/null +++ b/src/caffe/layers/concat_layer.cu.orig.orig @@ -0,0 +1,101 @@ +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +__global__ void Concat(const int nthreads, const Dtype* in_data, + const bool forward, const int num_concats, const int concat_size, + const int top_concat_axis, const int bottom_concat_axis, + const int offset_concat_axis, Dtype* out_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int total_concat_size = concat_size * bottom_concat_axis; + const int concat_num = index / total_concat_size; + const int concat_index = index % total_concat_size; + const int top_index = concat_index + + (concat_num * top_concat_axis + offset_concat_axis) * concat_size; + if (forward) { + 
out_data[top_index] = in_data[index]; + } else { + out_data[index] = in_data[top_index]; + } + } +} + +template +void ConcatLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + if (bottom.size() == 1) { return; } + Dtype* top_data = top[0]->mutable_gpu_data(); + int offset_concat_axis = 0; + const int top_concat_axis = top[0]->shape(concat_axis_); + const bool kForward = true; + for (int i = 0; i < bottom.size(); ++i) { + const Dtype* bottom_data = bottom[i]->gpu_data(); + const int bottom_concat_axis = bottom[i]->shape(concat_axis_); + const int bottom_concat_size = bottom_concat_axis * concat_input_size_; + const int nthreads = bottom_concat_size * num_concats_; + Concat // NOLINT_NEXT_LINE(whitespace/operators) + <<>>( + nthreads, bottom_data, kForward, num_concats_, concat_input_size_, + top_concat_axis, bottom_concat_axis, offset_concat_axis, top_data); + offset_concat_axis += bottom_concat_axis; + } +} + +template +void ConcatLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + if (bottom.size() == 1) { return; } + const Dtype* top_diff = top[0]->gpu_diff(); + int offset_concat_axis = 0; + const int top_concat_axis = top[0]->shape(concat_axis_); + const bool kForward = false; + for (int i = 0; i < bottom.size(); ++i) { + const int bottom_concat_axis = bottom[i]->shape(concat_axis_); +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< 4d8130bab0c7311e241e40f504e5fdbb4999bd84 +======= +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +>>>>>>> triplet data generation and network update + if (propagate_down[i]) { + Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); + const int bottom_concat_size = bottom_concat_axis * concat_input_size_; + const int nthreads = bottom_concat_size * num_concats_; + Concat // NOLINT_NEXT_LINE(whitespace/operators) + <<>>( + nthreads, top_diff, kForward, num_concats_, concat_input_size_, + top_concat_axis, 
bottom_concat_axis, offset_concat_axis, bottom_diff); +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +======= + for (int n = 0; n < num_concats_; ++n) { + caffe_copy(bottom_concat_axis * concat_input_size_, top_diff + + (n * top_concat_axis + offset_concat_axis) * concat_input_size_, + bottom_diff + n * bottom_concat_axis * concat_input_size_); +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update + } +======= + const int bottom_concat_size = bottom_concat_axis * concat_input_size_; + const int nthreads = bottom_concat_size * num_concats_; + Concat // NOLINT_NEXT_LINE(whitespace/operators) + <<>>( + nthreads, top_diff, kForward, num_concats_, concat_input_size_, + top_concat_axis, bottom_concat_axis, offset_concat_axis, bottom_diff); +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +>>>>>>> macro define in upgrade_proto +======= +>>>>>>> restore +>>>>>>> triplet data generation and network update + offset_concat_axis += bottom_concat_axis; + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(ConcatLayer); + +} // namespace caffe diff --git a/src/caffe/layers/data_layer.cpp.orig.orig b/src/caffe/layers/data_layer.cpp.orig.orig new file mode 100644 index 00000000000..9bfd1c95be7 --- /dev/null +++ b/src/caffe/layers/data_layer.cpp.orig.orig @@ -0,0 +1,234 @@ +#include + +#include + +#include +#include + +#include "caffe/common.hpp" +#include "caffe/data_layers.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/benchmark.hpp" +#include "caffe/util/io.hpp" + +namespace caffe { + +template +DataLayer::DataLayer(const LayerParameter& param) + : BasePrefetchingDataLayer(param), + reader_(param) { +} + +template +DataLayer::~DataLayer() { + this->StopInternalThread(); +} + +template +void DataLayer::DataLayerSetUp(const vector*>& bottom, + const vector*>& top) { + const int batch_size = this->layer_param_.data_param().batch_size(); + // Read a data point, and use it to initialize the top 
blob. + Datum& datum = *(reader_.full().peek()); + +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< 9a40f059e6e96f739ad502aae165c4bfdbf1f369 +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +>>>>>>> triplet data generation and network update + // Use data_transformer to infer the expected blob shape from datum. + vector top_shape = this->data_transformer_->InferBlobShape(datum); + this->transformed_data_.Reshape(top_shape); + // Reshape top[0] and prefetch_data according to the batch_size. + top_shape[0] = batch_size; + top[0]->Reshape(top_shape); + for (int i = 0; i < this->PREFETCH_COUNT; ++i) { + this->prefetch_[i].data_.Reshape(top_shape); +======= + // Check if we should randomly skip a few data points + if (this->layer_param_.data_param().rand_skip()) { + unsigned int skip = caffe_rng_rand() % + this->layer_param_.data_param().rand_skip(); + LOG(INFO) << "Skipping first " << skip << " data points."; + while (skip-- > 0) { + cursor_->Next(); + } + } + // Read a data point, and use it to initialize the top blob. 
+ Datum datum; + datum.ParseFromString(cursor_->value()); + + bool force_color = this->layer_param_.data_param().force_encoded_color(); + if ((force_color && DecodeDatum(&datum, true)) || + DecodeDatumNative(&datum)) { + LOG(INFO) << "Decoding Datum"; + } + // image + int crop_size = this->layer_param_.transform_param().crop_size(); + if (crop_size > 0) { + top[0]->Reshape(this->layer_param_.data_param().batch_size(), + datum.channels(), crop_size, crop_size); + this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(), + datum.channels(), crop_size, crop_size); + this->transformed_data_.Reshape(1, datum.channels(), crop_size, crop_size); + } else { + top[0]->Reshape( + this->layer_param_.data_param().batch_size(), datum.channels(), + datum.height(), datum.width()); + this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(), + datum.channels(), datum.height(), datum.width()); + this->transformed_data_.Reshape(1, datum.channels(), + datum.height(), datum.width()); +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +>>>>>>> New triplet loss layer added(beta1 version-no test source files) +======= +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update + } + LOG(INFO) << "output data size: " << top[0]->num() << "," + << top[0]->channels() << "," << top[0]->height() << "," + << top[0]->width(); + // label + if (this->output_labels_) { + vector label_shape(1, batch_size); + top[1]->Reshape(label_shape); + for (int i = 0; i < this->PREFETCH_COUNT; ++i) { + this->prefetch_[i].label_.Reshape(label_shape); + } + } +} + +// This function is called on prefetch thread +template +void DataLayer::load_batch(Batch* batch) { + CPUTimer batch_timer; + batch_timer.Start(); + double read_time = 0; + double trans_time = 0; + CPUTimer timer; + CHECK(batch->data_.count()); + CHECK(this->transformed_data_.count()); + + // Reshape on single input batches for inputs of varying dimension. 
+ const int batch_size = this->layer_param_.data_param().batch_size(); +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< 9a40f059e6e96f739ad502aae165c4bfdbf1f369 +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +>>>>>>> triplet data generation and network update + Datum& datum = *(reader_.full().peek()); + // Use data_transformer to infer the expected blob shape from datum. + vector top_shape = this->data_transformer_->InferBlobShape(datum); + this->transformed_data_.Reshape(top_shape); + // Reshape batch according to the batch_size. + top_shape[0] = batch_size; + batch->data_.Reshape(top_shape); +======= + const int crop_size = this->layer_param_.transform_param().crop_size(); + bool force_color = this->layer_param_.data_param().force_encoded_color(); + if (batch_size == 1 && crop_size == 0) { + Datum datum; + datum.ParseFromString(cursor_->value()); + if (datum.encoded()) { + if (force_color) { + DecodeDatum(&datum, true); + } else { + DecodeDatumNative(&datum); + } + } + this->prefetch_data_.Reshape(1, datum.channels(), + datum.height(), datum.width()); + this->transformed_data_.Reshape(1, datum.channels(), + datum.height(), datum.width()); + } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +>>>>>>> New triplet loss layer added(beta1 version-no test source files) +======= +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update + + Dtype* top_data = batch->data_.mutable_cpu_data(); + Dtype* top_label = NULL; // suppress warnings about uninitialized variables + + if (this->output_labels_) { + top_label = batch->label_.mutable_cpu_data(); + } + for (int item_id = 0; item_id < batch_size; ++item_id) { + timer.Start(); +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< 9a40f059e6e96f739ad502aae165c4bfdbf1f369 +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +>>>>>>> triplet data generation and network update + // get a datum + Datum& datum = *(reader_.full().pop("Waiting for 
data")); +======= + // get a blob + Datum datum; + datum.ParseFromString(cursor_->value()); + + cv::Mat cv_img; + if (datum.encoded()) { + if (force_color) { + cv_img = DecodeDatumToCVMat(datum, true); + } else { + cv_img = DecodeDatumToCVMatNative(datum); + } + if (cv_img.channels() != this->transformed_data_.channels()) { + LOG(WARNING) << "Your dataset contains encoded images with mixed " + << "channel sizes. Consider adding a 'force_color' flag to the " + << "model definition, or rebuild your dataset using " + << "convert_imageset."; + } + } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +>>>>>>> New triplet loss layer added(beta1 version-no test source files) +======= +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update + read_time += timer.MicroSeconds(); + timer.Start(); + + // Apply data transformations (mirror, scale, crop...) + int offset = batch->data_.offset(item_id); + this->transformed_data_.set_cpu_data(top_data + offset); + if (datum.encoded()) { + this->data_transformer_->Transform(cv_img, &(this->transformed_data_)); + } else { + this->data_transformer_->Transform(datum, &(this->transformed_data_)); + } + if (this->output_labels_) { + top_label[item_id] = datum.label(); + } + trans_time += timer.MicroSeconds(); +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< 9a40f059e6e96f739ad502aae165c4bfdbf1f369 +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +>>>>>>> triplet data generation and network update + + reader_.free().push(const_cast(&datum)); +======= + // go to the next iter + cursor_->Next(); + if (!cursor_->valid()) { + DLOG(INFO) << "Restarting data prefetching from start."; + cursor_->SeekToFirst(); + } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +>>>>>>> New triplet loss layer added(beta1 version-no test source files) +======= +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update + } + batch_timer.Stop(); + 
DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms."; + DLOG(INFO) << " Read time: " << read_time / 1000 << " ms."; + DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms."; +} + +INSTANTIATE_CLASS(DataLayer); +REGISTER_LAYER_CLASS(Data); + +} // namespace caffe diff --git a/src/caffe/layers/image_data_layer.cpp.orig.orig b/src/caffe/layers/image_data_layer.cpp.orig.orig new file mode 100644 index 00000000000..851fc635fb8 --- /dev/null +++ b/src/caffe/layers/image_data_layer.cpp.orig.orig @@ -0,0 +1,213 @@ +#include + +#include // NOLINT(readability/streams) +#include // NOLINT(readability/streams) +#include +#include +#include + +#include "caffe/data_layers.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/benchmark.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/util/rng.hpp" + +namespace caffe { + +template +ImageDataLayer::~ImageDataLayer() { + this->StopInternalThread(); +} + +template +void ImageDataLayer::DataLayerSetUp(const vector*>& bottom, + const vector*>& top) { + const int new_height = this->layer_param_.image_data_param().new_height(); + const int new_width = this->layer_param_.image_data_param().new_width(); + const bool is_color = this->layer_param_.image_data_param().is_color(); + string root_folder = this->layer_param_.image_data_param().root_folder(); + + CHECK((new_height == 0 && new_width == 0) || + (new_height > 0 && new_width > 0)) << "Current implementation requires " + "new_height and new_width to be set at the same time."; + // Read the file with filenames and labels + const string& source = this->layer_param_.image_data_param().source(); + LOG(INFO) << "Opening file " << source; + std::ifstream infile(source.c_str()); + string filename; + int label; + while (infile >> filename >> label) { + lines_.push_back(std::make_pair(filename, label)); + } + + if (this->layer_param_.image_data_param().shuffle()) { + // randomly shuffle data + LOG(INFO) << "Shuffling 
data"; + const unsigned int prefetch_rng_seed = caffe_rng_rand(); + prefetch_rng_.reset(new Caffe::RNG(prefetch_rng_seed)); + ShuffleImages(); + } + LOG(INFO) << "A total of " << lines_.size() << " images."; + + lines_id_ = 0; + // Check if we would need to randomly skip a few data points + if (this->layer_param_.image_data_param().rand_skip()) { + unsigned int skip = caffe_rng_rand() % + this->layer_param_.image_data_param().rand_skip(); + LOG(INFO) << "Skipping first " << skip << " data points."; + CHECK_GT(lines_.size(), skip) << "Not enough points to skip"; + lines_id_ = skip; + } + // Read an image, and use it to initialize the top blob. + cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, + new_height, new_width, is_color); +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< 9a40f059e6e96f739ad502aae165c4bfdbf1f369 +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +>>>>>>> triplet data generation and network update + CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first; + // Use data_transformer to infer the expected blob shape from a cv_image. + vector top_shape = this->data_transformer_->InferBlobShape(cv_img); + this->transformed_data_.Reshape(top_shape); + // Reshape prefetch_data and top[0] according to the batch_size. 
+ const int batch_size = this->layer_param_.image_data_param().batch_size(); + CHECK_GT(batch_size, 0) << "Positive batch size required"; + top_shape[0] = batch_size; + for (int i = 0; i < this->PREFETCH_COUNT; ++i) { + this->prefetch_[i].data_.Reshape(top_shape); + } + top[0]->Reshape(top_shape); + +======= + const int channels = cv_img.channels(); + const int height = cv_img.rows; + const int width = cv_img.cols; + // image + const int crop_size = this->layer_param_.transform_param().crop_size(); + const int batch_size = this->layer_param_.image_data_param().batch_size(); + if (crop_size > 0) { + top[0]->Reshape(batch_size, channels, crop_size, crop_size); + this->prefetch_data_.Reshape(batch_size, channels, crop_size, crop_size); + this->transformed_data_.Reshape(1, channels, crop_size, crop_size); + } else { + top[0]->Reshape(batch_size, channels, height, width); + this->prefetch_data_.Reshape(batch_size, channels, height, width); + this->transformed_data_.Reshape(1, channels, height, width); + } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +>>>>>>> New triplet loss layer added(beta1 version-no test source files) +======= +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update + LOG(INFO) << "output data size: " << top[0]->num() << "," + << top[0]->channels() << "," << top[0]->height() << "," + << top[0]->width(); + // label + vector label_shape(1, batch_size); + top[1]->Reshape(label_shape); + for (int i = 0; i < this->PREFETCH_COUNT; ++i) { + this->prefetch_[i].label_.Reshape(label_shape); + } +} + +template +void ImageDataLayer::ShuffleImages() { + caffe::rng_t* prefetch_rng = + static_cast(prefetch_rng_->generator()); + shuffle(lines_.begin(), lines_.end(), prefetch_rng); +} + +// This function is called on prefetch thread +template +void ImageDataLayer::load_batch(Batch* batch) { + CPUTimer batch_timer; + batch_timer.Start(); + double read_time = 0; + double trans_time = 0; + CPUTimer timer; + 
CHECK(batch->data_.count()); + CHECK(this->transformed_data_.count()); + ImageDataParameter image_data_param = this->layer_param_.image_data_param(); + const int batch_size = image_data_param.batch_size(); + const int new_height = image_data_param.new_height(); + const int new_width = image_data_param.new_width(); + const int crop_size = this->layer_param_.transform_param().crop_size(); + const bool is_color = image_data_param.is_color(); + string root_folder = image_data_param.root_folder(); + +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< 9a40f059e6e96f739ad502aae165c4bfdbf1f369 +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +>>>>>>> triplet data generation and network update + // Reshape according to the first image of each batch + // on single input batches allows for inputs of varying dimension. + cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, + new_height, new_width, is_color); + CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first; + // Use data_transformer to infer the expected blob shape from a cv_img. + vector top_shape = this->data_transformer_->InferBlobShape(cv_img); + this->transformed_data_.Reshape(top_shape); + // Reshape batch according to the batch_size. + top_shape[0] = batch_size; + batch->data_.Reshape(top_shape); +======= + // Reshape on single input batches for inputs of varying dimension. 
+ if (batch_size == 1 && crop_size == 0 && new_height == 0 && new_width == 0) { + cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, + 0, 0, is_color); + this->prefetch_data_.Reshape(1, cv_img.channels(), + cv_img.rows, cv_img.cols); + this->transformed_data_.Reshape(1, cv_img.channels(), + cv_img.rows, cv_img.cols); + } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +>>>>>>> New triplet loss layer added(beta1 version-no test source files) +======= +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update + + Dtype* prefetch_data = batch->data_.mutable_cpu_data(); + Dtype* prefetch_label = batch->label_.mutable_cpu_data(); + + // datum scales + const int lines_size = lines_.size(); + for (int item_id = 0; item_id < batch_size; ++item_id) { + // get a blob + timer.Start(); + CHECK_GT(lines_size, lines_id_); + cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, + new_height, new_width, is_color); + CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first; + read_time += timer.MicroSeconds(); + timer.Start(); + // Apply transformations (mirror, crop...) to the image + int offset = batch->data_.offset(item_id); + this->transformed_data_.set_cpu_data(prefetch_data + offset); + this->data_transformer_->Transform(cv_img, &(this->transformed_data_)); + trans_time += timer.MicroSeconds(); + + prefetch_label[item_id] = lines_[lines_id_].second; + // go to the next iter + lines_id_++; + if (lines_id_ >= lines_size) { + // We have reached the end. Restart from the first. 
+ DLOG(INFO) << "Restarting data prefetching from start."; + lines_id_ = 0; + if (this->layer_param_.image_data_param().shuffle()) { + ShuffleImages(); + } + } + } + batch_timer.Stop(); + DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms."; + DLOG(INFO) << " Read time: " << read_time / 1000 << " ms."; + DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms."; +} + +INSTANTIATE_CLASS(ImageDataLayer); +REGISTER_LAYER_CLASS(ImageData); + +} // namespace caffe diff --git a/src/caffe/layers/triplet_loss_layer.cpp.orig.orig b/src/caffe/layers/triplet_loss_layer.cpp.orig.orig index e65f1fdc29b..1bac0565530 100644 --- a/src/caffe/layers/triplet_loss_layer.cpp.orig.orig +++ b/src/caffe/layers/triplet_loss_layer.cpp.orig.orig @@ -46,10 +46,10 @@ void TripletLossLayer::Forward_cpu( int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); CHECK_EQ(bottom[0]->num()%(2 + num_triplets), 0); Dtype loss(0.0); -<<<<<<< 46f6a4f657c9a5f4ffb83b7c8540b4fd2b8208bb +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 ======= -<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c ->>>>>>> GPU version added +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a +>>>>>>> triplet data generation and network update int dim = bottom[0]->count()/bottom[0]->num(); int num_set = bottom[0]->num()/(2 + num_triplets); if (losstype == 0) { @@ -82,23 +82,24 @@ void TripletLossLayer::Forward_cpu( // loss accumulated accumulated by the triplet part loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); } -<<<<<<< 46f6a4f657c9a5f4ffb83b7c8540b4fd2b8208bb +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 ======= ======= + // Loss component calculated from ab for (int i = 0; i < bottom[0]->num(); ++i) { dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(channels, diff_pos.cpu_data() + (i*channels), diff_pos.cpu_data() + (i*channels)); // ab is a similar pair - dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i]; + dist_sq_.mutable_cpu_data()[i] += 
dist_sq_pos.cpu_data()[i]; // Loss component calculated from ac dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(channels, diff_neg.cpu_data() + (i*channels), diff_neg.cpu_data() + (i*channels)); // ac is a dissimilar pair dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg.cpu_data()[i]; loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); ->>>>>>> GPU version added ->>>>>>> GPU version added +>>>>>>> restore +>>>>>>> triplet data generation and network update } loss = loss / static_cast(num_set) / Dtype(2); top[0]->mutable_cpu_data()[0] = loss; @@ -115,9 +116,21 @@ void TripletLossLayer::Forward_cpu( // a b is a similar pair for pair wise // loss accumulated by the pair wise part loss += dist_sq_pos.cpu_data()[i]; +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< 8b0f51f43a3aeac04970b675f35321c2608ed301 + for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; + dist_sq_.mutable_cpu_data()[i] += margin; +======= dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; dist_sq_.mutable_cpu_data()[i] += margin; for (int triplet = 0; triplet < num_triplets; ++triplet) { +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise +======= + for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; + dist_sq_.mutable_cpu_data()[i] += margin; +>>>>>>> triplet data generation and network update // Loss component calculated from negative part caffe_sub( dim, @@ -142,6 +155,10 @@ template void TripletLossLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { Dtype margin = this->layer_param_.triplet_loss_param().margin(); +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a +>>>>>>> triplet data generation and network update Dtype losstype = 
this->layer_param_.triplet_loss_param().losstype(); int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); int dim = bottom[0]->count()/bottom[0]->num(); @@ -193,6 +210,11 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, Dtype(1.0), bout + ((2 + num_triplets)*j + i)*dim); } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< 8b0f51f43a3aeac04970b675f35321c2608ed301 +======= +======= +>>>>>>> triplet data generation and network update } } } @@ -235,18 +257,83 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, Dtype(1.0), bout + ((2 + num_triplets)*j + i)*dim); } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise +======= +>>>>>>> triplet data generation and network update } } } } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< 8b0f51f43a3aeac04970b675f35321c2608ed301 + // BP for feat2(extracted from the closest sample) + for (int i = 1; i < 2; ++i) { + if (propagate_down[0]) { + const Dtype sign = -1; +======= +======= +>>>>>>> triplet data generation and network update // BP for negative feature used in the num_triplets triplet part for (int i = 2; i < 2 + num_triplets; ++i) { if (propagate_down[0]) { const Dtype sign = 1; +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise +======= +>>>>>>> triplet data generation and network update const Dtype alpha = sign * top[0]->cpu_diff()[0] / static_cast(num_set); for (int j = 0; j < num_set; ++j) { Dtype* bout = bottom[0]->mutable_cpu_diff(); +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< 8b0f51f43a3aeac04970b675f35321c2608ed301 + // the pair part + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + 
caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // similar pair in triplet + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + // BP for negative feature used in the num_triplets triplet part + for (int i = 2; i < 2 + num_triplets; ++i) { + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); +======= +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise +======= +>>>>>>> triplet data generation and network update caffe_sub( dim, bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference @@ -261,11 +348,32 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { // dissimilar pairs +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +======= + for (int i = 1; i < 3; ++i) { +// there must be further check to ensure the gradient calc + if (propagate_down[i]) { + const Dtype sign = (i == 2) ? 
1 : -1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(bottom[i]->num()); + int num = bottom[i]->num(); + int channels = bottom[i]->channels(); + for (int j = 0; j < num; ++j) { + Dtype* bout = bottom[i]->mutable_cpu_diff(); + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // similar pairs +>>>>>>> restore +>>>>>>> triplet data generation and network update caffe_cpu_axpby( dim, alpha, diff_neg.cpu_data() + (j*dim), Dtype(0.0), +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a +>>>>>>> triplet data generation and network update bout + ((2 + num_triplets)*j + i)*dim); } else { caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); @@ -289,10 +397,23 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, diff_pos.cpu_data() + (j*dim), Dtype(0.0), bout + ((2 + num_triplets)*j + i)*dim); +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< 8b0f51f43a3aeac04970b675f35321c2608ed301 +======= +>>>>>>> triplet data generation and network update + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; dist_sq_.mutable_cpu_data()[j] += margin; // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise +======= +>>>>>>> triplet data generation and network update // Loss component calculated from negative part caffe_sub( dim, @@ -340,10 +461,23 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, diff_pos.cpu_data() + (j*dim), Dtype(0.0), bout + ((2 + num_triplets)*j + i)*dim); +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< 
8b0f51f43a3aeac04970b675f35321c2608ed301 +======= +>>>>>>> triplet data generation and network update + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; dist_sq_.mutable_cpu_data()[j] += margin; // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise +======= +>>>>>>> triplet data generation and network update // Loss component calculated from negative part caffe_sub( dim, @@ -393,14 +527,33 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; // loss accumulated accumulated by the triplet part if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +======= + bout + (j*channels)); + // dissimilar pairs +>>>>>>> restore +>>>>>>> triplet data generation and network update caffe_cpu_axpby( dim, alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), diff_neg.cpu_data() + (j*dim), Dtype(0.0), +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 + bout + ((2 + num_triplets)*j + i)*dim); + } else { + caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); +======= +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a bout + ((2 + num_triplets)*j + i)*dim); } else { caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); +======= + bout + (j*channels)); + } else { + caffe_set(channels, Dtype(0), bout + (j*channels)); +>>>>>>> restore +>>>>>>> triplet data generation and network update } } } diff --git a/src/caffe/layers/triplet_loss_layer.cu.orig.orig b/src/caffe/layers/triplet_loss_layer.cu.orig.orig index d893949e5fc..49c2d44e9ce 100644 
--- a/src/caffe/layers/triplet_loss_layer.cu.orig.orig +++ b/src/caffe/layers/triplet_loss_layer.cu.orig.orig @@ -2,9 +2,21 @@ #include #include "caffe/layer.hpp" +<<<<<<< 0a8521567403409d70ece475762c203e38274530 #include "caffe/util/io.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" +======= +<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" +======= +#include "caffe/vision_layers.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" +>>>>>>> GPU version added +>>>>>>> GPU version added namespace caffe { @@ -12,6 +24,10 @@ template void TripletLossLayer::Forward_gpu( const vector*>& bottom, const vector*>& top) { +<<<<<<< 0a8521567403409d70ece475762c203e38274530 +======= +<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c +>>>>>>> GPU version added Dtype margin = this->layer_param_.triplet_loss_param().margin(); Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); @@ -30,11 +46,15 @@ void TripletLossLayer::Forward_gpu( dim, diff_pos.gpu_data() + i*dim, diff_pos.gpu_data() + i*dim, +<<<<<<< 0a8521567403409d70ece475762c203e38274530 <<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_pos.mutable_cpu_data() + i); ======= dist_sq_pos.mutable_gpu_data() + i); >>>>>>> debug GPU triplet loss codes for loss type 0 +======= + dist_sq_pos.mutable_cpu_data() + i); +>>>>>>> GPU version added // a b is a similar pair for pair wise // loss accumulated by the pair wise part loss += dist_sq_pos.gpu_data()[i]; @@ -48,19 +68,19 @@ void TripletLossLayer::Forward_gpu( bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + i*dim); // reference-negative -<<<<<<< e7a31554fff6f6e13d1acc02201044edf0b2e322 -<<<<<<< 
08db753eda85cf7bd42e39bdd8961688e10ecc29 -======= ->>>>>>> debug GPU triplet loss codes for loss type 0 caffe_gpu_dot( dim, diff_neg.gpu_data() + i*dim, diff_neg.gpu_data() + i*dim, +<<<<<<< 0a8521567403409d70ece475762c203e38274530 <<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_neg.mutable_cpu_data() + i); ======= dist_sq_neg.mutable_gpu_data() + i); >>>>>>> debug GPU triplet loss codes for loss type 0 +======= + dist_sq_neg.mutable_cpu_data() + i); +>>>>>>> GPU version added // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[i] -= dist_sq_neg.gpu_data()[i]; // loss accumulated accumulated by the triplet part @@ -81,11 +101,15 @@ void TripletLossLayer::Forward_gpu( dim, diff_pos.gpu_data() + i*dim, diff_pos.gpu_data() + i*dim, +<<<<<<< 0a8521567403409d70ece475762c203e38274530 <<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_pos.mutable_cpu_data() + i); ======= dist_sq_pos.mutable_gpu_data() + i); >>>>>>> debug GPU triplet loss codes for loss type 0 +======= + dist_sq_pos.mutable_cpu_data() + i); +>>>>>>> GPU version added // a b is a similar pair for pair wise // loss accumulated by the pair wise part loss += dist_sq_pos.gpu_data()[i]; @@ -98,19 +122,19 @@ void TripletLossLayer::Forward_gpu( bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + i*dim); // reference-negative -<<<<<<< e7a31554fff6f6e13d1acc02201044edf0b2e322 -<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 -======= ->>>>>>> debug GPU triplet loss codes for loss type 0 caffe_gpu_dot( dim, diff_neg.gpu_data() + i*dim, diff_neg.gpu_data() + i*dim, +<<<<<<< 0a8521567403409d70ece475762c203e38274530 <<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_neg.mutable_cpu_data() + i); ======= dist_sq_neg.mutable_gpu_data() + i); >>>>>>> debug GPU triplet loss codes for loss type 0 +======= + dist_sq_neg.mutable_cpu_data() + i); +>>>>>>> GPU version 
added // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[i] = 1 - \ dist_sq_neg.gpu_data()[i] / dist_sq_.mutable_gpu_data()[i]; @@ -121,12 +145,60 @@ void TripletLossLayer::Forward_gpu( loss = loss / static_cast(num_set) / Dtype(2); top[0]->mutable_gpu_data()[0] = loss; } +<<<<<<< 0a8521567403409d70ece475762c203e38274530 +======= +======= + int count = bottom[0]->count(); + caffe_gpu_sub( + count, + bottom[0]->gpu_data(), // a + bottom[1]->gpu_data(), // b + diff_pos.mutable_gpu_data()); // a_i-b_i + caffe_gpu_sub( + count, + bottom[0]->gpu_data(), // a + bottom[2]->gpu_data(), // c + diff_neg.mutable_gpu_data()); // a_i-c_i + caffe_gpu_powx( + count, + diff_pos.mutable_gpu_data(), // a_i-b_i + Dtype(2), + diff_sq_pos.mutable_gpu_data()); // (a_i-b_i)^2 + caffe_gpu_powx( + count, + diff_neg.mutable_gpu_data(), // a_i-c_i + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (a_i-c_i)^2 + const int channels = bottom[0]->channels(); + Dtype margin = this->layer_param_.triplet_loss_param().margin(); + Dtype loss(0.0); + // Loss component calculated from ab + for (int i = 0; i < bottom[0]->num(); ++i) { + /*dist_sq_pos.mutable_gpu_data()[i] = caffe_gpu_dot(channels, + diff_pos.gpu_data() + (i*channels), diff_pos.gpu_data() + (i*channels));*/ + // ab is a similar pair + dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.gpu_data()[i]; + // Loss component calculated from ac + /*dist_sq_neg.mutable_gpu_data()[i] = caffe_gpu_dot(channels, + diff_neg.gpu_data() + (i*channels), diff_neg.gpu_data() + (i*channels));*/ + // ac is a dissimilar pair + dist_sq_.mutable_gpu_data()[i] -= dist_sq_neg.gpu_data()[i]; + loss += std::max(margin + dist_sq_.gpu_data()[i], Dtype(0.0)); + } + loss = loss / static_cast(bottom[0]->num()) / Dtype(2); + top[0]->mutable_gpu_data()[0] = loss; +>>>>>>> GPU version added +>>>>>>> GPU version added } template void TripletLossLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { 
Dtype margin = this->layer_param_.triplet_loss_param().margin(); +<<<<<<< 0a8521567403409d70ece475762c203e38274530 +======= +<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c +>>>>>>> GPU version added Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); int dim = bottom[0]->count()/bottom[0]->num(); @@ -154,19 +226,19 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + j*dim); // reference-negative -<<<<<<< e7a31554fff6f6e13d1acc02201044edf0b2e322 -<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 -======= ->>>>>>> debug GPU triplet loss codes for loss type 0 caffe_gpu_dot( dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, +<<<<<<< 0a8521567403409d70ece475762c203e38274530 <<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_neg.mutable_cpu_data() + j); ======= dist_sq_neg.mutable_gpu_data() + j); >>>>>>> debug GPU triplet loss codes for loss type 0 +======= + dist_sq_neg.mutable_cpu_data() + j); +>>>>>>> GPU version added // Triplet loss accumulation // a and negative[triplet] is a similar pair for triplet dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; @@ -218,19 +290,19 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, // Triplet loss accumulation // a and negative[triplet] is a similar pair for triplet dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; -<<<<<<< e7a31554fff6f6e13d1acc02201044edf0b2e322 -<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 -======= ->>>>>>> debug GPU triplet loss codes for loss type 0 caffe_gpu_dot( dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, +<<<<<<< 0a8521567403409d70ece475762c203e38274530 <<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_neg.mutable_cpu_data() + j); ======= dist_sq_neg.mutable_gpu_data() + 
j); >>>>>>> debug GPU triplet loss codes for loss type 0 +======= + dist_sq_neg.mutable_cpu_data() + j); +>>>>>>> GPU version added // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { @@ -262,19 +334,19 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, // Triplet loss accumulation // a and negative[triplet] is a similar pair for triplet dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; -<<<<<<< e7a31554fff6f6e13d1acc02201044edf0b2e322 -<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 -======= ->>>>>>> debug GPU triplet loss codes for loss type 0 caffe_gpu_dot( dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, +<<<<<<< 0a8521567403409d70ece475762c203e38274530 <<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_neg.mutable_cpu_data() + j); ======= dist_sq_neg.mutable_gpu_data() + j); >>>>>>> debug GPU triplet loss codes for loss type 0 +======= + dist_sq_neg.mutable_cpu_data() + j); +>>>>>>> GPU version added // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { @@ -317,19 +389,19 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + j*dim); // reference-negative -<<<<<<< e7a31554fff6f6e13d1acc02201044edf0b2e322 -<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 -======= ->>>>>>> debug GPU triplet loss codes for loss type 0 caffe_gpu_dot( dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, +<<<<<<< 0a8521567403409d70ece475762c203e38274530 <<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_neg.mutable_cpu_data() + j); ======= dist_sq_neg.mutable_gpu_data() + j); >>>>>>> debug GPU triplet loss codes for loss type 0 
+======= + dist_sq_neg.mutable_cpu_data() + j); +>>>>>>> GPU version added // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] = 1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; @@ -379,19 +451,19 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + j*dim); // reference-negative -<<<<<<< e7a31554fff6f6e13d1acc02201044edf0b2e322 -<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 -======= ->>>>>>> debug GPU triplet loss codes for loss type 0 caffe_gpu_dot( dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, +<<<<<<< 0a8521567403409d70ece475762c203e38274530 <<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_neg.mutable_cpu_data() + j); ======= dist_sq_neg.mutable_gpu_data() + j); >>>>>>> debug GPU triplet loss codes for loss type 0 +======= + dist_sq_neg.mutable_cpu_data() + j); +>>>>>>> GPU version added // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] = 1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; @@ -426,19 +498,19 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + i)*dim, diff_neg.mutable_gpu_data() + j*dim); // reference-negative -<<<<<<< e7a31554fff6f6e13d1acc02201044edf0b2e322 -<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 -======= ->>>>>>> debug GPU triplet loss codes for loss type 0 caffe_gpu_dot( dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, +<<<<<<< 0a8521567403409d70ece475762c203e38274530 <<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_neg.mutable_cpu_data() + j); ======= dist_sq_neg.mutable_gpu_data() + j); >>>>>>> debug GPU triplet loss codes for loss type 0 +======= + dist_sq_neg.mutable_cpu_data() + j); 
+>>>>>>> GPU version added // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] = 1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; @@ -452,11 +524,83 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, bout + ((2 + num_triplets)*j + i)*dim); } else { caffe_gpu_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); +<<<<<<< 0a8521567403409d70ece475762c203e38274530 +======= +======= +// there must be further check to ensure the gradient calc + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->gpu_diff()[0] / + static_cast(bottom[0]->num()); + int num = bottom[0]->num(); + int channels = bottom[0]->channels(); + for (int j = 0; j < num; ++j) { + Dtype* bout = bottom[0]->mutable_gpu_diff(); + if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { + // similar pairs + caffe_gpu_axpby( + channels, + alpha, + diff_pos.gpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + // dissimilar pairs + caffe_gpu_axpby( + channels, + -alpha, + diff_neg.gpu_data() + (j*channels), + Dtype(1.0), + bout + (j*channels)); + } else { + caffe_set(channels, Dtype(0), bout + (j*channels)); + } + } + } + for (int i = 1; i < 3; ++i) { +// there must be further check to ensure the gradient calc + if (propagate_down[i]) { + const Dtype sign = (i == 1) ? 
-1 : 1; + const Dtype alpha = sign * top[0]->gpu_diff()[0] / + static_cast(bottom[i]->num()); + int num = bottom[i]->num(); + int channels = bottom[i]->channels(); + for (int j = 0; j < num; ++j) { + Dtype* bout = bottom[i]->mutable_gpu_diff(); + if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { + if (i == 1) { + // similar pairs + caffe_gpu_axpby( + channels, + alpha, + diff_pos.gpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + } + else { + // dissimilar pairs + caffe_gpu_axpby( + channels, + alpha, + diff_neg.gpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + } + } else { + caffe_set(channels, Dtype(0), bout + (j*channels)); +>>>>>>> GPU version added +>>>>>>> GPU version added } } } } +<<<<<<< 0a8521567403409d70ece475762c203e38274530 } +======= +<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c + } +======= +>>>>>>> GPU version added +>>>>>>> GPU version added } INSTANTIATE_LAYER_GPU_FUNCS(TripletLossLayer); diff --git a/src/caffe/net.cpp.orig.orig b/src/caffe/net.cpp.orig.orig new file mode 100644 index 00000000000..05e20ba27a4 --- /dev/null +++ b/src/caffe/net.cpp.orig.orig @@ -0,0 +1,856 @@ +#include +#include +#include +#include +#include +#include + +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/net.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/insert_splits.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/util/upgrade_proto.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +template +Net::Net(const NetParameter& param) { + Init(param); +} + +template +Net::Net(const string& param_file, Phase phase) { + NetParameter param; + ReadNetParamsFromTextFileOrDie(param_file, ¶m); + param.mutable_state()->set_phase(phase); + Init(param); +} + +template +void Net::Init(const NetParameter& in_param) { + // Set phase from the state. 
+ phase_ = in_param.state().phase(); + // Filter layers based on their include/exclude rules and + // the current NetState. + NetParameter filtered_param; + FilterNet(in_param, &filtered_param); + LOG(INFO) << "Initializing net from parameters: " << std::endl + << filtered_param.DebugString(); + // Create a copy of filtered_param with splits added where necessary. + NetParameter param; + InsertSplits(filtered_param, ¶m); + // Basically, build all the layers and set up their connections. + name_ = param.name(); + map blob_name_to_idx; + set available_blobs; + CHECK(param.input_dim_size() == 0 || param.input_shape_size() == 0) + << "Must specify either input_shape OR deprecated input_dim, not both."; + if (param.input_dim_size() > 0) { + // Deprecated 4D dimensions. + CHECK_EQ(param.input_size() * 4, param.input_dim_size()) + << "Incorrect input blob dimension specifications."; + } else { + CHECK_EQ(param.input_size(), param.input_shape_size()) + << "Exactly one input_shape must be specified per input."; + } + memory_used_ = 0; + // set the input blobs + for (int input_id = 0; input_id < param.input_size(); ++input_id) { + const int layer_id = -1; // inputs have fake layer ID -1 + AppendTop(param, layer_id, input_id, &available_blobs, &blob_name_to_idx); + } + DLOG(INFO) << "Memory required for data: " << memory_used_ * sizeof(Dtype); + // For each layer, set up its input and output + bottom_vecs_.resize(param.layer_size()); + top_vecs_.resize(param.layer_size()); + bottom_id_vecs_.resize(param.layer_size()); + param_id_vecs_.resize(param.layer_size()); + top_id_vecs_.resize(param.layer_size()); + bottom_need_backward_.resize(param.layer_size()); + for (int layer_id = 0; layer_id < param.layer_size(); ++layer_id) { + // Inherit phase from net if unset. + if (!param.layer(layer_id).has_phase()) { + param.mutable_layer(layer_id)->set_phase(phase_); + } + // Setup layer. 
+ const LayerParameter& layer_param = param.layer(layer_id); + if (layer_param.propagate_down_size() > 0) { + CHECK_EQ(layer_param.propagate_down_size(), + layer_param.bottom_size()) + << "propagate_down param must be specified " + << "either 0 or bottom_size times "; + } + layers_.push_back(LayerRegistry::CreateLayer(layer_param)); +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update + layer_names_.push_back(layer_param.name()); + LOG(INFO) << "Creating Layer " << layer_param.name(); + bool need_backward = false; + + // Figure out this layer's input and output + for (int bottom_id = 0; bottom_id < layer_param.bottom_size(); + ++bottom_id) { + const int blob_id = AppendBottom(param, layer_id, bottom_id, + &available_blobs, &blob_name_to_idx); + // If a blob needs backward, this layer should provide it. + need_backward |= blob_need_backward_[blob_id]; + } + int num_top = layer_param.top_size(); + for (int top_id = 0; top_id < num_top; ++top_id) { + AppendTop(param, layer_id, top_id, &available_blobs, &blob_name_to_idx); + } + // If the layer specifies that AutoTopBlobs() -> true and the LayerParameter + // specified fewer than the required number (as specified by + // ExactNumTopBlobs() or MinTopBlobs()), allocate them here. + Layer* layer = layers_[layer_id].get(); + if (layer->AutoTopBlobs()) { + const int needed_num_top = + std::max(layer->MinTopBlobs(), layer->ExactNumTopBlobs()); + for (; num_top < needed_num_top; ++num_top) { + // Add "anonymous" top blobs -- do not modify available_blobs or + // blob_name_to_idx as we don't want these blobs to be usable as input + // to other layers. + AppendTop(param, layer_id, num_top, NULL, NULL); + } + } + // After this layer is connected, set it up. 
+ LOG(INFO) << "Setting up " << layer_names_[layer_id]; + layers_[layer_id]->SetUp(bottom_vecs_[layer_id], top_vecs_[layer_id]); + for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) { + if (blob_loss_weights_.size() <= top_id_vecs_[layer_id][top_id]) { + blob_loss_weights_.resize(top_id_vecs_[layer_id][top_id] + 1, Dtype(0)); + } + blob_loss_weights_[top_id_vecs_[layer_id][top_id]] = layer->loss(top_id); + LOG(INFO) << "Top shape: " << top_vecs_[layer_id][top_id]->shape_string(); + if (layer->loss(top_id)) { + LOG(INFO) << " with loss weight " << layer->loss(top_id); + } + memory_used_ += top_vecs_[layer_id][top_id]->count(); + } + DLOG(INFO) << "Memory required for data: " << memory_used_ * sizeof(Dtype); + const int param_size = layer_param.param_size(); + const int num_param_blobs = layers_[layer_id]->blobs().size(); + CHECK_LE(param_size, num_param_blobs) + << "Too many params specified for layer " << layer_param.name(); + ParamSpec default_param_spec; + for (int param_id = 0; param_id < num_param_blobs; ++param_id) { + const ParamSpec* param_spec = (param_id < param_size) ? + &layer_param.param(param_id) : &default_param_spec; + const bool param_need_backward = param_spec->lr_mult() > 0; + need_backward |= param_need_backward; + layers_[layer_id]->set_param_propagate_down(param_id, + param_need_backward); + } + for (int param_id = 0; param_id < num_param_blobs; ++param_id) { + AppendParam(param, layer_id, param_id); + } + // Finally, set the backward flag + layer_need_backward_.push_back(need_backward); + if (need_backward) { + for (int top_id = 0; top_id < top_id_vecs_[layer_id].size(); ++top_id) { + blob_need_backward_[top_id_vecs_[layer_id][top_id]] = true; + } + } + } + // Go through the net backwards to determine which blobs contribute to the + // loss. We can skip backward computation for blobs that don't contribute + // to the loss. 
+ // Also checks if all bottom blobs don't need backward computation (possible + // because the skip_propagate_down param) and so we can skip bacward + // computation for the entire layer + set blobs_under_loss; + set blobs_skip_backp; + for (int layer_id = layers_.size() - 1; layer_id >= 0; --layer_id) { + bool layer_contributes_loss = false; + bool layer_skip_propagate_down = true; + for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) { + const string& blob_name = blob_names_[top_id_vecs_[layer_id][top_id]]; + if (layers_[layer_id]->loss(top_id) || + (blobs_under_loss.find(blob_name) != blobs_under_loss.end())) { + layer_contributes_loss = true; + } + if (blobs_skip_backp.find(blob_name) == blobs_skip_backp.end()) { + layer_skip_propagate_down = false; + } + if (layer_contributes_loss && !layer_skip_propagate_down) + break; + } + // If this layer can skip backward computation, also all his bottom blobs + // don't need backpropagation + if (layer_need_backward_[layer_id] && layer_skip_propagate_down) { + layer_need_backward_[layer_id] = false; + for (int bottom_id = 0; bottom_id < bottom_vecs_[layer_id].size(); + ++bottom_id) { + bottom_need_backward_[layer_id][bottom_id] = false; + } + } + if (!layer_contributes_loss) { layer_need_backward_[layer_id] = false; } + if (layer_need_backward_[layer_id]) { + LOG(INFO) << layer_names_[layer_id] << " needs backward computation."; + } else { + LOG(INFO) << layer_names_[layer_id] + << " does not need backward computation."; + } + for (int bottom_id = 0; bottom_id < bottom_vecs_[layer_id].size(); + ++bottom_id) { + if (layer_contributes_loss) { + const string& blob_name = + blob_names_[bottom_id_vecs_[layer_id][bottom_id]]; + blobs_under_loss.insert(blob_name); + } else { + bottom_need_backward_[layer_id][bottom_id] = false; + } + if (!bottom_need_backward_[layer_id][bottom_id]) { + const string& blob_name = + blob_names_[bottom_id_vecs_[layer_id][bottom_id]]; + blobs_skip_backp.insert(blob_name); + } + } + 
} + // Handle force_backward if needed. + if (param.force_backward()) { + for (int layer_id = 0; layer_id < layers_.size(); ++layer_id) { + layer_need_backward_[layer_id] = true; + for (int bottom_id = 0; + bottom_id < bottom_need_backward_[layer_id].size(); ++bottom_id) { + bottom_need_backward_[layer_id][bottom_id] = + bottom_need_backward_[layer_id][bottom_id] || + layers_[layer_id]->AllowForceBackward(bottom_id); + blob_need_backward_[bottom_id_vecs_[layer_id][bottom_id]] = + blob_need_backward_[bottom_id_vecs_[layer_id][bottom_id]] || + bottom_need_backward_[layer_id][bottom_id]; + } + for (int param_id = 0; param_id < layers_[layer_id]->blobs().size(); + ++param_id) { + layers_[layer_id]->set_param_propagate_down(param_id, true); + } + } + } + // In the end, all remaining blobs are considered output blobs. + for (set::iterator it = available_blobs.begin(); + it != available_blobs.end(); ++it) { + LOG(INFO) << "This network produces output " << *it; + net_output_blobs_.push_back(blobs_[blob_name_to_idx[*it]].get()); + net_output_blob_indices_.push_back(blob_name_to_idx[*it]); + } + for (size_t blob_id = 0; blob_id < blob_names_.size(); ++blob_id) { + blob_names_index_[blob_names_[blob_id]] = blob_id; + } + for (size_t layer_id = 0; layer_id < layer_names_.size(); ++layer_id) { + layer_names_index_[layer_names_[layer_id]] = layer_id; + } + GetLearningRateAndWeightDecay(); + debug_info_ = param.debug_info(); + LOG(INFO) << "Network initialization done."; + LOG(INFO) << "Memory required for data: " << memory_used_ * sizeof(Dtype); +} + +template +void Net::FilterNet(const NetParameter& param, + NetParameter* param_filtered) { + NetState net_state(param.state()); + param_filtered->CopyFrom(param); + param_filtered->clear_layer(); + for (int i = 0; i < param.layer_size(); ++i) { + const LayerParameter& layer_param = param.layer(i); + const string& layer_name = layer_param.name(); + CHECK(layer_param.include_size() == 0 || layer_param.exclude_size() == 0) + << 
"Specify either include rules or exclude rules; not both."; + // If no include rules are specified, the layer is included by default and + // only excluded if it meets one of the exclude rules. + bool layer_included = (layer_param.include_size() == 0); + for (int j = 0; layer_included && j < layer_param.exclude_size(); ++j) { + if (StateMeetsRule(net_state, layer_param.exclude(j), layer_name)) { + layer_included = false; + } + } + for (int j = 0; !layer_included && j < layer_param.include_size(); ++j) { + if (StateMeetsRule(net_state, layer_param.include(j), layer_name)) { + layer_included = true; + } + } + if (layer_included) { + param_filtered->add_layer()->CopyFrom(layer_param); + } + } +} + +template +bool Net::StateMeetsRule(const NetState& state, + const NetStateRule& rule, const string& layer_name) { + // Check whether the rule is broken due to phase. + if (rule.has_phase()) { + if (rule.phase() != state.phase()) { + LOG(INFO) << "The NetState phase (" << state.phase() + << ") differed from the phase (" << rule.phase() + << ") specified by a rule in layer " << layer_name; + return false; + } + } + // Check whether the rule is broken due to min level. + if (rule.has_min_level()) { + if (state.level() < rule.min_level()) { + LOG(INFO) << "The NetState level (" << state.level() + << ") is above the min_level (" << rule.min_level() + << ") specified by a rule in layer " << layer_name; + return false; + } + } + // Check whether the rule is broken due to max level. + if (rule.has_max_level()) { + if (state.level() > rule.max_level()) { + LOG(INFO) << "The NetState level (" << state.level() + << ") is above the max_level (" << rule.max_level() + << ") specified by a rule in layer " << layer_name; + return false; + } + } + // Check whether the rule is broken due to stage. The NetState must + // contain ALL of the rule's stages to meet it. + for (int i = 0; i < rule.stage_size(); ++i) { + // Check that the NetState contains the rule's ith stage. 
+ bool has_stage = false; + for (int j = 0; !has_stage && j < state.stage_size(); ++j) { + if (rule.stage(i) == state.stage(j)) { has_stage = true; } + } + if (!has_stage) { + LOG(INFO) << "The NetState did not contain stage '" << rule.stage(i) + << "' specified by a rule in layer " << layer_name; + return false; + } + } + // Check whether the rule is broken due to not_stage. The NetState must + // contain NONE of the rule's not_stages to meet it. + for (int i = 0; i < rule.not_stage_size(); ++i) { + // Check that the NetState contains the rule's ith not_stage. + bool has_stage = false; + for (int j = 0; !has_stage && j < state.stage_size(); ++j) { + if (rule.not_stage(i) == state.stage(j)) { has_stage = true; } + } + if (has_stage) { + LOG(INFO) << "The NetState contained a not_stage '" << rule.not_stage(i) + << "' specified by a rule in layer " << layer_name; + return false; + } + } + return true; +} + +// Helper for Net::Init: add a new input or top blob to the net. (Inputs have +// layer_id == -1, tops have layer_id >= 0.) +template +void Net::AppendTop(const NetParameter& param, const int layer_id, + const int top_id, set* available_blobs, + map* blob_name_to_idx) { + shared_ptr layer_param((layer_id >= 0) ? + (new LayerParameter(param.layer(layer_id))) : NULL); + const string& blob_name = layer_param ? + (layer_param->top_size() > top_id ? 
+ layer_param->top(top_id) : "(automatic)") : param.input(top_id); + // Check if we are doing in-place computation + if (blob_name_to_idx && layer_param && layer_param->bottom_size() > top_id && + blob_name == layer_param->bottom(top_id)) { + // In-place computation + LOG(INFO) << layer_param->name() << " -> " << blob_name << " (in-place)"; + top_vecs_[layer_id].push_back(blobs_[(*blob_name_to_idx)[blob_name]].get()); + top_id_vecs_[layer_id].push_back((*blob_name_to_idx)[blob_name]); + } else if (blob_name_to_idx && + blob_name_to_idx->find(blob_name) != blob_name_to_idx->end()) { + // If we are not doing in-place computation but have duplicated blobs, + // raise an error. + LOG(FATAL) << "Duplicate blobs produced by multiple sources."; + } else { + // Normal output. + if (layer_param) { + LOG(INFO) << layer_param->name() << " -> " << blob_name; + } else { + LOG(INFO) << "Input " << top_id << " -> " << blob_name; + } + shared_ptr > blob_pointer(new Blob()); + const int blob_id = blobs_.size(); + blobs_.push_back(blob_pointer); + blob_names_.push_back(blob_name); + blob_need_backward_.push_back(false); + if (blob_name_to_idx) { (*blob_name_to_idx)[blob_name] = blob_id; } + if (layer_id == -1) { + // Set the (explicitly specified) dimensions of the input blob. + if (param.input_dim_size() > 0) { + blob_pointer->Reshape(param.input_dim(top_id * 4), + param.input_dim(top_id * 4 + 1), + param.input_dim(top_id * 4 + 2), + param.input_dim(top_id * 4 + 3)); + } else { + blob_pointer->Reshape(param.input_shape(top_id)); + } + net_input_blob_indices_.push_back(blob_id); + net_input_blobs_.push_back(blob_pointer.get()); + } else { + top_id_vecs_[layer_id].push_back(blob_id); + top_vecs_[layer_id].push_back(blob_pointer.get()); + } + } + if (available_blobs) { available_blobs->insert(blob_name); } +} + +// Helper for Net::Init: add a new bottom blob to the net. 
+template +int Net::AppendBottom(const NetParameter& param, const int layer_id, + const int bottom_id, set* available_blobs, + map* blob_name_to_idx) { + const LayerParameter& layer_param = param.layer(layer_id); + const string& blob_name = layer_param.bottom(bottom_id); + if (available_blobs->find(blob_name) == available_blobs->end()) { + LOG(FATAL) << "Unknown blob input " << blob_name + << " (at index " << bottom_id << ") to layer " << layer_id; + } + const int blob_id = (*blob_name_to_idx)[blob_name]; + LOG(INFO) << layer_names_[layer_id] << " <- " << blob_name; + bottom_vecs_[layer_id].push_back(blobs_[blob_id].get()); + bottom_id_vecs_[layer_id].push_back(blob_id); + available_blobs->erase(blob_name); + bool propagate_down = true; + // Check if the backpropagation on bottom_id should be skipped + if (layer_param.propagate_down_size() > 0) + propagate_down = layer_param.propagate_down(bottom_id); + const bool need_backward = blob_need_backward_[blob_id] && + propagate_down; + bottom_need_backward_[layer_id].push_back(need_backward); + return blob_id; +} + +template +void Net::AppendParam(const NetParameter& param, const int layer_id, + const int param_id) { + const LayerParameter& layer_param = layers_[layer_id]->layer_param(); + const int param_size = layer_param.param_size(); + string param_name = + (param_size > param_id) ? 
layer_param.param(param_id).name() : ""; + if (param_name.size()) { + param_display_names_.push_back(param_name); + } else { + ostringstream param_display_name; + param_display_name << param_id; + param_display_names_.push_back(param_display_name.str()); + } + const int net_param_id = params_.size(); + params_.push_back(layers_[layer_id]->blobs()[param_id]); + param_id_vecs_[layer_id].push_back(net_param_id); + param_layer_indices_.push_back(make_pair(layer_id, param_id)); + if (!param_size || !param_name.size() || (param_name.size() && + param_names_index_.find(param_name) == param_names_index_.end())) { + // This layer "owns" this parameter blob -- it is either anonymous + // (i.e., not given a param_name) or explicitly given a name that we + // haven't already seen. + param_owners_.push_back(-1); + if (param_name.size()) { + param_names_index_[param_name] = net_param_id; + } + } else { + // Named param blob with name we've seen before: share params + const int owner_net_param_id = param_names_index_[param_name]; + param_owners_.push_back(owner_net_param_id); + const pair& owner_index = + param_layer_indices_[owner_net_param_id]; + const int owner_layer_id = owner_index.first; + const int owner_param_id = owner_index.second; + LOG(INFO) << "Sharing parameters '" << param_name << "' owned by " + << "layer '" << layer_names_[owner_layer_id] << "', param " + << "index " << owner_param_id; + Blob* this_blob = layers_[layer_id]->blobs()[param_id].get(); + Blob* owner_blob = + layers_[owner_layer_id]->blobs()[owner_param_id].get(); + const int param_size = layer_param.param_size(); + if (param_size > param_id && (layer_param.param(param_id).share_mode() == + ParamSpec_DimCheckMode_PERMISSIVE)) { + // Permissive dimension checking -- only check counts are the same. + CHECK_EQ(this_blob->count(), owner_blob->count()) + << "Shared parameter blobs must have the same count."; + } else { + // Strict dimension checking -- all dims must be the same. 
+ CHECK(this_blob->shape() == owner_blob->shape()); + } + layers_[layer_id]->blobs()[param_id]->ShareData( + *layers_[owner_layer_id]->blobs()[owner_param_id]); + } +} + +template +void Net::GetLearningRateAndWeightDecay() { + LOG(INFO) << "Collecting Learning Rate and Weight Decay."; + ParamSpec default_param_spec; + for (int i = 0; i < layers_.size(); ++i) { + vector > >& layer_blobs = layers_[i]->blobs(); + for (int j = 0; j < layer_blobs.size(); ++j) { + const ParamSpec* param_spec = + (layers_[i]->layer_param().param_size() > j) ? + &layers_[i]->layer_param().param(j) : &default_param_spec; + params_lr_.push_back(param_spec->lr_mult()); + params_weight_decay_.push_back(param_spec->decay_mult()); + } + } +} + +template +Dtype Net::ForwardFromTo(int start, int end) { + CHECK_GE(start, 0); + CHECK_LT(end, layers_.size()); + Dtype loss = 0; + if (debug_info_) { + for (int i = 0; i < net_input_blobs_.size(); ++i) { + InputDebugInfo(i); + } + } + for (int i = start; i <= end; ++i) { + // LOG(ERROR) << "Forwarding " << layer_names_[i]; + Dtype layer_loss = layers_[i]->Forward(bottom_vecs_[i], top_vecs_[i]); + loss += layer_loss; + if (debug_info_) { ForwardDebugInfo(i); } + } + return loss; +} + +template +Dtype Net::ForwardFrom(int start) { + return ForwardFromTo(start, layers_.size() - 1); +} + +template +Dtype Net::ForwardTo(int end) { + return ForwardFromTo(0, end); +} + +template +const vector*>& Net::ForwardPrefilled(Dtype* loss) { + if (loss != NULL) { + *loss = ForwardFromTo(0, layers_.size() - 1); + } else { + ForwardFromTo(0, layers_.size() - 1); + } + return net_output_blobs_; +} + +template +const vector*>& Net::Forward( + const vector*> & bottom, Dtype* loss) { + // Copy bottom to internal bottom + for (int i = 0; i < bottom.size(); ++i) { + net_input_blobs_[i]->CopyFrom(*bottom[i]); + } + return ForwardPrefilled(loss); +} + +template +string Net::Forward(const string& input_blob_protos, Dtype* loss) { + BlobProtoVector blob_proto_vec; + if 
(net_input_blobs_.size()) { + blob_proto_vec.ParseFromString(input_blob_protos); + CHECK_EQ(blob_proto_vec.blobs_size(), net_input_blobs_.size()) + << "Incorrect input size."; + for (int i = 0; i < blob_proto_vec.blobs_size(); ++i) { + net_input_blobs_[i]->FromProto(blob_proto_vec.blobs(i)); + } + } + ForwardPrefilled(loss); + blob_proto_vec.Clear(); + for (int i = 0; i < net_output_blobs_.size(); ++i) { + net_output_blobs_[i]->ToProto(blob_proto_vec.add_blobs()); + } + string output; + blob_proto_vec.SerializeToString(&output); + return output; +} + +template +void Net::BackwardFromTo(int start, int end) { + CHECK_GE(end, 0); + CHECK_LT(start, layers_.size()); + for (int i = start; i >= end; --i) { + if (layer_need_backward_[i]) { + layers_[i]->Backward( + top_vecs_[i], bottom_need_backward_[i], bottom_vecs_[i]); + if (debug_info_) { BackwardDebugInfo(i); } + } + } +} + +template +void Net::InputDebugInfo(const int input_id) { + const Blob& blob = *net_input_blobs_[input_id]; + const string& blob_name = blob_names_[net_input_blob_indices_[input_id]]; + const Dtype data_abs_val_mean = blob.asum_data() / blob.count(); + LOG(INFO) << " [Forward] " + << "Input " << blob_name << " data: " << data_abs_val_mean; +} + +template +void Net::ForwardDebugInfo(const int layer_id) { + for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) { + const Blob& blob = *top_vecs_[layer_id][top_id]; + const string& blob_name = blob_names_[top_id_vecs_[layer_id][top_id]]; + const Dtype data_abs_val_mean = blob.asum_data() / blob.count(); + LOG(INFO) << " [Forward] " + << "Layer " << layer_names_[layer_id] << ", top blob " << blob_name + << " data: " << data_abs_val_mean; + } + for (int param_id = 0; param_id < layers_[layer_id]->blobs().size(); + ++param_id) { + const Blob& blob = *layers_[layer_id]->blobs()[param_id]; + const int net_param_id = param_id_vecs_[layer_id][param_id]; + const string& blob_name = param_display_names_[net_param_id]; + const Dtype data_abs_val_mean 
= blob.asum_data() / blob.count(); + LOG(INFO) << " [Forward] " + << "Layer " << layer_names_[layer_id] << ", param blob " << blob_name + << " data: " << data_abs_val_mean; + } +} + +template +void Net::BackwardDebugInfo(const int layer_id) { + const vector*>& bottom_vec = bottom_vecs_[layer_id]; + for (int bottom_id = 0; bottom_id < bottom_vec.size(); ++bottom_id) { + if (!bottom_need_backward_[layer_id][bottom_id]) { continue; } + const Blob& blob = *bottom_vec[bottom_id]; + const string& blob_name = blob_names_[bottom_id_vecs_[layer_id][bottom_id]]; + const Dtype diff_abs_val_mean = blob.asum_diff() / blob.count(); + LOG(INFO) << " [Backward] " + << "Layer " << layer_names_[layer_id] << ", bottom blob " << blob_name + << " diff: " << diff_abs_val_mean; + } + for (int param_id = 0; param_id < layers_[layer_id]->blobs().size(); + ++param_id) { + if (!layers_[layer_id]->param_propagate_down(param_id)) { continue; } + const Blob& blob = *layers_[layer_id]->blobs()[param_id]; + const Dtype diff_abs_val_mean = blob.asum_diff() / blob.count(); + LOG(INFO) << " [Backward] " + << "Layer " << layer_names_[layer_id] << ", param blob " << param_id + << " diff: " << diff_abs_val_mean; + } +} + +template +void Net::UpdateDebugInfo(const int param_id) { + const Blob& blob = *params_[param_id]; + const int param_owner = param_owners_[param_id]; + const string& layer_name = layer_names_[param_layer_indices_[param_id].first]; + const string& param_display_name = param_display_names_[param_id]; + const Dtype diff_abs_val_mean = blob.asum_diff() / blob.count(); + if (param_owner < 0) { + const Dtype data_abs_val_mean = blob.asum_data() / blob.count(); + LOG(INFO) << " [Update] Layer " << layer_name + << ", param " << param_display_name + << " data: " << data_abs_val_mean << "; diff: " << diff_abs_val_mean; + } else { + const string& owner_layer_name = + layer_names_[param_layer_indices_[param_owner].first]; + LOG(INFO) << " [Update] Layer " << layer_name + << ", param blob " << 
param_display_name + << " (owned by layer " << owner_layer_name << ", " + << "param " << param_display_names_[param_owners_[param_id]] << ")" + << " diff: " << diff_abs_val_mean; + } +} + +template +void Net::ShareTrainedLayersWith(const Net* other) { + int num_source_layers = other->layers().size(); + for (int i = 0; i < num_source_layers; ++i) { + Layer* source_layer = other->layers()[i].get(); + const string& source_layer_name = other->layer_names()[i]; + int target_layer_id = 0; + while (target_layer_id != layer_names_.size() && + layer_names_[target_layer_id] != source_layer_name) { + ++target_layer_id; + } + if (target_layer_id == layer_names_.size()) { + DLOG(INFO) << "Ignoring source layer " << source_layer_name; + continue; + } + DLOG(INFO) << "Copying source layer " << source_layer_name; + vector > >& target_blobs = + layers_[target_layer_id]->blobs(); + CHECK_EQ(target_blobs.size(), source_layer->blobs().size()) + << "Incompatible number of blobs for layer " << source_layer_name; + for (int j = 0; j < target_blobs.size(); ++j) { + Blob* source_blob = source_layer->blobs()[j].get(); + CHECK(target_blobs[j]->shape() == source_blob->shape()); + target_blobs[j]->ShareData(*source_blob); + } + } +} + +template +void Net::BackwardFrom(int start) { + BackwardFromTo(start, 0); +} + +template +void Net::BackwardTo(int end) { + BackwardFromTo(layers_.size() - 1, end); +} + +template +void Net::Backward() { + BackwardFromTo(layers_.size() - 1, 0); + if (debug_info_) { + Dtype asum_data = 0, asum_diff = 0, sumsq_data = 0, sumsq_diff = 0; + for (int i = 0; i < params_.size(); ++i) { + if (param_owners_[i] >= 0) { continue; } + asum_data += params_[i]->asum_data(); + asum_diff += params_[i]->asum_diff(); + sumsq_data += params_[i]->sumsq_data(); + sumsq_diff += params_[i]->sumsq_diff(); + } + const Dtype l2norm_data = std::sqrt(sumsq_data); + const Dtype l2norm_diff = std::sqrt(sumsq_diff); + LOG(ERROR) << " [Backward] All net params (data, diff): " + << "L1 norm = (" 
<< asum_data << ", " << asum_diff << "); " + << "L2 norm = (" << l2norm_data << ", " << l2norm_diff << ")"; + } +} + +template +void Net::Reshape() { + for (int i = 0; i < layers_.size(); ++i) { + layers_[i]->Reshape(bottom_vecs_[i], top_vecs_[i]); + } +} + +template +void Net::CopyTrainedLayersFrom(const NetParameter& param) { + int num_source_layers = param.layer_size(); + for (int i = 0; i < num_source_layers; ++i) { + const LayerParameter& source_layer = param.layer(i); + const string& source_layer_name = source_layer.name(); + int target_layer_id = 0; + while (target_layer_id != layer_names_.size() && + layer_names_[target_layer_id] != source_layer_name) { + ++target_layer_id; + } + if (target_layer_id == layer_names_.size()) { + DLOG(INFO) << "Ignoring source layer " << source_layer_name; + continue; + } + DLOG(INFO) << "Copying source layer " << source_layer_name; + vector > >& target_blobs = + layers_[target_layer_id]->blobs(); + CHECK_EQ(target_blobs.size(), source_layer.blobs_size()) + << "Incompatible number of blobs for layer " << source_layer_name; + for (int j = 0; j < target_blobs.size(); ++j) { + const bool kReshape = false; + target_blobs[j]->FromProto(source_layer.blobs(j), kReshape); + } + } +} + +template +void Net::CopyTrainedLayersFrom(const string trained_filename) { + NetParameter param; + ReadNetParamsFromBinaryFileOrDie(trained_filename, ¶m); + CopyTrainedLayersFrom(param); +} + +template +void Net::ToProto(NetParameter* param, bool write_diff) const { + param->Clear(); + param->set_name(name_); + // Add bottom and top + for (int i = 0; i < net_input_blob_indices_.size(); ++i) { + param->add_input(blob_names_[net_input_blob_indices_[i]]); + } + DLOG(INFO) << "Serializing " << layers_.size() << " layers"; + for (int i = 0; i < layers_.size(); ++i) { + LayerParameter* layer_param = param->add_layer(); + for (int j = 0; j < bottom_id_vecs_[i].size(); ++j) { + layer_param->add_bottom(blob_names_[bottom_id_vecs_[i][j]]); + } + for (int j = 0; j 
< top_id_vecs_[i].size(); ++j) { + layer_param->add_top(blob_names_[top_id_vecs_[i][j]]); + } + layers_[i]->ToProto(layer_param, write_diff); + } +} + +template +void Net::Update() { + // First, accumulate the diffs of any shared parameters into their owner's + // diff. (Assumes that the learning rate, weight decay, etc. have already been + // accounted for in the current diff.) + for (int i = 0; i < params_.size(); ++i) { + if (param_owners_[i] < 0) { continue; } + if (debug_info_) { UpdateDebugInfo(i); } + const int count = params_[i]->count(); + const Dtype* this_diff; + Dtype* owner_diff; + switch (Caffe::mode()) { + case Caffe::CPU: + this_diff = params_[i]->cpu_diff(); + owner_diff = params_[param_owners_[i]]->mutable_cpu_diff(); + caffe_add(count, this_diff, owner_diff, owner_diff); + break; + case Caffe::GPU: +#ifndef CPU_ONLY + this_diff = params_[i]->gpu_diff(); + owner_diff = params_[param_owners_[i]]->mutable_gpu_diff(); + caffe_gpu_add(count, this_diff, owner_diff, owner_diff); +#else + NO_GPU; +#endif + break; + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } + } + // Now, update the owned parameters. 
+ for (int i = 0; i < params_.size(); ++i) { + if (param_owners_[i] >= 0) { continue; } + if (debug_info_) { UpdateDebugInfo(i); } + params_[i]->Update(); + } +} + +template +bool Net::has_blob(const string& blob_name) const { + return blob_names_index_.find(blob_name) != blob_names_index_.end(); +} + +template +const shared_ptr > Net::blob_by_name( + const string& blob_name) const { + shared_ptr > blob_ptr; + if (has_blob(blob_name)) { + blob_ptr = blobs_[blob_names_index_.find(blob_name)->second]; + } else { + blob_ptr.reset((Blob*)(NULL)); + LOG(WARNING) << "Unknown blob name " << blob_name; + } + return blob_ptr; +} + +template +bool Net::has_layer(const string& layer_name) const { + return layer_names_index_.find(layer_name) != layer_names_index_.end(); +} + +template +const shared_ptr > Net::layer_by_name( + const string& layer_name) const { + shared_ptr > layer_ptr; + if (has_layer(layer_name)) { + layer_ptr = layers_[layer_names_index_.find(layer_name)->second]; + } else { + layer_ptr.reset((Layer*)(NULL)); + LOG(WARNING) << "Unknown layer name " << layer_name; + } + return layer_ptr; +} + +INSTANTIATE_CLASS(Net); + +} // namespace caffe diff --git a/src/caffe/proto/caffe.proto.orig b/src/caffe/proto/caffe.proto.orig index 5b0f6ce1b0a..75380fc9094 100644 --- a/src/caffe/proto/caffe.proto.orig +++ b/src/caffe/proto/caffe.proto.orig @@ -11,8 +11,11 @@ message BlobProto { optional BlobShape shape = 7; repeated float data = 5 [packed = true]; repeated float diff = 6 [packed = true]; +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= repeated double double_data = 8 [packed = true]; repeated double double_diff = 9 [packed = true]; +>>>>>>> triplet data generation and network update // 4D dimensions -- deprecated. Use "shape" instead. optional int32 num = 1 [default = 0]; @@ -51,6 +54,7 @@ message FillerParameter { // The expected number of non-zero output weights for a given input in // Gaussian filler -- the default -1 means don't perform sparsification. 
optional int32 sparse = 7 [default = -1]; +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 // Normalize the filler variance by fan_in, fan_out, or their average. // Applies to 'xavier' and 'msra' fillers. enum VarianceNorm { @@ -59,6 +63,8 @@ message FillerParameter { AVERAGE = 2; } optional VarianceNorm variance_norm = 8 [default = FAN_IN]; +======= +>>>>>>> triplet data generation and network update } message NetParameter { @@ -98,7 +104,15 @@ message NetParameter { // NOTE // Update the next available ID when you add a new SolverParameter field. // +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +// SolverParameter next available ID: 37 (last added: iter_size) +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 // SolverParameter next available ID: 40 (last added: momentum2) +======= +// SolverParameter next available ID: 36 (last added: clip_gradients) +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update message SolverParameter { ////////////////////////////////////////////////////////////////////////////// // Specifying the train and test networks @@ -151,6 +165,12 @@ message SolverParameter { // Display the loss averaged over the last average_loss iterations optional int32 average_loss = 33 [default = 1]; optional int32 max_iter = 7; // the maximum number of iterations +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 + // accumulate gradients over `iter_size` x `batch_size` instances + optional int32 iter_size = 36 [default = 1]; + optional string lr_policy = 8; // The learning rate decay policy. +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 // accumulate gradients over `iter_size` x `batch_size` instances optional int32 iter_size = 36 [default = 1]; @@ -170,6 +190,10 @@ message SolverParameter { // where base_lr, max_iter, gamma, step, stepvalue and power are defined // in the solver parameter protocol buffer, and iter is the current iteration. 
optional string lr_policy = 8; +======= + optional string lr_policy = 8; // The learning rate decay policy. +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update optional float gamma = 9; // The parameter to compute the learning rate. optional float power = 10; // The parameter to compute the learning rate. optional float momentum = 11; // The momentum value. @@ -191,11 +215,14 @@ message SolverParameter { // whether to snapshot diff in the results or not. Snapshotting diff will help // debugging but the final protocol buffer size will be much larger. optional bool snapshot_diff = 16 [default = false]; +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= enum SnapshotFormat { HDF5 = 0; BINARYPROTO = 1; } optional SnapshotFormat snapshot_format = 37 [default = BINARYPROTO]; +>>>>>>> triplet data generation and network update // the mode solver will use: 0 for CPU and 1 for GPU. Use GPU in default. enum SolverMode { CPU = 0; @@ -214,6 +241,12 @@ message SolverParameter { SGD = 0; NESTEROV = 1; ADAGRAD = 2; +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 + } + optional SolverType solver_type = 30 [default = SGD]; + // numerical stability for AdaGrad + optional float delta = 31 [default = 1e-8]; +======= RMSPROP = 3; ADADELTA = 4; ADAM = 5; @@ -227,6 +260,7 @@ message SolverParameter { // RMSProp decay value // MeanSquare(t) = rms_decay*MeanSquare(t-1) + (1-rms_decay)*SquareGradient(t) optional float rms_decay = 38; +>>>>>>> triplet data generation and network update // If true, print information about the state of the net that may help with // debugging learning problems. @@ -301,7 +335,19 @@ message ParamSpec { // NOTE // Update the next available ID when you add a new LayerParameter field. 
// +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< 90c50a1b3e5527cfa0d92174b79cb05438c5302e +// LayerParameter next available layer-specific ID: 137 (last added: reduction_param) +======= +// LayerParameter next available layer-specific ID: 139 (last added: tile_param) +>>>>>>> Add TileLayer +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 // LayerParameter next available layer-specific ID: 139 (last added: tile_param) +======= +// LayerParameter next available layer-specific ID: 132 (last added: prelu_param) +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update message LayerParameter { optional string name = 1; // the layer name optional string type = 2; // the layer type @@ -322,10 +368,19 @@ message LayerParameter { // The blobs containing the numeric parameters of the layer. repeated BlobProto blobs = 7; +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +>>>>>>> triplet data generation and network update // Specifies on which bottoms the backpropagation should be skipped. // The size must be either 0 or equal to the number of bottoms. repeated bool propagate_down = 11; +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +======= +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update // Rules controlling whether and when a layer is included in the network, // based on the current NetState. 
You may specify a non-zero number of rules @@ -357,16 +412,23 @@ message LayerParameter { optional DropoutParameter dropout_param = 108; optional DummyDataParameter dummy_data_param = 109; optional EltwiseParameter eltwise_param = 110; - optional EmbedParameter embed_param = 137; +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 optional ExpParameter exp_param = 111; optional FlattenParameter flatten_param = 135; +======= + optional EmbedParameter embed_param = 137; + optional ExpParameter exp_param = 111; +>>>>>>> triplet data generation and network update optional HDF5DataParameter hdf5_data_param = 112; optional HDF5OutputParameter hdf5_output_param = 113; optional HingeLossParameter hinge_loss_param = 114; optional ImageDataParameter image_data_param = 115; optional InfogainLossParameter infogain_loss_param = 116; optional InnerProductParameter inner_product_param = 117; +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 optional LogParameter log_param = 134; +======= +>>>>>>> triplet data generation and network update optional LRNParameter lrn_param = 118; optional MemoryDataParameter memory_data_param = 119; optional MVNParameter mvn_param = 120; @@ -374,21 +436,31 @@ message LayerParameter { optional PowerParameter power_param = 122; optional PReLUParameter prelu_param = 131; optional PythonParameter python_param = 130; +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 optional ReductionParameter reduction_param = 136; optional ReLUParameter relu_param = 123; optional ReshapeParameter reshape_param = 133; optional SigmoidParameter sigmoid_param = 124; optional SoftmaxParameter softmax_param = 125; optional SPPParameter spp_param = 132; +======= + optional ReLUParameter relu_param = 123; + optional SigmoidParameter sigmoid_param = 124; + optional SoftmaxParameter softmax_param = 125; +>>>>>>> triplet data generation and network update optional SliceParameter slice_param = 126; optional TanHParameter tanh_param = 127; optional ThresholdParameter threshold_param 
= 128; optional TileParameter tile_param = 138; optional WindowDataParameter window_data_param = 129; -<<<<<<< 9a40f059e6e96f739ad502aae165c4bfdbf1f369 +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 + optional TripletLossParameter triplet_loss_param = 139; +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 optional TripletLossParameter triplet_loss_param = 139; ======= - optional TripletLossParameter triplet_loss_param = 137; + optional TripletLossParameter triplet_loss_param = 132; +>>>>>>> triplet data generation and network update >>>>>>> triplet data generation and network update } @@ -409,10 +481,13 @@ message TransformationParameter { // or can be repeated the same number of times as channels // (would subtract them from the corresponding channel) repeated float mean_value = 5; +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 // Force the decoded image to have 3 color channels. optional bool force_color = 6 [default = false]; // Force the decoded image to have 1 color channels. optional bool force_gray = 7 [default = false]; +======= +>>>>>>> triplet data generation and network update } // Message that stores parameters shared by loss layers @@ -424,9 +499,13 @@ message LossParameter { optional bool normalize = 2 [default = true]; } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 // Messages that store parameters used by individual layer types follow, in // alphabetical order. +======= +// Message that stores parameters used by AccuracyLayer +>>>>>>> triplet data generation and network update message AccuracyParameter { // When computing accuracy, count as correct by comparing the true label to // the top k scoring classes. 
By default, only compare to the top scoring @@ -444,12 +523,20 @@ message AccuracyParameter { optional int32 ignore_label = 3; } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +// Message that stores parameters used by ArgMaxLayer +>>>>>>> triplet data generation and network update message ArgMaxParameter { // If true produce pairs (argmax, maxval) optional bool out_max_val = 1 [default = false]; optional uint32 top_k = 2 [default = 1]; } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +// Message that stores parameters used by ConcatLayer +>>>>>>> triplet data generation and network update message ConcatParameter { // The axis along which to concatenate -- may be negative to index from the // end (e.g., -1 for the last axis). Other axes must have the @@ -461,9 +548,17 @@ message ConcatParameter { optional uint32 concat_dim = 1 [default = 1]; } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 message ContrastiveLossParameter { // margin for dissimilar pair optional float margin = 1 [default = 1.0]; +======= +// Message that stores parameters used by ContrastiveLossLayer +message ContrastiveLossParameter { + //margin for dissimilar pair + optional float margin = 1 [default = 1.0]; +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +>>>>>>> triplet data generation and network update // The first implementation of this cost did not exactly match the cost of // Hadsell et al 2006 -- using (margin - d^2) instead of (margin - d)^2. 
// legacy_version = false (the default) uses (margin - d)^2 as proposed in the @@ -478,13 +573,19 @@ message TripletLossParameter { optional float margin = 1 [default = 1.0]; optional uint32 losstype = 2 [default = 1]; optional uint32 num_triplets = 3 [default = 3]; +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 } +======= +======= +>>>>>>> triplet data generation and network update +} message TripletLossParameter { //margin for negative triplet optional float margin = 1 [default = 1.0]; } - +// Message that stores parameters used by ConvolutionLayer +>>>>>>> triplet data generation and network update message ConvolutionParameter { optional uint32 num_output = 1; // The number of outputs for the layer optional bool bias_term = 2 [default = true]; // whether to have bias terms @@ -510,6 +611,10 @@ message ConvolutionParameter { optional Engine engine = 15 [default = DEFAULT]; } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +// Message that stores parameters used by DataLayer +>>>>>>> triplet data generation and network update message DataParameter { enum DB { LEVELDB = 0; @@ -523,7 +628,10 @@ message DataParameter { // to avoid all asynchronous sgd clients to start at the same point. The skip // point would be set as rand_skip * rand(0,1). Note that rand_skip should not // be larger than the number of keys in the database. +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= // DEPRECATED. Each solver accesses a different subset of the database. +>>>>>>> triplet data generation and network update optional uint32 rand_skip = 7 [default = 0]; optional DB backend = 8 [default = LEVELDB]; // DEPRECATED. See TransformationParameter. 
For data pre-processing, we can do @@ -539,15 +647,25 @@ message DataParameter { optional bool mirror = 6 [default = false]; // Force the encoded image to have 3 color channels optional bool force_encoded_color = 9 [default = false]; +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +} + +======= // Prefetch queue (Number of batches to prefetch to host memory, increase if // data access bandwidth varies). optional uint32 prefetch = 10 [default = 4]; } +// Message that stores parameters used by DropoutLayer +>>>>>>> triplet data generation and network update message DropoutParameter { optional float dropout_ratio = 1 [default = 0.5]; // dropout ratio } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +// Message that stores parameters used by DummyDataLayer. +>>>>>>> triplet data generation and network update // DummyDataLayer fills any number of arbitrarily shaped blobs with random // (or constant) data generated by "Fillers" (see "message FillerParameter"). message DummyDataParameter { @@ -567,6 +685,10 @@ message DummyDataParameter { repeated uint32 width = 5; } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +// Message that stores parameters used by EltwiseLayer +>>>>>>> triplet data generation and network update message EltwiseParameter { enum EltwiseOp { PROD = 0; @@ -581,6 +703,9 @@ message EltwiseParameter { optional bool stable_prod_grad = 3 [default = true]; } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 // Message that stores parameters used by EmbedLayer message EmbedParameter { optional uint32 num_output = 1; // The number of outputs for the layer @@ -595,7 +720,10 @@ message EmbedParameter { } +======= +>>>>>>> triplet data generation and network update // Message that stores parameters used by ExpLayer +>>>>>>> triplet data generation and network update message ExpParameter { // ExpLayer computes outputs y = base ^ (shift + scale * x), for base > 0. 
// Or if base is set to the default (-1), base is set to e, @@ -605,6 +733,7 @@ message ExpParameter { optional float shift = 3 [default = 0.0]; } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 /// Message that stores parameters used by FlattenLayer message FlattenParameter { // The first axis to flatten: all preceding axes are retained in the output. @@ -617,6 +746,8 @@ message FlattenParameter { optional int32 end_axis = 2 [default = -1]; } +======= +>>>>>>> triplet data generation and network update // Message that stores parameters used by HDF5DataLayer message HDF5DataParameter { // Specify the data source. @@ -632,6 +763,10 @@ message HDF5DataParameter { optional bool shuffle = 3 [default = false]; } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +// Message that stores parameters used by HDF5OutputLayer +>>>>>>> triplet data generation and network update message HDF5OutputParameter { optional string file_name = 1; } @@ -645,6 +780,10 @@ message HingeLossParameter { optional Norm norm = 1 [default = L1]; } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +// Message that stores parameters used by ImageDataLayer +>>>>>>> triplet data generation and network update message ImageDataParameter { // Specify the data source. optional string source = 1; @@ -676,11 +815,19 @@ message ImageDataParameter { optional string root_folder = 12 [default = ""]; } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +// Message that stores parameters InfogainLossLayer +>>>>>>> triplet data generation and network update message InfogainLossParameter { // Specify the infogain matrix source. 
optional string source = 1; } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +// Message that stores parameters used by InnerProductLayer +>>>>>>> triplet data generation and network update message InnerProductParameter { optional uint32 num_output = 1; // The number of outputs for the layer optional bool bias_term = 2 [default = true]; // whether to have bias terms @@ -693,6 +840,7 @@ message InnerProductParameter { optional int32 axis = 5 [default = 1]; } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 // Message that stores parameters used by LogLayer message LogParameter { // LogLayer computes outputs y = log_base(shift + scale * x), for base > 0. @@ -703,6 +851,8 @@ message LogParameter { optional float shift = 3 [default = 0.0]; } +======= +>>>>>>> triplet data generation and network update // Message that stores parameters used by LRNLayer message LRNParameter { optional uint32 local_size = 1 [default = 5]; @@ -716,6 +866,10 @@ message LRNParameter { optional float k = 5 [default = 1.]; } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +// Message that stores parameters used by MemoryDataLayer +>>>>>>> triplet data generation and network update message MemoryDataParameter { optional uint32 batch_size = 1; optional uint32 channels = 2; @@ -723,17 +877,27 @@ message MemoryDataParameter { optional uint32 width = 4; } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +// Message that stores parameters used by MVNLayer +>>>>>>> triplet data generation and network update message MVNParameter { // This parameter can be set to false to normalize mean only optional bool normalize_variance = 1 [default = true]; // This parameter can be set to true to perform DNN-like MVN optional bool across_channels = 2 [default = false]; +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 // Epsilon for not dividing by zero while normalizing variance optional float eps = 3 [default = 1e-9]; } +======= +} + +// Message that stores parameters used by 
PoolingLayer +>>>>>>> triplet data generation and network update message PoolingParameter { enum PoolMethod { MAX = 0; @@ -763,6 +927,10 @@ message PoolingParameter { optional bool global_pooling = 12 [default = false]; } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +// Message that stores parameters used by PowerLayer +>>>>>>> triplet data generation and network update message PowerParameter { // PowerLayer computes outputs y = (shift + scale * x) ^ power. optional float power = 1 [default = 1.0]; @@ -770,18 +938,10 @@ message PowerParameter { optional float shift = 3 [default = 0.0]; } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 message PythonParameter { optional string module = 1; optional string layer = 2; - // This value is set to the attribute `param_str` of the `PythonLayer` object - // in Python before calling the `setup()` method. This could be a number, - // string, dictionary in Python dict format, JSON, etc. You may parse this - // string in `setup` method and use it in `forward` and `backward`. - optional string param_str = 3 [default = '']; - // Whether this PythonLayer is shared among worker solvers during data parallelism. - // If true, each worker solver sequentially run forward from this layer. - // This value should be set true if you are using it as a data layer. - optional bool share_in_parallel = 4 [default = false]; } // Message that stores parameters used by ReductionLayer @@ -811,6 +971,21 @@ message ReductionParameter { optional int32 axis = 2 [default = 0]; optional float coeff = 3 [default = 1.0]; // coefficient for output +======= +// Message that stores parameters used by PythonLayer +message PythonParameter { + optional string module = 1; + optional string layer = 2; + // This value is set to the attribute `param_str` of the `PythonLayer` object + // in Python before calling the `setup()` method. This could be a number, + // string, dictionary in Python dict format, JSON, etc. 
You may parse this + // string in `setup` method and use it in `forward` and `backward`. + optional string param_str = 3 [default = '']; + // Whether this PythonLayer is shared among worker solvers during data parallelism. + // If true, each worker solver sequentially run forward from this layer. + // This value should be set true if you are using it as a data layer. + optional bool share_in_parallel = 4 [default = false]; +>>>>>>> triplet data generation and network update } // Message that stores parameters used by ReLULayer @@ -829,6 +1004,7 @@ message ReLUParameter { optional Engine engine = 2 [default = DEFAULT]; } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 message ReshapeParameter { // Specify the output dimensions. If some of the dimensions are set to 0, // the corresponding dimension from the bottom layer is used (unchanged). @@ -893,6 +1069,9 @@ message ReshapeParameter { optional int32 num_axes = 3 [default = -1]; } +======= +// Message that stores parameters used by SigmoidLayer +>>>>>>> triplet data generation and network update message SigmoidParameter { enum Engine { DEFAULT = 0; @@ -902,6 +1081,10 @@ message SigmoidParameter { optional Engine engine = 1 [default = DEFAULT]; } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +// Message that stores parameters used by SliceLayer +>>>>>>> triplet data generation and network update message SliceParameter { // The axis along which to slice -- may be negative to index from the end // (e.g., -1 for the last axis). 
@@ -928,6 +1111,10 @@ message SoftmaxParameter { optional int32 axis = 2 [default = 1]; } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +// Message that stores parameters used by TanHLayer +>>>>>>> triplet data generation and network update message TanHParameter { enum Engine { DEFAULT = 0; @@ -937,6 +1124,10 @@ message TanHParameter { optional Engine engine = 1 [default = DEFAULT]; } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +>>>>>>> triplet data generation and network update // Message that stores parameters used by TileLayer message TileParameter { // The index of the axis to tile. @@ -946,11 +1137,20 @@ message TileParameter { optional int32 tiles = 2; } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +======= +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update // Message that stores parameters used by ThresholdLayer message ThresholdParameter { optional float threshold = 1 [default = 0]; // Strictly positive values } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +// Message that stores parameters used by WindowDataLayer +>>>>>>> triplet data generation and network update message WindowDataParameter { // Specify the data source. optional string source = 1; @@ -984,6 +1184,7 @@ message WindowDataParameter { optional string root_folder = 13 [default = ""]; } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 message SPPParameter { enum PoolMethod { MAX = 0; @@ -1000,6 +1201,8 @@ message SPPParameter { optional Engine engine = 6 [default = DEFAULT]; } +======= +>>>>>>> triplet data generation and network update // DEPRECATED: use LayerParameter. 
message V1LayerParameter { repeated string bottom = 2; @@ -1190,6 +1393,10 @@ message V0LayerParameter { optional HDF5OutputParameter hdf5_output_param = 1001; } +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +// Message that stores parameters used by PReLULayer +>>>>>>> triplet data generation and network update message PReLUParameter { // Parametric ReLU described in K. He et al, Delving Deep into Rectifiers: // Surpassing Human-Level Performance on ImageNet Classification, 2015. diff --git a/src/caffe/solver.cpp.orig.orig b/src/caffe/solver.cpp.orig.orig index dd5e506ea0d..8b6f3c51779 100644 --- a/src/caffe/solver.cpp.orig.orig +++ b/src/caffe/solver.cpp.orig.orig @@ -33,11 +33,7 @@ void Solver::Init(const SolverParameter& param) { << param.DebugString(); param_ = param; CHECK_GE(param_.average_loss(), 1) << "average_loss should be non-negative."; -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 if (param_.random_seed() >= 0) { -======= - if (Caffe::root_solver() && param_.random_seed() >= 0) { ->>>>>>> triplet data generation and network update Caffe::set_random_seed(param_.random_seed()); } // Scaffolding code @@ -171,10 +167,8 @@ void Solver::Step(int iters) { vector losses; Dtype smoothed_loss = 0; -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 while (iter_ < stop_iter) { // zero-init the params -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 for (int i = 0; i < net_->params().size(); ++i) { shared_ptr > blob = net_->params()[i]; switch (Caffe::mode()) { @@ -193,29 +187,20 @@ void Solver::Step(int iters) { } } -======= - net_->ClearParamDiffs(); ->>>>>>> triplet data generation and network update if (param_.test_interval() && iter_ % param_.test_interval() == 0 && (iter_ > 0 || param_.test_initialization())) { TestAll(); } -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 -======= - for (int i = 0; i < callbacks_.size(); ++i) { - callbacks_[i]->on_start(); -======= - for (; iter_ < stop_iter; ++iter_) { - if (param_.test_interval() && iter_ 
% param_.test_interval() == 0 - && (iter_ > 0 || param_.test_initialization())) { - TestAll(); ->>>>>>> triplet data generation and network update - } ->>>>>>> triplet data generation and network update const bool display = param_.display() && iter_ % param_.display() == 0; net_->set_debug_info(display && param_.debug_info()); - Dtype loss = net_->ForwardBackward(bottom_vec); + // accumulate the loss and gradient + Dtype loss = 0; + for (int i = 0; i < param_.iter_size(); ++i) { + loss += net_->ForwardBackward(bottom_vec); + } + loss /= param_.iter_size(); + // average the loss across iterations for smoothed reporting if (losses.size() < average_loss) { losses.push_back(loss); int size = losses.size(); @@ -247,40 +232,14 @@ void Solver::Step(int iters) { } } } -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 - for (int i = 0; i < callbacks_.size(); ++i) { - callbacks_[i]->on_gradients_ready(); - } ->>>>>>> triplet data generation and network update ApplyUpdate(); // Increment the internal iter_ counter -- its value should always indicate // the number of times the weights have been updated. ++iter_; -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 // Save a snapshot if needed. if (param_.snapshot() && iter_ % param_.snapshot() == 0) { -======= -======= - ComputeUpdateValue(); - net_->Update(); ->>>>>>> triplet data generation and network update - - SolverAction::Enum request = GetRequestedAction(); - - // Save a snapshot if needed. 
-<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 - if ((param_.snapshot() - && iter_ % param_.snapshot() == 0 - && Caffe::root_solver()) || - (request == SolverAction::SNAPSHOT)) { -======= - if (param_.snapshot() && (iter_ + 1) % param_.snapshot() == 0) { ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update Snapshot(); } } @@ -386,64 +345,31 @@ void Solver::Test(const int test_net_id) { } } -<<<<<<< e29f9656158cb307d3fb4a78c63aa2247c5ad57a -template -void Solver::Snapshot() { - CHECK(Caffe::root_solver()); - string model_filename; - switch (param_.snapshot_format()) { - case caffe::SolverParameter_SnapshotFormat_BINARYPROTO: - model_filename = SnapshotToBinaryProto(); - break; - case caffe::SolverParameter_SnapshotFormat_HDF5: - model_filename = SnapshotToHDF5(); - break; - default: - LOG(FATAL) << "Unsupported snapshot format."; - } - - SnapshotSolverState(model_filename); -} -======= ->>>>>>> macro define in upgrade_proto template -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 void Solver::Snapshot() { NetParameter net_param; // For intermediate results, we will also dump the gradient values. net_->ToProto(&net_param, param_.snapshot_diff()); -======= -string Solver::SnapshotFilename(const string extension) { ->>>>>>> triplet data generation and network update string filename(param_.snapshot_prefix()); string model_filename, snapshot_filename; const int kBufferSize = 20; char iter_str_buffer[kBufferSize]; -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_); -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 -======= - return filename + iter_str_buffer + extension; -======= - // Add one to iter_ to get the number of iterations that have completed. 
- snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_ + 1); ->>>>>>> triplet data generation and network update filename += iter_str_buffer; model_filename = filename + ".caffemodel"; LOG(INFO) << "Snapshotting to " << model_filename; WriteProtoToBinaryFile(net_param, model_filename.c_str()); SolverState state; SnapshotSolverState(&state); - state.set_iter(iter_ + 1); + state.set_iter(iter_); state.set_learned_net(model_filename); state.set_current_step(current_step_); snapshot_filename = filename + ".solverstate"; LOG(INFO) << "Snapshotting solver state to " << snapshot_filename; WriteProtoToBinaryFile(state, snapshot_filename.c_str()); -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 ======= ->>>>>>> triplet data generation and network update } template @@ -575,32 +501,13 @@ void SGDSolver::ClipGradients() { } template -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 void SGDSolver::ApplyUpdate() { -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 -======= - CHECK(Caffe::root_solver()); -======= -void SGDSolver::ComputeUpdateValue() { - const vector > >& net_params = this->net_->params(); - const vector& net_params_lr = this->net_->params_lr(); - const vector& net_params_weight_decay = - this->net_->params_weight_decay(); - // get the learning rate ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update Dtype rate = GetLearningRate(); if (this->param_.display() && this->iter_ % this->param_.display() == 0) { LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; } ClipGradients(); -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 for (int param_id = 0; param_id < this->net_->params().size(); ++param_id) { -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 - for (int param_id = 0; param_id < this->net_->learnable_params().size(); - ++param_id) { ->>>>>>> triplet data generation and network update Normalize(param_id); Regularize(param_id); 
ComputeUpdateValue(param_id, rate); @@ -612,11 +519,7 @@ template void SGDSolver::Normalize(int param_id) { if (this->param_.iter_size() == 1) { return; } // Scale gradient to counterbalance accumulation. -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 const vector > >& net_params = this->net_->params(); -======= - const vector*>& net_params = this->net_->learnable_params(); ->>>>>>> triplet data generation and network update const Dtype accum_normalization = Dtype(1.) / this->param_.iter_size(); switch (Caffe::mode()) { case Caffe::CPU: { @@ -640,81 +543,55 @@ void SGDSolver::Normalize(int param_id) { template void SGDSolver::Regularize(int param_id) { -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 const vector > >& net_params = this->net_->params(); -======= - const vector*>& net_params = this->net_->learnable_params(); ->>>>>>> triplet data generation and network update const vector& net_params_weight_decay = this->net_->params_weight_decay(); -======= - Dtype momentum = this->param_.momentum(); ->>>>>>> triplet data generation and network update Dtype weight_decay = this->param_.weight_decay(); string regularization_type = this->param_.regularization_type(); + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; switch (Caffe::mode()) { - case Caffe::CPU: - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - // Compute the value to history, and then copy them to the blob's diff. 
- Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else if (regularization_type == "L1") { - caffe_cpu_sign(net_params[param_id]->count(), - net_params[param_id]->cpu_data(), - temp_[param_id]->mutable_cpu_data()); - caffe_axpy(net_params[param_id]->count(), - local_decay, - temp_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } + case Caffe::CPU: { + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else if (regularization_type == "L1") { + caffe_cpu_sign(net_params[param_id]->count(), + net_params[param_id]->cpu_data(), + temp_[param_id]->mutable_cpu_data()); + caffe_axpy(net_params[param_id]->count(), + local_decay, + temp_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; } - - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->cpu_diff(), momentum, - history_[param_id]->mutable_cpu_data()); - // copy - caffe_copy(net_params[param_id]->count(), - history_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); } break; - case Caffe::GPU: + } + case Caffe::GPU: { #ifndef CPU_ONLY - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - // Compute the value to history, and then copy them to the blob's diff. 
- Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else if (regularization_type == "L1") { - caffe_gpu_sign(net_params[param_id]->count(), - net_params[param_id]->gpu_data(), - temp_[param_id]->mutable_gpu_data()); - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - temp_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else if (regularization_type == "L1") { + caffe_gpu_sign(net_params[param_id]->count(), + net_params[param_id]->gpu_data(), + temp_[param_id]->mutable_gpu_data()); + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + temp_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; } -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 } #else NO_GPU; @@ -729,13 +606,6 @@ void SGDSolver::Regularize(int param_id) { template void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { const vector > >& net_params = this->net_->params(); -======= - -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 -template -void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { - const vector*>& net_params = this->net_->learnable_params(); ->>>>>>> triplet data generation and network update const vector& net_params_lr = this->net_->params_lr(); Dtype momentum = this->param_.momentum(); Dtype local_rate = rate * 
net_params_lr[param_id]; @@ -758,27 +628,11 @@ void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { caffe_copy(net_params[param_id]->count(), history_[param_id]->gpu_data(), net_params[param_id]->mutable_gpu_diff()); -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 -======= -======= - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->gpu_diff(), momentum, - history_[param_id]->mutable_gpu_data()); - // copy - caffe_copy(net_params[param_id]->count(), - history_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update #else NO_GPU; #endif break; -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 } -======= ->>>>>>> triplet data generation and network update default: LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); } @@ -805,327 +659,79 @@ void SGDSolver::RestoreSolverState(const SolverState& state) { } template -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { const vector > >& net_params = this->net_->params(); -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 -void SGDSolver::RestoreSolverStateFromHDF5(const string& state_file) { - hid_t file_hid = H5Fopen(state_file.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK_GE(file_hid, 0) << "Couldn't open solver state file " << state_file; - this->iter_ = hdf5_load_int(file_hid, "iter"); - if (H5LTfind_dataset(file_hid, "learned_net")) { - string learned_net = hdf5_load_string(file_hid, "learned_net"); - this->net_->CopyTrainedLayersFrom(learned_net); - } - this->current_step_ = hdf5_load_int(file_hid, "current_step"); - hid_t history_hid = H5Gopen2(file_hid, "history", H5P_DEFAULT); - CHECK_GE(history_hid, 0) << "Error reading history from " << state_file; - int state_history_size = hdf5_get_num_links(history_hid); - CHECK_EQ(state_history_size, history_.size()) - << "Incorrect 
length of history blobs."; - for (int i = 0; i < history_.size(); ++i) { - ostringstream oss; - oss << i; - hdf5_load_nd_dataset(history_hid, oss.str().c_str(), 0, - kMaxBlobAxes, history_[i].get()); - } - H5Gclose(history_hid); - H5Fclose(file_hid); -} - -template -void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { - CHECK(Caffe::root_solver()); - const vector*>& net_params = this->net_->learnable_params(); -======= -void NesterovSolver::ComputeUpdateValue() { - const vector > >& net_params = this->net_->params(); ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update const vector& net_params_lr = this->net_->params_lr(); - const vector& net_params_weight_decay = - this->net_->params_weight_decay(); - // get the learning rate - Dtype rate = this->GetLearningRate(); - if (this->param_.display() && this->iter_ % this->param_.display() == 0) { - LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; - } - SGDSolver::ClipGradients(); Dtype momentum = this->param_.momentum(); - Dtype weight_decay = this->param_.weight_decay(); - string regularization_type = this->param_.regularization_type(); + Dtype local_rate = rate * net_params_lr[param_id]; switch (Caffe::mode()) { - case Caffe::CPU: - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - // save history momentum for stepping back - caffe_copy(net_params[param_id]->count(), - this->history_[param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); - - Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else if (regularization_type == "L1") { - caffe_cpu_sign(net_params[param_id]->count(), - net_params[param_id]->cpu_data(), 
- this->temp_[param_id]->mutable_cpu_data()); - caffe_axpy(net_params[param_id]->count(), - local_decay, - this->temp_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } - } + case Caffe::CPU: { + // save history momentum for stepping back + caffe_copy(net_params[param_id]->count(), + this->history_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); - // update history - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->cpu_diff(), momentum, - this->history_[param_id]->mutable_cpu_data()); + // update history + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), momentum, + this->history_[param_id]->mutable_cpu_data()); - // compute udpate: step back then over step - caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, - this->history_[param_id]->cpu_data(), -momentum, - this->update_[param_id]->mutable_cpu_data()); + // compute update: step back then over step + caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, + this->history_[param_id]->cpu_data(), -momentum, + this->update_[param_id]->mutable_cpu_data()); - // copy - caffe_copy(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } + // copy + caffe_copy(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); break; - case Caffe::GPU: + } + case Caffe::GPU: { #ifndef CPU_ONLY - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - // save history momentum for stepping back - caffe_copy(net_params[param_id]->count(), - this->history_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) 
{ - if (regularization_type == "L2") { - // add weight decay - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else if (regularization_type == "L1") { - caffe_gpu_sign(net_params[param_id]->count(), - net_params[param_id]->gpu_data(), - this->temp_[param_id]->mutable_gpu_data()); - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - this->temp_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } - } + // save history momentum for stepping back + caffe_copy(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); - // update history - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->gpu_diff(), momentum, - this->history_[param_id]->mutable_gpu_data()); + // update history + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), momentum, + this->history_[param_id]->mutable_gpu_data()); - // compute udpate: step back then over step - caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, - this->history_[param_id]->gpu_data(), -momentum, - this->update_[param_id]->mutable_gpu_data()); + // compute update: step back then over step + caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, + this->history_[param_id]->gpu_data(), -momentum, + this->update_[param_id]->mutable_gpu_data()); - // copy - caffe_copy(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } + // copy + caffe_copy(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); #else NO_GPU; #endif break; + } default: LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); } } template -<<<<<<< 
6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 void AdaGradSolver::ComputeUpdateValue(int param_id, Dtype rate) { -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 const vector > >& net_params = this->net_->params(); const vector& net_params_lr = this->net_->params_lr(); Dtype delta = this->param_.delta(); Dtype local_rate = rate * net_params_lr[param_id]; switch (Caffe::mode()) { case Caffe::CPU: { -======= - CHECK(Caffe::root_solver()); - const vector*>& net_params = this->net_->learnable_params(); -======= -void AdaGradSolver::ComputeUpdateValue() { - const vector > >& net_params = this->net_->params(); ->>>>>>> triplet data generation and network update - const vector& net_params_lr = this->net_->params_lr(); - const vector& net_params_weight_decay = - this->net_->params_weight_decay(); - // get the learning rate - Dtype rate = this->GetLearningRate(); - Dtype delta = this->param_.delta(); - if (this->param_.display() && this->iter_ % this->param_.display() == 0) { - LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; - } - SGDSolver::ClipGradients(); - Dtype weight_decay = this->param_.weight_decay(); - string regularization_type = this->param_.regularization_type(); - switch (Caffe::mode()) { - case Caffe::CPU: - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else if (regularization_type == "L1") { - caffe_cpu_sign(net_params[param_id]->count(), - net_params[param_id]->cpu_data(), - this->temp_[param_id]->mutable_cpu_data()); - caffe_axpy(net_params[param_id]->count(), - local_decay, - this->temp_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else { - LOG(FATAL) << 
"Unknown regularization type: " << regularization_type; - } - } - - // compute square of gradient in update - caffe_powx(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), Dtype(2), - this->update_[param_id]->mutable_cpu_data()); - - // update history - caffe_add(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), - this->history_[param_id]->cpu_data(), - this->history_[param_id]->mutable_cpu_data()); - - // prepare update - caffe_powx(net_params[param_id]->count(), - this->history_[param_id]->cpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_cpu_data()); - - caffe_add_scalar(net_params[param_id]->count(), - delta, this->update_[param_id]->mutable_cpu_data()); - - caffe_div(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), - this->update_[param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); - - // scale and copy - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->cpu_data(), Dtype(0), - net_params[param_id]->mutable_cpu_diff()); - } - break; - case Caffe::GPU: -#ifndef CPU_ONLY - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else if (regularization_type == "L1") { - caffe_gpu_sign(net_params[param_id]->count(), - net_params[param_id]->gpu_data(), - this->temp_[param_id]->mutable_gpu_data()); - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - this->temp_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } - } - - // compute square of gradient in update - 
caffe_gpu_powx(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), Dtype(2), - this->update_[param_id]->mutable_gpu_data()); - - // update history - caffe_gpu_add(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), - this->history_[param_id]->gpu_data(), - this->history_[param_id]->mutable_gpu_data()); - - // prepare update - caffe_gpu_powx(net_params[param_id]->count(), - this->history_[param_id]->gpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_add_scalar(net_params[param_id]->count(), - delta, this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_div(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), - this->update_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - // scale and copy - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->gpu_data(), Dtype(0), - net_params[param_id]->mutable_gpu_diff()); - } -#else - NO_GPU; -#endif - break; - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} - -template -void RMSPropSolver::ComputeUpdateValue(int param_id, Dtype rate) { - const vector*>& net_params = this->net_->learnable_params(); - const vector& net_params_lr = this->net_->params_lr(); - - // get the learning rate - Dtype delta = this->param_.delta(); - Dtype rms_decay = this->param_.rms_decay(); - Dtype local_rate = rate * net_params_lr[param_id]; - - switch (Caffe::mode()) { - case Caffe::CPU: ->>>>>>> triplet data generation and network update // compute square of gradient in update caffe_powx(net_params[param_id]->count(), net_params[param_id]->cpu_diff(), Dtype(2), this->update_[param_id]->mutable_cpu_data()); // update history -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 caffe_add(net_params[param_id]->count(), this->update_[param_id]->cpu_data(), this->history_[param_id]->cpu_data(), @@ -1143,30 +749,14 @@ void RMSPropSolver::ComputeUpdateValue(int param_id, Dtype rate) { 
net_params[param_id]->cpu_diff(), this->update_[param_id]->cpu_data(), this->update_[param_id]->mutable_cpu_data()); -======= - caffe_cpu_axpby(net_params[param_id] -> count(), - Dtype(1-rms_decay), this->update_[param_id]->cpu_data(), - rms_decay, this->history_[param_id]-> mutable_cpu_data()); - - // prepare update - caffe_powx(net_params[param_id]->count(), - this->history_[param_id]->cpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_cpu_data()); - - caffe_add_scalar(net_params[param_id]->count(), - delta, this->update_[param_id]->mutable_cpu_data()); - - caffe_div(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), this->update_[param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); ->>>>>>> triplet data generation and network update // scale and copy caffe_cpu_axpby(net_params[param_id]->count(), local_rate, this->update_[param_id]->cpu_data(), Dtype(0), net_params[param_id]->mutable_cpu_diff()); break; - case Caffe::GPU: + } + case Caffe::GPU: { #ifndef CPU_ONLY // compute square of gradient in update caffe_gpu_powx(net_params[param_id]->count(), @@ -1174,7 +764,6 @@ void RMSPropSolver::ComputeUpdateValue(int param_id, Dtype rate) { this->update_[param_id]->mutable_gpu_data()); // update history -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 caffe_gpu_add(net_params[param_id]->count(), this->update_[param_id]->gpu_data(), this->history_[param_id]->gpu_data(), @@ -1197,275 +786,6 @@ void RMSPropSolver::ComputeUpdateValue(int param_id, Dtype rate) { caffe_gpu_axpby(net_params[param_id]->count(), local_rate, this->update_[param_id]->gpu_data(), Dtype(0), net_params[param_id]->mutable_gpu_diff()); -======= - caffe_gpu_axpby(net_params[param_id] -> count(), - Dtype(1-rms_decay), this->update_[param_id]->gpu_data(), - rms_decay, this->history_[param_id]-> mutable_gpu_data()); - - // prepare update - caffe_gpu_powx(net_params[param_id]->count(), - this->history_[param_id]->gpu_data(), Dtype(0.5), - 
this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_add_scalar(net_params[param_id]->count(), - delta, this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_div(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), this->update_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->gpu_data(), Dtype(0), - net_params[param_id]->mutable_gpu_diff()); -#else - NO_GPU; -#endif - break; - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} - -template -void AdaDeltaSolver::AdaDeltaPreSolve() { - // Add the extra history entries for AdaDelta after those from - // SGDSolver::PreSolve - const vector*>& net_params = this->net_->learnable_params(); - for (int i = 0; i < net_params.size(); ++i) { - const vector& shape = net_params[i]->shape(); - this->history_.push_back( - shared_ptr >(new Blob(shape))); - } -} - -template -void AdaDeltaSolver::ComputeUpdateValue(int param_id, Dtype rate) { - const vector*>& net_params = this->net_->learnable_params(); - const vector& net_params_lr = this->net_->params_lr(); - Dtype delta = this->param_.delta(); - Dtype momentum = this->param_.momentum(); - Dtype local_rate = rate * net_params_lr[param_id]; - size_t update_history_offset = net_params.size(); - switch (Caffe::mode()) { - case Caffe::CPU: { - // compute square of gradient in update - caffe_powx(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), Dtype(2), - this->update_[param_id]->mutable_cpu_data()); - - // update history of gradients - caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, - this->update_[param_id]->cpu_data(), momentum, - this->history_[param_id]->mutable_cpu_data()); - - // add delta to history to guard against dividing by zero later - caffe_set(net_params[param_id]->count(), delta, - this->temp_[param_id]->mutable_cpu_data()); - - caffe_add(net_params[param_id]->count(), - 
this->temp_[param_id]->cpu_data(), - this->history_[update_history_offset + param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); - - caffe_add(net_params[param_id]->count(), - this->temp_[param_id]->cpu_data(), - this->history_[param_id]->cpu_data(), - this->temp_[param_id]->mutable_cpu_data()); - - // divide history of updates by history of gradients - caffe_div(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), - this->temp_[param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); - - // jointly compute the RMS of both for update and gradient history - caffe_powx(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_cpu_data()); - - // compute the update - caffe_mul(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), - this->update_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - - // compute square of update - caffe_powx(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), Dtype(2), - this->update_[param_id]->mutable_cpu_data()); - - // update history of updates - caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, - this->update_[param_id]->cpu_data(), momentum, - this->history_[update_history_offset + param_id]->mutable_cpu_data()); - - // apply learning rate - caffe_cpu_scale(net_params[param_id]->count(), local_rate, - net_params[param_id]->cpu_diff(), - net_params[param_id]->mutable_cpu_diff()); - break; - } - case Caffe::GPU: { -#ifndef CPU_ONLY - // compute square of gradient in update - caffe_gpu_powx(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), Dtype(2), - this->update_[param_id]->mutable_gpu_data()); - - // update history of gradients - caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, - this->update_[param_id]->gpu_data(), momentum, - this->history_[param_id]->mutable_gpu_data()); - - // add delta to history to guard against dividing 
by zero later - caffe_gpu_set(net_params[param_id]->count(), delta, - this->temp_[param_id]->mutable_gpu_data()); - - caffe_gpu_add(net_params[param_id]->count(), - this->temp_[param_id]->gpu_data(), - this->history_[update_history_offset + param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_add(net_params[param_id]->count(), - this->temp_[param_id]->gpu_data(), - this->history_[param_id]->gpu_data(), - this->temp_[param_id]->mutable_gpu_data()); - - // divide history of updates by history of gradients - caffe_gpu_div(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), - this->temp_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - // jointly compute the RMS of both for update and gradient history - caffe_gpu_powx(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_gpu_data()); - - // compute the update and copy to net_diff - caffe_gpu_mul(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), - this->update_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - - // compute square of update - caffe_gpu_powx(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), Dtype(2), - this->update_[param_id]->mutable_gpu_data()); - - // update history of updates - caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, - this->update_[param_id]->gpu_data(), momentum, - this->history_[update_history_offset + param_id]->mutable_gpu_data()); - - // apply learning rate - caffe_gpu_scale(net_params[param_id]->count(), local_rate, - net_params[param_id]->gpu_diff(), - net_params[param_id]->mutable_gpu_diff()); -#else - NO_GPU; -#endif - break; - } - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} - -template -void AdamSolver::AdamPreSolve() { - // Add the extra history entries for Adam after those from - // SGDSolver::PreSolve - const vector*>& net_params = 
this->net_->learnable_params(); - for (int i = 0; i < net_params.size(); ++i) { - const vector& shape = net_params[i]->shape(); - this->history_.push_back( - shared_ptr >(new Blob(shape))); - } -} - -template -void AdamSolver::ComputeUpdateValue(int param_id, Dtype rate) { - const vector*>& net_params = this->net_->learnable_params(); - const vector& net_params_lr = this->net_->params_lr(); - Dtype local_rate = rate * net_params_lr[param_id]; - const Dtype beta1 = this->param_.momentum(); - const Dtype beta2 = this->param_.momentum2(); - - // we create aliases for convenience - size_t update_history_offset = net_params.size(); - Blob* val_m = this->history_[param_id].get(); - Blob* val_v = this->history_[param_id + update_history_offset].get(); - Blob* val_t = this->temp_[param_id].get(); - - const int t = this->iter_ + 1; - const Dtype correction = std::sqrt(Dtype(1) - pow(beta2, t)) / - (Dtype(1.) - pow(beta1, t)); - const int N = net_params[param_id]->count(); - const Dtype eps_hat = this->param_.delta(); - - switch (Caffe::mode()) { - case Caffe::CPU: { - // update m <- \beta_1 m_{t-1} + (1-\beta_1)g_t - caffe_cpu_axpby(N, Dtype(1)-beta1, - net_params[param_id]->cpu_diff(), beta1, - val_m->mutable_cpu_data()); - - // update v <- \beta_2 m_{t-1} + (1-\beta_2)g_t^2 - caffe_mul(N, - net_params[param_id]->cpu_diff(), - net_params[param_id]->cpu_diff(), - val_t->mutable_cpu_data()); - caffe_cpu_axpby(N, Dtype(1)-beta2, - val_t->cpu_data(), beta2, - val_v->mutable_cpu_data()); - - // set update - caffe_powx(N, - val_v->cpu_data(), Dtype(0.5), - val_t->mutable_cpu_data()); - caffe_add_scalar(N, eps_hat, val_t->mutable_cpu_data()); - caffe_div(N, - val_m->cpu_data(), - val_t->cpu_data(), - val_t->mutable_cpu_data()); - - caffe_cpu_scale(N, local_rate*correction, - val_t->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - break; - } - case Caffe::GPU: { -#ifndef CPU_ONLY - // update m <- \beta_1 m_{t-1} + (1-\beta_1)g_t - caffe_gpu_axpby(N, Dtype(1)-beta1, - 
net_params[param_id]->gpu_diff(), beta1, - val_m->mutable_gpu_data()); - - // update v <- \beta_2 m_{t-1} + (1-\beta_2)g_t^2 - caffe_gpu_mul(N, - net_params[param_id]->gpu_diff(), - net_params[param_id]->gpu_diff(), - val_t->mutable_gpu_data()); - caffe_gpu_axpby(N, Dtype(1)-beta2, - val_t->gpu_data(), beta2, - val_v->mutable_gpu_data()); - - // set update - caffe_gpu_powx(N, - val_v->gpu_data(), Dtype(0.5), - val_t->mutable_gpu_data()); - caffe_gpu_add_scalar(N, eps_hat, - val_t->mutable_gpu_data()); - caffe_gpu_div(N, - val_m->gpu_data(), - val_t->gpu_data(), - val_t->mutable_gpu_data()); - - caffe_gpu_scale(N, local_rate*correction, - val_t->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); ->>>>>>> triplet data generation and network update #else NO_GPU; #endif diff --git a/src/caffe/test/test_data/generate_sample_data.py.orig.orig b/src/caffe/test/test_data/generate_sample_data.py.orig.orig new file mode 100644 index 00000000000..8e2a6e94175 --- /dev/null +++ b/src/caffe/test/test_data/generate_sample_data.py.orig.orig @@ -0,0 +1,105 @@ +""" +Generate data used in the HDF5DataLayer test. +""" +import os +import numpy as np +import h5py + +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +script_dir = os.path.dirname(os.path.abspath(__file__)) + +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +# Generate HDF5DataLayer sample_data.h5 + +======= +>>>>>>> triplet data generation and network update +======= +script_dir = os.path.dirname(os.path.abspath(__file__)) + +>>>>>>> restore +>>>>>>> triplet data generation and network update +num_cols = 8 +num_rows = 10 +height = 6 +width = 5 +total_size = num_cols * num_rows * height * width + +data = np.arange(total_size) +data = data.reshape(num_rows, num_cols, height, width) +data = data.astype('float32') + +# We had a bug where data was copied into label, but the tests weren't +# catching it, so let's make label 1-indexed. 
+label = 1 + np.arange(num_rows)[:, np.newaxis] +label = label.astype('float32') + +# We add an extra label2 dataset to test HDF5 layer's ability +# to handle arbitrary number of output ("top") Blobs. +label2 = label + 1 + +print data +print label + +with h5py.File(script_dir + '/sample_data.h5', 'w') as f: + f['data'] = data + f['label'] = label + f['label2'] = label2 + +with h5py.File(script_dir + '/sample_data_2_gzip.h5', 'w') as f: + f.create_dataset( + 'data', data=data + total_size, + compression='gzip', compression_opts=1 + ) + f.create_dataset( + 'label', data=label, + compression='gzip', compression_opts=1 + ) + f.create_dataset( + 'label2', data=label2, + compression='gzip', compression_opts=1 + ) + +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +with open(script_dir + '/sample_data_list.txt', 'w') as f: + f.write(script_dir + '/sample_data.h5\n') + f.write(script_dir + '/sample_data_2_gzip.h5\n') +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= + +# Generate GradientBasedSolver solver_data.h5 + +num_cols = 3 +num_rows = 8 +height = 10 +width = 10 + +data = np.random.randn(num_rows, num_cols, height, width) +data = data.reshape(num_rows, num_cols, height, width) +data = data.astype('float32') + +targets = np.random.randn(num_rows, 1) +targets = targets.astype('float32') + +print data +print targets + +with h5py.File(script_dir + '/solver_data.h5', 'w') as f: + f['data'] = data + f['targets'] = targets + +with open(script_dir + '/solver_data_list.txt', 'w') as f: + f.write(script_dir + '/solver_data.h5\n') +======= +with open(os.path.dirname(__file__) + '/sample_data_list.txt', 'w') as f: + f.write(os.path.dirname(__file__) + '/sample_data.h5\n') + f.write(os.path.dirname(__file__) + '/sample_data_2_gzip.h5\n') +>>>>>>> triplet data generation and network update +======= +with open(script_dir + '/sample_data_list.txt', 'w') as f: + f.write(script_dir + '/sample_data.h5\n') + f.write(script_dir 
+ '/sample_data_2_gzip.h5\n') +>>>>>>> restore +>>>>>>> triplet data generation and network update diff --git a/src/caffe/test/test_gradient_based_solver.cpp.orig.orig b/src/caffe/test/test_gradient_based_solver.cpp.orig.orig new file mode 100644 index 00000000000..b39955cd038 --- /dev/null +++ b/src/caffe/test/test_gradient_based_solver.cpp.orig.orig @@ -0,0 +1,967 @@ +#include +#include +#include +#include + +#include "google/protobuf/text_format.h" + +#include "gtest/gtest.h" + +#include "caffe/common.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/solver.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +using std::ostringstream; + +namespace caffe { + +template +class GradientBasedSolverTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + GradientBasedSolverTest() : +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 + seed_(1701), num_(4), channels_(3), height_(10), width_(10) {} +======= +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 + seed_(1701), num_(4), channels_(3), height_(10), width_(10), + share_(false) { + input_file_ = new string( + CMAKE_SOURCE_DIR "caffe/test/test_data/solver_data_list.txt" CMAKE_EXT); + } + ~GradientBasedSolverTest() { + delete input_file_; + } +======= + seed_(1701), num_(5), channels_(3), height_(10), width_(10) {} +>>>>>>> triplet data generation and network update +======= + seed_(1701), num_(4), channels_(3), height_(10), width_(10) {} +>>>>>>> restore +>>>>>>> triplet data generation and network update + + shared_ptr > solver_; + int seed_; + int num_, channels_, height_, width_; + Dtype delta_; // Stability constant for AdaGrad. 
+ + virtual SolverParameter_SolverType solver_type() = 0; + virtual void InitSolver(const SolverParameter& param) = 0; + + virtual void InitSolverFromProtoString(const string& proto) { + SolverParameter param; + CHECK(google::protobuf::TextFormat::ParseFromString(proto, ¶m)); + // Disable saving a final snapshot so the tests don't pollute the user's + // working directory with useless snapshots. + param.set_snapshot_after_train(false); + // Set the solver_mode according to current Caffe::mode. + switch (Caffe::mode()) { + case Caffe::CPU: + param.set_solver_mode(SolverParameter_SolverMode_CPU); + break; + case Caffe::GPU: + param.set_solver_mode(SolverParameter_SolverMode_GPU); + break; + default: + LOG(FATAL) << "Unknown Caffe mode: " << Caffe::mode(); + } + InitSolver(param); + delta_ = (solver_type() == SolverParameter_SolverType_ADAGRAD) ? + param.delta() : 0; + } + +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 + void RunLeastSquaresSolver(const Dtype learning_rate, + const Dtype weight_decay, const Dtype momentum, const int num_iters, + const int iter_size = 1) { +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 + string RunLeastSquaresSolver(const Dtype learning_rate, + const Dtype weight_decay, const Dtype momentum, const int num_iters, + const int iter_size = 1, const int devices = 1, + const bool snapshot = false, const char* from_snapshot = NULL) { +======= + void RunLeastSquaresSolver(const Dtype learning_rate, +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a + const Dtype weight_decay, const Dtype momentum, const int num_iters) { +>>>>>>> triplet data generation and network update +======= + const Dtype weight_decay, const Dtype momentum, const int num_iters, + const int iter_size = 1) { +>>>>>>> restore +>>>>>>> triplet data generation and network update + ostringstream proto; + proto << + "max_iter: " << num_iters << " " + "base_lr: " << learning_rate << " " + "lr_policy: 'fixed' " +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a 
+<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 + "iter_size: " << iter_size << " " +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= + "device_id: " << device_id << " " +======= +>>>>>>> triplet data generation and network update +======= + "iter_size: " << iter_size << " " +>>>>>>> restore +>>>>>>> triplet data generation and network update + "net_param { " + " name: 'TestNetwork' " + " layer { " + " name: 'data' " +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 + " type: 'HDF5Data' " + " hdf5_data_param { " + " source: '" << *(this->input_file_) << "' " + " batch_size: " << num_ / iter_size << " " +======= +>>>>>>> triplet data generation and network update + " type: 'DummyData' " + " dummy_data_param { " + " num: " << num_ / iter_size << " " + " channels: " << channels_ << " " + " height: " << height_ << " " + " width: " << width_ << " " + " channels: 1 " + " height: 1 " + " width: 1 " + " data_filler { " + " type: 'constant' " + " value: 1.0 " + " } " + " data_filler { " + " type: 'gaussian' " + " std: 1.0 " + " } " +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update + " } " + " top: 'data' " + " top: 'targets' " + " } " + " layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 1.0 " + " } " + " bias_filler { " + " type: 'gaussian' " + " std: 1.0 " + " } " + " } " + " bottom: 'data' " + " top: 'innerprod' " + " } " + " layer { " + " name: 'loss' " + " type: 'EuclideanLoss' " + " bottom: 'innerprod' " + " bottom: 'targets' " + " } " + "} "; + if (weight_decay != 0) { + proto << "weight_decay: " << weight_decay << " "; + } + if (momentum != 0) { + proto << "momentum: " << momentum << " "; + } + Caffe::set_random_seed(this->seed_); + 
this->InitSolverFromProtoString(proto.str()); + this->solver_->Solve(); + } + + // Compute an update value given the current state of the train net, + // using the analytical formula for the least squares gradient. + // updated_params will store the updated weight and bias results, + // using the blobs' diffs to hold the update values themselves. + void ComputeLeastSquaresUpdate(const Dtype learning_rate, + const Dtype weight_decay, const Dtype momentum, + vector > >* updated_params) { + const int N = num_; + const int D = channels_ * height_ * width_; + + // Run a forward pass, and manually compute the update values from the + // result. + Net& net = *this->solver_->net(); + vector*> empty_bottom_vec; + net.Forward(empty_bottom_vec); + ASSERT_TRUE(net.has_blob("data")); + const Blob& data = *net.blob_by_name("data"); + ASSERT_TRUE(net.has_blob("targets")); + const Blob& targets = *net.blob_by_name("targets"); + ASSERT_TRUE(net.has_layer("innerprod")); + const vector > >& param_blobs = + net.layer_by_name("innerprod")->blobs(); + const int num_param_blobs = 2; + ASSERT_EQ(num_param_blobs, param_blobs.size()); + const Blob& weights = *param_blobs[0]; + const Blob& bias = *param_blobs[1]; + ASSERT_EQ(D * N, data.count()); + ASSERT_EQ(N, targets.count()); + ASSERT_EQ(D, weights.count()); + ASSERT_EQ(1, bias.count()); + + updated_params->clear(); + updated_params->resize(num_param_blobs); + for (int i = 0; i < num_param_blobs; ++i) { + (*updated_params)[i].reset(new Blob()); + } + Blob& updated_weights = *(*updated_params)[0]; + updated_weights.ReshapeLike(weights); + Blob& updated_bias = *(*updated_params)[1]; + updated_bias.ReshapeLike(bias); + + for (int i = 0; i <= D; ++i) { + // Compute the derivative with respect to the ith weight (i.e., the ith + // element of the gradient). + Dtype grad = 0; + for (int j = 0; j <= D; ++j) { + // Compute element (i, j) of X^T * X. 
+ Dtype element = 0; + for (int k = 0; k < N; ++k) { + // (i, k) in X^T (== (k, i) in X) times (k, j) in X. + const Dtype element_i = (i == D) ? 1 : data.cpu_data()[k * D + i]; + const Dtype element_j = (j == D) ? 1 : data.cpu_data()[k * D + j]; + element += element_i * element_j; + } + if (j == D) { + grad += element * bias.cpu_data()[0]; + } else { + grad += element * weights.cpu_data()[j]; + } + } + for (int k = 0; k < N; ++k) { + const Dtype element_i = (i == D) ? 1 : data.cpu_data()[k * D + i]; + grad -= element_i * targets.cpu_data()[k]; + } + // Scale the gradient over the N samples. + grad /= N; + // Add the weight decay to the gradient. + grad += weight_decay * + ((i == D) ? bias.cpu_data()[0] : weights.cpu_data()[i]); + // Finally, compute update. + const vector > >& history = solver_->history(); + ASSERT_EQ(2, history.size()); // 1 blob for weights, 1 for bias + Dtype update_value = learning_rate * grad; + const Dtype history_value = (i == D) ? + history[1]->cpu_data()[0] : history[0]->cpu_data()[i]; + const Dtype temp = momentum * history_value; + switch (solver_type()) { + case SolverParameter_SolverType_SGD: + update_value += temp; + break; + case SolverParameter_SolverType_NESTEROV: + update_value += temp; + // step back then over-step + update_value = (1 + momentum) * update_value - temp; + break; + case SolverParameter_SolverType_ADAGRAD: + update_value /= std::sqrt(history_value + grad * grad) + delta_; + break; + default: + LOG(FATAL) << "Unknown solver type: " << solver_type(); + } + if (i == D) { + updated_bias.mutable_cpu_diff()[0] = update_value; + updated_bias.mutable_cpu_data()[0] = bias.cpu_data()[0] - update_value; + } else { + updated_weights.mutable_cpu_diff()[i] = update_value; + updated_weights.mutable_cpu_data()[i] = + weights.cpu_data()[i] - update_value; + } + } + } + + void CheckLeastSquaresUpdate( + const vector > >& updated_params) { + const int D = channels_ * height_ * width_; + + const Blob& updated_weights = 
*updated_params[0]; + const Blob& updated_bias = *updated_params[1]; + + Net& net = *this->solver_->net(); + ASSERT_TRUE(net.has_layer("innerprod")); + const vector > >& param_blobs = + net.layer_by_name("innerprod")->blobs(); + ASSERT_EQ(2, param_blobs.size()); + const Blob& solver_updated_weights = *param_blobs[0]; + ASSERT_EQ(D, solver_updated_weights.count()); + const double kPrecision = 1e-2; + const double kMinPrecision = 1e-7; + for (int i = 0; i < D; ++i) { + const Dtype expected_updated_weight = updated_weights.cpu_data()[i]; + const Dtype solver_updated_weight = solver_updated_weights.cpu_data()[i]; + const Dtype error_margin = std::max(kMinPrecision, kPrecision * + std::min(fabs(expected_updated_weight), fabs(solver_updated_weight))); + EXPECT_NEAR(expected_updated_weight, solver_updated_weight, error_margin); + } + const Blob& solver_updated_bias_blob = *param_blobs[1]; + ASSERT_EQ(1, solver_updated_bias_blob.count()); + const Dtype expected_updated_bias = updated_bias.cpu_data()[0]; + const Dtype solver_updated_bias = solver_updated_bias_blob.cpu_data()[0]; + const Dtype error_margin = std::max(kMinPrecision, kPrecision * + std::min(fabs(expected_updated_bias), fabs(solver_updated_bias))); + EXPECT_NEAR(expected_updated_bias, solver_updated_bias, error_margin); + + // Check the solver's history -- should contain the previous update value. 
+ if (solver_type() == SolverParameter_SolverType_SGD) { + const vector > >& history = solver_->history(); + ASSERT_EQ(2, history.size()); + for (int i = 0; i < D; ++i) { + const Dtype expected_history = updated_weights.cpu_diff()[i]; + const Dtype solver_history = history[0]->cpu_data()[i]; + const Dtype error_margin_hist = std::max(kMinPrecision, kPrecision * + std::min(fabs(expected_history), fabs(solver_history))); + EXPECT_NEAR(expected_history, solver_history, error_margin_hist); + } + const Dtype expected_history = updated_bias.cpu_diff()[0]; + const Dtype solver_history = history[1]->cpu_data()[0]; + const Dtype error_margin_hist = std::max(kMinPrecision, kPrecision * + std::min(fabs(expected_history), fabs(solver_history))); + EXPECT_NEAR(expected_history, solver_history, error_margin_hist); + } + } + + void CheckAccumulation(const Dtype kLearningRate, const Dtype kWeightDecay, + const Dtype kMomentum, const int kNumIters, const int kIterSize) { + const double kPrecision = 1e-2; + const double kMinPrecision = 1e-7; + // Solve without accumulation and save parameters. + this->RunLeastSquaresSolver(kLearningRate, kWeightDecay, kMomentum, + kNumIters); + // Save parameters for comparison. + Net& net = *this->solver_->net(); + const vector > >& param_blobs = + net.layer_by_name("innerprod")->blobs(); + vector > > noaccum_params(param_blobs.size()); + for (int i = 0; i < param_blobs.size(); ++i) { + noaccum_params[i].reset(new Blob()); + noaccum_params[i]->CopyFrom(*param_blobs[i], false, true); + } + // Solve by equivalent accumulation of gradients over divided batches. + this->RunLeastSquaresSolver(kLearningRate, kWeightDecay, kMomentum, + kNumIters, kIterSize); + Net& net_accum = *this->solver_->net(); + const vector > >& accum_params = + net_accum.layer_by_name("innerprod")->blobs(); + // Compare accumulated parameters against no accumulation standard. 
+ const int D = this->channels_ * this->height_ * this->width_; + for (int i = 0; i < D; ++i) { + const Dtype expected_param = noaccum_params[0]->cpu_data()[i]; + const Dtype accum_param = accum_params[0]->cpu_data()[i]; + const Dtype error_margin = std::max(kMinPrecision, kPrecision * + std::min(fabs(expected_param), fabs(accum_param))); + EXPECT_NEAR(expected_param, accum_param, error_margin); + } + ASSERT_EQ(1, accum_params[1]->count()); + const Dtype expected_bias = noaccum_params[1]->cpu_data()[0]; + const Dtype accum_bias = accum_params[1]->cpu_data()[0]; + const Dtype error_margin = std::max(kMinPrecision, kPrecision * + std::min(fabs(expected_bias), fabs(accum_bias))); + EXPECT_NEAR(expected_bias, accum_bias, error_margin); + } + + // Test that the correct update is computed for a regularized least squares + // problem: + // + // E = (1/(2n)) || X w - y ||^2 + (lambda / 2) || w ||^2 + // \nabla_w E = (1/n) (X^T X w - X^T y) + lambda * w + // + // X \in R^{n x (d+1)} (each example is a row, (d+1)th element is always 1) + // w \in R^{(d+1) x 1} ((d+1)th element is the bias) + // y \in R^{n x 1} + // lambda is weight_decay + // + // TestLeastSquaresUpdate works "inductively", assuming that the solver + // correctly updates the net K (= iter_to_check) times, then given the history + // from the Kth update, we compute the (K+1)th update and check that it + // matches the solver's (K+1)th update. + void TestLeastSquaresUpdate(const Dtype learning_rate = 1.0, + const Dtype weight_decay = 0.0, const Dtype momentum = 0.0, + const int iter_to_check = 0) { + // Initialize the solver and run K (= iter_to_check) solver iterations. + RunLeastSquaresSolver(learning_rate, weight_decay, momentum, iter_to_check); + + // Compute the (K+1)th update using the analytic least squares gradient. + vector > > updated_params; + ComputeLeastSquaresUpdate(learning_rate, weight_decay, momentum, + &updated_params); + + // Reinitialize the solver and run K+1 solver iterations. 
+ RunLeastSquaresSolver(learning_rate, weight_decay, momentum, + iter_to_check + 1); + + // Check that the solver's solution matches ours. + CheckLeastSquaresUpdate(updated_params); + } +}; + + +template +class SGDSolverTest : public GradientBasedSolverTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + virtual void InitSolver(const SolverParameter& param) { + this->solver_.reset(new SGDSolver(param)); + } + + virtual SolverParameter_SolverType solver_type() { + return SolverParameter_SolverType_SGD; + } +}; + +TYPED_TEST_CASE(SGDSolverTest, TestDtypesAndDevices); + +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdate) { + this->TestLeastSquaresUpdate(); +} + +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateLROneTenth) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + this->TestLeastSquaresUpdate(kLearningRate); +} + +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithWeightDecay) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 1.0; + const Dtype kWeightDecay = 0.5; + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay); +} + +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithMomentum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 1.0; + const Dtype kWeightDecay = 0.0; + const Dtype kMomentum = 0.5; + const int kNumIters = 1; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 1.0; + const Dtype kWeightDecay = 0.0; + const Dtype kMomentum = 0.5; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverything) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; 
+<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.9; +======= + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.5; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.5; +>>>>>>> triplet data generation and network update + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 + const Dtype kWeightDecay = 0.1; +======= + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} +======= +>>>>>>> triplet data generation and network update + +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; +>>>>>>> triplet data generation and network update + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +TYPED_TEST(SGDSolverTest, TestSnapshot) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype 
kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(SGDSolverTest, TestSnapshotShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + this->share_ = true; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +======= +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} +>>>>>>> restore + +>>>>>>> triplet data generation and network update +template +class AdaGradSolverTest : public GradientBasedSolverTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + virtual void InitSolver(const SolverParameter& param) { + this->solver_.reset(new AdaGradSolver(param)); + } + virtual SolverParameter_SolverType solver_type() { + return SolverParameter_SolverType_ADAGRAD; + } +}; + +TYPED_TEST_CASE(AdaGradSolverTest, TestDtypesAndDevices); + +TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdate) { + this->TestLeastSquaresUpdate(); +} + +TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateLROneTenth) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + this->TestLeastSquaresUpdate(kLearningRate); +} + +TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateWithWeightDecay) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 1.0; + const Dtype kWeightDecay = 0.5; + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay); +} + +TYPED_TEST(AdaGradSolverTest, 
TestAdaGradLeastSquaresUpdateWithEverything) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.0; +======= + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +TYPED_TEST(AdaGradSolverTest, + TestAdaGradLeastSquaresUpdateWithEverythingShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; +>>>>>>> triplet data generation and network update + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(AdaGradSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.0; +======= + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; +======= +TYPED_TEST(AdaGradSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.1; + const Dtype kMomentum = 0.0; +>>>>>>> restore + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a +======= +>>>>>>> triplet data generation and network update + +TYPED_TEST(AdaGradSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 
0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; +>>>>>>> triplet data generation and network update + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +TYPED_TEST(AdaGradSolverTest, TestSnapshot) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(AdaGradSolverTest, TestSnapshotShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + this->share_ = true; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +======= +>>>>>>> restore + +>>>>>>> triplet data generation and network update +template +class NesterovSolverTest : public GradientBasedSolverTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + virtual void InitSolver(const SolverParameter& param) { + this->solver_.reset(new NesterovSolver(param)); + } + virtual SolverParameter_SolverType solver_type() { + return SolverParameter_SolverType_NESTEROV; + } +}; + +TYPED_TEST_CASE(NesterovSolverTest, TestDtypesAndDevices); + +TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdate) { + this->TestLeastSquaresUpdate(); +} + +TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateLROneTenth) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.1; + this->TestLeastSquaresUpdate(kLearningRate); +} + +TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithWeightDecay) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 1.0; + const Dtype 
kWeightDecay = 0.5; + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay); +} + +TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithMomentum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 1.0; + const Dtype kWeightDecay = 0.0; + const Dtype kMomentum = 0.5; + const int kNumIters = 1; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 1.0; + const Dtype kWeightDecay = 0.0; + const Dtype kMomentum = 0.5; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithEverything) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 + const Dtype kWeightDecay = 0.1; +======= + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +TYPED_TEST(NesterovSolverTest, + TestNesterovLeastSquaresUpdateWithEverythingShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; +>>>>>>> triplet data generation and network update + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.1; + const Dtype 
kMomentum = 0.9; + const int kNumIters = 4; +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithEverythingShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + this->share_ = true; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a +TYPED_TEST(AdamSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; +======= +TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.1; +>>>>>>> restore + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a +TYPED_TEST(AdamSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + const int kIterSize = 2; + this->share_ = true; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(AdamSolverTest, TestSnapshot) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, 
kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(AdamSolverTest, TestSnapshotShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + this->share_ = true; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +template +class RMSPropSolverTest : public GradientBasedSolverTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + virtual void InitSolver(const SolverParameter& param) { + const Dtype rms_decay = 0.95; + SolverParameter new_param = param; + new_param.set_rms_decay(rms_decay); + this->solver_.reset(new RMSPropSolver(new_param)); + } + virtual SolverParameter_SolverType solver_type() { + return SolverParameter_SolverType_RMSPROP; + } +}; + +TYPED_TEST_CASE(RMSPropSolverTest, TestDtypesAndDevices); + +TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithWeightDecay) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 1.0; + const Dtype kWeightDecay = 0.5; + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay); +} + +TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithRmsDecay) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.0; + const Dtype kMomentum = 0.0; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithEverything) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.0; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(RMSPropSolverTest, + 
TestRMSPropLeastSquaresUpdateWithEverythingShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.0; + const int kNumIters = 4; + this->share_ = true; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(RMSPropSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.0; + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(RMSPropSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.0; + const int kNumIters = 4; +>>>>>>> triplet data generation and network update + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +TYPED_TEST(RMSPropSolverTest, TestSnapshot) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(RMSPropSolverTest, TestSnapshotShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + this->share_ = true; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +======= +>>>>>>> triplet data generation and network update +======= +>>>>>>> 
restore +>>>>>>> triplet data generation and network update +} // namespace caffe diff --git a/tools/extract_features.cpp b/tools/extract_features.cpp deleted file mode 100644 index 084c9bf88df..00000000000 --- a/tools/extract_features.cpp +++ /dev/null @@ -1,189 +0,0 @@ -#include // for snprintf -#include -#include - -#include "boost/algorithm/string.hpp" -#include "google/protobuf/text_format.h" - -#include "caffe/blob.hpp" -#include "caffe/common.hpp" -#include "caffe/net.hpp" -#include "caffe/proto/caffe.pb.h" -#include "caffe/util/db.hpp" -#include "caffe/util/io.hpp" -#include "caffe/vision_layers.hpp" - -using caffe::Blob; -using caffe::Caffe; -using caffe::Datum; -using caffe::Net; -using boost::shared_ptr; -using std::string; -namespace db = caffe::db; - -template -int feature_extraction_pipeline(int argc, char** argv); - -int main(int argc, char** argv) { - return feature_extraction_pipeline(argc, argv); -// return feature_extraction_pipeline(argc, argv); -} - -template -int feature_extraction_pipeline(int argc, char** argv) { - ::google::InitGoogleLogging(argv[0]); - const int num_required_args = 7; - if (argc < num_required_args) { - LOG(ERROR)<< - "This program takes in a trained network and an input data layer, and then" - " extract features of the input data produced by the net.\n" - "Usage: extract_features pretrained_net_param" - " feature_extraction_proto_file extract_feature_blob_name1[,name2,...]" - " save_feature_dataset_name1[,name2,...] num_mini_batches db_type" - " [CPU/GPU] [DEVICE_ID=0]\n" - "Note: you can extract multiple features in one pass by specifying" - " multiple feature blob names and dataset names separated by ','." 
- " The names cannot contain white space characters and the number of blobs" - " and datasets must be equal."; - return 1; - } - int arg_pos = num_required_args; - - arg_pos = num_required_args; - if (argc > arg_pos && strcmp(argv[arg_pos], "GPU") == 0) { - LOG(ERROR)<< "Using GPU"; - uint device_id = 0; - if (argc > arg_pos + 1) { - device_id = atoi(argv[arg_pos + 1]); - CHECK_GE(device_id, 0); - } - LOG(ERROR) << "Using Device_id=" << device_id; - Caffe::SetDevice(device_id); - Caffe::set_mode(Caffe::GPU); - } else { - LOG(ERROR) << "Using CPU"; - Caffe::set_mode(Caffe::CPU); - } - - arg_pos = 0; // the name of the executable - std::string pretrained_binary_proto(argv[++arg_pos]); - - // Expected prototxt contains at least one data layer such as - // the layer data_layer_name and one feature blob such as the - // fc7 top blob to extract features. - /* - layers { - name: "data_layer_name" - type: DATA - data_param { - source: "/path/to/your/images/to/extract/feature/images_leveldb" - mean_file: "/path/to/your/image_mean.binaryproto" - batch_size: 128 - crop_size: 227 - mirror: false - } - top: "data_blob_name" - top: "label_blob_name" - } - layers { - name: "drop7" - type: DROPOUT - dropout_param { - dropout_ratio: 0.5 - } - bottom: "fc7" - top: "fc7" - } - */ - std::string feature_extraction_proto(argv[++arg_pos]); - shared_ptr > feature_extraction_net( - new Net(feature_extraction_proto, caffe::TEST)); - feature_extraction_net->CopyTrainedLayersFrom(pretrained_binary_proto); - - std::string extract_feature_blob_names(argv[++arg_pos]); - std::vector blob_names; - boost::split(blob_names, extract_feature_blob_names, boost::is_any_of(",")); - - std::string save_feature_dataset_names(argv[++arg_pos]); - std::vector dataset_names; - boost::split(dataset_names, save_feature_dataset_names, - boost::is_any_of(",")); - CHECK_EQ(blob_names.size(), dataset_names.size()) << - " the number of blob names and dataset names must be equal"; - size_t num_features = 
blob_names.size(); - - for (size_t i = 0; i < num_features; i++) { - CHECK(feature_extraction_net->has_blob(blob_names[i])) - << "Unknown feature blob name " << blob_names[i] - << " in the network " << feature_extraction_proto; - } - - int num_mini_batches = atoi(argv[++arg_pos]); - - std::vector > feature_dbs; - std::vector > txns; - const char* db_type = argv[++arg_pos]; - for (size_t i = 0; i < num_features; ++i) { - LOG(INFO)<< "Opening dataset " << dataset_names[i]; - shared_ptr db(db::GetDB(db_type)); - db->Open(dataset_names.at(i), db::NEW); - feature_dbs.push_back(db); - shared_ptr txn(db->NewTransaction()); - txns.push_back(txn); - } - - LOG(ERROR)<< "Extacting Features"; - - Datum datum; - const int kMaxKeyStrLength = 100; - char key_str[kMaxKeyStrLength]; - std::vector*> input_vec; - std::vector image_indices(num_features, 0); - for (int batch_index = 0; batch_index < num_mini_batches; ++batch_index) { - feature_extraction_net->Forward(input_vec); - for (int i = 0; i < num_features; ++i) { - const shared_ptr > feature_blob = feature_extraction_net - ->blob_by_name(blob_names[i]); - int batch_size = feature_blob->num(); - int dim_features = feature_blob->count() / batch_size; - const Dtype* feature_blob_data; - for (int n = 0; n < batch_size; ++n) { - datum.set_height(feature_blob->height()); - datum.set_width(feature_blob->width()); - datum.set_channels(feature_blob->channels()); - datum.clear_data(); - datum.clear_float_data(); - feature_blob_data = feature_blob->cpu_data() + - feature_blob->offset(n); - for (int d = 0; d < dim_features; ++d) { - datum.add_float_data(feature_blob_data[d]); - } - int length = snprintf(key_str, kMaxKeyStrLength, "%010d", - image_indices[i]); - string out; - CHECK(datum.SerializeToString(&out)); - txns.at(i)->Put(std::string(key_str, length), out); - ++image_indices[i]; - if (image_indices[i] % 1000 == 0) { - txns.at(i)->Commit(); - txns.at(i).reset(feature_dbs.at(i)->NewTransaction()); - LOG(ERROR)<< "Extracted features 
of " << image_indices[i] << - " query images for feature blob " << blob_names[i]; - } - } // for (int n = 0; n < batch_size; ++n) - } // for (int i = 0; i < num_features; ++i) - } // for (int batch_index = 0; batch_index < num_mini_batches; ++batch_index) - // write the last batch - for (int i = 0; i < num_features; ++i) { - if (image_indices[i] % 1000 != 0) { - txns.at(i)->Commit(); - } - LOG(ERROR)<< "Extracted features of " << image_indices[i] << - " query images for feature blob " << blob_names[i]; - feature_dbs.at(i)->Close(); - } - - LOG(ERROR)<< "Successfully extracted the features!"; - return 0; -} - From 53e1a28990a1bd0ee0b9d210fc2c19c6595126d4 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Thu, 30 Jul 2015 15:16:19 +0800 Subject: [PATCH 73/82] add pose information for training data arrangement in triplet training --- .../triplet/convert_3d_triplet_data.cpp.orig | 145 +++++++----------- 1 file changed, 52 insertions(+), 93 deletions(-) diff --git a/examples/triplet/convert_3d_triplet_data.cpp.orig b/examples/triplet/convert_3d_triplet_data.cpp.orig index 38d4a296d0d..c9e8c64ea6e 100644 --- a/examples/triplet/convert_3d_triplet_data.cpp.orig +++ b/examples/triplet/convert_3d_triplet_data.cpp.orig @@ -1,35 +1,23 @@ -<<<<<<< 165e1d595232eb2a908f62887bcf2d5e1743ed10 // Usage: // convert_3d_data input_image_file input_label_file output_db_file #include // NOLINT(readability/streams) #include +<<<<<<< fba1c0e4ee61d053e91d7a142130ae3a6855e182 #include "caffe/proto/caffe.pb.h" #include "caffe/util/math_functions.hpp" +======= +>>>>>>> add pose information for training data arrangement in triplet training #include "glog/logging.h" #include "google/protobuf/text_format.h" #include "leveldb/db.h" #include "math.h" #include "stdint.h" - +<<<<<<< fba1c0e4ee61d053e91d7a142130ae3a6855e182 ======= -// This script converts the MNIST dataset to the leveldb format used -// by caffe to train siamese network. 
-// Usage: -// convert_mnist_data input_image_file input_label_file output_db_file -// The MNIST dataset could be downloaded at -// http://yann.lecun.com/exdb/mnist/ -#include // NOLINT(readability/streams) -#include - -#include "glog/logging.h" -#include "google/protobuf/text_format.h" -#include "leveldb/db.h" -#include "stdint.h" - #include "caffe/proto/caffe.pb.h" #include "caffe/util/math_functions.hpp" +>>>>>>> add pose information for training data arrangement in triplet training ->>>>>>> add 3d network training param uint32_t swap_endian(uint32_t val) { val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); return (val << 16) | (val >> 16); @@ -37,7 +25,7 @@ uint32_t swap_endian(uint32_t val) { void read_image(std::ifstream* image_file, std::ifstream* label_file, uint32_t index, uint32_t rows, uint32_t cols, -<<<<<<< 165e1d595232eb2a908f62887bcf2d5e1743ed10 +<<<<<<< fba1c0e4ee61d053e91d7a142130ae3a6855e182 char* pixels, char* label_temp, signed char* label, int rgb_use) { if (rgb_use == 0) { image_file->seekg(index * rows * cols + 16); @@ -54,6 +42,15 @@ void read_image(std::ifstream* image_file, std::ifstream* label_file, for (int i = 0; i < 4; i++) *(label+i) = (signed char)*(label_temp+i); } +======= + char* pixels, char* label_temp, signed char* label) { + image_file->seekg(index * rows * cols + 16); + image_file->read(pixels, rows * cols); + label_file->seekg(index * 4 + 8); + label_file->read(label_temp, 4); + for (int i = 0; i < 4; i++) + *(label+i) = (signed char)*(label_temp+i); +>>>>>>> add pose information for training data arrangement in triplet training } void convert_dataset(const char* image_filename, const char* label_filename, @@ -61,17 +58,6 @@ void convert_dataset(const char* image_filename, const char* label_filename, const char* rgb_use) { int rgb_use1 = atoi(rgb_use); int class_num = atoi(class_number); -======= - char* pixels, char* label) { - image_file->seekg(index * rows * cols + 16); - image_file->read(pixels, rows * cols); - 
label_file->seekg(index + 8); - label_file->read(label, 1); -} - -void convert_dataset(const char* image_filename, const char* label_filename, - const char* db_filename) { ->>>>>>> add 3d network training param // Open files std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); @@ -89,11 +75,7 @@ void convert_dataset(const char* image_filename, const char* label_filename, CHECK_EQ(magic, 2051) << "Incorrect image file magic."; label_file.read(reinterpret_cast(&magic), 4); magic = swap_endian(magic); -<<<<<<< 165e1d595232eb2a908f62887bcf2d5e1743ed10 CHECK_EQ(magic, 2050) << "Incorrect label file magic."; -======= - CHECK_EQ(magic, 2049) << "Incorrect label file magic."; ->>>>>>> add 3d network training param image_file.read(reinterpret_cast(&num_items), 4); num_items = swap_endian(num_items); label_file.read(reinterpret_cast(&num_labels), 4); @@ -114,13 +96,13 @@ void convert_dataset(const char* image_filename, const char* label_filename, CHECK(status.ok()) << "Failed to open leveldb " << db_filename << ". 
Is it already existing?"; -<<<<<<< 165e1d595232eb2a908f62887bcf2d5e1743ed10 char* label_temp = new char[4]; // label for unsigned char* signed char* label_i = new signed char[4]; // label for triplet signed char* label_j = new signed char[4]; signed char* label_k = new signed char[4]; signed char* label_l = new signed char[4]; // label for pair wise signed char* label_m = new signed char[4]; +<<<<<<< fba1c0e4ee61d053e91d7a142130ae3a6855e182 int db_size; if (rgb_use1 == 0) db_size = rows * cols; @@ -131,30 +113,19 @@ void convert_dataset(const char* image_filename, const char* label_filename, char* pixels3 = new char[db_size]; char* pixels4 = new char[db_size]; char* pixels5 = new char[db_size]; - const int kMaxKeyLength = 10; - char key[kMaxKeyLength]; - std::string value; - caffe::Datum datum; - datum.set_channels(1); ======= - char label_i; // label for triplet - char label_j; - char label_k; - char label_l; // label for pair wise - char label_m; char* pixels = new char[5 * rows * cols]; +>>>>>>> add pose information for training data arrangement in triplet training const int kMaxKeyLength = 10; char key[kMaxKeyLength]; std::string value; - caffe::Datum datum; - datum.set_channels(5); // one channel for each image in the triplet and pair ->>>>>>> add 3d network training param + datum.set_channels(1); datum.set_height(rows); datum.set_width(cols); LOG(INFO) << "A total of " << num_items << " items."; LOG(INFO) << "Rows: " << rows << " Cols: " << cols; -<<<<<<< 165e1d595232eb2a908f62887bcf2d5e1743ed10 +<<<<<<< fba1c0e4ee61d053e91d7a142130ae3a6855e182 int counter = 0; for (unsigned int times = 0; times < 5; ++times) { // iteration in the samples of all class @@ -247,48 +218,31 @@ void convert_dataset(const char* image_filename, const char* label_filename, } // iteration in the samples of all class } // iteration in the samples in one class } // iteration in times - delete db; - delete pixels1; - delete pixels2; - delete pixels3; - delete pixels4; - delete pixels5; 
-} - -int main(int argc, char** argv) { - if (argc != 6) { - printf("This script converts the dataset to the leveldb format used\n" - "by caffe to train a triplet network.\n" - "Usage:\n" - " convert_3d_data input_image_file input_label_file " - "output_db_file class_number rgb_use \n"); - } else { - google::InitGoogleLogging(argv[0]); - convert_dataset(argv[1], argv[2], argv[3], argv[4], argv[5]); ======= - for (int itemid = 0; itemid < num_items; ++itemid) { - // pick triplet groups - int i = caffe::caffe_rng_rand() % num_items; + for (unsigned int itemid = 0; itemid < 10 * num_items; ++itemid) { + int i = caffe::caffe_rng_rand() % num_items; // pick triplet groups int j = caffe::caffe_rng_rand() % num_items; int k = caffe::caffe_rng_rand() % num_items; - // pick pair wise groups - int l = caffe::caffe_rng_rand() % num_items; + int l = caffe::caffe_rng_rand() % num_items; // pick pair wise groups int m = caffe::caffe_rng_rand() % num_items; - // read triplet groups - read_image(&image_file, &label_file, i, rows, cols, - pixels, &label_i); + read_image(&image_file, &label_file, i, rows, cols, // read triplet groups + pixels, label_temp, label_i); read_image(&image_file, &label_file, j, rows, cols, - pixels + (rows * cols), &label_j); + pixels + (rows * cols), label_temp, label_j); read_image(&image_file, &label_file, k, rows, cols, - pixels + (2 * rows * cols), &label_k); - // read pair wise groups - read_image(&image_file, &label_file, l, rows, cols, - pixels + (3 * rows * cols), &label_l); + pixels + (2 * rows * cols), label_temp, label_k); + read_image(&image_file, &label_file, l, rows, cols, // read pair wise groups + pixels + (3 * rows * cols), label_temp, label_l); read_image(&image_file, &label_file, m, rows, cols, - pixels + (4 * rows * cols), &label_m); + pixels + (4 * rows * cols), label_temp, label_m); datum.set_data(pixels, 5*rows*cols); // set data - if ((label_i == label_j && label_i != label_k) && (label_l == label_m)) { + bool pose_pass; + int 
dist_ij = (int)((*(label_i+1)-*(label_j+1))*(*(label_i+1)-*(label_j+1)) + (*(label_i+2)-*(label_j+2))*(*(label_i+2)-*(label_j+2)) + (*(label_i+3)-*(label_j+3))*(*(label_i+3)-*(label_j+3))); + int dist_ik = (int)((*(label_i+1)-*(label_k+1))*(*(label_i+1)-*(label_k+1)) + (*(label_i+2)-*(label_k+2))*(*(label_i+2)-*(label_k+2)) + (*(label_i+3)-*(label_k+3))*(*(label_i+3)-*(label_k+3))); + if (dist_ij < dist_ik ) + pose_pass = true; + if (((*label_i == *label_j && *label_i != *label_k) || ((*label_i == *label_j && *label_i == *label_k) && pose_pass)) && (*label_l == *label_m)) { datum.set_label(1); datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", itemid); @@ -298,25 +252,30 @@ int main(int argc, char** argv) { datum.set_label(0); } } - +>>>>>>> add pose information for training data arrangement in triplet training delete db; - delete pixels; + delete pixels1; + delete pixels2; + delete pixels3; + delete pixels4; + delete pixels5; } int main(int argc, char** argv) { - if (argc != 4) { - printf("This script converts the MNIST dataset to the leveldb format used\n" - "by caffe to train a siamese network.\n" +<<<<<<< fba1c0e4ee61d053e91d7a142130ae3a6855e182 + if (argc != 6) { + printf("This script converts the dataset to the leveldb format used\n" + "by caffe to train a triplet network.\n" "Usage:\n" - " convert_mnist_data input_image_file input_label_file " - "output_db_file\n" - "The MNIST dataset could be downloaded at\n" - " http://yann.lecun.com/exdb/mnist/\n" - "You should gunzip them after downloading.\n"); + " convert_3d_data input_image_file input_label_file " + "output_db_file class_number rgb_use \n"); } else { google::InitGoogleLogging(argv[0]); - convert_dataset(argv[1], argv[2], argv[3]); ->>>>>>> add 3d network training param + convert_dataset(argv[1], argv[2], argv[3], argv[4], argv[5]); } +======= + convert_dataset("/home/wangyida/Desktop/caffe/data/linemod/binary_image_train", 
"/home/wangyida/Desktop/caffe/data/linemod/binary_label_train", "/home/wangyida/Desktop/caffe/data/linemod/leveldb"); +>>>>>>> add pose information for training data arrangement in triplet training return 0; } + From bc4f4af55bcdd77b1e002f5165d7e43339c8bd11 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Fri, 31 Jul 2015 14:08:02 +0800 Subject: [PATCH 74/82] rearrange the training samples selection codes --- .../triplet/convert_3d_triplet_data.cpp.orig | 62 ++++++++----------- 1 file changed, 26 insertions(+), 36 deletions(-) diff --git a/examples/triplet/convert_3d_triplet_data.cpp.orig b/examples/triplet/convert_3d_triplet_data.cpp.orig index c9e8c64ea6e..1d6e726f2b8 100644 --- a/examples/triplet/convert_3d_triplet_data.cpp.orig +++ b/examples/triplet/convert_3d_triplet_data.cpp.orig @@ -2,21 +2,13 @@ // convert_3d_data input_image_file input_label_file output_db_file #include // NOLINT(readability/streams) #include -<<<<<<< fba1c0e4ee61d053e91d7a142130ae3a6855e182 #include "caffe/proto/caffe.pb.h" #include "caffe/util/math_functions.hpp" -======= ->>>>>>> add pose information for training data arrangement in triplet training #include "glog/logging.h" #include "google/protobuf/text_format.h" #include "leveldb/db.h" #include "math.h" #include "stdint.h" -<<<<<<< fba1c0e4ee61d053e91d7a142130ae3a6855e182 -======= -#include "caffe/proto/caffe.pb.h" -#include "caffe/util/math_functions.hpp" ->>>>>>> add pose information for training data arrangement in triplet training uint32_t swap_endian(uint32_t val) { val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); @@ -25,7 +17,6 @@ uint32_t swap_endian(uint32_t val) { void read_image(std::ifstream* image_file, std::ifstream* label_file, uint32_t index, uint32_t rows, uint32_t cols, -<<<<<<< fba1c0e4ee61d053e91d7a142130ae3a6855e182 char* pixels, char* label_temp, signed char* label, int rgb_use) { if (rgb_use == 0) { image_file->seekg(index * rows * cols + 16); @@ -42,15 +33,6 @@ void read_image(std::ifstream* image_file, 
std::ifstream* label_file, for (int i = 0; i < 4; i++) *(label+i) = (signed char)*(label_temp+i); } -======= - char* pixels, char* label_temp, signed char* label) { - image_file->seekg(index * rows * cols + 16); - image_file->read(pixels, rows * cols); - label_file->seekg(index * 4 + 8); - label_file->read(label_temp, 4); - for (int i = 0; i < 4; i++) - *(label+i) = (signed char)*(label_temp+i); ->>>>>>> add pose information for training data arrangement in triplet training } void convert_dataset(const char* image_filename, const char* label_filename, @@ -102,7 +84,6 @@ void convert_dataset(const char* image_filename, const char* label_filename, signed char* label_k = new signed char[4]; signed char* label_l = new signed char[4]; // label for pair wise signed char* label_m = new signed char[4]; -<<<<<<< fba1c0e4ee61d053e91d7a142130ae3a6855e182 int db_size; if (rgb_use1 == 0) db_size = rows * cols; @@ -113,9 +94,6 @@ void convert_dataset(const char* image_filename, const char* label_filename, char* pixels3 = new char[db_size]; char* pixels4 = new char[db_size]; char* pixels5 = new char[db_size]; -======= - char* pixels = new char[5 * rows * cols]; ->>>>>>> add pose information for training data arrangement in triplet training const int kMaxKeyLength = 10; char key[kMaxKeyLength]; std::string value; @@ -125,7 +103,7 @@ void convert_dataset(const char* image_filename, const char* label_filename, datum.set_width(cols); LOG(INFO) << "A total of " << num_items << " items."; LOG(INFO) << "Rows: " << rows << " Cols: " << cols; -<<<<<<< fba1c0e4ee61d053e91d7a142130ae3a6855e182 +<<<<<<< fab928843d2f9ac33ce9468a63ed163247ffdfc8 int counter = 0; for (unsigned int times = 0; times < 5; ++times) { // iteration in the samples of all class @@ -225,24 +203,41 @@ void convert_dataset(const char* image_filename, const char* label_filename, int k = caffe::caffe_rng_rand() % num_items; int l = caffe::caffe_rng_rand() % num_items; // pick pair wise groups int m = caffe::caffe_rng_rand() 
% num_items; - read_image(&image_file, &label_file, i, rows, cols, // read triplet groups + read_image(&image_file, &label_file, i, rows, cols, // read triplet pixels, label_temp, label_i); read_image(&image_file, &label_file, j, rows, cols, pixels + (rows * cols), label_temp, label_j); read_image(&image_file, &label_file, k, rows, cols, pixels + (2 * rows * cols), label_temp, label_k); - read_image(&image_file, &label_file, l, rows, cols, // read pair wise groups + read_image(&image_file, &label_file, l, rows, cols, // read pair wise pixels + (3 * rows * cols), label_temp, label_l); read_image(&image_file, &label_file, m, rows, cols, pixels + (4 * rows * cols), label_temp, label_m); datum.set_data(pixels, 5*rows*cols); // set data - bool pose_pass; - int dist_ij = (int)((*(label_i+1)-*(label_j+1))*(*(label_i+1)-*(label_j+1)) + (*(label_i+2)-*(label_j+2))*(*(label_i+2)-*(label_j+2)) + (*(label_i+3)-*(label_j+3))*(*(label_i+3)-*(label_j+3))); - int dist_ik = (int)((*(label_i+1)-*(label_k+1))*(*(label_i+1)-*(label_k+1)) + (*(label_i+2)-*(label_k+2))*(*(label_i+2)-*(label_k+2)) + (*(label_i+3)-*(label_k+3))*(*(label_i+3)-*(label_k+3))); + bool triplet_class_pass; + bool triplet_class_same; + bool triplet_pose_pass; + bool pair_class_pass; + int ij_x, ij_y, ij_z; + int ik_x, ik_y, ik_z; + ij_x = static_cast(*(label_i+1)-*(label_j+1))*(*(label_i+1)-*(label_j+1)); + ij_y = static_cast(*(label_i+2)-*(label_j+2))*(*(label_i+2)-*(label_j+2)); + ij_z = static_cast(*(label_i+3)-*(label_j+3))*(*(label_i+3)-*(label_j+3)); + ik_x = static_cast(*(label_i+1)-*(label_k+1))*(*(label_i+1)-*(label_k+1)); + ik_y = static_cast(*(label_i+2)-*(label_k+2))*(*(label_i+2)-*(label_k+2)); + ik_z = static_cast(*(label_i+3)-*(label_k+3))*(*(label_i+3)-*(label_k+3)); + int dist_ij = ij_x + ij_y + ij_z; + int dist_ik = ik_x + ik_y + ik_z; if (dist_ij < dist_ik ) - pose_pass = true; - if (((*label_i == *label_j && *label_i != *label_k) || ((*label_i == *label_j && *label_i == *label_k) && 
pose_pass)) && (*label_l == *label_m)) { + triplet_pose_pass = true; + if ((*label_i == *label_j) && (*label_i != *label_k)) + triplet_class_pass = true; + if ((*label_i == *label_j) && (*label_i == *label_k)) + triplet_class_same = true; + if (*label_l == *label_m) + pair_class_pass = true; + if (( triplet_class_pass || (triplet_class_same && triplet_pose_pass)) && pair_class_pass) { datum.set_label(1); datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", itemid); @@ -252,7 +247,7 @@ void convert_dataset(const char* image_filename, const char* label_filename, datum.set_label(0); } } ->>>>>>> add pose information for training data arrangement in triplet training +>>>>>>> rearrange the training samples selection codes delete db; delete pixels1; delete pixels2; @@ -262,7 +257,6 @@ void convert_dataset(const char* image_filename, const char* label_filename, } int main(int argc, char** argv) { -<<<<<<< fba1c0e4ee61d053e91d7a142130ae3a6855e182 if (argc != 6) { printf("This script converts the dataset to the leveldb format used\n" "by caffe to train a triplet network.\n" @@ -273,9 +267,5 @@ int main(int argc, char** argv) { google::InitGoogleLogging(argv[0]); convert_dataset(argv[1], argv[2], argv[3], argv[4], argv[5]); } -======= - convert_dataset("/home/wangyida/Desktop/caffe/data/linemod/binary_image_train", "/home/wangyida/Desktop/caffe/data/linemod/binary_label_train", "/home/wangyida/Desktop/caffe/data/linemod/leveldb"); ->>>>>>> add pose information for training data arrangement in triplet training return 0; } - From 704886bcf32ac18713342bb2aa5c8994315f1c30 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Sun, 2 Aug 2015 10:36:54 +0800 Subject: [PATCH 75/82] the soft style triplet loss as added in triplet loss layer as an option the soft style triplet loss as added in triplet loss layer as an option --- .../convert_3d_triplet_data.cpp.orig.orig | 334 ++++ .../triplet/create_3d_triplet.sh.orig.orig | 39 + include/caffe/data_layers.hpp.orig.orig.orig | 
464 +++++ include/caffe/loss_layers.hpp.orig.orig | 872 +++++++++ .../triplet_loss_layer.cpp.orig.orig.orig | 626 ++++++ .../triplet_loss_layer.cu.orig.orig.orig | 728 +++++++ src/caffe/proto/caffe.proto.orig.orig | 1598 ++++++++++++++++ src/caffe/solver.cpp.orig.orig.orig | 1692 +++++++++++++++++ 8 files changed, 6353 insertions(+) create mode 100644 examples/triplet/convert_3d_triplet_data.cpp.orig.orig create mode 100755 examples/triplet/create_3d_triplet.sh.orig.orig create mode 100644 include/caffe/data_layers.hpp.orig.orig.orig create mode 100644 include/caffe/loss_layers.hpp.orig.orig create mode 100644 src/caffe/layers/triplet_loss_layer.cpp.orig.orig.orig create mode 100644 src/caffe/layers/triplet_loss_layer.cu.orig.orig.orig create mode 100644 src/caffe/proto/caffe.proto.orig.orig create mode 100644 src/caffe/solver.cpp.orig.orig.orig diff --git a/examples/triplet/convert_3d_triplet_data.cpp.orig.orig b/examples/triplet/convert_3d_triplet_data.cpp.orig.orig new file mode 100644 index 00000000000..cb8ab886ab6 --- /dev/null +++ b/examples/triplet/convert_3d_triplet_data.cpp.orig.orig @@ -0,0 +1,334 @@ +<<<<<<< 165e1d595232eb2a908f62887bcf2d5e1743ed10 +// Usage: +// convert_3d_data input_image_file input_label_file output_db_file +#include // NOLINT(readability/streams) +#include +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/math_functions.hpp" +#include "glog/logging.h" +#include "google/protobuf/text_format.h" +#include "leveldb/db.h" +#include "math.h" +#include "stdint.h" + +======= +// This script converts the MNIST dataset to the leveldb format used +// by caffe to train siamese network. 
+// Usage: +// convert_mnist_data input_image_file input_label_file output_db_file +// The MNIST dataset could be downloaded at +// http://yann.lecun.com/exdb/mnist/ +#include // NOLINT(readability/streams) +#include + +#include "glog/logging.h" +#include "google/protobuf/text_format.h" +#include "leveldb/db.h" +#include "stdint.h" + +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/math_functions.hpp" + +>>>>>>> add 3d network training param +uint32_t swap_endian(uint32_t val) { + val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); + return (val << 16) | (val >> 16); +} + +void read_image(std::ifstream* image_file, std::ifstream* label_file, + uint32_t index, uint32_t rows, uint32_t cols, +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f + char* pixels, char* label_temp, signed char* label) { + image_file->seekg(index * rows * cols + 16); + image_file->read(pixels, rows * cols); + label_file->seekg(index * 4 + 8); + label_file->read(label_temp, 4); + for (int i = 0; i < 4; i++) + *(label+i) = (signed char)*(label_temp+i); +======= +<<<<<<< 165e1d595232eb2a908f62887bcf2d5e1743ed10 + char* pixels, char* label_temp, signed char* label, int rgb_use) { + if (rgb_use == 0) { + image_file->seekg(index * rows * cols + 16); + image_file->read(pixels, rows * cols); + label_file->seekg(index * 4 + 8); + label_file->read(label_temp, 4); + for (int i = 0; i < 4; i++) + *(label+i) = (signed char)*(label_temp+i); + } else { + image_file->seekg(3 * index * rows * cols + 16); + image_file->read(pixels, 3 * rows * cols); + label_file->seekg(index * 4 + 8); + label_file->read(label_temp, 4); + for (int i = 0; i < 4; i++) + *(label+i) = (signed char)*(label_temp+i); + } +>>>>>>> add 3d network training param +} + +void convert_dataset(const char* image_filename, const char* label_filename, + const char* db_filename, const char* class_number) { + int class_num = atoi(class_number); +======= + char* pixels, char* label) { + image_file->seekg(index * rows * cols + 16); + 
image_file->read(pixels, rows * cols); + label_file->seekg(index + 8); + label_file->read(label, 1); +} + +void convert_dataset(const char* image_filename, const char* label_filename, + const char* db_filename) { +>>>>>>> add 3d network training param + // Open files + std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); + std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); + CHECK(image_file) << "Unable to open file " << image_filename; + CHECK(label_file) << "Unable to open file " << label_filename; + // Read the magic and the meta data + uint32_t magic; + uint32_t num_items; + uint32_t num_labels; + uint32_t rows; + uint32_t cols; + + image_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + CHECK_EQ(magic, 2051) << "Incorrect image file magic."; + label_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); +<<<<<<< 165e1d595232eb2a908f62887bcf2d5e1743ed10 + CHECK_EQ(magic, 2050) << "Incorrect label file magic."; +======= + CHECK_EQ(magic, 2049) << "Incorrect label file magic."; +>>>>>>> add 3d network training param + image_file.read(reinterpret_cast(&num_items), 4); + num_items = swap_endian(num_items); + label_file.read(reinterpret_cast(&num_labels), 4); + num_labels = swap_endian(num_labels); + CHECK_EQ(num_items, num_labels); + image_file.read(reinterpret_cast(&rows), 4); + rows = swap_endian(rows); + image_file.read(reinterpret_cast(&cols), 4); + cols = swap_endian(cols); + + // Open leveldb + leveldb::DB* db; + leveldb::Options options; + options.create_if_missing = true; + options.error_if_exists = true; + leveldb::Status status = leveldb::DB::Open( + options, db_filename, &db); + CHECK(status.ok()) << "Failed to open leveldb " << db_filename + << ". 
Is it already existing?"; + +<<<<<<< 165e1d595232eb2a908f62887bcf2d5e1743ed10 + char* label_temp = new char[4]; // label for unsigned char* + signed char* label_i = new signed char[4]; // label for triplet + signed char* label_j = new signed char[4]; + signed char* label_k = new signed char[4]; + signed char* label_l = new signed char[4]; // label for pair wise + signed char* label_m = new signed char[4]; + char* pixels1 = new char[rows * cols]; + char* pixels2 = new char[rows * cols]; + char* pixels3 = new char[rows * cols]; + char* pixels4 = new char[rows * cols]; + char* pixels5 = new char[rows * cols]; + const int kMaxKeyLength = 10; + char key[kMaxKeyLength]; + std::string value; + caffe::Datum datum; + datum.set_channels(1); +======= + char label_i; // label for triplet + char label_j; + char label_k; + char label_l; // label for pair wise + char label_m; + char* pixels = new char[5 * rows * cols]; + const int kMaxKeyLength = 10; + char key[kMaxKeyLength]; + std::string value; + + caffe::Datum datum; + datum.set_channels(5); // one channel for each image in the triplet and pair +>>>>>>> add 3d network training param + datum.set_height(rows); + datum.set_width(cols); + LOG(INFO) << "A total of " << num_items << " items."; + LOG(INFO) << "Rows: " << rows << " Cols: " << cols; +<<<<<<< 165e1d595232eb2a908f62887bcf2d5e1743ed10 + int counter = 0; + for (unsigned int times = 0; times < 5; ++times) { + // iteration in the samples of all class + for (unsigned int itemid = 0; itemid < num_items/class_num; ++itemid) { + // iteration in the samples in one class + for (unsigned int class_ind = 0; class_ind < class_num; ++class_ind) { + // use reference sample one by one at each iteration + int i = itemid % num_items + class_ind*num_items/class_num; + int j = caffe::caffe_rng_rand() % num_items; // pick triplet groups + int k = caffe::caffe_rng_rand() % num_items; + int l = caffe::caffe_rng_rand() % num_items; // pick pair wise groups + int m = caffe::caffe_rng_rand() % 
num_items; + read_image(&image_file, &label_file, i, rows, cols, // read triplet + pixels1, label_temp, label_i); + read_image(&image_file, &label_file, j, rows, cols, + pixels2, label_temp, label_j); + read_image(&image_file, &label_file, k, rows, cols, + pixels3, label_temp, label_k); + read_image(&image_file, &label_file, l, rows, cols, // read pair wise + pixels4, label_temp, label_l); + read_image(&image_file, &label_file, m, rows, cols, + pixels5, label_temp, label_m); + + bool pair_pass = false; + bool triplet1_pass = false; + bool triplet2_pass = false; + bool triplet3_class_same = false; + bool triplet3_pass = false; + + int ij_diff_x = static_cast(*(label_i+1)-*(label_j+1)); + int ij_diff_y = static_cast(*(label_i+2)-*(label_j+2)); + int ij_diff_z = static_cast(*(label_i+3)-*(label_j+3)); + int im_diff_x = static_cast(*(label_i+1)-*(label_m+1)); + int im_diff_y = static_cast(*(label_i+2)-*(label_m+2)); + int im_diff_z = static_cast(*(label_i+3)-*(label_m+3)); + + int ij_x = ij_diff_x*ij_diff_x; + int ij_y = ij_diff_y*ij_diff_y; + int ij_z = ij_diff_z*ij_diff_z; + int im_x = im_diff_x*im_diff_x; + int im_y = im_diff_y*im_diff_y; + int im_z = im_diff_z*im_diff_z; + + float dist_ij = std::sqrt(ij_x + ij_y + ij_z); + float dist_im = std::sqrt(im_x + im_y + im_z); + if (*label_i == *label_j && dist_ij < 100/2) + pair_pass = true; + if (pair_pass && (*label_i != *label_k)) + triplet1_pass = true; + if (pair_pass && (*label_i != *label_l)) + triplet2_pass = true; + if (pair_pass && (*label_i == *label_m)) + triplet3_class_same = true; + if (triplet3_class_same && dist_im > 100*sqrt(2)) + triplet3_pass = true; + if (pair_pass && triplet1_pass && triplet2_pass && triplet3_pass) { + datum.set_data(pixels1, rows*cols); // set data + datum.set_label(static_cast(*label_i)); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", counter); + db->Put(leveldb::WriteOptions(), std::string(key), value); + counter++; + datum.set_data(pixels2, rows*cols); 
// set data + datum.set_label(static_cast(*label_j)); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", counter); + db->Put(leveldb::WriteOptions(), std::string(key), value); + counter++; + datum.set_data(pixels3, rows*cols); // set data + datum.set_label(static_cast(*label_k)); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", counter); + db->Put(leveldb::WriteOptions(), std::string(key), value); + counter++; + datum.set_data(pixels4, rows*cols); // set data + datum.set_label(static_cast(*label_l)); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", counter); + db->Put(leveldb::WriteOptions(), std::string(key), value); + counter++; + datum.set_data(pixels5, rows*cols); // set data + datum.set_label(static_cast(*label_m)); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", counter); + db->Put(leveldb::WriteOptions(), std::string(key), value); + counter++; + } else { + class_ind--; + } + } // iteration in the samples of all class + } // iteration in the samples in one class + } // iteration in times + delete db; + delete pixels1; + delete pixels2; + delete pixels3; + delete pixels4; + delete pixels5; +} + +int main(int argc, char** argv) { +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f + if (argc != 5) { + printf("This script converts the images dataset to the leveldb format used\n" +======= + if (argc != 6) { + printf("This script converts the dataset to the leveldb format used\n" +>>>>>>> add 3d network training param + "by caffe to train a triplet network.\n" + "Usage:\n" + " convert_3d_data input_image_file input_label_file " + "output_db_file class_number\n"); + } else { + google::InitGoogleLogging(argv[0]); +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f + convert_dataset(argv[1], argv[2], argv[3], argv[4]); +======= + convert_dataset(argv[1], argv[2], argv[3], argv[4], argv[5]); +======= + for (int itemid = 0; itemid < num_items; ++itemid) { + // pick triplet 
groups + int i = caffe::caffe_rng_rand() % num_items; + int j = caffe::caffe_rng_rand() % num_items; + int k = caffe::caffe_rng_rand() % num_items; + // pick pair wise groups + int l = caffe::caffe_rng_rand() % num_items; + int m = caffe::caffe_rng_rand() % num_items; + // read triplet groups + read_image(&image_file, &label_file, i, rows, cols, + pixels, &label_i); + read_image(&image_file, &label_file, j, rows, cols, + pixels + (rows * cols), &label_j); + read_image(&image_file, &label_file, k, rows, cols, + pixels + (2 * rows * cols), &label_k); + // read pair wise groups + read_image(&image_file, &label_file, l, rows, cols, + pixels + (3 * rows * cols), &label_l); + read_image(&image_file, &label_file, m, rows, cols, + pixels + (4 * rows * cols), &label_m); + + datum.set_data(pixels, 5*rows*cols); // set data + if ((label_i == label_j && label_i != label_k) && (label_l == label_m)) { + datum.set_label(1); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", itemid); + db->Put(leveldb::WriteOptions(), std::string(key), value); + } else { + itemid--; + datum.set_label(0); + } + } + + delete db; + delete pixels; +} + +int main(int argc, char** argv) { + if (argc != 4) { + printf("This script converts the MNIST dataset to the leveldb format used\n" + "by caffe to train a siamese network.\n" + "Usage:\n" + " convert_mnist_data input_image_file input_label_file " + "output_db_file\n" + "The MNIST dataset could be downloaded at\n" + " http://yann.lecun.com/exdb/mnist/\n" + "You should gunzip them after downloading.\n"); + } else { + google::InitGoogleLogging(argv[0]); + convert_dataset(argv[1], argv[2], argv[3]); +>>>>>>> add 3d network training param +>>>>>>> add 3d network training param + } + return 0; +} diff --git a/examples/triplet/create_3d_triplet.sh.orig.orig b/examples/triplet/create_3d_triplet.sh.orig.orig new file mode 100755 index 00000000000..f8ac44ba37b --- /dev/null +++ b/examples/triplet/create_3d_triplet.sh.orig.orig @@ -0,0 
+1,39 @@ +#!/usr/bin/env sh +# This script converts the mnist data into leveldb format. + +EXAMPLES=./build/examples/triplet +DATA=./data/linemod + +echo "Creating leveldb..." + +rm -rf ./examples/triplet/3d_triplet_train_leveldb +rm -rf ./examples/triplet/3d_triplet_test_leveldb + +$EXAMPLES/convert_3d_triplet_data.bin \ + $DATA/binary_image_train \ + $DATA/binary_label_train \ +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= +<<<<<<< 165e1d595232eb2a908f62887bcf2d5e1743ed10 + ./examples/triplet/3d_triplet_train_leveldb \ + 4 \ + 0 +$EXAMPLES/convert_3d_triplet_data.bin \ + $DATA/binary_image_test \ + $DATA/binary_label_test \ + ./examples/triplet/3d_triplet_test_leveldb \ + 4 \ + 0 +======= +>>>>>>> add 3d network training param + ./examples/triplet/3d_triplet_train_leveldb +$EXAMPLES/convert_3d_triplet_data.bin \ + $DATA/binary_image_test \ + $DATA/binary_label_test \ + ./examples/triplet/3d_triplet_test_leveldb +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= +>>>>>>> add 3d network training param +>>>>>>> add 3d network training param + +echo "Done." 
diff --git a/include/caffe/data_layers.hpp.orig.orig.orig b/include/caffe/data_layers.hpp.orig.orig.orig new file mode 100644 index 00000000000..4059642d86e --- /dev/null +++ b/include/caffe/data_layers.hpp.orig.orig.orig @@ -0,0 +1,464 @@ +#ifndef CAFFE_DATA_LAYERS_HPP_ +#define CAFFE_DATA_LAYERS_HPP_ + +#include +#include +#include +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< ca976a2b520c8024cc7f41a048d2c1abe73f6022 +======= + +#include "boost/scoped_ptr.hpp" +>>>>>>> macro define in upgrade_proto +======= + +>>>>>>> add 3d network training param +#include "hdf5.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= +#include "caffe/data_reader.hpp" +>>>>>>> add 3d network training param +#include "caffe/data_transformer.hpp" +#include "caffe/filler.hpp" +#include "caffe/internal_thread.hpp" +#include "caffe/layer.hpp" +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +#include "caffe/net.hpp" +#include "caffe/proto/caffe.pb.h" +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +#include "caffe/net.hpp" +>>>>>>> triplet data generation and network update +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/blocking_queue.hpp" +>>>>>>> add 3d network training param +#include "caffe/util/db.hpp" + +namespace caffe { + +/** + * @brief Provides base for data layers that feed blobs to the Net. + * + * TODO(dox): thorough documentation for Forward and proto params. 
+ */ +template +class BaseDataLayer : public Layer { + public: + explicit BaseDataLayer(const LayerParameter& param); +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f + virtual ~BaseDataLayer() {} +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= + virtual ~BaseDataLayer() {} +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param + // LayerSetUp: implements common data layer setup functionality, and calls + // DataLayerSetUp to do special data layer setup for individual layer types. + // This method may not be overridden except by the BasePrefetchingDataLayer. + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= + // Data layers should be shared by multiple solvers in parallel + virtual inline bool ShareInParallel() const { return true; } +>>>>>>> add 3d network training param + virtual void DataLayerSetUp(const vector*>& bottom, + const vector*>& top) {} + // Data layers have no bottoms, so reshaping is trivial. 
+ virtual void Reshape(const vector*>& bottom, + const vector*>& top) {} + + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + + protected: + TransformationParameter transform_param_; + shared_ptr > data_transformer_; + bool output_labels_; +}; + +template +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +class BasePrefetchingDataLayer : + public BaseDataLayer, public InternalThread { + public: + explicit BasePrefetchingDataLayer(const LayerParameter& param) + : BaseDataLayer(param) {} +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +======= +======= +======= +>>>>>>> add 3d network training param +class Batch { + public: + Blob data_, label_; +}; + +template +class BasePrefetchingDataLayer : + public BaseDataLayer, public InternalThread { + public: +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< 4d8130bab0c7311e241e40f504e5fdbb4999bd84 +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param + explicit BasePrefetchingDataLayer(const LayerParameter& param); +======= + explicit BasePrefetchingDataLayer(const LayerParameter& param) + : BaseDataLayer(param) {} +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da +>>>>>>> macro define in upgrade_proto +>>>>>>> add initiate class name of triplet loss layer +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +>>>>>>> macro define in upgrade_proto +>>>>>>> add 3d network training param +======= + virtual ~BasePrefetchingDataLayer() {} +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +>>>>>>> 
triplet data generation and network update +======= +>>>>>>> add 3d network training param + // LayerSetUp: implements common data layer setup functionality, and calls + // DataLayerSetUp to do special data layer setup for individual layer types. + // This method may not be overridden. + void LayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f + virtual void CreatePrefetchThread(); + virtual void JoinPrefetchThread(); + // The thread's function + virtual void InternalThreadEntry() {} + + protected: + Blob prefetch_data_; + Blob prefetch_label_; +======= + // Prefetches batches (asynchronously if to GPU memory) + static const int PREFETCH_COUNT = 3; + + protected: + virtual void InternalThreadEntry(); + virtual void load_batch(Batch* batch) = 0; + + Batch prefetch_[PREFETCH_COUNT]; + BlockingQueue*> prefetch_free_; + BlockingQueue*> prefetch_full_; + +>>>>>>> add 3d network training param + Blob transformed_data_; +}; + +template +class DataLayer : public BasePrefetchingDataLayer { + public: +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f + explicit DataLayer(const LayerParameter& param) + : BasePrefetchingDataLayer(param) {} + virtual ~DataLayer(); + virtual void DataLayerSetUp(const vector*>& bottom, + const vector*>& top); + +======= + explicit DataLayer(const LayerParameter& param); + virtual ~DataLayer(); + virtual void DataLayerSetUp(const vector*>& bottom, + const vector*>& top); + // DataLayer uses DataReader instead for sharing for parallelism + virtual inline bool ShareInParallel() const { return false; } +>>>>>>> add 3d network training param + virtual inline const char* type() const { return "Data"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int MinTopBlobs() const { return 1; } + virtual inline int 
MaxTopBlobs() const { return 2; } + + protected: +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f + virtual void InternalThreadEntry(); + + shared_ptr db_; + shared_ptr cursor_; +======= + virtual void load_batch(Batch* batch); + + DataReader reader_; +>>>>>>> add 3d network training param +}; + +/** + * @brief Provides data to the Net generated by a Filler. + * + * TODO(dox): thorough documentation for Forward and proto params. + */ +template +class DummyDataLayer : public Layer { + public: + explicit DummyDataLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= + // Data layers should be shared by multiple solvers in parallel + virtual inline bool ShareInParallel() const { return true; } +>>>>>>> add 3d network training param + // Data layers have no bottoms, so reshaping is trivial. + virtual void Reshape(const vector*>& bottom, + const vector*>& top) {} + + virtual inline const char* type() const { return "DummyData"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int MinTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + + vector > > fillers_; + vector refill_; +}; + +/** + * @brief Provides data to the Net from HDF5 files. + * + * TODO(dox): thorough documentation for Forward and proto params. 
+ */ +template +class HDF5DataLayer : public Layer { + public: + explicit HDF5DataLayer(const LayerParameter& param) + : Layer(param) {} + virtual ~HDF5DataLayer(); + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= + // Data layers should be shared by multiple solvers in parallel + virtual inline bool ShareInParallel() const { return true; } +>>>>>>> add 3d network training param + // Data layers have no bottoms, so reshaping is trivial. + virtual void Reshape(const vector*>& bottom, + const vector*>& top) {} + + virtual inline const char* type() const { return "HDF5Data"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int MinTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) {} + virtual void LoadHDF5FileData(const char* filename); + + std::vector hdf_filenames_; + unsigned int num_files_; + unsigned int current_file_; + hsize_t current_row_; + std::vector > > hdf_blobs_; + std::vector data_permutation_; + std::vector file_permutation_; +}; + +/** + * @brief Write blobs to disk as HDF5 files. + * + * TODO(dox): thorough documentation for Forward and proto params. 
+ */ +template +class HDF5OutputLayer : public Layer { + public: + explicit HDF5OutputLayer(const LayerParameter& param) + : Layer(param), file_opened_(false) {} + virtual ~HDF5OutputLayer(); + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= + // Data layers should be shared by multiple solvers in parallel + virtual inline bool ShareInParallel() const { return true; } +>>>>>>> add 3d network training param + // Data layers have no bottoms, so reshaping is trivial. + virtual void Reshape(const vector*>& bottom, + const vector*>& top) {} + + virtual inline const char* type() const { return "HDF5Output"; } + // TODO: no limit on the number of blobs + virtual inline int ExactNumBottomBlobs() const { return 2; } + virtual inline int ExactNumTopBlobs() const { return 0; } + + inline std::string file_name() const { return file_name_; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void SaveBlobs(); + + bool file_opened_; + std::string file_name_; + hid_t file_id_; + Blob data_blob_; + Blob label_blob_; +}; + +/** + * @brief Provides data to the Net from image files. + * + * TODO(dox): thorough documentation for Forward and proto params. 
+ */ +template +class ImageDataLayer : public BasePrefetchingDataLayer { + public: + explicit ImageDataLayer(const LayerParameter& param) + : BasePrefetchingDataLayer(param) {} + virtual ~ImageDataLayer(); + virtual void DataLayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "ImageData"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int ExactNumTopBlobs() const { return 2; } + + protected: + shared_ptr prefetch_rng_; + virtual void ShuffleImages(); +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f + virtual void InternalThreadEntry(); +======= + virtual void load_batch(Batch* batch); +>>>>>>> add 3d network training param + + vector > lines_; + int lines_id_; +}; + +/** + * @brief Provides data to the Net from memory. + * + * TODO(dox): thorough documentation for Forward and proto params. + */ +template +class MemoryDataLayer : public BaseDataLayer { + public: + explicit MemoryDataLayer(const LayerParameter& param) + : BaseDataLayer(param), has_new_data_(false) {} + virtual void DataLayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "MemoryData"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int ExactNumTopBlobs() const { return 2; } + + virtual void AddDatumVector(const vector& datum_vector); +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +#ifdef USE_OPENCV + virtual void AddMatVector(const vector& mat_vector, + const vector& labels); +#endif // USE_OPENCV +======= + virtual void AddMatVector(const vector& mat_vector, + const vector& labels); +>>>>>>> add 3d network training param + + // Reset should accept const pointers, but can't, because the memory + // will be given to Blob, which is mutable + void Reset(Dtype* data, Dtype* label, int n); + void set_batch_size(int new_size); + + int batch_size() { return batch_size_; } + int channels() { return channels_; } + int 
height() { return height_; } + int width() { return width_; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + + int batch_size_, channels_, height_, width_, size_; + Dtype* data_; + Dtype* labels_; + int n_; + size_t pos_; + Blob added_data_; + Blob added_label_; + bool has_new_data_; +}; + +/** + * @brief Provides data to the Net from windows of images files, specified + * by a window data file. + * + * TODO(dox): thorough documentation for Forward and proto params. + */ +template +class WindowDataLayer : public BasePrefetchingDataLayer { + public: + explicit WindowDataLayer(const LayerParameter& param) + : BasePrefetchingDataLayer(param) {} + virtual ~WindowDataLayer(); + virtual void DataLayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "WindowData"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int ExactNumTopBlobs() const { return 2; } + + protected: + virtual unsigned int PrefetchRand(); +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f + virtual void InternalThreadEntry(); +======= + virtual void load_batch(Batch* batch); +>>>>>>> add 3d network training param + + shared_ptr prefetch_rng_; + vector > > image_database_; + enum WindowField { IMAGE_INDEX, LABEL, OVERLAP, X1, Y1, X2, Y2, NUM }; + vector > fg_windows_; + vector > bg_windows_; + Blob data_mean_; + vector mean_values_; + bool has_mean_file_; + bool has_mean_values_; + bool cache_images_; + vector > image_database_cache_; +}; + +} // namespace caffe + +#endif // CAFFE_DATA_LAYERS_HPP_ diff --git a/include/caffe/loss_layers.hpp.orig.orig b/include/caffe/loss_layers.hpp.orig.orig new file mode 100644 index 00000000000..c3242b49761 --- /dev/null +++ b/include/caffe/loss_layers.hpp.orig.orig @@ -0,0 +1,872 @@ +#ifndef CAFFE_LOSS_LAYERS_HPP_ +#define CAFFE_LOSS_LAYERS_HPP_ + +#include +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" 
+#include "caffe/layer.hpp" +#include "caffe/neuron_layers.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +const float kLOG_THRESHOLD = 1e-20; + +/** + * @brief Computes the classification accuracy for a one-of-many + * classification task. + */ +template +class AccuracyLayer : public Layer { + public: + /** + * @param param provides AccuracyParameter accuracy_param, + * with AccuracyLayer options: + * - top_k (\b optional, default 1). + * Sets the maximum rank @f$ k @f$ at which a prediction is considered + * correct. For example, if @f$ k = 5 @f$, a prediction is counted + * correct if the correct label is among the top 5 predicted labels. + */ + explicit AccuracyLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Accuracy"; } + virtual inline int ExactNumBottomBlobs() const { return 2; } + + // If there are two top blobs, then the second blob will contain + // accuracies per class. + virtual inline int MinTopBlobs() const { return 1; } + virtual inline int MaxTopBlos() const { return 2; } + + protected: + /** + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ x @f$, a Blob with values in + * @f$ [-\infty, +\infty] @f$ indicating the predicted score for each of + * the @f$ K = CHW @f$ classes. 
Each @f$ x_n @f$ is mapped to a predicted + * label @f$ \hat{l}_n @f$ given by its maximal index: + * @f$ \hat{l}_n = \arg\max\limits_k x_{nk} @f$ + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels @f$ l @f$, an integer-valued Blob with values + * @f$ l_n \in [0, 1, 2, ..., K - 1] @f$ + * indicating the correct class label among the @f$ K @f$ classes + * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * the computed accuracy: @f$ + * \frac{1}{N} \sum\limits_{n=1}^N \delta\{ \hat{l}_n = l_n \} + * @f$, where @f$ + * \delta\{\mathrm{condition}\} = \left\{ + * \begin{array}{lr} + * 1 & \mbox{if condition} \\ + * 0 & \mbox{otherwise} + * \end{array} \right. + * @f$ + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + + + /// @brief Not implemented -- AccuracyLayer cannot be used as a loss. + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + for (int i = 0; i < propagate_down.size(); ++i) { + if (propagate_down[i]) { NOT_IMPLEMENTED; } + } + } + + int label_axis_, outer_num_, inner_num_; + + int top_k_; + + /// Whether to ignore instances with a certain label. + bool has_ignore_label_; + /// The label indicating that an instance should be ignored. + int ignore_label_; + /// Keeps counts of the number of samples per class. + Blob nums_buffer_; +}; + +/** + * @brief An interface for Layer%s that take two Blob%s as input -- usually + * (1) predictions and (2) ground-truth labels -- and output a + * singleton Blob representing the loss. + * + * LossLayers are typically only capable of backpropagating to their first input + * -- the predictions. 
+ */ +template +class LossLayer : public Layer { + public: + explicit LossLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp( + const vector*>& bottom, const vector*>& top); + virtual void Reshape( + const vector*>& bottom, const vector*>& top); + + virtual inline int ExactNumBottomBlobs() const { return 2; } + + /** + * @brief For convenience and backwards compatibility, instruct the Net to + * automatically allocate a single top Blob for LossLayers, into which + * they output their singleton loss, (even if the user didn't specify + * one in the prototxt, etc.). + */ + virtual inline bool AutoTopBlobs() const { return true; } + virtual inline int ExactNumTopBlobs() const { return 1; } + /** + * We usually cannot backpropagate to the labels; ignore force_backward for + * these inputs. + */ + virtual inline bool AllowForceBackward(const int bottom_index) const { + return bottom_index != 1; + } +}; + +/** + * @brief Computes the contrastive loss @f$ + * E = \frac{1}{2N} \sum\limits_{n=1}^N \left(y\right) d + + * \left(1-y\right) \max \left(margin-d, 0\right)^2 + * @f$ where @f$ + * d = \left| \left| a_n - b_n \right| \right|_2 @f$. This can be + * used to train siamese networks. + * + * @param bottom input Blob vector (length 3) + * -# @f$ (N \times C \times 1 \times 1) @f$ + * the features @f$ a \in [-\infty, +\infty]@f$ + * -# @f$ (N \times C \times 1 \times 1) @f$ + * the features @f$ b \in [-\infty, +\infty]@f$ + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the binary similarity @f$ s \in [0, 1]@f$ + * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * the computed contrastive loss: @f$ E = + * \frac{1}{2N} \sum\limits_{n=1}^N \left(y\right) d + + * \left(1-y\right) \max \left(margin-d, 0\right)^2 + * @f$ where @f$ + * d = \left| \left| a_n - b_n \right| \right|_2 @f$. + * This can be used to train siamese networks. 
+ */ +template +class ContrastiveLossLayer : public LossLayer { + public: + explicit ContrastiveLossLayer(const LayerParameter& param) + : LossLayer(param), diff_() {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual inline int ExactNumBottomBlobs() const { return 3; } + virtual inline const char* type() const { return "ContrastiveLoss"; } + /** + * Unlike most loss layers, in the ContrastiveLossLayer we can backpropagate + * to the first two inputs. + */ + virtual inline bool AllowForceBackward(const int bottom_index) const { + return bottom_index != 2; + } + + protected: + /// @copydoc ContrastiveLossLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the Contrastive error gradient w.r.t. the inputs. + * + * Computes the gradients with respect to the two input vectors (bottom[0] and + * bottom[1]), but not the similarity label (bottom[2]). + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) + * @param propagate_down see Layer::Backward. 
+ * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times 1 \times 1) @f$ + * the features @f$a@f$; Backward fills their diff with + * gradients if propagate_down[0] + * -# @f$ (N \times C \times 1 \times 1) @f$ + * the features @f$b@f$; Backward fills their diff with gradients if + * propagate_down[1] + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + Blob diff_; // cached for backward pass + Blob dist_sq_; // cached for backward pass + Blob diff_sq_; // tmp storage for gpu forward pass + Blob summer_vec_; // tmp storage for gpu forward pass +}; + +template +class TripletLossLayer : public LossLayer { + public: + explicit TripletLossLayer(const LayerParameter& param) + : LossLayer(param), diff_() {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< 9a40f059e6e96f739ad502aae165c4bfdbf1f369 + virtual inline int ExactNumBottomBlobs() const { return 2; } +======= + virtual inline int ExactNumBottomBlobs() const { return 4; } +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:include/caffe/loss_layers.hpp.orig +======= +>>>>>>> New triplet loss layer added(beta1 version-no test source files) +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 + virtual inline int ExactNumBottomBlobs() const { return 2; } +======= + virtual inline int ExactNumBottomBlobs() const { return 4; } +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:include/caffe/loss_layers.hpp.orig.orig +>>>>>>> triplet data generation and network update + virtual inline const char* type() const { return "TripletLoss"; } + /** + * Unlike most loss layers, in the TripletLossLayer we can backpropagate + * to the first three inputs. 
+ */ + virtual inline bool AllowForceBackward(const int bottom_index) const { +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< 9a40f059e6e96f739ad502aae165c4bfdbf1f369 + return bottom_index != 1; +======= + return bottom_index != 3; +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:include/caffe/loss_layers.hpp.orig +======= +>>>>>>> New triplet loss layer added(beta1 version-no test source files) +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 + return bottom_index != 1; +======= + return bottom_index != 3; +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:include/caffe/loss_layers.hpp.orig.orig +>>>>>>> triplet data generation and network update + } + + protected: + /// @copydoc TripletLossLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the Triplet error gradient w.r.t. the inputs. + * + * Computes the gradients with respect to the two input vectors (bottom[0] and + * bottom[1]), but not the similarity label (bottom[2]). + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) + * @param propagate_down see Layer::Backward. 
+ * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times 1 \times 1) @f$ + * the features @f$a@f$; Backward fills their diff with + * gradients if propagate_down[0] + * -# @f$ (N \times C \times 1 \times 1) @f$ + * the features @f$b@f$; Backward fills their diff with gradients if + * propagate_down[1] + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + Blob diff_; // cached for backward pass + Blob diff_pos; + Blob diff_neg; + Blob dist_sq_; // cached for backward pass + Blob dist_sq_pos; + Blob dist_sq_neg; + Blob diff_sq_; // tmp storage for gpu forward pass + Blob diff_sq_pos; + Blob diff_sq_neg; + Blob summer_vec_; // tmp storage for gpu forward pass +}; + +/** + * @brief Computes the Euclidean (L2) loss @f$ + * E = \frac{1}{2N} \sum\limits_{n=1}^N \left| \left| \hat{y}_n - y_n + * \right| \right|_2^2 @f$ for real-valued regression tasks. + * + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ \hat{y} \in [-\infty, +\infty]@f$ + * -# @f$ (N \times C \times H \times W) @f$ + * the targets @f$ y \in [-\infty, +\infty]@f$ + * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * the computed Euclidean loss: @f$ E = + * \frac{1}{2n} \sum\limits_{n=1}^N \left| \left| \hat{y}_n - y_n + * \right| \right|_2^2 @f$ + * + * This can be used for least-squares regression tasks. An InnerProductLayer + * input to a EuclideanLossLayer exactly formulates a linear least squares + * regression problem. With non-zero weight decay the problem becomes one of + * ridge regression -- see src/caffe/test/test_sgd_solver.cpp for a concrete + * example wherein we check that the gradients computed for a Net with exactly + * this structure match hand-computed gradient formulas for ridge regression. 
+ * + * (Note: Caffe, and SGD in general, is certainly \b not the best way to solve + * linear least squares problems! We use it only as an instructive example.) + */ +template +class EuclideanLossLayer : public LossLayer { + public: + explicit EuclideanLossLayer(const LayerParameter& param) + : LossLayer(param), diff_() {} + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "EuclideanLoss"; } + /** + * Unlike most loss layers, in the EuclideanLossLayer we can backpropagate + * to both inputs -- override to return true and always allow force_backward. + */ + virtual inline bool AllowForceBackward(const int bottom_index) const { + return true; + } + + protected: + /// @copydoc EuclideanLossLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the Euclidean error gradient w.r.t. the inputs. + * + * Unlike other children of LossLayer, EuclideanLossLayer \b can compute + * gradients with respect to the label inputs bottom[1] (but still only will + * if propagate_down[1] is set, due to being produced by learnable parameters + * or if force_backward is set). In fact, this layer is "commutative" -- the + * result is the same regardless of the order of the two bottoms. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) + * @param propagate_down see Layer::Backward. 
+ * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$\hat{y}@f$; Backward fills their diff with + * gradients @f$ + * \frac{\partial E}{\partial \hat{y}} = + * \frac{1}{n} \sum\limits_{n=1}^N (\hat{y}_n - y_n) + * @f$ if propagate_down[0] + * -# @f$ (N \times C \times H \times W) @f$ + * the targets @f$y@f$; Backward fills their diff with gradients + * @f$ \frac{\partial E}{\partial y} = + * \frac{1}{n} \sum\limits_{n=1}^N (y_n - \hat{y}_n) + * @f$ if propagate_down[1] + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + Blob diff_; +}; + +/** + * @brief Computes the hinge loss for a one-of-many classification task. + * + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ t @f$, a Blob with values in + * @f$ [-\infty, +\infty] @f$ indicating the predicted score for each of + * the @f$ K = CHW @f$ classes. In an SVM, @f$ t @f$ is the result of + * taking the inner product @f$ X^T W @f$ of the D-dimensional features + * @f$ X \in \mathcal{R}^{D \times N} @f$ and the learned hyperplane + * parameters @f$ W \in \mathcal{R}^{D \times K} @f$, so a Net with just + * an InnerProductLayer (with num_output = D) providing predictions to a + * HingeLossLayer and no other learnable parameters or losses is + * equivalent to an SVM. 
+ * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels @f$ l @f$, an integer-valued Blob with values + * @f$ l_n \in [0, 1, 2, ..., K - 1] @f$ + * indicating the correct class label among the @f$ K @f$ classes + * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * the computed hinge loss: @f$ E = + * \frac{1}{N} \sum\limits_{n=1}^N \sum\limits_{k=1}^K + * [\max(0, 1 - \delta\{l_n = k\} t_{nk})] ^ p + * @f$, for the @f$ L^p @f$ norm + * (defaults to @f$ p = 1 @f$, the L1 norm; L2 norm, as in L2-SVM, + * is also available), and @f$ + * \delta\{\mathrm{condition}\} = \left\{ + * \begin{array}{lr} + * 1 & \mbox{if condition} \\ + * -1 & \mbox{otherwise} + * \end{array} \right. + * @f$ + * + * In an SVM, @f$ t \in \mathcal{R}^{N \times K} @f$ is the result of taking + * the inner product @f$ X^T W @f$ of the features + * @f$ X \in \mathcal{R}^{D \times N} @f$ + * and the learned hyperplane parameters + * @f$ W \in \mathcal{R}^{D \times K} @f$. So, a Net with just an + * InnerProductLayer (with num_output = @f$k@f$) providing predictions to a + * HingeLossLayer is equivalent to an SVM (assuming it has no other learned + * outside the InnerProductLayer and no other losses outside the + * HingeLossLayer). + */ +template +class HingeLossLayer : public LossLayer { + public: + explicit HingeLossLayer(const LayerParameter& param) + : LossLayer(param) {} + + virtual inline const char* type() const { return "HingeLoss"; } + + protected: + /// @copydoc HingeLossLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the hinge loss error gradient w.r.t. the predictions. + * + * Gradients cannot be computed with respect to the label inputs (bottom[1]), + * so this method ignores bottom[1] and requires !propagate_down[1], crashing + * if propagate_down[1] is set. 
+ * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) + * @param propagate_down see Layer::Backward. + * propagate_down[1] must be false as we can't compute gradients with + * respect to the labels. + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$t@f$; Backward computes diff + * @f$ \frac{\partial E}{\partial t} @f$ + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels -- ignored as we can't compute their error gradients + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); +}; + +/** + * @brief A generalization of MultinomialLogisticLossLayer that takes an + * "information gain" (infogain) matrix specifying the "value" of all label + * pairs. + * + * Equivalent to the MultinomialLogisticLossLayer if the infogain matrix is the + * identity. + * + * @param bottom input Blob vector (length 2-3) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ \hat{p} @f$, a Blob with values in + * @f$ [0, 1] @f$ indicating the predicted probability of each of the + * @f$ K = CHW @f$ classes. Each prediction vector @f$ \hat{p}_n @f$ + * should sum to 1 as in a probability distribution: @f$ + * \forall n \sum\limits_{k=1}^K \hat{p}_{nk} = 1 @f$. 
+ * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels @f$ l @f$, an integer-valued Blob with values + * @f$ l_n \in [0, 1, 2, ..., K - 1] @f$ + * indicating the correct class label among the @f$ K @f$ classes + * -# @f$ (1 \times 1 \times K \times K) @f$ + * (\b optional) the infogain matrix @f$ H @f$. This must be provided as + * the third bottom blob input if not provided as the infogain_mat in the + * InfogainLossParameter. If @f$ H = I @f$, this layer is equivalent to the + * MultinomialLogisticLossLayer. + * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * the computed infogain multinomial logistic loss: @f$ E = + * \frac{-1}{N} \sum\limits_{n=1}^N H_{l_n} \log(\hat{p}_n) = + * \frac{-1}{N} \sum\limits_{n=1}^N \sum\limits_{k=1}^{K} H_{l_n,k} + * \log(\hat{p}_{n,k}) + * @f$, where @f$ H_{l_n} @f$ denotes row @f$l_n@f$ of @f$H@f$. + */ +template +class InfogainLossLayer : public LossLayer { + public: + explicit InfogainLossLayer(const LayerParameter& param) + : LossLayer(param), infogain_() {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + // InfogainLossLayer takes 2-3 bottom Blobs; if there are 3 the third should + // be the infogain matrix. (Otherwise the infogain matrix is loaded from a + // file specified by LayerParameter.) + virtual inline int ExactNumBottomBlobs() const { return -1; } + virtual inline int MinBottomBlobs() const { return 2; } + virtual inline int MaxBottomBlobs() const { return 3; } + + virtual inline const char* type() const { return "InfogainLoss"; } + + protected: + /// @copydoc InfogainLossLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the infogain loss error gradient w.r.t. the predictions. 
+ * + * Gradients cannot be computed with respect to the label inputs (bottom[1]), + * so this method ignores bottom[1] and requires !propagate_down[1], crashing + * if propagate_down[1] is set. (The same applies to the infogain matrix, if + * provided as bottom[2] rather than in the layer_param.) + * + * @param top output Blob vector (length 1), providing the error gradient + * with respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) + * @param propagate_down see Layer::Backward. + * propagate_down[1] must be false as we can't compute gradients with + * respect to the labels (similarly for propagate_down[2] and the + * infogain matrix, if provided as bottom[2]) + * @param bottom input Blob vector (length 2-3) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ \hat{p} @f$; Backward computes diff + * @f$ \frac{\partial E}{\partial \hat{p}} @f$ + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels -- ignored as we can't compute their error gradients + * -# @f$ (1 \times 1 \times K \times K) @f$ + * (\b optional) the information gain matrix -- ignored as its error + * gradient computation is not implemented. + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + Blob infogain_; +}; + +/** + * @brief Computes the multinomial logistic loss for a one-of-many + * classification task, directly taking a predicted probability + * distribution as input. 
+ * + * When predictions are not already a probability distribution, you should + * instead use the SoftmaxWithLossLayer, which maps predictions to a + * distribution using the SoftmaxLayer, before computing the multinomial + * logistic loss. The SoftmaxWithLossLayer should be preferred over separate + * SoftmaxLayer + MultinomialLogisticLossLayer + * as its gradient computation is more numerically stable. + * + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ \hat{p} @f$, a Blob with values in + * @f$ [0, 1] @f$ indicating the predicted probability of each of the + * @f$ K = CHW @f$ classes. Each prediction vector @f$ \hat{p}_n @f$ + * should sum to 1 as in a probability distribution: @f$ + * \forall n \sum\limits_{k=1}^K \hat{p}_{nk} = 1 @f$. + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels @f$ l @f$, an integer-valued Blob with values + * @f$ l_n \in [0, 1, 2, ..., K - 1] @f$ + * indicating the correct class label among the @f$ K @f$ classes + * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * the computed multinomial logistic loss: @f$ E = + * \frac{-1}{N} \sum\limits_{n=1}^N \log(\hat{p}_{n,l_n}) + * @f$ + */ +template +class MultinomialLogisticLossLayer : public LossLayer { + public: + explicit MultinomialLogisticLossLayer(const LayerParameter& param) + : LossLayer(param) {} + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "MultinomialLogisticLoss"; } + + protected: + /// @copydoc MultinomialLogisticLossLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the multinomial logistic loss error gradient w.r.t. the + * predictions. 
+ * + * Gradients cannot be computed with respect to the label inputs (bottom[1]), + * so this method ignores bottom[1] and requires !propagate_down[1], crashing + * if propagate_down[1] is set. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) + * @param propagate_down see Layer::Backward. + * propagate_down[1] must be false as we can't compute gradients with + * respect to the labels. + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ \hat{p} @f$; Backward computes diff + * @f$ \frac{\partial E}{\partial \hat{p}} @f$ + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels -- ignored as we can't compute their error gradients + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); +}; + +/** + * @brief Computes the cross-entropy (logistic) loss @f$ + * E = \frac{-1}{n} \sum\limits_{n=1}^N \left[ + * p_n \log \hat{p}_n + + * (1 - p_n) \log(1 - \hat{p}_n) + * \right] + * @f$, often used for predicting targets interpreted as probabilities. + * + * This layer is implemented rather than separate + * SigmoidLayer + CrossEntropyLayer + * as its gradient computation is more numerically stable. + * At test time, this layer can be replaced simply by a SigmoidLayer. 
+ * + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the scores @f$ x \in [-\infty, +\infty]@f$, + * which this layer maps to probability predictions + * @f$ \hat{p}_n = \sigma(x_n) \in [0, 1] @f$ + * using the sigmoid function @f$ \sigma(.) @f$ (see SigmoidLayer). + * -# @f$ (N \times C \times H \times W) @f$ + * the targets @f$ y \in [0, 1] @f$ + * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * the computed cross-entropy loss: @f$ + * E = \frac{-1}{n} \sum\limits_{n=1}^N \left[ + * p_n \log \hat{p}_n + (1 - p_n) \log(1 - \hat{p}_n) + * \right] + * @f$ + */ +template +class SigmoidCrossEntropyLossLayer : public LossLayer { + public: + explicit SigmoidCrossEntropyLossLayer(const LayerParameter& param) + : LossLayer(param), + sigmoid_layer_(new SigmoidLayer(param)), + sigmoid_output_(new Blob()) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "SigmoidCrossEntropyLoss"; } + + protected: + /// @copydoc SigmoidCrossEntropyLossLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + /** + * @brief Computes the sigmoid cross-entropy loss error gradient w.r.t. the + * predictions. + * + * Gradients cannot be computed with respect to the target inputs (bottom[1]), + * so this method ignores bottom[1] and requires !propagate_down[1], crashing + * if propagate_down[1] is set. 
+ * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) + * @param propagate_down see Layer::Backward. + * propagate_down[1] must be false as gradient computation with respect + * to the targets is not implemented. + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$x@f$; Backward computes diff + * @f$ \frac{\partial E}{\partial x} = + * \frac{1}{n} \sum\limits_{n=1}^N (\hat{p}_n - p_n) + * @f$ + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels -- ignored as we can't compute their error gradients + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + /// The internal SigmoidLayer used to map predictions to probabilities. + shared_ptr > sigmoid_layer_; + /// sigmoid_output stores the output of the SigmoidLayer. + shared_ptr > sigmoid_output_; + /// bottom vector holder to call the underlying SigmoidLayer::Forward + vector*> sigmoid_bottom_vec_; + /// top vector holder to call the underlying SigmoidLayer::Forward + vector*> sigmoid_top_vec_; +}; + +// Forward declare SoftmaxLayer for use in SoftmaxWithLossLayer. 
+template class SoftmaxLayer; + +/** + * @brief Computes the multinomial logistic loss for a one-of-many + * classification task, passing real-valued predictions through a + * softmax to get a probability distribution over classes. + * + * This layer should be preferred over separate + * SoftmaxLayer + MultinomialLogisticLossLayer + * as its gradient computation is more numerically stable. + * At test time, this layer can be replaced simply by a SoftmaxLayer. + * + * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ x @f$, a Blob with values in + * @f$ [-\infty, +\infty] @f$ indicating the predicted score for each of + * the @f$ K = CHW @f$ classes. This layer maps these scores to a + * probability distribution over classes using the softmax function + * @f$ \hat{p}_{nk} = \exp(x_{nk}) / + * \left[\sum_{k'} \exp(x_{nk'})\right] @f$ (see SoftmaxLayer). + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels @f$ l @f$, an integer-valued Blob with values + * @f$ l_n \in [0, 1, 2, ..., K - 1] @f$ + * indicating the correct class label among the @f$ K @f$ classes + * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * the computed cross-entropy classification loss: @f$ E = + * \frac{-1}{N} \sum\limits_{n=1}^N \log(\hat{p}_{n,l_n}) + * @f$, for softmax output class probabilites @f$ \hat{p} @f$ + */ +template +class SoftmaxWithLossLayer : public LossLayer { + public: + /** + * @param param provides LossParameter loss_param, with options: + * - ignore_label (optional) + * Specify a label value that should be ignored when computing the loss. + * - normalize (optional, default true) + * If true, the loss is normalized by the number of (nonignored) labels + * present; otherwise the loss is simply summed over spatial locations. 
+ */ + explicit SoftmaxWithLossLayer(const LayerParameter& param) + : LossLayer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "SoftmaxWithLoss"; } + virtual inline int ExactNumTopBlobs() const { return -1; } + virtual inline int MinTopBlobs() const { return 1; } + virtual inline int MaxTopBlobs() const { return 2; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + /** + * @brief Computes the softmax loss error gradient w.r.t. the predictions. + * + * Gradients cannot be computed with respect to the label inputs (bottom[1]), + * so this method ignores bottom[1] and requires !propagate_down[1], crashing + * if propagate_down[1] is set. + * + * @param top output Blob vector (length 1), providing the error gradient with + * respect to the outputs + * -# @f$ (1 \times 1 \times 1 \times 1) @f$ + * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, + * as @f$ \lambda @f$ is the coefficient of this layer's output + * @f$\ell_i@f$ in the overall Net loss + * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence + * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. + * (*Assuming that this top Blob is not used as a bottom (input) by any + * other layer of the Net.) + * @param propagate_down see Layer::Backward. + * propagate_down[1] must be false as we can't compute gradients with + * respect to the labels. 
+ * @param bottom input Blob vector (length 2) + * -# @f$ (N \times C \times H \times W) @f$ + * the predictions @f$ x @f$; Backward computes diff + * @f$ \frac{\partial E}{\partial x} @f$ + * -# @f$ (N \times 1 \times 1 \times 1) @f$ + * the labels -- ignored as we can't compute their error gradients + */ + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + + /// The internal SoftmaxLayer used to map predictions to a distribution. + shared_ptr > softmax_layer_; + /// prob stores the output probability predictions from the SoftmaxLayer. + Blob prob_; + /// bottom vector holder used in call to the underlying SoftmaxLayer::Forward + vector*> softmax_bottom_vec_; + /// top vector holder used in call to the underlying SoftmaxLayer::Forward + vector*> softmax_top_vec_; + /// Whether to ignore instances with a certain label. + bool has_ignore_label_; + /// The label indicating that an instance should be ignored. + int ignore_label_; + /// Whether to normalize the loss by the total number of values present + /// (otherwise just by the batch size). 
+ bool normalize_; + + int softmax_axis_, outer_num_, inner_num_; +}; + +} // namespace caffe + +#endif // CAFFE_LOSS_LAYERS_HPP_ diff --git a/src/caffe/layers/triplet_loss_layer.cpp.orig.orig.orig b/src/caffe/layers/triplet_loss_layer.cpp.orig.orig.orig new file mode 100644 index 00000000000..460042f5ff5 --- /dev/null +++ b/src/caffe/layers/triplet_loss_layer.cpp.orig.orig.orig @@ -0,0 +1,626 @@ +#include +#include + +#include "caffe/layer.hpp" +#include "caffe/loss_layers.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +void TripletLossLayer::LayerSetUp( + const vector*>& bottom, const vector*>& top) { + LossLayer::LayerSetUp(bottom, top); + // number of triplet in a batch + int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); + // dimension of each descriptor + int dim = bottom[0]->count()/bottom[0]->num(); + CHECK_EQ(bottom[0]->channels(), dim); + CHECK_EQ(bottom[0]->height(), 1); + CHECK_EQ(bottom[0]->width(), 1); + CHECK_EQ(bottom[1]->channels(), 1); + CHECK_EQ(bottom[1]->height(), 1); + CHECK_EQ(bottom[1]->width(), 1); + // In each set, we have: + // the descriptor of reference sample, closest sample, and negative samples + // number of sets in the whole batch + int num_set = bottom[0]->num()/(2 + num_triplets); + dist_sq_.Reshape(num_set, 1, 1, 1); + diff_pos.Reshape(num_set, dim, 1, 1); + dist_sq_pos.Reshape(num_set, 1, 1, 1); + diff_neg.Reshape(num_set, dim, 1, 1); + dist_sq_neg.Reshape(num_set, 1, 1, 1); + // vector of ones used to sum along channels + summer_vec_.Reshape(bottom[0]->channels(), 1, 1, 1); + for (int i = 0; i < bottom[0]->channels(); ++i) + summer_vec_.mutable_cpu_data()[i] = Dtype(1); +} + +template +void TripletLossLayer::Forward_cpu( + const vector*>& bottom, + const vector*>& top) { + Dtype margin = this->layer_param_.triplet_loss_param().margin(); + Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); + int num_triplets = 
this->layer_param_.triplet_loss_param().num_triplets(); + CHECK_EQ(bottom[0]->num()%(2 + num_triplets), 0); + Dtype loss(0.0); +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< 46f6a4f657c9a5f4ffb83b7c8540b4fd2b8208bb +======= +<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c +>>>>>>> GPU version added +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param + int dim = bottom[0]->count()/bottom[0]->num(); + int num_set = bottom[0]->num()/(2 + num_triplets); + if (losstype == 0) { + for (int i = 0; i < num_set; ++i) { + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive + diff_pos.mutable_cpu_data() + i*dim); // reference-pose_close + // Loss component calculated from reference and close one + dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(dim, + diff_pos.cpu_data() + i*dim, diff_pos.cpu_data() + i*dim); + // a b is a similar pair for pair wise + // loss accumulated by the pair wise part + loss += dist_sq_pos.cpu_data()[i]; + for (int triplet = 0; triplet < num_triplets; ++triplet) { + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i]; + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + i*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + i*dim, diff_neg.cpu_data() + i*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg.cpu_data()[i]; + // loss accumulated accumulated by the triplet part + loss += 
std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); + } +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< 46f6a4f657c9a5f4ffb83b7c8540b4fd2b8208bb +======= +======= +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +======= + +>>>>>>> add 3d network training param + // Loss component calculated from ab + for (int i = 0; i < bottom[0]->num(); ++i) { + dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(channels, + diff_pos.cpu_data() + (i*channels), diff_pos.cpu_data() + (i*channels)); + // ab is a similar pair +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f + dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i]; +======= + dist_sq_.mutable_cpu_data()[i] += dist_sq_pos.cpu_data()[i]; +>>>>>>> add 3d network training param + // Loss component calculated from ac + dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(channels, + diff_neg.cpu_data() + (i*channels), diff_neg.cpu_data() + (i*channels)); + // ac is a dissimilar pair + dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg.cpu_data()[i]; + loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +>>>>>>> GPU version added +>>>>>>> GPU version added +======= +>>>>>>> restore +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param + } + loss = loss / static_cast(num_set) / Dtype(2); + top[0]->mutable_cpu_data()[0] = loss; + } else { + for (int i = 0; i < num_set; ++i) { + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive + diff_pos.mutable_cpu_data() + i*dim); // reference-pose_close + // Loss component calculated from reference and close one + dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(dim, + diff_pos.cpu_data() + i*dim, diff_pos.cpu_data() + i*dim); + // a b is a similar pair for pair wise + // loss accumulated by the pair wise part + loss += dist_sq_pos.cpu_data()[i]; +<<<<<<< 
06f83b731104b760e3b1dea0618d6fbddedb817f + dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; + dist_sq_.mutable_cpu_data()[i] += margin; + for (int triplet = 0; triplet < num_triplets; ++triplet) { +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< 8b0f51f43a3aeac04970b675f35321c2608ed301 + for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; + dist_sq_.mutable_cpu_data()[i] += margin; +======= + dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; + dist_sq_.mutable_cpu_data()[i] += margin; + for (int triplet = 0; triplet < num_triplets; ++triplet) { +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise +======= + for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; + dist_sq_.mutable_cpu_data()[i] += margin; +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + i*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + i*dim, diff_neg.cpu_data() + i*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[i] = 1 - \ + dist_sq_neg.cpu_data()[i] / dist_sq_.mutable_cpu_data()[i]; + // loss accumulated accumulated by the triplet part + loss += std::max(dist_sq_.cpu_data()[i], Dtype(0.0)); + } + } + loss = loss / static_cast(num_set) / Dtype(2); + top[0]->mutable_cpu_data()[0] = loss; + } +} + +template +void TripletLossLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + Dtype margin = 
this->layer_param_.triplet_loss_param().margin(); +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a +>>>>>>> triplet data generation and network update + Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); + int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); + int dim = bottom[0]->count()/bottom[0]->num(); + int num_set = bottom[0]->num()/(2 + num_triplets); + if (losstype == 0) { + // BP for feat1(extracted from reference) + for (int i = 0; i < 1; ++i) { + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + // the pair part + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; + // Loss component calculated from negative part + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // similar pair in triplet + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(1.0), + bout + (2 + num_triplets)*j*dim); + // dissimilar pair in triplet + caffe_cpu_axpby( + dim, + -alpha, + diff_neg.cpu_data() + (j*dim), + Dtype(1.0), + bout + 
((2 + num_triplets)*j + i)*dim); + } +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< 8b0f51f43a3aeac04970b675f35321c2608ed301 +======= +======= +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param + } + } + } + } + // BP for feat2(extracted from the closest sample) + for (int i = 1; i < 2; ++i) { + if (propagate_down[0]) { + const Dtype sign = -1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + // the pair part + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // similar pair in triplet + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise +======= +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param + } + } + } 
+ } +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< 8b0f51f43a3aeac04970b675f35321c2608ed301 + // BP for feat2(extracted from the closest sample) + for (int i = 1; i < 2; ++i) { + if (propagate_down[0]) { + const Dtype sign = -1; +======= +======= +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param + // BP for negative feature used in the num_triplets triplet part + for (int i = 2; i < 2 + num_triplets; ++i) { + if (propagate_down[0]) { + const Dtype sign = 1; +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise +======= +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< 8b0f51f43a3aeac04970b675f35321c2608ed301 + // the pair part + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + 
dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // similar pair in triplet + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + // BP for negative feature used in the num_triplets triplet part + for (int i = 2; i < 2 + num_triplets; ++i) { + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); +======= +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise +======= +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + i)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // dissimilar pairs +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +======= + for (int i = 1; i < 3; ++i) { +// there must be further check to ensure the gradient calc + if (propagate_down[i]) { + const Dtype sign = (i == 2) ? 
1 : -1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(bottom[i]->num()); + int num = bottom[i]->num(); + int channels = bottom[i]->channels(); + for (int j = 0; j < num; ++j) { + Dtype* bout = bottom[i]->mutable_cpu_diff(); + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // similar pairs +>>>>>>> restore +>>>>>>> triplet data generation and network update + caffe_cpu_axpby( + dim, + alpha, + diff_neg.cpu_data() + (j*dim), + Dtype(0.0), +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a +>>>>>>> triplet data generation and network update + bout + ((2 + num_triplets)*j + i)*dim); + } else { + caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } else { + for (int i = 0; i < 1; ++i) { + // BP for data1(feat1) + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + // the pair part + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< 8b0f51f43a3aeac04970b675f35321c2608ed301 +======= +>>>>>>> triplet data generation and network update + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +>>>>>>> add 3d network training param + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= +>>>>>>> 
No sclice layer version which could forward a set of triplets together with 1 pair wise +======= +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] = 1 - \ + dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + caffe_cpu_axpby( + dim, + alpha*dist_sq_neg.mutable_cpu_data()[j]\ + /((dist_sq_pos.mutable_cpu_data()[j]+margin)\ + *(dist_sq_pos.mutable_cpu_data()[j]+margin)), + diff_pos.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + caffe_cpu_axpby( + dim, + -alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), + diff_neg.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + for (int i = 1; i < 2; ++i) { + // BP for positive data(feat2) + if (propagate_down[0]) { + const Dtype sign = -1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + // the pair part + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< 8b0f51f43a3aeac04970b675f35321c2608ed301 +======= +>>>>>>> triplet data generation and network update + // the num_triplets triplet part + for (int triplet = 0; triplet < 
num_triplets; ++triplet) { + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +>>>>>>> add 3d network training param + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise +======= +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] = 1 - \ + dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + caffe_cpu_axpby( + dim, + alpha*dist_sq_neg.mutable_cpu_data()[j]\ + /((dist_sq_pos.mutable_cpu_data()[j]+margin)\ + *(dist_sq_pos.mutable_cpu_data()[j]+margin)), + diff_pos.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + for (int i = 2; i < 2 + num_triplets; ++i) { + // BP for negative data(feat3) + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + 
dist_sq_.mutable_cpu_data()[j] += margin; + // Loss component calculated from negative part + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + i)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] = 1 - \ + dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +======= + bout + (j*channels)); + // dissimilar pairs +>>>>>>> restore +>>>>>>> triplet data generation and network update + caffe_cpu_axpby( + dim, + alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), + diff_neg.cpu_data() + (j*dim), + Dtype(0.0), +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 + bout + ((2 + num_triplets)*j + i)*dim); + } else { + caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); +======= +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a + bout + ((2 + num_triplets)*j + i)*dim); + } else { + caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); +======= + bout + (j*channels)); + } else { + caffe_set(channels, Dtype(0), bout + (j*channels)); +>>>>>>> restore +>>>>>>> triplet data generation and network update + } + } + } + } + } +} + +#ifdef CPU_ONLY +STUB_GPU(TripletLossLayer); +#endif + +INSTANTIATE_CLASS(TripletLossLayer); +REGISTER_LAYER_CLASS(TripletLoss); + +} // namespace caffe diff --git a/src/caffe/layers/triplet_loss_layer.cu.orig.orig.orig b/src/caffe/layers/triplet_loss_layer.cu.orig.orig.orig new file mode 100644 index 00000000000..2ebebd327f3 --- /dev/null +++ b/src/caffe/layers/triplet_loss_layer.cu.orig.orig.orig @@ -0,0 +1,728 @@ +#include +#include + +#include 
"caffe/layer.hpp" +<<<<<<< 0a8521567403409d70ece475762c203e38274530 +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" +======= +<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" +======= +#include "caffe/vision_layers.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" +>>>>>>> GPU version added +>>>>>>> GPU version added + +namespace caffe { + +template +void TripletLossLayer::Forward_gpu( + const vector*>& bottom, + const vector*>& top) { +<<<<<<< 0a8521567403409d70ece475762c203e38274530 +======= +<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c +>>>>>>> GPU version added + Dtype margin = this->layer_param_.triplet_loss_param().margin(); + Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); + int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); + CHECK_EQ(bottom[0]->num()%(2 + num_triplets), 0); + Dtype loss(0.0); + int dim = bottom[0]->count()/bottom[0]->num(); + int num_set = bottom[0]->num()/(2 + num_triplets); + if (losstype == 0) { + for (int i = 0; i < num_set; ++i) { + caffe_gpu_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive + diff_pos.mutable_gpu_data() + i*dim); // reference-pose_close + caffe_gpu_dot( + dim, + diff_pos.gpu_data() + i*dim, + diff_pos.gpu_data() + i*dim, +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 + dist_sq_pos.mutable_cpu_data() + i); +======= + dist_sq_pos.mutable_gpu_data() + i); +>>>>>>> debug GPU triplet loss codes for loss type 0 +======= +<<<<<<< 0a8521567403409d70ece475762c203e38274530 +<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 + dist_sq_pos.mutable_cpu_data() + i); +======= + dist_sq_pos.mutable_gpu_data() + i); +>>>>>>> debug GPU triplet loss 
codes for loss type 0 +======= + dist_sq_pos.mutable_cpu_data() + i); +>>>>>>> GPU version added +>>>>>>> add 3d network training param + // a b is a similar pair for pair wise + // loss accumulated by the pair wise part + loss += dist_sq_pos.gpu_data()[i]; + for (int triplet = 0; triplet < num_triplets; ++triplet) { + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.gpu_data()[i]; + // Loss component calculated from negative part + caffe_gpu_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, + diff_neg.mutable_gpu_data() + i*dim); // reference-negative +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< e7a31554fff6f6e13d1acc02201044edf0b2e322 +<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 +======= +>>>>>>> debug GPU triplet loss codes for loss type 0 +======= +>>>>>>> add 3d network training param + caffe_gpu_dot( + dim, + diff_neg.gpu_data() + i*dim, + diff_neg.gpu_data() + i*dim, +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= +<<<<<<< 0a8521567403409d70ece475762c203e38274530 +>>>>>>> add 3d network training param +<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 + dist_sq_neg.mutable_cpu_data() + i); +======= + dist_sq_neg.mutable_gpu_data() + i); +>>>>>>> debug GPU triplet loss codes for loss type 0 +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= +======= + dist_sq_neg.mutable_cpu_data() + i); +>>>>>>> GPU version added +>>>>>>> add 3d network training param + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[i] -= dist_sq_neg.gpu_data()[i]; + // loss accumulated accumulated by the triplet part + loss += std::max(margin + dist_sq_.gpu_data()[i], Dtype(0.0)); + } + } + loss = loss / static_cast(num_set) / Dtype(2); + top[0]->mutable_gpu_data()[0] = loss; + } else { + for (int i = 0; i < num_set; ++i) { + 
caffe_gpu_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive + diff_pos.mutable_gpu_data() + i*dim); // reference-pose_close + // Loss component calculated from reference and close one + caffe_gpu_dot( + dim, + diff_pos.gpu_data() + i*dim, + diff_pos.gpu_data() + i*dim, +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 + dist_sq_pos.mutable_cpu_data() + i); +======= + dist_sq_pos.mutable_gpu_data() + i); +>>>>>>> debug GPU triplet loss codes for loss type 0 +======= +<<<<<<< 0a8521567403409d70ece475762c203e38274530 +<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 + dist_sq_pos.mutable_cpu_data() + i); +======= + dist_sq_pos.mutable_gpu_data() + i); +>>>>>>> debug GPU triplet loss codes for loss type 0 +======= + dist_sq_pos.mutable_cpu_data() + i); +>>>>>>> GPU version added +>>>>>>> add 3d network training param + // a b is a similar pair for pair wise + // loss accumulated by the pair wise part + loss += dist_sq_pos.gpu_data()[i]; + for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.mutable_gpu_data()[i]; + dist_sq_.mutable_gpu_data()[i] += margin; + // Loss component calculated from negative part + caffe_gpu_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, + diff_neg.mutable_gpu_data() + i*dim); // reference-negative +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< e7a31554fff6f6e13d1acc02201044edf0b2e322 +<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 +======= +>>>>>>> debug GPU triplet loss codes for loss type 0 +======= +>>>>>>> add 3d network training param + caffe_gpu_dot( + dim, + diff_neg.gpu_data() + i*dim, + diff_neg.gpu_data() + i*dim, +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= +<<<<<<< 0a8521567403409d70ece475762c203e38274530 
+>>>>>>> add 3d network training param +<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 + dist_sq_neg.mutable_cpu_data() + i); +======= + dist_sq_neg.mutable_gpu_data() + i); +>>>>>>> debug GPU triplet loss codes for loss type 0 +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= +======= + dist_sq_neg.mutable_cpu_data() + i); +>>>>>>> GPU version added +>>>>>>> add 3d network training param + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[i] = 1 - \ + dist_sq_neg.gpu_data()[i] / dist_sq_.mutable_gpu_data()[i]; + // loss accumulated accumulated by the triplet part + loss += std::max(dist_sq_.gpu_data()[i], Dtype(0.0)); + } + } + loss = loss / static_cast(num_set) / Dtype(2); + top[0]->mutable_gpu_data()[0] = loss; + } +<<<<<<< 0a8521567403409d70ece475762c203e38274530 +======= +======= + int count = bottom[0]->count(); + caffe_gpu_sub( + count, + bottom[0]->gpu_data(), // a + bottom[1]->gpu_data(), // b + diff_pos.mutable_gpu_data()); // a_i-b_i + caffe_gpu_sub( + count, + bottom[0]->gpu_data(), // a + bottom[2]->gpu_data(), // c + diff_neg.mutable_gpu_data()); // a_i-c_i + caffe_gpu_powx( + count, + diff_pos.mutable_gpu_data(), // a_i-b_i + Dtype(2), + diff_sq_pos.mutable_gpu_data()); // (a_i-b_i)^2 + caffe_gpu_powx( + count, + diff_neg.mutable_gpu_data(), // a_i-c_i + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (a_i-c_i)^2 + const int channels = bottom[0]->channels(); + Dtype margin = this->layer_param_.triplet_loss_param().margin(); + Dtype loss(0.0); + // Loss component calculated from ab + for (int i = 0; i < bottom[0]->num(); ++i) { + /*dist_sq_pos.mutable_gpu_data()[i] = caffe_gpu_dot(channels, + diff_pos.gpu_data() + (i*channels), diff_pos.gpu_data() + (i*channels));*/ + // ab is a similar pair + dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.gpu_data()[i]; + // Loss component calculated from ac + /*dist_sq_neg.mutable_gpu_data()[i] = caffe_gpu_dot(channels, + diff_neg.gpu_data() + (i*channels), 
diff_neg.gpu_data() + (i*channels));*/ + // ac is a dissimilar pair + dist_sq_.mutable_gpu_data()[i] -= dist_sq_neg.gpu_data()[i]; + loss += std::max(margin + dist_sq_.gpu_data()[i], Dtype(0.0)); + } + loss = loss / static_cast(bottom[0]->num()) / Dtype(2); + top[0]->mutable_gpu_data()[0] = loss; +>>>>>>> GPU version added +>>>>>>> GPU version added +} + +template +void TripletLossLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + Dtype margin = this->layer_param_.triplet_loss_param().margin(); +<<<<<<< 0a8521567403409d70ece475762c203e38274530 +======= +<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c +>>>>>>> GPU version added + Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); + int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); + int dim = bottom[0]->count()/bottom[0]->num(); + int num_set = bottom[0]->num()/(2 + num_triplets); + if (losstype == 0) { + // BP for feat1(extracted from reference) + for (int i = 0; i < 1; ++i) { + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->gpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_gpu_diff(); + // the pair part + caffe_gpu_axpby( + dim, + alpha, + diff_pos.gpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + caffe_gpu_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_gpu_data() + j*dim); // reference-negative +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< e7a31554fff6f6e13d1acc02201044edf0b2e322 +<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 +======= +>>>>>>> debug GPU triplet loss codes for loss type 0 +======= +>>>>>>> add 3d network training param + caffe_gpu_dot( + dim, + 
diff_neg.gpu_data() + j*dim, + diff_neg.gpu_data() + j*dim, +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= +<<<<<<< 0a8521567403409d70ece475762c203e38274530 +>>>>>>> add 3d network training param +<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 + dist_sq_neg.mutable_cpu_data() + j); +======= + dist_sq_neg.mutable_gpu_data() + j); +>>>>>>> debug GPU triplet loss codes for loss type 0 +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= +======= + dist_sq_neg.mutable_cpu_data() + j); +>>>>>>> GPU version added +>>>>>>> add 3d network training param + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; + // Loss component calculated from negative part + if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { + // similar pair in triplet + caffe_gpu_axpby( + dim, + alpha, + diff_pos.gpu_data() + (j*dim), + Dtype(1.0), + bout + (2 + num_triplets)*j*dim); + // dissimilar pair in triplet + caffe_gpu_axpby( + dim, + -alpha, + diff_neg.gpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + // BP for feat2(extracted from the closest sample) + for (int i = 1; i < 2; ++i) { + if (propagate_down[0]) { + const Dtype sign = -1; + const Dtype alpha = sign * top[0]->gpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_gpu_diff(); + // the pair part + caffe_gpu_axpby( + dim, + alpha, + diff_pos.gpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + caffe_gpu_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + 
diff_neg.mutable_gpu_data() + j*dim); // reference-negative + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< e7a31554fff6f6e13d1acc02201044edf0b2e322 +<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 +======= +>>>>>>> debug GPU triplet loss codes for loss type 0 +======= +>>>>>>> add 3d network training param + caffe_gpu_dot( + dim, + diff_neg.gpu_data() + j*dim, + diff_neg.gpu_data() + j*dim, +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= +<<<<<<< 0a8521567403409d70ece475762c203e38274530 +>>>>>>> add 3d network training param +<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 + dist_sq_neg.mutable_cpu_data() + j); +======= + dist_sq_neg.mutable_gpu_data() + j); +>>>>>>> debug GPU triplet loss codes for loss type 0 +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= +======= + dist_sq_neg.mutable_cpu_data() + j); +>>>>>>> GPU version added +>>>>>>> add 3d network training param + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; + if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { + // similar pair in triplet + caffe_gpu_axpby( + dim, + alpha, + diff_pos.gpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + // BP for negative feature used in the num_triplets triplet part + for (int i = 2; i < 2 + num_triplets; ++i) { + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->gpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_gpu_diff(); + caffe_gpu_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*j + i)*dim, + diff_neg.mutable_gpu_data() + j*dim); // reference-negative + // Triplet loss accumulation + // a 
and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< e7a31554fff6f6e13d1acc02201044edf0b2e322 +<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 +======= +>>>>>>> debug GPU triplet loss codes for loss type 0 +======= +>>>>>>> add 3d network training param + caffe_gpu_dot( + dim, + diff_neg.gpu_data() + j*dim, + diff_neg.gpu_data() + j*dim, +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= +<<<<<<< 0a8521567403409d70ece475762c203e38274530 +>>>>>>> add 3d network training param +<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 + dist_sq_neg.mutable_cpu_data() + j); +======= + dist_sq_neg.mutable_gpu_data() + j); +>>>>>>> debug GPU triplet loss codes for loss type 0 +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= +======= + dist_sq_neg.mutable_cpu_data() + j); +>>>>>>> GPU version added +>>>>>>> add 3d network training param + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; + if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { + // dissimilar pairs + caffe_gpu_axpby( + dim, + alpha, + diff_neg.gpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + } else { + caffe_gpu_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } else { + for (int i = 0; i < 1; ++i) { + // BP for data1(feat1) + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->gpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_gpu_diff(); + // the pair part + caffe_gpu_axpby( + dim, + alpha, + diff_pos.gpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; + 
dist_sq_.mutable_gpu_data()[j] += margin; + // Loss component calculated from negative part + caffe_gpu_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_gpu_data() + j*dim); // reference-negative +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< e7a31554fff6f6e13d1acc02201044edf0b2e322 +<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 +======= +>>>>>>> debug GPU triplet loss codes for loss type 0 +======= +>>>>>>> add 3d network training param + caffe_gpu_dot( + dim, + diff_neg.gpu_data() + j*dim, + diff_neg.gpu_data() + j*dim, +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= +<<<<<<< 0a8521567403409d70ece475762c203e38274530 +>>>>>>> add 3d network training param +<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 + dist_sq_neg.mutable_cpu_data() + j); +======= + dist_sq_neg.mutable_gpu_data() + j); +>>>>>>> debug GPU triplet loss codes for loss type 0 +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= +======= + dist_sq_neg.mutable_cpu_data() + j); +>>>>>>> GPU version added +>>>>>>> add 3d network training param + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[j] = 1 - \ + dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.gpu_data()[j]) > Dtype(0.0)) { + caffe_gpu_axpby( + dim, + alpha*dist_sq_neg.mutable_gpu_data()[j]\ + /((dist_sq_pos.mutable_gpu_data()[j]+margin)\ + *(dist_sq_pos.mutable_gpu_data()[j]+margin)), + diff_pos.gpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + caffe_gpu_axpby( + dim, + -alpha/(dist_sq_pos.mutable_gpu_data()[j] + margin), + diff_neg.gpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + for (int i = 1; i < 2; ++i) { + // BP for positive data(feat2) + if (propagate_down[0]) { + const Dtype sign = -1; + 
const Dtype alpha = sign * top[0]->gpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_gpu_diff(); + // the pair part + caffe_gpu_axpby( + dim, + alpha, + diff_pos.gpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; + dist_sq_.mutable_gpu_data()[j] += margin; + // Loss component calculated from negative part + caffe_gpu_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_gpu_data() + j*dim); // reference-negative +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< e7a31554fff6f6e13d1acc02201044edf0b2e322 +<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 +======= +>>>>>>> debug GPU triplet loss codes for loss type 0 +======= +>>>>>>> add 3d network training param + caffe_gpu_dot( + dim, + diff_neg.gpu_data() + j*dim, + diff_neg.gpu_data() + j*dim, +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= +<<<<<<< 0a8521567403409d70ece475762c203e38274530 +>>>>>>> add 3d network training param +<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 + dist_sq_neg.mutable_cpu_data() + j); +======= + dist_sq_neg.mutable_gpu_data() + j); +>>>>>>> debug GPU triplet loss codes for loss type 0 +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= +======= + dist_sq_neg.mutable_cpu_data() + j); +>>>>>>> GPU version added +>>>>>>> add 3d network training param + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[j] = 1 - \ + dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.gpu_data()[j]) > Dtype(0.0)) { + caffe_gpu_axpby( + dim, + alpha*dist_sq_neg.mutable_gpu_data()[j]\ + 
/((dist_sq_pos.mutable_gpu_data()[j]+margin)\ + *(dist_sq_pos.mutable_gpu_data()[j]+margin)), + diff_pos.gpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + for (int i = 2; i < 2 + num_triplets; ++i) { + // BP for negative data(feat3) + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->gpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_gpu_diff(); + dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; + dist_sq_.mutable_gpu_data()[j] += margin; + // Loss component calculated from negative part + caffe_gpu_sub( + dim, + bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->gpu_data() + ((2 + num_triplets)*j + i)*dim, + diff_neg.mutable_gpu_data() + j*dim); // reference-negative +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< e7a31554fff6f6e13d1acc02201044edf0b2e322 +<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 +======= +>>>>>>> debug GPU triplet loss codes for loss type 0 +======= +>>>>>>> add 3d network training param + caffe_gpu_dot( + dim, + diff_neg.gpu_data() + j*dim, + diff_neg.gpu_data() + j*dim, +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= +<<<<<<< 0a8521567403409d70ece475762c203e38274530 +>>>>>>> add 3d network training param +<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 + dist_sq_neg.mutable_cpu_data() + j); +======= + dist_sq_neg.mutable_gpu_data() + j); +>>>>>>> debug GPU triplet loss codes for loss type 0 +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= +======= + dist_sq_neg.mutable_cpu_data() + j); +>>>>>>> GPU version added +>>>>>>> add 3d network training param + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_gpu_data()[j] = 1 - \ + dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; + // loss accumulated accumulated by the triplet part + if ((dist_sq_.gpu_data()[j]) > Dtype(0.0)) { + 
caffe_gpu_axpby( + dim, + alpha/(dist_sq_pos.mutable_gpu_data()[j] + margin), + diff_neg.gpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + } else { + caffe_gpu_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); +<<<<<<< 0a8521567403409d70ece475762c203e38274530 +======= +======= +// there must be further check to ensure the gradient calc + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->gpu_diff()[0] / + static_cast(bottom[0]->num()); + int num = bottom[0]->num(); + int channels = bottom[0]->channels(); + for (int j = 0; j < num; ++j) { + Dtype* bout = bottom[0]->mutable_gpu_diff(); + if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { + // similar pairs + caffe_gpu_axpby( + channels, + alpha, + diff_pos.gpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + // dissimilar pairs + caffe_gpu_axpby( + channels, + -alpha, + diff_neg.gpu_data() + (j*channels), + Dtype(1.0), + bout + (j*channels)); + } else { + caffe_set(channels, Dtype(0), bout + (j*channels)); + } + } + } + for (int i = 1; i < 3; ++i) { +// there must be further check to ensure the gradient calc + if (propagate_down[i]) { + const Dtype sign = (i == 1) ? 
-1 : 1; + const Dtype alpha = sign * top[0]->gpu_diff()[0] / + static_cast(bottom[i]->num()); + int num = bottom[i]->num(); + int channels = bottom[i]->channels(); + for (int j = 0; j < num; ++j) { + Dtype* bout = bottom[i]->mutable_gpu_diff(); + if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { + if (i == 1) { + // similar pairs + caffe_gpu_axpby( + channels, + alpha, + diff_pos.gpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + } + else { + // dissimilar pairs + caffe_gpu_axpby( + channels, + alpha, + diff_neg.gpu_data() + (j*channels), + Dtype(0.0), + bout + (j*channels)); + } + } else { + caffe_set(channels, Dtype(0), bout + (j*channels)); +>>>>>>> GPU version added +>>>>>>> GPU version added + } + } + } + } +<<<<<<< 0a8521567403409d70ece475762c203e38274530 + } +======= +<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c + } +======= +>>>>>>> GPU version added +>>>>>>> GPU version added +} + +INSTANTIATE_LAYER_GPU_FUNCS(TripletLossLayer); + +} // namespace caffe diff --git a/src/caffe/proto/caffe.proto.orig.orig b/src/caffe/proto/caffe.proto.orig.orig new file mode 100644 index 00000000000..9bb8666a766 --- /dev/null +++ b/src/caffe/proto/caffe.proto.orig.orig @@ -0,0 +1,1598 @@ +syntax = "proto2"; + +package caffe; + +// Specifies the shape (dimensions) of a Blob. +message BlobShape { + repeated int64 dim = 1 [packed = true]; +} + +message BlobProto { + optional BlobShape shape = 7; + repeated float data = 5 [packed = true]; + repeated float diff = 6 [packed = true]; +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= + repeated double double_data = 8 [packed = true]; + repeated double double_diff = 9 [packed = true]; +>>>>>>> triplet data generation and network update + + // 4D dimensions -- deprecated. Use "shape" instead. 
+ optional int32 num = 1 [default = 0]; + optional int32 channels = 2 [default = 0]; + optional int32 height = 3 [default = 0]; + optional int32 width = 4 [default = 0]; +} + +// The BlobProtoVector is simply a way to pass multiple blobproto instances +// around. +message BlobProtoVector { + repeated BlobProto blobs = 1; +} + +message Datum { + optional int32 channels = 1; + optional int32 height = 2; + optional int32 width = 3; + // the actual image data, in bytes + optional bytes data = 4; + optional int32 label = 5; + // Optionally, the datum could also hold float data. + repeated float float_data = 6; + // If true data contains an encoded image that need to be decoded + optional bool encoded = 7 [default = false]; +} + +message FillerParameter { + // The filler type. + optional string type = 1 [default = 'constant']; + optional float value = 2 [default = 0]; // the value in constant filler + optional float min = 3 [default = 0]; // the min value in uniform filler + optional float max = 4 [default = 1]; // the max value in uniform filler + optional float mean = 5 [default = 0]; // the mean value in Gaussian filler + optional float std = 6 [default = 1]; // the std value in Gaussian filler + // The expected number of non-zero output weights for a given input in + // Gaussian filler -- the default -1 means don't perform sparsification. + optional int32 sparse = 7 [default = -1]; +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig + // Normalize the filler variance by fan_in, fan_out, or their average. + // Applies to 'xavier' and 'msra' fillers. 
+ enum VarianceNorm { + FAN_IN = 0; + FAN_OUT = 1; + AVERAGE = 2; + } + optional VarianceNorm variance_norm = 8 [default = FAN_IN]; +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +======= +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +} + +message NetParameter { + optional string name = 1; // consider giving the network a name + // The input blobs to the network. + repeated string input = 3; + // The shape of the input blobs. + repeated BlobShape input_shape = 8; + + // 4D input dimensions -- deprecated. Use "shape" instead. + // If specified, for each input blob there should be four + // values specifying the num, channels, height and width of the input blob. + // Thus, there should be a total of (4 * #input) numbers. + repeated int32 input_dim = 4; + + // Whether the network will force every layer to carry out backward operation. + // If set False, then whether to carry out backward is determined + // automatically according to the net structure and learning rates. + optional bool force_backward = 5 [default = false]; + // The current "state" of the network, including the phase, level, and stage. + // Some layers may be included/excluded depending on this state and the states + // specified in the layers' include and exclude fields. + optional NetState state = 6; + + // Print debugging information about results while running Net::Forward, + // Net::Backward, and Net::Update. + optional bool debug_info = 7 [default = false]; + + // The layers that make up the net. Each of their configurations, including + // connectivity and behavior, is specified as a LayerParameter. + repeated LayerParameter layer = 100; // ID 100 so layers are printed last. + + // DEPRECATED: use 'layer' instead. + repeated V1LayerParameter layers = 2; +} + +// NOTE +// Update the next available ID when you add a new SolverParameter field. 
+// +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +// SolverParameter next available ID: 40 (last added: momentum2) +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +// SolverParameter next available ID: 37 (last added: iter_size) +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +// SolverParameter next available ID: 40 (last added: momentum2) +======= +// SolverParameter next available ID: 36 (last added: clip_gradients) +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +message SolverParameter { + ////////////////////////////////////////////////////////////////////////////// + // Specifying the train and test networks + // + // Exactly one train net must be specified using one of the following fields: + // train_net_param, train_net, net_param, net + // One or more test nets may be specified using any of the following fields: + // test_net_param, test_net, net_param, net + // If more than one test net field is specified (e.g., both net and + // test_net are specified), they will be evaluated in the field order given + // above: (1) test_net_param, (2) test_net, (3) net_param/net. + // A test_iter must be specified for each test_net. + // A test_level and/or a test_stage may also be specified for each test_net. + ////////////////////////////////////////////////////////////////////////////// + + // Proto filename for the train net, possibly combined with one or more + // test nets. + optional string net = 24; + // Inline train net param, possibly combined with one or more test nets. + optional NetParameter net_param = 25; + + optional string train_net = 1; // Proto filename for the train net. + repeated string test_net = 2; // Proto filenames for the test nets. + optional NetParameter train_net_param = 21; // Inline train net params. 
+ repeated NetParameter test_net_param = 22; // Inline test net params. + + // The states for the train/test nets. Must be unspecified or + // specified once per net. + // + // By default, all states will have solver = true; + // train_state will have phase = TRAIN, + // and all test_state's will have phase = TEST. + // Other defaults are set according to the NetState defaults. + optional NetState train_state = 26; + repeated NetState test_state = 27; + + // The number of iterations for each test net. + repeated int32 test_iter = 3; + + // The number of iterations between two testing phases. + optional int32 test_interval = 4 [default = 0]; + optional bool test_compute_loss = 19 [default = false]; + // If true, run an initial test pass before the first iteration, + // ensuring memory availability and printing the starting value of the loss. + optional bool test_initialization = 32 [default = true]; + optional float base_lr = 5; // The base learning rate + // the number of iterations between displaying info. If display = 0, no info + // will be displayed. + optional int32 display = 6; + // Display the loss averaged over the last average_loss iterations + optional int32 average_loss = 33 [default = 1]; + optional int32 max_iter = 7; // the maximum number of iterations +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 + // accumulate gradients over `iter_size` x `batch_size` instances + optional int32 iter_size = 36 [default = 1]; + optional string lr_policy = 8; // The learning rate decay policy. +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig + // accumulate gradients over `iter_size` x `batch_size` instances + optional int32 iter_size = 36 [default = 1]; + + // The learning rate decay policy. 
The currently implemented learning rate + // policies are as follows: + // - fixed: always return base_lr. + // - step: return base_lr * gamma ^ (floor(iter / step)) + // - exp: return base_lr * gamma ^ iter + // - inv: return base_lr * (1 + gamma * iter) ^ (- power) + // - multistep: similar to step but it allows non uniform steps defined by + // stepvalue + // - poly: the effective learning rate follows a polynomial decay, to be + // zero by the max_iter. return base_lr (1 - iter/max_iter) ^ (power) + // - sigmoid: the effective learning rate follows a sigmod decay + // return base_lr ( 1/(1 + exp(-gamma * (iter - stepsize)))) + // + // where base_lr, max_iter, gamma, step, stepvalue and power are defined + // in the solver parameter protocol buffer, and iter is the current iteration. + optional string lr_policy = 8; +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +======= + optional string lr_policy = 8; // The learning rate decay policy. +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig + optional float gamma = 9; // The parameter to compute the learning rate. + optional float power = 10; // The parameter to compute the learning rate. + optional float momentum = 11; // The momentum value. + optional float weight_decay = 12; // The weight decay. + // regularization types supported: L1 and L2 + // controlled by weight_decay + optional string regularization_type = 29 [default = "L2"]; + // the stepsize for learning rate policy "step" + optional int32 stepsize = 13; + // the stepsize for learning rate policy "multistep" + repeated int32 stepvalue = 34; + + // Set clip_gradients to >= 0 to clip parameter gradients to that L2 norm, + // whenever their actual L2 norm is larger. 
+ optional float clip_gradients = 35 [default = -1]; + + optional int32 snapshot = 14 [default = 0]; // The snapshot interval + optional string snapshot_prefix = 15; // The prefix for the snapshot. + // whether to snapshot diff in the results or not. Snapshotting diff will help + // debugging but the final protocol buffer size will be much larger. + optional bool snapshot_diff = 16 [default = false]; +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= + enum SnapshotFormat { + HDF5 = 0; + BINARYPROTO = 1; + } + optional SnapshotFormat snapshot_format = 37 [default = BINARYPROTO]; +>>>>>>> triplet data generation and network update + // the mode solver will use: 0 for CPU and 1 for GPU. Use GPU in default. + enum SolverMode { + CPU = 0; + GPU = 1; + } + optional SolverMode solver_mode = 17 [default = GPU]; + // the device_id will that be used in GPU mode. Use device_id = 0 in default. + optional int32 device_id = 18 [default = 0]; + // If non-negative, the seed with which the Solver will initialize the Caffe + // random number generator -- useful for reproducible results. Otherwise, + // (and by default) initialize using a seed derived from the system clock. 
+ optional int64 random_seed = 20 [default = -1]; + + // Solver type + enum SolverType { + SGD = 0; + NESTEROV = 1; + ADAGRAD = 2; +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 + } + optional SolverType solver_type = 30 [default = SGD]; + // numerical stability for AdaGrad + optional float delta = 31 [default = 1e-8]; +======= + RMSPROP = 3; + ADADELTA = 4; + ADAM = 5; + } + optional SolverType solver_type = 30 [default = SGD]; + // numerical stability for RMSProp, AdaGrad and AdaDelta and Adam + optional float delta = 31 [default = 1e-8]; + // parameters for the Adam solver + optional float momentum2 = 39 [default = 0.999]; + + // RMSProp decay value + // MeanSquare(t) = rms_decay*MeanSquare(t-1) + (1-rms_decay)*SquareGradient(t) + optional float rms_decay = 38; +>>>>>>> triplet data generation and network update + + // If true, print information about the state of the net that may help with + // debugging learning problems. + optional bool debug_info = 23 [default = false]; + + // If false, don't save a snapshot after training finishes. + optional bool snapshot_after_train = 28 [default = true]; +} + +// A message that stores the solver snapshots +message SolverState { + optional int32 iter = 1; // The current iteration + optional string learned_net = 2; // The file that stores the learned net. + repeated BlobProto history = 3; // The history for sgd solvers + optional int32 current_step = 4 [default = 0]; // The current step for learning rate +} + +enum Phase { + TRAIN = 0; + TEST = 1; +} + +message NetState { + optional Phase phase = 1 [default = TEST]; + optional int32 level = 2 [default = 0]; + repeated string stage = 3; +} + +message NetStateRule { + // Set phase to require the NetState have a particular phase (TRAIN or TEST) + // to meet this rule. + optional Phase phase = 1; + + // Set the minimum and/or maximum levels in which the layer should be used. + // Leave undefined to meet the rule regardless of level. 
+ optional int32 min_level = 2; + optional int32 max_level = 3; + + // Customizable sets of stages to include or exclude. + // The net must have ALL of the specified stages and NONE of the specified + // "not_stage"s to meet the rule. + // (Use multiple NetStateRules to specify conjunctions of stages.) + repeated string stage = 4; + repeated string not_stage = 5; +} + +// Specifies training parameters (multipliers on global learning constants, +// and the name and other settings used for weight sharing). +message ParamSpec { + // The names of the parameter blobs -- useful for sharing parameters among + // layers, but never required otherwise. To share a parameter between two + // layers, give it a (non-empty) name. + optional string name = 1; + + // Whether to require shared weights to have the same shape, or just the same + // count -- defaults to STRICT if unspecified. + optional DimCheckMode share_mode = 2; + enum DimCheckMode { + // STRICT (default) requires that num, channels, height, width each match. + STRICT = 0; + // PERMISSIVE requires only the count (num*channels*height*width) to match. + PERMISSIVE = 1; + } + + // The multiplier on the global learning rate for this parameter. + optional float lr_mult = 3 [default = 1.0]; + + // The multiplier on the global weight decay for this parameter. + optional float decay_mult = 4 [default = 1.0]; +} + +// NOTE +// Update the next available ID when you add a new LayerParameter field. 
+// +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< 90c50a1b3e5527cfa0d92174b79cb05438c5302e +// LayerParameter next available layer-specific ID: 137 (last added: reduction_param) +======= +// LayerParameter next available layer-specific ID: 139 (last added: tile_param) +>>>>>>> Add TileLayer +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +// LayerParameter next available layer-specific ID: 139 (last added: tile_param) +message LayerParameter { + optional string name = 1; // the layer name + optional string type = 2; // the layer type + repeated string bottom = 3; // the name of each bottom blob + repeated string top = 4; // the name of each top blob + + // The train / test phase for computation. + optional Phase phase = 10; + + // The amount of weight to assign each top blob in the objective. + // Each layer assigns a default value, usually of either 0 or 1, + // to each top blob. + repeated float loss_weight = 5; + + // Specifies training parameters (multipliers on global learning constants, + // and the name and other settings used for weight sharing). + repeated ParamSpec param = 6; + + // The blobs containing the numeric parameters of the layer. + repeated BlobProto blobs = 7; +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig + + // Specifies on which bottoms the backpropagation should be skipped. + // The size must be either 0 or equal to the number of bottoms. 
+ repeated bool propagate_down = 11; +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +======= +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig + + // Rules controlling whether and when a layer is included in the network, + // based on the current NetState. You may specify a non-zero number of rules + // to include OR exclude, but not both. If no include or exclude rules are + // specified, the layer is always included. If the current NetState meets + // ANY (i.e., one or more) of the specified rules, the layer is + // included/excluded. + repeated NetStateRule include = 8; + repeated NetStateRule exclude = 9; + + // Parameters for data pre-processing. + optional TransformationParameter transform_param = 100; + + // Parameters shared by loss layers. + optional LossParameter loss_param = 101; + + // Layer type-specific parameters. + // + // Note: certain layers may have more than one computational engine + // for their implementation. These layers include an Engine type and + // engine parameter for selecting the implementation. + // The default for the engine is set by the ENGINE switch at compile-time. 
+ optional AccuracyParameter accuracy_param = 102; + optional ArgMaxParameter argmax_param = 103; + optional ConcatParameter concat_param = 104; + optional ContrastiveLossParameter contrastive_loss_param = 105; + optional ConvolutionParameter convolution_param = 106; + optional DataParameter data_param = 107; + optional DropoutParameter dropout_param = 108; + optional DummyDataParameter dummy_data_param = 109; + optional EltwiseParameter eltwise_param = 110; +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 + optional ExpParameter exp_param = 111; + optional FlattenParameter flatten_param = 135; +======= + optional EmbedParameter embed_param = 137; + optional ExpParameter exp_param = 111; +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig + optional FlattenParameter flatten_param = 135; +======= +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig + optional HDF5DataParameter hdf5_data_param = 112; + optional HDF5OutputParameter hdf5_output_param = 113; + optional HingeLossParameter hinge_loss_param = 114; + optional ImageDataParameter image_data_param = 115; + optional InfogainLossParameter infogain_loss_param = 116; + optional InnerProductParameter inner_product_param = 117; +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig + optional LogParameter log_param = 134; +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 + optional LogParameter log_param = 134; +======= +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig + optional LRNParameter lrn_param = 118; + optional MemoryDataParameter memory_data_param = 119; + optional MVNParameter mvn_param = 120; + optional PoolingParameter pooling_param = 121; + optional PowerParameter power_param = 122; + optional PReLUParameter prelu_param = 131; + optional PythonParameter python_param = 130; +<<<<<<< 
06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig + optional ReductionParameter reduction_param = 136; + optional ReLUParameter relu_param = 123; + optional ReshapeParameter reshape_param = 133; + optional SigmoidParameter sigmoid_param = 124; + optional SoftmaxParameter softmax_param = 125; + optional SPPParameter spp_param = 132; +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +======= + optional ReLUParameter relu_param = 123; + optional SigmoidParameter sigmoid_param = 124; + optional SoftmaxParameter softmax_param = 125; +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig + optional SliceParameter slice_param = 126; + optional TanHParameter tanh_param = 127; + optional ThresholdParameter threshold_param = 128; + optional TileParameter tile_param = 138; + optional WindowDataParameter window_data_param = 129; +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +<<<<<<< 9a40f059e6e96f739ad502aae165c4bfdbf1f369 + optional TripletLossParameter triplet_loss_param = 139; +======= + optional TripletLossParameter triplet_loss_param = 137; +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 + optional TripletLossParameter triplet_loss_param = 139; +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 + optional TripletLossParameter triplet_loss_param = 139; +======= + optional TripletLossParameter triplet_loss_param = 132; +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +>>>>>>> triplet data generation and network update +} + +// Message that stores parameters used to apply transformation +// to the data layer's data +message TransformationParameter { + // For data 
pre-processing, we can do simple scaling and subtracting the + // data mean, if provided. Note that the mean subtraction is always carried + // out before scaling. + optional float scale = 1 [default = 1]; + // Specify if we want to randomly mirror data. + optional bool mirror = 2 [default = false]; + // Specify if we would like to randomly crop an image. + optional uint32 crop_size = 3 [default = 0]; + // mean_file and mean_value cannot be specified at the same time + optional string mean_file = 4; + // if specified can be repeated once (would substract it from all the channels) + // or can be repeated the same number of times as channels + // (would subtract them from the corresponding channel) + repeated float mean_value = 5; +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig + // Force the decoded image to have 3 color channels. + optional bool force_color = 6 [default = false]; + // Force the decoded image to have 1 color channels. + optional bool force_gray = 7 [default = false]; +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +======= +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +} + +// Message that stores parameters shared by loss layers +message LossParameter { + // If specified, ignore instances with the given label. + optional int32 ignore_label = 1; + // If true, normalize each batch across all instances (including spatial + // dimesions, but not ignored instances); else, divide by batch size only. + optional bool normalize = 2 [default = true]; +} + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +// Messages that store parameters used by individual layer types follow, in +// alphabetical order. 
+ +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +// Messages that store parameters used by individual layer types follow, in +// alphabetical order. + +======= +// Message that stores parameters used by AccuracyLayer +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +message AccuracyParameter { + // When computing accuracy, count as correct by comparing the true label to + // the top k scoring classes. By default, only compare to the top scoring + // class (i.e. argmax). + optional uint32 top_k = 1 [default = 1]; + + // The "label" axis of the prediction blob, whose argmax corresponds to the + // predicted label -- may be negative to index from the end (e.g., -1 for the + // last axis). For example, if axis == 1 and the predictions are + // (N x C x H x W), the label blob is expected to contain N*H*W ground truth + // labels with integer values in {0, 1, ..., C-1}. + optional int32 axis = 2 [default = 1]; + + // If specified, ignore instances with the given label. 
+ optional int32 ignore_label = 3; +} + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +// Message that stores parameters used by ArgMaxLayer +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +message ArgMaxParameter { + // If true produce pairs (argmax, maxval) + optional bool out_max_val = 1 [default = false]; + optional uint32 top_k = 2 [default = 1]; +} + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +// Message that stores parameters used by ConcatLayer +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +message ConcatParameter { + // The axis along which to concatenate -- may be negative to index from the + // end (e.g., -1 for the last axis). Other axes must have the + // same dimension for all the bottom blobs. + // By default, ConcatLayer concatenates blobs along the "channels" axis (1). + optional int32 axis = 2 [default = 1]; + + // DEPRECATED: alias for "axis" -- does not support negative indexing. 
+ optional uint32 concat_dim = 1 [default = 1]; +} + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +message ContrastiveLossParameter { + // margin for dissimilar pair + optional float margin = 1 [default = 1.0]; +======= +// Message that stores parameters used by ContrastiveLossLayer +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +message ContrastiveLossParameter { + // margin for dissimilar pair + optional float margin = 1 [default = 1.0]; +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig + // The first implementation of this cost did not exactly match the cost of + // Hadsell et al 2006 -- using (margin - d^2) instead of (margin - d)^2. + // legacy_version = false (the default) uses (margin - d)^2 as proposed in the + // Hadsell paper. New models should probably use this version. + // legacy_version = true uses (margin - d^2). 
This is kept to support / + // reproduce existing models and results + optional bool legacy_version = 2 [default = false]; +} + +message TripletLossParameter { + //margin for negative triplet + optional float margin = 1 [default = 1.0]; + optional uint32 losstype = 2 [default = 1]; + optional uint32 num_triplets = 3 [default = 3]; +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +} + +======= +======= +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +} + +message TripletLossParameter { + //margin for negative triplet + optional float margin = 1 [default = 1.0]; +} +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig + +======= +// Message that stores parameters used by ConvolutionLayer +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +message ConvolutionParameter { + optional uint32 num_output = 1; // The number of outputs for the layer + optional bool bias_term = 2 [default = true]; // whether to have bias terms + // Pad, kernel size, and stride are all given as a single value for equal + // dimensions in height and width or as Y, X pairs. 
+ optional uint32 pad = 3 [default = 0]; // The padding size (equal in Y, X) + optional uint32 pad_h = 9 [default = 0]; // The padding height + optional uint32 pad_w = 10 [default = 0]; // The padding width + optional uint32 kernel_size = 4; // The kernel size (square) + optional uint32 kernel_h = 11; // The kernel height + optional uint32 kernel_w = 12; // The kernel width + optional uint32 group = 5 [default = 1]; // The group size for group conv + optional uint32 stride = 6 [default = 1]; // The stride (equal in Y, X) + optional uint32 stride_h = 13; // The stride height + optional uint32 stride_w = 14; // The stride width + optional FillerParameter weight_filler = 7; // The filler for the weight + optional FillerParameter bias_filler = 8; // The filler for the bias + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 15 [default = DEFAULT]; +} + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +// Message that stores parameters used by DataLayer +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +message DataParameter { + enum DB { + LEVELDB = 0; + LMDB = 1; + } + // Specify the data source. + optional string source = 1; + // Specify the batch size. + optional uint32 batch_size = 4; + // The rand_skip variable is for the data layer to skip a few data points + // to avoid all asynchronous sgd clients to start at the same point. The skip + // point would be set as rand_skip * rand(0,1). Note that rand_skip should not + // be larger than the number of keys in the database. +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= + // DEPRECATED. Each solver accesses a different subset of the database. 
+>>>>>>> triplet data generation and network update + optional uint32 rand_skip = 7 [default = 0]; + optional DB backend = 8 [default = LEVELDB]; + // DEPRECATED. See TransformationParameter. For data pre-processing, we can do + // simple scaling and subtracting the data mean, if provided. Note that the + // mean subtraction is always carried out before scaling. + optional float scale = 2 [default = 1]; + optional string mean_file = 3; + // DEPRECATED. See TransformationParameter. Specify if we would like to randomly + // crop an image. + optional uint32 crop_size = 5 [default = 0]; + // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror + // data. + optional bool mirror = 6 [default = false]; + // Force the encoded image to have 3 color channels + optional bool force_encoded_color = 9 [default = false]; +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +} + +======= + // Prefetch queue (Number of batches to prefetch to host memory, increase if + // data access bandwidth varies). + optional uint32 prefetch = 10 [default = 4]; +} + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +// Message that stores parameters used by DropoutLayer +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +message DropoutParameter { + optional float dropout_ratio = 1 [default = 0.5]; // dropout ratio +} + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +// Message that stores parameters used by DummyDataLayer. +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +// DummyDataLayer fills any number of arbitrarily shaped blobs with random +// (or constant) data generated by "Fillers" (see "message FillerParameter"). 
+message DummyDataParameter { + // This layer produces N >= 1 top blobs. DummyDataParameter must specify 1 or N + // shape fields, and 0, 1 or N data_fillers. + // + // If 0 data_fillers are specified, ConstantFiller with a value of 0 is used. + // If 1 data_filler is specified, it is applied to all top blobs. If N are + // specified, the ith is applied to the ith top blob. + repeated FillerParameter data_filler = 1; + repeated BlobShape shape = 6; + + // 4D dimensions -- deprecated. Use "shape" instead. + repeated uint32 num = 2; + repeated uint32 channels = 3; + repeated uint32 height = 4; + repeated uint32 width = 5; +} + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +// Message that stores parameters used by EltwiseLayer +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +message EltwiseParameter { + enum EltwiseOp { + PROD = 0; + SUM = 1; + MAX = 2; + } + optional EltwiseOp operation = 1 [default = SUM]; // element-wise operation + repeated float coeff = 2; // blob-wise coefficient for SUM operation + + // Whether to use an asymptotically slower (for >2 inputs) but stabler method + // of computing the gradient for the PROD operation. (No effect for SUM op.) + optional bool stable_prod_grad = 3 [default = true]; +} + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +// Message that stores parameters used by EmbedLayer +message EmbedParameter { + optional uint32 num_output = 1; // The number of outputs for the layer + // The input is given as integers to be interpreted as one-hot + // vector indices with dimension num_input. 
Hence num_input should be + // 1 greater than the maximum possible input value. + optional uint32 input_dim = 2; + + optional bool bias_term = 3 [default = true]; // Whether to use a bias term + optional FillerParameter weight_filler = 4; // The filler for the weight + optional FillerParameter bias_filler = 5; // The filler for the bias + +} + +// Message that stores parameters used by ExpLayer +>>>>>>> triplet data generation and network update +message ExpParameter { + // ExpLayer computes outputs y = base ^ (shift + scale * x), for base > 0. + // Or if base is set to the default (-1), base is set to e, + // so y = exp(shift + scale * x). + optional float base = 1 [default = -1.0]; + optional float scale = 2 [default = 1.0]; + optional float shift = 3 [default = 0.0]; +} + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +/// Message that stores parameters used by FlattenLayer +message FlattenParameter { + // The first axis to flatten: all preceding axes are retained in the output. + // May be negative to index from the end (e.g., -1 for the last axis). + optional int32 axis = 1 [default = 1]; + + // The last axis to flatten: all following axes are retained in the output. + // May be negative to index from the end (e.g., the default -1 for the last + // axis). + optional int32 end_axis = 2 [default = -1]; +} + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +======= +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +// Message that stores parameters used by HDF5DataLayer +message HDF5DataParameter { + // Specify the data source. + optional string source = 1; + // Specify the batch size. + optional uint32 batch_size = 2; + + // Specify whether to shuffle the data. 
+ // If shuffle == true, the ordering of the HDF5 files is shuffled, + // and the ordering of data within any given HDF5 file is shuffled, + // but data between different files are not interleaved; all of a file's + // data are output (in a random order) before moving onto another file. + optional bool shuffle = 3 [default = false]; +} + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +// Message that stores parameters used by HDF5OutputLayer +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +message HDF5OutputParameter { + optional string file_name = 1; +} + +message HingeLossParameter { + enum Norm { + L1 = 1; + L2 = 2; + } + // Specify the Norm to use L1 or L2 + optional Norm norm = 1 [default = L1]; +} + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +// Message that stores parameters used by ImageDataLayer +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +message ImageDataParameter { + // Specify the data source. + optional string source = 1; + // Specify the batch size. + optional uint32 batch_size = 4 [default = 1]; + // The rand_skip variable is for the data layer to skip a few data points + // to avoid all asynchronous sgd clients to start at the same point. The skip + // point would be set as rand_skip * rand(0,1). Note that rand_skip should not + // be larger than the number of keys in the database. + optional uint32 rand_skip = 7 [default = 0]; + // Whether or not ImageLayer should shuffle the list of files at every epoch. + optional bool shuffle = 8 [default = false]; + // It will also resize images if new_height or new_width are not zero. 
+ optional uint32 new_height = 9 [default = 0]; + optional uint32 new_width = 10 [default = 0]; + // Specify if the images are color or gray + optional bool is_color = 11 [default = true]; + // DEPRECATED. See TransformationParameter. For data pre-processing, we can do + // simple scaling and subtracting the data mean, if provided. Note that the + // mean subtraction is always carried out before scaling. + optional float scale = 2 [default = 1]; + optional string mean_file = 3; + // DEPRECATED. See TransformationParameter. Specify if we would like to randomly + // crop an image. + optional uint32 crop_size = 5 [default = 0]; + // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror + // data. + optional bool mirror = 6 [default = false]; + optional string root_folder = 12 [default = ""]; +} + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +// Message that stores parameters InfogainLossLayer +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +message InfogainLossParameter { + // Specify the infogain matrix source. 
+ optional string source = 1; +} + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +// Message that stores parameters used by InnerProductLayer +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +message InnerProductParameter { + optional uint32 num_output = 1; // The number of outputs for the layer + optional bool bias_term = 2 [default = true]; // whether to have bias terms + optional FillerParameter weight_filler = 3; // The filler for the weight + optional FillerParameter bias_filler = 4; // The filler for the bias + + // The first axis to be lumped into a single inner product computation; + // all preceding axes are retained in the output. + // May be negative to index from the end (e.g., -1 for the last axis). + optional int32 axis = 5 [default = 1]; +} + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +// Message that stores parameters used by LogLayer +message LogParameter { + // LogLayer computes outputs y = log_base(shift + scale * x), for base > 0. 
+ // Or if base is set to the default (-1), base is set to e, + // so y = ln(shift + scale * x) = log_e(shift + scale * x) + optional float base = 1 [default = -1.0]; + optional float scale = 2 [default = 1.0]; + optional float shift = 3 [default = 0.0]; +} + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +======= +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +// Message that stores parameters used by LRNLayer +message LRNParameter { + optional uint32 local_size = 1 [default = 5]; + optional float alpha = 2 [default = 1.]; + optional float beta = 3 [default = 0.75]; + enum NormRegion { + ACROSS_CHANNELS = 0; + WITHIN_CHANNEL = 1; + } + optional NormRegion norm_region = 4 [default = ACROSS_CHANNELS]; + optional float k = 5 [default = 1.]; +} + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +// Message that stores parameters used by MemoryDataLayer +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +message MemoryDataParameter { + optional uint32 batch_size = 1; + optional uint32 channels = 2; + optional uint32 height = 3; + optional uint32 width = 4; +} + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +// Message that stores parameters used by MVNLayer +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +message MVNParameter { + // This parameter can be set to false to normalize mean only + optional bool normalize_variance = 1 [default = true]; + + // This parameter can be set to true to perform DNN-like MVN + optional bool across_channels = 2 [default = false]; +<<<<<<< 
06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig + + // Epsilon for not dividing by zero while normalizing variance + optional float eps = 3 [default = 1e-9]; +} + +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 + + // Epsilon for not dividing by zero while normalizing variance + optional float eps = 3 [default = 1e-9]; +} + +======= +} + +// Message that stores parameters used by PoolingLayer +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +message PoolingParameter { + enum PoolMethod { + MAX = 0; + AVE = 1; + STOCHASTIC = 2; + } + optional PoolMethod pool = 1 [default = MAX]; // The pooling method + // Pad, kernel size, and stride are all given as a single value for equal + // dimensions in height and width or as Y, X pairs. + optional uint32 pad = 4 [default = 0]; // The padding size (equal in Y, X) + optional uint32 pad_h = 9 [default = 0]; // The padding height + optional uint32 pad_w = 10 [default = 0]; // The padding width + optional uint32 kernel_size = 2; // The kernel size (square) + optional uint32 kernel_h = 5; // The kernel height + optional uint32 kernel_w = 6; // The kernel width + optional uint32 stride = 3 [default = 1]; // The stride (equal in Y, X) + optional uint32 stride_h = 7; // The stride height + optional uint32 stride_w = 8; // The stride width + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 11 [default = DEFAULT]; + // If global_pooling then it will pool over the size of the bottom by doing + // kernel_h = bottom->height and kernel_w = bottom->width + optional bool global_pooling = 12 [default = false]; +} + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +// Message that stores parameters used by PowerLayer +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network 
training param:src/caffe/proto/caffe.proto.orig.orig +message PowerParameter { + // PowerLayer computes outputs y = (shift + scale * x) ^ power. + optional float power = 1 [default = 1.0]; + optional float scale = 2 [default = 1.0]; + optional float shift = 3 [default = 0.0]; +} + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +message PythonParameter { + optional string module = 1; + optional string layer = 2; +} + +// Message that stores parameters used by ReductionLayer +message ReductionParameter { + enum ReductionOp { + SUM = 1; + ASUM = 2; + SUMSQ = 3; + MEAN = 4; + } + + optional ReductionOp operation = 1 [default = SUM]; // reduction operation + + // The first axis to reduce to a scalar -- may be negative to index from the + // end (e.g., -1 for the last axis). + // (Currently, only reduction along ALL "tail" axes is supported; reduction + // of axis M through N, where N < num_axes - 1, is unsupported.) + // Suppose we have an n-axis bottom Blob with shape: + // (d0, d1, d2, ..., d(m-1), dm, d(m+1), ..., d(n-1)). + // If axis == m, the output Blob will have shape + // (d0, d1, d2, ..., d(m-1)), + // and the ReductionOp operation is performed (d0 * d1 * d2 * ... * d(m-1)) + // times, each including (dm * d(m+1) * ... * d(n-1)) individual data. + // If axis == 0 (the default), the output Blob always has the empty shape + // (count 1), performing reduction across the entire input -- + // often useful for creating new loss functions. 
+ optional int32 axis = 2 [default = 0]; + + optional float coeff = 3 [default = 1.0]; // coefficient for output +======= +// Message that stores parameters used by PythonLayer +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +message PythonParameter { + optional string module = 1; + optional string layer = 2; + // This value is set to the attribute `param_str` of the `PythonLayer` object + // in Python before calling the `setup()` method. This could be a number, + // string, dictionary in Python dict format, JSON, etc. You may parse this + // string in `setup` method and use it in `forward` and `backward`. + optional string param_str = 3 [default = '']; + // Whether this PythonLayer is shared among worker solvers during data parallelism. + // If true, each worker solver sequentially run forward from this layer. + // This value should be set true if you are using it as a data layer. + optional bool share_in_parallel = 4 [default = false]; +>>>>>>> triplet data generation and network update +} + +// Message that stores parameters used by ReductionLayer +message ReductionParameter { + enum ReductionOp { + SUM = 1; + ASUM = 2; + SUMSQ = 3; + MEAN = 4; + } + + optional ReductionOp operation = 1 [default = SUM]; // reduction operation + + // The first axis to reduce to a scalar -- may be negative to index from the + // end (e.g., -1 for the last axis). + // (Currently, only reduction along ALL "tail" axes is supported; reduction + // of axis M through N, where N < num_axes - 1, is unsupported.) + // Suppose we have an n-axis bottom Blob with shape: + // (d0, d1, d2, ..., d(m-1), dm, d(m+1), ..., d(n-1)). + // If axis == m, the output Blob will have shape + // (d0, d1, d2, ..., d(m-1)), + // and the ReductionOp operation is performed (d0 * d1 * d2 * ... * d(m-1)) + // times, each including (dm * d(m+1) * ... * d(n-1)) individual data. 
+ // If axis == 0 (the default), the output Blob always has the empty shape + // (count 1), performing reduction across the entire input -- + // often useful for creating new loss functions. + optional int32 axis = 2 [default = 0]; + + optional float coeff = 3 [default = 1.0]; // coefficient for output +} + +// Message that stores parameters used by ReLULayer +message ReLUParameter { + // Allow non-zero slope for negative inputs to speed up optimization + // Described in: + // Maas, A. L., Hannun, A. Y., & Ng, A. Y. (2013). Rectifier nonlinearities + // improve neural network acoustic models. In ICML Workshop on Deep Learning + // for Audio, Speech, and Language Processing. + optional float negative_slope = 1 [default = 0]; + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 2 [default = DEFAULT]; +} + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +message ReshapeParameter { + // Specify the output dimensions. If some of the dimensions are set to 0, + // the corresponding dimension from the bottom layer is used (unchanged). + // Exactly one dimension may be set to -1, in which case its value is + // inferred from the count of the bottom blob and the remaining dimensions. + // For example, suppose we want to reshape a 2D blob "input" with shape 2 x 8: + // + // layer { + // type: "Reshape" bottom: "input" top: "output" + // reshape_param { ... 
} + // } + // + // If "input" is 2D with shape 2 x 8, then the following reshape_param + // specifications are all equivalent, producing a 3D blob "output" with shape + // 2 x 2 x 4: + // + // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } + // reshape_param { shape { dim: 0 dim: 2 dim: 4 } } + // reshape_param { shape { dim: 0 dim: 2 dim: -1 } } + // reshape_param { shape { dim: -1 dim: 0 dim: 2 } } + // + optional BlobShape shape = 1; + + // axis and num_axes control the portion of the bottom blob's shape that are + // replaced by (included in) the reshape. By default (axis == 0 and + // num_axes == -1), the entire bottom blob shape is included in the reshape, + // and hence the shape field must specify the entire output shape. + // + // axis may be non-zero to retain some portion of the beginning of the input + // shape (and may be negative to index from the end; e.g., -1 to begin the + // reshape after the last axis, including nothing in the reshape, + // -2 to include only the last axis, etc.). + // + // For example, suppose "input" is a 2D blob with shape 2 x 8. + // Then the following ReshapeLayer specifications are all equivalent, + // producing a blob "output" with shape 2 x 2 x 4: + // + // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } + // reshape_param { shape { dim: 2 dim: 4 } axis: 1 } + // reshape_param { shape { dim: 2 dim: 4 } axis: -3 } + // + // num_axes specifies the extent of the reshape. + // If num_axes >= 0 (and axis >= 0), the reshape will be performed only on + // input axes in the range [axis, axis+num_axes]. + // num_axes may also be -1, the default, to include all remaining axes + // (starting from axis). + // + // For example, suppose "input" is a 2D blob with shape 2 x 8. + // Then the following ReshapeLayer specifications are equivalent, + // producing a blob "output" with shape 1 x 2 x 8. 
+ // + // reshape_param { shape { dim: 1 dim: 2 dim: 8 } } + // reshape_param { shape { dim: 1 dim: 2 } num_axes: 1 } + // reshape_param { shape { dim: 1 } num_axes: 0 } + // + // On the other hand, these would produce output blob shape 2 x 1 x 8: + // + // reshape_param { shape { dim: 2 dim: 1 dim: 8 } } + // reshape_param { shape { dim: 1 } axis: 1 num_axes: 0 } + // + optional int32 axis = 2 [default = 0]; + optional int32 num_axes = 3 [default = -1]; +} + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +======= +// Message that stores parameters used by SigmoidLayer +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +message SigmoidParameter { + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 1 [default = DEFAULT]; +} + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +// Message that stores parameters used by SliceLayer +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +message SliceParameter { + // The axis along which to slice -- may be negative to index from the end + // (e.g., -1 for the last axis). + // By default, SliceLayer concatenates blobs along the "channels" axis (1). + optional int32 axis = 3 [default = 1]; + repeated uint32 slice_point = 2; + + // DEPRECATED: alias for "axis" -- does not support negative indexing. + optional uint32 slice_dim = 1 [default = 1]; +} + +// Message that stores parameters used by SoftmaxLayer, SoftmaxWithLossLayer +message SoftmaxParameter { + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 1 [default = DEFAULT]; + + // The axis along which to perform the softmax -- may be negative to index + // from the end (e.g., -1 for the last axis). 
+ // Any other axes will be evaluated as independent softmaxes. + optional int32 axis = 2 [default = 1]; +} + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +// Message that stores parameters used by TanHLayer +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +message TanHParameter { + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 1 [default = DEFAULT]; +} + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +// Message that stores parameters used by TileLayer +message TileParameter { + // The index of the axis to tile. + optional int32 axis = 1 [default = 1]; + + // The number of copies (tiles) of the blob to output. 
+ optional int32 tiles = 2; +} + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +======= +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +// Message that stores parameters used by ThresholdLayer +message ThresholdParameter { + optional float threshold = 1 [default = 0]; // Strictly positive values +} + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +// Message that stores parameters used by WindowDataLayer +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +message WindowDataParameter { + // Specify the data source. + optional string source = 1; + // For data pre-processing, we can do simple scaling and subtracting the + // data mean, if provided. Note that the mean subtraction is always carried + // out before scaling. + optional float scale = 2 [default = 1]; + optional string mean_file = 3; + // Specify the batch size. + optional uint32 batch_size = 4; + // Specify if we would like to randomly crop an image. + optional uint32 crop_size = 5 [default = 0]; + // Specify if we want to randomly mirror data. 
+ optional bool mirror = 6 [default = false]; + // Foreground (object) overlap threshold + optional float fg_threshold = 7 [default = 0.5]; + // Background (non-object) overlap threshold + optional float bg_threshold = 8 [default = 0.5]; + // Fraction of batch that should be foreground objects + optional float fg_fraction = 9 [default = 0.25]; + // Amount of contextual padding to add around a window + // (used only by the window_data_layer) + optional uint32 context_pad = 10 [default = 0]; + // Mode for cropping out a detection window + // warp: cropped window is warped to a fixed size and aspect ratio + // square: the tightest square around the window is cropped + optional string crop_mode = 11 [default = "warp"]; + // cache_images: will load all images in memory for faster access + optional bool cache_images = 12 [default = false]; + // append root_folder to locate images + optional string root_folder = 13 [default = ""]; +} + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +message SPPParameter { + enum PoolMethod { + MAX = 0; + AVE = 1; + STOCHASTIC = 2; + } + optional uint32 pyramid_height = 1; + optional PoolMethod pool = 2 [default = MAX]; // The pooling method + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 6 [default = DEFAULT]; +} + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +======= +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +// DEPRECATED: use LayerParameter. 
+message V1LayerParameter { + repeated string bottom = 2; + repeated string top = 3; + optional string name = 4; + repeated NetStateRule include = 32; + repeated NetStateRule exclude = 33; + enum LayerType { + NONE = 0; + ABSVAL = 35; + ACCURACY = 1; + ARGMAX = 30; + BNLL = 2; + CONCAT = 3; + CONTRASTIVE_LOSS = 37; + CONVOLUTION = 4; + DATA = 5; + DECONVOLUTION = 39; + DROPOUT = 6; + DUMMY_DATA = 32; + EUCLIDEAN_LOSS = 7; + ELTWISE = 25; + EXP = 38; + FLATTEN = 8; + HDF5_DATA = 9; + HDF5_OUTPUT = 10; + HINGE_LOSS = 28; + IM2COL = 11; + IMAGE_DATA = 12; + INFOGAIN_LOSS = 13; + INNER_PRODUCT = 14; + LRN = 15; + MEMORY_DATA = 29; + MULTINOMIAL_LOGISTIC_LOSS = 16; + MVN = 34; + POOLING = 17; + POWER = 26; + RELU = 18; + SIGMOID = 19; + SIGMOID_CROSS_ENTROPY_LOSS = 27; + SILENCE = 36; + SOFTMAX = 20; + SOFTMAX_LOSS = 21; + SPLIT = 22; + SLICE = 33; + TANH = 23; + TRIPLET_LOSS = 40; + WINDOW_DATA = 24; + THRESHOLD = 31; + + } + optional LayerType type = 5; + repeated BlobProto blobs = 6; + repeated string param = 1001; + repeated DimCheckMode blob_share_mode = 1002; + enum DimCheckMode { + STRICT = 0; + PERMISSIVE = 1; + } + repeated float blobs_lr = 7; + repeated float weight_decay = 8; + repeated float loss_weight = 35; + optional AccuracyParameter accuracy_param = 27; + optional ArgMaxParameter argmax_param = 23; + optional ConcatParameter concat_param = 9; + optional ContrastiveLossParameter contrastive_loss_param = 40; + optional ConvolutionParameter convolution_param = 10; + optional DataParameter data_param = 11; + optional DropoutParameter dropout_param = 12; + optional DummyDataParameter dummy_data_param = 26; + optional EltwiseParameter eltwise_param = 24; + optional ExpParameter exp_param = 41; + optional HDF5DataParameter hdf5_data_param = 13; + optional HDF5OutputParameter hdf5_output_param = 14; + optional HingeLossParameter hinge_loss_param = 29; + optional ImageDataParameter image_data_param = 15; + optional InfogainLossParameter infogain_loss_param = 16; 
+ optional InnerProductParameter inner_product_param = 17; + optional LRNParameter lrn_param = 18; + optional MemoryDataParameter memory_data_param = 22; + optional MVNParameter mvn_param = 34; + optional PoolingParameter pooling_param = 19; + optional PowerParameter power_param = 21; + optional ReLUParameter relu_param = 30; + optional SigmoidParameter sigmoid_param = 38; + optional SoftmaxParameter softmax_param = 39; + optional SliceParameter slice_param = 31; + optional TanHParameter tanh_param = 37; + optional ThresholdParameter threshold_param = 25; + optional WindowDataParameter window_data_param = 20; + optional TransformationParameter transform_param = 36; + optional LossParameter loss_param = 42; + optional V0LayerParameter layer = 1; + optional TripletLossParameter triplet_loss_param = 43; +} + +// DEPRECATED: V0LayerParameter is the old way of specifying layer parameters +// in Caffe. We keep this message type around for legacy support. +message V0LayerParameter { + optional string name = 1; // the layer name + optional string type = 2; // the string to specify the layer type + + // Parameters to specify layers with inner products. 
+ optional uint32 num_output = 3; // The number of outputs for the layer + optional bool biasterm = 4 [default = true]; // whether to have bias terms + optional FillerParameter weight_filler = 5; // The filler for the weight + optional FillerParameter bias_filler = 6; // The filler for the bias + + optional uint32 pad = 7 [default = 0]; // The padding size + optional uint32 kernelsize = 8; // The kernel size + optional uint32 group = 9 [default = 1]; // The group size for group conv + optional uint32 stride = 10 [default = 1]; // The stride + enum PoolMethod { + MAX = 0; + AVE = 1; + STOCHASTIC = 2; + } + optional PoolMethod pool = 11 [default = MAX]; // The pooling method + optional float dropout_ratio = 12 [default = 0.5]; // dropout ratio + + optional uint32 local_size = 13 [default = 5]; // for local response norm + optional float alpha = 14 [default = 1.]; // for local response norm + optional float beta = 15 [default = 0.75]; // for local response norm + optional float k = 22 [default = 1.]; + + // For data layers, specify the data source + optional string source = 16; + // For data pre-processing, we can do simple scaling and subtracting the + // data mean, if provided. Note that the mean subtraction is always carried + // out before scaling. + optional float scale = 17 [default = 1]; + optional string meanfile = 18; + // For data layers, specify the batch size. + optional uint32 batchsize = 19; + // For data layers, specify if we would like to randomly crop an image. + optional uint32 cropsize = 20 [default = 0]; + // For data layers, specify if we want to randomly mirror data. + optional bool mirror = 21 [default = false]; + + // The blobs containing the numeric parameters of the layer + repeated BlobProto blobs = 50; + // The ratio that is multiplied on the global learning rate. If you want to + // set the learning ratio for one blob, you need to set it for all blobs. 
+ repeated float blobs_lr = 51; + // The weight decay that is multiplied on the global weight decay. + repeated float weight_decay = 52; + + // The rand_skip variable is for the data layer to skip a few data points + // to avoid all asynchronous sgd clients to start at the same point. The skip + // point would be set as rand_skip * rand(0,1). Note that rand_skip should not + // be larger than the number of keys in the database. + optional uint32 rand_skip = 53 [default = 0]; + + // Fields related to detection (det_*) + // foreground (object) overlap threshold + optional float det_fg_threshold = 54 [default = 0.5]; + // background (non-object) overlap threshold + optional float det_bg_threshold = 55 [default = 0.5]; + // Fraction of batch that should be foreground objects + optional float det_fg_fraction = 56 [default = 0.25]; + + // optional bool OBSOLETE_can_clobber = 57 [default = true]; + + // Amount of contextual padding to add around a window + // (used only by the window_data_layer) + optional uint32 det_context_pad = 58 [default = 0]; + + // Mode for cropping out a detection window + // warp: cropped window is warped to a fixed size and aspect ratio + // square: the tightest square around the window is cropped + optional string det_crop_mode = 59 [default = "warp"]; + + // For ReshapeLayer, one needs to specify the new dimensions. + optional int32 new_num = 60 [default = 0]; + optional int32 new_channels = 61 [default = 0]; + optional int32 new_height = 62 [default = 0]; + optional int32 new_width = 63 [default = 0]; + + // Whether or not ImageLayer should shuffle the list of files at every epoch. + // It will also resize images if new_height or new_width are not zero. + optional bool shuffle_images = 64 [default = false]; + + // For ConcatLayer, one needs to specify the dimension for concatenation, and + // the other dimensions must be the same for all the bottom blobs. + // By default it will concatenate blobs along the channels dimension. 
+ optional uint32 concat_dim = 65 [default = 1]; + + optional HDF5OutputParameter hdf5_output_param = 1001; +} + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +// Message that stores parameters used by PReLULayer +>>>>>>> triplet data generation and network update +>>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +message PReLUParameter { + // Parametric ReLU described in K. He et al, Delving Deep into Rectifiers: + // Surpassing Human-Level Performance on ImageNet Classification, 2015. + + // Initial value of a_i. Default is a_i=0.25 for all i. + optional FillerParameter filler = 1; + // Whether or not slope paramters are shared across channels. + optional bool channel_shared = 2 [default = false]; +} diff --git a/src/caffe/solver.cpp.orig.orig.orig b/src/caffe/solver.cpp.orig.orig.orig new file mode 100644 index 00000000000..53b16a6217d --- /dev/null +++ b/src/caffe/solver.cpp.orig.orig.orig @@ -0,0 +1,1692 @@ +#include + +#include +#include +#include + +#include "caffe/net.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/solver.hpp" +#include "caffe/util/io.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/util/upgrade_proto.hpp" + +namespace caffe { + +template +Solver::Solver(const SolverParameter& param) + : net_() { + Init(param); +} + +template +Solver::Solver(const string& param_file) + : net_() { + SolverParameter param; + ReadProtoFromTextFileOrDie(param_file, ¶m); + Init(param); +} + +template +void Solver::Init(const SolverParameter& param) { + LOG(INFO) << "Initializing solver from parameters: " << std::endl + << param.DebugString(); + param_ = param; + CHECK_GE(param_.average_loss(), 1) << "average_loss should be non-negative."; +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 + if (param_.random_seed() >= 0) { +======= + if (Caffe::root_solver() && 
param_.random_seed() >= 0) { +>>>>>>> triplet data generation and network update +======= + if (param_.random_seed() >= 0) { +>>>>>>> add 3d network training param + Caffe::set_random_seed(param_.random_seed()); + } + // Scaffolding code + InitTrainNet(); + InitTestNets(); + LOG(INFO) << "Solver scaffolding done."; + iter_ = 0; + current_step_ = 0; +} + +template +void Solver::InitTrainNet() { + const int num_train_nets = param_.has_net() + param_.has_net_param() + + param_.has_train_net() + param_.has_train_net_param(); + const string& field_names = "net, net_param, train_net, train_net_param"; + CHECK_GE(num_train_nets, 1) << "SolverParameter must specify a train net " + << "using one of these fields: " << field_names; + CHECK_LE(num_train_nets, 1) << "SolverParameter must not contain more than " + << "one of these fields specifying a train_net: " << field_names; + NetParameter net_param; + if (param_.has_train_net_param()) { + LOG(INFO) << "Creating training net specified in train_net_param."; + net_param.CopyFrom(param_.train_net_param()); + } else if (param_.has_train_net()) { + LOG(INFO) << "Creating training net from train_net file: " + << param_.train_net(); + ReadNetParamsFromTextFileOrDie(param_.train_net(), &net_param); + } + if (param_.has_net_param()) { + LOG(INFO) << "Creating training net specified in net_param."; + net_param.CopyFrom(param_.net_param()); + } + if (param_.has_net()) { + LOG(INFO) << "Creating training net from net file: " << param_.net(); + ReadNetParamsFromTextFileOrDie(param_.net(), &net_param); + } + // Set the correct NetState. We start with the solver defaults (lowest + // precedence); then, merge in any NetState specified by the net_param itself; + // finally, merge in any NetState specified by the train_state (highest + // precedence). 
+ NetState net_state; + net_state.set_phase(TRAIN); + net_state.MergeFrom(net_param.state()); + net_state.MergeFrom(param_.train_state()); + net_param.mutable_state()->CopyFrom(net_state); + net_.reset(new Net(net_param)); +} + +template +void Solver::InitTestNets() { + const bool has_net_param = param_.has_net_param(); + const bool has_net_file = param_.has_net(); + const int num_generic_nets = has_net_param + has_net_file; + CHECK_LE(num_generic_nets, 1) + << "Both net_param and net_file may not be specified."; + const int num_test_net_params = param_.test_net_param_size(); + const int num_test_net_files = param_.test_net_size(); + const int num_test_nets = num_test_net_params + num_test_net_files; + if (num_generic_nets) { + CHECK_GE(param_.test_iter_size(), num_test_nets) + << "test_iter must be specified for each test network."; + } else { + CHECK_EQ(param_.test_iter_size(), num_test_nets) + << "test_iter must be specified for each test network."; + } + // If we have a generic net (specified by net or net_param, rather than + // test_net or test_net_param), we may have an unlimited number of actual + // test networks -- the actual number is given by the number of remaining + // test_iters after any test nets specified by test_net_param and/or test_net + // are evaluated. 
+ const int num_generic_net_instances = param_.test_iter_size() - num_test_nets; + const int num_test_net_instances = num_test_nets + num_generic_net_instances; + if (param_.test_state_size()) { + CHECK_EQ(param_.test_state_size(), num_test_net_instances) + << "test_state must be unspecified or specified once per test net."; + } + if (num_test_net_instances) { + CHECK_GT(param_.test_interval(), 0); + } + int test_net_id = 0; + vector sources(num_test_net_instances); + vector net_params(num_test_net_instances); + for (int i = 0; i < num_test_net_params; ++i, ++test_net_id) { + sources[test_net_id] = "test_net_param"; + net_params[test_net_id].CopyFrom(param_.test_net_param(i)); + } + for (int i = 0; i < num_test_net_files; ++i, ++test_net_id) { + sources[test_net_id] = "test_net file: " + param_.test_net(i); + ReadNetParamsFromTextFileOrDie(param_.test_net(i), + &net_params[test_net_id]); + } + const int remaining_test_nets = param_.test_iter_size() - test_net_id; + if (has_net_param) { + for (int i = 0; i < remaining_test_nets; ++i, ++test_net_id) { + sources[test_net_id] = "net_param"; + net_params[test_net_id].CopyFrom(param_.net_param()); + } + } + if (has_net_file) { + for (int i = 0; i < remaining_test_nets; ++i, ++test_net_id) { + sources[test_net_id] = "net file: " + param_.net(); + ReadNetParamsFromTextFileOrDie(param_.net(), &net_params[test_net_id]); + } + } + test_nets_.resize(num_test_net_instances); + for (int i = 0; i < num_test_net_instances; ++i) { + // Set the correct NetState. We start with the solver defaults (lowest + // precedence); then, merge in any NetState specified by the net_param + // itself; finally, merge in any NetState specified by the test_state + // (highest precedence). 
+ NetState net_state; + net_state.set_phase(TEST); + net_state.MergeFrom(net_params[i].state()); + if (param_.test_state_size()) { + net_state.MergeFrom(param_.test_state(i)); + } + net_params[i].mutable_state()->CopyFrom(net_state); + LOG(INFO) + << "Creating test net (#" << i << ") specified by " << sources[i]; + test_nets_[i].reset(new Net(net_params[i])); + test_nets_[i]->set_debug_info(param_.debug_info()); + } +} + +template +void Solver::Step(int iters) { + vector*> bottom_vec; + const int start_iter = iter_; + const int stop_iter = iter_ + iters; + int average_loss = this->param_.average_loss(); + vector losses; + Dtype smoothed_loss = 0; + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 + while (iter_ < stop_iter) { + // zero-init the params +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +======= + while (iter_ < stop_iter) { + // zero-init the params +>>>>>>> add 3d network training param + for (int i = 0; i < net_->params().size(); ++i) { + shared_ptr > blob = net_->params()[i]; + switch (Caffe::mode()) { + case Caffe::CPU: + caffe_set(blob->count(), static_cast(0), + blob->mutable_cpu_diff()); + break; + case Caffe::GPU: +#ifndef CPU_ONLY + caffe_gpu_set(blob->count(), static_cast(0), + blob->mutable_gpu_diff()); +#else + NO_GPU; +#endif + break; + } + } + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= + net_->ClearParamDiffs(); +>>>>>>> triplet data generation and network update +======= +>>>>>>> add 3d network training param + if (param_.test_interval() && iter_ % param_.test_interval() == 0 + && (iter_ > 0 || param_.test_initialization())) { + TestAll(); + } + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +======= + for (int i = 0; i < callbacks_.size(); ++i) { + callbacks_[i]->on_start(); +======= + for (; iter_ < stop_iter; ++iter_) { + if (param_.test_interval() && iter_ % param_.test_interval() == 0 + && (iter_ > 0 || 
param_.test_initialization())) { + TestAll(); +>>>>>>> triplet data generation and network update + } +>>>>>>> triplet data generation and network update + const bool display = param_.display() && iter_ % param_.display() == 0; + net_->set_debug_info(display && param_.debug_info()); + Dtype loss = net_->ForwardBackward(bottom_vec); +======= + const bool display = param_.display() && iter_ % param_.display() == 0; + net_->set_debug_info(display && param_.debug_info()); + // accumulate the loss and gradient + Dtype loss = 0; + for (int i = 0; i < param_.iter_size(); ++i) { + loss += net_->ForwardBackward(bottom_vec); + } + loss /= param_.iter_size(); + // average the loss across iterations for smoothed reporting +>>>>>>> add 3d network training param + if (losses.size() < average_loss) { + losses.push_back(loss); + int size = losses.size(); + smoothed_loss = (smoothed_loss * (size - 1) + loss) / size; + } else { + int idx = (iter_ - start_iter) % average_loss; + smoothed_loss += (loss - losses[idx]) / average_loss; + losses[idx] = loss; + } + if (display) { + LOG(INFO) << "Iteration " << iter_ << ", loss = " << smoothed_loss; + const vector*>& result = net_->output_blobs(); + int score_index = 0; + for (int j = 0; j < result.size(); ++j) { + const Dtype* result_vec = result[j]->cpu_data(); + const string& output_name = + net_->blob_names()[net_->output_blob_indices()[j]]; + const Dtype loss_weight = + net_->blob_loss_weights()[net_->output_blob_indices()[j]]; + for (int k = 0; k < result[j]->count(); ++k) { + ostringstream loss_msg_stream; + if (loss_weight) { + loss_msg_stream << " (* " << loss_weight + << " = " << loss_weight * result_vec[k] << " loss)"; + } + LOG(INFO) << " Train net output #" + << score_index++ << ": " << output_name << " = " + << result_vec[k] << loss_msg_stream.str(); + } + } + } +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 + for 
(int i = 0; i < callbacks_.size(); ++i) { + callbacks_[i]->on_gradients_ready(); + } +>>>>>>> triplet data generation and network update +======= +>>>>>>> add 3d network training param + ApplyUpdate(); + + // Increment the internal iter_ counter -- its value should always indicate + // the number of times the weights have been updated. + ++iter_; +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 + + // Save a snapshot if needed. + if (param_.snapshot() && iter_ % param_.snapshot() == 0) { +======= +======= + ComputeUpdateValue(); + net_->Update(); +>>>>>>> triplet data generation and network update + + SolverAction::Enum request = GetRequestedAction(); + + // Save a snapshot if needed. +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 + if ((param_.snapshot() + && iter_ % param_.snapshot() == 0 + && Caffe::root_solver()) || + (request == SolverAction::SNAPSHOT)) { +======= + if (param_.snapshot() && (iter_ + 1) % param_.snapshot() == 0) { +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update +======= + + // Save a snapshot if needed. + if (param_.snapshot() && iter_ % param_.snapshot() == 0) { +>>>>>>> add 3d network training param + Snapshot(); + } + } +} + +template +void Solver::Solve(const char* resume_file) { + LOG(INFO) << "Solving " << net_->name(); + LOG(INFO) << "Learning Rate Policy: " << param_.lr_policy(); + + if (resume_file) { + LOG(INFO) << "Restoring previous solver status from " << resume_file; + Restore(resume_file); + } + + // For a network that is trained by the solver, no bottom or top vecs + // should be given, and we will just provide dummy vecs. 
+ Step(param_.max_iter() - iter_); + // If we haven't already, save a snapshot after optimization, unless + // overridden by setting snapshot_after_train := false + if (param_.snapshot_after_train() + && (!param_.snapshot() || iter_ % param_.snapshot() != 0)) { + Snapshot(); + } + // After the optimization is done, run an additional train and test pass to + // display the train and test loss/outputs if appropriate (based on the + // display and test_interval settings, respectively). Unlike in the rest of + // training, for the train net we only run a forward pass as we've already + // updated the parameters "max_iter" times -- this final pass is only done to + // display the loss, which is computed in the forward pass. + if (param_.display() && iter_ % param_.display() == 0) { + Dtype loss; + net_->ForwardPrefilled(&loss); + LOG(INFO) << "Iteration " << iter_ << ", loss = " << loss; + } + if (param_.test_interval() && iter_ % param_.test_interval() == 0) { + TestAll(); + } + LOG(INFO) << "Optimization Done."; +} + + +template +void Solver::TestAll() { + for (int test_net_id = 0; test_net_id < test_nets_.size(); ++test_net_id) { + Test(test_net_id); + } +} + +template +void Solver::Test(const int test_net_id) { + LOG(INFO) << "Iteration " << iter_ + << ", Testing net (#" << test_net_id << ")"; + CHECK_NOTNULL(test_nets_[test_net_id].get())-> + ShareTrainedLayersWith(net_.get()); + vector test_score; + vector test_score_output_id; + vector*> bottom_vec; + const shared_ptr >& test_net = test_nets_[test_net_id]; + Dtype loss = 0; + for (int i = 0; i < param_.test_iter(test_net_id); ++i) { + Dtype iter_loss; + const vector*>& result = + test_net->Forward(bottom_vec, &iter_loss); + if (param_.test_compute_loss()) { + loss += iter_loss; + } + if (i == 0) { + for (int j = 0; j < result.size(); ++j) { + const Dtype* result_vec = result[j]->cpu_data(); + for (int k = 0; k < result[j]->count(); ++k) { + test_score.push_back(result_vec[k]); + test_score_output_id.push_back(j); 
+ } + } + } else { + int idx = 0; + for (int j = 0; j < result.size(); ++j) { + const Dtype* result_vec = result[j]->cpu_data(); + for (int k = 0; k < result[j]->count(); ++k) { + test_score[idx++] += result_vec[k]; + } + } + } + } + if (param_.test_compute_loss()) { + loss /= param_.test_iter(test_net_id); + LOG(INFO) << "Test loss: " << loss; + } + for (int i = 0; i < test_score.size(); ++i) { + const int output_blob_index = + test_net->output_blob_indices()[test_score_output_id[i]]; + const string& output_name = test_net->blob_names()[output_blob_index]; + const Dtype loss_weight = test_net->blob_loss_weights()[output_blob_index]; + ostringstream loss_msg_stream; + const Dtype mean_score = test_score[i] / param_.test_iter(test_net_id); + if (loss_weight) { + loss_msg_stream << " (* " << loss_weight + << " = " << loss_weight * mean_score << " loss)"; + } + LOG(INFO) << " Test net output #" << i << ": " << output_name << " = " + << mean_score << loss_msg_stream.str(); + } +} + +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< e29f9656158cb307d3fb4a78c63aa2247c5ad57a +template +void Solver::Snapshot() { + CHECK(Caffe::root_solver()); + string model_filename; + switch (param_.snapshot_format()) { + case caffe::SolverParameter_SnapshotFormat_BINARYPROTO: + model_filename = SnapshotToBinaryProto(); + break; + case caffe::SolverParameter_SnapshotFormat_HDF5: + model_filename = SnapshotToHDF5(); + break; + default: + LOG(FATAL) << "Unsupported snapshot format."; + } + + SnapshotSolverState(model_filename); +} +======= +>>>>>>> macro define in upgrade_proto + +template +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +======= + +template +>>>>>>> add 3d network training param +void Solver::Snapshot() { + NetParameter net_param; + // For intermediate results, we will also dump the gradient values. 
+ net_->ToProto(&net_param, param_.snapshot_diff()); +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= +string Solver::SnapshotFilename(const string extension) { +>>>>>>> triplet data generation and network update +======= +>>>>>>> add 3d network training param + string filename(param_.snapshot_prefix()); + string model_filename, snapshot_filename; + const int kBufferSize = 20; + char iter_str_buffer[kBufferSize]; +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 + snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_); +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +======= + return filename + iter_str_buffer + extension; +======= + // Add one to iter_ to get the number of iterations that have completed. + snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_ + 1); +>>>>>>> triplet data generation and network update +======= + snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_); +>>>>>>> add 3d network training param + filename += iter_str_buffer; + model_filename = filename + ".caffemodel"; + LOG(INFO) << "Snapshotting to " << model_filename; + WriteProtoToBinaryFile(net_param, model_filename.c_str()); + SolverState state; + SnapshotSolverState(&state); +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f + state.set_iter(iter_ + 1); +======= + state.set_iter(iter_); +>>>>>>> add 3d network training param + state.set_learned_net(model_filename); + state.set_current_step(current_step_); + snapshot_filename = filename + ".solverstate"; + LOG(INFO) << "Snapshotting solver state to " << snapshot_filename; + WriteProtoToBinaryFile(state, snapshot_filename.c_str()); +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +======= +>>>>>>> triplet data generation and network update +======= +<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +======= +>>>>>>> add 3d network training param +} + +template +string Solver::SnapshotToBinaryProto() { + string 
model_filename = SnapshotFilename(".caffemodel"); + LOG(INFO) << "Snapshotting to binary proto file " << model_filename; + NetParameter net_param; + net_->ToProto(&net_param, param_.snapshot_diff()); + WriteProtoToBinaryFile(net_param, model_filename); + return model_filename; +} + +template +string Solver::SnapshotToHDF5() { + string model_filename = SnapshotFilename(".caffemodel.h5"); + LOG(INFO) << "Snapshotting to HDF5 file " << model_filename; + net_->ToHDF5(model_filename, param_.snapshot_diff()); + return model_filename; +>>>>>>> triplet data generation and network update +} + +template +void Solver::Restore(const char* state_file) { + SolverState state; + NetParameter net_param; + ReadProtoFromBinaryFile(state_file, &state); + if (state.has_learned_net()) { + ReadNetParamsFromBinaryFileOrDie(state.learned_net().c_str(), &net_param); + net_->CopyTrainedLayersFrom(net_param); + } + iter_ = state.iter(); + current_step_ = state.current_step(); + RestoreSolverState(state); +} + + +// Return the current learning rate. The currently implemented learning rate +// policies are as follows: +// - fixed: always return base_lr. +// - step: return base_lr * gamma ^ (floor(iter / step)) +// - exp: return base_lr * gamma ^ iter +// - inv: return base_lr * (1 + gamma * iter) ^ (- power) +// - multistep: similar to step but it allows non uniform steps defined by +// stepvalue +// - poly: the effective learning rate follows a polynomial decay, to be +// zero by the max_iter. return base_lr (1 - iter/max_iter) ^ (power) +// - sigmoid: the effective learning rate follows a sigmod decay +// return base_lr ( 1/(1 + exp(-gamma * (iter - stepsize)))) +// +// where base_lr, max_iter, gamma, step, stepvalue and power are defined +// in the solver parameter protocol buffer, and iter is the current iteration. 
+template +Dtype SGDSolver::GetLearningRate() { + Dtype rate; + const string& lr_policy = this->param_.lr_policy(); + if (lr_policy == "fixed") { + rate = this->param_.base_lr(); + } else if (lr_policy == "step") { + this->current_step_ = this->iter_ / this->param_.stepsize(); + rate = this->param_.base_lr() * + pow(this->param_.gamma(), this->current_step_); + } else if (lr_policy == "exp") { + rate = this->param_.base_lr() * pow(this->param_.gamma(), this->iter_); + } else if (lr_policy == "inv") { + rate = this->param_.base_lr() * + pow(Dtype(1) + this->param_.gamma() * this->iter_, + - this->param_.power()); + } else if (lr_policy == "multistep") { + if (this->current_step_ < this->param_.stepvalue_size() && + this->iter_ >= this->param_.stepvalue(this->current_step_)) { + this->current_step_++; + LOG(INFO) << "MultiStep Status: Iteration " << + this->iter_ << ", step = " << this->current_step_; + } + rate = this->param_.base_lr() * + pow(this->param_.gamma(), this->current_step_); + } else if (lr_policy == "poly") { + rate = this->param_.base_lr() * pow(Dtype(1.) - + (Dtype(this->iter_) / Dtype(this->param_.max_iter())), + this->param_.power()); + } else if (lr_policy == "sigmoid") { + rate = this->param_.base_lr() * (Dtype(1.) / + (Dtype(1.) 
+ exp(-this->param_.gamma() * (Dtype(this->iter_) - + Dtype(this->param_.stepsize()))))); + } else { + LOG(FATAL) << "Unknown learning rate policy: " << lr_policy; + } + return rate; +} + +template +void SGDSolver::PreSolve() { + // Initialize the history + const vector > >& net_params = this->net_->params(); + history_.clear(); + update_.clear(); + temp_.clear(); + for (int i = 0; i < net_params.size(); ++i) { + const vector& shape = net_params[i]->shape(); + history_.push_back(shared_ptr >(new Blob(shape))); + update_.push_back(shared_ptr >(new Blob(shape))); + temp_.push_back(shared_ptr >(new Blob(shape))); + } +} + +template +void SGDSolver::ClipGradients() { + const Dtype clip_gradients = this->param_.clip_gradients(); + if (clip_gradients < 0) { return; } + const vector > >& net_params = this->net_->params(); + Dtype sumsq_diff = 0; + for (int i = 0; i < net_params.size(); ++i) { + if (this->net_->param_owners()[i] < 0) { + sumsq_diff += net_params[i]->sumsq_diff(); + } + } + const Dtype l2norm_diff = std::sqrt(sumsq_diff); + if (l2norm_diff > clip_gradients) { + Dtype scale_factor = clip_gradients / l2norm_diff; + LOG(INFO) << "Gradient clipping: scaling down gradients (L2 norm " + << l2norm_diff << " > " << clip_gradients << ") " + << "by scale factor " << scale_factor; + for (int i = 0; i < net_params.size(); ++i) { + if (this->net_->param_owners()[i] < 0) { + net_params[i]->scale_diff(scale_factor); + } + } + } +} + +template +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +void SGDSolver::ApplyUpdate() { +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +======= + CHECK(Caffe::root_solver()); +======= +void SGDSolver::ComputeUpdateValue() { + const vector > >& net_params = this->net_->params(); + const vector& net_params_lr = this->net_->params_lr(); + const vector& net_params_weight_decay = + this->net_->params_weight_decay(); + // get the learning rate +>>>>>>> triplet data generation and network 
update +>>>>>>> triplet data generation and network update +======= +void SGDSolver::ApplyUpdate() { +>>>>>>> add 3d network training param + Dtype rate = GetLearningRate(); + if (this->param_.display() && this->iter_ % this->param_.display() == 0) { + LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; + } + ClipGradients(); +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 + for (int param_id = 0; param_id < this->net_->params().size(); ++param_id) { +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 + for (int param_id = 0; param_id < this->net_->learnable_params().size(); + ++param_id) { +>>>>>>> triplet data generation and network update +======= + for (int param_id = 0; param_id < this->net_->params().size(); ++param_id) { +>>>>>>> add 3d network training param + Normalize(param_id); + Regularize(param_id); + ComputeUpdateValue(param_id, rate); + } + this->net_->Update(); +} + +template +void SGDSolver::Normalize(int param_id) { + if (this->param_.iter_size() == 1) { return; } + // Scale gradient to counterbalance accumulation. +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 + const vector > >& net_params = this->net_->params(); +======= + const vector*>& net_params = this->net_->learnable_params(); +>>>>>>> triplet data generation and network update +======= + const vector > >& net_params = this->net_->params(); +>>>>>>> add 3d network training param + const Dtype accum_normalization = Dtype(1.) 
/ this->param_.iter_size(); + switch (Caffe::mode()) { + case Caffe::CPU: { + caffe_scal(net_params[param_id]->count(), accum_normalization, + net_params[param_id]->mutable_cpu_diff()); + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + caffe_gpu_scal(net_params[param_id]->count(), accum_normalization, + net_params[param_id]->mutable_gpu_diff()); +#else + NO_GPU; +#endif + break; + } + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +void SGDSolver::Regularize(int param_id) { +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 + const vector > >& net_params = this->net_->params(); +======= + const vector*>& net_params = this->net_->learnable_params(); +>>>>>>> triplet data generation and network update + const vector& net_params_weight_decay = + this->net_->params_weight_decay(); +======= + Dtype momentum = this->param_.momentum(); +>>>>>>> triplet data generation and network update + Dtype weight_decay = this->param_.weight_decay(); + string regularization_type = this->param_.regularization_type(); + switch (Caffe::mode()) { + case Caffe::CPU: + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + // Compute the value to history, and then copy them to the blob's diff. 
+ Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else if (regularization_type == "L1") { + caffe_cpu_sign(net_params[param_id]->count(), + net_params[param_id]->cpu_data(), + temp_[param_id]->mutable_cpu_data()); + caffe_axpy(net_params[param_id]->count(), + local_decay, + temp_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } + } + + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), momentum, + history_[param_id]->mutable_cpu_data()); + // copy + caffe_copy(net_params[param_id]->count(), + history_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } + break; + case Caffe::GPU: +#ifndef CPU_ONLY + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + // Compute the value to history, and then copy them to the blob's diff. 
+ Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else if (regularization_type == "L1") { + caffe_gpu_sign(net_params[param_id]->count(), + net_params[param_id]->gpu_data(), + temp_[param_id]->mutable_gpu_data()); + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + temp_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } + } +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +======= + const vector > >& net_params = this->net_->params(); + const vector& net_params_weight_decay = + this->net_->params_weight_decay(); + Dtype weight_decay = this->param_.weight_decay(); + string regularization_type = this->param_.regularization_type(); + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + switch (Caffe::mode()) { + case Caffe::CPU: { + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else if (regularization_type == "L1") { + caffe_cpu_sign(net_params[param_id]->count(), + net_params[param_id]->cpu_data(), + temp_[param_id]->mutable_cpu_data()); + caffe_axpy(net_params[param_id]->count(), + local_decay, + temp_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } + } + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + 
net_params[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else if (regularization_type == "L1") { + caffe_gpu_sign(net_params[param_id]->count(), + net_params[param_id]->gpu_data(), + temp_[param_id]->mutable_gpu_data()); + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + temp_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } +>>>>>>> add 3d network training param + } +#else + NO_GPU; +#endif + break; + } + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { + const vector > >& net_params = this->net_->params(); +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= + +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +template +void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { + const vector*>& net_params = this->net_->learnable_params(); +>>>>>>> triplet data generation and network update +======= +>>>>>>> add 3d network training param + const vector& net_params_lr = this->net_->params_lr(); + Dtype momentum = this->param_.momentum(); + Dtype local_rate = rate * net_params_lr[param_id]; + // Compute the update to history, then copy it to the parameter diff. 
+ switch (Caffe::mode()) { + case Caffe::CPU: { + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), momentum, + history_[param_id]->mutable_cpu_data()); + caffe_copy(net_params[param_id]->count(), + history_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), momentum, + history_[param_id]->mutable_gpu_data()); + caffe_copy(net_params[param_id]->count(), + history_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +======= +======= + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), momentum, + history_[param_id]->mutable_gpu_data()); + // copy + caffe_copy(net_params[param_id]->count(), + history_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update +======= +>>>>>>> add 3d network training param +#else + NO_GPU; +#endif + break; +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 + } +======= +>>>>>>> triplet data generation and network update +======= + } +>>>>>>> add 3d network training param + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +void SGDSolver::SnapshotSolverState(SolverState* state) { + state->clear_history(); + for (int i = 0; i < history_.size(); ++i) { + // Add history + BlobProto* history_blob = state->add_history(); + history_[i]->ToProto(history_blob); + } +} + +template +void SGDSolver::RestoreSolverState(const SolverState& state) { + CHECK_EQ(state.history_size(), history_.size()) + << "Incorrect length of history blobs."; + LOG(INFO) << "SGDSolver: restoring history"; 
+ for (int i = 0; i < history_.size(); ++i) { + history_[i]->FromProto(state.history(i)); + } +} + +template +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { + const vector > >& net_params = this->net_->params(); +======= +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +void SGDSolver::RestoreSolverStateFromHDF5(const string& state_file) { + hid_t file_hid = H5Fopen(state_file.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK_GE(file_hid, 0) << "Couldn't open solver state file " << state_file; + this->iter_ = hdf5_load_int(file_hid, "iter"); + if (H5LTfind_dataset(file_hid, "learned_net")) { + string learned_net = hdf5_load_string(file_hid, "learned_net"); + this->net_->CopyTrainedLayersFrom(learned_net); + } + this->current_step_ = hdf5_load_int(file_hid, "current_step"); + hid_t history_hid = H5Gopen2(file_hid, "history", H5P_DEFAULT); + CHECK_GE(history_hid, 0) << "Error reading history from " << state_file; + int state_history_size = hdf5_get_num_links(history_hid); + CHECK_EQ(state_history_size, history_.size()) + << "Incorrect length of history blobs."; + for (int i = 0; i < history_.size(); ++i) { + ostringstream oss; + oss << i; + hdf5_load_nd_dataset(history_hid, oss.str().c_str(), 0, + kMaxBlobAxes, history_[i].get()); + } + H5Gclose(history_hid); + H5Fclose(file_hid); +} + +template +void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { + CHECK(Caffe::root_solver()); + const vector*>& net_params = this->net_->learnable_params(); +======= +void NesterovSolver::ComputeUpdateValue() { + const vector > >& net_params = this->net_->params(); +>>>>>>> triplet data generation and network update +>>>>>>> triplet data generation and network update + const vector& net_params_lr = this->net_->params_lr(); + const vector& net_params_weight_decay = + this->net_->params_weight_decay(); + // get the learning rate + Dtype rate = 
this->GetLearningRate(); + if (this->param_.display() && this->iter_ % this->param_.display() == 0) { + LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; + } + SGDSolver::ClipGradients(); + Dtype momentum = this->param_.momentum(); + Dtype weight_decay = this->param_.weight_decay(); + string regularization_type = this->param_.regularization_type(); + switch (Caffe::mode()) { + case Caffe::CPU: + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + // save history momentum for stepping back + caffe_copy(net_params[param_id]->count(), + this->history_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else if (regularization_type == "L1") { + caffe_cpu_sign(net_params[param_id]->count(), + net_params[param_id]->cpu_data(), + this->temp_[param_id]->mutable_cpu_data()); + caffe_axpy(net_params[param_id]->count(), + local_decay, + this->temp_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } + } + + // update history + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), momentum, + this->history_[param_id]->mutable_cpu_data()); + + // compute udpate: step back then over step + caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, + this->history_[param_id]->cpu_data(), -momentum, + this->update_[param_id]->mutable_cpu_data()); + + // copy + caffe_copy(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } + break; + case Caffe::GPU: +#ifndef CPU_ONLY + 
for (int param_id = 0; param_id < net_params.size(); ++param_id) { + // save history momentum for stepping back + caffe_copy(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else if (regularization_type == "L1") { + caffe_gpu_sign(net_params[param_id]->count(), + net_params[param_id]->gpu_data(), + this->temp_[param_id]->mutable_gpu_data()); + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + this->temp_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } + } + + // update history + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), momentum, + this->history_[param_id]->mutable_gpu_data()); + + // compute udpate: step back then over step + caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, + this->history_[param_id]->gpu_data(), -momentum, + this->update_[param_id]->mutable_gpu_data()); + + // copy + caffe_copy(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } +======= +void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { + const vector > >& net_params = this->net_->params(); + const vector& net_params_lr = this->net_->params_lr(); + Dtype momentum = this->param_.momentum(); + Dtype local_rate = rate * net_params_lr[param_id]; + switch (Caffe::mode()) { + case Caffe::CPU: { + // save history momentum for stepping back + caffe_copy(net_params[param_id]->count(), + 
this->history_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + // update history + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), momentum, + this->history_[param_id]->mutable_cpu_data()); + + // compute update: step back then over step + caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, + this->history_[param_id]->cpu_data(), -momentum, + this->update_[param_id]->mutable_cpu_data()); + + // copy + caffe_copy(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + // save history momentum for stepping back + caffe_copy(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + // update history + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), momentum, + this->history_[param_id]->mutable_gpu_data()); + + // compute update: step back then over step + caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, + this->history_[param_id]->gpu_data(), -momentum, + this->update_[param_id]->mutable_gpu_data()); + + // copy + caffe_copy(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); +>>>>>>> add 3d network training param +#else + NO_GPU; +#endif + break; +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= + } +>>>>>>> add 3d network training param + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +void AdaGradSolver::ComputeUpdateValue(int param_id, Dtype rate) { +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +======= +void AdaGradSolver::ComputeUpdateValue(int param_id, Dtype rate) { +>>>>>>> add 3d network training param + const vector > 
>& net_params = this->net_->params(); + const vector& net_params_lr = this->net_->params_lr(); + Dtype delta = this->param_.delta(); + Dtype local_rate = rate * net_params_lr[param_id]; + switch (Caffe::mode()) { + case Caffe::CPU: { +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= + CHECK(Caffe::root_solver()); + const vector*>& net_params = this->net_->learnable_params(); +======= +void AdaGradSolver::ComputeUpdateValue() { + const vector > >& net_params = this->net_->params(); +>>>>>>> triplet data generation and network update + const vector& net_params_lr = this->net_->params_lr(); + const vector& net_params_weight_decay = + this->net_->params_weight_decay(); + // get the learning rate + Dtype rate = this->GetLearningRate(); + Dtype delta = this->param_.delta(); + if (this->param_.display() && this->iter_ % this->param_.display() == 0) { + LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; + } + SGDSolver::ClipGradients(); + Dtype weight_decay = this->param_.weight_decay(); + string regularization_type = this->param_.regularization_type(); + switch (Caffe::mode()) { + case Caffe::CPU: + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else if (regularization_type == "L1") { + caffe_cpu_sign(net_params[param_id]->count(), + net_params[param_id]->cpu_data(), + this->temp_[param_id]->mutable_cpu_data()); + caffe_axpy(net_params[param_id]->count(), + local_decay, + this->temp_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } + } + + // compute square of gradient in update + 
caffe_powx(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), Dtype(2), + this->update_[param_id]->mutable_cpu_data()); + + // update history + caffe_add(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), + this->history_[param_id]->cpu_data(), + this->history_[param_id]->mutable_cpu_data()); + + // prepare update + caffe_powx(net_params[param_id]->count(), + this->history_[param_id]->cpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_cpu_data()); + + caffe_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_cpu_data()); + + caffe_div(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), + this->update_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + // scale and copy + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->cpu_data(), Dtype(0), + net_params[param_id]->mutable_cpu_diff()); + } + break; + case Caffe::GPU: +#ifndef CPU_ONLY + for (int param_id = 0; param_id < net_params.size(); ++param_id) { + Dtype local_rate = rate * net_params_lr[param_id]; + Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; + + if (local_decay) { + if (regularization_type == "L2") { + // add weight decay + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else if (regularization_type == "L1") { + caffe_gpu_sign(net_params[param_id]->count(), + net_params[param_id]->gpu_data(), + this->temp_[param_id]->mutable_gpu_data()); + caffe_gpu_axpy(net_params[param_id]->count(), + local_decay, + this->temp_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + } else { + LOG(FATAL) << "Unknown regularization type: " << regularization_type; + } + } + + // compute square of gradient in update + caffe_gpu_powx(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), Dtype(2), + 
this->update_[param_id]->mutable_gpu_data()); + + // update history + caffe_gpu_add(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + this->history_[param_id]->gpu_data(), + this->history_[param_id]->mutable_gpu_data()); + + // prepare update + caffe_gpu_powx(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_div(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), + this->update_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + // scale and copy + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->gpu_data(), Dtype(0), + net_params[param_id]->mutable_gpu_diff()); + } +#else + NO_GPU; +#endif + break; + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +void RMSPropSolver::ComputeUpdateValue(int param_id, Dtype rate) { + const vector*>& net_params = this->net_->learnable_params(); + const vector& net_params_lr = this->net_->params_lr(); + + // get the learning rate + Dtype delta = this->param_.delta(); + Dtype rms_decay = this->param_.rms_decay(); + Dtype local_rate = rate * net_params_lr[param_id]; + + switch (Caffe::mode()) { + case Caffe::CPU: +>>>>>>> triplet data generation and network update +======= +>>>>>>> add 3d network training param + // compute square of gradient in update + caffe_powx(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), Dtype(2), + this->update_[param_id]->mutable_cpu_data()); + + // update history +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +======= +>>>>>>> add 3d network training param + caffe_add(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), + this->history_[param_id]->cpu_data(), + 
this->history_[param_id]->mutable_cpu_data()); + + // prepare update + caffe_powx(net_params[param_id]->count(), + this->history_[param_id]->cpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_cpu_data()); + + caffe_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_cpu_data()); + + caffe_div(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), + this->update_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= + caffe_cpu_axpby(net_params[param_id] -> count(), + Dtype(1-rms_decay), this->update_[param_id]->cpu_data(), + rms_decay, this->history_[param_id]-> mutable_cpu_data()); + + // prepare update + caffe_powx(net_params[param_id]->count(), + this->history_[param_id]->cpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_cpu_data()); + + caffe_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_cpu_data()); + + caffe_div(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), this->update_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); +>>>>>>> triplet data generation and network update +======= +>>>>>>> add 3d network training param + + // scale and copy + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->cpu_data(), Dtype(0), + net_params[param_id]->mutable_cpu_diff()); + break; +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f + case Caffe::GPU: +======= + } + case Caffe::GPU: { +>>>>>>> add 3d network training param +#ifndef CPU_ONLY + // compute square of gradient in update + caffe_gpu_powx(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), Dtype(2), + this->update_[param_id]->mutable_gpu_data()); + + // update history +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 +======= +>>>>>>> add 3d network training param + 
caffe_gpu_add(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + this->history_[param_id]->gpu_data(), + this->history_[param_id]->mutable_gpu_data()); + + // prepare update + caffe_gpu_powx(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_div(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), + this->update_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + // scale and copy + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->gpu_data(), Dtype(0), + net_params[param_id]->mutable_gpu_diff()); +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= + caffe_gpu_axpby(net_params[param_id] -> count(), + Dtype(1-rms_decay), this->update_[param_id]->gpu_data(), + rms_decay, this->history_[param_id]-> mutable_gpu_data()); + + // prepare update + caffe_gpu_powx(net_params[param_id]->count(), + this->history_[param_id]->gpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_add_scalar(net_params[param_id]->count(), + delta, this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_div(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), this->update_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_axpby(net_params[param_id]->count(), local_rate, + this->update_[param_id]->gpu_data(), Dtype(0), + net_params[param_id]->mutable_gpu_diff()); +#else + NO_GPU; +#endif + break; + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +void AdaDeltaSolver::AdaDeltaPreSolve() { + // Add the extra history entries for AdaDelta after those from + // SGDSolver::PreSolve + const vector*>& net_params = this->net_->learnable_params(); + for (int i = 0; i < net_params.size(); 
++i) { + const vector& shape = net_params[i]->shape(); + this->history_.push_back( + shared_ptr >(new Blob(shape))); + } +} + +template +void AdaDeltaSolver::ComputeUpdateValue(int param_id, Dtype rate) { + const vector*>& net_params = this->net_->learnable_params(); + const vector& net_params_lr = this->net_->params_lr(); + Dtype delta = this->param_.delta(); + Dtype momentum = this->param_.momentum(); + Dtype local_rate = rate * net_params_lr[param_id]; + size_t update_history_offset = net_params.size(); + switch (Caffe::mode()) { + case Caffe::CPU: { + // compute square of gradient in update + caffe_powx(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), Dtype(2), + this->update_[param_id]->mutable_cpu_data()); + + // update history of gradients + caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, + this->update_[param_id]->cpu_data(), momentum, + this->history_[param_id]->mutable_cpu_data()); + + // add delta to history to guard against dividing by zero later + caffe_set(net_params[param_id]->count(), delta, + this->temp_[param_id]->mutable_cpu_data()); + + caffe_add(net_params[param_id]->count(), + this->temp_[param_id]->cpu_data(), + this->history_[update_history_offset + param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + caffe_add(net_params[param_id]->count(), + this->temp_[param_id]->cpu_data(), + this->history_[param_id]->cpu_data(), + this->temp_[param_id]->mutable_cpu_data()); + + // divide history of updates by history of gradients + caffe_div(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), + this->temp_[param_id]->cpu_data(), + this->update_[param_id]->mutable_cpu_data()); + + // jointly compute the RMS of both for update and gradient history + caffe_powx(net_params[param_id]->count(), + this->update_[param_id]->cpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_cpu_data()); + + // compute the update + caffe_mul(net_params[param_id]->count(), + 
net_params[param_id]->cpu_diff(), + this->update_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + + // compute square of update + caffe_powx(net_params[param_id]->count(), + net_params[param_id]->cpu_diff(), Dtype(2), + this->update_[param_id]->mutable_cpu_data()); + + // update history of updates + caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, + this->update_[param_id]->cpu_data(), momentum, + this->history_[update_history_offset + param_id]->mutable_cpu_data()); + + // apply learning rate + caffe_cpu_scale(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), + net_params[param_id]->mutable_cpu_diff()); + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + // compute square of gradient in update + caffe_gpu_powx(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), Dtype(2), + this->update_[param_id]->mutable_gpu_data()); + + // update history of gradients + caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, + this->update_[param_id]->gpu_data(), momentum, + this->history_[param_id]->mutable_gpu_data()); + + // add delta to history to guard against dividing by zero later + caffe_gpu_set(net_params[param_id]->count(), delta, + this->temp_[param_id]->mutable_gpu_data()); + + caffe_gpu_add(net_params[param_id]->count(), + this->temp_[param_id]->gpu_data(), + this->history_[update_history_offset + param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + caffe_gpu_add(net_params[param_id]->count(), + this->temp_[param_id]->gpu_data(), + this->history_[param_id]->gpu_data(), + this->temp_[param_id]->mutable_gpu_data()); + + // divide history of updates by history of gradients + caffe_gpu_div(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), + this->temp_[param_id]->gpu_data(), + this->update_[param_id]->mutable_gpu_data()); + + // jointly compute the RMS of both for update and gradient history + 
caffe_gpu_powx(net_params[param_id]->count(), + this->update_[param_id]->gpu_data(), Dtype(0.5), + this->update_[param_id]->mutable_gpu_data()); + + // compute the update and copy to net_diff + caffe_gpu_mul(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), + this->update_[param_id]->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); + + // compute square of update + caffe_gpu_powx(net_params[param_id]->count(), + net_params[param_id]->gpu_diff(), Dtype(2), + this->update_[param_id]->mutable_gpu_data()); + + // update history of updates + caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, + this->update_[param_id]->gpu_data(), momentum, + this->history_[update_history_offset + param_id]->mutable_gpu_data()); + + // apply learning rate + caffe_gpu_scale(net_params[param_id]->count(), local_rate, + net_params[param_id]->gpu_diff(), + net_params[param_id]->mutable_gpu_diff()); +#else + NO_GPU; +#endif + break; + } + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +template +void AdamSolver::AdamPreSolve() { + // Add the extra history entries for Adam after those from + // SGDSolver::PreSolve + const vector*>& net_params = this->net_->learnable_params(); + for (int i = 0; i < net_params.size(); ++i) { + const vector& shape = net_params[i]->shape(); + this->history_.push_back( + shared_ptr >(new Blob(shape))); + } +} + +template +void AdamSolver::ComputeUpdateValue(int param_id, Dtype rate) { + const vector*>& net_params = this->net_->learnable_params(); + const vector& net_params_lr = this->net_->params_lr(); + Dtype local_rate = rate * net_params_lr[param_id]; + const Dtype beta1 = this->param_.momentum(); + const Dtype beta2 = this->param_.momentum2(); + + // we create aliases for convenience + size_t update_history_offset = net_params.size(); + Blob* val_m = this->history_[param_id].get(); + Blob* val_v = this->history_[param_id + update_history_offset].get(); + Blob* val_t = 
this->temp_[param_id].get(); + + const int t = this->iter_ + 1; + const Dtype correction = std::sqrt(Dtype(1) - pow(beta2, t)) / + (Dtype(1.) - pow(beta1, t)); + const int N = net_params[param_id]->count(); + const Dtype eps_hat = this->param_.delta(); + + switch (Caffe::mode()) { + case Caffe::CPU: { + // update m <- \beta_1 m_{t-1} + (1-\beta_1)g_t + caffe_cpu_axpby(N, Dtype(1)-beta1, + net_params[param_id]->cpu_diff(), beta1, + val_m->mutable_cpu_data()); + + // update v <- \beta_2 m_{t-1} + (1-\beta_2)g_t^2 + caffe_mul(N, + net_params[param_id]->cpu_diff(), + net_params[param_id]->cpu_diff(), + val_t->mutable_cpu_data()); + caffe_cpu_axpby(N, Dtype(1)-beta2, + val_t->cpu_data(), beta2, + val_v->mutable_cpu_data()); + + // set update + caffe_powx(N, + val_v->cpu_data(), Dtype(0.5), + val_t->mutable_cpu_data()); + caffe_add_scalar(N, eps_hat, val_t->mutable_cpu_data()); + caffe_div(N, + val_m->cpu_data(), + val_t->cpu_data(), + val_t->mutable_cpu_data()); + + caffe_cpu_scale(N, local_rate*correction, + val_t->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + break; + } + case Caffe::GPU: { +#ifndef CPU_ONLY + // update m <- \beta_1 m_{t-1} + (1-\beta_1)g_t + caffe_gpu_axpby(N, Dtype(1)-beta1, + net_params[param_id]->gpu_diff(), beta1, + val_m->mutable_gpu_data()); + + // update v <- \beta_2 m_{t-1} + (1-\beta_2)g_t^2 + caffe_gpu_mul(N, + net_params[param_id]->gpu_diff(), + net_params[param_id]->gpu_diff(), + val_t->mutable_gpu_data()); + caffe_gpu_axpby(N, Dtype(1)-beta2, + val_t->gpu_data(), beta2, + val_v->mutable_gpu_data()); + + // set update + caffe_gpu_powx(N, + val_v->gpu_data(), Dtype(0.5), + val_t->mutable_gpu_data()); + caffe_gpu_add_scalar(N, eps_hat, + val_t->mutable_gpu_data()); + caffe_gpu_div(N, + val_m->gpu_data(), + val_t->gpu_data(), + val_t->mutable_gpu_data()); + + caffe_gpu_scale(N, local_rate*correction, + val_t->gpu_data(), + net_params[param_id]->mutable_gpu_diff()); +>>>>>>> triplet data generation and network update +======= 
+>>>>>>> add 3d network training param +#else + NO_GPU; +#endif + break; + } + default: + LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); + } +} + +INSTANTIATE_CLASS(Solver); +INSTANTIATE_CLASS(SGDSolver); +INSTANTIATE_CLASS(NesterovSolver); +INSTANTIATE_CLASS(AdaGradSolver); + +} // namespace caffe From f23f7a43060e53fc29abd6db4f480f7f9a29f47f Mon Sep 17 00:00:00 2001 From: Wangyida Date: Sun, 9 Aug 2015 15:11:32 +0800 Subject: [PATCH 76/82] No sclice layer version which could forward a set of triplets together with 1 pair wise --- examples/siamese/convert_lfw_siamese_data.cpp | 121 ---- examples/siamese/create_lfw_siamese.sh | 21 - examples/siamese/lfw_siamese.prototxt | 113 ---- examples/siamese/lfw_siamese_solver.prototxt | 25 - .../siamese/lfw_siamese_train_test.prototxt | 349 ----------- examples/siamese/train_lfw_siamese.sh | 5 - examples/triplet/3d_triplet_solver.prototxt | 2 +- .../triplet/3d_triplet_train_test.prototxt | 580 +----------------- ...10 => 3d_triplet_train_test.prototxt.orig} | 3 + .../triplet/convert_3d_triplet_data.cpp.orig | 85 +-- examples/triplet/convert_lfw_triplet_data.cpp | 126 ---- .../triplet/convert_mnist_triplet_data.cpp | 140 ----- examples/triplet/create_lfw_triplet.sh | 21 - examples/triplet/create_mnist_triplet.sh | 21 - examples/triplet/lfw_triplet.prototxt | 113 ---- examples/triplet/lfw_triplet_solver.prototxt | 25 - .../triplet/lfw_triplet_train_test.prototxt | 500 --------------- examples/triplet/mnist_triplet.prototxt | 113 ---- .../triplet/mnist_triplet_solver.prototxt | 25 - examples/triplet/train_lfw_triplet.sh | 5 - examples/triplet/train_mnist_triplet.sh | 5 - include/caffe/loss_layers.hpp | 4 +- include/caffe/loss_layers.hpp.orig | 26 +- include/caffe/loss_layers.hpp.orig.orig | 24 +- src/caffe/layers/triplet_loss_layer.cpp.orig | 75 +++ .../layers/triplet_loss_layer.cpp.orig.orig | 127 +--- src/caffe/layers/triplet_loss_layer.cu.orig | 361 +++++++---- .../layers/triplet_loss_layer.cu.orig.orig | 344 
++++++++--- src/caffe/proto/caffe.proto | 2 + src/caffe/proto/caffe.proto.orig | 299 +-------- ...r~5308d9998ae0b1f97b7b99b33fac968421447f3a | 125 ---- 31 files changed, 673 insertions(+), 3112 deletions(-) delete mode 100644 examples/siamese/convert_lfw_siamese_data.cpp delete mode 100755 examples/siamese/create_lfw_siamese.sh delete mode 100644 examples/siamese/lfw_siamese.prototxt delete mode 100644 examples/siamese/lfw_siamese_solver.prototxt delete mode 100644 examples/siamese/lfw_siamese_train_test.prototxt delete mode 100755 examples/siamese/train_lfw_siamese.sh rename examples/triplet/{3d_triplet_train_test.prototxt~165e1d595232eb2a908f62887bcf2d5e1743ed10 => 3d_triplet_train_test.prototxt.orig} (94%) delete mode 100644 examples/triplet/convert_lfw_triplet_data.cpp delete mode 100644 examples/triplet/convert_mnist_triplet_data.cpp delete mode 100755 examples/triplet/create_lfw_triplet.sh delete mode 100755 examples/triplet/create_mnist_triplet.sh delete mode 100644 examples/triplet/lfw_triplet.prototxt delete mode 100644 examples/triplet/lfw_triplet_solver.prototxt delete mode 100644 examples/triplet/lfw_triplet_train_test.prototxt delete mode 100644 examples/triplet/mnist_triplet.prototxt delete mode 100644 examples/triplet/mnist_triplet_solver.prototxt delete mode 100755 examples/triplet/train_lfw_triplet.sh delete mode 100755 examples/triplet/train_mnist_triplet.sh delete mode 100644 src/caffe/test/test_triplet_loss_layer~5308d9998ae0b1f97b7b99b33fac968421447f3a diff --git a/examples/siamese/convert_lfw_siamese_data.cpp b/examples/siamese/convert_lfw_siamese_data.cpp deleted file mode 100644 index fe134ca9b4e..00000000000 --- a/examples/siamese/convert_lfw_siamese_data.cpp +++ /dev/null @@ -1,121 +0,0 @@ -// -// This script converts the MNIST dataset to the leveldb format used -// by caffe to train siamese network. 
-// Usage: -// convert_mnist_data input_image_file input_label_file output_db_file -// The MNIST dataset could be downloaded at -// http://yann.lecun.com/exdb/mnist/ -#include // NOLINT(readability/streams) -#include - -#include "glog/logging.h" -#include "google/protobuf/text_format.h" -#include "leveldb/db.h" -#include "stdint.h" - -#include "caffe/proto/caffe.pb.h" -#include "caffe/util/math_functions.hpp" - -uint32_t swap_endian(uint32_t val) { - val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); - return (val << 16) | (val >> 16); -} - -void read_image(std::ifstream* image_file, std::ifstream* label_file, - uint32_t index, uint32_t rows, uint32_t cols, - char* pixels, char* label) { - image_file->seekg(index * rows * cols + 16); - image_file->read(pixels, rows * cols); - label_file->seekg(index + 8); - label_file->read(label, 1); -} - -void convert_dataset(const char* image_filename, const char* label_filename, - const char* db_filename) { - // Open files - std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); - std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); - CHECK(image_file) << "Unable to open file " << image_filename; - CHECK(label_file) << "Unable to open file " << label_filename; - // Read the magic and the meta data - uint32_t magic; - uint32_t num_items; - uint32_t num_labels; - uint32_t rows; - uint32_t cols; - - image_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - label_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - image_file.read(reinterpret_cast(&num_items), 4); - num_items = swap_endian(num_items); - label_file.read(reinterpret_cast(&num_labels), 4); - num_labels = swap_endian(num_labels); - CHECK_EQ(num_items, num_labels); - image_file.read(reinterpret_cast(&rows), 4); - rows = swap_endian(rows); - image_file.read(reinterpret_cast(&cols), 4); - cols = swap_endian(cols); - - // Open leveldb - leveldb::DB* db; - leveldb::Options options; - 
options.create_if_missing = true; - options.error_if_exists = true; - leveldb::Status status = leveldb::DB::Open( - options, db_filename, &db); - CHECK(status.ok()) << "Failed to open leveldb " << db_filename - << ". Is it already existing?"; - - char label_i; - char label_j; - char* pixels = new char[2 * rows * cols]; - const int kMaxKeyLength = 10; - char key[kMaxKeyLength]; - std::string value; - - caffe::Datum datum; - datum.set_channels(2); // one channel for each image in the pair - datum.set_height(rows); - datum.set_width(cols); - LOG(INFO) << "A total of " << num_items << " items."; - LOG(INFO) << "Rows: " << rows << " Cols: " << cols; - for (int itemid = 0; itemid < num_items; ++itemid) { - int i = caffe::caffe_rng_rand() % num_items; // pick a random pair - int j = caffe::caffe_rng_rand() % num_items; - read_image(&image_file, &label_file, i, rows, cols, - pixels, &label_i); - read_image(&image_file, &label_file, j, rows, cols, - pixels + (rows * cols), &label_j); - datum.set_data(pixels, 2*rows*cols); - if (label_i == label_j) { - datum.set_label(1); - } else { - datum.set_label(0); - } - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", itemid); - db->Put(leveldb::WriteOptions(), std::string(key), value); - } - - delete db; - delete pixels; -} - -int main(int argc, char** argv) { - if (argc != 4) { - printf("This script converts the MNIST dataset to the leveldb format used\n" - "by caffe to train a siamese network.\n" - "Usage:\n" - " convert_mnist_data input_image_file input_label_file " - "output_db_file\n" - "The MNIST dataset could be downloaded at\n" - " http://yann.lecun.com/exdb/mnist/\n" - "You should gunzip them after downloading.\n"); - } else { - google::InitGoogleLogging(argv[0]); - convert_dataset(argv[1], argv[2], argv[3]); - } - return 0; -} diff --git a/examples/siamese/create_lfw_siamese.sh b/examples/siamese/create_lfw_siamese.sh deleted file mode 100755 index 3790b9eb2a0..00000000000 --- 
a/examples/siamese/create_lfw_siamese.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env sh -# This script converts the mnist data into leveldb format. - -EXAMPLES=./build/examples/siamese -DATA=./data/lfw - -echo "Creating leveldb..." - -rm -rf ./examples/siamese/lfw_siamese_train_leveldb -rm -rf ./examples/siamese/lfw_siamese_test_leveldb - -$EXAMPLES/convert_lfw_siamese_data.bin \ - $DATA/traindata \ - $DATA/trainlabel \ - ./examples/siamese/lfw_siamese_train_leveldb -$EXAMPLES/convert_mnist_siamese_data.bin \ - $DATA/testdata \ - $DATA/testlabel \ - ./examples/siamese/lfw_siamese_test_leveldb - -echo "Done." diff --git a/examples/siamese/lfw_siamese.prototxt b/examples/siamese/lfw_siamese.prototxt deleted file mode 100644 index 106d9aa76f4..00000000000 --- a/examples/siamese/lfw_siamese.prototxt +++ /dev/null @@ -1,113 +0,0 @@ -name: "lfw_siamese" -input: "data" -input_dim: 10000 -input_dim: 1 -input_dim: 150 -input_dim: 80 -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 500 - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "ip2" - type: "InnerProduct" - bottom: "ip1" - top: "ip2" - param { - lr_mult: 1 - } - 
param { - lr_mult: 2 - } - inner_product_param { - num_output: 10 - } -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip2" - top: "feat" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 2 - } -} diff --git a/examples/siamese/lfw_siamese_solver.prototxt b/examples/siamese/lfw_siamese_solver.prototxt deleted file mode 100644 index 2aaafb63c1f..00000000000 --- a/examples/siamese/lfw_siamese_solver.prototxt +++ /dev/null @@ -1,25 +0,0 @@ -# The train/test net protocol buffer definition -net: "examples/siamese/lfw_siamese_train_test.prototxt" -# test_iter specifies how many forward passes the test should carry out. -# In the case of MNIST, we have test batch size 100 and 100 test iterations, -# covering the full 10,000 testing images. -test_iter: 100 -# Carry out testing every 500 training iterations. -test_interval: 500 -# The base learning rate, momentum and the weight decay of the network. -base_lr: 0.01 -momentum: 0.9 -weight_decay: 0.0000 -# The learning rate policy -lr_policy: "inv" -gamma: 0.0001 -power: 0.75 -# Display every 100 iterations -display: 100 -# The maximum number of iterations -max_iter: 50000 -# snapshot intermediate results -snapshot: 5000 -snapshot_prefix: "examples/siamese/lfw_siamese" -# solver mode: CPU or GPU -solver_mode: CPU diff --git a/examples/siamese/lfw_siamese_train_test.prototxt b/examples/siamese/lfw_siamese_train_test.prototxt deleted file mode 100644 index 049187bf3d4..00000000000 --- a/examples/siamese/lfw_siamese_train_test.prototxt +++ /dev/null @@ -1,349 +0,0 @@ -name: "lfw_siamese_train_test" -layer { - name: "pair_data" - type: "Data" - top: "pair_data" - top: "sim" - include { - phase: TRAIN - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/siamese/lfw_siamese_train_leveldb" - batch_size: 64 - } -} -layer { - name: "pair_data" - type: "Data" - top: "pair_data" - top: "sim" - include { - phase: TEST - } - transform_param { - scale: 
0.00390625 - } - data_param { - source: "examples/siamese/lfw_siamese_test_leveldb" - batch_size: 100 - } -} -layer { - name: "slice_pair" - type: "Slice" - bottom: "pair_data" - top: "data" - top: "data_p" - slice_param { - slice_dim: 1 - slice_point: 1 - } -} -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "ip2" - type: "InnerProduct" - bottom: "ip1" - top: "ip2" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip2" - top: "feat" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 
2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "conv1_p" - type: "Convolution" - bottom: "data_p" - top: "conv1_p" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1_p" - type: "Pooling" - bottom: "conv1_p" - top: "pool1_p" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2_p" - type: "Convolution" - bottom: "pool1_p" - top: "conv2_p" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2_p" - type: "Pooling" - bottom: "conv2_p" - top: "pool2_p" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1_p" - type: "InnerProduct" - bottom: "pool2_p" - top: "ip1_p" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1_p" - type: "ReLU" - bottom: "ip1_p" - top: "ip1_p" -} -layer { - name: "ip2_p" - type: "InnerProduct" - bottom: "ip1_p" - top: "ip2_p" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat_p" - type: "InnerProduct" - bottom: "ip2_p" - top: "feat_p" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - 
} - bias_filler { - type: "constant" - } - } -} -layer { - name: "loss" - type: "ContrastiveLoss" - bottom: "feat" - bottom: "feat_p" - bottom: "sim" - top: "loss" - contrastive_loss_param { - margin: 1 - } -} diff --git a/examples/siamese/train_lfw_siamese.sh b/examples/siamese/train_lfw_siamese.sh deleted file mode 100755 index 0a879a65419..00000000000 --- a/examples/siamese/train_lfw_siamese.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env sh - -TOOLS=./build/tools - -$TOOLS/caffe train --solver=examples/siamese/lfw_siamese_solver.prototxt diff --git a/examples/triplet/3d_triplet_solver.prototxt b/examples/triplet/3d_triplet_solver.prototxt index d61a6c42f4b..e5719a8f96b 100644 --- a/examples/triplet/3d_triplet_solver.prototxt +++ b/examples/triplet/3d_triplet_solver.prototxt @@ -7,7 +7,7 @@ test_iter: 23 # Carry out testing every 500 training iterations. test_interval: 23 # The base learning rate, momentum and the weight decay of the network. -base_lr: 0.01 +base_lr: 0.001 momentum: 0.9 weight_decay: 0.0000 # The learning rate policy diff --git a/examples/triplet/3d_triplet_train_test.prototxt b/examples/triplet/3d_triplet_train_test.prototxt index bff19047ab8..1ac185aa2cc 100644 --- a/examples/triplet/3d_triplet_train_test.prototxt +++ b/examples/triplet/3d_triplet_train_test.prototxt @@ -1,8 +1,8 @@ name: "3d_triplet_train_test" layer { - name: "triplet_data" + name: "data" type: "Data" - top: "triplet_data" + top: "data" top: "sim" include { phase: TRAIN @@ -12,13 +12,13 @@ layer { } data_param { source: "examples/triplet/3d_triplet_train_leveldb" - batch_size: 23 + batch_size: 250 } } layer { - name: "triplet_data" + name: "data" type: "Data" - top: "triplet_data" + top: "data" top: "sim" include { phase: TEST @@ -28,24 +28,7 @@ layer { } data_param { source: "examples/triplet/3d_triplet_test_leveldb" - batch_size: 23 - } -} -layer { - name: "slice_triplet" - type: "Slice" - bottom: "triplet_data" - top: "data" - top: "data_true" - top: "data_false" - top: 
"data_p1" - top: "data_p2" - slice_param { - slice_dim: 1 - slice_point: 1 - slice_point: 2 - slice_point: 3 - slice_point: 4 + batch_size: 250 } } layer { @@ -184,562 +167,15 @@ layer { } } } -layer { - name: "conv1_true" - type: "Convolution" - bottom: "data_true" - top: "conv1_true" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 16 - kernel_size: 8 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1_true" - type: "Pooling" - bottom: "conv1_true" - top: "pool1_true" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "relu1_true" - type: "ReLU" - bottom: "pool1_true" - top: "pool1_true" -} -layer { - name: "conv2_true" - type: "Convolution" - bottom: "pool1_true" - top: "conv2_true" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 7 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2_true" - type: "Pooling" - bottom: "conv2_true" - top: "pool2_true" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "relu2_true" - type: "ReLU" - bottom: "pool2_true" - top: "pool2_true" -} -layer { - name: "ip1_true" - type: "InnerProduct" - bottom: "pool2_true" - top: "ip1_true" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 256 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu3_true" - type: "ReLU" - bottom: "ip1_true" - top: "ip1_true" -} -layer { - name: "feat_true" - type: "InnerProduct" - bottom: "ip1_true" - top: "feat_true" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 4 - weight_filler { - 
type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "conv1_false" - type: "Convolution" - bottom: "data_false" - top: "conv1_false" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 16 - kernel_size: 8 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1_false" - type: "Pooling" - bottom: "conv1_false" - top: "pool1_false" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "relu1_false" - type: "ReLU" - bottom: "pool1_false" - top: "pool1_false" -} -layer { - name: "conv2_false" - type: "Convolution" - bottom: "pool1_false" - top: "conv2_false" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 7 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2_false" - type: "Pooling" - bottom: "conv2_false" - top: "pool2_false" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "relu2_false" - type: "ReLU" - bottom: "pool2_false" - top: "pool2_false" -} -layer { - name: "ip1_false" - type: "InnerProduct" - bottom: "pool2_false" - top: "ip1_false" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 256 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu3_false" - type: "ReLU" - bottom: "ip1_false" - top: "ip1_false" -} -layer { - name: "feat_false" - type: "InnerProduct" - bottom: "ip1_false" - top: "feat_false" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 4 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "conv1_p1" - type: 
"Convolution" - bottom: "data_p1" - top: "conv1_p1" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 16 - kernel_size: 8 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1_p1" - type: "Pooling" - bottom: "conv1_p1" - top: "pool1_p1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "relu1_p1" - type: "ReLU" - bottom: "pool1_p1" - top: "pool1_p1" -} -layer { - name: "conv2_p1" - type: "Convolution" - bottom: "pool1_p1" - top: "conv2_p1" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 7 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2_p1" - type: "Pooling" - bottom: "conv2_p1" - top: "pool2_p1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "relu2_p1" - type: "ReLU" - bottom: "pool2_p1" - top: "pool2_p1" -} -layer { - name: "ip1_p1" - type: "InnerProduct" - bottom: "pool2_p1" - top: "ip1_p1" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 256 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu3_p1" - type: "ReLU" - bottom: "ip1_p1" - top: "ip1_p1" -} -layer { - name: "feat_p1" - type: "InnerProduct" - bottom: "ip1_p1" - top: "feat_p1" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 4 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "conv1_p2" - type: "Convolution" - bottom: "data_p2" - top: "conv1_p2" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 16 - 
kernel_size: 8 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1_p2" - type: "Pooling" - bottom: "conv1_p2" - top: "pool1_p2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "relu1_p2" - type: "ReLU" - bottom: "pool1_p2" - top: "pool1_p2" -} -layer { - name: "conv2_p2" - type: "Convolution" - bottom: "pool1_p2" - top: "conv2_p2" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 7 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2_p2" - type: "Pooling" - bottom: "conv2_p2" - top: "pool2_p2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "relu2_p2" - type: "ReLU" - bottom: "pool2_p2" - top: "pool2_p2" -} -layer { - name: "ip1_p2" - type: "InnerProduct" - bottom: "pool2_p2" - top: "ip1_p2" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 256 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu3_p2" - type: "ReLU" - bottom: "ip1_p2" - top: "ip1_p2" -} -layer { - name: "feat_p2" - type: "InnerProduct" - bottom: "ip1_p2" - top: "feat_p2" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 4 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} layer { name: "loss" type: "TripletLoss" bottom: "feat" - bottom: "feat_true" - bottom: "feat_false" - bottom: "feat_p1" - bottom: "feat_p2" bottom: "sim" top: "loss" triplet_loss_param { margin: 1 + losstype: 0 + num_triplets: 3 } } - diff --git a/examples/triplet/3d_triplet_train_test.prototxt~165e1d595232eb2a908f62887bcf2d5e1743ed10 b/examples/triplet/3d_triplet_train_test.prototxt.orig similarity 
index 94% rename from examples/triplet/3d_triplet_train_test.prototxt~165e1d595232eb2a908f62887bcf2d5e1743ed10 rename to examples/triplet/3d_triplet_train_test.prototxt.orig index 1ac185aa2cc..f79629bda21 100644 --- a/examples/triplet/3d_triplet_train_test.prototxt~165e1d595232eb2a908f62887bcf2d5e1743ed10 +++ b/examples/triplet/3d_triplet_train_test.prototxt.orig @@ -175,7 +175,10 @@ layer { top: "loss" triplet_loss_param { margin: 1 +<<<<<<< 8bea48cb77e890cda255da58b142f3a402196328 +======= losstype: 0 num_triplets: 3 +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise } } diff --git a/examples/triplet/convert_3d_triplet_data.cpp.orig b/examples/triplet/convert_3d_triplet_data.cpp.orig index 1d6e726f2b8..9a08806e70e 100644 --- a/examples/triplet/convert_3d_triplet_data.cpp.orig +++ b/examples/triplet/convert_3d_triplet_data.cpp.orig @@ -84,6 +84,7 @@ void convert_dataset(const char* image_filename, const char* label_filename, signed char* label_k = new signed char[4]; signed char* label_l = new signed char[4]; // label for pair wise signed char* label_m = new signed char[4]; +<<<<<<< d189d0a9af2a0b5e5baf61f9ab66ee9cbf75c6ac int db_size; if (rgb_use1 == 0) db_size = rows * cols; @@ -94,6 +95,13 @@ void convert_dataset(const char* image_filename, const char* label_filename, char* pixels3 = new char[db_size]; char* pixels4 = new char[db_size]; char* pixels5 = new char[db_size]; +======= + char* pixels1 = new char[rows * cols]; + char* pixels2 = new char[rows * cols]; + char* pixels3 = new char[rows * cols]; + char* pixels4 = new char[rows * cols]; + char* pixels5 = new char[rows * cols]; +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise const int kMaxKeyLength = 10; char key[kMaxKeyLength]; std::string value; @@ -103,7 +111,6 @@ void convert_dataset(const char* image_filename, const char* label_filename, datum.set_width(cols); LOG(INFO) << "A total of " << num_items << " 
items."; LOG(INFO) << "Rows: " << rows << " Cols: " << cols; -<<<<<<< fab928843d2f9ac33ce9468a63ed163247ffdfc8 int counter = 0; for (unsigned int times = 0; times < 5; ++times) { // iteration in the samples of all class @@ -117,6 +124,7 @@ void convert_dataset(const char* image_filename, const char* label_filename, int l = caffe::caffe_rng_rand() % num_items; // pick pair wise groups int m = caffe::caffe_rng_rand() % num_items; read_image(&image_file, &label_file, i, rows, cols, // read triplet +<<<<<<< d189d0a9af2a0b5e5baf61f9ab66ee9cbf75c6ac pixels1, label_temp, label_i, rgb_use1); read_image(&image_file, &label_file, j, rows, cols, pixels2, label_temp, label_j, rgb_use1); @@ -126,6 +134,17 @@ void convert_dataset(const char* image_filename, const char* label_filename, pixels4, label_temp, label_l, rgb_use1); read_image(&image_file, &label_file, m, rows, cols, pixels5, label_temp, label_m, rgb_use1); +======= + pixels1, label_temp, label_i); + read_image(&image_file, &label_file, j, rows, cols, + pixels2, label_temp, label_j); + read_image(&image_file, &label_file, k, rows, cols, + pixels3, label_temp, label_k); + read_image(&image_file, &label_file, l, rows, cols, // read pair wise + pixels4, label_temp, label_l); + read_image(&image_file, &label_file, m, rows, cols, + pixels5, label_temp, label_m); +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise bool pair_pass = false; bool triplet1_pass = false; @@ -149,7 +168,11 @@ void convert_dataset(const char* image_filename, const char* label_filename, float dist_ij = std::sqrt(ij_x + ij_y + ij_z); float dist_im = std::sqrt(im_x + im_y + im_z); +<<<<<<< d189d0a9af2a0b5e5baf61f9ab66ee9cbf75c6ac if (*label_i == *label_j && dist_ij < 100/2 && dist_ij != 0) +======= + if (*label_i == *label_j && dist_ij < 100/2) +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise pair_pass = true; if (pair_pass && (*label_i != *label_k)) 
triplet1_pass = true; @@ -196,58 +219,6 @@ void convert_dataset(const char* image_filename, const char* label_filename, } // iteration in the samples of all class } // iteration in the samples in one class } // iteration in times -======= - for (unsigned int itemid = 0; itemid < 10 * num_items; ++itemid) { - int i = caffe::caffe_rng_rand() % num_items; // pick triplet groups - int j = caffe::caffe_rng_rand() % num_items; - int k = caffe::caffe_rng_rand() % num_items; - int l = caffe::caffe_rng_rand() % num_items; // pick pair wise groups - int m = caffe::caffe_rng_rand() % num_items; - read_image(&image_file, &label_file, i, rows, cols, // read triplet - pixels, label_temp, label_i); - read_image(&image_file, &label_file, j, rows, cols, - pixels + (rows * cols), label_temp, label_j); - read_image(&image_file, &label_file, k, rows, cols, - pixels + (2 * rows * cols), label_temp, label_k); - read_image(&image_file, &label_file, l, rows, cols, // read pair wise - pixels + (3 * rows * cols), label_temp, label_l); - read_image(&image_file, &label_file, m, rows, cols, - pixels + (4 * rows * cols), label_temp, label_m); - - datum.set_data(pixels, 5*rows*cols); // set data - bool triplet_class_pass; - bool triplet_class_same; - bool triplet_pose_pass; - bool pair_class_pass; - int ij_x, ij_y, ij_z; - int ik_x, ik_y, ik_z; - ij_x = static_cast(*(label_i+1)-*(label_j+1))*(*(label_i+1)-*(label_j+1)); - ij_y = static_cast(*(label_i+2)-*(label_j+2))*(*(label_i+2)-*(label_j+2)); - ij_z = static_cast(*(label_i+3)-*(label_j+3))*(*(label_i+3)-*(label_j+3)); - ik_x = static_cast(*(label_i+1)-*(label_k+1))*(*(label_i+1)-*(label_k+1)); - ik_y = static_cast(*(label_i+2)-*(label_k+2))*(*(label_i+2)-*(label_k+2)); - ik_z = static_cast(*(label_i+3)-*(label_k+3))*(*(label_i+3)-*(label_k+3)); - int dist_ij = ij_x + ij_y + ij_z; - int dist_ik = ik_x + ik_y + ik_z; - if (dist_ij < dist_ik ) - triplet_pose_pass = true; - if ((*label_i == *label_j) && (*label_i != *label_k)) - 
triplet_class_pass = true; - if ((*label_i == *label_j) && (*label_i == *label_k)) - triplet_class_same = true; - if (*label_l == *label_m) - pair_class_pass = true; - if (( triplet_class_pass || (triplet_class_same && triplet_pose_pass)) && pair_class_pass) { - datum.set_label(1); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", itemid); - db->Put(leveldb::WriteOptions(), std::string(key), value); - } else { - itemid--; - datum.set_label(0); - } - } ->>>>>>> rearrange the training samples selection codes delete db; delete pixels1; delete pixels2; @@ -257,12 +228,20 @@ void convert_dataset(const char* image_filename, const char* label_filename, } int main(int argc, char** argv) { +<<<<<<< d189d0a9af2a0b5e5baf61f9ab66ee9cbf75c6ac if (argc != 6) { +======= + if (argc != 5) { +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise printf("This script converts the dataset to the leveldb format used\n" "by caffe to train a triplet network.\n" "Usage:\n" " convert_3d_data input_image_file input_label_file " +<<<<<<< d189d0a9af2a0b5e5baf61f9ab66ee9cbf75c6ac "output_db_file class_number rgb_use \n"); +======= + "output_db_file class_number\n"); +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise } else { google::InitGoogleLogging(argv[0]); convert_dataset(argv[1], argv[2], argv[3], argv[4], argv[5]); diff --git a/examples/triplet/convert_lfw_triplet_data.cpp b/examples/triplet/convert_lfw_triplet_data.cpp deleted file mode 100644 index 9f65fab76b4..00000000000 --- a/examples/triplet/convert_lfw_triplet_data.cpp +++ /dev/null @@ -1,126 +0,0 @@ -// This script converts the MNIST dataset to the leveldb format used -// by caffe to train siamese network. 
-// Usage: -// convert_mnist_data input_image_file input_label_file output_db_file -// The MNIST dataset could be downloaded at -// http://yann.lecun.com/exdb/mnist/ -#include // NOLINT(readability/streams) -#include - -#include "glog/logging.h" -#include "google/protobuf/text_format.h" -#include "leveldb/db.h" -#include "stdint.h" - -#include "caffe/proto/caffe.pb.h" -#include "caffe/util/math_functions.hpp" - -uint32_t swap_endian(uint32_t val) { - val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); - return (val << 16) | (val >> 16); -} - -void read_image(std::ifstream* image_file, std::ifstream* label_file, - uint32_t index, uint32_t rows, uint32_t cols, - char* pixels, char* label) { - image_file->seekg(index * rows * cols + 16); - image_file->read(pixels, rows * cols); - label_file->seekg(index + 8); - label_file->read(label, 1); -} - -void convert_dataset(const char* image_filename, const char* label_filename, - const char* db_filename) { - // Open files - std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); - std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); - CHECK(image_file) << "Unable to open file " << image_filename; - CHECK(label_file) << "Unable to open file " << label_filename; - // Read the magic and the meta data - uint32_t magic; - uint32_t num_items; - uint32_t num_labels; - uint32_t rows; - uint32_t cols; - - image_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - label_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - image_file.read(reinterpret_cast(&num_items), 4); - num_items = swap_endian(num_items); - label_file.read(reinterpret_cast(&num_labels), 4); - num_labels = swap_endian(num_labels); - CHECK_EQ(num_items, num_labels); - image_file.read(reinterpret_cast(&rows), 4); - rows = swap_endian(rows); - image_file.read(reinterpret_cast(&cols), 4); - cols = swap_endian(cols); - - // Open leveldb - leveldb::DB* db; - leveldb::Options options; - 
options.create_if_missing = true; - options.error_if_exists = true; - leveldb::Status status = leveldb::DB::Open( - options, db_filename, &db); - CHECK(status.ok()) << "Failed to open leveldb " << db_filename - << ". Is it already existing?"; - - char label_i; - char label_j; - char label_k; - char* pixels = new char[3 * rows * cols]; - const int kMaxKeyLength = 10; - char key[kMaxKeyLength]; - std::string value; - - caffe::Datum datum; - datum.set_channels(3); // one channel for each image in the pair - datum.set_height(rows); - datum.set_width(cols); - LOG(INFO) << "A total of " << num_items << " items."; - LOG(INFO) << "Rows: " << rows << " Cols: " << cols; - for (int itemid = 0; itemid < num_items; ++itemid) { - int i = caffe::caffe_rng_rand() % num_items; // pick triplet groups - int j = caffe::caffe_rng_rand() % num_items; - int k = caffe::caffe_rng_rand() % num_items; - read_image(&image_file, &label_file, i, rows, cols, - pixels, &label_i); - read_image(&image_file, &label_file, j, rows, cols, - pixels + (rows * cols), &label_j); - read_image(&image_file, &label_file, k, rows, cols, - pixels + (2 * rows * cols), &label_k); - - datum.set_data(pixels, 3*rows*cols); - if (label_i == label_j && label_i != label_k) { - datum.set_label(1); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", itemid); - db->Put(leveldb::WriteOptions(), std::string(key), value); - } else { - itemid--; - datum.set_label(0); - } - } - - delete db; - delete pixels; -} - -int main(int argc, char** argv) { - if (argc != 4) { - printf("This script converts the MNIST dataset to the leveldb format used\n" - "by caffe to train a siamese network.\n" - "Usage:\n" - " convert_mnist_data input_image_file input_label_file " - "output_db_file\n" - "The MNIST dataset could be downloaded at\n" - " http://yann.lecun.com/exdb/mnist/\n" - "You should gunzip them after downloading.\n"); - } else { - google::InitGoogleLogging(argv[0]); - convert_dataset(argv[1], argv[2], argv[3]); - 
} - return 0; -} diff --git a/examples/triplet/convert_mnist_triplet_data.cpp b/examples/triplet/convert_mnist_triplet_data.cpp deleted file mode 100644 index 97e27bc4db8..00000000000 --- a/examples/triplet/convert_mnist_triplet_data.cpp +++ /dev/null @@ -1,140 +0,0 @@ -// This script converts the MNIST dataset to the leveldb format used -// by caffe to train siamese network. -// Usage: -// convert_mnist_data input_image_file input_label_file output_db_file -// The MNIST dataset could be downloaded at -// http://yann.lecun.com/exdb/mnist/ -#include // NOLINT(readability/streams) -#include - -#include "glog/logging.h" -#include "google/protobuf/text_format.h" -#include "leveldb/db.h" -#include "stdint.h" - -#include "caffe/proto/caffe.pb.h" -#include "caffe/util/math_functions.hpp" - -uint32_t swap_endian(uint32_t val) { - val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); - return (val << 16) | (val >> 16); -} - -void read_image(std::ifstream* image_file, std::ifstream* label_file, - uint32_t index, uint32_t rows, uint32_t cols, - char* pixels, char* label) { - image_file->seekg(index * rows * cols + 16); - image_file->read(pixels, rows * cols); - label_file->seekg(index + 8); - label_file->read(label, 1); -} - -void convert_dataset(const char* image_filename, const char* label_filename, - const char* db_filename) { - // Open files - std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); - std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); - CHECK(image_file) << "Unable to open file " << image_filename; - CHECK(label_file) << "Unable to open file " << label_filename; - // Read the magic and the meta data - uint32_t magic; - uint32_t num_items; - uint32_t num_labels; - uint32_t rows; - uint32_t cols; - - image_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - CHECK_EQ(magic, 2051) << "Incorrect image file magic."; - label_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - 
CHECK_EQ(magic, 2049) << "Incorrect label file magic."; - image_file.read(reinterpret_cast(&num_items), 4); - num_items = swap_endian(num_items); - label_file.read(reinterpret_cast(&num_labels), 4); - num_labels = swap_endian(num_labels); - CHECK_EQ(num_items, num_labels); - image_file.read(reinterpret_cast(&rows), 4); - rows = swap_endian(rows); - image_file.read(reinterpret_cast(&cols), 4); - cols = swap_endian(cols); - - // Open leveldb - leveldb::DB* db; - leveldb::Options options; - options.create_if_missing = true; - options.error_if_exists = true; - leveldb::Status status = leveldb::DB::Open( - options, db_filename, &db); - CHECK(status.ok()) << "Failed to open leveldb " << db_filename - << ". Is it already existing?"; - - char label_i; - char label_j; - char label_k; - char label_l; // label for pair wise - char label_m; - char* pixels = new char[5 * rows * cols]; - const int kMaxKeyLength = 10; - char key[kMaxKeyLength]; - std::string value; - - caffe::Datum datum; - datum.set_channels(3); // one channel for each image in the pair - datum.set_height(rows); - datum.set_width(cols); - LOG(INFO) << "A total of " << num_items << " items."; - LOG(INFO) << "Rows: " << rows << " Cols: " << cols; - for (int itemid = 0; itemid < num_items; ++itemid) { - // pick triplet groups - int i = caffe::caffe_rng_rand() % num_items; - int j = caffe::caffe_rng_rand() % num_items; - int k = caffe::caffe_rng_rand() % num_items; - // pick pair wise groups - int l = caffe::caffe_rng_rand() % num_items; - int m = caffe::caffe_rng_rand() % num_items; - // read triplet groups - read_image(&image_file, &label_file, i, rows, cols, - pixels, &label_i); - read_image(&image_file, &label_file, j, rows, cols, - pixels + (rows * cols), &label_j); - read_image(&image_file, &label_file, k, rows, cols, - pixels + (2 * rows * cols), &label_k); - // pair wise groups - read_image(&image_file, &label_file, l, rows, cols, - pixels + (3 * rows * cols), &label_l); - read_image(&image_file, 
&label_file, m, rows, cols, - pixels + (4 * rows * cols), &label_m); - - datum.set_data(pixels, 3*rows*cols); - if (label_i == label_j && label_i != label_k) { - datum.set_label(1); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", itemid); - db->Put(leveldb::WriteOptions(), std::string(key), value); - } else { - itemid--; - datum.set_label(0); - } - } - - delete db; - delete pixels; -} - -int main(int argc, char** argv) { - if (argc != 4) { - printf("This script converts the MNIST dataset to the leveldb format used\n" - "by caffe to train a siamese network.\n" - "Usage:\n" - " convert_mnist_data input_image_file input_label_file " - "output_db_file\n" - "The MNIST dataset could be downloaded at\n" - " http://yann.lecun.com/exdb/mnist/\n" - "You should gunzip them after downloading.\n"); - } else { - google::InitGoogleLogging(argv[0]); - convert_dataset(argv[1], argv[2], argv[3]); - } - return 0; -} diff --git a/examples/triplet/create_lfw_triplet.sh b/examples/triplet/create_lfw_triplet.sh deleted file mode 100755 index 382a9021f10..00000000000 --- a/examples/triplet/create_lfw_triplet.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env sh -# This script converts the lfw data into leveldb format. - -EXAMPLES=./build/examples/triplet -DATA=./data/lfw - -echo "Creating leveldb..." - -rm -rf ./examples/triplet/lfw_triplet_train_leveldb -rm -rf ./examples/triplet/lfw_triplet_test_leveldb - -$EXAMPLES/convert_lfw_triplet_data.bin \ - $DATA/traindata \ - $DATA/trainlabel \ - ./examples/triplet/lfw_triplet_train_leveldb -$EXAMPLES/convert_lfw_triplet_data.bin \ - $DATA/testdata \ - $DATA/testlabel \ - ./examples/triplet/lfw_triplet_test_leveldb - -echo "Done." 
diff --git a/examples/triplet/create_mnist_triplet.sh b/examples/triplet/create_mnist_triplet.sh deleted file mode 100755 index f404f2aa255..00000000000 --- a/examples/triplet/create_mnist_triplet.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env sh -# This script converts the mnist data into leveldb format. - -EXAMPLES=./build/examples/triplet -DATA=./data/mnist - -echo "Creating leveldb..." - -rm -rf ./examples/triplet/mnist_siamese_train_leveldb -rm -rf ./examples/triplet/mnist_siamese_test_leveldb - -$EXAMPLES/convert_mnist_triplet_data.bin \ - $DATA/train-images-idx3-ubyte \ - $DATA/train-labels-idx1-ubyte \ - ./examples/triplet/mnist_triplet_train_leveldb -$EXAMPLES/convert_mnist_triplet_data.bin \ - $DATA/t10k-images-idx3-ubyte \ - $DATA/t10k-labels-idx1-ubyte \ - ./examples/triplet/mnist_triplet_test_leveldb - -echo "Done." diff --git a/examples/triplet/lfw_triplet.prototxt b/examples/triplet/lfw_triplet.prototxt deleted file mode 100644 index 9537d1feb8b..00000000000 --- a/examples/triplet/lfw_triplet.prototxt +++ /dev/null @@ -1,113 +0,0 @@ -name: "lfw_siamese" -input: "data" -input_dim: 10000 -input_dim: 1 -input_dim: 150 -input_dim: 130 -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } 
- inner_product_param { - num_output: 500 - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "ip2" - type: "InnerProduct" - bottom: "ip1" - top: "ip2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 10 - } -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip2" - top: "feat" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 2 - } -} diff --git a/examples/triplet/lfw_triplet_solver.prototxt b/examples/triplet/lfw_triplet_solver.prototxt deleted file mode 100644 index eb4c2c369e9..00000000000 --- a/examples/triplet/lfw_triplet_solver.prototxt +++ /dev/null @@ -1,25 +0,0 @@ -# The train/test net protocol buffer definition -net: "examples/triplet/lfw_triplet_train_test.prototxt" -# test_iter specifies how many forward passes the test should carry out. -# In the case of lfw, we have test batch size 100 and 100 test iterations, -# covering the full 10,000 testing images. -test_iter: 100 -# Carry out testing every 500 training iterations. -test_interval: 500 -# The base learning rate, momentum and the weight decay of the network. 
-base_lr: 0.01 -momentum: 0.9 -weight_decay: 0.0000 -# The learning rate policy -lr_policy: "inv" -gamma: 0.0001 -power: 0.75 -# Display every 100 iterations -display: 100 -# The maximum number of iterations -max_iter: 50000 -# snapshot intermediate results -snapshot: 5000 -snapshot_prefix: "examples/triplet/lfw_triplet" -# solver mode: CPU or GPU -solver_mode: CPU diff --git a/examples/triplet/lfw_triplet_train_test.prototxt b/examples/triplet/lfw_triplet_train_test.prototxt deleted file mode 100644 index 59ef26e90a4..00000000000 --- a/examples/triplet/lfw_triplet_train_test.prototxt +++ /dev/null @@ -1,500 +0,0 @@ -name: "lfw_triplet_train_test" -layer { - name: "triplet_data" - type: "Data" - top: "triplet_data" - top: "sim" - include { - phase: TRAIN - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/triplet/lfw_triplet_train_leveldb" - batch_size: 64 - } -} -layer { - name: "triplet_data" - type: "Data" - top: "triplet_data" - top: "sim" - include { - phase: TEST - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/triplet/lfw_triplet_test_leveldb" - batch_size: 100 - } -} -layer { - name: "slice_triplet" - type: "Slice" - bottom: "triplet_data" - top: "data" - top: "data_true" - top: "data_false" - slice_param { - slice_dim: 1 - slice_point: 1 - slice_point: 2 - } -} -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - 
convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "ip2" - type: "InnerProduct" - bottom: "ip1" - top: "ip2" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip2" - top: "feat" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "conv1_true" - type: "Convolution" - bottom: "data_true" - top: "conv1_true" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1_true" - type: "Pooling" - bottom: "conv1_true" - top: "pool1_true" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2_true" - type: "Convolution" - bottom: "pool1_true" - top: "conv2_true" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - 
type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2_true" - type: "Pooling" - bottom: "conv2_true" - top: "pool2_true" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1_true" - type: "InnerProduct" - bottom: "pool2_true" - top: "ip1_true" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1_true" - type: "ReLU" - bottom: "ip1_true" - top: "ip1_true" -} -layer { - name: "ip2_true" - type: "InnerProduct" - bottom: "ip1_true" - top: "ip2_true" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat_true" - type: "InnerProduct" - bottom: "ip2_true" - top: "feat_true" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "conv1_false" - type: "Convolution" - bottom: "data_false" - top: "conv1_false" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1_false" - type: "Pooling" - bottom: "conv1_false" - top: "pool1_false" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2_false" - type: "Convolution" - bottom: "pool1_false" - top: "conv2_false" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - 
type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2_false" - type: "Pooling" - bottom: "conv2_false" - top: "pool2_false" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1_false" - type: "InnerProduct" - bottom: "pool2_false" - top: "ip1_false" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1_false" - type: "ReLU" - bottom: "ip1_false" - top: "ip1_false" -} -layer { - name: "ip2_false" - type: "InnerProduct" - bottom: "ip1_false" - top: "ip2_false" - param { - name: "ip2_w" - lr_mult: 1 - } - param { - name: "ip2_b" - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "feat_false" - type: "InnerProduct" - bottom: "ip2_false" - top: "feat_false" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 2 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "loss" - type: "TripletLoss" - bottom: "feat" - bottom: "feat_true" - bottom: "feat_false" - bottom: "sim" - top: "loss" - triplet_loss_param { - margin: 0.2 - } -} - diff --git a/examples/triplet/mnist_triplet.prototxt b/examples/triplet/mnist_triplet.prototxt deleted file mode 100644 index 0e903f85909..00000000000 --- a/examples/triplet/mnist_triplet.prototxt +++ /dev/null @@ -1,113 +0,0 @@ -name: "mnist_siamese" -input: "data" -input_dim: 10000 -input_dim: 1 -input_dim: 28 -input_dim: 28 -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - } -} -layer { - name: "pool1" - type: "Pooling" 
- bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 500 - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "ip2" - type: "InnerProduct" - bottom: "ip1" - top: "ip2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 10 - } -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip2" - top: "feat" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 2 - } -} diff --git a/examples/triplet/mnist_triplet_solver.prototxt b/examples/triplet/mnist_triplet_solver.prototxt deleted file mode 100644 index edd8e1e0338..00000000000 --- a/examples/triplet/mnist_triplet_solver.prototxt +++ /dev/null @@ -1,25 +0,0 @@ -# The train/test net protocol buffer definition -net: "examples/triplet/mnist_triplet_train_test.prototxt" -# test_iter specifies how many forward passes the test should carry out. -# In the case of MNIST, we have test batch size 100 and 100 test iterations, -# covering the full 10,000 testing images. -test_iter: 100 -# Carry out testing every 500 training iterations. -test_interval: 500 -# The base learning rate, momentum and the weight decay of the network. 
-base_lr: 0.001 -momentum: 0.9 -weight_decay: 0.0000 -# The learning rate policy -lr_policy: "inv" -gamma: 0.0001 -power: 0.75 -# Display every 100 iterations -display: 100 -# The maximum number of iterations -max_iter: 50000 -# snapshot intermediate results -snapshot: 5000 -snapshot_prefix: "examples/triplet/mnist_triplet" -# solver mode: CPU or GPU -solver_mode: CPU diff --git a/examples/triplet/train_lfw_triplet.sh b/examples/triplet/train_lfw_triplet.sh deleted file mode 100755 index 076738a5e63..00000000000 --- a/examples/triplet/train_lfw_triplet.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env sh - -TOOLS=./build/tools - -$TOOLS/caffe train --solver=examples/triplet/lfw_triplet_solver.prototxt diff --git a/examples/triplet/train_mnist_triplet.sh b/examples/triplet/train_mnist_triplet.sh deleted file mode 100755 index 683cda2963b..00000000000 --- a/examples/triplet/train_mnist_triplet.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env sh - -TOOLS=./release/tools - -$TOOLS/caffe train --solver=examples/triplet/mnist_triplet_solver.prototxt diff --git a/include/caffe/loss_layers.hpp b/include/caffe/loss_layers.hpp index 646a6b9640f..0a9547749bd 100644 --- a/include/caffe/loss_layers.hpp +++ b/include/caffe/loss_layers.hpp @@ -226,14 +226,14 @@ class TripletLossLayer : public LossLayer { virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); - virtual inline int ExactNumBottomBlobs() const { return 4; } + virtual inline int ExactNumBottomBlobs() const { return 2; } virtual inline const char* type() const { return "TripletLoss"; } /** * Unlike most loss layers, in the TripletLossLayer we can backpropagate * to the first three inputs. 
*/ virtual inline bool AllowForceBackward(const int bottom_index) const { - return bottom_index != 3; + return bottom_index != 1; } protected: diff --git a/include/caffe/loss_layers.hpp.orig b/include/caffe/loss_layers.hpp.orig index b0c8469611a..87c019d763b 100644 --- a/include/caffe/loss_layers.hpp.orig +++ b/include/caffe/loss_layers.hpp.orig @@ -224,38 +224,22 @@ class TripletLossLayer : public LossLayer { virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -<<<<<<< 9a40f059e6e96f739ad502aae165c4bfdbf1f369 - virtual inline int ExactNumBottomBlobs() const { return 2; } -======= +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 virtual inline int ExactNumBottomBlobs() const { return 4; } ->>>>>>> New triplet loss layer added(beta1 version-no test source files) ======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 virtual inline int ExactNumBottomBlobs() const { return 2; } -======= - virtual inline int ExactNumBottomBlobs() const { return 4; } ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise virtual inline const char* type() const { return "TripletLoss"; } /** * Unlike most loss layers, in the TripletLossLayer we can backpropagate * to the first three inputs. 
*/ virtual inline bool AllowForceBackward(const int bottom_index) const { -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -<<<<<<< 9a40f059e6e96f739ad502aae165c4bfdbf1f369 - return bottom_index != 1; -======= +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 return bottom_index != 3; ->>>>>>> New triplet loss layer added(beta1 version-no test source files) ======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 return bottom_index != 1; -======= - return bottom_index != 3; ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise } protected: @@ -702,8 +686,6 @@ class SigmoidCrossEntropyLossLayer : public LossLayer { /// @copydoc SigmoidCrossEntropyLossLayer virtual void Forward_cpu(const vector*>& bottom, const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); /** * @brief Computes the sigmoid cross-entropy loss error gradient w.r.t. 
the diff --git a/include/caffe/loss_layers.hpp.orig.orig b/include/caffe/loss_layers.hpp.orig.orig index c3242b49761..26b506a7662 100644 --- a/include/caffe/loss_layers.hpp.orig.orig +++ b/include/caffe/loss_layers.hpp.orig.orig @@ -224,13 +224,12 @@ class TripletLossLayer : public LossLayer { virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); +<<<<<<< 8bea48cb77e890cda255da58b142f3a402196328 <<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 <<<<<<< 9a40f059e6e96f739ad502aae165c4bfdbf1f369 virtual inline int ExactNumBottomBlobs() const { return 2; } ======= virtual inline int ExactNumBottomBlobs() const { return 4; } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:include/caffe/loss_layers.hpp.orig -======= >>>>>>> New triplet loss layer added(beta1 version-no test source files) ======= <<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 @@ -238,21 +237,26 @@ class TripletLossLayer : public LossLayer { ======= virtual inline int ExactNumBottomBlobs() const { return 4; } >>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:include/caffe/loss_layers.hpp.orig.orig >>>>>>> triplet data generation and network update +======= +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 + virtual inline int ExactNumBottomBlobs() const { return 4; } +======= + virtual inline int ExactNumBottomBlobs() const { return 2; } +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise virtual inline const char* type() const { return "TripletLoss"; } /** * Unlike most loss layers, in the TripletLossLayer we can backpropagate * to the first three inputs. 
*/ virtual inline bool AllowForceBackward(const int bottom_index) const { +<<<<<<< 8bea48cb77e890cda255da58b142f3a402196328 <<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 <<<<<<< 9a40f059e6e96f739ad502aae165c4bfdbf1f369 return bottom_index != 1; ======= return bottom_index != 3; -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:include/caffe/loss_layers.hpp.orig -======= >>>>>>> New triplet loss layer added(beta1 version-no test source files) ======= <<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 @@ -260,8 +264,14 @@ class TripletLossLayer : public LossLayer { ======= return bottom_index != 3; >>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:include/caffe/loss_layers.hpp.orig.orig >>>>>>> triplet data generation and network update +======= +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 + return bottom_index != 3; +======= + return bottom_index != 1; +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise } protected: @@ -708,8 +718,6 @@ class SigmoidCrossEntropyLossLayer : public LossLayer { /// @copydoc SigmoidCrossEntropyLossLayer virtual void Forward_cpu(const vector*>& bottom, const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); /** * @brief Computes the sigmoid cross-entropy loss error gradient w.r.t. 
the diff --git a/src/caffe/layers/triplet_loss_layer.cpp.orig b/src/caffe/layers/triplet_loss_layer.cpp.orig index 613fa703676..066d7967652 100644 --- a/src/caffe/layers/triplet_loss_layer.cpp.orig +++ b/src/caffe/layers/triplet_loss_layer.cpp.orig @@ -94,9 +94,15 @@ void TripletLossLayer::Forward_cpu( // a b is a similar pair for pair wise // loss accumulated by the pair wise part loss += dist_sq_pos.cpu_data()[i]; +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 + for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; + dist_sq_.mutable_cpu_data()[i] += margin; +======= dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; dist_sq_.mutable_cpu_data()[i] += margin; for (int triplet = 0; triplet < num_triplets; ++triplet) { +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise // Loss component calculated from negative part caffe_sub( dim, @@ -172,6 +178,8 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, Dtype(1.0), bout + ((2 + num_triplets)*j + i)*dim); } +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 +======= } } } @@ -214,18 +222,71 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, Dtype(1.0), bout + ((2 + num_triplets)*j + i)*dim); } +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise } } } } +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 + // BP for feat2(extracted from the closest sample) + for (int i = 1; i < 2; ++i) { + if (propagate_down[0]) { + const Dtype sign = -1; +======= // BP for negative feature used in the num_triplets triplet part for (int i = 2; i < 2 + num_triplets; ++i) { if (propagate_down[0]) { const Dtype sign = 1; +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise const Dtype alpha = sign * top[0]->cpu_diff()[0] / static_cast(num_set); for (int j = 0; j < num_set; ++j) { Dtype* bout = 
bottom[0]->mutable_cpu_diff(); +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 + // the pair part + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(0.0), + bout + ((2 + num_triplets)*j + i)*dim); + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + caffe_sub( + dim, + bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference + bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, + diff_neg.mutable_cpu_data() + j*dim); // reference-negative + // Triplet loss accumulation + // a and negative[triplet] is a similar pair for triplet + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; + dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, + diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); + // a and negative[triplet] is a dissimilar pair for triplet + dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; + if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { + // similar pair in triplet + caffe_cpu_axpby( + dim, + alpha, + diff_pos.cpu_data() + (j*dim), + Dtype(1.0), + bout + ((2 + num_triplets)*j + i)*dim); + } + } + } + } + } + // BP for negative feature used in the num_triplets triplet part + for (int i = 2; i < 2 + num_triplets; ++i) { + if (propagate_down[0]) { + const Dtype sign = 1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / + static_cast(num_set); + for (int j = 0; j < num_set; ++j) { + Dtype* bout = bottom[0]->mutable_cpu_diff(); +======= +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise caffe_sub( dim, bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference @@ -268,10 +329,17 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, diff_pos.cpu_data() + (j*dim), Dtype(0.0), bout + ((2 + num_triplets)*j + i)*dim); +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + 
dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; +======= dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; dist_sq_.mutable_cpu_data()[j] += margin; // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise // Loss component calculated from negative part caffe_sub( dim, @@ -319,10 +387,17 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, diff_pos.cpu_data() + (j*dim), Dtype(0.0), bout + ((2 + num_triplets)*j + i)*dim); +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; +======= dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; dist_sq_.mutable_cpu_data()[j] += margin; // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise // Loss component calculated from negative part caffe_sub( dim, diff --git a/src/caffe/layers/triplet_loss_layer.cpp.orig.orig b/src/caffe/layers/triplet_loss_layer.cpp.orig.orig index 1bac0565530..6d30c7f1878 100644 --- a/src/caffe/layers/triplet_loss_layer.cpp.orig.orig +++ b/src/caffe/layers/triplet_loss_layer.cpp.orig.orig @@ -46,10 +46,6 @@ void TripletLossLayer::Forward_cpu( int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); CHECK_EQ(bottom[0]->num()%(2 + num_triplets), 0); Dtype loss(0.0); -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a ->>>>>>> triplet data generation and network update int dim = bottom[0]->count()/bottom[0]->num(); int num_set = bottom[0]->num()/(2 + num_triplets); 
if (losstype == 0) { @@ -82,24 +78,6 @@ void TripletLossLayer::Forward_cpu( // loss accumulated accumulated by the triplet part loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -======= - - // Loss component calculated from ab - for (int i = 0; i < bottom[0]->num(); ++i) { - dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(channels, - diff_pos.cpu_data() + (i*channels), diff_pos.cpu_data() + (i*channels)); - // ab is a similar pair - dist_sq_.mutable_cpu_data()[i] += dist_sq_pos.cpu_data()[i]; - // Loss component calculated from ac - dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(channels, - diff_neg.cpu_data() + (i*channels), diff_neg.cpu_data() + (i*channels)); - // ac is a dissimilar pair - dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg.cpu_data()[i]; - loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); ->>>>>>> restore ->>>>>>> triplet data generation and network update } loss = loss / static_cast(num_set) / Dtype(2); top[0]->mutable_cpu_data()[0] = loss; @@ -116,8 +94,12 @@ void TripletLossLayer::Forward_cpu( // a b is a similar pair for pair wise // loss accumulated by the pair wise part loss += dist_sq_pos.cpu_data()[i]; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -<<<<<<< 8b0f51f43a3aeac04970b675f35321c2608ed301 +<<<<<<< 8bea48cb77e890cda255da58b142f3a402196328 + dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; + dist_sq_.mutable_cpu_data()[i] += margin; + for (int triplet = 0; triplet < num_triplets; ++triplet) { +======= +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 for (int triplet = 0; triplet < num_triplets; ++triplet) { dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; dist_sq_.mutable_cpu_data()[i] += margin; @@ -126,11 +108,7 @@ void TripletLossLayer::Forward_cpu( dist_sq_.mutable_cpu_data()[i] += margin; for (int triplet = 0; triplet < num_triplets; ++triplet) { >>>>>>> No sclice layer version which could forward 
a set of triplets together with 1 pair wise -======= - for (int triplet = 0; triplet < num_triplets; ++triplet) { - dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; - dist_sq_.mutable_cpu_data()[i] += margin; ->>>>>>> triplet data generation and network update +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise // Loss component calculated from negative part caffe_sub( dim, @@ -155,10 +133,6 @@ template void TripletLossLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { Dtype margin = this->layer_param_.triplet_loss_param().margin(); -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a ->>>>>>> triplet data generation and network update Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); int dim = bottom[0]->count()/bottom[0]->num(); @@ -210,11 +184,8 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, Dtype(1.0), bout + ((2 + num_triplets)*j + i)*dim); } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -<<<<<<< 8b0f51f43a3aeac04970b675f35321c2608ed301 +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 ======= -======= ->>>>>>> triplet data generation and network update } } } @@ -257,37 +228,27 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, Dtype(1.0), bout + ((2 + num_triplets)*j + i)*dim); } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 >>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise -======= ->>>>>>> triplet data generation and network update } } } } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -<<<<<<< 8b0f51f43a3aeac04970b675f35321c2608ed301 +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 // BP for feat2(extracted from the closest sample) for (int i = 1; i < 2; ++i) { if (propagate_down[0]) { const Dtype sign = -1; ======= 
-======= ->>>>>>> triplet data generation and network update // BP for negative feature used in the num_triplets triplet part for (int i = 2; i < 2 + num_triplets; ++i) { if (propagate_down[0]) { const Dtype sign = 1; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 >>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise -======= ->>>>>>> triplet data generation and network update const Dtype alpha = sign * top[0]->cpu_diff()[0] / static_cast(num_set); for (int j = 0; j < num_set; ++j) { Dtype* bout = bottom[0]->mutable_cpu_diff(); -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -<<<<<<< 8b0f51f43a3aeac04970b675f35321c2608ed301 +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 // the pair part caffe_cpu_axpby( dim, @@ -332,8 +293,6 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, Dtype* bout = bottom[0]->mutable_cpu_diff(); ======= >>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise -======= ->>>>>>> triplet data generation and network update caffe_sub( dim, bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference @@ -348,32 +307,11 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { // dissimilar pairs -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -======= - for (int i = 1; i < 3; ++i) { -// there must be further check to ensure the gradient calc - if (propagate_down[i]) { - const Dtype sign = (i == 2) ? 
1 : -1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(bottom[i]->num()); - int num = bottom[i]->num(); - int channels = bottom[i]->channels(); - for (int j = 0; j < num; ++j) { - Dtype* bout = bottom[i]->mutable_cpu_diff(); - if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - // similar pairs ->>>>>>> restore ->>>>>>> triplet data generation and network update caffe_cpu_axpby( dim, alpha, diff_neg.cpu_data() + (j*dim), Dtype(0.0), -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a ->>>>>>> triplet data generation and network update bout + ((2 + num_triplets)*j + i)*dim); } else { caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); @@ -397,23 +335,24 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, diff_pos.cpu_data() + (j*dim), Dtype(0.0), bout + ((2 + num_triplets)*j + i)*dim); -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -<<<<<<< 8b0f51f43a3aeac04970b675f35321c2608ed301 +<<<<<<< 8bea48cb77e890cda255da58b142f3a402196328 + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { ======= ->>>>>>> triplet data generation and network update +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; dist_sq_.mutable_cpu_data()[j] += margin; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 ======= dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; dist_sq_.mutable_cpu_data()[j] += margin; // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { >>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise -======= ->>>>>>> triplet data generation and network update +>>>>>>> No 
sclice layer version which could forward a set of triplets together with 1 pair wise // Loss component calculated from negative part caffe_sub( dim, @@ -461,23 +400,24 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, diff_pos.cpu_data() + (j*dim), Dtype(0.0), bout + ((2 + num_triplets)*j + i)*dim); -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -<<<<<<< 8b0f51f43a3aeac04970b675f35321c2608ed301 +<<<<<<< 8bea48cb77e890cda255da58b142f3a402196328 + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { ======= ->>>>>>> triplet data generation and network update +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; dist_sq_.mutable_cpu_data()[j] += margin; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 ======= dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; dist_sq_.mutable_cpu_data()[j] += margin; // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { >>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise -======= ->>>>>>> triplet data generation and network update +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise // Loss component calculated from negative part caffe_sub( dim, @@ -527,33 +467,14 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; // loss accumulated accumulated by the triplet part if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -======= - bout + (j*channels)); - // dissimilar pairs ->>>>>>> restore ->>>>>>> triplet data generation and network update caffe_cpu_axpby( 
dim, alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), diff_neg.cpu_data() + (j*dim), Dtype(0.0), -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 bout + ((2 + num_triplets)*j + i)*dim); } else { caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); -======= -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a - bout + ((2 + num_triplets)*j + i)*dim); - } else { - caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); -======= - bout + (j*channels)); - } else { - caffe_set(channels, Dtype(0), bout + (j*channels)); ->>>>>>> restore ->>>>>>> triplet data generation and network update } } } diff --git a/src/caffe/layers/triplet_loss_layer.cu.orig b/src/caffe/layers/triplet_loss_layer.cu.orig index 11ef82e17ac..21695217f7a 100644 --- a/src/caffe/layers/triplet_loss_layer.cu.orig +++ b/src/caffe/layers/triplet_loss_layer.cu.orig @@ -2,15 +2,9 @@ #include #include "caffe/layer.hpp" -<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c #include "caffe/util/io.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" -======= -#include "caffe/vision_layers.hpp" -#include "caffe/util/io.hpp" -#include "caffe/util/math_functions.hpp" ->>>>>>> GPU version added namespace caffe { @@ -18,7 +12,6 @@ template void TripletLossLayer::Forward_gpu( const vector*>& bottom, const vector*>& top) { -<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c Dtype margin = this->layer_param_.triplet_loss_param().margin(); Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); @@ -28,16 +21,38 @@ void TripletLossLayer::Forward_gpu( int num_set = bottom[0]->num()/(2 + num_triplets); if (losstype == 0) { for (int i = 0; i < num_set; ++i) { +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_sub( +======= + caffe_sub( +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise dim, bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, 
// reference bottom[0]->gpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive diff_pos.mutable_gpu_data() + i*dim); // reference-pose_close +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_dot( dim, diff_pos.gpu_data() + i*dim, diff_pos.gpu_data() + i*dim, dist_sq_pos.mutable_cpu_data() + i); +======= + caffe_gpu_powx( + dim, + diff_pos.mutable_gpu_data() + i*dim, // reference-pose_close + Dtype(2), + diff_sq_pos.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_pos.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_pos.mutable_gpu_data() + i); // \Sum (reference-pose_close)^2 +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise // a b is a similar pair for pair wise // loss accumulated by the pair wise part loss += dist_sq_pos.gpu_data()[i]; @@ -46,16 +61,38 @@ void TripletLossLayer::Forward_gpu( // a and negative[triplet] is a similar pair for triplet dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.gpu_data()[i]; // Loss component calculated from negative part +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_sub( +======= + caffe_sub( +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise dim, bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + i*dim); // reference-negative +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_dot( dim, diff_neg.gpu_data() + i*dim, diff_neg.gpu_data() + i*dim, dist_sq_neg.mutable_cpu_data() + i); +======= + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + i*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one 
+ caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + i); // \Sum (reference-pose_close)^2 +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[i] -= dist_sq_neg.gpu_data()[i]; // loss accumulated accumulated by the triplet part @@ -66,12 +103,17 @@ void TripletLossLayer::Forward_gpu( top[0]->mutable_gpu_data()[0] = loss; } else { for (int i = 0; i < num_set; ++i) { +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_sub( +======= + caffe_sub( +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise dim, bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive diff_pos.mutable_gpu_data() + i*dim); // reference-pose_close // Loss component calculated from reference and close one +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_dot( dim, diff_pos.gpu_data() + i*dim, @@ -85,15 +127,58 @@ void TripletLossLayer::Forward_gpu( dist_sq_.mutable_gpu_data()[i] += margin; // Loss component calculated from negative part caffe_gpu_sub( +======= + caffe_gpu_powx( + dim, + diff_pos.mutable_gpu_data() + i*dim, // reference-pose_close + Dtype(2), + diff_sq_pos.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_pos.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_pos.mutable_gpu_data() + i); // \Sum (reference-pose_close)^2 + // a b is a similar pair for pair wise + // loss accumulated by the pair wise part + loss += dist_sq_pos.gpu_data()[i]; + dist_sq_.mutable_gpu_data()[i] = 
dist_sq_pos.mutable_gpu_data()[i]; + dist_sq_.mutable_gpu_data()[i] += margin; + for (int triplet = 0; triplet < num_triplets; ++triplet) { + // Loss component calculated from negative part + caffe_sub( +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise dim, bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + i*dim); // reference-negative +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_dot( dim, diff_neg.gpu_data() + i*dim, diff_neg.gpu_data() + i*dim, dist_sq_neg.mutable_cpu_data() + i); +======= + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + i*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + i); // \Sum (reference-pose_close)^2 +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[i] = 1 - \ dist_sq_neg.gpu_data()[i] / dist_sq_.mutable_gpu_data()[i]; @@ -104,54 +189,12 @@ void TripletLossLayer::Forward_gpu( loss = loss / static_cast(num_set) / Dtype(2); top[0]->mutable_gpu_data()[0] = loss; } -======= - int count = bottom[0]->count(); - caffe_gpu_sub( - count, - bottom[0]->gpu_data(), // a - bottom[1]->gpu_data(), // b - diff_pos.mutable_gpu_data()); // a_i-b_i - caffe_gpu_sub( - count, - bottom[0]->gpu_data(), // a - bottom[2]->gpu_data(), // c - diff_neg.mutable_gpu_data()); // a_i-c_i - caffe_gpu_powx( - count, - diff_pos.mutable_gpu_data(), // a_i-b_i - Dtype(2), - diff_sq_pos.mutable_gpu_data()); // (a_i-b_i)^2 - caffe_gpu_powx( - 
count, - diff_neg.mutable_gpu_data(), // a_i-c_i - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (a_i-c_i)^2 - const int channels = bottom[0]->channels(); - Dtype margin = this->layer_param_.triplet_loss_param().margin(); - Dtype loss(0.0); - // Loss component calculated from ab - for (int i = 0; i < bottom[0]->num(); ++i) { - /*dist_sq_pos.mutable_gpu_data()[i] = caffe_gpu_dot(channels, - diff_pos.gpu_data() + (i*channels), diff_pos.gpu_data() + (i*channels));*/ - // ab is a similar pair - dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.gpu_data()[i]; - // Loss component calculated from ac - /*dist_sq_neg.mutable_gpu_data()[i] = caffe_gpu_dot(channels, - diff_neg.gpu_data() + (i*channels), diff_neg.gpu_data() + (i*channels));*/ - // ac is a dissimilar pair - dist_sq_.mutable_gpu_data()[i] -= dist_sq_neg.gpu_data()[i]; - loss += std::max(margin + dist_sq_.gpu_data()[i], Dtype(0.0)); - } - loss = loss / static_cast(bottom[0]->num()) / Dtype(2); - top[0]->mutable_gpu_data()[0] = loss; ->>>>>>> GPU version added } template void TripletLossLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { Dtype margin = this->layer_param_.triplet_loss_param().margin(); -<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); int dim = bottom[0]->count()/bottom[0]->num(); @@ -174,16 +217,38 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, bout + ((2 + num_triplets)*j + i)*dim); // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_sub( +======= + caffe_sub( +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise dim, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, 
diff_neg.mutable_gpu_data() + j*dim); // reference-negative +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_dot( dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, dist_sq_neg.mutable_cpu_data() + j); +======= + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise // Triplet loss accumulation // a and negative[triplet] is a similar pair for triplet dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; @@ -227,7 +292,11 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, bout + ((2 + num_triplets)*j + i)*dim); // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_sub( +======= + caffe_sub( +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise dim, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, @@ -235,11 +304,29 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, // Triplet loss accumulation // a and negative[triplet] is a similar pair for triplet dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_dot( dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, dist_sq_neg.mutable_cpu_data() + j); +======= + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close + Dtype(2), + 
diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { @@ -263,7 +350,11 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, static_cast(num_set); for (int j = 0; j < num_set; ++j) { Dtype* bout = bottom[0]->mutable_gpu_diff(); +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_sub( +======= + caffe_sub( +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise dim, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + i)*dim, @@ -271,11 +362,29 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, // Triplet loss accumulation // a and negative[triplet] is a similar pair for triplet dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_dot( dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, dist_sq_neg.mutable_cpu_data() + j); +======= + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + j); // \Sum 
(reference-pose_close)^2 +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { @@ -287,7 +396,11 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, Dtype(0.0), bout + ((2 + num_triplets)*j + i)*dim); } else { +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); +======= + caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise } } } @@ -308,21 +421,48 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, diff_pos.gpu_data() + (j*dim), Dtype(0.0), bout + ((2 + num_triplets)*j + i)*dim); +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; dist_sq_.mutable_gpu_data()[j] += margin; // Loss component calculated from negative part caffe_gpu_sub( +======= + dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; + dist_sq_.mutable_gpu_data()[j] += margin; + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + // Loss component calculated from negative part + caffe_sub( +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise dim, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + j*dim); // reference-negative +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_dot( dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, dist_sq_neg.mutable_cpu_data() + j); +======= + caffe_gpu_powx( + dim, + 
diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] = 1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; @@ -362,21 +502,48 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, diff_pos.gpu_data() + (j*dim), Dtype(0.0), bout + ((2 + num_triplets)*j + i)*dim); +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; dist_sq_.mutable_gpu_data()[j] += margin; // Loss component calculated from negative part caffe_gpu_sub( +======= + dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; + dist_sq_.mutable_gpu_data()[j] += margin; + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + // Loss component calculated from negative part + caffe_sub( +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise dim, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + j*dim); // reference-negative +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_dot( dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, dist_sq_neg.mutable_cpu_data() + j); +======= + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + j*dim, // 
reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] = 1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; @@ -406,16 +573,38 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; dist_sq_.mutable_gpu_data()[j] += margin; // Loss component calculated from negative part +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_sub( +======= + caffe_sub( +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise dim, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + i)*dim, diff_neg.mutable_gpu_data() + j*dim); // reference-negative +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_dot( dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, dist_sq_neg.mutable_cpu_data() + j); +======= + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 +>>>>>>> No sclice layer version which could forward a set of triplets together with 
1 pair wise // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] = 1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; @@ -428,79 +617,23 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, Dtype(0.0), bout + ((2 + num_triplets)*j + i)*dim); } else { +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); ======= -// there must be further check to ensure the gradient calc - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->gpu_diff()[0] / - static_cast(bottom[0]->num()); - int num = bottom[0]->num(); - int channels = bottom[0]->channels(); - for (int j = 0; j < num; ++j) { - Dtype* bout = bottom[0]->mutable_gpu_diff(); - if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { - // similar pairs - caffe_gpu_axpby( - channels, - alpha, - diff_pos.gpu_data() + (j*channels), - Dtype(0.0), - bout + (j*channels)); - // dissimilar pairs - caffe_gpu_axpby( - channels, - -alpha, - diff_neg.gpu_data() + (j*channels), - Dtype(1.0), - bout + (j*channels)); - } else { - caffe_set(channels, Dtype(0), bout + (j*channels)); - } - } - } - for (int i = 1; i < 3; ++i) { -// there must be further check to ensure the gradient calc - if (propagate_down[i]) { - const Dtype sign = (i == 1) ? 
-1 : 1; - const Dtype alpha = sign * top[0]->gpu_diff()[0] / - static_cast(bottom[i]->num()); - int num = bottom[i]->num(); - int channels = bottom[i]->channels(); - for (int j = 0; j < num; ++j) { - Dtype* bout = bottom[i]->mutable_gpu_diff(); - if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { - if (i == 1) { - // similar pairs - caffe_gpu_axpby( - channels, - alpha, - diff_pos.gpu_data() + (j*channels), - Dtype(0.0), - bout + (j*channels)); - } - else { - // dissimilar pairs - caffe_gpu_axpby( - channels, - alpha, - diff_neg.gpu_data() + (j*channels), - Dtype(0.0), - bout + (j*channels)); - } - } else { - caffe_set(channels, Dtype(0), bout + (j*channels)); ->>>>>>> GPU version added + caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise } } } } -<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c } -======= ->>>>>>> GPU version added } -INSTANTIATE_LAYER_GPU_FUNCS(TripletLossLayer); +#ifdef CPU_ONLY +STUB_GPU(TripletLossLayer); +#endif + +INSTANTIATE_CLASS(TripletLossLayer); +REGISTER_LAYER_CLASS(TripletLoss); } // namespace caffe diff --git a/src/caffe/layers/triplet_loss_layer.cu.orig.orig b/src/caffe/layers/triplet_loss_layer.cu.orig.orig index 49c2d44e9ce..0911362a47f 100644 --- a/src/caffe/layers/triplet_loss_layer.cu.orig.orig +++ b/src/caffe/layers/triplet_loss_layer.cu.orig.orig @@ -2,11 +2,6 @@ #include #include "caffe/layer.hpp" -<<<<<<< 0a8521567403409d70ece475762c203e38274530 -#include "caffe/util/io.hpp" -#include "caffe/util/math_functions.hpp" -#include "caffe/vision_layers.hpp" -======= <<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c #include "caffe/util/io.hpp" #include "caffe/util/math_functions.hpp" @@ -16,7 +11,6 @@ #include "caffe/util/io.hpp" #include "caffe/util/math_functions.hpp" >>>>>>> GPU version added ->>>>>>> GPU version added namespace caffe { @@ -24,10 +18,10 @@ template void TripletLossLayer::Forward_gpu( const vector*>& 
bottom, const vector*>& top) { -<<<<<<< 0a8521567403409d70ece475762c203e38274530 -======= +<<<<<<< 8bea48cb77e890cda255da58b142f3a402196328 <<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c ->>>>>>> GPU version added +======= +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise Dtype margin = this->layer_param_.triplet_loss_param().margin(); Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); @@ -37,24 +31,38 @@ void TripletLossLayer::Forward_gpu( int num_set = bottom[0]->num()/(2 + num_triplets); if (losstype == 0) { for (int i = 0; i < num_set; ++i) { +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_sub( +======= + caffe_sub( +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise dim, bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive diff_pos.mutable_gpu_data() + i*dim); // reference-pose_close +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_dot( dim, diff_pos.gpu_data() + i*dim, diff_pos.gpu_data() + i*dim, -<<<<<<< 0a8521567403409d70ece475762c203e38274530 -<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_pos.mutable_cpu_data() + i); ======= - dist_sq_pos.mutable_gpu_data() + i); ->>>>>>> debug GPU triplet loss codes for loss type 0 -======= - dist_sq_pos.mutable_cpu_data() + i); ->>>>>>> GPU version added + caffe_gpu_powx( + dim, + diff_pos.mutable_gpu_data() + i*dim, // reference-pose_close + Dtype(2), + diff_sq_pos.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_pos.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_pos.mutable_gpu_data() + i); // \Sum (reference-pose_close)^2 +>>>>>>> 
No sclice layer version which could forward a set of triplets together with 1 pair wise // a b is a similar pair for pair wise // loss accumulated by the pair wise part loss += dist_sq_pos.gpu_data()[i]; @@ -63,24 +71,38 @@ void TripletLossLayer::Forward_gpu( // a and negative[triplet] is a similar pair for triplet dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.gpu_data()[i]; // Loss component calculated from negative part +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_sub( +======= + caffe_sub( +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise dim, bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + i*dim); // reference-negative +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_dot( dim, diff_neg.gpu_data() + i*dim, diff_neg.gpu_data() + i*dim, -<<<<<<< 0a8521567403409d70ece475762c203e38274530 -<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_neg.mutable_cpu_data() + i); ======= - dist_sq_neg.mutable_gpu_data() + i); ->>>>>>> debug GPU triplet loss codes for loss type 0 -======= - dist_sq_neg.mutable_cpu_data() + i); ->>>>>>> GPU version added + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + i*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + i); // \Sum (reference-pose_close)^2 +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[i] -= dist_sq_neg.gpu_data()[i]; // loss accumulated accumulated by the triplet part @@ -91,25 
+113,22 @@ void TripletLossLayer::Forward_gpu( top[0]->mutable_gpu_data()[0] = loss; } else { for (int i = 0; i < num_set; ++i) { +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_sub( +======= + caffe_sub( +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise dim, bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive diff_pos.mutable_gpu_data() + i*dim); // reference-pose_close // Loss component calculated from reference and close one +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_dot( dim, diff_pos.gpu_data() + i*dim, diff_pos.gpu_data() + i*dim, -<<<<<<< 0a8521567403409d70ece475762c203e38274530 -<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 - dist_sq_pos.mutable_cpu_data() + i); -======= - dist_sq_pos.mutable_gpu_data() + i); ->>>>>>> debug GPU triplet loss codes for loss type 0 -======= dist_sq_pos.mutable_cpu_data() + i); ->>>>>>> GPU version added // a b is a similar pair for pair wise // loss accumulated by the pair wise part loss += dist_sq_pos.gpu_data()[i]; @@ -118,23 +137,58 @@ void TripletLossLayer::Forward_gpu( dist_sq_.mutable_gpu_data()[i] += margin; // Loss component calculated from negative part caffe_gpu_sub( +======= + caffe_gpu_powx( + dim, + diff_pos.mutable_gpu_data() + i*dim, // reference-pose_close + Dtype(2), + diff_sq_pos.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_pos.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_pos.mutable_gpu_data() + i); // \Sum (reference-pose_close)^2 + // a b is a similar pair for pair wise + // loss accumulated by the pair wise part + loss += dist_sq_pos.gpu_data()[i]; + dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.mutable_gpu_data()[i]; + dist_sq_.mutable_gpu_data()[i] 
+= margin; + for (int triplet = 0; triplet < num_triplets; ++triplet) { + // Loss component calculated from negative part + caffe_sub( +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise dim, bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + i*dim); // reference-negative +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_dot( dim, diff_neg.gpu_data() + i*dim, diff_neg.gpu_data() + i*dim, -<<<<<<< 0a8521567403409d70ece475762c203e38274530 -<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_neg.mutable_cpu_data() + i); ======= - dist_sq_neg.mutable_gpu_data() + i); ->>>>>>> debug GPU triplet loss codes for loss type 0 -======= - dist_sq_neg.mutable_cpu_data() + i); ->>>>>>> GPU version added + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + i*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + i); // \Sum (reference-pose_close)^2 +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[i] = 1 - \ dist_sq_neg.gpu_data()[i] / dist_sq_.mutable_gpu_data()[i]; @@ -145,8 +199,7 @@ void TripletLossLayer::Forward_gpu( loss = loss / static_cast(num_set) / Dtype(2); top[0]->mutable_gpu_data()[0] = loss; } -<<<<<<< 0a8521567403409d70ece475762c203e38274530 -======= +<<<<<<< 8bea48cb77e890cda255da58b142f3a402196328 ======= int count = bottom[0]->count(); caffe_gpu_sub( @@ -188,17 +241,15 @@ void TripletLossLayer::Forward_gpu( loss = loss / 
static_cast(bottom[0]->num()) / Dtype(2); top[0]->mutable_gpu_data()[0] = loss; >>>>>>> GPU version added ->>>>>>> GPU version added +======= +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise } template void TripletLossLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { Dtype margin = this->layer_param_.triplet_loss_param().margin(); -<<<<<<< 0a8521567403409d70ece475762c203e38274530 -======= <<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c ->>>>>>> GPU version added Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); int dim = bottom[0]->count()/bottom[0]->num(); @@ -221,24 +272,38 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, bout + ((2 + num_triplets)*j + i)*dim); // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_sub( +======= + caffe_sub( +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise dim, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + j*dim); // reference-negative +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_dot( dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, -<<<<<<< 0a8521567403409d70ece475762c203e38274530 -<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_neg.mutable_cpu_data() + j); ======= - dist_sq_neg.mutable_gpu_data() + j); ->>>>>>> debug GPU triplet loss codes for loss type 0 -======= - dist_sq_neg.mutable_cpu_data() + j); ->>>>>>> GPU version added + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and 
close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise // Triplet loss accumulation // a and negative[triplet] is a similar pair for triplet dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; @@ -282,7 +347,11 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, bout + ((2 + num_triplets)*j + i)*dim); // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_sub( +======= + caffe_sub( +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise dim, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, @@ -290,19 +359,29 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, // Triplet loss accumulation // a and negative[triplet] is a similar pair for triplet dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_dot( dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, -<<<<<<< 0a8521567403409d70ece475762c203e38274530 -<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_neg.mutable_cpu_data() + j); ======= - dist_sq_neg.mutable_gpu_data() + j); ->>>>>>> debug GPU triplet loss codes for loss type 0 -======= - dist_sq_neg.mutable_cpu_data() + j); ->>>>>>> GPU version added + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + 
Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { @@ -326,7 +405,11 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, static_cast(num_set); for (int j = 0; j < num_set; ++j) { Dtype* bout = bottom[0]->mutable_gpu_diff(); +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_sub( +======= + caffe_sub( +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise dim, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + i)*dim, @@ -334,19 +417,29 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, // Triplet loss accumulation // a and negative[triplet] is a similar pair for triplet dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_dot( dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, -<<<<<<< 0a8521567403409d70ece475762c203e38274530 -<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_neg.mutable_cpu_data() + j); ======= - dist_sq_neg.mutable_gpu_data() + j); ->>>>>>> debug GPU triplet loss codes for loss type 0 -======= - dist_sq_neg.mutable_cpu_data() + j); ->>>>>>> GPU version added + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + 
Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { @@ -358,7 +451,11 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, Dtype(0.0), bout + ((2 + num_triplets)*j + i)*dim); } else { +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); +======= + caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise } } } @@ -379,29 +476,48 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, diff_pos.gpu_data() + (j*dim), Dtype(0.0), bout + ((2 + num_triplets)*j + i)*dim); +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; dist_sq_.mutable_gpu_data()[j] += margin; // Loss component calculated from negative part caffe_gpu_sub( +======= + dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; + dist_sq_.mutable_gpu_data()[j] += margin; + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + // Loss component calculated from negative part + caffe_sub( +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise dim, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + j*dim); // reference-negative +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_dot( dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, -<<<<<<< 
0a8521567403409d70ece475762c203e38274530 -<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_neg.mutable_cpu_data() + j); ======= - dist_sq_neg.mutable_gpu_data() + j); ->>>>>>> debug GPU triplet loss codes for loss type 0 -======= - dist_sq_neg.mutable_cpu_data() + j); ->>>>>>> GPU version added + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] = 1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; @@ -441,29 +557,48 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, diff_pos.gpu_data() + (j*dim), Dtype(0.0), bout + ((2 + num_triplets)*j + i)*dim); +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; dist_sq_.mutable_gpu_data()[j] += margin; // Loss component calculated from negative part caffe_gpu_sub( +======= + dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; + dist_sq_.mutable_gpu_data()[j] += margin; + // the num_triplets triplet part + for (int triplet = 0; triplet < num_triplets; ++triplet) { + // Loss component calculated from negative part + caffe_sub( +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise dim, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + 
num_triplets)*j + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + j*dim); // reference-negative +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_dot( dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, -<<<<<<< 0a8521567403409d70ece475762c203e38274530 -<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_neg.mutable_cpu_data() + j); ======= - dist_sq_neg.mutable_gpu_data() + j); ->>>>>>> debug GPU triplet loss codes for loss type 0 -======= - dist_sq_neg.mutable_cpu_data() + j); ->>>>>>> GPU version added + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] = 1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; @@ -493,24 +628,38 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; dist_sq_.mutable_gpu_data()[j] += margin; // Loss component calculated from negative part +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_sub( +======= + caffe_sub( +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise dim, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + i)*dim, diff_neg.mutable_gpu_data() + j*dim); // reference-negative +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_dot( dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, 
-<<<<<<< 0a8521567403409d70ece475762c203e38274530 -<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 dist_sq_neg.mutable_cpu_data() + j); ======= - dist_sq_neg.mutable_gpu_data() + j); ->>>>>>> debug GPU triplet loss codes for loss type 0 -======= - dist_sq_neg.mutable_cpu_data() + j); ->>>>>>> GPU version added + caffe_gpu_powx( + dim, + diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close + Dtype(2), + diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 + // Loss component calculated from reference and close one + caffe_gpu_gemv( + CblasNoTrans, + num_set, + bottom[0]->channels(), + Dtype(1.0), + diff_sq_neg.gpu_data(), // (reference-pose_close)^2 + summer_vec_.gpu_data(), + Dtype(0.0), + dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] = 1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; @@ -523,10 +672,10 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, Dtype(0.0), bout + ((2 + num_triplets)*j + i)*dim); } else { +<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); -<<<<<<< 0a8521567403409d70ece475762c203e38274530 -======= ======= +<<<<<<< 8bea48cb77e890cda255da58b142f3a402196328 // there must be further check to ensure the gradient calc if (propagate_down[0]) { const Dtype sign = 1; @@ -588,21 +737,28 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, } else { caffe_set(channels, Dtype(0), bout + (j*channels)); >>>>>>> GPU version added ->>>>>>> GPU version added +======= + caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise } } } } 
-<<<<<<< 0a8521567403409d70ece475762c203e38274530 - } -======= +<<<<<<< 8bea48cb77e890cda255da58b142f3a402196328 <<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c +======= +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise } ======= >>>>>>> GPU version added ->>>>>>> GPU version added } -INSTANTIATE_LAYER_GPU_FUNCS(TripletLossLayer); +#ifdef CPU_ONLY +STUB_GPU(TripletLossLayer); +#endif + +INSTANTIATE_CLASS(TripletLossLayer); +REGISTER_LAYER_CLASS(TripletLoss); } // namespace caffe diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index bd344870d8e..d1a1109f123 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -443,6 +443,8 @@ message ContrastiveLossParameter { message TripletLossParameter { //margin for negative triplet optional float margin = 1 [default = 1.0]; + optional uint32 losstype = 2 [default = 1]; + optional uint32 num_triplets = 3 [default = 3]; } >>>>>>> triplet data generation and network update diff --git a/src/caffe/proto/caffe.proto.orig b/src/caffe/proto/caffe.proto.orig index 75380fc9094..0930486e7f0 100644 --- a/src/caffe/proto/caffe.proto.orig +++ b/src/caffe/proto/caffe.proto.orig @@ -11,11 +11,6 @@ message BlobProto { optional BlobShape shape = 7; repeated float data = 5 [packed = true]; repeated float diff = 6 [packed = true]; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= - repeated double double_data = 8 [packed = true]; - repeated double double_diff = 9 [packed = true]; ->>>>>>> triplet data generation and network update // 4D dimensions -- deprecated. Use "shape" instead. optional int32 num = 1 [default = 0]; @@ -54,7 +49,6 @@ message FillerParameter { // The expected number of non-zero output weights for a given input in // Gaussian filler -- the default -1 means don't perform sparsification. 
optional int32 sparse = 7 [default = -1]; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 // Normalize the filler variance by fan_in, fan_out, or their average. // Applies to 'xavier' and 'msra' fillers. enum VarianceNorm { @@ -63,8 +57,6 @@ message FillerParameter { AVERAGE = 2; } optional VarianceNorm variance_norm = 8 [default = FAN_IN]; -======= ->>>>>>> triplet data generation and network update } message NetParameter { @@ -104,15 +96,7 @@ message NetParameter { // NOTE // Update the next available ID when you add a new SolverParameter field. // -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 // SolverParameter next available ID: 37 (last added: iter_size) -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 -// SolverParameter next available ID: 40 (last added: momentum2) -======= -// SolverParameter next available ID: 36 (last added: clip_gradients) ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update message SolverParameter { ////////////////////////////////////////////////////////////////////////////// // Specifying the train and test networks @@ -165,35 +149,9 @@ message SolverParameter { // Display the loss averaged over the last average_loss iterations optional int32 average_loss = 33 [default = 1]; optional int32 max_iter = 7; // the maximum number of iterations -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 - // accumulate gradients over `iter_size` x `batch_size` instances - optional int32 iter_size = 36 [default = 1]; - optional string lr_policy = 8; // The learning rate decay policy. -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 // accumulate gradients over `iter_size` x `batch_size` instances optional int32 iter_size = 36 [default = 1]; - - // The learning rate decay policy. The currently implemented learning rate - // policies are as follows: - // - fixed: always return base_lr. 
- // - step: return base_lr * gamma ^ (floor(iter / step)) - // - exp: return base_lr * gamma ^ iter - // - inv: return base_lr * (1 + gamma * iter) ^ (- power) - // - multistep: similar to step but it allows non uniform steps defined by - // stepvalue - // - poly: the effective learning rate follows a polynomial decay, to be - // zero by the max_iter. return base_lr (1 - iter/max_iter) ^ (power) - // - sigmoid: the effective learning rate follows a sigmod decay - // return base_lr ( 1/(1 + exp(-gamma * (iter - stepsize)))) - // - // where base_lr, max_iter, gamma, step, stepvalue and power are defined - // in the solver parameter protocol buffer, and iter is the current iteration. - optional string lr_policy = 8; -======= optional string lr_policy = 8; // The learning rate decay policy. ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update optional float gamma = 9; // The parameter to compute the learning rate. optional float power = 10; // The parameter to compute the learning rate. optional float momentum = 11; // The momentum value. @@ -215,14 +173,6 @@ message SolverParameter { // whether to snapshot diff in the results or not. Snapshotting diff will help // debugging but the final protocol buffer size will be much larger. optional bool snapshot_diff = 16 [default = false]; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= - enum SnapshotFormat { - HDF5 = 0; - BINARYPROTO = 1; - } - optional SnapshotFormat snapshot_format = 37 [default = BINARYPROTO]; ->>>>>>> triplet data generation and network update // the mode solver will use: 0 for CPU and 1 for GPU. Use GPU in default. 
enum SolverMode { CPU = 0; @@ -241,26 +191,10 @@ message SolverParameter { SGD = 0; NESTEROV = 1; ADAGRAD = 2; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 } optional SolverType solver_type = 30 [default = SGD]; // numerical stability for AdaGrad optional float delta = 31 [default = 1e-8]; -======= - RMSPROP = 3; - ADADELTA = 4; - ADAM = 5; - } - optional SolverType solver_type = 30 [default = SGD]; - // numerical stability for RMSProp, AdaGrad and AdaDelta and Adam - optional float delta = 31 [default = 1e-8]; - // parameters for the Adam solver - optional float momentum2 = 39 [default = 0.999]; - - // RMSProp decay value - // MeanSquare(t) = rms_decay*MeanSquare(t-1) + (1-rms_decay)*SquareGradient(t) - optional float rms_decay = 38; ->>>>>>> triplet data generation and network update // If true, print information about the state of the net that may help with // debugging learning problems. @@ -335,19 +269,7 @@ message ParamSpec { // NOTE // Update the next available ID when you add a new LayerParameter field. // -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -<<<<<<< 90c50a1b3e5527cfa0d92174b79cb05438c5302e // LayerParameter next available layer-specific ID: 137 (last added: reduction_param) -======= -// LayerParameter next available layer-specific ID: 139 (last added: tile_param) ->>>>>>> Add TileLayer -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 -// LayerParameter next available layer-specific ID: 139 (last added: tile_param) -======= -// LayerParameter next available layer-specific ID: 132 (last added: prelu_param) ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update message LayerParameter { optional string name = 1; // the layer name optional string type = 2; // the layer type @@ -368,19 +290,10 @@ message LayerParameter { // The blobs containing the numeric parameters of the layer. 
repeated BlobProto blobs = 7; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 ->>>>>>> triplet data generation and network update - + // Specifies on which bottoms the backpropagation should be skipped. // The size must be either 0 or equal to the number of bottoms. repeated bool propagate_down = 11; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -======= ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update // Rules controlling whether and when a layer is included in the network, // based on the current NetState. You may specify a non-zero number of rules @@ -412,23 +325,15 @@ message LayerParameter { optional DropoutParameter dropout_param = 108; optional DummyDataParameter dummy_data_param = 109; optional EltwiseParameter eltwise_param = 110; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 optional ExpParameter exp_param = 111; optional FlattenParameter flatten_param = 135; -======= - optional EmbedParameter embed_param = 137; - optional ExpParameter exp_param = 111; ->>>>>>> triplet data generation and network update optional HDF5DataParameter hdf5_data_param = 112; optional HDF5OutputParameter hdf5_output_param = 113; optional HingeLossParameter hinge_loss_param = 114; optional ImageDataParameter image_data_param = 115; optional InfogainLossParameter infogain_loss_param = 116; optional InnerProductParameter inner_product_param = 117; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 optional LogParameter log_param = 134; -======= ->>>>>>> triplet data generation and network update optional LRNParameter lrn_param = 118; optional MemoryDataParameter memory_data_param = 119; optional MVNParameter mvn_param = 120; @@ -436,31 +341,19 @@ message LayerParameter { optional PowerParameter power_param = 122; optional PReLUParameter prelu_param = 131; optional PythonParameter python_param = 130; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 
optional ReductionParameter reduction_param = 136; optional ReLUParameter relu_param = 123; optional ReshapeParameter reshape_param = 133; optional SigmoidParameter sigmoid_param = 124; optional SoftmaxParameter softmax_param = 125; optional SPPParameter spp_param = 132; -======= - optional ReLUParameter relu_param = 123; - optional SigmoidParameter sigmoid_param = 124; - optional SoftmaxParameter softmax_param = 125; ->>>>>>> triplet data generation and network update optional SliceParameter slice_param = 126; optional TanHParameter tanh_param = 127; optional ThresholdParameter threshold_param = 128; - optional TileParameter tile_param = 138; optional WindowDataParameter window_data_param = 129; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 - optional TripletLossParameter triplet_loss_param = 139; +<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da ======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 - optional TripletLossParameter triplet_loss_param = 139; -======= - optional TripletLossParameter triplet_loss_param = 132; ->>>>>>> triplet data generation and network update + optional TripletLossParameter triplet_loss_param = 137; >>>>>>> triplet data generation and network update } @@ -481,13 +374,10 @@ message TransformationParameter { // or can be repeated the same number of times as channels // (would subtract them from the corresponding channel) repeated float mean_value = 5; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 // Force the decoded image to have 3 color channels. optional bool force_color = 6 [default = false]; // Force the decoded image to have 1 color channels. 
optional bool force_gray = 7 [default = false]; -======= ->>>>>>> triplet data generation and network update } // Message that stores parameters shared by loss layers @@ -499,13 +389,9 @@ message LossParameter { optional bool normalize = 2 [default = true]; } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 // Messages that store parameters used by individual layer types follow, in // alphabetical order. -======= -// Message that stores parameters used by AccuracyLayer ->>>>>>> triplet data generation and network update message AccuracyParameter { // When computing accuracy, count as correct by comparing the true label to // the top k scoring classes. By default, only compare to the top scoring @@ -523,20 +409,12 @@ message AccuracyParameter { optional int32 ignore_label = 3; } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -// Message that stores parameters used by ArgMaxLayer ->>>>>>> triplet data generation and network update message ArgMaxParameter { // If true produce pairs (argmax, maxval) optional bool out_max_val = 1 [default = false]; optional uint32 top_k = 2 [default = 1]; } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -// Message that stores parameters used by ConcatLayer ->>>>>>> triplet data generation and network update message ConcatParameter { // The axis along which to concatenate -- may be negative to index from the // end (e.g., -1 for the last axis). 
Other axes must have the @@ -548,43 +426,30 @@ message ConcatParameter { optional uint32 concat_dim = 1 [default = 1]; } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 message ContrastiveLossParameter { // margin for dissimilar pair optional float margin = 1 [default = 1.0]; -======= -// Message that stores parameters used by ContrastiveLossLayer -message ContrastiveLossParameter { - //margin for dissimilar pair - optional float margin = 1 [default = 1.0]; -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 ->>>>>>> triplet data generation and network update // The first implementation of this cost did not exactly match the cost of // Hadsell et al 2006 -- using (margin - d^2) instead of (margin - d)^2. // legacy_version = false (the default) uses (margin - d)^2 as proposed in the // Hadsell paper. New models should probably use this version. // legacy_version = true uses (margin - d^2). This is kept to support / // reproduce existing models and results - optional bool legacy_version = 2 [default = false]; + optional bool legacy_version = 2 [default = false]; } +<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da +======= message TripletLossParameter { //margin for negative triplet optional float margin = 1 [default = 1.0]; +<<<<<<< 8bea48cb77e890cda255da58b142f3a402196328 +======= optional uint32 losstype = 2 [default = 1]; optional uint32 num_triplets = 3 [default = 3]; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise } -======= -======= ->>>>>>> triplet data generation and network update -} -message TripletLossParameter { - //margin for negative triplet - optional float margin = 1 [default = 1.0]; -} -// Message that stores parameters used by ConvolutionLayer >>>>>>> triplet data generation and network update message ConvolutionParameter { optional uint32 num_output = 1; // The number of outputs for the layer @@ -611,10 +476,6 @@ message ConvolutionParameter { 
optional Engine engine = 15 [default = DEFAULT]; } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -// Message that stores parameters used by DataLayer ->>>>>>> triplet data generation and network update message DataParameter { enum DB { LEVELDB = 0; @@ -628,10 +489,6 @@ message DataParameter { // to avoid all asynchronous sgd clients to start at the same point. The skip // point would be set as rand_skip * rand(0,1). Note that rand_skip should not // be larger than the number of keys in the database. -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= - // DEPRECATED. Each solver accesses a different subset of the database. ->>>>>>> triplet data generation and network update optional uint32 rand_skip = 7 [default = 0]; optional DB backend = 8 [default = LEVELDB]; // DEPRECATED. See TransformationParameter. For data pre-processing, we can do @@ -647,25 +504,12 @@ message DataParameter { optional bool mirror = 6 [default = false]; // Force the encoded image to have 3 color channels optional bool force_encoded_color = 9 [default = false]; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 } -======= - // Prefetch queue (Number of batches to prefetch to host memory, increase if - // data access bandwidth varies). - optional uint32 prefetch = 10 [default = 4]; -} - -// Message that stores parameters used by DropoutLayer ->>>>>>> triplet data generation and network update message DropoutParameter { optional float dropout_ratio = 1 [default = 0.5]; // dropout ratio } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -// Message that stores parameters used by DummyDataLayer. ->>>>>>> triplet data generation and network update // DummyDataLayer fills any number of arbitrarily shaped blobs with random // (or constant) data generated by "Fillers" (see "message FillerParameter"). 
message DummyDataParameter { @@ -685,10 +529,6 @@ message DummyDataParameter { repeated uint32 width = 5; } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -// Message that stores parameters used by EltwiseLayer ->>>>>>> triplet data generation and network update message EltwiseParameter { enum EltwiseOp { PROD = 0; @@ -703,27 +543,6 @@ message EltwiseParameter { optional bool stable_prod_grad = 3 [default = true]; } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 -// Message that stores parameters used by EmbedLayer -message EmbedParameter { - optional uint32 num_output = 1; // The number of outputs for the layer - // The input is given as integers to be interpreted as one-hot - // vector indices with dimension num_input. Hence num_input should be - // 1 greater than the maximum possible input value. - optional uint32 input_dim = 2; - - optional bool bias_term = 3 [default = true]; // Whether to use a bias term - optional FillerParameter weight_filler = 4; // The filler for the weight - optional FillerParameter bias_filler = 5; // The filler for the bias - -} - -======= ->>>>>>> triplet data generation and network update -// Message that stores parameters used by ExpLayer ->>>>>>> triplet data generation and network update message ExpParameter { // ExpLayer computes outputs y = base ^ (shift + scale * x), for base > 0. // Or if base is set to the default (-1), base is set to e, @@ -733,7 +552,6 @@ message ExpParameter { optional float shift = 3 [default = 0.0]; } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 /// Message that stores parameters used by FlattenLayer message FlattenParameter { // The first axis to flatten: all preceding axes are retained in the output. 
@@ -746,8 +564,6 @@ message FlattenParameter { optional int32 end_axis = 2 [default = -1]; } -======= ->>>>>>> triplet data generation and network update // Message that stores parameters used by HDF5DataLayer message HDF5DataParameter { // Specify the data source. @@ -763,10 +579,6 @@ message HDF5DataParameter { optional bool shuffle = 3 [default = false]; } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -// Message that stores parameters used by HDF5OutputLayer ->>>>>>> triplet data generation and network update message HDF5OutputParameter { optional string file_name = 1; } @@ -780,10 +592,6 @@ message HingeLossParameter { optional Norm norm = 1 [default = L1]; } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -// Message that stores parameters used by ImageDataLayer ->>>>>>> triplet data generation and network update message ImageDataParameter { // Specify the data source. optional string source = 1; @@ -815,19 +623,11 @@ message ImageDataParameter { optional string root_folder = 12 [default = ""]; } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -// Message that stores parameters InfogainLossLayer ->>>>>>> triplet data generation and network update message InfogainLossParameter { // Specify the infogain matrix source. optional string source = 1; } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -// Message that stores parameters used by InnerProductLayer ->>>>>>> triplet data generation and network update message InnerProductParameter { optional uint32 num_output = 1; // The number of outputs for the layer optional bool bias_term = 2 [default = true]; // whether to have bias terms @@ -840,7 +640,6 @@ message InnerProductParameter { optional int32 axis = 5 [default = 1]; } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 // Message that stores parameters used by LogLayer message LogParameter { // LogLayer computes outputs y = log_base(shift + scale * x), for base > 0. 
@@ -851,8 +650,6 @@ message LogParameter { optional float shift = 3 [default = 0.0]; } -======= ->>>>>>> triplet data generation and network update // Message that stores parameters used by LRNLayer message LRNParameter { optional uint32 local_size = 1 [default = 5]; @@ -866,10 +663,6 @@ message LRNParameter { optional float k = 5 [default = 1.]; } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -// Message that stores parameters used by MemoryDataLayer ->>>>>>> triplet data generation and network update message MemoryDataParameter { optional uint32 batch_size = 1; optional uint32 channels = 2; @@ -877,27 +670,17 @@ message MemoryDataParameter { optional uint32 width = 4; } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -// Message that stores parameters used by MVNLayer ->>>>>>> triplet data generation and network update message MVNParameter { // This parameter can be set to false to normalize mean only optional bool normalize_variance = 1 [default = true]; // This parameter can be set to true to perform DNN-like MVN optional bool across_channels = 2 [default = false]; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 // Epsilon for not dividing by zero while normalizing variance optional float eps = 3 [default = 1e-9]; } -======= -} - -// Message that stores parameters used by PoolingLayer ->>>>>>> triplet data generation and network update message PoolingParameter { enum PoolMethod { MAX = 0; @@ -927,10 +710,6 @@ message PoolingParameter { optional bool global_pooling = 12 [default = false]; } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -// Message that stores parameters used by PowerLayer ->>>>>>> triplet data generation and network update message PowerParameter { // PowerLayer computes outputs y = (shift + scale * x) ^ power. 
optional float power = 1 [default = 1.0]; @@ -938,7 +717,6 @@ message PowerParameter { optional float shift = 3 [default = 0.0]; } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 message PythonParameter { optional string module = 1; optional string layer = 2; @@ -971,21 +749,6 @@ message ReductionParameter { optional int32 axis = 2 [default = 0]; optional float coeff = 3 [default = 1.0]; // coefficient for output -======= -// Message that stores parameters used by PythonLayer -message PythonParameter { - optional string module = 1; - optional string layer = 2; - // This value is set to the attribute `param_str` of the `PythonLayer` object - // in Python before calling the `setup()` method. This could be a number, - // string, dictionary in Python dict format, JSON, etc. You may parse this - // string in `setup` method and use it in `forward` and `backward`. - optional string param_str = 3 [default = '']; - // Whether this PythonLayer is shared among worker solvers during data parallelism. - // If true, each worker solver sequentially run forward from this layer. - // This value should be set true if you are using it as a data layer. - optional bool share_in_parallel = 4 [default = false]; ->>>>>>> triplet data generation and network update } // Message that stores parameters used by ReLULayer @@ -1004,7 +767,6 @@ message ReLUParameter { optional Engine engine = 2 [default = DEFAULT]; } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 message ReshapeParameter { // Specify the output dimensions. If some of the dimensions are set to 0, // the corresponding dimension from the bottom layer is used (unchanged). 
@@ -1069,9 +831,6 @@ message ReshapeParameter { optional int32 num_axes = 3 [default = -1]; } -======= -// Message that stores parameters used by SigmoidLayer ->>>>>>> triplet data generation and network update message SigmoidParameter { enum Engine { DEFAULT = 0; @@ -1081,10 +840,6 @@ message SigmoidParameter { optional Engine engine = 1 [default = DEFAULT]; } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -// Message that stores parameters used by SliceLayer ->>>>>>> triplet data generation and network update message SliceParameter { // The axis along which to slice -- may be negative to index from the end // (e.g., -1 for the last axis). @@ -1111,10 +866,6 @@ message SoftmaxParameter { optional int32 axis = 2 [default = 1]; } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -// Message that stores parameters used by TanHLayer ->>>>>>> triplet data generation and network update message TanHParameter { enum Engine { DEFAULT = 0; @@ -1124,33 +875,10 @@ message TanHParameter { optional Engine engine = 1 [default = DEFAULT]; } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 ->>>>>>> triplet data generation and network update -// Message that stores parameters used by TileLayer -message TileParameter { - // The index of the axis to tile. - optional int32 axis = 1 [default = 1]; - - // The number of copies (tiles) of the blob to output. 
- optional int32 tiles = 2; -} - -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -======= ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update -// Message that stores parameters used by ThresholdLayer message ThresholdParameter { optional float threshold = 1 [default = 0]; // Strictly positive values } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -// Message that stores parameters used by WindowDataLayer ->>>>>>> triplet data generation and network update message WindowDataParameter { // Specify the data source. optional string source = 1; @@ -1184,7 +912,6 @@ message WindowDataParameter { optional string root_folder = 13 [default = ""]; } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 message SPPParameter { enum PoolMethod { MAX = 0; @@ -1201,8 +928,6 @@ message SPPParameter { optional Engine engine = 6 [default = DEFAULT]; } -======= ->>>>>>> triplet data generation and network update // DEPRECATED: use LayerParameter. message V1LayerParameter { repeated string bottom = 2; @@ -1249,7 +974,6 @@ message V1LayerParameter { SPLIT = 22; SLICE = 33; TANH = 23; - TRIPLET_LOSS = 40; WINDOW_DATA = 24; THRESHOLD = 31; @@ -1296,7 +1020,6 @@ message V1LayerParameter { optional TransformationParameter transform_param = 36; optional LossParameter loss_param = 42; optional V0LayerParameter layer = 1; - optional TripletLossParameter triplet_loss_param = 43; } // DEPRECATED: V0LayerParameter is the old way of specifying layer parameters @@ -1393,10 +1116,6 @@ message V0LayerParameter { optional HDF5OutputParameter hdf5_output_param = 1001; } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -// Message that stores parameters used by PReLULayer ->>>>>>> triplet data generation and network update message PReLUParameter { // Parametric ReLU described in K. He et al, Delving Deep into Rectifiers: // Surpassing Human-Level Performance on ImageNet Classification, 2015. 
diff --git a/src/caffe/test/test_triplet_loss_layer~5308d9998ae0b1f97b7b99b33fac968421447f3a b/src/caffe/test/test_triplet_loss_layer~5308d9998ae0b1f97b7b99b33fac968421447f3a deleted file mode 100644 index 6c25ce9bd4b..00000000000 --- a/src/caffe/test/test_triplet_loss_layer~5308d9998ae0b1f97b7b99b33fac968421447f3a +++ /dev/null @@ -1,125 +0,0 @@ -#include -#include -#include -#include -#include - -#include "gtest/gtest.h" - -#include "caffe/blob.hpp" -#include "caffe/common.hpp" -#include "caffe/filler.hpp" -#include "caffe/vision_layers.hpp" - -#include "caffe/test/test_caffe_main.hpp" -#include "caffe/test/test_gradient_check_util.hpp" - -namespace caffe { - -template -class TripletLossLayerTest : public MultiDeviceTest { - typedef typename TypeParam::Dtype Dtype; - - protected: - TripletLossLayerTest() - : blob_bottom_data_(new Blob(50, 1, 1, 1)), - blob_bottom_y_(new Blob(50, 1, 1, 1)), - blob_top_loss_(new Blob()) { - // fill the values - FillerParameter filler_param; - filler_param.set_min(-1.0); - filler_param.set_max(1.0); // distances~=1.0 to test both sides of margin - UniformFiller filler(filler_param); - filler.Fill(this->blob_bottom_data_); - blob_bottom_vec_.push_back(blob_bottom_data_); - for (int i = 0; i < blob_bottom_y_->count(); ++i) { - blob_bottom_y_->mutable_cpu_data()[i] = caffe_rng_rand() % 2; // 0 or 1 - } - blob_bottom_vec_.push_back(blob_bottom_y_); - blob_top_vec_.push_back(blob_top_loss_); - } - virtual ~TripletLossLayerTest() { - delete blob_bottom_data_; - delete blob_bottom_y_; - delete blob_top_loss_; - } - - Blob* const blob_bottom_data_; - Blob* const blob_bottom_y_; - Blob* const blob_top_loss_; - vector*> blob_bottom_vec_; - vector*> blob_top_vec_; -}; - -TYPED_TEST_CASE(TripletLossLayerTest, TestDtypesAndDevices); - -TYPED_TEST(TripletLossLayerTest, TestForward) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - TripletLossLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, 
this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - // manually compute to compare - const Dtype margin = layer_param.triplet_loss_param().margin(); - const Dtype losstype = 0;//layer_param.triplet_loss_param().losstype(); - const int num_triplets = 3; - const int num_set = this->blob_bottom_data_->num()/(2 + num_triplets); - const int channels = this->blob_bottom_data_->channels(); - Dtype loss(0); - if (losstype == 0) { - for (int i = 0; i < num_set; ++i) { - Dtype dist_par(0); - for (int j = 0; j < channels; ++j) { - Dtype diff_pos = this->blob_bottom_data_->cpu_data()[(2+num_triplets)*i*channels+j] - - this->blob_bottom_data_->cpu_data()[((2+num_triplets)*i+1)*channels+j]; - dist_par = diff_pos*diff_pos; - loss += dist_par; - } - for (int triplet = 0; triplet < num_triplets; ++triplet) { - Dtype dist_sq(0); - for (int j = 0; j < channels; ++j) { - Dtype diff_pos = this->blob_bottom_data_->cpu_data()[(2+num_triplets)*i*channels+j] - - this->blob_bottom_data_->cpu_data()[((2+num_triplets)*i+1)*channels+j]; - dist_sq += diff_pos*diff_pos; - Dtype diff_neg = this->blob_bottom_data_->cpu_data()[(2+num_triplets)*i*channels+j] - - this->blob_bottom_data_->cpu_data()[((2+num_triplets)*i+2+triplet)*channels+j]; - dist_sq -= diff_neg*diff_neg; - } - loss += std::max(margin + dist_sq, Dtype(0.0)); - } - } - } /*else { - for (int i = 0; i < num; ++i) { - Dtype dist_sq(0); - Dtype dist_par(0); - for (int j = 0; j < channels; ++j) { - Dtype diff_pos = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - - this->blob_bottom_data_j_->cpu_data()[i*channels+j]; - dist_sq += diff_pos*diff_pos; - dist_sq += margin; - Dtype diff_neg = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - - this->blob_bottom_data_k_->cpu_data()[i*channels+j]; - dist_sq = 1 - diff_neg*diff_neg/dist_sq; - Dtype diff_par = this->blob_bottom_data_l_->cpu_data()[i*channels+j] - - this->blob_bottom_data_m_->cpu_data()[i*channels+j]; - dist_par = diff_par*diff_par; - } - 
loss += std::max(dist_sq, Dtype(0.0)); - loss += dist_par; - } - }*/ - loss /= static_cast(num_set) * Dtype(2); - EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); -} - -TYPED_TEST(TripletLossLayerTest, TestGradient) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - TripletLossLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - GradientChecker checker(1e-2, 1e-2, 1701); - // check the gradient for the first 5 bottom layers - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_, 0); -} -} // namespace caffe From 2a560f5e35d1363e45c5a14f6ecb1d191fc9b099 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Mon, 10 Aug 2015 19:54:16 +0800 Subject: [PATCH 77/82] add initiate class name of triplet loss layer --- python/caffe/test/test_net_spec.py.orig | 88 + python/caffe/test/test_python_layer.py.orig | 8 +- src/caffe/layers/image_data_layer.cpp | 3 + src/caffe/layers/triplet_loss_layer.cu.orig | 271 +- src/caffe/proto/caffe.proto | 51 +- src/caffe/proto/caffe.proto.orig | 64 +- src/caffe/proto/caffe.proto.orig.orig | 456 +--- src/caffe/test/test_net.cpp | 73 +- src/caffe/test/test_net.cpp.orig | 124 +- src/caffe/test/test_net.cpp.orig.orig | 2508 +++++++++++++++++++ 10 files changed, 2873 insertions(+), 773 deletions(-) create mode 100644 python/caffe/test/test_net_spec.py.orig create mode 100644 src/caffe/test/test_net.cpp.orig.orig diff --git a/python/caffe/test/test_net_spec.py.orig b/python/caffe/test/test_net_spec.py.orig new file mode 100644 index 00000000000..84d50797e02 --- /dev/null +++ b/python/caffe/test/test_net_spec.py.orig @@ -0,0 +1,88 @@ +import unittest +import tempfile +import caffe +from caffe import layers as L +from caffe import params as P + +def lenet(batch_size): + n = caffe.NetSpec() + n.data, n.label = L.DummyData(shape=[dict(dim=[batch_size, 1, 28, 28]), + dict(dim=[batch_size, 1, 1, 1])], + transform_param=dict(scale=1./255), ntop=2) + n.conv1 
= L.Convolution(n.data, kernel_size=5, num_output=20, + weight_filler=dict(type='xavier')) + n.pool1 = L.Pooling(n.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX) + n.conv2 = L.Convolution(n.pool1, kernel_size=5, num_output=50, + weight_filler=dict(type='xavier')) + n.pool2 = L.Pooling(n.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX) + n.ip1 = L.InnerProduct(n.pool2, num_output=500, + weight_filler=dict(type='xavier')) + n.relu1 = L.ReLU(n.ip1, in_place=True) + n.ip2 = L.InnerProduct(n.relu1, num_output=10, + weight_filler=dict(type='xavier')) + n.loss = L.SoftmaxWithLoss(n.ip2, n.label) + return n.to_proto() + +def anon_lenet(batch_size): + data, label = L.DummyData(shape=[dict(dim=[batch_size, 1, 28, 28]), + dict(dim=[batch_size, 1, 1, 1])], + transform_param=dict(scale=1./255), ntop=2) + conv1 = L.Convolution(data, kernel_size=5, num_output=20, + weight_filler=dict(type='xavier')) + pool1 = L.Pooling(conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX) + conv2 = L.Convolution(pool1, kernel_size=5, num_output=50, + weight_filler=dict(type='xavier')) + pool2 = L.Pooling(conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX) + ip1 = L.InnerProduct(pool2, num_output=500, + weight_filler=dict(type='xavier')) + relu1 = L.ReLU(ip1, in_place=True) + ip2 = L.InnerProduct(relu1, num_output=10, + weight_filler=dict(type='xavier')) + loss = L.SoftmaxWithLoss(ip2, label) + return loss.to_proto() + +def silent_net(): + n = caffe.NetSpec() +<<<<<<< b30868495fbae44b9556c621a319178d919bf562:python/caffe/test/test_net_spec.py +======= +<<<<<<< 273512afc2a5d2c56027b96ffeda45809d92328e + n.data, n.data2 = L.DummyData(shape=dict(dim=3), ntop=2) +======= +>>>>>>> add initiate class name of triplet loss layer:python/caffe/test/test_net_spec.py.orig + n.data, n.data2 = L.DummyData(shape=[dict(dim=[3]), dict(dim=[4, 2])], + ntop=2) + n.silence_data = L.Silence(n.data, ntop=0) + n.silence_data2 = L.Silence(n.data2, ntop=0) + return n.to_proto() + +class 
TestNetSpec(unittest.TestCase): + def load_net(self, net_proto): + f = tempfile.NamedTemporaryFile(mode='w+', delete=False) + f.write(str(net_proto)) + f.close() + return caffe.Net(f.name, caffe.TEST) + + def test_lenet(self): + """Construct and build the Caffe version of LeNet.""" + + net_proto = lenet(50) + # check that relu is in-place + self.assertEqual(net_proto.layer[6].bottom, + net_proto.layer[6].top) + net = self.load_net(net_proto) + # check that all layers are present + self.assertEqual(len(net.layers), 9) + + # now the check the version with automatically-generated layer names + net_proto = anon_lenet(50) + self.assertEqual(net_proto.layer[6].bottom, + net_proto.layer[6].top) + net = self.load_net(net_proto) + self.assertEqual(len(net.layers), 9) + + def test_zero_tops(self): + """Test net construction for top-less layers.""" + + net_proto = silent_net() + net = self.load_net(net_proto) + self.assertEqual(len(net.forward()), 0) diff --git a/python/caffe/test/test_python_layer.py.orig b/python/caffe/test/test_python_layer.py.orig index a244d3da753..722539428e1 100644 --- a/python/caffe/test/test_python_layer.py.orig +++ b/python/caffe/test/test_python_layer.py.orig @@ -28,7 +28,7 @@ class ExceptionLayer(caffe.Layer): def setup(self, bottom, top): raise RuntimeError -<<<<<<< bdae411e81e13783dc650836abdcf1e9a2cd5178 +<<<<<<< 0edd44bede38fe9ded0a0f91ed29fbdab54557a5 class ParameterLayer(caffe.Layer): """A layer that just multiplies by ten""" @@ -61,7 +61,7 @@ def python_net_file(): def exception_net_file(): -<<<<<<< bdae411e81e13783dc650836abdcf1e9a2cd5178 +<<<<<<< 0edd44bede38fe9ded0a0f91ed29fbdab54557a5 with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f: ======= with tempfile.NamedTemporaryFile(delete=False) as f: @@ -74,7 +74,7 @@ def exception_net_file(): return f.name -<<<<<<< bdae411e81e13783dc650836abdcf1e9a2cd5178 +<<<<<<< 0edd44bede38fe9ded0a0f91ed29fbdab54557a5 def parameter_net_file(): with tempfile.NamedTemporaryFile(mode='w+', 
delete=False) as f: f.write("""name: 'pythonnet' force_backward: true @@ -119,7 +119,7 @@ class TestPythonLayer(unittest.TestCase): net_file = exception_net_file() self.assertRaises(RuntimeError, caffe.Net, net_file, caffe.TEST) os.remove(net_file) -<<<<<<< bdae411e81e13783dc650836abdcf1e9a2cd5178 +<<<<<<< 0edd44bede38fe9ded0a0f91ed29fbdab54557a5 def test_parameter(self): net_file = parameter_net_file() diff --git a/src/caffe/layers/image_data_layer.cpp b/src/caffe/layers/image_data_layer.cpp index 832597496b3..fa3a66a5133 100644 --- a/src/caffe/layers/image_data_layer.cpp +++ b/src/caffe/layers/image_data_layer.cpp @@ -63,11 +63,13 @@ void ImageDataLayer::DataLayerSetUp(const vector*>& bottom, // Read an image, and use it to initialize the top blob. cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, new_height, new_width, is_color); + CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first; // Use data_transformer to infer the expected blob shape from a cv_image. vector top_shape = this->data_transformer_->InferBlobShape(cv_img); this->transformed_data_.Reshape(top_shape); // Reshape prefetch_data and top[0] according to the batch_size. const int batch_size = this->layer_param_.image_data_param().batch_size(); + CHECK_GT(batch_size, 0) << "Positive batch size required"; top_shape[0] = batch_size; this->prefetch_data_.Reshape(top_shape); top[0]->ReshapeLike(this->prefetch_data_); @@ -109,6 +111,7 @@ void ImageDataLayer::InternalThreadEntry() { // on single input batches allows for inputs of varying dimension. cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, new_height, new_width, is_color); + CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first; // Use data_transformer to infer the expected blob shape from a cv_img. 
vector top_shape = this->data_transformer_->InferBlobShape(cv_img); this->transformed_data_.Reshape(top_shape); diff --git a/src/caffe/layers/triplet_loss_layer.cu.orig b/src/caffe/layers/triplet_loss_layer.cu.orig index 21695217f7a..32dc48aafc2 100644 --- a/src/caffe/layers/triplet_loss_layer.cu.orig +++ b/src/caffe/layers/triplet_loss_layer.cu.orig @@ -21,38 +21,20 @@ void TripletLossLayer::Forward_gpu( int num_set = bottom[0]->num()/(2 + num_triplets); if (losstype == 0) { for (int i = 0; i < num_set; ++i) { -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_sub( -======= - caffe_sub( ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise dim, bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive diff_pos.mutable_gpu_data() + i*dim); // reference-pose_close -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_dot( dim, diff_pos.gpu_data() + i*dim, diff_pos.gpu_data() + i*dim, +<<<<<<< 362a69f0c62096aa4083a981cab52469e40874d0 dist_sq_pos.mutable_cpu_data() + i); ======= - caffe_gpu_powx( - dim, - diff_pos.mutable_gpu_data() + i*dim, // reference-pose_close - Dtype(2), - diff_sq_pos.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_pos.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_pos.mutable_gpu_data() + i); // \Sum (reference-pose_close)^2 ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise + dist_sq_pos.mutable_gpu_data() + i); +>>>>>>> add initiate class name of triplet loss layer // a b is a similar pair for pair wise // loss accumulated by the pair wise part loss += dist_sq_pos.gpu_data()[i]; @@ -61,38 +43,20 @@ void TripletLossLayer::Forward_gpu( // a and negative[triplet] is a similar 
pair for triplet dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.gpu_data()[i]; // Loss component calculated from negative part -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_sub( -======= - caffe_sub( ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise dim, bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + i*dim); // reference-negative -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_dot( dim, diff_neg.gpu_data() + i*dim, diff_neg.gpu_data() + i*dim, +<<<<<<< 362a69f0c62096aa4083a981cab52469e40874d0 dist_sq_neg.mutable_cpu_data() + i); ======= - caffe_gpu_powx( - dim, - diff_neg.mutable_gpu_data() + i*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + i); // \Sum (reference-pose_close)^2 ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise + dist_sq_neg.mutable_gpu_data() + i); +>>>>>>> add initiate class name of triplet loss layer // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[i] -= dist_sq_neg.gpu_data()[i]; // loss accumulated accumulated by the triplet part @@ -103,22 +67,21 @@ void TripletLossLayer::Forward_gpu( top[0]->mutable_gpu_data()[0] = loss; } else { for (int i = 0; i < num_set; ++i) { -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_sub( -======= - caffe_sub( ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise dim, bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference bottom[0]->gpu_data() + ((2 + 
num_triplets)*i + 1)*dim, // positive diff_pos.mutable_gpu_data() + i*dim); // reference-pose_close // Loss component calculated from reference and close one -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_dot( dim, diff_pos.gpu_data() + i*dim, diff_pos.gpu_data() + i*dim, +<<<<<<< 362a69f0c62096aa4083a981cab52469e40874d0 dist_sq_pos.mutable_cpu_data() + i); +======= + dist_sq_pos.mutable_gpu_data() + i); +>>>>>>> add initiate class name of triplet loss layer // a b is a similar pair for pair wise // loss accumulated by the pair wise part loss += dist_sq_pos.gpu_data()[i]; @@ -127,58 +90,19 @@ void TripletLossLayer::Forward_gpu( dist_sq_.mutable_gpu_data()[i] += margin; // Loss component calculated from negative part caffe_gpu_sub( -======= - caffe_gpu_powx( - dim, - diff_pos.mutable_gpu_data() + i*dim, // reference-pose_close - Dtype(2), - diff_sq_pos.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_pos.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_pos.mutable_gpu_data() + i); // \Sum (reference-pose_close)^2 - // a b is a similar pair for pair wise - // loss accumulated by the pair wise part - loss += dist_sq_pos.gpu_data()[i]; - dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.mutable_gpu_data()[i]; - dist_sq_.mutable_gpu_data()[i] += margin; - for (int triplet = 0; triplet < num_triplets; ++triplet) { - // Loss component calculated from negative part - caffe_sub( ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise dim, bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + i*dim); // reference-negative -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_dot( dim, diff_neg.gpu_data() + i*dim, 
diff_neg.gpu_data() + i*dim, +<<<<<<< 362a69f0c62096aa4083a981cab52469e40874d0 dist_sq_neg.mutable_cpu_data() + i); ======= - caffe_gpu_powx( - dim, - diff_neg.mutable_gpu_data() + i*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + i); // \Sum (reference-pose_close)^2 ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise + dist_sq_neg.mutable_gpu_data() + i); +>>>>>>> add initiate class name of triplet loss layer // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[i] = 1 - \ dist_sq_neg.gpu_data()[i] / dist_sq_.mutable_gpu_data()[i]; @@ -217,38 +141,20 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, bout + ((2 + num_triplets)*j + i)*dim); // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_sub( -======= - caffe_sub( ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise dim, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + j*dim); // reference-negative -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_dot( dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, +<<<<<<< 362a69f0c62096aa4083a981cab52469e40874d0 dist_sq_neg.mutable_cpu_data() + j); ======= - caffe_gpu_powx( - dim, - diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( 
- CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise + dist_sq_neg.mutable_gpu_data() + j); +>>>>>>> add initiate class name of triplet loss layer // Triplet loss accumulation // a and negative[triplet] is a similar pair for triplet dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; @@ -292,11 +198,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, bout + ((2 + num_triplets)*j + i)*dim); // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_sub( -======= - caffe_sub( ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise dim, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, @@ -304,29 +206,15 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, // Triplet loss accumulation // a and negative[triplet] is a similar pair for triplet dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_dot( dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, +<<<<<<< 362a69f0c62096aa4083a981cab52469e40874d0 dist_sq_neg.mutable_cpu_data() + j); ======= - caffe_gpu_powx( - dim, - diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum 
(reference-pose_close)^2 ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise + dist_sq_neg.mutable_gpu_data() + j); +>>>>>>> add initiate class name of triplet loss layer // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { @@ -350,11 +238,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, static_cast(num_set); for (int j = 0; j < num_set; ++j) { Dtype* bout = bottom[0]->mutable_gpu_diff(); -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_sub( -======= - caffe_sub( ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise dim, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + i)*dim, @@ -362,29 +246,15 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, // Triplet loss accumulation // a and negative[triplet] is a similar pair for triplet dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_dot( dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, +<<<<<<< 362a69f0c62096aa4083a981cab52469e40874d0 dist_sq_neg.mutable_cpu_data() + j); ======= - caffe_gpu_powx( - dim, - diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise + dist_sq_neg.mutable_gpu_data() + j); +>>>>>>> add initiate class name of triplet loss layer // a 
and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { @@ -396,11 +266,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, Dtype(0.0), bout + ((2 + num_triplets)*j + i)*dim); } else { -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); -======= - caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise } } } @@ -421,48 +287,25 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, diff_pos.gpu_data() + (j*dim), Dtype(0.0), bout + ((2 + num_triplets)*j + i)*dim); -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; dist_sq_.mutable_gpu_data()[j] += margin; // Loss component calculated from negative part caffe_gpu_sub( -======= - dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; - dist_sq_.mutable_gpu_data()[j] += margin; - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - // Loss component calculated from negative part - caffe_sub( ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise dim, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + j*dim); // reference-negative -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_dot( dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, +<<<<<<< 362a69f0c62096aa4083a981cab52469e40874d0 dist_sq_neg.mutable_cpu_data() + j); ======= - caffe_gpu_powx( - dim, - diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close - Dtype(2), - 
diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise + dist_sq_neg.mutable_gpu_data() + j); +>>>>>>> add initiate class name of triplet loss layer // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] = 1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; @@ -502,48 +345,25 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, diff_pos.gpu_data() + (j*dim), Dtype(0.0), bout + ((2 + num_triplets)*j + i)*dim); -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; dist_sq_.mutable_gpu_data()[j] += margin; // Loss component calculated from negative part caffe_gpu_sub( -======= - dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; - dist_sq_.mutable_gpu_data()[j] += margin; - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - // Loss component calculated from negative part - caffe_sub( ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise dim, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, diff_neg.mutable_gpu_data() + j*dim); // reference-negative -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_dot( dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, +<<<<<<< 362a69f0c62096aa4083a981cab52469e40874d0 dist_sq_neg.mutable_cpu_data() + j); ======= - 
caffe_gpu_powx( - dim, - diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise + dist_sq_neg.mutable_gpu_data() + j); +>>>>>>> add initiate class name of triplet loss layer // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] = 1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; @@ -573,38 +393,20 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; dist_sq_.mutable_gpu_data()[j] += margin; // Loss component calculated from negative part -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_sub( -======= - caffe_sub( ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise dim, bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference bottom[0]->gpu_data() + ((2 + num_triplets)*j + i)*dim, diff_neg.mutable_gpu_data() + j*dim); // reference-negative -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_dot( dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, +<<<<<<< 362a69f0c62096aa4083a981cab52469e40874d0 dist_sq_neg.mutable_cpu_data() + j); ======= - caffe_gpu_powx( - dim, - diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // 
(reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise + dist_sq_neg.mutable_gpu_data() + j); +>>>>>>> add initiate class name of triplet loss layer // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] = 1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; @@ -617,11 +419,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, Dtype(0.0), bout + ((2 + num_triplets)*j + i)*dim); } else { -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 caffe_gpu_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); -======= - caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise } } } @@ -629,11 +427,6 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, } } -#ifdef CPU_ONLY -STUB_GPU(TripletLossLayer); -#endif - -INSTANTIATE_CLASS(TripletLossLayer); -REGISTER_LAYER_CLASS(TripletLoss); +INSTANTIATE_LAYER_GPU_FUNCS(TripletLossLayer); } // namespace caffe diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index d1a1109f123..4a128eba8f9 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -96,7 +96,7 @@ message NetParameter { // NOTE // Update the next available ID when you add a new SolverParameter field. 
// -// SolverParameter next available ID: 37 (last added: iter_size) +// SolverParameter next available ID: 40 (last added: momentum2) message SolverParameter { ////////////////////////////////////////////////////////////////////////////// // Specifying the train and test networks @@ -151,7 +151,23 @@ message SolverParameter { optional int32 max_iter = 7; // the maximum number of iterations // accumulate gradients over `iter_size` x `batch_size` instances optional int32 iter_size = 36 [default = 1]; - optional string lr_policy = 8; // The learning rate decay policy. + + // The learning rate decay policy. The currently implemented learning rate + // policies are as follows: + // - fixed: always return base_lr. + // - step: return base_lr * gamma ^ (floor(iter / step)) + // - exp: return base_lr * gamma ^ iter + // - inv: return base_lr * (1 + gamma * iter) ^ (- power) + // - multistep: similar to step but it allows non uniform steps defined by + // stepvalue + // - poly: the effective learning rate follows a polynomial decay, to be + // zero by the max_iter. return base_lr (1 - iter/max_iter) ^ (power) + // - sigmoid: the effective learning rate follows a sigmod decay + // return base_lr ( 1/(1 + exp(-gamma * (iter - stepsize)))) + // + // where base_lr, max_iter, gamma, step, stepvalue and power are defined + // in the solver parameter protocol buffer, and iter is the current iteration. + optional string lr_policy = 8; optional float gamma = 9; // The parameter to compute the learning rate. optional float power = 10; // The parameter to compute the learning rate. optional float momentum = 11; // The momentum value. @@ -269,7 +285,7 @@ message ParamSpec { // NOTE // Update the next available ID when you add a new LayerParameter field. 
// -// LayerParameter next available layer-specific ID: 137 (last added: reduction_param) +// LayerParameter next available layer-specific ID: 139 (last added: tile_param) message LayerParameter { optional string name = 1; // the layer name optional string type = 2; // the layer type @@ -290,7 +306,7 @@ message LayerParameter { // The blobs containing the numeric parameters of the layer. repeated BlobProto blobs = 7; - + // Specifies on which bottoms the backpropagation should be skipped. // The size must be either 0 or equal to the number of bottoms. repeated bool propagate_down = 11; @@ -435,7 +451,7 @@ message ContrastiveLossParameter { // Hadsell paper. New models should probably use this version. // legacy_version = true uses (margin - d^2). This is kept to support / // reproduce existing models and results - optional bool legacy_version = 2 [default = false]; + optional bool legacy_version = 2 [default = false]; } <<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da @@ -540,6 +556,21 @@ message EltwiseParameter { optional bool stable_prod_grad = 3 [default = true]; } +// Message that stores parameters used by EmbedLayer +message EmbedParameter { + optional uint32 num_output = 1; // The number of outputs for the layer + // The input is given as integers to be interpreted as one-hot + // vector indices with dimension num_input. Hence num_input should be + // 1 greater than the maximum possible input value. + optional uint32 input_dim = 2; + + optional bool bias_term = 3 [default = true]; // Whether to use a bias term + optional FillerParameter weight_filler = 4; // The filler for the weight + optional FillerParameter bias_filler = 5; // The filler for the bias + +} + +// Message that stores parameters used by ExpLayer message ExpParameter { // ExpLayer computes outputs y = base ^ (shift + scale * x), for base > 0. 
// Or if base is set to the default (-1), base is set to e, @@ -872,6 +903,16 @@ message TanHParameter { optional Engine engine = 1 [default = DEFAULT]; } +// Message that stores parameters used by TileLayer +message TileParameter { + // The index of the axis to tile. + optional int32 axis = 1 [default = 1]; + + // The number of copies (tiles) of the blob to output. + optional int32 tiles = 2; +} + +// Message that stores parameters used by ThresholdLayer message ThresholdParameter { optional float threshold = 1 [default = 0]; // Strictly positive values } diff --git a/src/caffe/proto/caffe.proto.orig b/src/caffe/proto/caffe.proto.orig index 0930486e7f0..c6021592c33 100644 --- a/src/caffe/proto/caffe.proto.orig +++ b/src/caffe/proto/caffe.proto.orig @@ -11,6 +11,8 @@ message BlobProto { optional BlobShape shape = 7; repeated float data = 5 [packed = true]; repeated float diff = 6 [packed = true]; + repeated double double_data = 8 [packed = true]; + repeated double double_diff = 9 [packed = true]; // 4D dimensions -- deprecated. Use "shape" instead. optional int32 num = 1 [default = 0]; @@ -173,6 +175,11 @@ message SolverParameter { // whether to snapshot diff in the results or not. Snapshotting diff will help // debugging but the final protocol buffer size will be much larger. optional bool snapshot_diff = 16 [default = false]; + enum SnapshotFormat { + HDF5 = 0; + BINARYPROTO = 1; + } + optional SnapshotFormat snapshot_format = 37 [default = BINARYPROTO]; // the mode solver will use: 0 for CPU and 1 for GPU. Use GPU in default. 
enum SolverMode { CPU = 0; @@ -191,10 +198,19 @@ message SolverParameter { SGD = 0; NESTEROV = 1; ADAGRAD = 2; + RMSPROP = 3; + ADADELTA = 4; + ADAM = 5; } optional SolverType solver_type = 30 [default = SGD]; - // numerical stability for AdaGrad + // numerical stability for RMSProp, AdaGrad and AdaDelta and Adam optional float delta = 31 [default = 1e-8]; + // parameters for the Adam solver + optional float momentum2 = 39 [default = 0.999]; + + // RMSProp decay value + // MeanSquare(t) = rms_decay*MeanSquare(t-1) + (1-rms_decay)*SquareGradient(t) + optional float rms_decay = 38; // If true, print information about the state of the net that may help with // debugging learning problems. @@ -269,7 +285,11 @@ message ParamSpec { // NOTE // Update the next available ID when you add a new LayerParameter field. // +<<<<<<< 7a85de9cf8ac6f0416eca95c1b991f127b8b7917 // LayerParameter next available layer-specific ID: 137 (last added: reduction_param) +======= +// LayerParameter next available layer-specific ID: 139 (last added: tile_param) +>>>>>>> Add TileLayer message LayerParameter { optional string name = 1; // the layer name optional string type = 2; // the layer type @@ -290,7 +310,7 @@ message LayerParameter { // The blobs containing the numeric parameters of the layer. repeated BlobProto blobs = 7; - + // Specifies on which bottoms the backpropagation should be skipped. // The size must be either 0 or equal to the number of bottoms. 
repeated bool propagate_down = 11; @@ -325,6 +345,7 @@ message LayerParameter { optional DropoutParameter dropout_param = 108; optional DummyDataParameter dummy_data_param = 109; optional EltwiseParameter eltwise_param = 110; + optional EmbedParameter embed_param = 137; optional ExpParameter exp_param = 111; optional FlattenParameter flatten_param = 135; optional HDF5DataParameter hdf5_data_param = 112; @@ -350,11 +371,9 @@ message LayerParameter { optional SliceParameter slice_param = 126; optional TanHParameter tanh_param = 127; optional ThresholdParameter threshold_param = 128; + optional TileParameter tile_param = 138; optional WindowDataParameter window_data_param = 129; -<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da -======= optional TripletLossParameter triplet_loss_param = 137; ->>>>>>> triplet data generation and network update } // Message that stores parameters used to apply transformation @@ -435,22 +454,16 @@ message ContrastiveLossParameter { // Hadsell paper. New models should probably use this version. // legacy_version = true uses (margin - d^2). This is kept to support / // reproduce existing models and results - optional bool legacy_version = 2 [default = false]; + optional bool legacy_version = 2 [default = false]; } -<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da -======= message TripletLossParameter { //margin for negative triplet optional float margin = 1 [default = 1.0]; -<<<<<<< 8bea48cb77e890cda255da58b142f3a402196328 -======= optional uint32 losstype = 2 [default = 1]; optional uint32 num_triplets = 3 [default = 3]; ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise } ->>>>>>> triplet data generation and network update message ConvolutionParameter { optional uint32 num_output = 1; // The number of outputs for the layer optional bool bias_term = 2 [default = true]; // whether to have bias terms @@ -484,11 +497,12 @@ message DataParameter { // Specify the data source. 
optional string source = 1; // Specify the batch size. - optional uint32 batch_size = 4; + optional uint32 batch_size = 4 [default = 1]; // The rand_skip variable is for the data layer to skip a few data points // to avoid all asynchronous sgd clients to start at the same point. The skip // point would be set as rand_skip * rand(0,1). Note that rand_skip should not // be larger than the number of keys in the database. + // DEPRECATED. Each solver accesses a different subset of the database. optional uint32 rand_skip = 7 [default = 0]; optional DB backend = 8 [default = LEVELDB]; // DEPRECATED. See TransformationParameter. For data pre-processing, we can do @@ -504,6 +518,9 @@ message DataParameter { optional bool mirror = 6 [default = false]; // Force the encoded image to have 3 color channels optional bool force_encoded_color = 9 [default = false]; + // Prefetch queue (Number of batches to prefetch to host memory, increase if + // data access bandwidth varies). + optional uint32 prefetch = 10 [default = 4]; } message DropoutParameter { @@ -720,6 +737,15 @@ message PowerParameter { message PythonParameter { optional string module = 1; optional string layer = 2; + // This value is set to the attribute `param_str` of the `PythonLayer` object + // in Python before calling the `setup()` method. This could be a number, + // string, dictionary in Python dict format, JSON, etc. You may parse this + // string in `setup` method and use it in `forward` and `backward`. + optional string param_str = 3 [default = '']; + // Whether this PythonLayer is shared among worker solvers during data parallelism. + // If true, each worker solver sequentially run forward from this layer. + // This value should be set true if you are using it as a data layer. 
+ optional bool share_in_parallel = 4 [default = false]; } // Message that stores parameters used by ReductionLayer @@ -875,6 +901,16 @@ message TanHParameter { optional Engine engine = 1 [default = DEFAULT]; } +// Message that stores parameters used by TileLayer +message TileParameter { + // The index of the axis to tile. + optional int32 axis = 1 [default = 1]; + + // The number of copies (tiles) of the blob to output. + optional int32 tiles = 2; +} + +// Message that stores parameters used by ThresholdLayer message ThresholdParameter { optional float threshold = 1 [default = 0]; // Strictly positive values } @@ -974,6 +1010,7 @@ message V1LayerParameter { SPLIT = 22; SLICE = 33; TANH = 23; + TRIPLET_LOSS = 40; WINDOW_DATA = 24; THRESHOLD = 31; @@ -1020,6 +1057,7 @@ message V1LayerParameter { optional TransformationParameter transform_param = 36; optional LossParameter loss_param = 42; optional V0LayerParameter layer = 1; + optional TripletLossParameter triplet_loss_param = 43; } // DEPRECATED: V0LayerParameter is the old way of specifying layer parameters diff --git a/src/caffe/proto/caffe.proto.orig.orig b/src/caffe/proto/caffe.proto.orig.orig index 9bb8666a766..3a0a3490bd1 100644 --- a/src/caffe/proto/caffe.proto.orig.orig +++ b/src/caffe/proto/caffe.proto.orig.orig @@ -11,11 +11,6 @@ message BlobProto { optional BlobShape shape = 7; repeated float data = 5 [packed = true]; repeated float diff = 6 [packed = true]; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= - repeated double double_data = 8 [packed = true]; - repeated double double_diff = 9 [packed = true]; ->>>>>>> triplet data generation and network update // 4D dimensions -- deprecated. Use "shape" instead. optional int32 num = 1 [default = 0]; @@ -54,10 +49,6 @@ message FillerParameter { // The expected number of non-zero output weights for a given input in // Gaussian filler -- the default -1 means don't perform sparsification. 
optional int32 sparse = 7 [default = -1]; -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig // Normalize the filler variance by fan_in, fan_out, or their average. // Applies to 'xavier' and 'msra' fillers. enum VarianceNorm { @@ -66,11 +57,6 @@ message FillerParameter { AVERAGE = 2; } optional VarianceNorm variance_norm = 8 [default = FAN_IN]; -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -======= ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig } message NetParameter { @@ -110,19 +96,7 @@ message NetParameter { // NOTE // Update the next available ID when you add a new SolverParameter field. // -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -// SolverParameter next available ID: 40 (last added: momentum2) -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 // SolverParameter next available ID: 37 (last added: iter_size) -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 -// SolverParameter next available ID: 40 (last added: momentum2) -======= -// SolverParameter next available ID: 36 (last added: clip_gradients) ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig message SolverParameter { ////////////////////////////////////////////////////////////////////////////// // Specifying the train and test networks @@ -175,41 +149,9 @@ message SolverParameter { // Display the loss averaged over the last average_loss iterations optional int32 average_loss = 33 [default = 1]; optional int32 max_iter = 7; // the maximum number of iterations -<<<<<<< 
06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 // accumulate gradients over `iter_size` x `batch_size` instances optional int32 iter_size = 36 [default = 1]; optional string lr_policy = 8; // The learning rate decay policy. -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig - // accumulate gradients over `iter_size` x `batch_size` instances - optional int32 iter_size = 36 [default = 1]; - - // The learning rate decay policy. The currently implemented learning rate - // policies are as follows: - // - fixed: always return base_lr. - // - step: return base_lr * gamma ^ (floor(iter / step)) - // - exp: return base_lr * gamma ^ iter - // - inv: return base_lr * (1 + gamma * iter) ^ (- power) - // - multistep: similar to step but it allows non uniform steps defined by - // stepvalue - // - poly: the effective learning rate follows a polynomial decay, to be - // zero by the max_iter. return base_lr (1 - iter/max_iter) ^ (power) - // - sigmoid: the effective learning rate follows a sigmod decay - // return base_lr ( 1/(1 + exp(-gamma * (iter - stepsize)))) - // - // where base_lr, max_iter, gamma, step, stepvalue and power are defined - // in the solver parameter protocol buffer, and iter is the current iteration. - optional string lr_policy = 8; -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -======= - optional string lr_policy = 8; // The learning rate decay policy. ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig optional float gamma = 9; // The parameter to compute the learning rate. optional float power = 10; // The parameter to compute the learning rate. optional float momentum = 11; // The momentum value. 
@@ -231,14 +173,6 @@ message SolverParameter { // whether to snapshot diff in the results or not. Snapshotting diff will help // debugging but the final protocol buffer size will be much larger. optional bool snapshot_diff = 16 [default = false]; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= - enum SnapshotFormat { - HDF5 = 0; - BINARYPROTO = 1; - } - optional SnapshotFormat snapshot_format = 37 [default = BINARYPROTO]; ->>>>>>> triplet data generation and network update // the mode solver will use: 0 for CPU and 1 for GPU. Use GPU in default. enum SolverMode { CPU = 0; @@ -257,26 +191,10 @@ message SolverParameter { SGD = 0; NESTEROV = 1; ADAGRAD = 2; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 } optional SolverType solver_type = 30 [default = SGD]; // numerical stability for AdaGrad optional float delta = 31 [default = 1e-8]; -======= - RMSPROP = 3; - ADADELTA = 4; - ADAM = 5; - } - optional SolverType solver_type = 30 [default = SGD]; - // numerical stability for RMSProp, AdaGrad and AdaDelta and Adam - optional float delta = 31 [default = 1e-8]; - // parameters for the Adam solver - optional float momentum2 = 39 [default = 0.999]; - - // RMSProp decay value - // MeanSquare(t) = rms_decay*MeanSquare(t-1) + (1-rms_decay)*SquareGradient(t) - optional float rms_decay = 38; ->>>>>>> triplet data generation and network update // If true, print information about the state of the net that may help with // debugging learning problems. @@ -351,18 +269,15 @@ message ParamSpec { // NOTE // Update the next available ID when you add a new LayerParameter field. 
// -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig +<<<<<<< b30868495fbae44b9556c621a319178d919bf562 +// LayerParameter next available layer-specific ID: 137 (last added: reduction_param) ======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -<<<<<<< 90c50a1b3e5527cfa0d92174b79cb05438c5302e +<<<<<<< 7a85de9cf8ac6f0416eca95c1b991f127b8b7917 // LayerParameter next available layer-specific ID: 137 (last added: reduction_param) ======= // LayerParameter next available layer-specific ID: 139 (last added: tile_param) >>>>>>> Add TileLayer -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig -// LayerParameter next available layer-specific ID: 139 (last added: tile_param) +>>>>>>> add initiate class name of triplet loss layer message LayerParameter { optional string name = 1; // the layer name optional string type = 2; // the layer type @@ -383,25 +298,14 @@ message LayerParameter { // The blobs containing the numeric parameters of the layer. repeated BlobProto blobs = 7; -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< b30868495fbae44b9556c621a319178d919bf562 + ======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +>>>>>>> add initiate class name of triplet loss layer // Specifies on which bottoms the backpropagation should be skipped. // The size must be either 0 or equal to the number of bottoms. 
repeated bool propagate_down = 11; -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -======= ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig // Rules controlling whether and when a layer is included in the network, // based on the current NetState. You may specify a non-zero number of rules @@ -433,31 +337,15 @@ message LayerParameter { optional DropoutParameter dropout_param = 108; optional DummyDataParameter dummy_data_param = 109; optional EltwiseParameter eltwise_param = 110; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 - optional ExpParameter exp_param = 111; - optional FlattenParameter flatten_param = 135; -======= - optional EmbedParameter embed_param = 137; optional ExpParameter exp_param = 111; -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig optional FlattenParameter flatten_param = 135; -======= ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig optional HDF5DataParameter hdf5_data_param = 112; optional HDF5OutputParameter hdf5_output_param = 113; optional HingeLossParameter hinge_loss_param = 114; optional ImageDataParameter image_data_param = 115; optional InfogainLossParameter infogain_loss_param = 116; optional InnerProductParameter inner_product_param = 117; -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig - optional LogParameter log_param = 134; -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 optional LogParameter log_param = 134; -======= ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig optional LRNParameter lrn_param = 118; optional MemoryDataParameter memory_data_param = 119; 
optional MVNParameter mvn_param = 120; @@ -465,45 +353,24 @@ message LayerParameter { optional PowerParameter power_param = 122; optional PReLUParameter prelu_param = 131; optional PythonParameter python_param = 130; -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig optional ReductionParameter reduction_param = 136; optional ReLUParameter relu_param = 123; optional ReshapeParameter reshape_param = 133; optional SigmoidParameter sigmoid_param = 124; optional SoftmaxParameter softmax_param = 125; optional SPPParameter spp_param = 132; -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -======= - optional ReLUParameter relu_param = 123; - optional SigmoidParameter sigmoid_param = 124; - optional SoftmaxParameter softmax_param = 125; ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig optional SliceParameter slice_param = 126; optional TanHParameter tanh_param = 127; optional ThresholdParameter threshold_param = 128; - optional TileParameter tile_param = 138; optional WindowDataParameter window_data_param = 129; -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -<<<<<<< 9a40f059e6e96f739ad502aae165c4bfdbf1f369 - optional TripletLossParameter triplet_loss_param = 139; +<<<<<<< b30868495fbae44b9556c621a319178d919bf562 +<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da ======= optional TripletLossParameter triplet_loss_param = 137; -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 - optional TripletLossParameter triplet_loss_param = 139; -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 - optional TripletLossParameter triplet_loss_param = 139; -======= - optional TripletLossParameter triplet_loss_param = 132; ->>>>>>> triplet data generation 
and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig >>>>>>> triplet data generation and network update +======= + optional TripletLossParameter triplet_loss_param = 137; +>>>>>>> add initiate class name of triplet loss layer } // Message that stores parameters used to apply transformation @@ -523,19 +390,10 @@ message TransformationParameter { // or can be repeated the same number of times as channels // (would subtract them from the corresponding channel) repeated float mean_value = 5; -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig // Force the decoded image to have 3 color channels. optional bool force_color = 6 [default = false]; // Force the decoded image to have 1 color channels. optional bool force_gray = 7 [default = false]; -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -======= ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig } // Message that stores parameters shared by loss layers @@ -547,19 +405,9 @@ message LossParameter { optional bool normalize = 2 [default = true]; } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -// Messages that store parameters used by individual layer types follow, in -// alphabetical order. - -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 // Messages that store parameters used by individual layer types follow, in // alphabetical order. 
-======= -// Message that stores parameters used by AccuracyLayer ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig message AccuracyParameter { // When computing accuracy, count as correct by comparing the true label to // the top k scoring classes. By default, only compare to the top scoring @@ -577,26 +425,12 @@ message AccuracyParameter { optional int32 ignore_label = 3; } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -// Message that stores parameters used by ArgMaxLayer ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig message ArgMaxParameter { // If true produce pairs (argmax, maxval) optional bool out_max_val = 1 [default = false]; optional uint32 top_k = 2 [default = 1]; } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -// Message that stores parameters used by ConcatLayer ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig message ConcatParameter { // The axis along which to concatenate -- may be negative to index from the // end (e.g., -1 for the last axis). 
Other axes must have the @@ -608,58 +442,36 @@ message ConcatParameter { optional uint32 concat_dim = 1 [default = 1]; } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -message ContrastiveLossParameter { - // margin for dissimilar pair - optional float margin = 1 [default = 1.0]; -======= -// Message that stores parameters used by ContrastiveLossLayer ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig message ContrastiveLossParameter { // margin for dissimilar pair optional float margin = 1 [default = 1.0]; -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig // The first implementation of this cost did not exactly match the cost of // Hadsell et al 2006 -- using (margin - d^2) instead of (margin - d)^2. // legacy_version = false (the default) uses (margin - d)^2 as proposed in the // Hadsell paper. New models should probably use this version. // legacy_version = true uses (margin - d^2). 
This is kept to support / // reproduce existing models and results - optional bool legacy_version = 2 [default = false]; + optional bool legacy_version = 2 [default = false]; } +<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da +======= message TripletLossParameter { //margin for negative triplet optional float margin = 1 [default = 1.0]; +<<<<<<< 8bea48cb77e890cda255da58b142f3a402196328 +======= optional uint32 losstype = 2 [default = 1]; optional uint32 num_triplets = 3 [default = 3]; -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< b30868495fbae44b9556c621a319178d919bf562 +>>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise } -======= -======= >>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig -} - -message TripletLossParameter { - //margin for negative triplet - optional float margin = 1 [default = 1.0]; +======= } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -// Message that stores parameters used by ConvolutionLayer ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig +>>>>>>> add initiate class name of triplet loss layer message ConvolutionParameter { optional uint32 num_output = 1; // The number of outputs for the layer optional bool bias_term = 2 [default = true]; // whether to have bias terms @@ -685,13 +497,6 @@ message ConvolutionParameter { optional Engine engine = 15 [default = DEFAULT]; } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -// Message that stores parameters used by DataLayer ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training 
param:src/caffe/proto/caffe.proto.orig.orig message DataParameter { enum DB { LEVELDB = 0; @@ -700,15 +505,11 @@ message DataParameter { // Specify the data source. optional string source = 1; // Specify the batch size. - optional uint32 batch_size = 4; + optional uint32 batch_size = 4 [default = 1]; // The rand_skip variable is for the data layer to skip a few data points // to avoid all asynchronous sgd clients to start at the same point. The skip // point would be set as rand_skip * rand(0,1). Note that rand_skip should not // be larger than the number of keys in the database. -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= - // DEPRECATED. Each solver accesses a different subset of the database. ->>>>>>> triplet data generation and network update optional uint32 rand_skip = 7 [default = 0]; optional DB backend = 8 [default = LEVELDB]; // DEPRECATED. See TransformationParameter. For data pre-processing, we can do @@ -724,31 +525,12 @@ message DataParameter { optional bool mirror = 6 [default = false]; // Force the encoded image to have 3 color channels optional bool force_encoded_color = 9 [default = false]; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 } -======= - // Prefetch queue (Number of batches to prefetch to host memory, increase if - // data access bandwidth varies). - optional uint32 prefetch = 10 [default = 4]; -} - -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -// Message that stores parameters used by DropoutLayer ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig message DropoutParameter { optional float dropout_ratio = 1 [default = 0.5]; // dropout ratio } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -// Message that stores parameters used by DummyDataLayer. 
->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig // DummyDataLayer fills any number of arbitrarily shaped blobs with random // (or constant) data generated by "Fillers" (see "message FillerParameter"). message DummyDataParameter { @@ -768,13 +550,6 @@ message DummyDataParameter { repeated uint32 width = 5; } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -// Message that stores parameters used by EltwiseLayer ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig message EltwiseParameter { enum EltwiseOp { PROD = 0; @@ -789,28 +564,6 @@ message EltwiseParameter { optional bool stable_prod_grad = 3 [default = true]; } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig -// Message that stores parameters used by EmbedLayer -message EmbedParameter { - optional uint32 num_output = 1; // The number of outputs for the layer - // The input is given as integers to be interpreted as one-hot - // vector indices with dimension num_input. Hence num_input should be - // 1 greater than the maximum possible input value. - optional uint32 input_dim = 2; - - optional bool bias_term = 3 [default = true]; // Whether to use a bias term - optional FillerParameter weight_filler = 4; // The filler for the weight - optional FillerParameter bias_filler = 5; // The filler for the bias - -} - -// Message that stores parameters used by ExpLayer ->>>>>>> triplet data generation and network update message ExpParameter { // ExpLayer computes outputs y = base ^ (shift + scale * x), for base > 0. 
// Or if base is set to the default (-1), base is set to e, @@ -820,10 +573,6 @@ message ExpParameter { optional float shift = 3 [default = 0.0]; } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig /// Message that stores parameters used by FlattenLayer message FlattenParameter { // The first axis to flatten: all preceding axes are retained in the output. @@ -836,11 +585,6 @@ message FlattenParameter { optional int32 end_axis = 2 [default = -1]; } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -======= ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig // Message that stores parameters used by HDF5DataLayer message HDF5DataParameter { // Specify the data source. @@ -856,13 +600,6 @@ message HDF5DataParameter { optional bool shuffle = 3 [default = false]; } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -// Message that stores parameters used by HDF5OutputLayer ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig message HDF5OutputParameter { optional string file_name = 1; } @@ -876,13 +613,6 @@ message HingeLossParameter { optional Norm norm = 1 [default = L1]; } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -// Message that stores parameters used by ImageDataLayer ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig message ImageDataParameter { // Specify the data source. 
optional string source = 1; @@ -914,25 +644,11 @@ message ImageDataParameter { optional string root_folder = 12 [default = ""]; } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -// Message that stores parameters InfogainLossLayer ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig message InfogainLossParameter { // Specify the infogain matrix source. optional string source = 1; } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -// Message that stores parameters used by InnerProductLayer ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig message InnerProductParameter { optional uint32 num_output = 1; // The number of outputs for the layer optional bool bias_term = 2 [default = true]; // whether to have bias terms @@ -945,10 +661,6 @@ message InnerProductParameter { optional int32 axis = 5 [default = 1]; } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig // Message that stores parameters used by LogLayer message LogParameter { // LogLayer computes outputs y = log_base(shift + scale * x), for base > 0. 
@@ -959,11 +671,6 @@ message LogParameter { optional float shift = 3 [default = 0.0]; } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -======= ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig // Message that stores parameters used by LRNLayer message LRNParameter { optional uint32 local_size = 1 [default = 5]; @@ -977,13 +684,6 @@ message LRNParameter { optional float k = 5 [default = 1.]; } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -// Message that stores parameters used by MemoryDataLayer ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig message MemoryDataParameter { optional uint32 batch_size = 1; optional uint32 channels = 2; @@ -991,38 +691,17 @@ message MemoryDataParameter { optional uint32 width = 4; } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -// Message that stores parameters used by MVNLayer ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig message MVNParameter { // This parameter can be set to false to normalize mean only optional bool normalize_variance = 1 [default = true]; // This parameter can be set to true to perform DNN-like MVN optional bool across_channels = 2 [default = false]; -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig // Epsilon for not dividing by zero while normalizing variance optional float eps = 3 [default = 1e-9]; } -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 - - // Epsilon for not dividing by zero while normalizing variance - optional float eps = 3 [default = 1e-9]; -} - -======= -} - -// 
Message that stores parameters used by PoolingLayer ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig message PoolingParameter { enum PoolMethod { MAX = 0; @@ -1052,13 +731,6 @@ message PoolingParameter { optional bool global_pooling = 12 [default = false]; } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -// Message that stores parameters used by PowerLayer ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig message PowerParameter { // PowerLayer computes outputs y = (shift + scale * x) ^ power. optional float power = 1 [default = 1.0]; @@ -1066,9 +738,6 @@ message PowerParameter { optional float shift = 3 [default = 0.0]; } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 message PythonParameter { optional string module = 1; optional string layer = 2; @@ -1101,22 +770,6 @@ message ReductionParameter { optional int32 axis = 2 [default = 0]; optional float coeff = 3 [default = 1.0]; // coefficient for output -======= -// Message that stores parameters used by PythonLayer ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig -message PythonParameter { - optional string module = 1; - optional string layer = 2; - // This value is set to the attribute `param_str` of the `PythonLayer` object - // in Python before calling the `setup()` method. This could be a number, - // string, dictionary in Python dict format, JSON, etc. You may parse this - // string in `setup` method and use it in `forward` and `backward`. - optional string param_str = 3 [default = '']; - // Whether this PythonLayer is shared among worker solvers during data parallelism. 
- // If true, each worker solver sequentially run forward from this layer. - // This value should be set true if you are using it as a data layer. - optional bool share_in_parallel = 4 [default = false]; ->>>>>>> triplet data generation and network update } // Message that stores parameters used by ReductionLayer @@ -1164,10 +817,6 @@ message ReLUParameter { optional Engine engine = 2 [default = DEFAULT]; } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig message ReshapeParameter { // Specify the output dimensions. If some of the dimensions are set to 0, // the corresponding dimension from the bottom layer is used (unchanged). @@ -1232,12 +881,6 @@ message ReshapeParameter { optional int32 num_axes = 3 [default = -1]; } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -======= -// Message that stores parameters used by SigmoidLayer ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig message SigmoidParameter { enum Engine { DEFAULT = 0; @@ -1247,13 +890,6 @@ message SigmoidParameter { optional Engine engine = 1 [default = DEFAULT]; } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -// Message that stores parameters used by SliceLayer ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig message SliceParameter { // The axis along which to slice -- may be negative to index from the end // (e.g., -1 for the last axis). 
@@ -1280,13 +916,6 @@ message SoftmaxParameter { optional int32 axis = 2 [default = 1]; } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -// Message that stores parameters used by TanHLayer ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig message TanHParameter { enum Engine { DEFAULT = 0; @@ -1296,13 +925,8 @@ message TanHParameter { optional Engine engine = 1 [default = DEFAULT]; } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 +<<<<<<< b30868495fbae44b9556c621a319178d919bf562 ======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig // Message that stores parameters used by TileLayer message TileParameter { // The index of the axis to tile. 
@@ -1312,26 +936,12 @@ message TileParameter { optional int32 tiles = 2; } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -======= ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig // Message that stores parameters used by ThresholdLayer +>>>>>>> add initiate class name of triplet loss layer message ThresholdParameter { optional float threshold = 1 [default = 0]; // Strictly positive values } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -// Message that stores parameters used by WindowDataLayer ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig message WindowDataParameter { // Specify the data source. optional string source = 1; @@ -1365,10 +975,6 @@ message WindowDataParameter { optional string root_folder = 13 [default = ""]; } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig message SPPParameter { enum PoolMethod { MAX = 0; @@ -1385,11 +991,6 @@ message SPPParameter { optional Engine engine = 6 [default = DEFAULT]; } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -======= ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig // DEPRECATED: use LayerParameter. 
message V1LayerParameter { repeated string bottom = 2; @@ -1436,7 +1037,6 @@ message V1LayerParameter { SPLIT = 22; SLICE = 33; TANH = 23; - TRIPLET_LOSS = 40; WINDOW_DATA = 24; THRESHOLD = 31; @@ -1483,7 +1083,6 @@ message V1LayerParameter { optional TransformationParameter transform_param = 36; optional LossParameter loss_param = 42; optional V0LayerParameter layer = 1; - optional TripletLossParameter triplet_loss_param = 43; } // DEPRECATED: V0LayerParameter is the old way of specifying layer parameters @@ -1580,13 +1179,6 @@ message V0LayerParameter { optional HDF5OutputParameter hdf5_output_param = 1001; } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f:src/caffe/proto/caffe.proto.orig -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -// Message that stores parameters used by PReLULayer ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param:src/caffe/proto/caffe.proto.orig.orig message PReLUParameter { // Parametric ReLU described in K. He et al, Delving Deep into Rectifiers: // Surpassing Human-Level Performance on ImageNet Classification, 2015. diff --git a/src/caffe/test/test_net.cpp b/src/caffe/test/test_net.cpp index 32abc893559..45c149f0da5 100644 --- a/src/caffe/test/test_net.cpp +++ b/src/caffe/test/test_net.cpp @@ -618,10 +618,20 @@ class NetTest : public MultiDeviceTest { <<<<<<< HEAD <<<<<<< HEAD <<<<<<< HEAD +<<<<<<< HEAD +<<<<<<< HEAD ======= >>>>>>> 011aef0... restore ======= >>>>>>> 80a07dd... macro define in upgrade_proto +======= +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +======= +>>>>>>> restore +>>>>>>> 0a85215... triplet data generation and network update +======= +>>>>>>> 03cac8c... 
fixed two bugs with prototext format virtual void InitSkipPropNet(bool test_skip_true) { string proto = "name: 'SkipPropTestNetwork' " @@ -713,12 +723,24 @@ class NetTest : public MultiDeviceTest { <<<<<<< HEAD <<<<<<< HEAD <<<<<<< HEAD +<<<<<<< HEAD +<<<<<<< HEAD +<<<<<<< HEAD ======= >>>>>>> 00341b2... triplet data generation and network update ======= >>>>>>> 1882ac9... add initiate class name of triplet loss layer ======= >>>>>>> 08d5d6d... macro define in upgrade_proto +======= +>>>>>>> 1f7ef32... add RGB data training as an option in triplet training +======= +======= +>>>>>>> 8f22aea... add initiate class name of triplet loss layer +======= +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a +>>>>>>> 0a85215... triplet data generation and network update +>>>>>>> 0dbadac... triplet data generation and network update proto += " propagate_down: true " " propagate_down: false "; else @@ -728,17 +750,21 @@ class NetTest : public MultiDeviceTest { proto += " propagate_down: [true, false] "; else proto += " propagate_down: [true, true] "; +<<<<<<< HEAD >>>>>>> 011aef0... restore <<<<<<< HEAD <<<<<<< HEAD ======= >>>>>>> 1882ac9... add initiate class name of triplet loss layer ======= +======= +>>>>>>> 03cac8c... fixed two bugs with prototext format proto += " propagate_down: true " " propagate_down: false "; else proto += " propagate_down: true " " propagate_down: true "; +<<<<<<< HEAD >>>>>>> 98fb438... fixed two bugs with prototext format <<<<<<< HEAD <<<<<<< HEAD @@ -752,7 +778,28 @@ class NetTest : public MultiDeviceTest { else proto += " propagate_down: [true, true] "; >>>>>>> 80a07dd... macro define in upgrade_proto +<<<<<<< HEAD >>>>>>> 08d5d6d... macro define in upgrade_proto +======= +======= + proto += " propagate_down: true " + " propagate_down: false "; + else + proto += " propagate_down: true " + " propagate_down: true "; +>>>>>>> b266250... fixed two bugs with prototext format +<<<<<<< HEAD +>>>>>>> 1f7ef32... 
add RGB data training as an option in triplet training +======= +======= +>>>>>>> restore +>>>>>>> 0a85215... triplet data generation and network update +<<<<<<< HEAD +>>>>>>> 0dbadac... triplet data generation and network update +======= +======= +>>>>>>> 03cac8c... fixed two bugs with prototext format +>>>>>>> 8f22aea... add initiate class name of triplet loss layer proto += " top: 'cross_entropy_loss' " " type: 'SigmoidCrossEntropyLoss' " @@ -761,6 +808,8 @@ class NetTest : public MultiDeviceTest { InitNetFromProtoString(proto); } +<<<<<<< HEAD +<<<<<<< HEAD <<<<<<< HEAD <<<<<<< HEAD ======= @@ -771,6 +820,15 @@ class NetTest : public MultiDeviceTest { >>>>>>> 4d8130b... New triplet loss layer added(beta1 version-no test source files) ======= >>>>>>> 80a07dd... macro define in upgrade_proto +======= +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a +======= +>>>>>>> triplet data generation and network update +======= +>>>>>>> restore +>>>>>>> 0a85215... triplet data generation and network update +======= +>>>>>>> 03cac8c... fixed two bugs with prototext format int seed_; shared_ptr > net_; }; @@ -1165,10 +1223,11 @@ TYPED_TEST(NetTest, TestSharedWeightsUpdate) { EXPECT_EQ(this->net_->layer_names()[2], "innerproduct2"); Blob* ip1_weights = this->net_->layers()[1]->blobs()[0].get(); Blob* ip2_weights = this->net_->layers()[2]->blobs()[0].get(); - // Check that data and diff blobs of shared weights share the same memory - // locations. + // Check that data blobs of shared weights share the same location in memory. EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); - EXPECT_EQ(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); + // Check that diff blobs of shared weights are at different locations in + // memory. (The diffs should be accumulated at update time.) + EXPECT_NE(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); this->net_->Forward(bottom); this->net_->Backward(); // Compute the expected update as the data minus the two diffs. 
@@ -1181,7 +1240,11 @@ TYPED_TEST(NetTest, TestSharedWeightsUpdate) { // Make sure the diffs are non-trivial. for (int i = 0; i < count; ++i) { EXPECT_NE(0, ip1_weights->cpu_diff()[i]); + EXPECT_NE(0, ip2_weights->cpu_diff()[i]); + EXPECT_NE(ip1_weights->cpu_diff()[i], ip2_weights->cpu_diff()[i]); } + caffe_axpy(count, Dtype(1), ip2_weights->cpu_diff(), + shared_params.mutable_cpu_diff()); caffe_axpy(count, Dtype(-1), shared_params.cpu_diff(), shared_params.mutable_cpu_data()); const Dtype* expected_updated_params = shared_params.cpu_data(); @@ -1218,8 +1281,8 @@ TYPED_TEST(NetTest, TestSharedWeightsUpdate) { EXPECT_NE(0, ip1_weights->cpu_diff()[i]); EXPECT_NE(0, ip2_weights->cpu_diff()[i]); EXPECT_NE(ip1_weights->cpu_diff()[i], ip2_weights->cpu_diff()[i]); - EXPECT_FLOAT_EQ(ip1_weights->cpu_diff()[i] + ip2_weights->cpu_diff()[i], - shared_params.cpu_diff()[i]); + EXPECT_EQ(ip1_weights->cpu_diff()[i] + ip2_weights->cpu_diff()[i], + shared_params.cpu_diff()[i]); } caffe_axpy(count, Dtype(-1), ip1_weights->cpu_diff(), unshared_params1.mutable_cpu_data()); diff --git a/src/caffe/test/test_net.cpp.orig b/src/caffe/test/test_net.cpp.orig index 6183c38d3fd..2fcfad6ebd1 100644 --- a/src/caffe/test/test_net.cpp.orig +++ b/src/caffe/test/test_net.cpp.orig @@ -615,20 +615,12 @@ class NetTest : public MultiDeviceTest { } <<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -======= ->>>>>>> 011aef0... restore -======= ->>>>>>> 80a07dd... macro define in upgrade_proto ======= <<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a <<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 ======= >>>>>>> restore ->>>>>>> 0a85215... triplet data generation and network update +>>>>>>> 0dbadac... 
triplet data generation and network update virtual void InitSkipPropNet(bool test_skip_true) { string proto = "name: 'SkipPropTestNetwork' " @@ -717,67 +709,26 @@ class NetTest : public MultiDeviceTest { if (test_skip_true) <<<<<<< HEAD <<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 -======= -<<<<<<< HEAD -<<<<<<< HEAD ->>>>>>> triplet data generation and network update -======= ->>>>>>> 00341b2... triplet data generation and network update -======= ->>>>>>> 1882ac9... add initiate class name of triplet loss layer -======= ->>>>>>> 08d5d6d... macro define in upgrade_proto -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 -======= -======= ->>>>>>> 1f7ef32... add RGB data training as an option in triplet training ======= ======= +<<<<<<< 10b3f6404a1efb4e8f237bf204c88854bea7edb8 +>>>>>>> 8f22aea... add initiate class name of triplet loss layer <<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a ->>>>>>> 0a85215... triplet data generation and network update >>>>>>> 0dbadac... triplet data generation and network update ->>>>>>> triplet data generation and network update proto += " propagate_down: true " " propagate_down: false "; else proto += " propagate_down: true " " propagate_down: true "; -======= - proto += " propagate_down: [true, false] "; - else - proto += " propagate_down: [true, true] "; -<<<<<<< HEAD ->>>>>>> 011aef0... restore -<<<<<<< HEAD -<<<<<<< HEAD -======= ->>>>>>> 1882ac9... add initiate class name of triplet loss layer -======= - proto += " propagate_down: true " - " propagate_down: false "; - else - proto += " propagate_down: true " - " propagate_down: true "; ->>>>>>> 98fb438... fixed two bugs with prototext format <<<<<<< HEAD -<<<<<<< HEAD -======= ->>>>>>> 00341b2... triplet data generation and network update -======= ->>>>>>> 1882ac9... 
add initiate class name of triplet loss layer ======= ======= proto += " propagate_down: [true, false] "; else proto += " propagate_down: [true, true] "; ->>>>>>> 80a07dd... macro define in upgrade_proto ->>>>>>> 08d5d6d... macro define in upgrade_proto -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 -======= +>>>>>>> restore +<<<<<<< HEAD +>>>>>>> 0dbadac... triplet data generation and network update ======= ======= proto += " propagate_down: true " @@ -785,15 +736,8 @@ class NetTest : public MultiDeviceTest { else proto += " propagate_down: true " " propagate_down: true "; ->>>>>>> b266250... fixed two bugs with prototext format -<<<<<<< HEAD ->>>>>>> 1f7ef32... add RGB data training as an option in triplet training -======= -======= ->>>>>>> restore ->>>>>>> 0a85215... triplet data generation and network update ->>>>>>> 0dbadac... triplet data generation and network update ->>>>>>> triplet data generation and network update +>>>>>>> fixed two bugs with prototext format +>>>>>>> 8f22aea... add initiate class name of triplet loss layer proto += " top: 'cross_entropy_loss' " " type: 'SigmoidCrossEntropyLoss' " @@ -803,23 +747,13 @@ class NetTest : public MultiDeviceTest { } <<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -======= ->>>>>>> 083f61b... New triplet loss layer added(beta1 version-no test source files) -======= ->>>>>>> 011aef0... restore -======= ->>>>>>> 4d8130b... New triplet loss layer added(beta1 version-no test source files) -======= ->>>>>>> 80a07dd... macro define in upgrade_proto ======= <<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a ======= >>>>>>> triplet data generation and network update ======= >>>>>>> restore ->>>>>>> 0a85215... triplet data generation and network update +>>>>>>> 0dbadac... 
triplet data generation and network update int seed_; shared_ptr > net_; }; @@ -1214,10 +1148,18 @@ TYPED_TEST(NetTest, TestSharedWeightsUpdate) { EXPECT_EQ(this->net_->layer_names()[2], "innerproduct2"); Blob* ip1_weights = this->net_->layers()[1]->blobs()[0].get(); Blob* ip2_weights = this->net_->layers()[2]->blobs()[0].get(); +<<<<<<< HEAD + // Check that data blobs of shared weights share the same location in memory. + EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); + // Check that diff blobs of shared weights are at different locations in + // memory. (The diffs should be accumulated at update time.) + EXPECT_NE(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); +======= // Check that data and diff blobs of shared weights share the same memory // locations. EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); EXPECT_EQ(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); +>>>>>>> 0dbadac... triplet data generation and network update this->net_->Forward(bottom); this->net_->Backward(); // Compute the expected update as the data minus the two diffs. @@ -1230,7 +1172,15 @@ TYPED_TEST(NetTest, TestSharedWeightsUpdate) { // Make sure the diffs are non-trivial. for (int i = 0; i < count; ++i) { EXPECT_NE(0, ip1_weights->cpu_diff()[i]); +<<<<<<< HEAD + EXPECT_NE(0, ip2_weights->cpu_diff()[i]); + EXPECT_NE(ip1_weights->cpu_diff()[i], ip2_weights->cpu_diff()[i]); + } + caffe_axpy(count, Dtype(1), ip2_weights->cpu_diff(), + shared_params.mutable_cpu_diff()); +======= } +>>>>>>> 0dbadac... 
triplet data generation and network update caffe_axpy(count, Dtype(-1), shared_params.cpu_diff(), shared_params.mutable_cpu_data()); const Dtype* expected_updated_params = shared_params.cpu_data(); @@ -1267,8 +1217,13 @@ TYPED_TEST(NetTest, TestSharedWeightsUpdate) { EXPECT_NE(0, ip1_weights->cpu_diff()[i]); EXPECT_NE(0, ip2_weights->cpu_diff()[i]); EXPECT_NE(ip1_weights->cpu_diff()[i], ip2_weights->cpu_diff()[i]); +<<<<<<< HEAD + EXPECT_EQ(ip1_weights->cpu_diff()[i] + ip2_weights->cpu_diff()[i], + shared_params.cpu_diff()[i]); +======= EXPECT_FLOAT_EQ(ip1_weights->cpu_diff()[i] + ip2_weights->cpu_diff()[i], shared_params.cpu_diff()[i]); +>>>>>>> 0dbadac... triplet data generation and network update } caffe_axpy(count, Dtype(-1), ip1_weights->cpu_diff(), unshared_params1.mutable_cpu_data()); @@ -1298,10 +1253,18 @@ TYPED_TEST(NetTest, TestSharedWeightsResume) { EXPECT_EQ(this->net_->layer_names()[2], "innerproduct2"); Blob* ip1_weights = this->net_->layers()[1]->blobs()[0].get(); Blob* ip2_weights = this->net_->layers()[2]->blobs()[0].get(); +<<<<<<< HEAD + // Check that data blobs of shared weights share the same location in memory. + EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); + // Check that diff blobs of shared weights are at different locations in + // memory. (The diffs should be accumulated at update time.) + EXPECT_NE(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); +======= // Check that data and diff blobs of shared weights share the same memory // locations. EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); EXPECT_EQ(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); +>>>>>>> 0dbadac... 
triplet data generation and network update this->net_->ForwardBackward(bottom); this->net_->Update(); Blob shared_params; @@ -1324,6 +1287,16 @@ TYPED_TEST(NetTest, TestSharedWeightsResume) { ASSERT_FALSE(NULL == ip1_weights); ASSERT_FALSE(NULL == ip2_weights); EXPECT_NE(ip1_weights, ip2_weights); +<<<<<<< HEAD + // Check that data blobs of shared weights share the same location in memory. + EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); + for (int i = 0; i < count; ++i) { + EXPECT_FLOAT_EQ(shared_params.cpu_data()[i], ip1_weights->cpu_data()[i]); + } + // Check that diff blobs of shared weights are at different locations in + // memory. (The diffs should be accumulated at update time.) + EXPECT_NE(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); +======= // Check that data and diff blobs of shared weights share the same memory // locations. EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); @@ -1331,6 +1304,7 @@ TYPED_TEST(NetTest, TestSharedWeightsResume) { for (int i = 0; i < count; ++i) { EXPECT_FLOAT_EQ(shared_params.cpu_data()[i], ip1_weights->cpu_data()[i]); } +>>>>>>> 0dbadac... 
triplet data generation and network update } TYPED_TEST(NetTest, TestParamPropagateDown) { diff --git a/src/caffe/test/test_net.cpp.orig.orig b/src/caffe/test/test_net.cpp.orig.orig new file mode 100644 index 00000000000..904e6cb44ec --- /dev/null +++ b/src/caffe/test/test_net.cpp.orig.orig @@ -0,0 +1,2508 @@ +#include +#include +#include + +#include "google/protobuf/text_format.h" + +#include "gtest/gtest.h" + +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/net.hpp" +#include "caffe/util/math_functions.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class NetTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + NetTest() : seed_(1701) {} + + virtual void InitNetFromProtoString(const string& proto) { + NetParameter param; + CHECK(google::protobuf::TextFormat::ParseFromString(proto, ¶m)); + net_.reset(new Net(param)); + } + + virtual void CopyNetBlobs(const bool copy_diff, + vector > >* blobs_copy) { + CHECK(net_); + const vector > >& net_blobs = net_->blobs(); + blobs_copy->clear(); + blobs_copy->resize(net_blobs.size()); + const bool kReshape = true; + for (int i = 0; i < net_blobs.size(); ++i) { + (*blobs_copy)[i].reset(new Blob()); + (*blobs_copy)[i]->CopyFrom(*net_blobs[i], copy_diff, kReshape); + } + } + + virtual void CopyNetParams(const bool copy_diff, + vector > >* params_copy) { + CHECK(net_); + const vector > >& net_params = net_->params(); + params_copy->clear(); + params_copy->resize(net_params.size()); + const bool kReshape = true; + for (int i = 0; i < net_params.size(); ++i) { + (*params_copy)[i].reset(new Blob()); + (*params_copy)[i]->CopyFrom(*net_params[i], copy_diff, kReshape); + } + } + + virtual void InitTinyNet(const bool force_backward = false, + const bool accuracy_layer = false) { + string proto = + "name: 'TinyTestNetwork' " + "layer { " + " name: 'data' " + " type: 'DummyData' " + " 
dummy_data_param { " + " shape { " + " dim: 5 " + " dim: 2 " + " dim: 3 " + " dim: 4 " + " } " + " data_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " shape { " + " dim: 5 " + " } " + " data_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerproduct' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 1000 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'data' " + " top: 'innerproduct' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerproduct' " + " bottom: 'label' " + " top: 'top_loss' " + "} "; + if (accuracy_layer) { + proto += + "layer { " + " name: 'loss' " + " type: 'Accuracy' " + " bottom: 'innerproduct' " + " bottom: 'label' " + " top: 'accuracy' " + "} "; + } + if (force_backward) { + proto += "force_backward: true "; + } + InitNetFromProtoString(proto); + } + + virtual void InitTinyNetEuclidean(const bool force_backward = false) { + string proto = + "name: 'TinyTestEuclidLossNetwork' " + "layer { " + " name: 'data' " + " type: 'DummyData' " + " dummy_data_param { " + " num: 5 " + " channels: 2 " + " height: 3 " + " width: 4 " + " num: 5 " + " channels: 1 " + " height: 1 " + " width: 1 " + " data_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerproduct' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + 
" bottom: 'data' " + " top: 'innerproduct' " + "} " + "layer { " + " name: 'loss' " + " type: 'EuclideanLoss' " + " bottom: 'innerproduct' " + " bottom: 'label' " + "} "; + if (force_backward) { + proto += "force_backward: true "; + } + InitNetFromProtoString(proto); + } + + virtual void InitTrickyNet(Dtype* loss_weight = NULL) { + ostringstream loss_weight_stream; + if (loss_weight) { + loss_weight_stream << " loss_weight: " << *loss_weight << " "; + } + const string& proto = + "name: 'TrickyTestNetwork' " + "layer { " + " name: 'data' " + " type: 'DummyData' " + " dummy_data_param { " + " num: 5 " + " channels: 2 " + " height: 3 " + " width: 4 " + " num: 5 " + " channels: 1 " + " height: 1 " + " width: 1 " + " data_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerproduct' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 1000 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'data' " + " top: 'transformed_data' " + "} " + "layer { " + " name: 'innerproduct' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'label' " + " top: 'transformed_label' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + + loss_weight_stream.str() + + " bottom: 'transformed_data' " + " bottom: 'transformed_label' " + "} "; + InitNetFromProtoString(proto); + } + + // loss_weight is the loss weight for the 'EuclideanLoss' layer output. 
+ // midnet_loss_weight is the loss weight for the first 'InnerProduct' layer + // output. Should both default to 0.0 if unspecified (i.e., if NULL is + // passed to this function). + virtual void InitUnsharedWeightsNet(const Dtype* loss_weight = NULL, + const Dtype* midnet_loss_weight = NULL, + const bool force_backward = false, const bool bias_term = false, + const Dtype blobs_lr_w1 = 1, const Dtype blobs_lr_b1 = 2, + const Dtype blobs_lr_w2 = 1, const Dtype blobs_lr_b2 = 2) { + string bias_str = bias_term ? "true ":"false "; + ostringstream proto; + proto << "name: 'UnsharedWeightsNetwork' "; + if (force_backward) { + proto << "force_backward: true "; + } + proto << + "layer { " + " name: 'data' " + " type: 'DummyData' " + " dummy_data_param { " + " num: 5 " + " channels: 2 " + " height: 3 " + " width: 4 " + " data_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " } " + " top: 'data' " + "} " + "layer { " + " name: 'innerproduct1' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 10 " + " bias_term: " << bias_str << + " weight_filler { " + " type: 'gaussian' " + " std: 10 " + " } " + " } " + " param { " + " name: 'unsharedweights1' " + " lr_mult: " << blobs_lr_w1 << + " } "; + if (bias_term) { + proto << " param { lr_mult: " << blobs_lr_b1 << " } "; + } + proto << + " bottom: 'data' " + " top: 'innerproduct1' "; + if (midnet_loss_weight) { + proto << " loss_weight: " << *midnet_loss_weight << " "; + } + proto << + "} " + "layer { " + " name: 'innerproduct2' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 10 " + " bias_term: " << bias_str << + " weight_filler { " + " type: 'gaussian' " + " std: 10 " + " } " + " } " + " param { " + " name: 'unsharedweights2' " + " lr_mult: " << blobs_lr_w2 << + " } "; + if (bias_term) { + proto << " param { lr_mult: " << blobs_lr_b2 << " } "; + } + proto << + " bottom: 'data' " + " top: 'innerproduct2' " + "} " + "layer { " + " name: 'loss' " + " type: 'EuclideanLoss' "; 
+ if (loss_weight) { + proto << " loss_weight: " << *loss_weight << " "; + } + proto << + " bottom: 'innerproduct1' " + " bottom: 'innerproduct2' " + "} "; + InitNetFromProtoString(proto.str()); + } + + virtual void InitSharedWeightsNet() { + const string& proto = + "name: 'SharedWeightsNetwork' " + "layer { " + " name: 'data' " + " type: 'DummyData' " + " dummy_data_param { " + " num: 5 " + " channels: 2 " + " height: 3 " + " width: 4 " + " data_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " } " + " top: 'data' " + "} " + "layer { " + " name: 'innerproduct1' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 10 " + " bias_term: false " + " weight_filler { " + " type: 'gaussian' " + " std: 10 " + " } " + " } " + " param { name: 'sharedweights' } " + " bottom: 'data' " + " top: 'innerproduct1' " + "} " + "layer { " + " name: 'innerproduct2' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 10 " + " bias_term: false " + " weight_filler { " + " type: 'gaussian' " + " std: 10 " + " } " + " } " + " param { name: 'sharedweights' } " + " bottom: 'data' " + " top: 'innerproduct2' " + "} " + "layer { " + " name: 'loss' " + " type: 'EuclideanLoss' " + " bottom: 'innerproduct1' " + " bottom: 'innerproduct2' " + "} "; + InitNetFromProtoString(proto); + } + + virtual void InitDiffDataUnsharedWeightsNet() { + const string& proto = + "name: 'DiffDataUnsharedWeightsNetwork' " + "layer { " + " name: 'data' " + " type: 'DummyData' " + " dummy_data_param { " + " num: 10 " + " channels: 10 " + " height: 1 " + " width: 1 " + " num: 10 " + " channels: 10 " + " height: 1 " + " width: 1 " + " data_filler { " + " type: 'gaussian' " + " std: 10 " + " } " + " } " + " top: 'data1' " + " top: 'data2' " + "} " + "layer { " + " name: 'innerproduct1' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 10 " + " bias_term: false " + " weight_filler { " + " type: 'constant' " + " value: 0.5 " + " } " + " } " + " param { 
name: 'unsharedweights1' } " + " bottom: 'data1' " + " top: 'innerproduct1' " + "} " + "layer { " + " name: 'innerproduct2' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 10 " + " bias_term: false " + " weight_filler { " + " type: 'constant' " + " value: 0.5 " + " } " + " } " + " param { name: 'unsharedweights2' } " + " bottom: 'innerproduct1' " + " top: 'innerproduct2' " + "} " + "layer { " + " name: 'loss' " + " type: 'EuclideanLoss' " + " bottom: 'data2' " + " bottom: 'innerproduct2' " + "} "; + InitNetFromProtoString(proto); + } + + virtual void InitDiffDataSharedWeightsNet() { + const string& proto = + "name: 'DiffDataSharedWeightsNetwork' " + "layer { " + " name: 'data' " + " type: 'DummyData' " + " dummy_data_param { " + " num: 10 " + " channels: 10 " + " height: 1 " + " width: 1 " + " num: 10 " + " channels: 10 " + " height: 1 " + " width: 1 " + " data_filler { " + " type: 'gaussian' " + " std: 10 " + " } " + " } " + " top: 'data1' " + " top: 'data2' " + "} " + "layer { " + " name: 'innerproduct1' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 10 " + " bias_term: false " + " weight_filler { " + " type: 'constant' " + " value: 0.5 " + " } " + " } " + " param { name: 'sharedweights' } " + " bottom: 'data1' " + " top: 'innerproduct1' " + "} " + "layer { " + " name: 'innerproduct2' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 10 " + " bias_term: false " + " weight_filler { " + " type: 'constant' " + " value: 0.5 " + " } " + " } " + " param { name: 'sharedweights' } " + " bottom: 'innerproduct1' " + " top: 'innerproduct2' " + "} " + "layer { " + " name: 'loss' " + " type: 'EuclideanLoss' " + " bottom: 'data2' " + " bottom: 'innerproduct2' " + "} "; + InitNetFromProtoString(proto); + } + + virtual void InitReshapableNet() { + const string& proto = + "name: 'ReshapableNetwork' " + "input: 'data' " + "input_dim: 1 " + "input_dim: 3 " + "input_dim: 100 " + "input_dim: 100 " + "layer { " 
+ " name: 'conv1' " + " type: 'Convolution' " + " bottom: 'data' " + " top: 'conv1' " + " convolution_param { " + " num_output: 5 " + " kernel_size: 3 " + " stride: 2 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0.2 " + " } " + " } " + "} " + "layer { " + " name: 'relu1' " + " type: 'ReLU' " + " bottom: 'conv1' " + " top: 'conv1' " + "} " + "layer { " + " name: 'pool1' " + " type: 'Pooling' " + " bottom: 'conv1' " + " top: 'pool1' " + " pooling_param { " + " pool: MAX " + " kernel_size: 2 " + " stride: 2 " + " } " + "} " + "layer { " + " name: 'norm1' " + " type: 'LRN' " + " bottom: 'pool1' " + " top: 'norm1' " + " lrn_param { " + " local_size: 3 " + " } " + "} " + "layer { " + " name: 'softmax' " + " type: 'Softmax' " + " bottom: 'norm1' " + " top: 'softmax' " + "} "; + InitNetFromProtoString(proto); + } + +<<<<<<< HEAD +<<<<<<< HEAD +<<<<<<< HEAD +<<<<<<< HEAD +<<<<<<< b30868495fbae44b9556c621a319178d919bf562 +======= +<<<<<<< HEAD +<<<<<<< HEAD +>>>>>>> add initiate class name of triplet loss layer +======= +>>>>>>> 011aef0... restore +======= +>>>>>>> 80a07dd... macro define in upgrade_proto +<<<<<<< b30868495fbae44b9556c621a319178d919bf562 +======= +======= +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 +======= +>>>>>>> restore +>>>>>>> 0a85215... triplet data generation and network update +======= +>>>>>>> 03cac8c... 
fixed two bugs with prototext format +>>>>>>> add initiate class name of triplet loss layer + virtual void InitSkipPropNet(bool test_skip_true) { + string proto = + "name: 'SkipPropTestNetwork' " + "layer { " + " name: 'data' " + " type: 'DummyData' " + " dummy_data_param { " + " shape { " + " dim: 5 " + " dim: 2 " + " dim: 3 " + " dim: 4 " + " } " + " data_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " shape { " + " dim: 5 " + " } " + " data_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'silence' " + " bottom: 'label' " + " type: 'Silence' " + "} " + "layer { " + " name: 'innerproduct' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'data' " + " top: 'innerproduct' " + "} " + "layer { " + " name: 'ip_fake_labels' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " bottom: 'data' " + " top: 'fake_labels' " + "} " + "layer { " + " name: 'argmax' " + " bottom: 'fake_labels' " + " top: 'label_argmax' " + " type: 'ArgMax' " + "} " + "layer { " + " name: 'loss' " + " bottom: 'innerproduct' " + " bottom: 'label_argmax' "; + if (test_skip_true) +<<<<<<< HEAD +<<<<<<< HEAD +<<<<<<< HEAD +<<<<<<< HEAD +<<<<<<< HEAD +<<<<<<< b30868495fbae44b9556c621a319178d919bf562 +======= +<<<<<<< HEAD +<<<<<<< HEAD +<<<<<<< HEAD +>>>>>>> add initiate class name of triplet loss layer +======= +>>>>>>> 00341b2... triplet data generation and network update +======= +>>>>>>> 1882ac9... 
add initiate class name of triplet loss layer +======= +>>>>>>> 08d5d6d... macro define in upgrade_proto +<<<<<<< b30868495fbae44b9556c621a319178d919bf562 +======= +======= +>>>>>>> 1f7ef32... add RGB data training as an option in triplet training +======= +======= +>>>>>>> 8f22aea... add initiate class name of triplet loss layer +======= +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a +>>>>>>> 0a85215... triplet data generation and network update +>>>>>>> 0dbadac... triplet data generation and network update +>>>>>>> add initiate class name of triplet loss layer + proto += " propagate_down: true " + " propagate_down: false "; + else + proto += " propagate_down: true " + " propagate_down: true "; +======= + proto += " propagate_down: [true, false] "; + else + proto += " propagate_down: [true, true] "; +>>>>>>> 011aef0... restore +<<<<<<< HEAD +<<<<<<< HEAD +======= +>>>>>>> 1882ac9... add initiate class name of triplet loss layer +======= +======= +>>>>>>> 03cac8c... fixed two bugs with prototext format + proto += " propagate_down: true " + " propagate_down: false "; + else + proto += " propagate_down: true " + " propagate_down: true "; +<<<<<<< HEAD +>>>>>>> 98fb438... fixed two bugs with prototext format +<<<<<<< HEAD +<<<<<<< HEAD +======= +>>>>>>> 00341b2... triplet data generation and network update +======= +>>>>>>> 1882ac9... add initiate class name of triplet loss layer +======= +======= + proto += " propagate_down: [true, false] "; + else + proto += " propagate_down: [true, true] "; +>>>>>>> 80a07dd... macro define in upgrade_proto +>>>>>>> 08d5d6d... macro define in upgrade_proto +<<<<<<< b30868495fbae44b9556c621a319178d919bf562 +======= +======= +======= + proto += " propagate_down: true " + " propagate_down: false "; + else + proto += " propagate_down: true " + " propagate_down: true "; +>>>>>>> b266250... fixed two bugs with prototext format +<<<<<<< HEAD +>>>>>>> 1f7ef32... 
add RGB data training as an option in triplet training +======= +======= +>>>>>>> restore +>>>>>>> 0a85215... triplet data generation and network update +<<<<<<< HEAD +>>>>>>> 0dbadac... triplet data generation and network update +======= +======= +>>>>>>> 03cac8c... fixed two bugs with prototext format +>>>>>>> 8f22aea... add initiate class name of triplet loss layer +>>>>>>> add initiate class name of triplet loss layer + proto += + " top: 'cross_entropy_loss' " + " type: 'SigmoidCrossEntropyLoss' " + " loss_weight: 0.1 " + "} "; + InitNetFromProtoString(proto); + } + +<<<<<<< HEAD +<<<<<<< HEAD +<<<<<<< b30868495fbae44b9556c621a319178d919bf562 +======= +<<<<<<< HEAD +<<<<<<< HEAD +>>>>>>> add initiate class name of triplet loss layer +======= +>>>>>>> 083f61b... New triplet loss layer added(beta1 version-no test source files) +======= +>>>>>>> 011aef0... restore +======= +>>>>>>> 4d8130b... New triplet loss layer added(beta1 version-no test source files) +======= +>>>>>>> 80a07dd... macro define in upgrade_proto +<<<<<<< b30868495fbae44b9556c621a319178d919bf562 +======= +======= +<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a +======= +>>>>>>> triplet data generation and network update +======= +>>>>>>> restore +>>>>>>> 0a85215... triplet data generation and network update +======= +>>>>>>> 03cac8c... 
fixed two bugs with prototext format +>>>>>>> add initiate class name of triplet loss layer + int seed_; + shared_ptr > net_; +}; + +TYPED_TEST_CASE(NetTest, TestDtypesAndDevices); + +TYPED_TEST(NetTest, TestHasBlob) { + this->InitTinyNet(); + EXPECT_TRUE(this->net_->has_blob("data")); + EXPECT_TRUE(this->net_->has_blob("label")); + EXPECT_TRUE(this->net_->has_blob("innerproduct")); + EXPECT_FALSE(this->net_->has_blob("loss")); + EXPECT_TRUE(this->net_->has_blob("top_loss")); +} + +TYPED_TEST(NetTest, TestGetBlob) { + this->InitTinyNet(); + EXPECT_EQ(this->net_->blob_by_name("data"), this->net_->blobs()[0]); + EXPECT_EQ(this->net_->blob_by_name("label"), this->net_->blobs()[1]); + EXPECT_EQ(this->net_->blob_by_name("innerproduct"), this->net_->blobs()[2]); + EXPECT_FALSE(this->net_->blob_by_name("loss")); + EXPECT_EQ(this->net_->blob_by_name("top_loss"), this->net_->blobs()[3]); +} + +TYPED_TEST(NetTest, TestHasLayer) { + this->InitTinyNet(); + EXPECT_TRUE(this->net_->has_layer("data")); + EXPECT_TRUE(this->net_->has_layer("innerproduct")); + EXPECT_TRUE(this->net_->has_layer("loss")); + EXPECT_FALSE(this->net_->has_layer("label")); +} + +TYPED_TEST(NetTest, TestGetLayerByName) { + this->InitTinyNet(); + EXPECT_EQ(this->net_->layer_by_name("data"), this->net_->layers()[0]); + EXPECT_EQ(this->net_->layer_by_name("innerproduct"), this->net_->layers()[1]); + EXPECT_EQ(this->net_->layer_by_name("loss"), this->net_->layers()[2]); + EXPECT_FALSE(this->net_->layer_by_name("label")); +} + +TYPED_TEST(NetTest, TestBottomNeedBackward) { + this->InitTinyNet(); + const vector >& bottom_need_backward = + this->net_->bottom_need_backward(); + EXPECT_EQ(3, bottom_need_backward.size()); + EXPECT_EQ(0, bottom_need_backward[0].size()); + EXPECT_EQ(1, bottom_need_backward[1].size()); + EXPECT_EQ(false, bottom_need_backward[1][0]); + EXPECT_EQ(2, bottom_need_backward[2].size()); + EXPECT_EQ(true, bottom_need_backward[2][0]); + EXPECT_EQ(false, bottom_need_backward[2][1]); +} + 
+TYPED_TEST(NetTest, TestBottomNeedBackwardForce) { + const bool force_backward = true; + this->InitTinyNet(force_backward); + const vector >& bottom_need_backward = + this->net_->bottom_need_backward(); + EXPECT_EQ(3, bottom_need_backward.size()); + EXPECT_EQ(0, bottom_need_backward[0].size()); + EXPECT_EQ(1, bottom_need_backward[1].size()); + EXPECT_EQ(true, bottom_need_backward[1][0]); + EXPECT_EQ(2, bottom_need_backward[2].size()); + EXPECT_EQ(true, bottom_need_backward[2][0]); + EXPECT_EQ(false, bottom_need_backward[2][1]); +} + +TYPED_TEST(NetTest, TestBottomNeedBackwardEuclideanForce) { + const bool force_backward = true; + this->InitTinyNetEuclidean(force_backward); + const vector >& bottom_need_backward = + this->net_->bottom_need_backward(); + EXPECT_EQ(3, bottom_need_backward.size()); + EXPECT_EQ(0, bottom_need_backward[0].size()); + EXPECT_EQ(1, bottom_need_backward[1].size()); + EXPECT_EQ(true, bottom_need_backward[1][0]); + EXPECT_EQ(2, bottom_need_backward[2].size()); + EXPECT_EQ(true, bottom_need_backward[2][0]); + EXPECT_EQ(true, bottom_need_backward[2][1]); +} + +TYPED_TEST(NetTest, TestBottomNeedBackwardTricky) { + this->InitTrickyNet(); + const vector >& bottom_need_backward = + this->net_->bottom_need_backward(); + EXPECT_EQ(4, bottom_need_backward.size()); + EXPECT_EQ(0, bottom_need_backward[0].size()); + EXPECT_EQ(1, bottom_need_backward[1].size()); + EXPECT_EQ(false, bottom_need_backward[1][0]); + EXPECT_EQ(1, bottom_need_backward[2].size()); + EXPECT_EQ(false, bottom_need_backward[2][0]); + EXPECT_EQ(2, bottom_need_backward[3].size()); + EXPECT_EQ(true, bottom_need_backward[3][0]); + // The label input to the SoftmaxLossLayer should say it "needs backward" + // since it has weights under it, even though we expect this to cause a crash + // at training/test time. 
+ EXPECT_EQ(true, bottom_need_backward[3][1]); +} + +TYPED_TEST(NetTest, TestLossWeight) { + typedef typename TypeParam::Dtype Dtype; + // First, compute the loss and gradients with no loss_weight specified. + // In this case, the loss weight for the 'EuclideanLoss' layer should default + // to 1. + vector*> bottom; + Caffe::set_random_seed(this->seed_); + const bool kForceBackward = true; + this->InitUnsharedWeightsNet(NULL, NULL, kForceBackward); + const Dtype loss = this->net_->ForwardBackward(bottom); + const bool kCopyDiff = true; + vector > > blob_grads; + this->CopyNetBlobs(kCopyDiff, &blob_grads); + vector > > param_grads; + this->CopyNetParams(kCopyDiff, ¶m_grads); + // Check that the loss is non-trivial, otherwise the test doesn't prove much. + const Dtype kMinLossAbsValue = 1e-2; + ASSERT_GE(fabs(loss), kMinLossAbsValue); + const Dtype kErrorMargin = 1e-4; + const int kNumLossWeights = 6; + Dtype kLossWeights[kNumLossWeights] = {2, 0, 1, -1, -2.5, 3.7}; + for (int i = 0; i < kNumLossWeights; ++i) { + Caffe::set_random_seed(this->seed_); + this->InitUnsharedWeightsNet(&kLossWeights[i], NULL, kForceBackward); + const Dtype weighted_loss = this->net_->ForwardBackward(bottom); + const Dtype error_margin = kErrorMargin * fabs(kLossWeights[i]); + EXPECT_NEAR(loss * kLossWeights[i], weighted_loss, error_margin) + << "loss weight = " << kLossWeights[i]; + const vector > >& weighted_blobs = + this->net_->blobs(); + ASSERT_EQ(blob_grads.size(), weighted_blobs.size()); + for (int j = 0; j < blob_grads.size(); ++j) { + ASSERT_EQ(blob_grads[j]->count(), weighted_blobs[j]->count()); + for (int k = 0; k < blob_grads[j]->count(); ++k) { + EXPECT_NEAR(blob_grads[j]->cpu_diff()[k] * kLossWeights[i], + weighted_blobs[j]->cpu_diff()[k], error_margin); + } + } + const vector > >& weighted_params = + this->net_->params(); + ASSERT_EQ(param_grads.size(), weighted_params.size()); + for (int j = 0; j < param_grads.size(); ++j) { + ASSERT_EQ(param_grads[j]->count(), 
weighted_params[j]->count()); + for (int k = 0; k < param_grads[j]->count(); ++k) { + EXPECT_NEAR(param_grads[j]->cpu_diff()[k] * kLossWeights[i], + weighted_params[j]->cpu_diff()[k], error_margin); + } + } + } +} + +TYPED_TEST(NetTest, TestLossWeightMidNet) { + typedef typename TypeParam::Dtype Dtype; + vector*> bottom; + Caffe::set_random_seed(this->seed_); + const bool kForceBackward = true; + Dtype loss_weight = 0; + Dtype midnet_loss_weight = 1; + this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, + kForceBackward); + const Dtype loss = this->net_->ForwardBackward(bottom); + const bool kCopyDiff = true; + const bool kReshape = true; + Blob data_grad; + data_grad.CopyFrom(*this->net_->blob_by_name("data"), kCopyDiff, kReshape); + // Check that the loss is non-trivial, otherwise the test doesn't prove much. + const Dtype kMinLossAbsValue = 1e-2; + ASSERT_GE(fabs(loss), kMinLossAbsValue); + const Dtype kErrorMargin = 1e-4; + const int kNumLossWeights = 6; + Dtype kLossWeights[kNumLossWeights] = {2, 0, 1, -1, -2.5, 3.7}; + for (int i = 0; i < kNumLossWeights; ++i) { + Caffe::set_random_seed(this->seed_); + this->InitUnsharedWeightsNet(&loss_weight, &kLossWeights[i], + kForceBackward); + const Dtype weighted_loss = this->net_->ForwardBackward(bottom); + const Dtype error_margin = kErrorMargin * fabs(kLossWeights[i]); + EXPECT_NEAR(loss * kLossWeights[i], weighted_loss, error_margin) + << "loss weight = " << kLossWeights[i]; + const shared_ptr >& weighted_blob = + this->net_->blob_by_name("data"); + ASSERT_EQ(data_grad.count(), weighted_blob->count()); + for (int j = 0; j < data_grad.count(); ++j) { + EXPECT_NEAR(data_grad.cpu_diff()[j] * kLossWeights[i], + weighted_blob->cpu_diff()[j], error_margin); + } + } +} + +TYPED_TEST(NetTest, TestComboLossWeight) { + typedef typename TypeParam::Dtype Dtype; + vector*> bottom; + Dtype loss_weight; + Dtype midnet_loss_weight; + const bool kForceBackward = true; + const Dtype kErrorMargin = 1e-4; + + // Get the 
loss and gradients with 'EuclideanLoss' weight 1, + // 'InnerProduct' weight 1. + loss_weight = 1; + midnet_loss_weight = 1; + Caffe::set_random_seed(this->seed_); + this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, + kForceBackward); + const Dtype loss = this->net_->ForwardBackward(bottom); + const bool kCopyDiff = true; + vector > > blob_grads; + this->CopyNetBlobs(kCopyDiff, &blob_grads); + vector > > param_grads; + this->CopyNetParams(kCopyDiff, ¶m_grads); + + loss_weight = 2; + midnet_loss_weight = 1; + Caffe::set_random_seed(this->seed_); + this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, + kForceBackward); + const Dtype loss_main_2 = this->net_->ForwardBackward(bottom); + vector > > blob_grads_loss_2; + this->CopyNetBlobs(kCopyDiff, &blob_grads_loss_2); + vector > > param_grads_loss_2; + this->CopyNetParams(kCopyDiff, ¶m_grads_loss_2); + + loss_weight = 3; + midnet_loss_weight = 1; + Caffe::set_random_seed(this->seed_); + this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, + kForceBackward); + const Dtype loss_main_3 = this->net_->ForwardBackward(bottom); + const vector > >& blob_grads_loss_3 = + this->net_->blobs(); + ASSERT_EQ(blob_grads.size(), blob_grads_loss_3.size()); + ASSERT_EQ(blob_grads_loss_2.size(), blob_grads_loss_3.size()); + for (int j = 0; j < blob_grads.size(); ++j) { + const string& blob_name = this->net_->blob_names()[j]; + bool grad_should_change = true; + if (blob_name == "innerproduct1_innerproduct1_0_split_0") { + grad_should_change = false; + } + ASSERT_EQ(blob_grads[j]->count(), blob_grads_loss_3[j]->count()); + ASSERT_EQ(blob_grads_loss_2[j]->count(), blob_grads_loss_3[j]->count()); + for (int k = 0; k < blob_grads[j]->count(); ++k) { + const Dtype grad_diff_2 = blob_grads_loss_2[j]->cpu_diff()[k] - + blob_grads[j]->cpu_diff()[k]; + const Dtype grad_diff_3 = blob_grads_loss_3[j]->cpu_diff()[k] - + blob_grads[j]->cpu_diff()[k]; + if (grad_should_change) { + // Test non-triviality. 
+ const Dtype kMinGradDiffAbsValue = 1e-4; + EXPECT_GT(fabs(grad_diff_2), kMinGradDiffAbsValue) << blob_name; + EXPECT_NEAR(2 * grad_diff_2, grad_diff_3, kErrorMargin) << blob_name; + } else { + EXPECT_EQ(0, grad_diff_2) << blob_name; + EXPECT_EQ(0, grad_diff_3) << blob_name; + } + } + } + + loss_weight = 1; + midnet_loss_weight = 2; + Caffe::set_random_seed(this->seed_); + this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, + kForceBackward); + const Dtype loss_midnet_2 = this->net_->ForwardBackward(bottom); + this->CopyNetBlobs(kCopyDiff, &blob_grads_loss_2); + this->CopyNetParams(kCopyDiff, ¶m_grads_loss_2); + + loss_weight = 1; + midnet_loss_weight = 3; + Caffe::set_random_seed(this->seed_); + this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, + kForceBackward); + const Dtype loss_midnet_3 = this->net_->ForwardBackward(bottom); + const vector > >& blob_grads_midnet_loss_3 = + this->net_->blobs(); + ASSERT_EQ(blob_grads.size(), blob_grads_midnet_loss_3.size()); + ASSERT_EQ(blob_grads_loss_2.size(), blob_grads_midnet_loss_3.size()); + const vector& blob_names = this->net_->blob_names(); + for (int j = 0; j < blob_grads.size(); ++j) { + const string& blob_name = blob_names[j]; + bool grad_should_change = false; + if (blob_name == "innerproduct1" || + blob_name == "innerproduct1_innerproduct1_0_split_0" || + blob_name == "data_data_0_split_0" || blob_name == "data") { + grad_should_change = true; + } + ASSERT_EQ(blob_grads[j]->count(), blob_grads_midnet_loss_3[j]->count()); + ASSERT_EQ(blob_grads[j]->count(), blob_grads_loss_2[j]->count()); + for (int k = 0; k < blob_grads[j]->count(); ++k) { + const Dtype grad_diff_2 = blob_grads_loss_2[j]->cpu_diff()[k] - + blob_grads[j]->cpu_diff()[k]; + const Dtype grad_diff_3 = blob_grads_midnet_loss_3[j]->cpu_diff()[k] - + blob_grads[j]->cpu_diff()[k]; + if (grad_should_change) { + // Test non-triviality. 
+ const Dtype kMinGradDiffAbsValue = 1e-4; + EXPECT_GT(fabs(grad_diff_2), kMinGradDiffAbsValue) << blob_name; + EXPECT_NEAR(2 * grad_diff_2, grad_diff_3, kErrorMargin) << blob_name; + } else { + EXPECT_EQ(0, grad_diff_2) << blob_name; + EXPECT_EQ(0, grad_diff_3) << blob_name; + } + } + } + + const Dtype kMinLossDiffAbsValue = 1e-4; + + Dtype loss_diff_2 = loss_main_2 - loss; + // Test non-triviality. + EXPECT_GT(fabs(loss_diff_2), kMinLossDiffAbsValue); + Dtype loss_diff_3 = loss_main_3 - loss; + EXPECT_NEAR(2 * loss_diff_2, loss_diff_3, kErrorMargin); + + loss_diff_2 = loss_midnet_2 - loss; + // Test non-triviality. + EXPECT_GT(fabs(loss_diff_2), kMinLossDiffAbsValue); + loss_diff_3 = loss_midnet_3 - loss; + EXPECT_NEAR(2 * loss_diff_2, loss_diff_3, kErrorMargin); +} + +TYPED_TEST(NetTest, TestBackwardWithAccuracyLayer) { + typedef typename TypeParam::Dtype Dtype; + const bool kForceBackward = false; + const bool kAccuracyLayer = true; + this->InitTinyNet(kForceBackward, kAccuracyLayer); + EXPECT_TRUE(this->net_->has_blob("accuracy")); + vector*> bottom; + // Test that we can do Backward even though we have an 'Accuracy' layer. 
+ this->net_->ForwardBackward(bottom); +} + +TYPED_TEST(NetTest, TestUnsharedWeightsDataNet) { + typedef typename TypeParam::Dtype Dtype; + this->InitUnsharedWeightsNet(); + vector*> bottom; + Dtype loss; + this->net_->Forward(bottom, &loss); + EXPECT_GT(loss, 0); +} + +TYPED_TEST(NetTest, TestSharedWeightsDataNet) { + typedef typename TypeParam::Dtype Dtype; + this->InitSharedWeightsNet(); + vector*> bottom; + Dtype loss; + this->net_->Forward(bottom, &loss); + EXPECT_FLOAT_EQ(loss, 0); +} + +TYPED_TEST(NetTest, TestUnsharedWeightsDiffNet) { + typedef typename TypeParam::Dtype Dtype; + this->InitUnsharedWeightsNet(); + vector*> bottom; + Net* net = this->net_.get(); + net->Forward(bottom); + net->Backward(); + Layer* ip1_layer = net->layer_by_name("innerproduct1").get(); + Layer* ip2_layer = net->layer_by_name("innerproduct2").get(); + const int count = ip1_layer->blobs()[0]->count(); + const Dtype* grad1 = ip1_layer->blobs()[0]->cpu_diff(); + const Dtype* grad2 = ip2_layer->blobs()[0]->cpu_diff(); + for (int i = 0; i < count; ++i) { + EXPECT_GT(fabs(grad1[i]), 0); + EXPECT_FLOAT_EQ(-1 * grad1[i], grad2[i]); + } +} + +TYPED_TEST(NetTest, TestSharedWeightsDiffNet) { + typedef typename TypeParam::Dtype Dtype; + this->InitSharedWeightsNet(); + vector*> bottom; + Net* net = this->net_.get(); + Dtype loss; + net->Forward(bottom, &loss); + net->Backward(); + EXPECT_FLOAT_EQ(loss, 0); + Layer* ip1_layer = net->layer_by_name("innerproduct1").get(); + Layer* ip2_layer = net->layer_by_name("innerproduct2").get(); + const int count = ip1_layer->blobs()[0]->count(); + const Dtype* grad1 = ip1_layer->blobs()[0]->cpu_diff(); + const Dtype* grad2 = ip2_layer->blobs()[0]->cpu_diff(); + for (int i = 0; i < count; ++i) { + EXPECT_FLOAT_EQ(0, grad1[i]); + EXPECT_FLOAT_EQ(0, grad2[i]); + } +} + +TYPED_TEST(NetTest, TestSharedWeightsUpdate) { + typedef typename TypeParam::Dtype Dtype; + Caffe::set_random_seed(this->seed_); + this->InitDiffDataSharedWeightsNet(); + vector*> bottom; + 
EXPECT_EQ(this->net_->layer_names()[1], "innerproduct1"); + EXPECT_EQ(this->net_->layer_names()[2], "innerproduct2"); + Blob* ip1_weights = this->net_->layers()[1]->blobs()[0].get(); + Blob* ip2_weights = this->net_->layers()[2]->blobs()[0].get(); + // Check that data blobs of shared weights share the same location in memory. + EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); + // Check that diff blobs of shared weights are at different locations in + // memory. (The diffs should be accumulated at update time.) + EXPECT_NE(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); + this->net_->Forward(bottom); + this->net_->Backward(); + // Compute the expected update as the data minus the two diffs. + Blob shared_params; + const bool reshape = true; + const bool copy_diff = false; + shared_params.CopyFrom(*ip1_weights, copy_diff, reshape); + shared_params.CopyFrom(*ip1_weights, !copy_diff, reshape); + const int count = ip1_weights->count(); + // Make sure the diffs are non-trivial. + for (int i = 0; i < count; ++i) { + EXPECT_NE(0, ip1_weights->cpu_diff()[i]); + EXPECT_NE(0, ip2_weights->cpu_diff()[i]); + EXPECT_NE(ip1_weights->cpu_diff()[i], ip2_weights->cpu_diff()[i]); + } + caffe_axpy(count, Dtype(1), ip2_weights->cpu_diff(), + shared_params.mutable_cpu_diff()); + caffe_axpy(count, Dtype(-1), shared_params.cpu_diff(), + shared_params.mutable_cpu_data()); + const Dtype* expected_updated_params = shared_params.cpu_data(); + this->net_->Update(); + const Dtype* actual_updated_params = ip1_weights->cpu_data(); + for (int i = 0; i < count; ++i) { + EXPECT_EQ(expected_updated_params[i], actual_updated_params[i]); + } + // Check that data blobs of shared weights STILL point to the same memory + // location (because ... who knows). 
+ EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); + + Caffe::set_random_seed(this->seed_); + this->InitDiffDataUnsharedWeightsNet(); + EXPECT_EQ(this->net_->layer_names()[1], "innerproduct1"); + EXPECT_EQ(this->net_->layer_names()[2], "innerproduct2"); + ip1_weights = this->net_->layers()[1]->blobs()[0].get(); + ip2_weights = this->net_->layers()[2]->blobs()[0].get(); + // Check that data and diff blobs of unshared weights are at different + // locations in memory. + EXPECT_NE(ip1_weights->cpu_data(), ip2_weights->cpu_data()); + EXPECT_NE(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); + this->net_->Forward(bottom); + this->net_->Backward(); + // Compute the expected update. + Blob unshared_params1; + unshared_params1.CopyFrom(*ip1_weights, copy_diff, reshape); + unshared_params1.CopyFrom(*ip1_weights, !copy_diff, reshape); + Blob unshared_params2; + unshared_params2.CopyFrom(*ip2_weights, copy_diff, reshape); + unshared_params2.CopyFrom(*ip2_weights, !copy_diff, reshape); + // Make sure the diffs are non-trivial and sum to the diff in the shared net. 
+ for (int i = 0; i < count; ++i) { + EXPECT_NE(0, ip1_weights->cpu_diff()[i]); + EXPECT_NE(0, ip2_weights->cpu_diff()[i]); + EXPECT_NE(ip1_weights->cpu_diff()[i], ip2_weights->cpu_diff()[i]); + EXPECT_EQ(ip1_weights->cpu_diff()[i] + ip2_weights->cpu_diff()[i], + shared_params.cpu_diff()[i]); + } + caffe_axpy(count, Dtype(-1), ip1_weights->cpu_diff(), + unshared_params1.mutable_cpu_data()); + caffe_axpy(count, Dtype(-1), ip2_weights->cpu_diff(), + unshared_params2.mutable_cpu_data()); + const Dtype* expected_updated_params1 = unshared_params1.cpu_data(); + const Dtype* expected_updated_params2 = unshared_params2.cpu_data(); + this->net_->Update(); + const Dtype* actual_updated_params1 = ip1_weights->cpu_data(); + const Dtype* actual_updated_params2 = ip2_weights->cpu_data(); + for (int i = 0; i < count; ++i) { + EXPECT_EQ(expected_updated_params1[i], actual_updated_params1[i]); + EXPECT_EQ(expected_updated_params2[i], actual_updated_params2[i]); + EXPECT_NE(actual_updated_params1[i], actual_updated_params2[i]); + EXPECT_NE(expected_updated_params, expected_updated_params1); + } +} + +TYPED_TEST(NetTest, TestSharedWeightsResume) { + typedef typename TypeParam::Dtype Dtype; + + // Create a net with weight sharing; Update it once. + Caffe::set_random_seed(this->seed_); + this->InitDiffDataSharedWeightsNet(); + vector*> bottom; + EXPECT_EQ(this->net_->layer_names()[1], "innerproduct1"); + EXPECT_EQ(this->net_->layer_names()[2], "innerproduct2"); + Blob* ip1_weights = this->net_->layers()[1]->blobs()[0].get(); + Blob* ip2_weights = this->net_->layers()[2]->blobs()[0].get(); + // Check that data and diff blobs of shared weights share the same memory + // locations. 
+ EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); + EXPECT_EQ(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); + this->net_->ForwardBackward(bottom); + this->net_->Update(); + Blob shared_params; + const bool kReshape = true; + const bool kCopyDiff = false; + shared_params.CopyFrom(*ip1_weights, kCopyDiff, kReshape); + const int count = ip1_weights->count(); + + // Write the net to a NetParameter, as in Solver::Snapshot. + NetParameter net_param; + this->net_->ToProto(&net_param); + + // Reinitialize the net and copy parameters from net_param, as in + // Solver::Restore. + Caffe::set_random_seed(this->seed_); + this->InitDiffDataSharedWeightsNet(); + this->net_->CopyTrainedLayersFrom(net_param); + ip1_weights = this->net_->layers()[1]->blobs()[0].get(); + ip2_weights = this->net_->layers()[2]->blobs()[0].get(); + ASSERT_FALSE(NULL == ip1_weights); + ASSERT_FALSE(NULL == ip2_weights); + EXPECT_NE(ip1_weights, ip2_weights); + // Check that data and diff blobs of shared weights share the same memory + // locations. + EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); + EXPECT_EQ(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); + for (int i = 0; i < count; ++i) { + EXPECT_FLOAT_EQ(shared_params.cpu_data()[i], ip1_weights->cpu_data()[i]); + } +} + +TYPED_TEST(NetTest, TestParamPropagateDown) { + typedef typename TypeParam::Dtype Dtype; + vector*> bottom; + const bool kBiasTerm = true, kForceBackward = false; + const Dtype* kLossWeight1 = NULL; + const Dtype* kLossWeight2 = NULL; + + // Run the net with all params learned; check that gradients are non-zero. 
+ Caffe::set_random_seed(this->seed_); + Dtype blobs_lr_w1 = 1, blobs_lr_w2 = 1, blobs_lr_b1 = 2, blobs_lr_b2 = 2; + this->InitUnsharedWeightsNet(kLossWeight1, kLossWeight2, kForceBackward, + kBiasTerm, blobs_lr_w1, blobs_lr_w2, blobs_lr_b1, blobs_lr_b2); + this->net_->Forward(bottom); + this->net_->Backward(); + const vector > >& params = this->net_->params(); + const int num_params = params.size(); + ASSERT_EQ(4, num_params); + const Dtype kNonZeroTestMin = 1e-3; + vector param_asums(params.size()); + for (int i = 0; i < num_params; ++i) { + const Dtype param_asum = + caffe_cpu_asum(params[i]->count(), params[i]->cpu_diff()); + param_asums[i] = param_asum; + EXPECT_GT(param_asum, kNonZeroTestMin); + } + + // Change the learning rates to different non-zero values; should see same + // gradients. + Caffe::set_random_seed(this->seed_); + blobs_lr_w1 *= 2, blobs_lr_w2 *= 2, blobs_lr_b1 *= 2, blobs_lr_b2 *= 2; + this->InitUnsharedWeightsNet(kLossWeight1, kLossWeight2, kForceBackward, + kBiasTerm, blobs_lr_w1, blobs_lr_w2, blobs_lr_b1, blobs_lr_b2); + this->net_->Forward(bottom); + this->net_->Backward(); + const vector > >& params2 = this->net_->params(); + ASSERT_EQ(num_params, params2.size()); + for (int i = 0; i < num_params; ++i) { + const Dtype param_asum = + caffe_cpu_asum(params2[i]->count(), params2[i]->cpu_diff()); + EXPECT_FLOAT_EQ(param_asum, param_asums[i]); + } + + // Change a subset of the learning rates to zero; check that we see zero + // gradients for those. 
+ Caffe::set_random_seed(this->seed_); + blobs_lr_w1 = 1, blobs_lr_w2 = 0, blobs_lr_b1 = 0, blobs_lr_b2 = 1; + this->InitUnsharedWeightsNet(kLossWeight1, kLossWeight2, kForceBackward, + kBiasTerm, blobs_lr_w1, blobs_lr_w2, blobs_lr_b1, blobs_lr_b2); + this->net_->Forward(bottom); + this->net_->Backward(); + const vector > >& params3 = this->net_->params(); + ASSERT_EQ(num_params, params3.size()); + for (int i = 0; i < num_params; ++i) { + const Dtype param_asum = + caffe_cpu_asum(params3[i]->count(), params3[i]->cpu_diff()); + if (i == 1 || i == 2) { + EXPECT_FLOAT_EQ(0, param_asum); + } else { + EXPECT_FLOAT_EQ(param_asum, param_asums[i]); + } + } + + // Change the opposite subset of the learning rates to zero. + Caffe::set_random_seed(this->seed_); + blobs_lr_w1 = 0, blobs_lr_w2 = 1, blobs_lr_b1 = 1, blobs_lr_b2 = 0; + this->InitUnsharedWeightsNet(kLossWeight1, kLossWeight2, kForceBackward, + kBiasTerm, blobs_lr_w1, blobs_lr_w2, blobs_lr_b1, blobs_lr_b2); + this->net_->Forward(bottom); + this->net_->Backward(); + const vector > >& params4 = this->net_->params(); + ASSERT_EQ(num_params, params4.size()); + for (int i = 0; i < num_params; ++i) { + const Dtype param_asum = + caffe_cpu_asum(params4[i]->count(), params4[i]->cpu_diff()); + if (i == 0 || i == 3) { + EXPECT_FLOAT_EQ(0, param_asum); + } else { + EXPECT_FLOAT_EQ(param_asum, param_asums[i]); + } + } +} + +TYPED_TEST(NetTest, TestFromTo) { + typedef typename TypeParam::Dtype Dtype; + this->InitTinyNet(); + + // Run Forward and Backward, recording the data diff and loss. + Blob data; + data.ReshapeLike(*this->net_->blob_by_name("data")); + this->net_->ForwardPrefilled(); + this->net_->Backward(); + data.CopyFrom(*this->net_->blob_by_name("data"), true, true); + const Dtype *loss_ptr = this->net_->output_blobs()[0]->cpu_data(); + Dtype loss = *loss_ptr; + + // Check that combining partial Forwards gives the same loss. 
+ for (int i = 1; i < this->net_->layers().size(); ++i) { + // Note that we skip layer zero to keep the same data. + this->net_->ForwardFromTo(1, 1); + if (i < this->net_->layers().size() - 1) { + this->net_->ForwardFrom(i + 1); + } + EXPECT_EQ(loss, *loss_ptr); + } + + // Check that combining partial Backwards gives the same data diff. + for (int i = 1; i < this->net_->layers().size(); ++i) { + this->net_->BackwardTo(i); + this->net_->BackwardFrom(i - 1); + for (int j = 0; j < data.count(); ++j) { + EXPECT_EQ(data.cpu_diff()[j], + this->net_->blob_by_name("data")->cpu_diff()[j]); + } + } +} + +class FilterNetTest : public ::testing::Test { + protected: + void RunFilterNetTest( + const string& input_param_string, const string& filtered_param_string) { + NetParameter input_param; + CHECK(google::protobuf::TextFormat::ParseFromString( + input_param_string, &input_param)); + NetParameter expected_filtered_param; + CHECK(google::protobuf::TextFormat::ParseFromString( + filtered_param_string, &expected_filtered_param)); + NetParameter actual_filtered_param; + Net::FilterNet(input_param, &actual_filtered_param); + EXPECT_EQ(expected_filtered_param.DebugString(), + actual_filtered_param.DebugString()); + // Also test idempotence. 
+ NetParameter double_filtered_param; + Net::FilterNet(actual_filtered_param, &double_filtered_param); + EXPECT_EQ(actual_filtered_param.DebugString(), + double_filtered_param.DebugString()); + } +}; + +TEST_F(FilterNetTest, TestNoFilter) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, input_proto); +} + +TEST_F(FilterNetTest, TestFilterLeNetTrainTest) { + const string& input_proto = + "name: 'LeNet' " + "layer { " + " name: 'mnist' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + " data_param { " + " source: 'mnist-train-leveldb' " + " batch_size: 64 " + " } " + " transform_param { " + " scale: 0.00390625 " + " } " + " include: { phase: TRAIN } " + "} " + "layer { " + " name: 'mnist' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + " data_param { " + " source: 'mnist-test-leveldb' " + " batch_size: 100 " + " } " + " transform_param { " + " scale: 0.00390625 " + " } " + " include: { phase: TEST } " + "} " + "layer { " + " name: 'conv1' " + " type: 'Convolution' " + " bottom: 'data' " + " top: 'conv1' " + " param { " + " lr_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " } " + " convolution_param { " + " num_output: 20 " + " kernel_size: 5 " + " stride: 1 " + " weight_filler { " + " type: 'xavier' " + " } " + " bias_filler { " + " type: 'constant' " + " } " + " } " + "} " + "layer { " + " name: 'ip1' " + " type: 'InnerProduct' " + " bottom: 'conv1' " + " top: 'ip1' " + " param { " + " lr_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " } " + " inner_product_param { " + " num_output: 10 " + " weight_filler { " + " type: 'xavier' " + " } " + " bias_filler { " + " type: 
'constant' " + " } " + " } " + "} " + "layer { " + " name: 'accuracy' " + " type: 'Accuracy' " + " bottom: 'ip1' " + " bottom: 'label' " + " top: 'accuracy' " + " include: { phase: TEST } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'ip2' " + " bottom: 'label' " + " top: 'loss' " + "} "; + const string input_proto_train = "state: { phase: TRAIN } " + input_proto; + const string input_proto_test = "state: { phase: TEST } " + input_proto; + const string output_proto_train = + "name: 'LeNet' " + "layer { " + " name: 'mnist' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + " data_param { " + " source: 'mnist-train-leveldb' " + " batch_size: 64 " + " } " + " transform_param { " + " scale: 0.00390625 " + " } " + " include: { phase: TRAIN } " + "} " + "layer { " + " name: 'conv1' " + " type: 'Convolution' " + " bottom: 'data' " + " top: 'conv1' " + " param { " + " lr_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " } " + " convolution_param { " + " num_output: 20 " + " kernel_size: 5 " + " stride: 1 " + " weight_filler { " + " type: 'xavier' " + " } " + " bias_filler { " + " type: 'constant' " + " } " + " } " + "} " + "layer { " + " name: 'ip1' " + " type: 'InnerProduct' " + " bottom: 'conv1' " + " top: 'ip1' " + " param { " + " lr_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " } " + " inner_product_param { " + " num_output: 10 " + " weight_filler { " + " type: 'xavier' " + " } " + " bias_filler { " + " type: 'constant' " + " } " + " } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'ip2' " + " bottom: 'label' " + " top: 'loss' " + "} "; + const string& output_proto_test = + "name: 'LeNet' " + "layer { " + " name: 'mnist' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + " data_param { " + " source: 'mnist-test-leveldb' " + " batch_size: 100 " + " } " + " transform_param { " + " scale: 0.00390625 " + " } " + " include: { phase: TEST } " + "} " + "layer { " + " name: 
'conv1' " + " type: 'Convolution' " + " bottom: 'data' " + " top: 'conv1' " + " param { " + " lr_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " } " + " convolution_param { " + " num_output: 20 " + " kernel_size: 5 " + " stride: 1 " + " weight_filler { " + " type: 'xavier' " + " } " + " bias_filler { " + " type: 'constant' " + " } " + " } " + "} " + "layer { " + " name: 'ip1' " + " type: 'InnerProduct' " + " bottom: 'conv1' " + " top: 'ip1' " + " param { " + " lr_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " } " + " inner_product_param { " + " num_output: 10 " + " weight_filler { " + " type: 'xavier' " + " } " + " bias_filler { " + " type: 'constant' " + " } " + " } " + "} " + "layer { " + " name: 'accuracy' " + " type: 'Accuracy' " + " bottom: 'ip1' " + " bottom: 'label' " + " top: 'accuracy' " + " include: { phase: TEST } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'ip2' " + " bottom: 'label' " + " top: 'loss' " + "} "; + const string output_proto_train_explicit = + output_proto_train + " state: { phase: TRAIN } "; + const string output_proto_test_explicit = + output_proto_test + " state: { phase: TEST } "; + this->RunFilterNetTest(input_proto_train, output_proto_train_explicit); + this->RunFilterNetTest(input_proto_test, output_proto_test_explicit); +} + +TEST_F(FilterNetTest, TestFilterOutByStage) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + " include: { stage: 'mystage' } " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + const string& output_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + "} " + "layer { " + " name: 'loss' " + 
" type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, output_proto); +} + +TEST_F(FilterNetTest, TestFilterOutByStage2) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { stage: 'mystage' } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + const string& output_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, output_proto); +} + +TEST_F(FilterNetTest, TestFilterInByStage) { + const string& input_proto = + "state: { stage: 'mystage' } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { stage: 'mystage' } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, input_proto); +} + +TEST_F(FilterNetTest, TestFilterInByStage2) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " exclude: { stage: 'mystage' } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, 
input_proto); +} + +TEST_F(FilterNetTest, TestFilterOutByMultipleStage) { + const string& input_proto = + "state: { stage: 'mystage' } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { stage: 'mystage' stage: 'myotherstage' } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " include: { stage: 'mystage' } " + "} "; + const string& output_proto = + "state: { stage: 'mystage' } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " include: { stage: 'mystage' } " + "} "; + this->RunFilterNetTest(input_proto, output_proto); +} + +TEST_F(FilterNetTest, TestFilterInByMultipleStage) { + const string& input_proto = + "state: { stage: 'mystage' } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { stage: 'myotherstage' } " + " include: { stage: 'mystage' } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " include: { stage: 'mystage' } " + "} "; + this->RunFilterNetTest(input_proto, input_proto); +} + +TEST_F(FilterNetTest, TestFilterInByMultipleStage2) { + const string& input_proto = + "state: { stage: 'mystage' stage: 'myotherstage' } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " 
include: { stage: 'mystage' stage: 'myotherstage' } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " include: { stage: 'mystage' } " + "} "; + this->RunFilterNetTest(input_proto, input_proto); +} + +TEST_F(FilterNetTest, TestFilterInByNotStage) { + const string& input_proto = + "state: { stage: 'mystage' } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { not_stage: 'myotherstage' } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " include: { not_stage: 'myotherstage' } " + "} "; + this->RunFilterNetTest(input_proto, input_proto); +} + +TEST_F(FilterNetTest, TestFilterOutByNotStage) { + const string& input_proto = + "state: { stage: 'mystage' } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { not_stage: 'mystage' } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " include: { not_stage: 'mystage' } " + "} "; + const string& output_proto = + "state: { stage: 'mystage' } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} "; + this->RunFilterNetTest(input_proto, output_proto); +} + +TEST_F(FilterNetTest, TestFilterOutByMinLevel) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { 
min_level: 3 } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + const string& output_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, output_proto); +} + +TEST_F(FilterNetTest, TestFilterOutByMaxLevel) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { max_level: -3 } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + const string& output_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, output_proto); +} + +TEST_F(FilterNetTest, TestFilterInByMinLevel) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { min_level: 0 } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, input_proto); +} + +TEST_F(FilterNetTest, TestFilterInByMinLevel2) { + const string& input_proto = + "state: { level: 7 } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' 
" + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { min_level: 3 } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, input_proto); +} + +TEST_F(FilterNetTest, TestFilterInByMaxLevel) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { max_level: 0 } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, input_proto); +} + +TEST_F(FilterNetTest, TestFilterInByMaxLevel2) { + const string& input_proto = + "state: { level: -7 } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { max_level: -3 } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunFilterNetTest(input_proto, input_proto); +} + +TEST_F(FilterNetTest, TestFilterInOutByIncludeMultiRule) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { min_level: 2 phase: TRAIN } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " include: { min_level: 2 phase: TEST } " + "} "; + const string& input_proto_train = + "state: { level: 4 phase: 
TRAIN } " + input_proto; + const string& input_proto_test = + "state: { level: 4 phase: TEST } " + input_proto; + const string& output_proto_train = + "state: { level: 4 phase: TRAIN } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { min_level: 2 phase: TRAIN } " + "} "; + const string& output_proto_test = + "state: { level: 4 phase: TEST } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " include: { min_level: 2 phase: TEST } " + "} "; + this->RunFilterNetTest(input_proto_train, output_proto_train); + this->RunFilterNetTest(input_proto_test, output_proto_test); +} + +TEST_F(FilterNetTest, TestFilterInByIncludeMultiRule) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " include: { min_level: 2 phase: TRAIN } " + " include: { phase: TEST } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " include: { min_level: 2 phase: TEST } " + " include: { phase: TRAIN } " + "} "; + const string& input_proto_train = + "state: { level: 2 phase: TRAIN } " + input_proto; + const string& input_proto_test = + "state: { level: 2 phase: TEST } " + input_proto; + this->RunFilterNetTest(input_proto_train, input_proto_train); + this->RunFilterNetTest(input_proto_test, input_proto_test); +} + +TEST_F(FilterNetTest, TestFilterInOutByExcludeMultiRule) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' 
" + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " exclude: { min_level: 2 phase: TRAIN } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " exclude: { min_level: 2 phase: TEST } " + "} "; + const string& input_proto_train = + "state: { level: 4 phase: TRAIN } " + input_proto; + const string& input_proto_test = + "state: { level: 4 phase: TEST } " + input_proto; + const string& output_proto_train = + "state: { level: 4 phase: TRAIN } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + " exclude: { min_level: 2 phase: TEST } " + "} "; + const string& output_proto_test = + "state: { level: 4 phase: TEST } " + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + " exclude: { min_level: 2 phase: TRAIN } " + "} "; + this->RunFilterNetTest(input_proto_train, output_proto_train); + this->RunFilterNetTest(input_proto_test, output_proto_test); +} + +TYPED_TEST(NetTest, TestReshape) { + typedef typename TypeParam::Dtype Dtype; + // We set up bottom blobs of two different sizes, switch between + // them, and check that forward and backward both run and the results + // are the same. 
+ Caffe::set_random_seed(this->seed_); + Caffe::set_mode(Caffe::CPU); + FillerParameter filler_param; + filler_param.set_std(1); + GaussianFiller filler(filler_param); + Blob blob1(4, 3, 9, 11); + Blob blob2(2, 3, 12, 10); + filler.Fill(&blob1); + filler.Fill(&blob2); + + this->InitReshapableNet(); + Blob* input_blob = this->net_->input_blobs()[0]; + Blob* output_blob = this->net_->output_blobs()[0]; + input_blob->Reshape(blob1.num(), blob1.channels(), blob1.height(), + blob1.width()); + caffe_copy(blob1.count(), blob1.cpu_data(), input_blob->mutable_cpu_data()); + this->net_->ForwardPrefilled(); + // call backward just to make sure it runs + this->net_->Backward(); + Blob output1(output_blob->num(), output_blob->channels(), + output_blob->height(), output_blob->width()); + caffe_copy(output1.count(), output_blob->cpu_data(), + output1.mutable_cpu_data()); + + input_blob->Reshape(blob2.num(), blob2.channels(), blob2.height(), + blob2.width()); + caffe_copy(blob2.count(), blob2.cpu_data(), input_blob->mutable_cpu_data()); + this->net_->ForwardPrefilled(); + this->net_->Backward(); + Blob output2(output_blob->num(), output_blob->channels(), + output_blob->height(), output_blob->width()); + caffe_copy(output2.count(), output_blob->cpu_data(), + output2.mutable_cpu_data()); + + input_blob->Reshape(blob1.num(), blob1.channels(), blob1.height(), + blob1.width()); + caffe_copy(blob1.count(), blob1.cpu_data(), input_blob->mutable_cpu_data()); + this->net_->ForwardPrefilled(); + this->net_->Backward(); + for (int i = 0; i < output1.count(); ++i) { + CHECK_EQ(*(output1.cpu_data() + i), *(output_blob->cpu_data() + i)); + } + + input_blob->Reshape(blob2.num(), blob2.channels(), blob2.height(), + blob2.width()); + caffe_copy(blob2.count(), blob2.cpu_data(), input_blob->mutable_cpu_data()); + this->net_->ForwardPrefilled(); + this->net_->Backward(); + for (int i = 0; i < output2.count(); ++i) { + CHECK_EQ(*(output2.cpu_data() + i), *(output_blob->cpu_data() + i)); + } +} + 
+TYPED_TEST(NetTest, TestSkipPropagateDown) { + // check bottom_need_backward if propagate_down is true + this->InitSkipPropNet(false); + vector vec_layer_need_backward = this->net_->layer_need_backward(); + for (int layer_id = 0; layer_id < this->net_->layers().size(); ++layer_id) { + string layer_name = this->net_->layer_names()[layer_id]; + if (layer_name == "loss") { + // access to bottom_need_backward coresponding to label's blob + bool need_back = this->net_->bottom_need_backward()[layer_id][1]; + // if propagate_down is true, the loss layer will try to + // backpropagate on labels + EXPECT_TRUE(need_back) << "bottom_need_backward should be True"; + } + // layer_need_backward should be True except for data and silence layers + if (layer_name.find("data") != std::string::npos || + layer_name == "silence") { + EXPECT_FALSE(vec_layer_need_backward[layer_id]) + << "layer_need_backward for " << layer_name << " should be False"; + } else { + EXPECT_TRUE(vec_layer_need_backward[layer_id]) + << "layer_need_backward for " << layer_name << " should be True"; + } + } + // check bottom_need_backward if propagat_down is false + this->InitSkipPropNet(true); + vec_layer_need_backward.clear(); + vec_layer_need_backward = this->net_->layer_need_backward(); + for (int layer_id = 0; layer_id < this->net_->layers().size(); ++layer_id) { + string layer_name = this->net_->layer_names()[layer_id]; + if (layer_name == "loss") { + // access to bottom_need_backward coresponding to label's blob + bool need_back = this->net_->bottom_need_backward()[layer_id][1]; + // if propagate_down is false, the loss layer will not try to + // backpropagate on labels + EXPECT_FALSE(need_back) << "bottom_need_backward should be False"; + } + // layer_need_backward should be False except for innerproduct and + // loss layers + if (layer_name == "innerproduct" || layer_name == "loss") { + EXPECT_TRUE(vec_layer_need_backward[layer_id]) + << "layer_need_backward for " << layer_name << " should be True"; + 
} else { + EXPECT_FALSE(vec_layer_need_backward[layer_id]) + << "layer_need_backward for " << layer_name << " should be False"; + } + } +} + +} // namespace caffe From 2abe7745bb6828aa746d05db38b34c78aacb5232 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Thu, 20 Aug 2015 07:24:59 +0800 Subject: [PATCH 78/82] new data and new feature dimension --- .../lenet_train_test_BACKUP_8918.prototxt | 180 ++++++++ .../mnist/lenet_train_test_BASE_8918.prototxt | 181 ++++++++ .../lenet_train_test_LOCAL_8918.prototxt | 168 +++++++ .../lenet_train_test_REMOTE_8918.prototxt | 181 ++++++++ examples/siamese/mnist_siamese.prototxt.orig | 11 + .../mnist_siamese_solver.prototxt.orig | 12 + examples/triplet/convert_3d_triplet_data.cpp | 6 +- .../convert_3d_triplet_data.cpp.orig.orig | 163 +++---- ...convert_3d_triplet_data.cpp.orig.orig.orig | 431 ++++++++++++++++++ examples/triplet/create_3d_triplet.sh | 9 +- examples/triplet/create_3d_triplet.sh.orig | 18 +- .../triplet/create_3d_triplet.sh.orig.orig | 24 +- src/caffe/data_transformer.cpp | 2 + src/caffe/test/test_net.cpp | 18 +- 14 files changed, 1252 insertions(+), 152 deletions(-) create mode 100644 examples/mnist/lenet_train_test_BACKUP_8918.prototxt create mode 100644 examples/mnist/lenet_train_test_BASE_8918.prototxt create mode 100644 examples/mnist/lenet_train_test_LOCAL_8918.prototxt create mode 100644 examples/mnist/lenet_train_test_REMOTE_8918.prototxt create mode 100644 examples/triplet/convert_3d_triplet_data.cpp.orig.orig.orig diff --git a/examples/mnist/lenet_train_test_BACKUP_8918.prototxt b/examples/mnist/lenet_train_test_BACKUP_8918.prototxt new file mode 100644 index 00000000000..2ab137326bc --- /dev/null +++ b/examples/mnist/lenet_train_test_BACKUP_8918.prototxt @@ -0,0 +1,180 @@ +name: "LeNet" +layer { + name: "mnist" + type: "Data" + top: "data" + top: "label" + include { + phase: TRAIN + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/mnist/mnist_train_lmdb" + batch_size: 64 + 
backend: LMDB + } +} +layer { + name: "mnist" + type: "Data" + top: "data" + top: "label" + include { + phase: TEST + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/mnist/mnist_test_lmdb" + batch_size: 100 + backend: LMDB + } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "ip1" + top: "ip2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21:examples/mnist/lenet_train_test.prototxt + num_output: 10 +======= + num_output: 16 +>>>>>>> new data and new feature dimension:examples/triplet/3d_triplet_train_test.prototxt + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "accuracy" + type: "Accuracy" + bottom: "ip2" + bottom: "label" + 
top: "accuracy" + include { + phase: TEST + } +} +layer { + name: "loss" + type: "SoftmaxWithLoss" + bottom: "ip2" + bottom: "label" + top: "loss" +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21:examples/mnist/lenet_train_test.prototxt +======= + triplet_loss_param { + margin: 0.01 + losstype: 1 + num_triplets: 3 + } +>>>>>>> new data and new feature dimension:examples/triplet/3d_triplet_train_test.prototxt +} diff --git a/examples/mnist/lenet_train_test_BASE_8918.prototxt b/examples/mnist/lenet_train_test_BASE_8918.prototxt new file mode 100644 index 00000000000..4c766ebe0e4 --- /dev/null +++ b/examples/mnist/lenet_train_test_BASE_8918.prototxt @@ -0,0 +1,181 @@ +name: "3d_triplet_train_test" +layer { + name: "data" + type: "Data" + top: "data" + top: "sim" + include { + phase: TRAIN + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/triplet/3d_triplet_train_leveldb" + batch_size: 250 + } +} +layer { + name: "data" + type: "Data" + top: "data" + top: "sim" + include { + phase: TEST + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/triplet/3d_triplet_test_leveldb" + batch_size: 250 + } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 16 + kernel_size: 8 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "pool1" + top: "pool1" +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 7 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + 
bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu2" + type: "ReLU" + bottom: "pool2" + top: "pool2" +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 256 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu3" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip1" + top: "feat" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 4 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "loss" + type: "TripletLoss" + bottom: "feat" + bottom: "sim" + top: "loss" + triplet_loss_param { + margin: 0.02 + losstype: 1 + num_triplets: 3 + } +} diff --git a/examples/mnist/lenet_train_test_LOCAL_8918.prototxt b/examples/mnist/lenet_train_test_LOCAL_8918.prototxt new file mode 100644 index 00000000000..b18fc26cfd8 --- /dev/null +++ b/examples/mnist/lenet_train_test_LOCAL_8918.prototxt @@ -0,0 +1,168 @@ +name: "LeNet" +layer { + name: "mnist" + type: "Data" + top: "data" + top: "label" + include { + phase: TRAIN + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/mnist/mnist_train_lmdb" + batch_size: 64 + backend: LMDB + } +} +layer { + name: "mnist" + type: "Data" + top: "data" + top: "label" + include { + phase: TEST + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/mnist/mnist_test_lmdb" + batch_size: 100 + backend: LMDB + } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { 
+ num_output: 20 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "ip1" + top: "ip2" + param { + lr_mult: 1 + } + param { + lr_mult: 2 + } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "accuracy" + type: "Accuracy" + bottom: "ip2" + bottom: "label" + top: "accuracy" + include { + phase: TEST + } +} +layer { + name: "loss" + type: "SoftmaxWithLoss" + bottom: "ip2" + bottom: "label" + top: "loss" +} diff --git a/examples/mnist/lenet_train_test_REMOTE_8918.prototxt b/examples/mnist/lenet_train_test_REMOTE_8918.prototxt new file mode 100644 index 00000000000..52398c3876d --- /dev/null +++ b/examples/mnist/lenet_train_test_REMOTE_8918.prototxt @@ -0,0 +1,181 @@ +name: "3d_triplet_train_test" +layer { + name: "data" + type: "Data" + top: "data" + top: "sim" + include { + phase: TRAIN + } + transform_param { + scale: 0.00390625 + } + data_param { + source: 
"examples/triplet/3d_triplet_train_leveldb" + batch_size: 250 + } +} +layer { + name: "data" + type: "Data" + top: "data" + top: "sim" + include { + phase: TEST + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/triplet/3d_triplet_test_leveldb" + batch_size: 250 + } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + name: "conv1_w" + lr_mult: 1 + } + param { + name: "conv1_b" + lr_mult: 2 + } + convolution_param { + num_output: 16 + kernel_size: 8 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "pool1" + top: "pool1" +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + name: "conv2_w" + lr_mult: 1 + } + param { + name: "conv2_b" + lr_mult: 2 + } + convolution_param { + num_output: 7 + kernel_size: 5 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "relu2" + type: "ReLU" + bottom: "pool2" + top: "pool2" +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + name: "ip1_w" + lr_mult: 1 + } + param { + name: "ip1_b" + lr_mult: 2 + } + inner_product_param { + num_output: 256 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + } + } +} +layer { + name: "relu3" + type: "ReLU" + bottom: "ip1" + top: "ip1" +} +layer { + name: "feat" + type: "InnerProduct" + bottom: "ip1" + top: "feat" + param { + name: "feat_w" + lr_mult: 1 + } + param { + name: "feat_b" + lr_mult: 2 + } + inner_product_param { + num_output: 16 + weight_filler { + type: "xavier" + } + 
bias_filler { + type: "constant" + } + } +} +layer { + name: "loss" + type: "TripletLoss" + bottom: "feat" + bottom: "sim" + top: "loss" + triplet_loss_param { + margin: 0.01 + losstype: 1 + num_triplets: 3 + } +} diff --git a/examples/siamese/mnist_siamese.prototxt.orig b/examples/siamese/mnist_siamese.prototxt.orig index 332731bd75f..e496b6716b4 100644 --- a/examples/siamese/mnist_siamese.prototxt.orig +++ b/examples/siamese/mnist_siamese.prototxt.orig @@ -1,11 +1,18 @@ name: "mnist_siamese" input: "data" +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21:examples/siamese/mnist_siamese.prototxt input_shape { dim: 10000 dim: 1 dim: 28 dim: 28 } +======= +input_dim: 6480 +input_dim: 1 +input_dim: 64 +input_dim: 64 +>>>>>>> new data and new feature dimension:examples/triplet/3d_triplet.prototxt layer { name: "conv1" type: "Convolution" @@ -110,6 +117,10 @@ layer { lr_mult: 2 } inner_product_param { +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21:examples/siamese/mnist_siamese.prototxt num_output: 2 +======= + num_output: 16 +>>>>>>> new data and new feature dimension:examples/triplet/3d_triplet.prototxt } } diff --git a/examples/siamese/mnist_siamese_solver.prototxt.orig b/examples/siamese/mnist_siamese_solver.prototxt.orig index d4d994d1389..008115e5ff2 100644 --- a/examples/siamese/mnist_siamese_solver.prototxt.orig +++ b/examples/siamese/mnist_siamese_solver.prototxt.orig @@ -1,8 +1,13 @@ # The train/test net protocol buffer definition net: "examples/siamese/mnist_siamese_train_test.prototxt" # test_iter specifies how many forward passes the test should carry out. +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21:examples/siamese/mnist_siamese_solver.prototxt # In the case of MNIST, we have test batch size 100 and 100 test iterations, # covering the full 10,000 testing images. +======= +# In the case of 3d database, we have test batch size 250 and 250 test iterations: 50*(2+3)=250, +# covering the full 6480 testing images:162*4*10=6480. 
+>>>>>>> new data and new feature dimension:examples/triplet/3d_triplet_solver.prototxt test_iter: 100 # Carry out testing every 500 training iterations. test_interval: 500 @@ -17,9 +22,16 @@ power: 0.75 # Display every 100 iterations display: 100 # The maximum number of iterations +<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21:examples/siamese/mnist_siamese_solver.prototxt max_iter: 50000 # snapshot intermediate results snapshot: 5000 snapshot_prefix: "examples/siamese/mnist_siamese" +======= +max_iter: 40000 +# snapshot intermediate results +snapshot: 5000 +snapshot_prefix: "examples/triplet/3d_triplet" +>>>>>>> new data and new feature dimension:examples/triplet/3d_triplet_solver.prototxt # solver mode: CPU or GPU solver_mode: GPU diff --git a/examples/triplet/convert_3d_triplet_data.cpp b/examples/triplet/convert_3d_triplet_data.cpp index aa61a164fa9..3a30d3fb082 100644 --- a/examples/triplet/convert_3d_triplet_data.cpp +++ b/examples/triplet/convert_3d_triplet_data.cpp @@ -88,7 +88,7 @@ void convert_dataset(const char* image_filename, const char* label_filename, LOG(INFO) << "A total of " << num_items << " items."; LOG(INFO) << "Rows: " << rows << " Cols: " << cols; int counter = 0; - for (unsigned int times = 0; times < 5; ++times) { + for (unsigned int times = 0; times < 10; ++times) { // iteration in the samples of all class for (unsigned int itemid = 0; itemid < num_items/class_num; ++itemid) { // iteration in the samples in one class @@ -132,7 +132,7 @@ void convert_dataset(const char* image_filename, const char* label_filename, float dist_ij = std::sqrt(ij_x + ij_y + ij_z); float dist_im = std::sqrt(im_x + im_y + im_z); - if (*label_i == *label_j && dist_ij < 100/2 && dist_ij != 0) + if (*label_i == *label_j && dist_ij < 100/3 && dist_ij != 0) pair_pass = true; if (pair_pass && (*label_i != *label_k)) triplet1_pass = true; @@ -140,7 +140,7 @@ void convert_dataset(const char* image_filename, const char* label_filename, triplet2_pass = true; if 
(pair_pass && (*label_i == *label_m)) triplet3_class_same = true; - if (triplet3_class_same && dist_im > 100*sqrt(2)) + if (triplet3_class_same && dist_im > 100/3 && dist_im < 100) triplet3_pass = true; if (pair_pass && triplet1_pass && triplet2_pass && triplet3_pass) { datum.set_data(pixels1, rows*cols); // set data diff --git a/examples/triplet/convert_3d_triplet_data.cpp.orig.orig b/examples/triplet/convert_3d_triplet_data.cpp.orig.orig index cb8ab886ab6..7b6979f39c3 100644 --- a/examples/triplet/convert_3d_triplet_data.cpp.orig.orig +++ b/examples/triplet/convert_3d_triplet_data.cpp.orig.orig @@ -1,4 +1,3 @@ -<<<<<<< 165e1d595232eb2a908f62887bcf2d5e1743ed10 // Usage: // convert_3d_data input_image_file input_label_file output_db_file #include // NOLINT(readability/streams) @@ -11,25 +10,6 @@ #include "math.h" #include "stdint.h" -======= -// This script converts the MNIST dataset to the leveldb format used -// by caffe to train siamese network. -// Usage: -// convert_mnist_data input_image_file input_label_file output_db_file -// The MNIST dataset could be downloaded at -// http://yann.lecun.com/exdb/mnist/ -#include // NOLINT(readability/streams) -#include - -#include "glog/logging.h" -#include "google/protobuf/text_format.h" -#include "leveldb/db.h" -#include "stdint.h" - -#include "caffe/proto/caffe.pb.h" -#include "caffe/util/math_functions.hpp" - ->>>>>>> add 3d network training param uint32_t swap_endian(uint32_t val) { val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); return (val << 16) | (val >> 16); @@ -37,7 +17,7 @@ uint32_t swap_endian(uint32_t val) { void read_image(std::ifstream* image_file, std::ifstream* label_file, uint32_t index, uint32_t rows, uint32_t cols, -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 char* pixels, char* label_temp, signed char* label) { image_file->seekg(index * rows * cols + 16); image_file->read(pixels, rows * cols); @@ -45,8 +25,11 @@ void 
read_image(std::ifstream* image_file, std::ifstream* label_file, label_file->read(label_temp, 4); for (int i = 0; i < 4; i++) *(label+i) = (signed char)*(label_temp+i); +} + +void convert_dataset(const char* image_filename, const char* label_filename, + const char* db_filename, const char* class_number) { ======= -<<<<<<< 165e1d595232eb2a908f62887bcf2d5e1743ed10 char* pixels, char* label_temp, signed char* label, int rgb_use) { if (rgb_use == 0) { image_file->seekg(index * rows * cols + 16); @@ -63,23 +46,14 @@ void read_image(std::ifstream* image_file, std::ifstream* label_file, for (int i = 0; i < 4; i++) *(label+i) = (signed char)*(label_temp+i); } ->>>>>>> add 3d network training param } void convert_dataset(const char* image_filename, const char* label_filename, - const char* db_filename, const char* class_number) { + const char* db_filename, const char* class_number, + const char* rgb_use) { + int rgb_use1 = atoi(rgb_use); +>>>>>>> new data and new feature dimension int class_num = atoi(class_number); -======= - char* pixels, char* label) { - image_file->seekg(index * rows * cols + 16); - image_file->read(pixels, rows * cols); - label_file->seekg(index + 8); - label_file->read(label, 1); -} - -void convert_dataset(const char* image_filename, const char* label_filename, - const char* db_filename) { ->>>>>>> add 3d network training param // Open files std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); @@ -97,11 +71,7 @@ void convert_dataset(const char* image_filename, const char* label_filename, CHECK_EQ(magic, 2051) << "Incorrect image file magic."; label_file.read(reinterpret_cast(&magic), 4); magic = swap_endian(magic); -<<<<<<< 165e1d595232eb2a908f62887bcf2d5e1743ed10 CHECK_EQ(magic, 2050) << "Incorrect label file magic."; -======= - CHECK_EQ(magic, 2049) << "Incorrect label file magic."; ->>>>>>> add 3d network training param 
image_file.read(reinterpret_cast(&num_items), 4); num_items = swap_endian(num_items); label_file.read(reinterpret_cast(&num_labels), 4); @@ -122,44 +92,45 @@ void convert_dataset(const char* image_filename, const char* label_filename, CHECK(status.ok()) << "Failed to open leveldb " << db_filename << ". Is it already existing?"; -<<<<<<< 165e1d595232eb2a908f62887bcf2d5e1743ed10 char* label_temp = new char[4]; // label for unsigned char* signed char* label_i = new signed char[4]; // label for triplet signed char* label_j = new signed char[4]; signed char* label_k = new signed char[4]; signed char* label_l = new signed char[4]; // label for pair wise signed char* label_m = new signed char[4]; +<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 char* pixels1 = new char[rows * cols]; char* pixels2 = new char[rows * cols]; char* pixels3 = new char[rows * cols]; char* pixels4 = new char[rows * cols]; char* pixels5 = new char[rows * cols]; - const int kMaxKeyLength = 10; - char key[kMaxKeyLength]; - std::string value; - caffe::Datum datum; - datum.set_channels(1); ======= - char label_i; // label for triplet - char label_j; - char label_k; - char label_l; // label for pair wise - char label_m; - char* pixels = new char[5 * rows * cols]; + int db_size; + if (rgb_use1 == 0) + db_size = rows * cols; + else + db_size = 3 * rows * cols; + char* pixels1 = new char[db_size]; + char* pixels2 = new char[db_size]; + char* pixels3 = new char[db_size]; + char* pixels4 = new char[db_size]; + char* pixels5 = new char[db_size]; +>>>>>>> new data and new feature dimension const int kMaxKeyLength = 10; char key[kMaxKeyLength]; std::string value; - caffe::Datum datum; - datum.set_channels(5); // one channel for each image in the triplet and pair ->>>>>>> add 3d network training param + datum.set_channels(1); datum.set_height(rows); datum.set_width(cols); LOG(INFO) << "A total of " << num_items << " items."; LOG(INFO) << "Rows: " << rows << " Cols: " << cols; -<<<<<<< 
165e1d595232eb2a908f62887bcf2d5e1743ed10 int counter = 0; +<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 for (unsigned int times = 0; times < 5; ++times) { +======= + for (unsigned int times = 0; times < 10; ++times) { +>>>>>>> new data and new feature dimension // iteration in the samples of all class for (unsigned int itemid = 0; itemid < num_items/class_num; ++itemid) { // iteration in the samples in one class @@ -171,6 +142,7 @@ void convert_dataset(const char* image_filename, const char* label_filename, int l = caffe::caffe_rng_rand() % num_items; // pick pair wise groups int m = caffe::caffe_rng_rand() % num_items; read_image(&image_file, &label_file, i, rows, cols, // read triplet +<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 pixels1, label_temp, label_i); read_image(&image_file, &label_file, j, rows, cols, pixels2, label_temp, label_j); @@ -180,6 +152,17 @@ void convert_dataset(const char* image_filename, const char* label_filename, pixels4, label_temp, label_l); read_image(&image_file, &label_file, m, rows, cols, pixels5, label_temp, label_m); +======= + pixels1, label_temp, label_i, rgb_use1); + read_image(&image_file, &label_file, j, rows, cols, + pixels2, label_temp, label_j, rgb_use1); + read_image(&image_file, &label_file, k, rows, cols, + pixels3, label_temp, label_k, rgb_use1); + read_image(&image_file, &label_file, l, rows, cols, // read pair wise + pixels4, label_temp, label_l, rgb_use1); + read_image(&image_file, &label_file, m, rows, cols, + pixels5, label_temp, label_m, rgb_use1); +>>>>>>> new data and new feature dimension bool pair_pass = false; bool triplet1_pass = false; @@ -203,7 +186,11 @@ void convert_dataset(const char* image_filename, const char* label_filename, float dist_ij = std::sqrt(ij_x + ij_y + ij_z); float dist_im = std::sqrt(im_x + im_y + im_z); +<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 if (*label_i == *label_j && dist_ij < 100/2) +======= + if (*label_i == *label_j && dist_ij < 100/3 && dist_ij != 0) +>>>>>>> 
new data and new feature dimension pair_pass = true; if (pair_pass && (*label_i != *label_k)) triplet1_pass = true; @@ -211,7 +198,11 @@ void convert_dataset(const char* image_filename, const char* label_filename, triplet2_pass = true; if (pair_pass && (*label_i == *label_m)) triplet3_class_same = true; +<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 if (triplet3_class_same && dist_im > 100*sqrt(2)) +======= + if (triplet3_class_same && dist_im > 100/3 && dist_im < 100) +>>>>>>> new data and new feature dimension triplet3_pass = true; if (pair_pass && triplet1_pass && triplet2_pass && triplet3_pass) { datum.set_data(pixels1, rows*cols); // set data @@ -259,76 +250,26 @@ void convert_dataset(const char* image_filename, const char* label_filename, } int main(int argc, char** argv) { -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 if (argc != 5) { - printf("This script converts the images dataset to the leveldb format used\n" ======= if (argc != 6) { +>>>>>>> new data and new feature dimension printf("This script converts the dataset to the leveldb format used\n" ->>>>>>> add 3d network training param "by caffe to train a triplet network.\n" "Usage:\n" " convert_3d_data input_image_file input_label_file " +<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 "output_db_file class_number\n"); } else { google::InitGoogleLogging(argv[0]); -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f convert_dataset(argv[1], argv[2], argv[3], argv[4]); ======= - convert_dataset(argv[1], argv[2], argv[3], argv[4], argv[5]); -======= - for (int itemid = 0; itemid < num_items; ++itemid) { - // pick triplet groups - int i = caffe::caffe_rng_rand() % num_items; - int j = caffe::caffe_rng_rand() % num_items; - int k = caffe::caffe_rng_rand() % num_items; - // pick pair wise groups - int l = caffe::caffe_rng_rand() % num_items; - int m = caffe::caffe_rng_rand() % num_items; - // read triplet groups - read_image(&image_file, &label_file, i, 
rows, cols, - pixels, &label_i); - read_image(&image_file, &label_file, j, rows, cols, - pixels + (rows * cols), &label_j); - read_image(&image_file, &label_file, k, rows, cols, - pixels + (2 * rows * cols), &label_k); - // read pair wise groups - read_image(&image_file, &label_file, l, rows, cols, - pixels + (3 * rows * cols), &label_l); - read_image(&image_file, &label_file, m, rows, cols, - pixels + (4 * rows * cols), &label_m); - - datum.set_data(pixels, 5*rows*cols); // set data - if ((label_i == label_j && label_i != label_k) && (label_l == label_m)) { - datum.set_label(1); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", itemid); - db->Put(leveldb::WriteOptions(), std::string(key), value); - } else { - itemid--; - datum.set_label(0); - } - } - - delete db; - delete pixels; -} - -int main(int argc, char** argv) { - if (argc != 4) { - printf("This script converts the MNIST dataset to the leveldb format used\n" - "by caffe to train a siamese network.\n" - "Usage:\n" - " convert_mnist_data input_image_file input_label_file " - "output_db_file\n" - "The MNIST dataset could be downloaded at\n" - " http://yann.lecun.com/exdb/mnist/\n" - "You should gunzip them after downloading.\n"); + "output_db_file class_number rgb_use \n"); } else { google::InitGoogleLogging(argv[0]); - convert_dataset(argv[1], argv[2], argv[3]); ->>>>>>> add 3d network training param ->>>>>>> add 3d network training param + convert_dataset(argv[1], argv[2], argv[3], argv[4], argv[5]); +>>>>>>> new data and new feature dimension } return 0; } diff --git a/examples/triplet/convert_3d_triplet_data.cpp.orig.orig.orig b/examples/triplet/convert_3d_triplet_data.cpp.orig.orig.orig new file mode 100644 index 00000000000..1c27853a000 --- /dev/null +++ b/examples/triplet/convert_3d_triplet_data.cpp.orig.orig.orig @@ -0,0 +1,431 @@ +<<<<<<< 165e1d595232eb2a908f62887bcf2d5e1743ed10 +// Usage: +// convert_3d_data input_image_file input_label_file output_db_file +#include // 
NOLINT(readability/streams) +#include +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/math_functions.hpp" +#include "glog/logging.h" +#include "google/protobuf/text_format.h" +#include "leveldb/db.h" +#include "math.h" +#include "stdint.h" + +======= +// This script converts the MNIST dataset to the leveldb format used +// by caffe to train siamese network. +// Usage: +// convert_mnist_data input_image_file input_label_file output_db_file +// The MNIST dataset could be downloaded at +// http://yann.lecun.com/exdb/mnist/ +#include // NOLINT(readability/streams) +#include + +#include "glog/logging.h" +#include "google/protobuf/text_format.h" +#include "leveldb/db.h" +#include "stdint.h" + +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/math_functions.hpp" + +>>>>>>> add 3d network training param +uint32_t swap_endian(uint32_t val) { + val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); + return (val << 16) | (val >> 16); +} + +void read_image(std::ifstream* image_file, std::ifstream* label_file, + uint32_t index, uint32_t rows, uint32_t cols, +<<<<<<< befbd1e1805c429d9d9e767289933604428356c9 +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f +======= +<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 +>>>>>>> new data and new feature dimension + char* pixels, char* label_temp, signed char* label) { + image_file->seekg(index * rows * cols + 16); + image_file->read(pixels, rows * cols); + label_file->seekg(index * 4 + 8); + label_file->read(label_temp, 4); + for (int i = 0; i < 4; i++) + *(label+i) = (signed char)*(label_temp+i); +<<<<<<< befbd1e1805c429d9d9e767289933604428356c9 +======= +<<<<<<< 165e1d595232eb2a908f62887bcf2d5e1743ed10 +======= +} + +void convert_dataset(const char* image_filename, const char* label_filename, + const char* db_filename, const char* class_number) { +======= +>>>>>>> new data and new feature dimension + char* pixels, char* label_temp, signed char* label, int rgb_use) { + if (rgb_use == 0) { + 
image_file->seekg(index * rows * cols + 16); + image_file->read(pixels, rows * cols); + label_file->seekg(index * 4 + 8); + label_file->read(label_temp, 4); + for (int i = 0; i < 4; i++) + *(label+i) = (signed char)*(label_temp+i); + } else { + image_file->seekg(3 * index * rows * cols + 16); + image_file->read(pixels, 3 * rows * cols); + label_file->seekg(index * 4 + 8); + label_file->read(label_temp, 4); + for (int i = 0; i < 4; i++) + *(label+i) = (signed char)*(label_temp+i); + } +>>>>>>> add 3d network training param +} + +void convert_dataset(const char* image_filename, const char* label_filename, +<<<<<<< befbd1e1805c429d9d9e767289933604428356c9 + const char* db_filename, const char* class_number) { +======= + const char* db_filename, const char* class_number, + const char* rgb_use) { + int rgb_use1 = atoi(rgb_use); +>>>>>>> new data and new feature dimension +>>>>>>> new data and new feature dimension + int class_num = atoi(class_number); +======= + char* pixels, char* label) { + image_file->seekg(index * rows * cols + 16); + image_file->read(pixels, rows * cols); + label_file->seekg(index + 8); + label_file->read(label, 1); +} + +void convert_dataset(const char* image_filename, const char* label_filename, + const char* db_filename) { +>>>>>>> add 3d network training param + // Open files + std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); + std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); + CHECK(image_file) << "Unable to open file " << image_filename; + CHECK(label_file) << "Unable to open file " << label_filename; + // Read the magic and the meta data + uint32_t magic; + uint32_t num_items; + uint32_t num_labels; + uint32_t rows; + uint32_t cols; + + image_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); + CHECK_EQ(magic, 2051) << "Incorrect image file magic."; + label_file.read(reinterpret_cast(&magic), 4); + magic = swap_endian(magic); +<<<<<<< 
165e1d595232eb2a908f62887bcf2d5e1743ed10 + CHECK_EQ(magic, 2050) << "Incorrect label file magic."; +======= + CHECK_EQ(magic, 2049) << "Incorrect label file magic."; +>>>>>>> add 3d network training param + image_file.read(reinterpret_cast(&num_items), 4); + num_items = swap_endian(num_items); + label_file.read(reinterpret_cast(&num_labels), 4); + num_labels = swap_endian(num_labels); + CHECK_EQ(num_items, num_labels); + image_file.read(reinterpret_cast(&rows), 4); + rows = swap_endian(rows); + image_file.read(reinterpret_cast(&cols), 4); + cols = swap_endian(cols); + + // Open leveldb + leveldb::DB* db; + leveldb::Options options; + options.create_if_missing = true; + options.error_if_exists = true; + leveldb::Status status = leveldb::DB::Open( + options, db_filename, &db); + CHECK(status.ok()) << "Failed to open leveldb " << db_filename + << ". Is it already existing?"; + +<<<<<<< 165e1d595232eb2a908f62887bcf2d5e1743ed10 + char* label_temp = new char[4]; // label for unsigned char* + signed char* label_i = new signed char[4]; // label for triplet + signed char* label_j = new signed char[4]; + signed char* label_k = new signed char[4]; + signed char* label_l = new signed char[4]; // label for pair wise + signed char* label_m = new signed char[4]; +<<<<<<< befbd1e1805c429d9d9e767289933604428356c9 +======= +<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 +>>>>>>> new data and new feature dimension + char* pixels1 = new char[rows * cols]; + char* pixels2 = new char[rows * cols]; + char* pixels3 = new char[rows * cols]; + char* pixels4 = new char[rows * cols]; + char* pixels5 = new char[rows * cols]; +<<<<<<< befbd1e1805c429d9d9e767289933604428356c9 +======= +======= + int db_size; + if (rgb_use1 == 0) + db_size = rows * cols; + else + db_size = 3 * rows * cols; + char* pixels1 = new char[db_size]; + char* pixels2 = new char[db_size]; + char* pixels3 = new char[db_size]; + char* pixels4 = new char[db_size]; + char* pixels5 = new char[db_size]; +>>>>>>> new data and 
new feature dimension +>>>>>>> new data and new feature dimension + const int kMaxKeyLength = 10; + char key[kMaxKeyLength]; + std::string value; + caffe::Datum datum; + datum.set_channels(1); +======= + char label_i; // label for triplet + char label_j; + char label_k; + char label_l; // label for pair wise + char label_m; + char* pixels = new char[5 * rows * cols]; + const int kMaxKeyLength = 10; + char key[kMaxKeyLength]; + std::string value; + + caffe::Datum datum; + datum.set_channels(5); // one channel for each image in the triplet and pair +>>>>>>> add 3d network training param + datum.set_height(rows); + datum.set_width(cols); + LOG(INFO) << "A total of " << num_items << " items."; + LOG(INFO) << "Rows: " << rows << " Cols: " << cols; +<<<<<<< befbd1e1805c429d9d9e767289933604428356c9 +<<<<<<< 165e1d595232eb2a908f62887bcf2d5e1743ed10 +======= +>>>>>>> new data and new feature dimension + int counter = 0; +<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 + for (unsigned int times = 0; times < 5; ++times) { +======= + for (unsigned int times = 0; times < 10; ++times) { +>>>>>>> new data and new feature dimension + // iteration in the samples of all class + for (unsigned int itemid = 0; itemid < num_items/class_num; ++itemid) { + // iteration in the samples in one class + for (unsigned int class_ind = 0; class_ind < class_num; ++class_ind) { + // use reference sample one by one at each iteration + int i = itemid % num_items + class_ind*num_items/class_num; + int j = caffe::caffe_rng_rand() % num_items; // pick triplet groups + int k = caffe::caffe_rng_rand() % num_items; + int l = caffe::caffe_rng_rand() % num_items; // pick pair wise groups + int m = caffe::caffe_rng_rand() % num_items; + read_image(&image_file, &label_file, i, rows, cols, // read triplet +<<<<<<< befbd1e1805c429d9d9e767289933604428356c9 + pixels1, label_temp, label_i); +======= +<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 + pixels1, label_temp, label_i); + read_image(&image_file, 
&label_file, j, rows, cols, + pixels2, label_temp, label_j); + read_image(&image_file, &label_file, k, rows, cols, + pixels3, label_temp, label_k); + read_image(&image_file, &label_file, l, rows, cols, // read pair wise + pixels4, label_temp, label_l); + read_image(&image_file, &label_file, m, rows, cols, + pixels5, label_temp, label_m); +======= + pixels1, label_temp, label_i, rgb_use1); +>>>>>>> new data and new feature dimension + read_image(&image_file, &label_file, j, rows, cols, + pixels2, label_temp, label_j); + read_image(&image_file, &label_file, k, rows, cols, + pixels3, label_temp, label_k); + read_image(&image_file, &label_file, l, rows, cols, // read pair wise + pixels4, label_temp, label_l); + read_image(&image_file, &label_file, m, rows, cols, +<<<<<<< befbd1e1805c429d9d9e767289933604428356c9 + pixels5, label_temp, label_m); +======= + pixels5, label_temp, label_m, rgb_use1); +>>>>>>> new data and new feature dimension +>>>>>>> new data and new feature dimension + + bool pair_pass = false; + bool triplet1_pass = false; + bool triplet2_pass = false; + bool triplet3_class_same = false; + bool triplet3_pass = false; + + int ij_diff_x = static_cast(*(label_i+1)-*(label_j+1)); + int ij_diff_y = static_cast(*(label_i+2)-*(label_j+2)); + int ij_diff_z = static_cast(*(label_i+3)-*(label_j+3)); + int im_diff_x = static_cast(*(label_i+1)-*(label_m+1)); + int im_diff_y = static_cast(*(label_i+2)-*(label_m+2)); + int im_diff_z = static_cast(*(label_i+3)-*(label_m+3)); + + int ij_x = ij_diff_x*ij_diff_x; + int ij_y = ij_diff_y*ij_diff_y; + int ij_z = ij_diff_z*ij_diff_z; + int im_x = im_diff_x*im_diff_x; + int im_y = im_diff_y*im_diff_y; + int im_z = im_diff_z*im_diff_z; + + float dist_ij = std::sqrt(ij_x + ij_y + ij_z); + float dist_im = std::sqrt(im_x + im_y + im_z); +<<<<<<< befbd1e1805c429d9d9e767289933604428356c9 + if (*label_i == *label_j && dist_ij < 100/2) +======= +<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 + if (*label_i == *label_j && dist_ij < 
100/2) +======= + if (*label_i == *label_j && dist_ij < 100/3 && dist_ij != 0) +>>>>>>> new data and new feature dimension +>>>>>>> new data and new feature dimension + pair_pass = true; + if (pair_pass && (*label_i != *label_k)) + triplet1_pass = true; + if (pair_pass && (*label_i != *label_l)) + triplet2_pass = true; + if (pair_pass && (*label_i == *label_m)) + triplet3_class_same = true; +<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 + if (triplet3_class_same && dist_im > 100*sqrt(2)) +======= + if (triplet3_class_same && dist_im > 100/3 && dist_im < 100) +>>>>>>> new data and new feature dimension + triplet3_pass = true; + if (pair_pass && triplet1_pass && triplet2_pass && triplet3_pass) { + datum.set_data(pixels1, rows*cols); // set data + datum.set_label(static_cast(*label_i)); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", counter); + db->Put(leveldb::WriteOptions(), std::string(key), value); + counter++; + datum.set_data(pixels2, rows*cols); // set data + datum.set_label(static_cast(*label_j)); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", counter); + db->Put(leveldb::WriteOptions(), std::string(key), value); + counter++; + datum.set_data(pixels3, rows*cols); // set data + datum.set_label(static_cast(*label_k)); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", counter); + db->Put(leveldb::WriteOptions(), std::string(key), value); + counter++; + datum.set_data(pixels4, rows*cols); // set data + datum.set_label(static_cast(*label_l)); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", counter); + db->Put(leveldb::WriteOptions(), std::string(key), value); + counter++; + datum.set_data(pixels5, rows*cols); // set data + datum.set_label(static_cast(*label_m)); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", counter); + db->Put(leveldb::WriteOptions(), std::string(key), value); + counter++; + } else { + class_ind--; + } + } // 
iteration in the samples of all class + } // iteration in the samples in one class + } // iteration in times + delete db; + delete pixels1; + delete pixels2; + delete pixels3; + delete pixels4; + delete pixels5; +} + +int main(int argc, char** argv) { +<<<<<<< befbd1e1805c429d9d9e767289933604428356c9 +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f + if (argc != 5) { + printf("This script converts the images dataset to the leveldb format used\n" +======= +<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 + if (argc != 5) { +>>>>>>> new data and new feature dimension +======= + if (argc != 6) { +>>>>>>> new data and new feature dimension + printf("This script converts the dataset to the leveldb format used\n" +>>>>>>> add 3d network training param + "by caffe to train a triplet network.\n" + "Usage:\n" + " convert_3d_data input_image_file input_label_file " +<<<<<<< befbd1e1805c429d9d9e767289933604428356c9 + "output_db_file class_number\n"); + } else { + google::InitGoogleLogging(argv[0]); +<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f + convert_dataset(argv[1], argv[2], argv[3], argv[4]); +======= + convert_dataset(argv[1], argv[2], argv[3], argv[4], argv[5]); +======= + for (int itemid = 0; itemid < num_items; ++itemid) { + // pick triplet groups + int i = caffe::caffe_rng_rand() % num_items; + int j = caffe::caffe_rng_rand() % num_items; + int k = caffe::caffe_rng_rand() % num_items; + // pick pair wise groups + int l = caffe::caffe_rng_rand() % num_items; + int m = caffe::caffe_rng_rand() % num_items; + // read triplet groups + read_image(&image_file, &label_file, i, rows, cols, + pixels, &label_i); + read_image(&image_file, &label_file, j, rows, cols, + pixels + (rows * cols), &label_j); + read_image(&image_file, &label_file, k, rows, cols, + pixels + (2 * rows * cols), &label_k); + // read pair wise groups + read_image(&image_file, &label_file, l, rows, cols, + pixels + (3 * rows * cols), &label_l); + read_image(&image_file, &label_file, m, rows, cols, + 
pixels + (4 * rows * cols), &label_m); + + datum.set_data(pixels, 5*rows*cols); // set data + if ((label_i == label_j && label_i != label_k) && (label_l == label_m)) { + datum.set_label(1); + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", itemid); + db->Put(leveldb::WriteOptions(), std::string(key), value); + } else { + itemid--; + datum.set_label(0); + } + } + + delete db; + delete pixels; +} + +int main(int argc, char** argv) { + if (argc != 4) { + printf("This script converts the MNIST dataset to the leveldb format used\n" + "by caffe to train a siamese network.\n" + "Usage:\n" + " convert_mnist_data input_image_file input_label_file " + "output_db_file\n" + "The MNIST dataset could be downloaded at\n" + " http://yann.lecun.com/exdb/mnist/\n" + "You should gunzip them after downloading.\n"); + } else { + google::InitGoogleLogging(argv[0]); + convert_dataset(argv[1], argv[2], argv[3]); +>>>>>>> add 3d network training param +>>>>>>> add 3d network training param +======= +<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 + "output_db_file class_number\n"); + } else { + google::InitGoogleLogging(argv[0]); + convert_dataset(argv[1], argv[2], argv[3], argv[4]); +======= + "output_db_file class_number rgb_use \n"); + } else { + google::InitGoogleLogging(argv[0]); + convert_dataset(argv[1], argv[2], argv[3], argv[4], argv[5]); +>>>>>>> new data and new feature dimension +>>>>>>> new data and new feature dimension + } + return 0; +} diff --git a/examples/triplet/create_3d_triplet.sh b/examples/triplet/create_3d_triplet.sh index 3cd8ee469ce..e2f65ea414c 100755 --- a/examples/triplet/create_3d_triplet.sh +++ b/examples/triplet/create_3d_triplet.sh @@ -12,10 +12,13 @@ rm -rf ./examples/triplet/3d_triplet_test_leveldb $EXAMPLES/convert_3d_triplet_data.bin \ $DATA/binary_image_train \ $DATA/binary_label_train \ - ./examples/triplet/3d_triplet_train_leveldb + ./examples/triplet/3d_triplet_train_leveldb \ + 4 \ + 0 $EXAMPLES/convert_3d_triplet_data.bin 
\ $DATA/binary_image_test \ $DATA/binary_label_test \ - ./examples/triplet/3d_triplet_test_leveldb - + ./examples/triplet/3d_triplet_test_leveldb \ + 4 \ + 0 echo "Done." diff --git a/examples/triplet/create_3d_triplet.sh.orig b/examples/triplet/create_3d_triplet.sh.orig index 662684c6c87..f634f552d3a 100755 --- a/examples/triplet/create_3d_triplet.sh.orig +++ b/examples/triplet/create_3d_triplet.sh.orig @@ -12,22 +12,22 @@ rm -rf ./examples/triplet/3d_triplet_test_leveldb $EXAMPLES/convert_3d_triplet_data.bin \ $DATA/binary_image_train \ $DATA/binary_label_train \ -<<<<<<< 165e1d595232eb2a908f62887bcf2d5e1743ed10 ./examples/triplet/3d_triplet_train_leveldb \ +<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 + 4 +======= 4 \ 0 +>>>>>>> new data and new feature dimension $EXAMPLES/convert_3d_triplet_data.bin \ $DATA/binary_image_test \ $DATA/binary_label_test \ ./examples/triplet/3d_triplet_test_leveldb \ +<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 + 4 + +======= 4 \ 0 -======= - ./examples/triplet/3d_triplet_train_leveldb -$EXAMPLES/convert_3d_triplet_data.bin \ - $DATA/binary_image_test \ - $DATA/binary_label_test \ - ./examples/triplet/3d_triplet_test_leveldb ->>>>>>> add 3d network training param - +>>>>>>> new data and new feature dimension echo "Done." 
diff --git a/examples/triplet/create_3d_triplet.sh.orig.orig b/examples/triplet/create_3d_triplet.sh.orig.orig index f8ac44ba37b..2c502fe7f0a 100755 --- a/examples/triplet/create_3d_triplet.sh.orig.orig +++ b/examples/triplet/create_3d_triplet.sh.orig.orig @@ -12,28 +12,16 @@ rm -rf ./examples/triplet/3d_triplet_test_leveldb $EXAMPLES/convert_3d_triplet_data.bin \ $DATA/binary_image_train \ $DATA/binary_label_train \ -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= -<<<<<<< 165e1d595232eb2a908f62887bcf2d5e1743ed10 - ./examples/triplet/3d_triplet_train_leveldb \ - 4 \ - 0 -$EXAMPLES/convert_3d_triplet_data.bin \ - $DATA/binary_image_test \ - $DATA/binary_label_test \ - ./examples/triplet/3d_triplet_test_leveldb \ - 4 \ - 0 -======= ->>>>>>> add 3d network training param ./examples/triplet/3d_triplet_train_leveldb $EXAMPLES/convert_3d_triplet_data.bin \ $DATA/binary_image_test \ $DATA/binary_label_test \ +<<<<<<< befbd1e1805c429d9d9e767289933604428356c9 ./examples/triplet/3d_triplet_test_leveldb -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= ->>>>>>> add 3d network training param ->>>>>>> add 3d network training param +======= + ./examples/triplet/3d_triplet_test_leveldb \ + 4 \ + 0 +>>>>>>> new data and new feature dimension echo "Done." 
diff --git a/src/caffe/data_transformer.cpp b/src/caffe/data_transformer.cpp index 68e30f982cf..e5a8af55f27 100644 --- a/src/caffe/data_transformer.cpp +++ b/src/caffe/data_transformer.cpp @@ -465,6 +465,8 @@ void DataTransformer::Transform(Blob* input_blob, } } +<<<<<<< HEAD +<<<<<<< HEAD <<<<<<< HEAD <<<<<<< HEAD <<<<<<< HEAD diff --git a/src/caffe/test/test_net.cpp b/src/caffe/test/test_net.cpp index 45c149f0da5..31890660636 100644 --- a/src/caffe/test/test_net.cpp +++ b/src/caffe/test/test_net.cpp @@ -315,7 +315,7 @@ class NetTest : public MultiDeviceTest { " type: 'InnerProduct' " " inner_product_param { " " num_output: 10 " - " bias_term: " << bias_str << + " bias_term: " << bias_term << " weight_filler { " " type: 'gaussian' " " std: 10 " @@ -341,7 +341,7 @@ class NetTest : public MultiDeviceTest { " type: 'InnerProduct' " " inner_product_param { " " num_output: 10 " - " bias_term: " << bias_str << + " bias_term: " << bias_term << " weight_filler { " " type: 'gaussian' " " std: 10 " @@ -1312,10 +1312,11 @@ TYPED_TEST(NetTest, TestSharedWeightsResume) { EXPECT_EQ(this->net_->layer_names()[2], "innerproduct2"); Blob* ip1_weights = this->net_->layers()[1]->blobs()[0].get(); Blob* ip2_weights = this->net_->layers()[2]->blobs()[0].get(); - // Check that data and diff blobs of shared weights share the same memory - // locations. + // Check that data blobs of shared weights share the same location in memory. EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); - EXPECT_EQ(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); + // Check that diff blobs of shared weights are at different locations in + // memory. (The diffs should be accumulated at update time.) 
+ EXPECT_NE(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); this->net_->ForwardBackward(bottom); this->net_->Update(); Blob shared_params; @@ -1338,13 +1339,14 @@ TYPED_TEST(NetTest, TestSharedWeightsResume) { ASSERT_FALSE(NULL == ip1_weights); ASSERT_FALSE(NULL == ip2_weights); EXPECT_NE(ip1_weights, ip2_weights); - // Check that data and diff blobs of shared weights share the same memory - // locations. + // Check that data blobs of shared weights share the same location in memory. EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); - EXPECT_EQ(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); for (int i = 0; i < count; ++i) { EXPECT_FLOAT_EQ(shared_params.cpu_data()[i], ip1_weights->cpu_data()[i]); } + // Check that diff blobs of shared weights are at different locations in + // memory. (The diffs should be accumulated at update time.) + EXPECT_NE(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); } TYPED_TEST(NetTest, TestParamPropagateDown) { From 54f81ccec12fe05658aad35301250aa8f9b307f1 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Fri, 28 Aug 2015 09:30:08 +0800 Subject: [PATCH 79/82] update triplet data generation codes for RGB using --- examples/triplet/convert_3d_triplet_data.cpp | 95 +++++++++------- .../triplet/convert_3d_triplet_data.cpp.orig | 105 +++++++++--------- 2 files changed, 112 insertions(+), 88 deletions(-) diff --git a/examples/triplet/convert_3d_triplet_data.cpp b/examples/triplet/convert_3d_triplet_data.cpp index 3a30d3fb082..2147e1ae1ca 100644 --- a/examples/triplet/convert_3d_triplet_data.cpp +++ b/examples/triplet/convert_3d_triplet_data.cpp @@ -1,13 +1,13 @@ // Usage: // convert_3d_data input_image_file input_label_file output_db_file #include // NOLINT(readability/streams) -#include #include #include "caffe/proto/caffe.pb.h" #include "caffe/util/math_functions.hpp" #include "glog/logging.h" #include "google/protobuf/text_format.h" #include "leveldb/db.h" +#include "math.h" #include "stdint.h" uint32_t 
swap_endian(uint32_t val) { @@ -17,17 +17,28 @@ uint32_t swap_endian(uint32_t val) { void read_image(std::ifstream* image_file, std::ifstream* label_file, uint32_t index, uint32_t rows, uint32_t cols, - char* pixels, char* label_temp, signed char* label) { - image_file->seekg(index * rows * cols + 16); - image_file->read(pixels, rows * cols); - label_file->seekg(index * 4 + 8); - label_file->read(label_temp, 4); - for (int i = 0; i < 4; i++) - *(label+i) = (signed char)*(label_temp+i); + char* pixels, char* label_temp, signed char* label, int rgb_use) { + if (rgb_use == 0) + { + image_file->seekg(index * rows * cols + 16); + image_file->read(pixels, rows * cols); + label_file->seekg(index * 4 + 8); + label_file->read(label_temp, 4); + for (int i = 0; i < 4; i++) + *(label+i) = (signed char)*(label_temp+i); + } else { + image_file->seekg(3 * index * rows * cols + 16); + image_file->read(pixels, 3 * rows * cols); + label_file->seekg(index * 4 + 8); + label_file->read(label_temp, 4); + for (int i = 0; i < 4; i++) + *(label+i) = (signed char)*(label_temp+i); + } } void convert_dataset(const char* image_filename, const char* label_filename, - const char* db_filename, const char* class_number) { + const char* db_filename, const char* class_number, const char* rgb_use) { + int rgb_use1 = atoi(rgb_use); int class_num = atoi(class_number); // Open files std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); @@ -73,16 +84,24 @@ void convert_dataset(const char* image_filename, const char* label_filename, signed char* label_k = new signed char[4]; signed char* label_l = new signed char[4]; // label for pair wise signed char* label_m = new signed char[4]; - char* pixels1 = new char[rows * cols]; - char* pixels2 = new char[rows * cols]; - char* pixels3 = new char[rows * cols]; - char* pixels4 = new char[rows * cols]; - char* pixels5 = new char[rows * cols]; + int db_size; + if (rgb_use1 == 0) + db_size = rows * cols; + else + db_size = 3 * rows * cols; + 
char* pixels1 = new char[db_size]; + char* pixels2 = new char[db_size]; + char* pixels3 = new char[db_size]; + char* pixels4 = new char[db_size]; + char* pixels5 = new char[db_size]; const int kMaxKeyLength = 10; char key[kMaxKeyLength]; std::string value; caffe::Datum datum; - datum.set_channels(1); + if (rgb_use1 == 0) + datum.set_channels(1); + else + datum.set_channels(3); datum.set_height(rows); datum.set_width(cols); LOG(INFO) << "A total of " << num_items << " items."; @@ -100,15 +119,15 @@ void convert_dataset(const char* image_filename, const char* label_filename, int l = caffe::caffe_rng_rand() % num_items; // pick pair wise groups int m = caffe::caffe_rng_rand() % num_items; read_image(&image_file, &label_file, i, rows, cols, // read triplet - pixels1, label_temp, label_i); + pixels1, label_temp, label_i, rgb_use1); read_image(&image_file, &label_file, j, rows, cols, - pixels2, label_temp, label_j); + pixels2, label_temp, label_j, rgb_use1); read_image(&image_file, &label_file, k, rows, cols, - pixels3, label_temp, label_k); + pixels3, label_temp, label_k, rgb_use1); read_image(&image_file, &label_file, l, rows, cols, // read pair wise - pixels4, label_temp, label_l); + pixels4, label_temp, label_l, rgb_use1); read_image(&image_file, &label_file, m, rows, cols, - pixels5, label_temp, label_m); + pixels5, label_temp, label_m, rgb_use1); bool pair_pass = false; bool triplet1_pass = false; @@ -140,35 +159,35 @@ void convert_dataset(const char* image_filename, const char* label_filename, triplet2_pass = true; if (pair_pass && (*label_i == *label_m)) triplet3_class_same = true; - if (triplet3_class_same && dist_im > 100/3 && dist_im < 100) + if (triplet3_class_same && dist_im > 100/3) triplet3_pass = true; if (pair_pass && triplet1_pass && triplet2_pass && triplet3_pass) { - datum.set_data(pixels1, rows*cols); // set data - datum.set_label(int(*label_i)); + datum.set_data(pixels1, db_size); // set data + datum.set_label(static_cast(*label_i)); 
datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", counter); db->Put(leveldb::WriteOptions(), std::string(key), value); counter++; - datum.set_data(pixels2, rows*cols); // set data - datum.set_label(int(*label_j)); + datum.set_data(pixels2, db_size); // set data + datum.set_label(static_cast(*label_j)); datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", counter); db->Put(leveldb::WriteOptions(), std::string(key), value); counter++; - datum.set_data(pixels3, rows*cols); // set data - datum.set_label(int(*label_k)); + datum.set_data(pixels3, db_size); // set data + datum.set_label(static_cast(*label_k)); datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", counter); db->Put(leveldb::WriteOptions(), std::string(key), value); counter++; - datum.set_data(pixels4, rows*cols); // set data - datum.set_label(int(*label_l)); + datum.set_data(pixels4, db_size); // set data + datum.set_label(static_cast(*label_l)); datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", counter); db->Put(leveldb::WriteOptions(), std::string(key), value); counter++; - datum.set_data(pixels5, rows*cols); // set data - datum.set_label(int(*label_m)); + datum.set_data(pixels5, db_size); // set data + datum.set_label(static_cast(*label_m)); datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", counter); db->Put(leveldb::WriteOptions(), std::string(key), value); @@ -176,9 +195,9 @@ void convert_dataset(const char* image_filename, const char* label_filename, } else { class_ind--; } - } // iteration in the samples of all class - } // iteration in the samples in one class - } // iteration in times + } // iteration in the samples of all class + } // iteration in the samples in one class + } // iteration in times delete db; delete pixels1; delete pixels2; @@ -188,15 +207,15 @@ void convert_dataset(const char* image_filename, const char* label_filename, } int main(int argc, char** argv) { - if (argc != 5) { - printf("This 
script converts the images dataset to the leveldb format used\n" + if (argc != 6) { + printf("This script converts the dataset to the leveldb format used\n" "by caffe to train a triplet network.\n" "Usage:\n" " convert_3d_data input_image_file input_label_file " - "output_db_file class_number\n"); + "output_db_file class_number rgb_use \n"); } else { google::InitGoogleLogging(argv[0]); - convert_dataset(argv[1], argv[2], argv[3], argv[4]); + convert_dataset(argv[1], argv[2], argv[3], argv[4], argv[5]); } return 0; } diff --git a/examples/triplet/convert_3d_triplet_data.cpp.orig b/examples/triplet/convert_3d_triplet_data.cpp.orig index 9a08806e70e..9d523f2309c 100644 --- a/examples/triplet/convert_3d_triplet_data.cpp.orig +++ b/examples/triplet/convert_3d_triplet_data.cpp.orig @@ -1,13 +1,13 @@ // Usage: // convert_3d_data input_image_file input_label_file output_db_file #include // NOLINT(readability/streams) +#include #include #include "caffe/proto/caffe.pb.h" #include "caffe/util/math_functions.hpp" #include "glog/logging.h" #include "google/protobuf/text_format.h" #include "leveldb/db.h" -#include "math.h" #include "stdint.h" uint32_t swap_endian(uint32_t val) { @@ -17,8 +17,22 @@ uint32_t swap_endian(uint32_t val) { void read_image(std::ifstream* image_file, std::ifstream* label_file, uint32_t index, uint32_t rows, uint32_t cols, +<<<<<<< fe02a6fcce001ba81c2e792eb5325a24a5ea888a + char* pixels, char* label_temp, signed char* label) { + image_file->seekg(index * rows * cols + 16); + image_file->read(pixels, rows * cols); + label_file->seekg(index * 4 + 8); + label_file->read(label_temp, 4); + for (int i = 0; i < 4; i++) + *(label+i) = (signed char)*(label_temp+i); +} + +void convert_dataset(const char* image_filename, const char* label_filename, + const char* db_filename, const char* class_number) { +======= char* pixels, char* label_temp, signed char* label, int rgb_use) { - if (rgb_use == 0) { + if (rgb_use == 0) + { image_file->seekg(index * rows * cols + 
16); image_file->read(pixels, rows * cols); label_file->seekg(index * 4 + 8); @@ -36,9 +50,9 @@ void read_image(std::ifstream* image_file, std::ifstream* label_file, } void convert_dataset(const char* image_filename, const char* label_filename, - const char* db_filename, const char* class_number, - const char* rgb_use) { + const char* db_filename, const char* class_number, const char* rgb_use) { int rgb_use1 = atoi(rgb_use); +>>>>>>> update triplet data generation codes for RGB using int class_num = atoi(class_number); // Open files std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); @@ -84,35 +98,25 @@ void convert_dataset(const char* image_filename, const char* label_filename, signed char* label_k = new signed char[4]; signed char* label_l = new signed char[4]; // label for pair wise signed char* label_m = new signed char[4]; -<<<<<<< d189d0a9af2a0b5e5baf61f9ab66ee9cbf75c6ac - int db_size; - if (rgb_use1 == 0) - db_size = rows * cols; - else - db_size = 3 * rows * cols; - char* pixels1 = new char[db_size]; - char* pixels2 = new char[db_size]; - char* pixels3 = new char[db_size]; - char* pixels4 = new char[db_size]; - char* pixels5 = new char[db_size]; -======= char* pixels1 = new char[rows * cols]; char* pixels2 = new char[rows * cols]; char* pixels3 = new char[rows * cols]; char* pixels4 = new char[rows * cols]; char* pixels5 = new char[rows * cols]; ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise const int kMaxKeyLength = 10; char key[kMaxKeyLength]; std::string value; caffe::Datum datum; - datum.set_channels(1); + if (rgb_use1 == 0) + datum.set_channels(1); + else + datum.set_channels(3); datum.set_height(rows); datum.set_width(cols); LOG(INFO) << "A total of " << num_items << " items."; LOG(INFO) << "Rows: " << rows << " Cols: " << cols; int counter = 0; - for (unsigned int times = 0; times < 5; ++times) { + for (unsigned int times = 0; times < 10; ++times) { // iteration in the samples 
of all class for (unsigned int itemid = 0; itemid < num_items/class_num; ++itemid) { // iteration in the samples in one class @@ -124,17 +128,6 @@ void convert_dataset(const char* image_filename, const char* label_filename, int l = caffe::caffe_rng_rand() % num_items; // pick pair wise groups int m = caffe::caffe_rng_rand() % num_items; read_image(&image_file, &label_file, i, rows, cols, // read triplet -<<<<<<< d189d0a9af2a0b5e5baf61f9ab66ee9cbf75c6ac - pixels1, label_temp, label_i, rgb_use1); - read_image(&image_file, &label_file, j, rows, cols, - pixels2, label_temp, label_j, rgb_use1); - read_image(&image_file, &label_file, k, rows, cols, - pixels3, label_temp, label_k, rgb_use1); - read_image(&image_file, &label_file, l, rows, cols, // read pair wise - pixels4, label_temp, label_l, rgb_use1); - read_image(&image_file, &label_file, m, rows, cols, - pixels5, label_temp, label_m, rgb_use1); -======= pixels1, label_temp, label_i); read_image(&image_file, &label_file, j, rows, cols, pixels2, label_temp, label_j); @@ -144,7 +137,6 @@ void convert_dataset(const char* image_filename, const char* label_filename, pixels4, label_temp, label_l); read_image(&image_file, &label_file, m, rows, cols, pixels5, label_temp, label_m); ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise bool pair_pass = false; bool triplet1_pass = false; @@ -168,11 +160,7 @@ void convert_dataset(const char* image_filename, const char* label_filename, float dist_ij = std::sqrt(ij_x + ij_y + ij_z); float dist_im = std::sqrt(im_x + im_y + im_z); -<<<<<<< d189d0a9af2a0b5e5baf61f9ab66ee9cbf75c6ac - if (*label_i == *label_j && dist_ij < 100/2 && dist_ij != 0) -======= - if (*label_i == *label_j && dist_ij < 100/2) ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise + if (*label_i == *label_j && dist_ij < 100/3 && dist_ij != 0) pair_pass = true; if (pair_pass && (*label_i != *label_k)) triplet1_pass = true; @@ 
-180,35 +168,60 @@ void convert_dataset(const char* image_filename, const char* label_filename, triplet2_pass = true; if (pair_pass && (*label_i == *label_m)) triplet3_class_same = true; - if (triplet3_class_same && dist_im > 100*sqrt(2)) + if (triplet3_class_same && dist_im > 100/3) triplet3_pass = true; if (pair_pass && triplet1_pass && triplet2_pass && triplet3_pass) { +<<<<<<< fe02a6fcce001ba81c2e792eb5325a24a5ea888a datum.set_data(pixels1, rows*cols); // set data + datum.set_label(int(*label_i)); +======= + datum.set_data(pixels1, db_size); // set data datum.set_label(static_cast(*label_i)); +>>>>>>> update triplet data generation codes for RGB using datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", counter); db->Put(leveldb::WriteOptions(), std::string(key), value); counter++; +<<<<<<< fe02a6fcce001ba81c2e792eb5325a24a5ea888a datum.set_data(pixels2, rows*cols); // set data + datum.set_label(int(*label_j)); +======= + datum.set_data(pixels2, db_size); // set data datum.set_label(static_cast(*label_j)); +>>>>>>> update triplet data generation codes for RGB using datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", counter); db->Put(leveldb::WriteOptions(), std::string(key), value); counter++; +<<<<<<< fe02a6fcce001ba81c2e792eb5325a24a5ea888a datum.set_data(pixels3, rows*cols); // set data + datum.set_label(int(*label_k)); +======= + datum.set_data(pixels3, db_size); // set data datum.set_label(static_cast(*label_k)); +>>>>>>> update triplet data generation codes for RGB using datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", counter); db->Put(leveldb::WriteOptions(), std::string(key), value); counter++; +<<<<<<< fe02a6fcce001ba81c2e792eb5325a24a5ea888a datum.set_data(pixels4, rows*cols); // set data + datum.set_label(int(*label_l)); +======= + datum.set_data(pixels4, db_size); // set data datum.set_label(static_cast(*label_l)); +>>>>>>> update triplet data generation codes for RGB using 
datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", counter); db->Put(leveldb::WriteOptions(), std::string(key), value); counter++; +<<<<<<< fe02a6fcce001ba81c2e792eb5325a24a5ea888a datum.set_data(pixels5, rows*cols); // set data + datum.set_label(int(*label_m)); +======= + datum.set_data(pixels5, db_size); // set data datum.set_label(static_cast(*label_m)); +>>>>>>> update triplet data generation codes for RGB using datum.SerializeToString(&value); snprintf(key, kMaxKeyLength, "%08d", counter); db->Put(leveldb::WriteOptions(), std::string(key), value); @@ -216,9 +229,9 @@ void convert_dataset(const char* image_filename, const char* label_filename, } else { class_ind--; } - } // iteration in the samples of all class - } // iteration in the samples in one class - } // iteration in times + } // iteration in the samples of all class + } // iteration in the samples in one class + } // iteration in times delete db; delete pixels1; delete pixels2; @@ -228,23 +241,15 @@ void convert_dataset(const char* image_filename, const char* label_filename, } int main(int argc, char** argv) { -<<<<<<< d189d0a9af2a0b5e5baf61f9ab66ee9cbf75c6ac - if (argc != 6) { -======= if (argc != 5) { ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - printf("This script converts the dataset to the leveldb format used\n" + printf("This script converts the images dataset to the leveldb format used\n" "by caffe to train a triplet network.\n" "Usage:\n" " convert_3d_data input_image_file input_label_file " -<<<<<<< d189d0a9af2a0b5e5baf61f9ab66ee9cbf75c6ac - "output_db_file class_number rgb_use \n"); -======= "output_db_file class_number\n"); ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise } else { google::InitGoogleLogging(argv[0]); - convert_dataset(argv[1], argv[2], argv[3], argv[4], argv[5]); + convert_dataset(argv[1], argv[2], argv[3], argv[4]); } return 0; } From 
12b3bd90864c6683b88e1c172882374624629fa4 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Sat, 29 Aug 2015 14:20:16 +0800 Subject: [PATCH 80/82] add 2 more class in data generation and set the dimension of feature as 3 useful codes useful codes --- CMakeLists.txt | 9 +- Makefile | 43 +- Makefile.config.example | 7 +- Makefile.orig | 649 ----- Makefile.orig.orig | 663 ----- Makefile_BACKUP_61727 | 649 ----- Makefile_BASE_61727 | 0 Makefile_LOCAL_61727 | 633 ----- Makefile_REMOTE_61727 | 631 ----- cmake/ConfigGen.cmake | 12 - cmake/Dependencies.cmake | 41 +- cmake/Summary.cmake | 18 +- cmake/Templates/CaffeConfig.cmake.in | 26 +- cmake/Templates/caffe_config.h.in | 5 - docs/installation.md | 9 +- docs/multigpu.md | 26 - .../cpp_classification/classification.cpp | 8 - examples/mnist/convert_mnist_data.cpp | 12 - examples/mnist/lenet_solver.prototxt | 2 +- examples/mnist/lenet_train_test.prototxt.orig | 168 -- .../lenet_train_test_BACKUP_8918.prototxt | 180 -- .../mnist/lenet_train_test_BASE_8918.prototxt | 181 -- .../lenet_train_test_LOCAL_8918.prototxt | 168 -- .../lenet_train_test_REMOTE_8918.prototxt | 181 -- .../siamese/convert_mnist_siamese_data.cpp | 34 +- examples/siamese/lfw_siamese.prototxt.orig | 117 - examples/siamese/mnist_siamese.prototxt.orig | 126 - .../mnist_siamese_solver.prototxt.orig | 37 - examples/triplet/3d_triplet.prototxt | 4 +- examples/triplet/3d_triplet_solver.prototxt | 12 +- .../triplet/3d_triplet_train_test.prototxt | 2 +- .../3d_triplet_train_test.prototxt.orig | 184 -- examples/triplet/convert_3d_triplet_data.cpp | 6 +- .../triplet/convert_3d_triplet_data.cpp.orig | 255 -- .../convert_3d_triplet_data.cpp.orig.orig | 275 -- .../triplet/convert_lfw_triplet_data.cpp.orig | 148 - .../convert_mnist_triplet_data.cpp.orig | 150 - examples/triplet/create_3d_triplet.sh | 4 +- examples/triplet/create_3d_triplet.sh.orig | 33 - .../triplet/lfw_triplet_solver.prototxt.orig | 40 - .../mnist_orpe_train_test.prototxt.orig | 184 -- 
examples/triplet/readme.md.orig | 243 -- include/caffe/common_layers.hpp | 4 +- include/caffe/data_layers.hpp | 3 +- include/caffe/data_layers.hpp.orig | 431 --- include/caffe/data_layers.hpp.orig.orig | 365 --- include/caffe/data_layers.hpp.orig.orig.orig | 464 ---- include/caffe/data_transformer.hpp.orig | 167 -- include/caffe/filler.hpp | 56 + include/caffe/filler.hpp.orig | 291 -- include/caffe/filler.hpp.orig.orig | 336 --- include/caffe/loss_layers.hpp | 3 - include/caffe/loss_layers.hpp.orig | 848 ------ include/caffe/loss_layers.hpp.orig.orig | 880 ------ include/caffe/python_layer.hpp | 30 +- include/caffe/python_layer.hpp.orig.orig | 74 - include/caffe/solver.hpp | 2 - include/caffe/util/db_leveldb.hpp | 2 - include/caffe/util/db_lmdb.hpp | 2 - include/caffe/util/io.hpp | 2 - include/caffe/vision_layers.hpp.orig | 549 ---- include/caffe/vision_layers.hpp.orig.orig | 555 ---- python/caffe/__init__.py | 2 +- python/caffe/_caffe.cpp | 9 - python/caffe/io.py | 2 +- python/caffe/net_spec.py | 10 +- python/caffe/pycaffe.py | 3 +- python/caffe/test/test_layer_type_list.py | 1 - python/caffe/test/test_net_spec.py.orig | 88 - python/caffe/test/test_python_layer.py.orig | 153 - scripts/travis/travis_build_and_test.sh | 15 +- .../travis/travis_setup_makefile_config.sh | 6 - src/caffe/data_transformer.cpp | 117 +- src/caffe/data_transformer.cpp.orig | 687 ----- src/caffe/layers/base_data_layer.cpp | 94 +- .../layers/base_data_layer.cpp.orig.orig | 179 -- src/caffe/layers/base_data_layer.cu | 17 +- src/caffe/layers/base_data_layer.cu.orig.orig | 60 - src/caffe/layers/concat_layer.cpp | 6 - src/caffe/layers/concat_layer.cu | 17 +- src/caffe/layers/concat_layer.cu.orig | 96 - src/caffe/layers/concat_layer.cu.orig.orig | 101 - src/caffe/layers/data_layer.cpp | 84 +- src/caffe/layers/data_layer.cpp.orig.orig | 234 -- src/caffe/layers/image_data_layer.cpp | 30 +- .../layers/image_data_layer.cpp.orig.orig | 213 -- src/caffe/layers/memory_data_layer.cpp | 4 - 
src/caffe/layers/slice_layer.cpp | 7 +- src/caffe/layers/slice_layer.cu | 3 +- src/caffe/layers/tile_layer.cu | 53 +- src/caffe/layers/triplet_loss_layer.cpp | 12 +- src/caffe/layers/triplet_loss_layer.cpp.orig | 472 ---- .../layers/triplet_loss_layer.cpp.orig.orig | 492 ---- ...ss_layer.cu.orig => triplet_loss_layer.cu} | 40 - .../layers/triplet_loss_layer.cu.orig.orig | 764 ----- .../triplet_loss_layer_BACKUP_62802.cpp | 423 --- .../layers/triplet_loss_layer_BASE_62802.cpp | 298 -- .../layers/triplet_loss_layer_LOCAL_62802.cpp | 261 -- .../triplet_loss_layer_REMOTE_62802.cpp | 397 --- src/caffe/layers/window_data_layer.cpp | 2 - src/caffe/net.cpp | 20 +- src/caffe/net.cpp.orig.orig | 856 ------ src/caffe/proto/caffe.proto | 43 +- src/caffe/proto/caffe.proto.orig | 1165 -------- src/caffe/proto/caffe.proto.orig.orig | 1190 -------- src/caffe/solver.cpp | 134 +- src/caffe/solver.cpp.orig | 1464 ---------- src/caffe/solver.cpp.orig.orig | 804 ------ src/caffe/test/test_accuracy_layer.cpp | 12 +- src/caffe/test/test_concat_layer.cpp | 23 - .../test/test_data/generate_sample_data.py | 14 +- .../generate_sample_data.py.orig.orig | 105 - src/caffe/test/test_data_layer.cpp | 6 - src/caffe/test/test_data_transformer.cpp | 2 - src/caffe/test/test_db.cpp | 2 - src/caffe/test/test_eltwise_layer.cpp | 4 +- src/caffe/test/test_gradient_based_solver.cpp | 236 +- .../test_gradient_based_solver.cpp.orig.orig | 967 ------- src/caffe/test/test_image_data_layer.cpp | 2 - src/caffe/test/test_io.cpp | 2 - src/caffe/test/test_layer_factory.cpp | 4 - src/caffe/test/test_memory_data_layer.cpp | 5 +- src/caffe/test/test_net.cpp | 149 +- src/caffe/test/test_net.cpp.orig | 2449 ----------------- src/caffe/test/test_slice_layer.cpp | 27 - src/caffe/test/test_triplet_loss_layer.orig | 230 -- .../test/test_triplet_loss_layer.orig.orig | 317 --- src/caffe/test/test_upgrade_proto.cpp | 12 +- src/caffe/util/db.cpp | 14 +- src/caffe/util/db_leveldb.cpp | 2 - src/caffe/util/db_lmdb.cpp | 2 - 
src/caffe/util/io.cpp | 10 +- src/caffe/util/upgrade_proto.cpp | 6 + tools/caffe.cpp | 1 - tools/compute_image_mean.cpp | 4 - tools/convert_imageset.cpp | 4 - tools/extract_features.cpp | 189 ++ 137 files changed, 894 insertions(+), 26273 deletions(-) delete mode 100644 Makefile.orig delete mode 100644 Makefile.orig.orig delete mode 100644 Makefile_BACKUP_61727 delete mode 100644 Makefile_BASE_61727 delete mode 100644 Makefile_LOCAL_61727 delete mode 100644 Makefile_REMOTE_61727 delete mode 100644 docs/multigpu.md delete mode 100644 examples/mnist/lenet_train_test.prototxt.orig delete mode 100644 examples/mnist/lenet_train_test_BACKUP_8918.prototxt delete mode 100644 examples/mnist/lenet_train_test_BASE_8918.prototxt delete mode 100644 examples/mnist/lenet_train_test_LOCAL_8918.prototxt delete mode 100644 examples/mnist/lenet_train_test_REMOTE_8918.prototxt delete mode 100644 examples/siamese/lfw_siamese.prototxt.orig delete mode 100644 examples/siamese/mnist_siamese.prototxt.orig delete mode 100644 examples/siamese/mnist_siamese_solver.prototxt.orig delete mode 100644 examples/triplet/3d_triplet_train_test.prototxt.orig delete mode 100644 examples/triplet/convert_3d_triplet_data.cpp.orig delete mode 100644 examples/triplet/convert_3d_triplet_data.cpp.orig.orig delete mode 100644 examples/triplet/convert_lfw_triplet_data.cpp.orig delete mode 100644 examples/triplet/convert_mnist_triplet_data.cpp.orig delete mode 100755 examples/triplet/create_3d_triplet.sh.orig delete mode 100644 examples/triplet/lfw_triplet_solver.prototxt.orig delete mode 100644 examples/triplet/mnist_orpe_train_test.prototxt.orig delete mode 100644 examples/triplet/readme.md.orig delete mode 100644 include/caffe/data_layers.hpp.orig delete mode 100644 include/caffe/data_layers.hpp.orig.orig delete mode 100644 include/caffe/data_layers.hpp.orig.orig.orig delete mode 100644 include/caffe/data_transformer.hpp.orig delete mode 100644 include/caffe/filler.hpp.orig delete mode 100644 
include/caffe/filler.hpp.orig.orig delete mode 100644 include/caffe/loss_layers.hpp.orig delete mode 100644 include/caffe/loss_layers.hpp.orig.orig delete mode 100644 include/caffe/python_layer.hpp.orig.orig delete mode 100644 include/caffe/vision_layers.hpp.orig delete mode 100644 include/caffe/vision_layers.hpp.orig.orig delete mode 100644 python/caffe/test/test_net_spec.py.orig delete mode 100644 python/caffe/test/test_python_layer.py.orig delete mode 100644 src/caffe/data_transformer.cpp.orig delete mode 100644 src/caffe/layers/base_data_layer.cpp.orig.orig delete mode 100644 src/caffe/layers/base_data_layer.cu.orig.orig delete mode 100644 src/caffe/layers/concat_layer.cu.orig delete mode 100644 src/caffe/layers/concat_layer.cu.orig.orig delete mode 100644 src/caffe/layers/data_layer.cpp.orig.orig delete mode 100644 src/caffe/layers/image_data_layer.cpp.orig.orig delete mode 100644 src/caffe/layers/triplet_loss_layer.cpp.orig delete mode 100644 src/caffe/layers/triplet_loss_layer.cpp.orig.orig rename src/caffe/layers/{triplet_loss_layer.cu.orig => triplet_loss_layer.cu} (90%) delete mode 100644 src/caffe/layers/triplet_loss_layer.cu.orig.orig delete mode 100644 src/caffe/layers/triplet_loss_layer_BACKUP_62802.cpp delete mode 100644 src/caffe/layers/triplet_loss_layer_BASE_62802.cpp delete mode 100644 src/caffe/layers/triplet_loss_layer_LOCAL_62802.cpp delete mode 100644 src/caffe/layers/triplet_loss_layer_REMOTE_62802.cpp delete mode 100644 src/caffe/net.cpp.orig.orig delete mode 100644 src/caffe/proto/caffe.proto.orig delete mode 100644 src/caffe/proto/caffe.proto.orig.orig delete mode 100644 src/caffe/solver.cpp.orig delete mode 100644 src/caffe/solver.cpp.orig.orig delete mode 100644 src/caffe/test/test_data/generate_sample_data.py.orig.orig delete mode 100644 src/caffe/test/test_gradient_based_solver.cpp.orig.orig delete mode 100644 src/caffe/test/test_net.cpp.orig delete mode 100644 src/caffe/test/test_triplet_loss_layer.orig delete mode 100644 
src/caffe/test/test_triplet_loss_layer.orig.orig create mode 100644 tools/extract_features.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 37f937fe489..ef599b68922 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -16,16 +16,13 @@ include(cmake/ConfigGen.cmake) # ---[ Options caffe_option(CPU_ONLY "Build Caffe without CUDA support" OFF) # TODO: rename to USE_CUDA -caffe_option(USE_CUDNN "Build Caffe with cuDNN library support" ON IF NOT CPU_ONLY) +caffe_option(USE_CUDNN "Build Caffe with cuDNN libary support" ON IF NOT CPU_ONLY) caffe_option(BUILD_SHARED_LIBS "Build shared libraries" ON) caffe_option(BUILD_python "Build Python wrapper" ON) -set(python_version "2" CACHE STRING "Specify which Python version to use") +set(python_version "2" CACHE STRING "Specify which python version to use") caffe_option(BUILD_matlab "Build Matlab wrapper" OFF IF UNIX OR APPLE) caffe_option(BUILD_docs "Build documentation" ON IF UNIX OR APPLE) -caffe_option(BUILD_python_layer "Build the Caffe Python layer" ON) -caffe_option(USE_LMDB "Build with lmdb" ON) -caffe_option(USE_LEVELDB "Build with levelDB" ON) -caffe_option(USE_OPENCV "Build with OpenCV support" ON) +caffe_option(BUILD_python_layer "Build the Caffe python layer" ON) # ---[ Dependencies include(cmake/Dependencies.cmake) diff --git a/Makefile b/Makefile index e445ebed568..2d59ee855ff 100644 --- a/Makefile +++ b/Makefile @@ -169,23 +169,9 @@ ifneq ($(CPU_ONLY), 1) LIBRARY_DIRS += $(CUDA_LIB_DIR) LIBRARIES := cudart cublas curand endif - -LIBRARIES += glog gflags protobuf boost_system m hdf5_hl hdf5 - -# handle IO dependencies -USE_LEVELDB ?= 1 -USE_LMDB ?= 1 -USE_OPENCV ?= 1 - -ifeq ($(USE_LEVELDB), 1) - LIBRARIES += leveldb snappy -endif -ifeq ($(USE_LMDB), 1) - LIBRARIES += lmdb -endif -ifeq ($(USE_OPENCV), 1) - LIBRARIES += opencv_core opencv_highgui opencv_imgproc -endif +LIBRARIES += glog gflags protobuf leveldb snappy \ + lmdb boost_system hdf5_hl hdf5 m \ + opencv_core opencv_highgui opencv_imgproc 
#opencv_imgcodecs PYTHON_LIBRARIES := boost_python python2.7 WARNINGS := -Wall -Wno-sign-compare @@ -242,7 +228,7 @@ ifeq ($(LINUX), 1) CXX ?= /usr/bin/g++ GCCVERSION := $(shell $(CXX) -dumpversion | cut -f1,2 -d.) # older versions of gcc are too dumb to build boost with -Wuninitalized - ifeq ($(shell echo $(GCCVERSION) \< 4.6 | bc), 1) + ifeq ($(shell echo | awk '{exit $(GCCVERSION) < 4.6;}'), 1) WARNINGS += -Wno-uninitialized endif # boost::thread is reasonably called boost_thread (compare OS X) @@ -257,7 +243,7 @@ ifeq ($(OSX), 1) CXX := /usr/bin/clang++ ifneq ($(CPU_ONLY), 1) CUDA_VERSION := $(shell $(CUDA_DIR)/bin/nvcc -V | grep -o 'release \d' | grep -o '\d') - ifeq ($(shell echo $(CUDA_VERSION) \< 7.0 | bc), 1) + ifeq ($(shell echo | awk '{exit $(CUDA_VERSION) < 7.0;}'), 1) CXXFLAGS += -stdlib=libstdc++ LINKFLAGS += -stdlib=libstdc++ endif @@ -304,17 +290,6 @@ ifeq ($(USE_CUDNN), 1) COMMON_FLAGS += -DUSE_CUDNN endif -# configure IO libraries -ifeq ($(USE_OPENCV), 1) - COMMON_FLAGS += -DUSE_OPENCV -endif -ifeq ($(USE_LEVELDB), 1) - COMMON_FLAGS += -DUSE_LEVELDB -endif -ifeq ($(USE_LMDB), 1) - COMMON_FLAGS += -DUSE_LMDB -endif - # CPU-only configuration ifeq ($(CPU_ONLY), 1) OBJS := $(PROTO_OBJS) $(CXX_OBJS) @@ -412,11 +387,13 @@ endif ############################## # Define build targets ############################## -.PHONY: all test clean docs linecount lint lintclean tools examples $(DIST_ALIASES) \ +.PHONY: all lib test clean docs linecount lint lintclean tools examples $(DIST_ALIASES) \ py mat py$(PROJECT) mat$(PROJECT) proto runtest \ superclean supercleanlist supercleanfiles warn everything -all: $(STATIC_NAME) $(DYNAMIC_NAME) tools examples +all: lib tools examples + +lib: $(STATIC_NAME) $(DYNAMIC_NAME) everything: $(EVERYTHING_TARGETS) @@ -496,7 +473,7 @@ runtest: $(TEST_ALL_BIN) pytest: py cd python; python -m unittest discover -s caffe/test - + mattest: mat cd matlab; $(MATLAB_DIR)/bin/matlab -nodisplay -r 'caffe.run_tests(), exit()' diff --git 
a/Makefile.config.example b/Makefile.config.example index d870f965737..a873502559f 100644 --- a/Makefile.config.example +++ b/Makefile.config.example @@ -5,12 +5,7 @@ # USE_CUDNN := 1 # CPU-only switch (uncomment to build without GPU support). -CPU_ONLY := 1 - -# uncomment to disable IO dependencies and corresponding data layers -# USE_LEVELDB := 0 -# USE_LMDB := 0 -# USE_OPENCV := 0 +# CPU_ONLY := 1 # To customize your choice of compiler, uncomment and set the following. # N.B. the default for Linux is g++ and the default for OSX is clang++ diff --git a/Makefile.orig b/Makefile.orig deleted file mode 100644 index 7653a57b6e1..00000000000 --- a/Makefile.orig +++ /dev/null @@ -1,649 +0,0 @@ -PROJECT := caffe - -CONFIG_FILE := Makefile.config -# Explicitly check for the config file, otherwise make -k will proceed anyway. -ifeq ($(wildcard $(CONFIG_FILE)),) -$(error $(CONFIG_FILE) not found. See $(CONFIG_FILE).example.) -endif -include $(CONFIG_FILE) - -BUILD_DIR_LINK := $(BUILD_DIR) -ifeq ($(RELEASE_BUILD_DIR),) - RELEASE_BUILD_DIR := .$(BUILD_DIR)_release -endif -ifeq ($(DEBUG_BUILD_DIR),) - DEBUG_BUILD_DIR := .$(BUILD_DIR)_debug -endif - -DEBUG ?= 0 -ifeq ($(DEBUG), 1) - BUILD_DIR := $(DEBUG_BUILD_DIR) - OTHER_BUILD_DIR := $(RELEASE_BUILD_DIR) -else - BUILD_DIR := $(RELEASE_BUILD_DIR) - OTHER_BUILD_DIR := $(DEBUG_BUILD_DIR) -endif - -# All of the directories containing code. -SRC_DIRS := $(shell find * -type d -exec bash -c "find {} -maxdepth 1 \ - \( -name '*.cpp' -o -name '*.proto' \) | grep -q ." \; -print) - -# The target shared library name -LIB_BUILD_DIR := $(BUILD_DIR)/lib -STATIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).a -DYNAMIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).so - -############################## -# Get all source files -############################## -# CXX_SRCS are the source files excluding the test ones. -CXX_SRCS := $(shell find src/$(PROJECT) ! 
-name "test_*.cpp" -name "*.cpp") -# CU_SRCS are the cuda source files -CU_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cu" -name "*.cu") -# TEST_SRCS are the test source files -TEST_MAIN_SRC := src/$(PROJECT)/test/test_caffe_main.cpp -TEST_SRCS := $(shell find src/$(PROJECT) -name "test_*.cpp") -TEST_SRCS := $(filter-out $(TEST_MAIN_SRC), $(TEST_SRCS)) -TEST_CU_SRCS := $(shell find src/$(PROJECT) -name "test_*.cu") -GTEST_SRC := src/gtest/gtest-all.cpp -# TOOL_SRCS are the source files for the tool binaries -TOOL_SRCS := $(shell find tools -name "*.cpp") -# EXAMPLE_SRCS are the source files for the example binaries -EXAMPLE_SRCS := $(shell find examples -name "*.cpp") -# BUILD_INCLUDE_DIR contains any generated header files we want to include. -BUILD_INCLUDE_DIR := $(BUILD_DIR)/src -# PROTO_SRCS are the protocol buffer definitions -PROTO_SRC_DIR := src/$(PROJECT)/proto -PROTO_SRCS := $(wildcard $(PROTO_SRC_DIR)/*.proto) -# PROTO_BUILD_DIR will contain the .cc and obj files generated from -# PROTO_SRCS; PROTO_BUILD_INCLUDE_DIR will contain the .h header files -PROTO_BUILD_DIR := $(BUILD_DIR)/$(PROTO_SRC_DIR) -PROTO_BUILD_INCLUDE_DIR := $(BUILD_INCLUDE_DIR)/$(PROJECT)/proto -# NONGEN_CXX_SRCS includes all source/header files except those generated -# automatically (e.g., by proto). 
-NONGEN_CXX_SRCS := $(shell find \ - src/$(PROJECT) \ - include/$(PROJECT) \ - python/$(PROJECT) \ - matlab/+$(PROJECT)/private \ - examples \ - tools \ - -name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh") -LINT_SCRIPT := scripts/cpp_lint.py -LINT_OUTPUT_DIR := $(BUILD_DIR)/.lint -LINT_EXT := lint.txt -LINT_OUTPUTS := $(addsuffix .$(LINT_EXT), $(addprefix $(LINT_OUTPUT_DIR)/, $(NONGEN_CXX_SRCS))) -EMPTY_LINT_REPORT := $(BUILD_DIR)/.$(LINT_EXT) -NONEMPTY_LINT_REPORT := $(BUILD_DIR)/$(LINT_EXT) -# PY$(PROJECT)_SRC is the python wrapper for $(PROJECT) -PY$(PROJECT)_SRC := python/$(PROJECT)/_$(PROJECT).cpp -PY$(PROJECT)_SO := python/$(PROJECT)/_$(PROJECT).so -PY$(PROJECT)_HXX := include/$(PROJECT)/python_layer.hpp -# MAT$(PROJECT)_SRC is the mex entrance point of matlab package for $(PROJECT) -MAT$(PROJECT)_SRC := matlab/+$(PROJECT)/private/$(PROJECT)_.cpp -ifneq ($(MATLAB_DIR),) - MAT_SO_EXT := $(shell $(MATLAB_DIR)/bin/mexext) -endif -MAT$(PROJECT)_SO := matlab/+$(PROJECT)/private/$(PROJECT)_.$(MAT_SO_EXT) - -############################## -# Derive generated files -############################## -# The generated files for protocol buffers -PROTO_GEN_HEADER_SRCS := $(addprefix $(PROTO_BUILD_DIR)/, \ - $(notdir ${PROTO_SRCS:.proto=.pb.h})) -PROTO_GEN_HEADER := $(addprefix $(PROTO_BUILD_INCLUDE_DIR)/, \ - $(notdir ${PROTO_SRCS:.proto=.pb.h})) -PROTO_GEN_CC := $(addprefix $(BUILD_DIR)/, ${PROTO_SRCS:.proto=.pb.cc}) -PY_PROTO_BUILD_DIR := python/$(PROJECT)/proto -PY_PROTO_INIT := python/$(PROJECT)/proto/__init__.py -PROTO_GEN_PY := $(foreach file,${PROTO_SRCS:.proto=_pb2.py}, \ - $(PY_PROTO_BUILD_DIR)/$(notdir $(file))) -# The objects corresponding to the source files -# These objects will be linked into the final shared library, so we -# exclude the tool, example, and test objects. 
-CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o}) -CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o}) -PROTO_OBJS := ${PROTO_GEN_CC:.cc=.o} -OBJS := $(PROTO_OBJS) $(CXX_OBJS) $(CU_OBJS) -# tool, example, and test objects -TOOL_OBJS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o}) -TOOL_BUILD_DIR := $(BUILD_DIR)/tools -TEST_CXX_BUILD_DIR := $(BUILD_DIR)/src/$(PROJECT)/test -TEST_CU_BUILD_DIR := $(BUILD_DIR)/cuda/src/$(PROJECT)/test -TEST_CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o}) -TEST_CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o}) -TEST_OBJS := $(TEST_CXX_OBJS) $(TEST_CU_OBJS) -GTEST_OBJ := $(addprefix $(BUILD_DIR)/, ${GTEST_SRC:.cpp=.o}) -EXAMPLE_OBJS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o}) -# Output files for automatic dependency generation -DEPS := ${CXX_OBJS:.o=.d} ${CU_OBJS:.o=.d} ${TEST_CXX_OBJS:.o=.d} \ - ${TEST_CU_OBJS:.o=.d} $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d} -# tool, example, and test bins -TOOL_BINS := ${TOOL_OBJS:.o=.bin} -EXAMPLE_BINS := ${EXAMPLE_OBJS:.o=.bin} -# symlinks to tool bins without the ".bin" extension -TOOL_BIN_LINKS := ${TOOL_BINS:.bin=} -# Put the test binaries in build/test for convenience. -TEST_BIN_DIR := $(BUILD_DIR)/test -TEST_CU_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \ - $(foreach obj,$(TEST_CU_OBJS),$(basename $(notdir $(obj)))))) -TEST_CXX_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \ - $(foreach obj,$(TEST_CXX_OBJS),$(basename $(notdir $(obj)))))) -TEST_BINS := $(TEST_CXX_BINS) $(TEST_CU_BINS) -# TEST_ALL_BIN is the test binary that links caffe dynamically. 
-TEST_ALL_BIN := $(TEST_BIN_DIR)/test_all.testbin - -############################## -# Derive compiler warning dump locations -############################## -WARNS_EXT := warnings.txt -CXX_WARNS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o.$(WARNS_EXT)}) -CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o.$(WARNS_EXT)}) -TOOL_WARNS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o.$(WARNS_EXT)}) -EXAMPLE_WARNS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o.$(WARNS_EXT)}) -TEST_WARNS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o.$(WARNS_EXT)}) -TEST_CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o.$(WARNS_EXT)}) -ALL_CXX_WARNS := $(CXX_WARNS) $(TOOL_WARNS) $(EXAMPLE_WARNS) $(TEST_WARNS) -ALL_CU_WARNS := $(CU_WARNS) $(TEST_CU_WARNS) -ALL_WARNS := $(ALL_CXX_WARNS) $(ALL_CU_WARNS) - -EMPTY_WARN_REPORT := $(BUILD_DIR)/.$(WARNS_EXT) -NONEMPTY_WARN_REPORT := $(BUILD_DIR)/$(WARNS_EXT) - -############################## -# Derive include and lib directories -############################## -CUDA_INCLUDE_DIR := $(CUDA_DIR)/include - -CUDA_LIB_DIR := -# add /lib64 only if it exists -ifneq ("$(wildcard $(CUDA_DIR)/lib64)","") - CUDA_LIB_DIR += $(CUDA_DIR)/lib64 -endif -CUDA_LIB_DIR += $(CUDA_DIR)/lib - -INCLUDE_DIRS += $(BUILD_INCLUDE_DIR) ./src ./include -ifneq ($(CPU_ONLY), 1) - INCLUDE_DIRS += $(CUDA_INCLUDE_DIR) - LIBRARY_DIRS += $(CUDA_LIB_DIR) - LIBRARIES := cudart cublas curand -endif -LIBRARIES += glog gflags protobuf leveldb snappy \ - lmdb boost_system hdf5_hl hdf5 m \ - opencv_core opencv_highgui opencv_imgproc #opencv_imgcodecs -PYTHON_LIBRARIES := boost_python python2.7 -WARNINGS := -Wall -Wno-sign-compare - -############################## -# Set build directories -############################## - -DISTRIBUTE_DIR ?= distribute -DISTRIBUTE_SUBDIRS := $(DISTRIBUTE_DIR)/bin $(DISTRIBUTE_DIR)/lib -DIST_ALIASES := dist -ifneq ($(strip $(DISTRIBUTE_DIR)),distribute) - DIST_ALIASES += distribute -endif - -ALL_BUILD_DIRS := $(sort 
$(BUILD_DIR) $(addprefix $(BUILD_DIR)/, $(SRC_DIRS)) \ - $(addprefix $(BUILD_DIR)/cuda/, $(SRC_DIRS)) \ - $(LIB_BUILD_DIR) $(TEST_BIN_DIR) $(PY_PROTO_BUILD_DIR) $(LINT_OUTPUT_DIR) \ - $(DISTRIBUTE_SUBDIRS) $(PROTO_BUILD_INCLUDE_DIR)) - -############################## -# Set directory for Doxygen-generated documentation -############################## -DOXYGEN_CONFIG_FILE ?= ./.Doxyfile -# should be the same as OUTPUT_DIRECTORY in the .Doxyfile -DOXYGEN_OUTPUT_DIR ?= ./doxygen -DOXYGEN_COMMAND ?= doxygen -# All the files that might have Doxygen documentation. -DOXYGEN_SOURCES := $(shell find \ - src/$(PROJECT) \ - include/$(PROJECT) \ - python/ \ - matlab/ \ - examples \ - tools \ - -name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh" -or \ - -name "*.py" -or -name "*.m") -DOXYGEN_SOURCES += $(DOXYGEN_CONFIG_FILE) - - -############################## -# Configure build -############################## - -# Determine platform -UNAME := $(shell uname -s) -ifeq ($(UNAME), Linux) - LINUX := 1 -else ifeq ($(UNAME), Darwin) - OSX := 1 -endif - -# Linux -ifeq ($(LINUX), 1) - CXX ?= /usr/bin/g++ - GCCVERSION := $(shell $(CXX) -dumpversion | cut -f1,2 -d.) - # older versions of gcc are too dumb to build boost with -Wuninitalized -<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c - ifeq ($(shell echo | awk '{exit $(GCCVERSION) < 4.6;}'), 1) -======= - ifeq ($(shell echo $(GCCVERSION) \< 4.6 | bc), 1) ->>>>>>> GPU version added - WARNINGS += -Wno-uninitialized - endif - # boost::thread is reasonably called boost_thread (compare OS X) - # We will also explicitly add stdc++ to the link target. 
- LIBRARIES += boost_thread stdc++ -endif - -# OS X: -# clang++ instead of g++ -# libstdc++ for NVCC compatibility on OS X >= 10.9 with CUDA < 7.0 -ifeq ($(OSX), 1) - CXX := /usr/bin/clang++ - ifneq ($(CPU_ONLY), 1) - CUDA_VERSION := $(shell $(CUDA_DIR)/bin/nvcc -V | grep -o 'release \d' | grep -o '\d') -<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c - ifeq ($(shell echo | awk '{exit $(CUDA_VERSION) < 7.0;}'), 1) -======= - ifeq ($(shell echo $(CUDA_VERSION) \< 7.0 | bc), 1) ->>>>>>> GPU version added - CXXFLAGS += -stdlib=libstdc++ - LINKFLAGS += -stdlib=libstdc++ - endif - # clang throws this warning for cuda headers - WARNINGS += -Wno-unneeded-internal-declaration - endif - # gtest needs to use its own tuple to not conflict with clang - COMMON_FLAGS += -DGTEST_USE_OWN_TR1_TUPLE=1 - # boost::thread is called boost_thread-mt to mark multithreading on OS X - LIBRARIES += boost_thread-mt - # we need to explicitly ask for the rpath to be obeyed - DYNAMIC_FLAGS := -install_name @rpath/libcaffe.so - ORIGIN := @loader_path -else - ORIGIN := \$$ORIGIN -endif - -# Custom compiler -ifdef CUSTOM_CXX - CXX := $(CUSTOM_CXX) -endif - -# Static linking -ifneq (,$(findstring clang++,$(CXX))) - STATIC_LINK_COMMAND := -Wl,-force_load $(STATIC_NAME) -else ifneq (,$(findstring g++,$(CXX))) - STATIC_LINK_COMMAND := -Wl,--whole-archive $(STATIC_NAME) -Wl,--no-whole-archive -else - # The following line must not be indented with a tab, since we are not inside a target - $(error Cannot static link with the $(CXX) compiler) -endif - -# Debugging -ifeq ($(DEBUG), 1) - COMMON_FLAGS += -DDEBUG -g -O0 - NVCCFLAGS += -G -else - COMMON_FLAGS += -DNDEBUG -O2 -endif - -# cuDNN acceleration configuration. 
-ifeq ($(USE_CUDNN), 1) - LIBRARIES += cudnn - COMMON_FLAGS += -DUSE_CUDNN -endif - -# CPU-only configuration -ifeq ($(CPU_ONLY), 1) - OBJS := $(PROTO_OBJS) $(CXX_OBJS) - TEST_OBJS := $(TEST_CXX_OBJS) - TEST_BINS := $(TEST_CXX_BINS) - ALL_WARNS := $(ALL_CXX_WARNS) - TEST_FILTER := --gtest_filter="-*GPU*" - COMMON_FLAGS += -DCPU_ONLY -endif - -# Python layer support -ifeq ($(WITH_PYTHON_LAYER), 1) - COMMON_FLAGS += -DWITH_PYTHON_LAYER - LIBRARIES += $(PYTHON_LIBRARIES) -endif - -# BLAS configuration (default = ATLAS) -BLAS ?= atlas -ifeq ($(BLAS), mkl) - # MKL - LIBRARIES += mkl_rt - COMMON_FLAGS += -DUSE_MKL - MKL_DIR ?= /opt/intel/mkl - BLAS_INCLUDE ?= $(MKL_DIR)/include - BLAS_LIB ?= $(MKL_DIR)/lib $(MKL_DIR)/lib/intel64 -else ifeq ($(BLAS), open) - # OpenBLAS - LIBRARIES += openblas -else - # ATLAS - ifeq ($(LINUX), 1) - ifeq ($(BLAS), atlas) - # Linux simply has cblas and atlas - LIBRARIES += cblas atlas - endif - else ifeq ($(OSX), 1) - # OS X packages atlas as the vecLib framework - LIBRARIES += cblas - # 10.10 has accelerate while 10.9 has veclib - XCODE_CLT_VER := $(shell pkgutil --pkg-info=com.apple.pkg.CLTools_Executables | grep -o 'version: 6') - ifneq (,$(findstring version: 6,$(XCODE_CLT_VER))) - BLAS_INCLUDE ?= /System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/ - LDFLAGS += -framework Accelerate - else - BLAS_INCLUDE ?= /System/Library/Frameworks/vecLib.framework/Versions/Current/Headers/ - LDFLAGS += -framework vecLib - endif - endif -endif -INCLUDE_DIRS += $(BLAS_INCLUDE) -LIBRARY_DIRS += $(BLAS_LIB) - -LIBRARY_DIRS += $(LIB_BUILD_DIR) - -# Automatic dependency generation (nvcc is handled separately) -CXXFLAGS += -MMD -MP - -# Complete build flags. 
-COMMON_FLAGS += $(foreach includedir,$(INCLUDE_DIRS),-I$(includedir)) -CXXFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) -NVCCFLAGS += -ccbin=$(CXX) -Xcompiler -fPIC $(COMMON_FLAGS) -# mex may invoke an older gcc that is too liberal with -Wuninitalized -MATLAB_CXXFLAGS := $(CXXFLAGS) -Wno-uninitialized -LINKFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) - -USE_PKG_CONFIG ?= 0 -ifeq ($(USE_PKG_CONFIG), 1) - PKG_CONFIG := $(shell pkg-config opencv --libs) -else - PKG_CONFIG := -endif -LDFLAGS += $(foreach librarydir,$(LIBRARY_DIRS),-L$(librarydir)) $(PKG_CONFIG) \ - $(foreach library,$(LIBRARIES),-l$(library)) -PYTHON_LDFLAGS := $(LDFLAGS) $(foreach library,$(PYTHON_LIBRARIES),-l$(library)) - -# 'superclean' target recursively* deletes all files ending with an extension -# in $(SUPERCLEAN_EXTS) below. This may be useful if you've built older -# versions of Caffe that do not place all generated files in a location known -# to the 'clean' target. -# -# 'supercleanlist' will list the files to be deleted by make superclean. -# -# * Recursive with the exception that symbolic links are never followed, per the -# default behavior of 'find'. -SUPERCLEAN_EXTS := .so .a .o .bin .testbin .pb.cc .pb.h _pb2.py .cuo - -# Set the sub-targets of the 'everything' target. -EVERYTHING_TARGETS := all py$(PROJECT) test warn lint -# Only build matcaffe as part of "everything" if MATLAB_DIR is specified. 
-ifneq ($(MATLAB_DIR),) - EVERYTHING_TARGETS += mat$(PROJECT) -endif - -############################## -# Define build targets -############################## -<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c -.PHONY: all lib test clean docs linecount lint lintclean tools examples $(DIST_ALIASES) \ - py mat py$(PROJECT) mat$(PROJECT) proto runtest \ - superclean supercleanlist supercleanfiles warn everything - -all: lib tools examples - -lib: $(STATIC_NAME) $(DYNAMIC_NAME) -======= -.PHONY: all test clean docs linecount lint lintclean tools examples $(DIST_ALIASES) \ - py mat py$(PROJECT) mat$(PROJECT) proto runtest \ - superclean supercleanlist supercleanfiles warn everything - -all: $(STATIC_NAME) $(DYNAMIC_NAME) tools examples ->>>>>>> GPU version added - -everything: $(EVERYTHING_TARGETS) - -linecount: - cloc --read-lang-def=$(PROJECT).cloc \ - src/$(PROJECT) include/$(PROJECT) tools examples \ - python matlab - -lint: $(EMPTY_LINT_REPORT) - -lintclean: - @ $(RM) -r $(LINT_OUTPUT_DIR) $(EMPTY_LINT_REPORT) $(NONEMPTY_LINT_REPORT) - -docs: $(DOXYGEN_OUTPUT_DIR) - @ cd ./docs ; ln -sfn ../$(DOXYGEN_OUTPUT_DIR)/html doxygen - -$(DOXYGEN_OUTPUT_DIR): $(DOXYGEN_CONFIG_FILE) $(DOXYGEN_SOURCES) - $(DOXYGEN_COMMAND) $(DOXYGEN_CONFIG_FILE) - -$(EMPTY_LINT_REPORT): $(LINT_OUTPUTS) | $(BUILD_DIR) - @ cat $(LINT_OUTPUTS) > $@ - @ if [ -s "$@" ]; then \ - cat $@; \ - mv $@ $(NONEMPTY_LINT_REPORT); \ - echo "Found one or more lint errors."; \ - exit 1; \ - fi; \ - $(RM) $(NONEMPTY_LINT_REPORT); \ - echo "No lint errors!"; - -$(LINT_OUTPUTS): $(LINT_OUTPUT_DIR)/%.lint.txt : % $(LINT_SCRIPT) | $(LINT_OUTPUT_DIR) - @ mkdir -p $(dir $@) - @ python $(LINT_SCRIPT) $< 2>&1 \ - | grep -v "^Done processing " \ - | grep -v "^Total errors found: 0" \ - > $@ \ - || true - -test: $(TEST_ALL_BIN) $(TEST_ALL_DYNLINK_BIN) $(TEST_BINS) - -tools: $(TOOL_BINS) $(TOOL_BIN_LINKS) - -examples: $(EXAMPLE_BINS) - -py$(PROJECT): py - -py: $(PY$(PROJECT)_SO) $(PROTO_GEN_PY) - -$(PY$(PROJECT)_SO): 
$(PY$(PROJECT)_SRC) $(PY$(PROJECT)_HXX) | $(DYNAMIC_NAME) - @ echo CXX/LD -o $@ $< - $(Q)$(CXX) -shared -o $@ $(PY$(PROJECT)_SRC) \ - -o $@ $(LINKFLAGS) -l$(PROJECT) $(PYTHON_LDFLAGS) \ - -Wl,-rpath,$(ORIGIN)/../../build/lib - -mat$(PROJECT): mat - -mat: $(MAT$(PROJECT)_SO) - -$(MAT$(PROJECT)_SO): $(MAT$(PROJECT)_SRC) $(STATIC_NAME) - @ if [ -z "$(MATLAB_DIR)" ]; then \ - echo "MATLAB_DIR must be specified in $(CONFIG_FILE)" \ - "to build mat$(PROJECT)."; \ - exit 1; \ - fi - @ echo MEX $< - $(Q)$(MATLAB_DIR)/bin/mex $(MAT$(PROJECT)_SRC) \ - CXX="$(CXX)" \ - CXXFLAGS="\$$CXXFLAGS $(MATLAB_CXXFLAGS)" \ - CXXLIBS="\$$CXXLIBS $(STATIC_LINK_COMMAND) $(LDFLAGS)" -output $@ - @ if [ -f "$(PROJECT)_.d" ]; then \ - mv -f $(PROJECT)_.d $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d}; \ - fi - -runtest: $(TEST_ALL_BIN) - $(TOOL_BUILD_DIR)/caffe - $(TEST_ALL_BIN) $(TEST_GPUID) --gtest_shuffle $(TEST_FILTER) - -pytest: py - cd python; python -m unittest discover -s caffe/test - -mattest: mat - cd matlab; $(MATLAB_DIR)/bin/matlab -nodisplay -r 'caffe.run_tests(), exit()' - -warn: $(EMPTY_WARN_REPORT) - -$(EMPTY_WARN_REPORT): $(ALL_WARNS) | $(BUILD_DIR) - @ cat $(ALL_WARNS) > $@ - @ if [ -s "$@" ]; then \ - cat $@; \ - mv $@ $(NONEMPTY_WARN_REPORT); \ - echo "Compiler produced one or more warnings."; \ - exit 1; \ - fi; \ - $(RM) $(NONEMPTY_WARN_REPORT); \ - echo "No compiler warnings!"; - -$(ALL_WARNS): %.o.$(WARNS_EXT) : %.o - -$(BUILD_DIR_LINK): $(BUILD_DIR)/.linked - -# Create a target ".linked" in this BUILD_DIR to tell Make that the "build" link -# is currently correct, then delete the one in the OTHER_BUILD_DIR in case it -# exists and $(DEBUG) is toggled later. 
-$(BUILD_DIR)/.linked: - @ mkdir -p $(BUILD_DIR) - @ $(RM) $(OTHER_BUILD_DIR)/.linked - @ $(RM) -r $(BUILD_DIR_LINK) - @ ln -s $(BUILD_DIR) $(BUILD_DIR_LINK) - @ touch $@ - -$(ALL_BUILD_DIRS): | $(BUILD_DIR_LINK) - @ mkdir -p $@ - -$(DYNAMIC_NAME): $(OBJS) | $(LIB_BUILD_DIR) - @ echo LD -o $@ - $(Q)$(CXX) -shared -o $@ $(OBJS) $(LINKFLAGS) $(LDFLAGS) $(DYNAMIC_FLAGS) - -$(STATIC_NAME): $(OBJS) | $(LIB_BUILD_DIR) - @ echo AR -o $@ - $(Q)ar rcs $@ $(OBJS) - -$(BUILD_DIR)/%.o: %.cpp | $(ALL_BUILD_DIRS) - @ echo CXX $< - $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ - || (cat $@.$(WARNS_EXT); exit 1) - @ cat $@.$(WARNS_EXT) - -$(PROTO_BUILD_DIR)/%.pb.o: $(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_GEN_HEADER) \ - | $(PROTO_BUILD_DIR) - @ echo CXX $< - $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ - || (cat $@.$(WARNS_EXT); exit 1) - @ cat $@.$(WARNS_EXT) - -$(BUILD_DIR)/cuda/%.o: %.cu | $(ALL_BUILD_DIRS) - @ echo NVCC $< - $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -M $< -o ${@:.o=.d} \ - -odir $(@D) - $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -c $< -o $@ 2> $@.$(WARNS_EXT) \ - || (cat $@.$(WARNS_EXT); exit 1) - @ cat $@.$(WARNS_EXT) - -$(TEST_ALL_BIN): $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \ - | $(DYNAMIC_NAME) $(TEST_BIN_DIR) - @ echo CXX/LD -o $@ $< - $(Q)$(CXX) $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \ - -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib - -$(TEST_CU_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CU_BUILD_DIR)/%.o \ - $(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR) - @ echo LD $< - $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \ - -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib - -$(TEST_CXX_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CXX_BUILD_DIR)/%.o \ - $(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR) - @ echo LD $< - $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \ - -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib - -# Target for extension-less symlinks to tool 
binaries with extension '*.bin'. -$(TOOL_BUILD_DIR)/%: $(TOOL_BUILD_DIR)/%.bin | $(TOOL_BUILD_DIR) - @ $(RM) $@ - @ ln -s $(abspath $<) $@ - -$(TOOL_BINS): %.bin : %.o | $(DYNAMIC_NAME) - @ echo CXX/LD -o $@ - $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \ - -Wl,-rpath,$(ORIGIN)/../lib - -$(EXAMPLE_BINS): %.bin : %.o | $(DYNAMIC_NAME) - @ echo CXX/LD -o $@ - $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \ - -Wl,-rpath,$(ORIGIN)/../../lib - -proto: $(PROTO_GEN_CC) $(PROTO_GEN_HEADER) - -$(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_BUILD_DIR)/%.pb.h : \ - $(PROTO_SRC_DIR)/%.proto | $(PROTO_BUILD_DIR) - @ echo PROTOC $< - $(Q)protoc --proto_path=$(PROTO_SRC_DIR) --cpp_out=$(PROTO_BUILD_DIR) $< - -$(PY_PROTO_BUILD_DIR)/%_pb2.py : $(PROTO_SRC_DIR)/%.proto \ - $(PY_PROTO_INIT) | $(PY_PROTO_BUILD_DIR) - @ echo PROTOC \(python\) $< - $(Q)protoc --proto_path=$(PROTO_SRC_DIR) --python_out=$(PY_PROTO_BUILD_DIR) $< - -$(PY_PROTO_INIT): | $(PY_PROTO_BUILD_DIR) - touch $(PY_PROTO_INIT) - -clean: - @- $(RM) -rf $(ALL_BUILD_DIRS) - @- $(RM) -rf $(OTHER_BUILD_DIR) - @- $(RM) -rf $(BUILD_DIR_LINK) - @- $(RM) -rf $(DISTRIBUTE_DIR) - @- $(RM) $(PY$(PROJECT)_SO) - @- $(RM) $(MAT$(PROJECT)_SO) - -supercleanfiles: - $(eval SUPERCLEAN_FILES := $(strip \ - $(foreach ext,$(SUPERCLEAN_EXTS), $(shell find . 
-name '*$(ext)' \ - -not -path './data/*')))) - -supercleanlist: supercleanfiles - @ \ - if [ -z "$(SUPERCLEAN_FILES)" ]; then \ - echo "No generated files found."; \ - else \ - echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \ - fi - -superclean: clean supercleanfiles - @ \ - if [ -z "$(SUPERCLEAN_FILES)" ]; then \ - echo "No generated files found."; \ - else \ - echo "Deleting the following generated files:"; \ - echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \ - $(RM) $(SUPERCLEAN_FILES); \ - fi - -$(DIST_ALIASES): $(DISTRIBUTE_DIR) - -$(DISTRIBUTE_DIR): all py | $(DISTRIBUTE_SUBDIRS) - # add include - cp -r include $(DISTRIBUTE_DIR)/ - mkdir -p $(DISTRIBUTE_DIR)/include/caffe/proto - cp $(PROTO_GEN_HEADER_SRCS) $(DISTRIBUTE_DIR)/include/caffe/proto - # add tool and example binaries - cp $(TOOL_BINS) $(DISTRIBUTE_DIR)/bin - cp $(EXAMPLE_BINS) $(DISTRIBUTE_DIR)/bin - # add libraries - cp $(STATIC_NAME) $(DISTRIBUTE_DIR)/lib - cp $(DYNAMIC_NAME) $(DISTRIBUTE_DIR)/lib - # add python - it's not the standard way, indeed... - cp -r python $(DISTRIBUTE_DIR)/python - --include $(DEPS) diff --git a/Makefile.orig.orig b/Makefile.orig.orig deleted file mode 100644 index 65078e2ef46..00000000000 --- a/Makefile.orig.orig +++ /dev/null @@ -1,663 +0,0 @@ -PROJECT := caffe - -CONFIG_FILE := Makefile.config -# Explicitly check for the config file, otherwise make -k will proceed anyway. -ifeq ($(wildcard $(CONFIG_FILE)),) -$(error $(CONFIG_FILE) not found. See $(CONFIG_FILE).example.) -endif -include $(CONFIG_FILE) - -BUILD_DIR_LINK := $(BUILD_DIR) -ifeq ($(RELEASE_BUILD_DIR),) - RELEASE_BUILD_DIR := .$(BUILD_DIR)_release -endif -ifeq ($(DEBUG_BUILD_DIR),) - DEBUG_BUILD_DIR := .$(BUILD_DIR)_debug -endif - -DEBUG ?= 0 -ifeq ($(DEBUG), 1) - BUILD_DIR := $(DEBUG_BUILD_DIR) - OTHER_BUILD_DIR := $(RELEASE_BUILD_DIR) -else - BUILD_DIR := $(RELEASE_BUILD_DIR) - OTHER_BUILD_DIR := $(DEBUG_BUILD_DIR) -endif - -# All of the directories containing code. 
-SRC_DIRS := $(shell find * -type d -exec bash -c "find {} -maxdepth 1 \ - \( -name '*.cpp' -o -name '*.proto' \) | grep -q ." \; -print) - -# The target shared library name -LIB_BUILD_DIR := $(BUILD_DIR)/lib -STATIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).a -DYNAMIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).so - -############################## -# Get all source files -############################## -# CXX_SRCS are the source files excluding the test ones. -CXX_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cpp" -name "*.cpp") -# CU_SRCS are the cuda source files -CU_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cu" -name "*.cu") -# TEST_SRCS are the test source files -TEST_MAIN_SRC := src/$(PROJECT)/test/test_caffe_main.cpp -TEST_SRCS := $(shell find src/$(PROJECT) -name "test_*.cpp") -TEST_SRCS := $(filter-out $(TEST_MAIN_SRC), $(TEST_SRCS)) -TEST_CU_SRCS := $(shell find src/$(PROJECT) -name "test_*.cu") -GTEST_SRC := src/gtest/gtest-all.cpp -# TOOL_SRCS are the source files for the tool binaries -TOOL_SRCS := $(shell find tools -name "*.cpp") -# EXAMPLE_SRCS are the source files for the example binaries -EXAMPLE_SRCS := $(shell find examples -name "*.cpp") -# BUILD_INCLUDE_DIR contains any generated header files we want to include. -BUILD_INCLUDE_DIR := $(BUILD_DIR)/src -# PROTO_SRCS are the protocol buffer definitions -PROTO_SRC_DIR := src/$(PROJECT)/proto -PROTO_SRCS := $(wildcard $(PROTO_SRC_DIR)/*.proto) -# PROTO_BUILD_DIR will contain the .cc and obj files generated from -# PROTO_SRCS; PROTO_BUILD_INCLUDE_DIR will contain the .h header files -PROTO_BUILD_DIR := $(BUILD_DIR)/$(PROTO_SRC_DIR) -PROTO_BUILD_INCLUDE_DIR := $(BUILD_INCLUDE_DIR)/$(PROJECT)/proto -# NONGEN_CXX_SRCS includes all source/header files except those generated -# automatically (e.g., by proto). 
-NONGEN_CXX_SRCS := $(shell find \ - src/$(PROJECT) \ - include/$(PROJECT) \ - python/$(PROJECT) \ - matlab/+$(PROJECT)/private \ - examples \ - tools \ - -name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh") -LINT_SCRIPT := scripts/cpp_lint.py -LINT_OUTPUT_DIR := $(BUILD_DIR)/.lint -LINT_EXT := lint.txt -LINT_OUTPUTS := $(addsuffix .$(LINT_EXT), $(addprefix $(LINT_OUTPUT_DIR)/, $(NONGEN_CXX_SRCS))) -EMPTY_LINT_REPORT := $(BUILD_DIR)/.$(LINT_EXT) -NONEMPTY_LINT_REPORT := $(BUILD_DIR)/$(LINT_EXT) -# PY$(PROJECT)_SRC is the python wrapper for $(PROJECT) -PY$(PROJECT)_SRC := python/$(PROJECT)/_$(PROJECT).cpp -PY$(PROJECT)_SO := python/$(PROJECT)/_$(PROJECT).so -PY$(PROJECT)_HXX := include/$(PROJECT)/python_layer.hpp -# MAT$(PROJECT)_SRC is the mex entrance point of matlab package for $(PROJECT) -MAT$(PROJECT)_SRC := matlab/+$(PROJECT)/private/$(PROJECT)_.cpp -ifneq ($(MATLAB_DIR),) - MAT_SO_EXT := $(shell $(MATLAB_DIR)/bin/mexext) -endif -MAT$(PROJECT)_SO := matlab/+$(PROJECT)/private/$(PROJECT)_.$(MAT_SO_EXT) - -############################## -# Derive generated files -############################## -# The generated files for protocol buffers -PROTO_GEN_HEADER_SRCS := $(addprefix $(PROTO_BUILD_DIR)/, \ - $(notdir ${PROTO_SRCS:.proto=.pb.h})) -PROTO_GEN_HEADER := $(addprefix $(PROTO_BUILD_INCLUDE_DIR)/, \ - $(notdir ${PROTO_SRCS:.proto=.pb.h})) -PROTO_GEN_CC := $(addprefix $(BUILD_DIR)/, ${PROTO_SRCS:.proto=.pb.cc}) -PY_PROTO_BUILD_DIR := python/$(PROJECT)/proto -PY_PROTO_INIT := python/$(PROJECT)/proto/__init__.py -PROTO_GEN_PY := $(foreach file,${PROTO_SRCS:.proto=_pb2.py}, \ - $(PY_PROTO_BUILD_DIR)/$(notdir $(file))) -# The objects corresponding to the source files -# These objects will be linked into the final shared library, so we -# exclude the tool, example, and test objects. 
-CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o}) -CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o}) -PROTO_OBJS := ${PROTO_GEN_CC:.cc=.o} -OBJS := $(PROTO_OBJS) $(CXX_OBJS) $(CU_OBJS) -# tool, example, and test objects -TOOL_OBJS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o}) -TOOL_BUILD_DIR := $(BUILD_DIR)/tools -TEST_CXX_BUILD_DIR := $(BUILD_DIR)/src/$(PROJECT)/test -TEST_CU_BUILD_DIR := $(BUILD_DIR)/cuda/src/$(PROJECT)/test -TEST_CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o}) -TEST_CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o}) -TEST_OBJS := $(TEST_CXX_OBJS) $(TEST_CU_OBJS) -GTEST_OBJ := $(addprefix $(BUILD_DIR)/, ${GTEST_SRC:.cpp=.o}) -EXAMPLE_OBJS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o}) -# Output files for automatic dependency generation -DEPS := ${CXX_OBJS:.o=.d} ${CU_OBJS:.o=.d} ${TEST_CXX_OBJS:.o=.d} \ - ${TEST_CU_OBJS:.o=.d} $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d} -# tool, example, and test bins -TOOL_BINS := ${TOOL_OBJS:.o=.bin} -EXAMPLE_BINS := ${EXAMPLE_OBJS:.o=.bin} -# symlinks to tool bins without the ".bin" extension -TOOL_BIN_LINKS := ${TOOL_BINS:.bin=} -# Put the test binaries in build/test for convenience. -TEST_BIN_DIR := $(BUILD_DIR)/test -TEST_CU_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \ - $(foreach obj,$(TEST_CU_OBJS),$(basename $(notdir $(obj)))))) -TEST_CXX_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \ - $(foreach obj,$(TEST_CXX_OBJS),$(basename $(notdir $(obj)))))) -TEST_BINS := $(TEST_CXX_BINS) $(TEST_CU_BINS) -# TEST_ALL_BIN is the test binary that links caffe dynamically. 
-TEST_ALL_BIN := $(TEST_BIN_DIR)/test_all.testbin - -############################## -# Derive compiler warning dump locations -############################## -WARNS_EXT := warnings.txt -CXX_WARNS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o.$(WARNS_EXT)}) -CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o.$(WARNS_EXT)}) -TOOL_WARNS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o.$(WARNS_EXT)}) -EXAMPLE_WARNS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o.$(WARNS_EXT)}) -TEST_WARNS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o.$(WARNS_EXT)}) -TEST_CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o.$(WARNS_EXT)}) -ALL_CXX_WARNS := $(CXX_WARNS) $(TOOL_WARNS) $(EXAMPLE_WARNS) $(TEST_WARNS) -ALL_CU_WARNS := $(CU_WARNS) $(TEST_CU_WARNS) -ALL_WARNS := $(ALL_CXX_WARNS) $(ALL_CU_WARNS) - -EMPTY_WARN_REPORT := $(BUILD_DIR)/.$(WARNS_EXT) -NONEMPTY_WARN_REPORT := $(BUILD_DIR)/$(WARNS_EXT) - -############################## -# Derive include and lib directories -############################## -CUDA_INCLUDE_DIR := $(CUDA_DIR)/include - -CUDA_LIB_DIR := -# add /lib64 only if it exists -ifneq ("$(wildcard $(CUDA_DIR)/lib64)","") - CUDA_LIB_DIR += $(CUDA_DIR)/lib64 -endif -CUDA_LIB_DIR += $(CUDA_DIR)/lib - -INCLUDE_DIRS += $(BUILD_INCLUDE_DIR) ./src ./include -ifneq ($(CPU_ONLY), 1) - INCLUDE_DIRS += $(CUDA_INCLUDE_DIR) - LIBRARY_DIRS += $(CUDA_LIB_DIR) - LIBRARIES := cudart cublas curand -endif -LIBRARIES += glog gflags protobuf leveldb snappy \ - lmdb boost_system hdf5_hl hdf5 m \ - opencv_core opencv_highgui opencv_imgproc #opencv_imgcodecs -PYTHON_LIBRARIES := boost_python python2.7 -WARNINGS := -Wall -Wno-sign-compare - -############################## -# Set build directories -############################## - -DISTRIBUTE_DIR ?= distribute -DISTRIBUTE_SUBDIRS := $(DISTRIBUTE_DIR)/bin $(DISTRIBUTE_DIR)/lib -DIST_ALIASES := dist -ifneq ($(strip $(DISTRIBUTE_DIR)),distribute) - DIST_ALIASES += distribute -endif - -ALL_BUILD_DIRS := $(sort 
$(BUILD_DIR) $(addprefix $(BUILD_DIR)/, $(SRC_DIRS)) \ - $(addprefix $(BUILD_DIR)/cuda/, $(SRC_DIRS)) \ - $(LIB_BUILD_DIR) $(TEST_BIN_DIR) $(PY_PROTO_BUILD_DIR) $(LINT_OUTPUT_DIR) \ - $(DISTRIBUTE_SUBDIRS) $(PROTO_BUILD_INCLUDE_DIR)) - -############################## -# Set directory for Doxygen-generated documentation -############################## -DOXYGEN_CONFIG_FILE ?= ./.Doxyfile -# should be the same as OUTPUT_DIRECTORY in the .Doxyfile -DOXYGEN_OUTPUT_DIR ?= ./doxygen -DOXYGEN_COMMAND ?= doxygen -# All the files that might have Doxygen documentation. -DOXYGEN_SOURCES := $(shell find \ - src/$(PROJECT) \ - include/$(PROJECT) \ - python/ \ - matlab/ \ - examples \ - tools \ - -name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh" -or \ - -name "*.py" -or -name "*.m") -DOXYGEN_SOURCES += $(DOXYGEN_CONFIG_FILE) - - -############################## -# Configure build -############################## - -# Determine platform -UNAME := $(shell uname -s) -ifeq ($(UNAME), Linux) - LINUX := 1 -else ifeq ($(UNAME), Darwin) - OSX := 1 -endif - -# Linux -ifeq ($(LINUX), 1) - CXX ?= /usr/bin/g++ - GCCVERSION := $(shell $(CXX) -dumpversion | cut -f1,2 -d.) - # older versions of gcc are too dumb to build boost with -Wuninitalized -<<<<<<< 0a8521567403409d70ece475762c203e38274530 - ifeq ($(shell echo | awk '{exit $(GCCVERSION) < 4.6;}'), 1) -======= -<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c - ifeq ($(shell echo | awk '{exit $(GCCVERSION) < 4.6;}'), 1) -======= - ifeq ($(shell echo $(GCCVERSION) \< 4.6 | bc), 1) ->>>>>>> GPU version added ->>>>>>> GPU version added - WARNINGS += -Wno-uninitialized - endif - # boost::thread is reasonably called boost_thread (compare OS X) - # We will also explicitly add stdc++ to the link target. 
- LIBRARIES += boost_thread stdc++ -endif - -# OS X: -# clang++ instead of g++ -# libstdc++ for NVCC compatibility on OS X >= 10.9 with CUDA < 7.0 -ifeq ($(OSX), 1) - CXX := /usr/bin/clang++ - ifneq ($(CPU_ONLY), 1) - CUDA_VERSION := $(shell $(CUDA_DIR)/bin/nvcc -V | grep -o 'release \d' | grep -o '\d') -<<<<<<< 0a8521567403409d70ece475762c203e38274530 - ifeq ($(shell echo | awk '{exit $(CUDA_VERSION) < 7.0;}'), 1) -======= -<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c - ifeq ($(shell echo | awk '{exit $(CUDA_VERSION) < 7.0;}'), 1) -======= - ifeq ($(shell echo $(CUDA_VERSION) \< 7.0 | bc), 1) ->>>>>>> GPU version added ->>>>>>> GPU version added - CXXFLAGS += -stdlib=libstdc++ - LINKFLAGS += -stdlib=libstdc++ - endif - # clang throws this warning for cuda headers - WARNINGS += -Wno-unneeded-internal-declaration - endif - # gtest needs to use its own tuple to not conflict with clang - COMMON_FLAGS += -DGTEST_USE_OWN_TR1_TUPLE=1 - # boost::thread is called boost_thread-mt to mark multithreading on OS X - LIBRARIES += boost_thread-mt - # we need to explicitly ask for the rpath to be obeyed - DYNAMIC_FLAGS := -install_name @rpath/libcaffe.so - ORIGIN := @loader_path -else - ORIGIN := \$$ORIGIN -endif - -# Custom compiler -ifdef CUSTOM_CXX - CXX := $(CUSTOM_CXX) -endif - -# Static linking -ifneq (,$(findstring clang++,$(CXX))) - STATIC_LINK_COMMAND := -Wl,-force_load $(STATIC_NAME) -else ifneq (,$(findstring g++,$(CXX))) - STATIC_LINK_COMMAND := -Wl,--whole-archive $(STATIC_NAME) -Wl,--no-whole-archive -else - # The following line must not be indented with a tab, since we are not inside a target - $(error Cannot static link with the $(CXX) compiler) -endif - -# Debugging -ifeq ($(DEBUG), 1) - COMMON_FLAGS += -DDEBUG -g -O0 - NVCCFLAGS += -G -else - COMMON_FLAGS += -DNDEBUG -O2 -endif - -# cuDNN acceleration configuration. 
-ifeq ($(USE_CUDNN), 1) - LIBRARIES += cudnn - COMMON_FLAGS += -DUSE_CUDNN -endif - -# CPU-only configuration -ifeq ($(CPU_ONLY), 1) - OBJS := $(PROTO_OBJS) $(CXX_OBJS) - TEST_OBJS := $(TEST_CXX_OBJS) - TEST_BINS := $(TEST_CXX_BINS) - ALL_WARNS := $(ALL_CXX_WARNS) - TEST_FILTER := --gtest_filter="-*GPU*" - COMMON_FLAGS += -DCPU_ONLY -endif - -# Python layer support -ifeq ($(WITH_PYTHON_LAYER), 1) - COMMON_FLAGS += -DWITH_PYTHON_LAYER - LIBRARIES += $(PYTHON_LIBRARIES) -endif - -# BLAS configuration (default = ATLAS) -BLAS ?= atlas -ifeq ($(BLAS), mkl) - # MKL - LIBRARIES += mkl_rt - COMMON_FLAGS += -DUSE_MKL - MKL_DIR ?= /opt/intel/mkl - BLAS_INCLUDE ?= $(MKL_DIR)/include - BLAS_LIB ?= $(MKL_DIR)/lib $(MKL_DIR)/lib/intel64 -else ifeq ($(BLAS), open) - # OpenBLAS - LIBRARIES += openblas -else - # ATLAS - ifeq ($(LINUX), 1) - ifeq ($(BLAS), atlas) - # Linux simply has cblas and atlas - LIBRARIES += cblas atlas - endif - else ifeq ($(OSX), 1) - # OS X packages atlas as the vecLib framework - LIBRARIES += cblas - # 10.10 has accelerate while 10.9 has veclib - XCODE_CLT_VER := $(shell pkgutil --pkg-info=com.apple.pkg.CLTools_Executables | grep -o 'version: 6') - ifneq (,$(findstring version: 6,$(XCODE_CLT_VER))) - BLAS_INCLUDE ?= /System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/ - LDFLAGS += -framework Accelerate - else - BLAS_INCLUDE ?= /System/Library/Frameworks/vecLib.framework/Versions/Current/Headers/ - LDFLAGS += -framework vecLib - endif - endif -endif -INCLUDE_DIRS += $(BLAS_INCLUDE) -LIBRARY_DIRS += $(BLAS_LIB) - -LIBRARY_DIRS += $(LIB_BUILD_DIR) - -# Automatic dependency generation (nvcc is handled separately) -CXXFLAGS += -MMD -MP - -# Complete build flags. 
-COMMON_FLAGS += $(foreach includedir,$(INCLUDE_DIRS),-I$(includedir)) -CXXFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) -NVCCFLAGS += -ccbin=$(CXX) -Xcompiler -fPIC $(COMMON_FLAGS) -# mex may invoke an older gcc that is too liberal with -Wuninitalized -MATLAB_CXXFLAGS := $(CXXFLAGS) -Wno-uninitialized -LINKFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) - -USE_PKG_CONFIG ?= 0 -ifeq ($(USE_PKG_CONFIG), 1) - PKG_CONFIG := $(shell pkg-config opencv --libs) -else - PKG_CONFIG := -endif -LDFLAGS += $(foreach librarydir,$(LIBRARY_DIRS),-L$(librarydir)) $(PKG_CONFIG) \ - $(foreach library,$(LIBRARIES),-l$(library)) -PYTHON_LDFLAGS := $(LDFLAGS) $(foreach library,$(PYTHON_LIBRARIES),-l$(library)) - -# 'superclean' target recursively* deletes all files ending with an extension -# in $(SUPERCLEAN_EXTS) below. This may be useful if you've built older -# versions of Caffe that do not place all generated files in a location known -# to the 'clean' target. -# -# 'supercleanlist' will list the files to be deleted by make superclean. -# -# * Recursive with the exception that symbolic links are never followed, per the -# default behavior of 'find'. -SUPERCLEAN_EXTS := .so .a .o .bin .testbin .pb.cc .pb.h _pb2.py .cuo - -# Set the sub-targets of the 'everything' target. -EVERYTHING_TARGETS := all py$(PROJECT) test warn lint -# Only build matcaffe as part of "everything" if MATLAB_DIR is specified. 
-ifneq ($(MATLAB_DIR),) - EVERYTHING_TARGETS += mat$(PROJECT) -endif - -############################## -# Define build targets -############################## -<<<<<<< 0a8521567403409d70ece475762c203e38274530 -======= -<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c ->>>>>>> GPU version added -.PHONY: all lib test clean docs linecount lint lintclean tools examples $(DIST_ALIASES) \ - py mat py$(PROJECT) mat$(PROJECT) proto runtest \ - superclean supercleanlist supercleanfiles warn everything - -all: lib tools examples - -lib: $(STATIC_NAME) $(DYNAMIC_NAME) -<<<<<<< 0a8521567403409d70ece475762c203e38274530 -======= -======= -.PHONY: all test clean docs linecount lint lintclean tools examples $(DIST_ALIASES) \ - py mat py$(PROJECT) mat$(PROJECT) proto runtest \ - superclean supercleanlist supercleanfiles warn everything - -all: $(STATIC_NAME) $(DYNAMIC_NAME) tools examples ->>>>>>> GPU version added ->>>>>>> GPU version added - -everything: $(EVERYTHING_TARGETS) - -linecount: - cloc --read-lang-def=$(PROJECT).cloc \ - src/$(PROJECT) include/$(PROJECT) tools examples \ - python matlab - -lint: $(EMPTY_LINT_REPORT) - -lintclean: - @ $(RM) -r $(LINT_OUTPUT_DIR) $(EMPTY_LINT_REPORT) $(NONEMPTY_LINT_REPORT) - -docs: $(DOXYGEN_OUTPUT_DIR) - @ cd ./docs ; ln -sfn ../$(DOXYGEN_OUTPUT_DIR)/html doxygen - -$(DOXYGEN_OUTPUT_DIR): $(DOXYGEN_CONFIG_FILE) $(DOXYGEN_SOURCES) - $(DOXYGEN_COMMAND) $(DOXYGEN_CONFIG_FILE) - -$(EMPTY_LINT_REPORT): $(LINT_OUTPUTS) | $(BUILD_DIR) - @ cat $(LINT_OUTPUTS) > $@ - @ if [ -s "$@" ]; then \ - cat $@; \ - mv $@ $(NONEMPTY_LINT_REPORT); \ - echo "Found one or more lint errors."; \ - exit 1; \ - fi; \ - $(RM) $(NONEMPTY_LINT_REPORT); \ - echo "No lint errors!"; - -$(LINT_OUTPUTS): $(LINT_OUTPUT_DIR)/%.lint.txt : % $(LINT_SCRIPT) | $(LINT_OUTPUT_DIR) - @ mkdir -p $(dir $@) - @ python $(LINT_SCRIPT) $< 2>&1 \ - | grep -v "^Done processing " \ - | grep -v "^Total errors found: 0" \ - > $@ \ - || true - -test: $(TEST_ALL_BIN) $(TEST_ALL_DYNLINK_BIN) 
$(TEST_BINS) - -tools: $(TOOL_BINS) $(TOOL_BIN_LINKS) - -examples: $(EXAMPLE_BINS) - -py$(PROJECT): py - -py: $(PY$(PROJECT)_SO) $(PROTO_GEN_PY) - -$(PY$(PROJECT)_SO): $(PY$(PROJECT)_SRC) $(PY$(PROJECT)_HXX) | $(DYNAMIC_NAME) - @ echo CXX/LD -o $@ $< - $(Q)$(CXX) -shared -o $@ $(PY$(PROJECT)_SRC) \ - -o $@ $(LINKFLAGS) -l$(PROJECT) $(PYTHON_LDFLAGS) \ - -Wl,-rpath,$(ORIGIN)/../../build/lib - -mat$(PROJECT): mat - -mat: $(MAT$(PROJECT)_SO) - -$(MAT$(PROJECT)_SO): $(MAT$(PROJECT)_SRC) $(STATIC_NAME) - @ if [ -z "$(MATLAB_DIR)" ]; then \ - echo "MATLAB_DIR must be specified in $(CONFIG_FILE)" \ - "to build mat$(PROJECT)."; \ - exit 1; \ - fi - @ echo MEX $< - $(Q)$(MATLAB_DIR)/bin/mex $(MAT$(PROJECT)_SRC) \ - CXX="$(CXX)" \ - CXXFLAGS="\$$CXXFLAGS $(MATLAB_CXXFLAGS)" \ - CXXLIBS="\$$CXXLIBS $(STATIC_LINK_COMMAND) $(LDFLAGS)" -output $@ - @ if [ -f "$(PROJECT)_.d" ]; then \ - mv -f $(PROJECT)_.d $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d}; \ - fi - -runtest: $(TEST_ALL_BIN) - $(TOOL_BUILD_DIR)/caffe - $(TEST_ALL_BIN) $(TEST_GPUID) --gtest_shuffle $(TEST_FILTER) - -pytest: py - cd python; python -m unittest discover -s caffe/test - -mattest: mat - cd matlab; $(MATLAB_DIR)/bin/matlab -nodisplay -r 'caffe.run_tests(), exit()' - -warn: $(EMPTY_WARN_REPORT) - -$(EMPTY_WARN_REPORT): $(ALL_WARNS) | $(BUILD_DIR) - @ cat $(ALL_WARNS) > $@ - @ if [ -s "$@" ]; then \ - cat $@; \ - mv $@ $(NONEMPTY_WARN_REPORT); \ - echo "Compiler produced one or more warnings."; \ - exit 1; \ - fi; \ - $(RM) $(NONEMPTY_WARN_REPORT); \ - echo "No compiler warnings!"; - -$(ALL_WARNS): %.o.$(WARNS_EXT) : %.o - -$(BUILD_DIR_LINK): $(BUILD_DIR)/.linked - -# Create a target ".linked" in this BUILD_DIR to tell Make that the "build" link -# is currently correct, then delete the one in the OTHER_BUILD_DIR in case it -# exists and $(DEBUG) is toggled later. 
-$(BUILD_DIR)/.linked: - @ mkdir -p $(BUILD_DIR) - @ $(RM) $(OTHER_BUILD_DIR)/.linked - @ $(RM) -r $(BUILD_DIR_LINK) - @ ln -s $(BUILD_DIR) $(BUILD_DIR_LINK) - @ touch $@ - -$(ALL_BUILD_DIRS): | $(BUILD_DIR_LINK) - @ mkdir -p $@ - -$(DYNAMIC_NAME): $(OBJS) | $(LIB_BUILD_DIR) - @ echo LD -o $@ - $(Q)$(CXX) -shared -o $@ $(OBJS) $(LINKFLAGS) $(LDFLAGS) $(DYNAMIC_FLAGS) - -$(STATIC_NAME): $(OBJS) | $(LIB_BUILD_DIR) - @ echo AR -o $@ - $(Q)ar rcs $@ $(OBJS) - -$(BUILD_DIR)/%.o: %.cpp | $(ALL_BUILD_DIRS) - @ echo CXX $< - $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ - || (cat $@.$(WARNS_EXT); exit 1) - @ cat $@.$(WARNS_EXT) - -$(PROTO_BUILD_DIR)/%.pb.o: $(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_GEN_HEADER) \ - | $(PROTO_BUILD_DIR) - @ echo CXX $< - $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ - || (cat $@.$(WARNS_EXT); exit 1) - @ cat $@.$(WARNS_EXT) - -$(BUILD_DIR)/cuda/%.o: %.cu | $(ALL_BUILD_DIRS) - @ echo NVCC $< - $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -M $< -o ${@:.o=.d} \ - -odir $(@D) - $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -c $< -o $@ 2> $@.$(WARNS_EXT) \ - || (cat $@.$(WARNS_EXT); exit 1) - @ cat $@.$(WARNS_EXT) - -$(TEST_ALL_BIN): $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \ - | $(DYNAMIC_NAME) $(TEST_BIN_DIR) - @ echo CXX/LD -o $@ $< - $(Q)$(CXX) $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \ - -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib - -$(TEST_CU_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CU_BUILD_DIR)/%.o \ - $(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR) - @ echo LD $< - $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \ - -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib - -$(TEST_CXX_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CXX_BUILD_DIR)/%.o \ - $(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR) - @ echo LD $< - $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \ - -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib - -# Target for extension-less symlinks to tool 
binaries with extension '*.bin'. -$(TOOL_BUILD_DIR)/%: $(TOOL_BUILD_DIR)/%.bin | $(TOOL_BUILD_DIR) - @ $(RM) $@ - @ ln -s $(abspath $<) $@ - -$(TOOL_BINS): %.bin : %.o | $(DYNAMIC_NAME) - @ echo CXX/LD -o $@ - $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \ - -Wl,-rpath,$(ORIGIN)/../lib - -$(EXAMPLE_BINS): %.bin : %.o | $(DYNAMIC_NAME) - @ echo CXX/LD -o $@ - $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \ - -Wl,-rpath,$(ORIGIN)/../../lib - -proto: $(PROTO_GEN_CC) $(PROTO_GEN_HEADER) - -$(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_BUILD_DIR)/%.pb.h : \ - $(PROTO_SRC_DIR)/%.proto | $(PROTO_BUILD_DIR) - @ echo PROTOC $< - $(Q)protoc --proto_path=$(PROTO_SRC_DIR) --cpp_out=$(PROTO_BUILD_DIR) $< - -$(PY_PROTO_BUILD_DIR)/%_pb2.py : $(PROTO_SRC_DIR)/%.proto \ - $(PY_PROTO_INIT) | $(PY_PROTO_BUILD_DIR) - @ echo PROTOC \(python\) $< - $(Q)protoc --proto_path=$(PROTO_SRC_DIR) --python_out=$(PY_PROTO_BUILD_DIR) $< - -$(PY_PROTO_INIT): | $(PY_PROTO_BUILD_DIR) - touch $(PY_PROTO_INIT) - -clean: - @- $(RM) -rf $(ALL_BUILD_DIRS) - @- $(RM) -rf $(OTHER_BUILD_DIR) - @- $(RM) -rf $(BUILD_DIR_LINK) - @- $(RM) -rf $(DISTRIBUTE_DIR) - @- $(RM) $(PY$(PROJECT)_SO) - @- $(RM) $(MAT$(PROJECT)_SO) - -supercleanfiles: - $(eval SUPERCLEAN_FILES := $(strip \ - $(foreach ext,$(SUPERCLEAN_EXTS), $(shell find . 
-name '*$(ext)' \ - -not -path './data/*')))) - -supercleanlist: supercleanfiles - @ \ - if [ -z "$(SUPERCLEAN_FILES)" ]; then \ - echo "No generated files found."; \ - else \ - echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \ - fi - -superclean: clean supercleanfiles - @ \ - if [ -z "$(SUPERCLEAN_FILES)" ]; then \ - echo "No generated files found."; \ - else \ - echo "Deleting the following generated files:"; \ - echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \ - $(RM) $(SUPERCLEAN_FILES); \ - fi - -$(DIST_ALIASES): $(DISTRIBUTE_DIR) - -$(DISTRIBUTE_DIR): all py | $(DISTRIBUTE_SUBDIRS) - # add include - cp -r include $(DISTRIBUTE_DIR)/ - mkdir -p $(DISTRIBUTE_DIR)/include/caffe/proto - cp $(PROTO_GEN_HEADER_SRCS) $(DISTRIBUTE_DIR)/include/caffe/proto - # add tool and example binaries - cp $(TOOL_BINS) $(DISTRIBUTE_DIR)/bin - cp $(EXAMPLE_BINS) $(DISTRIBUTE_DIR)/bin - # add libraries - cp $(STATIC_NAME) $(DISTRIBUTE_DIR)/lib - cp $(DYNAMIC_NAME) $(DISTRIBUTE_DIR)/lib - # add python - it's not the standard way, indeed... - cp -r python $(DISTRIBUTE_DIR)/python - --include $(DEPS) diff --git a/Makefile_BACKUP_61727 b/Makefile_BACKUP_61727 deleted file mode 100644 index 150cef28a83..00000000000 --- a/Makefile_BACKUP_61727 +++ /dev/null @@ -1,649 +0,0 @@ -PROJECT := caffe - -CONFIG_FILE := Makefile.config -# Explicitly check for the config file, otherwise make -k will proceed anyway. -ifeq ($(wildcard $(CONFIG_FILE)),) -$(error $(CONFIG_FILE) not found. See $(CONFIG_FILE).example.) -endif -include $(CONFIG_FILE) - -BUILD_DIR_LINK := $(BUILD_DIR) -ifeq ($(RELEASE_BUILD_DIR),) - RELEASE_BUILD_DIR := .$(BUILD_DIR)_release -endif -ifeq ($(DEBUG_BUILD_DIR),) - DEBUG_BUILD_DIR := .$(BUILD_DIR)_debug -endif - -DEBUG ?= 0 -ifeq ($(DEBUG), 1) - BUILD_DIR := $(DEBUG_BUILD_DIR) - OTHER_BUILD_DIR := $(RELEASE_BUILD_DIR) -else - BUILD_DIR := $(RELEASE_BUILD_DIR) - OTHER_BUILD_DIR := $(DEBUG_BUILD_DIR) -endif - -# All of the directories containing code. 
-SRC_DIRS := $(shell find * -type d -exec bash -c "find {} -maxdepth 1 \ - \( -name '*.cpp' -o -name '*.proto' \) | grep -q ." \; -print) - -# The target shared library name -LIB_BUILD_DIR := $(BUILD_DIR)/lib -STATIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).a -DYNAMIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).so - -############################## -# Get all source files -############################## -# CXX_SRCS are the source files excluding the test ones. -CXX_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cpp" -name "*.cpp") -# CU_SRCS are the cuda source files -CU_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cu" -name "*.cu") -# TEST_SRCS are the test source files -TEST_MAIN_SRC := src/$(PROJECT)/test/test_caffe_main.cpp -TEST_SRCS := $(shell find src/$(PROJECT) -name "test_*.cpp") -TEST_SRCS := $(filter-out $(TEST_MAIN_SRC), $(TEST_SRCS)) -TEST_CU_SRCS := $(shell find src/$(PROJECT) -name "test_*.cu") -GTEST_SRC := src/gtest/gtest-all.cpp -# TOOL_SRCS are the source files for the tool binaries -TOOL_SRCS := $(shell find tools -name "*.cpp") -# EXAMPLE_SRCS are the source files for the example binaries -EXAMPLE_SRCS := $(shell find examples -name "*.cpp") -# BUILD_INCLUDE_DIR contains any generated header files we want to include. -BUILD_INCLUDE_DIR := $(BUILD_DIR)/src -# PROTO_SRCS are the protocol buffer definitions -PROTO_SRC_DIR := src/$(PROJECT)/proto -PROTO_SRCS := $(wildcard $(PROTO_SRC_DIR)/*.proto) -# PROTO_BUILD_DIR will contain the .cc and obj files generated from -# PROTO_SRCS; PROTO_BUILD_INCLUDE_DIR will contain the .h header files -PROTO_BUILD_DIR := $(BUILD_DIR)/$(PROTO_SRC_DIR) -PROTO_BUILD_INCLUDE_DIR := $(BUILD_INCLUDE_DIR)/$(PROJECT)/proto -# NONGEN_CXX_SRCS includes all source/header files except those generated -# automatically (e.g., by proto). 
-NONGEN_CXX_SRCS := $(shell find \ - src/$(PROJECT) \ - include/$(PROJECT) \ - python/$(PROJECT) \ - matlab/+$(PROJECT)/private \ - examples \ - tools \ - -name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh") -LINT_SCRIPT := scripts/cpp_lint.py -LINT_OUTPUT_DIR := $(BUILD_DIR)/.lint -LINT_EXT := lint.txt -LINT_OUTPUTS := $(addsuffix .$(LINT_EXT), $(addprefix $(LINT_OUTPUT_DIR)/, $(NONGEN_CXX_SRCS))) -EMPTY_LINT_REPORT := $(BUILD_DIR)/.$(LINT_EXT) -NONEMPTY_LINT_REPORT := $(BUILD_DIR)/$(LINT_EXT) -# PY$(PROJECT)_SRC is the python wrapper for $(PROJECT) -PY$(PROJECT)_SRC := python/$(PROJECT)/_$(PROJECT).cpp -PY$(PROJECT)_SO := python/$(PROJECT)/_$(PROJECT).so -PY$(PROJECT)_HXX := include/$(PROJECT)/python_layer.hpp -# MAT$(PROJECT)_SRC is the mex entrance point of matlab package for $(PROJECT) -MAT$(PROJECT)_SRC := matlab/+$(PROJECT)/private/$(PROJECT)_.cpp -ifneq ($(MATLAB_DIR),) - MAT_SO_EXT := $(shell $(MATLAB_DIR)/bin/mexext) -endif -MAT$(PROJECT)_SO := matlab/+$(PROJECT)/private/$(PROJECT)_.$(MAT_SO_EXT) - -############################## -# Derive generated files -############################## -# The generated files for protocol buffers -PROTO_GEN_HEADER_SRCS := $(addprefix $(PROTO_BUILD_DIR)/, \ - $(notdir ${PROTO_SRCS:.proto=.pb.h})) -PROTO_GEN_HEADER := $(addprefix $(PROTO_BUILD_INCLUDE_DIR)/, \ - $(notdir ${PROTO_SRCS:.proto=.pb.h})) -PROTO_GEN_CC := $(addprefix $(BUILD_DIR)/, ${PROTO_SRCS:.proto=.pb.cc}) -PY_PROTO_BUILD_DIR := python/$(PROJECT)/proto -PY_PROTO_INIT := python/$(PROJECT)/proto/__init__.py -PROTO_GEN_PY := $(foreach file,${PROTO_SRCS:.proto=_pb2.py}, \ - $(PY_PROTO_BUILD_DIR)/$(notdir $(file))) -# The objects corresponding to the source files -# These objects will be linked into the final shared library, so we -# exclude the tool, example, and test objects. 
-CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o}) -CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o}) -PROTO_OBJS := ${PROTO_GEN_CC:.cc=.o} -OBJS := $(PROTO_OBJS) $(CXX_OBJS) $(CU_OBJS) -# tool, example, and test objects -TOOL_OBJS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o}) -TOOL_BUILD_DIR := $(BUILD_DIR)/tools -TEST_CXX_BUILD_DIR := $(BUILD_DIR)/src/$(PROJECT)/test -TEST_CU_BUILD_DIR := $(BUILD_DIR)/cuda/src/$(PROJECT)/test -TEST_CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o}) -TEST_CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o}) -TEST_OBJS := $(TEST_CXX_OBJS) $(TEST_CU_OBJS) -GTEST_OBJ := $(addprefix $(BUILD_DIR)/, ${GTEST_SRC:.cpp=.o}) -EXAMPLE_OBJS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o}) -# Output files for automatic dependency generation -DEPS := ${CXX_OBJS:.o=.d} ${CU_OBJS:.o=.d} ${TEST_CXX_OBJS:.o=.d} \ - ${TEST_CU_OBJS:.o=.d} $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d} -# tool, example, and test bins -TOOL_BINS := ${TOOL_OBJS:.o=.bin} -EXAMPLE_BINS := ${EXAMPLE_OBJS:.o=.bin} -# symlinks to tool bins without the ".bin" extension -TOOL_BIN_LINKS := ${TOOL_BINS:.bin=} -# Put the test binaries in build/test for convenience. -TEST_BIN_DIR := $(BUILD_DIR)/test -TEST_CU_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \ - $(foreach obj,$(TEST_CU_OBJS),$(basename $(notdir $(obj)))))) -TEST_CXX_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \ - $(foreach obj,$(TEST_CXX_OBJS),$(basename $(notdir $(obj)))))) -TEST_BINS := $(TEST_CXX_BINS) $(TEST_CU_BINS) -# TEST_ALL_BIN is the test binary that links caffe dynamically. 
-TEST_ALL_BIN := $(TEST_BIN_DIR)/test_all.testbin - -############################## -# Derive compiler warning dump locations -############################## -WARNS_EXT := warnings.txt -CXX_WARNS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o.$(WARNS_EXT)}) -CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o.$(WARNS_EXT)}) -TOOL_WARNS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o.$(WARNS_EXT)}) -EXAMPLE_WARNS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o.$(WARNS_EXT)}) -TEST_WARNS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o.$(WARNS_EXT)}) -TEST_CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o.$(WARNS_EXT)}) -ALL_CXX_WARNS := $(CXX_WARNS) $(TOOL_WARNS) $(EXAMPLE_WARNS) $(TEST_WARNS) -ALL_CU_WARNS := $(CU_WARNS) $(TEST_CU_WARNS) -ALL_WARNS := $(ALL_CXX_WARNS) $(ALL_CU_WARNS) - -EMPTY_WARN_REPORT := $(BUILD_DIR)/.$(WARNS_EXT) -NONEMPTY_WARN_REPORT := $(BUILD_DIR)/$(WARNS_EXT) - -############################## -# Derive include and lib directories -############################## -CUDA_INCLUDE_DIR := $(CUDA_DIR)/include - -CUDA_LIB_DIR := -# add /lib64 only if it exists -ifneq ("$(wildcard $(CUDA_DIR)/lib64)","") - CUDA_LIB_DIR += $(CUDA_DIR)/lib64 -endif -CUDA_LIB_DIR += $(CUDA_DIR)/lib - -INCLUDE_DIRS += $(BUILD_INCLUDE_DIR) ./src ./include -ifneq ($(CPU_ONLY), 1) - INCLUDE_DIRS += $(CUDA_INCLUDE_DIR) - LIBRARY_DIRS += $(CUDA_LIB_DIR) - LIBRARIES := cudart cublas curand -endif -LIBRARIES += glog gflags protobuf leveldb snappy \ - lmdb boost_system hdf5_hl hdf5 m \ - opencv_core opencv_highgui opencv_imgproc #opencv_imgcodecs -PYTHON_LIBRARIES := boost_python python2.7 -WARNINGS := -Wall -Wno-sign-compare - -############################## -# Set build directories -############################## - -DISTRIBUTE_DIR ?= distribute -DISTRIBUTE_SUBDIRS := $(DISTRIBUTE_DIR)/bin $(DISTRIBUTE_DIR)/lib -DIST_ALIASES := dist -ifneq ($(strip $(DISTRIBUTE_DIR)),distribute) - DIST_ALIASES += distribute -endif - -ALL_BUILD_DIRS := $(sort 
$(BUILD_DIR) $(addprefix $(BUILD_DIR)/, $(SRC_DIRS)) \ - $(addprefix $(BUILD_DIR)/cuda/, $(SRC_DIRS)) \ - $(LIB_BUILD_DIR) $(TEST_BIN_DIR) $(PY_PROTO_BUILD_DIR) $(LINT_OUTPUT_DIR) \ - $(DISTRIBUTE_SUBDIRS) $(PROTO_BUILD_INCLUDE_DIR)) - -############################## -# Set directory for Doxygen-generated documentation -############################## -DOXYGEN_CONFIG_FILE ?= ./.Doxyfile -# should be the same as OUTPUT_DIRECTORY in the .Doxyfile -DOXYGEN_OUTPUT_DIR ?= ./doxygen -DOXYGEN_COMMAND ?= doxygen -# All the files that might have Doxygen documentation. -DOXYGEN_SOURCES := $(shell find \ - src/$(PROJECT) \ - include/$(PROJECT) \ - python/ \ - matlab/ \ - examples \ - tools \ - -name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh" -or \ - -name "*.py" -or -name "*.m") -DOXYGEN_SOURCES += $(DOXYGEN_CONFIG_FILE) - - -############################## -# Configure build -############################## - -# Determine platform -UNAME := $(shell uname -s) -ifeq ($(UNAME), Linux) - LINUX := 1 -else ifeq ($(UNAME), Darwin) - OSX := 1 -endif - -# Linux -ifeq ($(LINUX), 1) - CXX ?= /usr/bin/g++ - GCCVERSION := $(shell $(CXX) -dumpversion | cut -f1,2 -d.) - # older versions of gcc are too dumb to build boost with -Wuninitalized -<<<<<<< abf8aae89dd323f86a6c76ff671a08a9d2794749 - ifeq ($(shell echo | awk '{exit $(GCCVERSION) < 4.6;}'), 1) -======= - ifeq ($(shell echo $(GCCVERSION) \< 4.6 | bc), 1) ->>>>>>> GPU version added - WARNINGS += -Wno-uninitialized - endif - # boost::thread is reasonably called boost_thread (compare OS X) - # We will also explicitly add stdc++ to the link target. 
- LIBRARIES += boost_thread stdc++ -endif - -# OS X: -# clang++ instead of g++ -# libstdc++ for NVCC compatibility on OS X >= 10.9 with CUDA < 7.0 -ifeq ($(OSX), 1) - CXX := /usr/bin/clang++ - ifneq ($(CPU_ONLY), 1) - CUDA_VERSION := $(shell $(CUDA_DIR)/bin/nvcc -V | grep -o 'release \d' | grep -o '\d') -<<<<<<< abf8aae89dd323f86a6c76ff671a08a9d2794749 - ifeq ($(shell echo | awk '{exit $(CUDA_VERSION) < 7.0;}'), 1) -======= - ifeq ($(shell echo $(CUDA_VERSION) \< 7.0 | bc), 1) ->>>>>>> GPU version added - CXXFLAGS += -stdlib=libstdc++ - LINKFLAGS += -stdlib=libstdc++ - endif - # clang throws this warning for cuda headers - WARNINGS += -Wno-unneeded-internal-declaration - endif - # gtest needs to use its own tuple to not conflict with clang - COMMON_FLAGS += -DGTEST_USE_OWN_TR1_TUPLE=1 - # boost::thread is called boost_thread-mt to mark multithreading on OS X - LIBRARIES += boost_thread-mt - # we need to explicitly ask for the rpath to be obeyed - DYNAMIC_FLAGS := -install_name @rpath/libcaffe.so - ORIGIN := @loader_path -else - ORIGIN := \$$ORIGIN -endif - -# Custom compiler -ifdef CUSTOM_CXX - CXX := $(CUSTOM_CXX) -endif - -# Static linking -ifneq (,$(findstring clang++,$(CXX))) - STATIC_LINK_COMMAND := -Wl,-force_load $(STATIC_NAME) -else ifneq (,$(findstring g++,$(CXX))) - STATIC_LINK_COMMAND := -Wl,--whole-archive $(STATIC_NAME) -Wl,--no-whole-archive -else - # The following line must not be indented with a tab, since we are not inside a target - $(error Cannot static link with the $(CXX) compiler) -endif - -# Debugging -ifeq ($(DEBUG), 1) - COMMON_FLAGS += -DDEBUG -g -O0 - NVCCFLAGS += -G -else - COMMON_FLAGS += -DNDEBUG -O2 -endif - -# cuDNN acceleration configuration. 
-ifeq ($(USE_CUDNN), 1) - LIBRARIES += cudnn - COMMON_FLAGS += -DUSE_CUDNN -endif - -# CPU-only configuration -ifeq ($(CPU_ONLY), 1) - OBJS := $(PROTO_OBJS) $(CXX_OBJS) - TEST_OBJS := $(TEST_CXX_OBJS) - TEST_BINS := $(TEST_CXX_BINS) - ALL_WARNS := $(ALL_CXX_WARNS) - TEST_FILTER := --gtest_filter="-*GPU*" - COMMON_FLAGS += -DCPU_ONLY -endif - -# Python layer support -ifeq ($(WITH_PYTHON_LAYER), 1) - COMMON_FLAGS += -DWITH_PYTHON_LAYER - LIBRARIES += $(PYTHON_LIBRARIES) -endif - -# BLAS configuration (default = ATLAS) -BLAS ?= atlas -ifeq ($(BLAS), mkl) - # MKL - LIBRARIES += mkl_rt - COMMON_FLAGS += -DUSE_MKL - MKL_DIR ?= /opt/intel/mkl - BLAS_INCLUDE ?= $(MKL_DIR)/include - BLAS_LIB ?= $(MKL_DIR)/lib $(MKL_DIR)/lib/intel64 -else ifeq ($(BLAS), open) - # OpenBLAS - LIBRARIES += openblas -else - # ATLAS - ifeq ($(LINUX), 1) - ifeq ($(BLAS), atlas) - # Linux simply has cblas and atlas - LIBRARIES += cblas atlas - endif - else ifeq ($(OSX), 1) - # OS X packages atlas as the vecLib framework - LIBRARIES += cblas - # 10.10 has accelerate while 10.9 has veclib - XCODE_CLT_VER := $(shell pkgutil --pkg-info=com.apple.pkg.CLTools_Executables | grep -o 'version: 6') - ifneq (,$(findstring version: 6,$(XCODE_CLT_VER))) - BLAS_INCLUDE ?= /System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/ - LDFLAGS += -framework Accelerate - else - BLAS_INCLUDE ?= /System/Library/Frameworks/vecLib.framework/Versions/Current/Headers/ - LDFLAGS += -framework vecLib - endif - endif -endif -INCLUDE_DIRS += $(BLAS_INCLUDE) -LIBRARY_DIRS += $(BLAS_LIB) - -LIBRARY_DIRS += $(LIB_BUILD_DIR) - -# Automatic dependency generation (nvcc is handled separately) -CXXFLAGS += -MMD -MP - -# Complete build flags. 
-COMMON_FLAGS += $(foreach includedir,$(INCLUDE_DIRS),-I$(includedir)) -CXXFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) -NVCCFLAGS += -ccbin=$(CXX) -Xcompiler -fPIC $(COMMON_FLAGS) -# mex may invoke an older gcc that is too liberal with -Wuninitalized -MATLAB_CXXFLAGS := $(CXXFLAGS) -Wno-uninitialized -LINKFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) - -USE_PKG_CONFIG ?= 0 -ifeq ($(USE_PKG_CONFIG), 1) - PKG_CONFIG := $(shell pkg-config opencv --libs) -else - PKG_CONFIG := -endif -LDFLAGS += $(foreach librarydir,$(LIBRARY_DIRS),-L$(librarydir)) $(PKG_CONFIG) \ - $(foreach library,$(LIBRARIES),-l$(library)) -PYTHON_LDFLAGS := $(LDFLAGS) $(foreach library,$(PYTHON_LIBRARIES),-l$(library)) - -# 'superclean' target recursively* deletes all files ending with an extension -# in $(SUPERCLEAN_EXTS) below. This may be useful if you've built older -# versions of Caffe that do not place all generated files in a location known -# to the 'clean' target. -# -# 'supercleanlist' will list the files to be deleted by make superclean. -# -# * Recursive with the exception that symbolic links are never followed, per the -# default behavior of 'find'. -SUPERCLEAN_EXTS := .so .a .o .bin .testbin .pb.cc .pb.h _pb2.py .cuo - -# Set the sub-targets of the 'everything' target. -EVERYTHING_TARGETS := all py$(PROJECT) test warn lint -# Only build matcaffe as part of "everything" if MATLAB_DIR is specified. 
-ifneq ($(MATLAB_DIR),) - EVERYTHING_TARGETS += mat$(PROJECT) -endif - -############################## -# Define build targets -############################## -<<<<<<< abf8aae89dd323f86a6c76ff671a08a9d2794749 -.PHONY: all lib test clean docs linecount lint lintclean tools examples $(DIST_ALIASES) \ - py mat py$(PROJECT) mat$(PROJECT) proto runtest \ - superclean supercleanlist supercleanfiles warn everything - -all: lib tools examples - -lib: $(STATIC_NAME) $(DYNAMIC_NAME) -======= -.PHONY: all test clean docs linecount lint lintclean tools examples $(DIST_ALIASES) \ - py mat py$(PROJECT) mat$(PROJECT) proto runtest \ - superclean supercleanlist supercleanfiles warn everything - -all: $(STATIC_NAME) $(DYNAMIC_NAME) tools examples ->>>>>>> GPU version added - -everything: $(EVERYTHING_TARGETS) - -linecount: - cloc --read-lang-def=$(PROJECT).cloc \ - src/$(PROJECT) include/$(PROJECT) tools examples \ - python matlab - -lint: $(EMPTY_LINT_REPORT) - -lintclean: - @ $(RM) -r $(LINT_OUTPUT_DIR) $(EMPTY_LINT_REPORT) $(NONEMPTY_LINT_REPORT) - -docs: $(DOXYGEN_OUTPUT_DIR) - @ cd ./docs ; ln -sfn ../$(DOXYGEN_OUTPUT_DIR)/html doxygen - -$(DOXYGEN_OUTPUT_DIR): $(DOXYGEN_CONFIG_FILE) $(DOXYGEN_SOURCES) - $(DOXYGEN_COMMAND) $(DOXYGEN_CONFIG_FILE) - -$(EMPTY_LINT_REPORT): $(LINT_OUTPUTS) | $(BUILD_DIR) - @ cat $(LINT_OUTPUTS) > $@ - @ if [ -s "$@" ]; then \ - cat $@; \ - mv $@ $(NONEMPTY_LINT_REPORT); \ - echo "Found one or more lint errors."; \ - exit 1; \ - fi; \ - $(RM) $(NONEMPTY_LINT_REPORT); \ - echo "No lint errors!"; - -$(LINT_OUTPUTS): $(LINT_OUTPUT_DIR)/%.lint.txt : % $(LINT_SCRIPT) | $(LINT_OUTPUT_DIR) - @ mkdir -p $(dir $@) - @ python $(LINT_SCRIPT) $< 2>&1 \ - | grep -v "^Done processing " \ - | grep -v "^Total errors found: 0" \ - > $@ \ - || true - -test: $(TEST_ALL_BIN) $(TEST_ALL_DYNLINK_BIN) $(TEST_BINS) - -tools: $(TOOL_BINS) $(TOOL_BIN_LINKS) - -examples: $(EXAMPLE_BINS) - -py$(PROJECT): py - -py: $(PY$(PROJECT)_SO) $(PROTO_GEN_PY) - -$(PY$(PROJECT)_SO): 
$(PY$(PROJECT)_SRC) $(PY$(PROJECT)_HXX) | $(DYNAMIC_NAME) - @ echo CXX/LD -o $@ $< - $(Q)$(CXX) -shared -o $@ $(PY$(PROJECT)_SRC) \ - -o $@ $(LINKFLAGS) -l$(PROJECT) $(PYTHON_LDFLAGS) \ - -Wl,-rpath,$(ORIGIN)/../../build/lib - -mat$(PROJECT): mat - -mat: $(MAT$(PROJECT)_SO) - -$(MAT$(PROJECT)_SO): $(MAT$(PROJECT)_SRC) $(STATIC_NAME) - @ if [ -z "$(MATLAB_DIR)" ]; then \ - echo "MATLAB_DIR must be specified in $(CONFIG_FILE)" \ - "to build mat$(PROJECT)."; \ - exit 1; \ - fi - @ echo MEX $< - $(Q)$(MATLAB_DIR)/bin/mex $(MAT$(PROJECT)_SRC) \ - CXX="$(CXX)" \ - CXXFLAGS="\$$CXXFLAGS $(MATLAB_CXXFLAGS)" \ - CXXLIBS="\$$CXXLIBS $(STATIC_LINK_COMMAND) $(LDFLAGS)" -output $@ - @ if [ -f "$(PROJECT)_.d" ]; then \ - mv -f $(PROJECT)_.d $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d}; \ - fi - -runtest: $(TEST_ALL_BIN) - $(TOOL_BUILD_DIR)/caffe - $(TEST_ALL_BIN) $(TEST_GPUID) --gtest_shuffle $(TEST_FILTER) - -pytest: py - cd python; python -m unittest discover -s caffe/test - -mattest: mat - cd matlab; $(MATLAB_DIR)/bin/matlab -nodisplay -r 'caffe.run_tests(), exit()' - -warn: $(EMPTY_WARN_REPORT) - -$(EMPTY_WARN_REPORT): $(ALL_WARNS) | $(BUILD_DIR) - @ cat $(ALL_WARNS) > $@ - @ if [ -s "$@" ]; then \ - cat $@; \ - mv $@ $(NONEMPTY_WARN_REPORT); \ - echo "Compiler produced one or more warnings."; \ - exit 1; \ - fi; \ - $(RM) $(NONEMPTY_WARN_REPORT); \ - echo "No compiler warnings!"; - -$(ALL_WARNS): %.o.$(WARNS_EXT) : %.o - -$(BUILD_DIR_LINK): $(BUILD_DIR)/.linked - -# Create a target ".linked" in this BUILD_DIR to tell Make that the "build" link -# is currently correct, then delete the one in the OTHER_BUILD_DIR in case it -# exists and $(DEBUG) is toggled later. 
-$(BUILD_DIR)/.linked: - @ mkdir -p $(BUILD_DIR) - @ $(RM) $(OTHER_BUILD_DIR)/.linked - @ $(RM) -r $(BUILD_DIR_LINK) - @ ln -s $(BUILD_DIR) $(BUILD_DIR_LINK) - @ touch $@ - -$(ALL_BUILD_DIRS): | $(BUILD_DIR_LINK) - @ mkdir -p $@ - -$(DYNAMIC_NAME): $(OBJS) | $(LIB_BUILD_DIR) - @ echo LD -o $@ - $(Q)$(CXX) -shared -o $@ $(OBJS) $(LINKFLAGS) $(LDFLAGS) $(DYNAMIC_FLAGS) - -$(STATIC_NAME): $(OBJS) | $(LIB_BUILD_DIR) - @ echo AR -o $@ - $(Q)ar rcs $@ $(OBJS) - -$(BUILD_DIR)/%.o: %.cpp | $(ALL_BUILD_DIRS) - @ echo CXX $< - $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ - || (cat $@.$(WARNS_EXT); exit 1) - @ cat $@.$(WARNS_EXT) - -$(PROTO_BUILD_DIR)/%.pb.o: $(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_GEN_HEADER) \ - | $(PROTO_BUILD_DIR) - @ echo CXX $< - $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ - || (cat $@.$(WARNS_EXT); exit 1) - @ cat $@.$(WARNS_EXT) - -$(BUILD_DIR)/cuda/%.o: %.cu | $(ALL_BUILD_DIRS) - @ echo NVCC $< - $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -M $< -o ${@:.o=.d} \ - -odir $(@D) - $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -c $< -o $@ 2> $@.$(WARNS_EXT) \ - || (cat $@.$(WARNS_EXT); exit 1) - @ cat $@.$(WARNS_EXT) - -$(TEST_ALL_BIN): $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \ - | $(DYNAMIC_NAME) $(TEST_BIN_DIR) - @ echo CXX/LD -o $@ $< - $(Q)$(CXX) $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \ - -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib - -$(TEST_CU_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CU_BUILD_DIR)/%.o \ - $(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR) - @ echo LD $< - $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \ - -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib - -$(TEST_CXX_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CXX_BUILD_DIR)/%.o \ - $(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR) - @ echo LD $< - $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \ - -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib - -# Target for extension-less symlinks to tool 
binaries with extension '*.bin'. -$(TOOL_BUILD_DIR)/%: $(TOOL_BUILD_DIR)/%.bin | $(TOOL_BUILD_DIR) - @ $(RM) $@ - @ ln -s $(abspath $<) $@ - -$(TOOL_BINS): %.bin : %.o | $(DYNAMIC_NAME) - @ echo CXX/LD -o $@ - $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \ - -Wl,-rpath,$(ORIGIN)/../lib - -$(EXAMPLE_BINS): %.bin : %.o | $(DYNAMIC_NAME) - @ echo CXX/LD -o $@ - $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \ - -Wl,-rpath,$(ORIGIN)/../../lib - -proto: $(PROTO_GEN_CC) $(PROTO_GEN_HEADER) - -$(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_BUILD_DIR)/%.pb.h : \ - $(PROTO_SRC_DIR)/%.proto | $(PROTO_BUILD_DIR) - @ echo PROTOC $< - $(Q)protoc --proto_path=$(PROTO_SRC_DIR) --cpp_out=$(PROTO_BUILD_DIR) $< - -$(PY_PROTO_BUILD_DIR)/%_pb2.py : $(PROTO_SRC_DIR)/%.proto \ - $(PY_PROTO_INIT) | $(PY_PROTO_BUILD_DIR) - @ echo PROTOC \(python\) $< - $(Q)protoc --proto_path=$(PROTO_SRC_DIR) --python_out=$(PY_PROTO_BUILD_DIR) $< - -$(PY_PROTO_INIT): | $(PY_PROTO_BUILD_DIR) - touch $(PY_PROTO_INIT) - -clean: - @- $(RM) -rf $(ALL_BUILD_DIRS) - @- $(RM) -rf $(OTHER_BUILD_DIR) - @- $(RM) -rf $(BUILD_DIR_LINK) - @- $(RM) -rf $(DISTRIBUTE_DIR) - @- $(RM) $(PY$(PROJECT)_SO) - @- $(RM) $(MAT$(PROJECT)_SO) - -supercleanfiles: - $(eval SUPERCLEAN_FILES := $(strip \ - $(foreach ext,$(SUPERCLEAN_EXTS), $(shell find . 
-name '*$(ext)' \ - -not -path './data/*')))) - -supercleanlist: supercleanfiles - @ \ - if [ -z "$(SUPERCLEAN_FILES)" ]; then \ - echo "No generated files found."; \ - else \ - echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \ - fi - -superclean: clean supercleanfiles - @ \ - if [ -z "$(SUPERCLEAN_FILES)" ]; then \ - echo "No generated files found."; \ - else \ - echo "Deleting the following generated files:"; \ - echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \ - $(RM) $(SUPERCLEAN_FILES); \ - fi - -$(DIST_ALIASES): $(DISTRIBUTE_DIR) - -$(DISTRIBUTE_DIR): all py | $(DISTRIBUTE_SUBDIRS) - # add include - cp -r include $(DISTRIBUTE_DIR)/ - mkdir -p $(DISTRIBUTE_DIR)/include/caffe/proto - cp $(PROTO_GEN_HEADER_SRCS) $(DISTRIBUTE_DIR)/include/caffe/proto - # add tool and example binaries - cp $(TOOL_BINS) $(DISTRIBUTE_DIR)/bin - cp $(EXAMPLE_BINS) $(DISTRIBUTE_DIR)/bin - # add libraries - cp $(STATIC_NAME) $(DISTRIBUTE_DIR)/lib - cp $(DYNAMIC_NAME) $(DISTRIBUTE_DIR)/lib - # add python - it's not the standard way, indeed... - cp -r python $(DISTRIBUTE_DIR)/python - --include $(DEPS) diff --git a/Makefile_BASE_61727 b/Makefile_BASE_61727 deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/Makefile_LOCAL_61727 b/Makefile_LOCAL_61727 deleted file mode 100644 index 287fa4ea52f..00000000000 --- a/Makefile_LOCAL_61727 +++ /dev/null @@ -1,633 +0,0 @@ -PROJECT := caffe - -CONFIG_FILE := Makefile.config -# Explicitly check for the config file, otherwise make -k will proceed anyway. -ifeq ($(wildcard $(CONFIG_FILE)),) -$(error $(CONFIG_FILE) not found. See $(CONFIG_FILE).example.) 
-endif -include $(CONFIG_FILE) - -BUILD_DIR_LINK := $(BUILD_DIR) -ifeq ($(RELEASE_BUILD_DIR),) - RELEASE_BUILD_DIR := .$(BUILD_DIR)_release -endif -ifeq ($(DEBUG_BUILD_DIR),) - DEBUG_BUILD_DIR := .$(BUILD_DIR)_debug -endif - -DEBUG ?= 0 -ifeq ($(DEBUG), 1) - BUILD_DIR := $(DEBUG_BUILD_DIR) - OTHER_BUILD_DIR := $(RELEASE_BUILD_DIR) -else - BUILD_DIR := $(RELEASE_BUILD_DIR) - OTHER_BUILD_DIR := $(DEBUG_BUILD_DIR) -endif - -# All of the directories containing code. -SRC_DIRS := $(shell find * -type d -exec bash -c "find {} -maxdepth 1 \ - \( -name '*.cpp' -o -name '*.proto' \) | grep -q ." \; -print) - -# The target shared library name -LIB_BUILD_DIR := $(BUILD_DIR)/lib -STATIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).a -DYNAMIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).so - -############################## -# Get all source files -############################## -# CXX_SRCS are the source files excluding the test ones. -CXX_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cpp" -name "*.cpp") -# CU_SRCS are the cuda source files -CU_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cu" -name "*.cu") -# TEST_SRCS are the test source files -TEST_MAIN_SRC := src/$(PROJECT)/test/test_caffe_main.cpp -TEST_SRCS := $(shell find src/$(PROJECT) -name "test_*.cpp") -TEST_SRCS := $(filter-out $(TEST_MAIN_SRC), $(TEST_SRCS)) -TEST_CU_SRCS := $(shell find src/$(PROJECT) -name "test_*.cu") -GTEST_SRC := src/gtest/gtest-all.cpp -# TOOL_SRCS are the source files for the tool binaries -TOOL_SRCS := $(shell find tools -name "*.cpp") -# EXAMPLE_SRCS are the source files for the example binaries -EXAMPLE_SRCS := $(shell find examples -name "*.cpp") -# BUILD_INCLUDE_DIR contains any generated header files we want to include. 
-BUILD_INCLUDE_DIR := $(BUILD_DIR)/src -# PROTO_SRCS are the protocol buffer definitions -PROTO_SRC_DIR := src/$(PROJECT)/proto -PROTO_SRCS := $(wildcard $(PROTO_SRC_DIR)/*.proto) -# PROTO_BUILD_DIR will contain the .cc and obj files generated from -# PROTO_SRCS; PROTO_BUILD_INCLUDE_DIR will contain the .h header files -PROTO_BUILD_DIR := $(BUILD_DIR)/$(PROTO_SRC_DIR) -PROTO_BUILD_INCLUDE_DIR := $(BUILD_INCLUDE_DIR)/$(PROJECT)/proto -# NONGEN_CXX_SRCS includes all source/header files except those generated -# automatically (e.g., by proto). -NONGEN_CXX_SRCS := $(shell find \ - src/$(PROJECT) \ - include/$(PROJECT) \ - python/$(PROJECT) \ - matlab/+$(PROJECT)/private \ - examples \ - tools \ - -name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh") -LINT_SCRIPT := scripts/cpp_lint.py -LINT_OUTPUT_DIR := $(BUILD_DIR)/.lint -LINT_EXT := lint.txt -LINT_OUTPUTS := $(addsuffix .$(LINT_EXT), $(addprefix $(LINT_OUTPUT_DIR)/, $(NONGEN_CXX_SRCS))) -EMPTY_LINT_REPORT := $(BUILD_DIR)/.$(LINT_EXT) -NONEMPTY_LINT_REPORT := $(BUILD_DIR)/$(LINT_EXT) -# PY$(PROJECT)_SRC is the python wrapper for $(PROJECT) -PY$(PROJECT)_SRC := python/$(PROJECT)/_$(PROJECT).cpp -PY$(PROJECT)_SO := python/$(PROJECT)/_$(PROJECT).so -PY$(PROJECT)_HXX := include/$(PROJECT)/python_layer.hpp -# MAT$(PROJECT)_SRC is the mex entrance point of matlab package for $(PROJECT) -MAT$(PROJECT)_SRC := matlab/+$(PROJECT)/private/$(PROJECT)_.cpp -ifneq ($(MATLAB_DIR),) - MAT_SO_EXT := $(shell $(MATLAB_DIR)/bin/mexext) -endif -MAT$(PROJECT)_SO := matlab/+$(PROJECT)/private/$(PROJECT)_.$(MAT_SO_EXT) - -############################## -# Derive generated files -############################## -# The generated files for protocol buffers -PROTO_GEN_HEADER_SRCS := $(addprefix $(PROTO_BUILD_DIR)/, \ - $(notdir ${PROTO_SRCS:.proto=.pb.h})) -PROTO_GEN_HEADER := $(addprefix $(PROTO_BUILD_INCLUDE_DIR)/, \ - $(notdir ${PROTO_SRCS:.proto=.pb.h})) -PROTO_GEN_CC := $(addprefix $(BUILD_DIR)/, ${PROTO_SRCS:.proto=.pb.cc}) 
-PY_PROTO_BUILD_DIR := python/$(PROJECT)/proto -PY_PROTO_INIT := python/$(PROJECT)/proto/__init__.py -PROTO_GEN_PY := $(foreach file,${PROTO_SRCS:.proto=_pb2.py}, \ - $(PY_PROTO_BUILD_DIR)/$(notdir $(file))) -# The objects corresponding to the source files -# These objects will be linked into the final shared library, so we -# exclude the tool, example, and test objects. -CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o}) -CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o}) -PROTO_OBJS := ${PROTO_GEN_CC:.cc=.o} -OBJS := $(PROTO_OBJS) $(CXX_OBJS) $(CU_OBJS) -# tool, example, and test objects -TOOL_OBJS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o}) -TOOL_BUILD_DIR := $(BUILD_DIR)/tools -TEST_CXX_BUILD_DIR := $(BUILD_DIR)/src/$(PROJECT)/test -TEST_CU_BUILD_DIR := $(BUILD_DIR)/cuda/src/$(PROJECT)/test -TEST_CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o}) -TEST_CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o}) -TEST_OBJS := $(TEST_CXX_OBJS) $(TEST_CU_OBJS) -GTEST_OBJ := $(addprefix $(BUILD_DIR)/, ${GTEST_SRC:.cpp=.o}) -EXAMPLE_OBJS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o}) -# Output files for automatic dependency generation -DEPS := ${CXX_OBJS:.o=.d} ${CU_OBJS:.o=.d} ${TEST_CXX_OBJS:.o=.d} \ - ${TEST_CU_OBJS:.o=.d} $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d} -# tool, example, and test bins -TOOL_BINS := ${TOOL_OBJS:.o=.bin} -EXAMPLE_BINS := ${EXAMPLE_OBJS:.o=.bin} -# symlinks to tool bins without the ".bin" extension -TOOL_BIN_LINKS := ${TOOL_BINS:.bin=} -# Put the test binaries in build/test for convenience. 
-TEST_BIN_DIR := $(BUILD_DIR)/test -TEST_CU_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \ - $(foreach obj,$(TEST_CU_OBJS),$(basename $(notdir $(obj)))))) -TEST_CXX_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \ - $(foreach obj,$(TEST_CXX_OBJS),$(basename $(notdir $(obj)))))) -TEST_BINS := $(TEST_CXX_BINS) $(TEST_CU_BINS) -# TEST_ALL_BIN is the test binary that links caffe dynamically. -TEST_ALL_BIN := $(TEST_BIN_DIR)/test_all.testbin - -############################## -# Derive compiler warning dump locations -############################## -WARNS_EXT := warnings.txt -CXX_WARNS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o.$(WARNS_EXT)}) -CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o.$(WARNS_EXT)}) -TOOL_WARNS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o.$(WARNS_EXT)}) -EXAMPLE_WARNS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o.$(WARNS_EXT)}) -TEST_WARNS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o.$(WARNS_EXT)}) -TEST_CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o.$(WARNS_EXT)}) -ALL_CXX_WARNS := $(CXX_WARNS) $(TOOL_WARNS) $(EXAMPLE_WARNS) $(TEST_WARNS) -ALL_CU_WARNS := $(CU_WARNS) $(TEST_CU_WARNS) -ALL_WARNS := $(ALL_CXX_WARNS) $(ALL_CU_WARNS) - -EMPTY_WARN_REPORT := $(BUILD_DIR)/.$(WARNS_EXT) -NONEMPTY_WARN_REPORT := $(BUILD_DIR)/$(WARNS_EXT) - -############################## -# Derive include and lib directories -############################## -CUDA_INCLUDE_DIR := $(CUDA_DIR)/include - -CUDA_LIB_DIR := -# add /lib64 only if it exists -ifneq ("$(wildcard $(CUDA_DIR)/lib64)","") - CUDA_LIB_DIR += $(CUDA_DIR)/lib64 -endif -CUDA_LIB_DIR += $(CUDA_DIR)/lib - -INCLUDE_DIRS += $(BUILD_INCLUDE_DIR) ./src ./include -ifneq ($(CPU_ONLY), 1) - INCLUDE_DIRS += $(CUDA_INCLUDE_DIR) - LIBRARY_DIRS += $(CUDA_LIB_DIR) - LIBRARIES := cudart cublas curand -endif -LIBRARIES += glog gflags protobuf leveldb snappy \ - lmdb boost_system hdf5_hl hdf5 m \ - opencv_core opencv_highgui opencv_imgproc 
#opencv_imgcodecs -PYTHON_LIBRARIES := boost_python python2.7 -WARNINGS := -Wall -Wno-sign-compare - -############################## -# Set build directories -############################## - -DISTRIBUTE_DIR ?= distribute -DISTRIBUTE_SUBDIRS := $(DISTRIBUTE_DIR)/bin $(DISTRIBUTE_DIR)/lib -DIST_ALIASES := dist -ifneq ($(strip $(DISTRIBUTE_DIR)),distribute) - DIST_ALIASES += distribute -endif - -ALL_BUILD_DIRS := $(sort $(BUILD_DIR) $(addprefix $(BUILD_DIR)/, $(SRC_DIRS)) \ - $(addprefix $(BUILD_DIR)/cuda/, $(SRC_DIRS)) \ - $(LIB_BUILD_DIR) $(TEST_BIN_DIR) $(PY_PROTO_BUILD_DIR) $(LINT_OUTPUT_DIR) \ - $(DISTRIBUTE_SUBDIRS) $(PROTO_BUILD_INCLUDE_DIR)) - -############################## -# Set directory for Doxygen-generated documentation -############################## -DOXYGEN_CONFIG_FILE ?= ./.Doxyfile -# should be the same as OUTPUT_DIRECTORY in the .Doxyfile -DOXYGEN_OUTPUT_DIR ?= ./doxygen -DOXYGEN_COMMAND ?= doxygen -# All the files that might have Doxygen documentation. -DOXYGEN_SOURCES := $(shell find \ - src/$(PROJECT) \ - include/$(PROJECT) \ - python/ \ - matlab/ \ - examples \ - tools \ - -name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh" -or \ - -name "*.py" -or -name "*.m") -DOXYGEN_SOURCES += $(DOXYGEN_CONFIG_FILE) - - -############################## -# Configure build -############################## - -# Determine platform -UNAME := $(shell uname -s) -ifeq ($(UNAME), Linux) - LINUX := 1 -else ifeq ($(UNAME), Darwin) - OSX := 1 -endif - -# Linux -ifeq ($(LINUX), 1) - CXX ?= /usr/bin/g++ - GCCVERSION := $(shell $(CXX) -dumpversion | cut -f1,2 -d.) - # older versions of gcc are too dumb to build boost with -Wuninitalized - ifeq ($(shell echo | awk '{exit $(GCCVERSION) < 4.6;}'), 1) - WARNINGS += -Wno-uninitialized - endif - # boost::thread is reasonably called boost_thread (compare OS X) - # We will also explicitly add stdc++ to the link target. 
- LIBRARIES += boost_thread stdc++ -endif - -# OS X: -# clang++ instead of g++ -# libstdc++ for NVCC compatibility on OS X >= 10.9 with CUDA < 7.0 -ifeq ($(OSX), 1) - CXX := /usr/bin/clang++ - ifneq ($(CPU_ONLY), 1) - CUDA_VERSION := $(shell $(CUDA_DIR)/bin/nvcc -V | grep -o 'release \d' | grep -o '\d') - ifeq ($(shell echo | awk '{exit $(CUDA_VERSION) < 7.0;}'), 1) - CXXFLAGS += -stdlib=libstdc++ - LINKFLAGS += -stdlib=libstdc++ - endif - # clang throws this warning for cuda headers - WARNINGS += -Wno-unneeded-internal-declaration - endif - # gtest needs to use its own tuple to not conflict with clang - COMMON_FLAGS += -DGTEST_USE_OWN_TR1_TUPLE=1 - # boost::thread is called boost_thread-mt to mark multithreading on OS X - LIBRARIES += boost_thread-mt - # we need to explicitly ask for the rpath to be obeyed - DYNAMIC_FLAGS := -install_name @rpath/libcaffe.so - ORIGIN := @loader_path -else - ORIGIN := \$$ORIGIN -endif - -# Custom compiler -ifdef CUSTOM_CXX - CXX := $(CUSTOM_CXX) -endif - -# Static linking -ifneq (,$(findstring clang++,$(CXX))) - STATIC_LINK_COMMAND := -Wl,-force_load $(STATIC_NAME) -else ifneq (,$(findstring g++,$(CXX))) - STATIC_LINK_COMMAND := -Wl,--whole-archive $(STATIC_NAME) -Wl,--no-whole-archive -else - # The following line must not be indented with a tab, since we are not inside a target - $(error Cannot static link with the $(CXX) compiler) -endif - -# Debugging -ifeq ($(DEBUG), 1) - COMMON_FLAGS += -DDEBUG -g -O0 - NVCCFLAGS += -G -else - COMMON_FLAGS += -DNDEBUG -O2 -endif - -# cuDNN acceleration configuration. 
-ifeq ($(USE_CUDNN), 1) - LIBRARIES += cudnn - COMMON_FLAGS += -DUSE_CUDNN -endif - -# CPU-only configuration -ifeq ($(CPU_ONLY), 1) - OBJS := $(PROTO_OBJS) $(CXX_OBJS) - TEST_OBJS := $(TEST_CXX_OBJS) - TEST_BINS := $(TEST_CXX_BINS) - ALL_WARNS := $(ALL_CXX_WARNS) - TEST_FILTER := --gtest_filter="-*GPU*" - COMMON_FLAGS += -DCPU_ONLY -endif - -# Python layer support -ifeq ($(WITH_PYTHON_LAYER), 1) - COMMON_FLAGS += -DWITH_PYTHON_LAYER - LIBRARIES += $(PYTHON_LIBRARIES) -endif - -# BLAS configuration (default = ATLAS) -BLAS ?= atlas -ifeq ($(BLAS), mkl) - # MKL - LIBRARIES += mkl_rt - COMMON_FLAGS += -DUSE_MKL - MKL_DIR ?= /opt/intel/mkl - BLAS_INCLUDE ?= $(MKL_DIR)/include - BLAS_LIB ?= $(MKL_DIR)/lib $(MKL_DIR)/lib/intel64 -else ifeq ($(BLAS), open) - # OpenBLAS - LIBRARIES += openblas -else - # ATLAS - ifeq ($(LINUX), 1) - ifeq ($(BLAS), atlas) - # Linux simply has cblas and atlas - LIBRARIES += cblas atlas - endif - else ifeq ($(OSX), 1) - # OS X packages atlas as the vecLib framework - LIBRARIES += cblas - # 10.10 has accelerate while 10.9 has veclib - XCODE_CLT_VER := $(shell pkgutil --pkg-info=com.apple.pkg.CLTools_Executables | grep -o 'version: 6') - ifneq (,$(findstring version: 6,$(XCODE_CLT_VER))) - BLAS_INCLUDE ?= /System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/ - LDFLAGS += -framework Accelerate - else - BLAS_INCLUDE ?= /System/Library/Frameworks/vecLib.framework/Versions/Current/Headers/ - LDFLAGS += -framework vecLib - endif - endif -endif -INCLUDE_DIRS += $(BLAS_INCLUDE) -LIBRARY_DIRS += $(BLAS_LIB) - -LIBRARY_DIRS += $(LIB_BUILD_DIR) - -# Automatic dependency generation (nvcc is handled separately) -CXXFLAGS += -MMD -MP - -# Complete build flags. 
-COMMON_FLAGS += $(foreach includedir,$(INCLUDE_DIRS),-I$(includedir)) -CXXFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) -NVCCFLAGS += -ccbin=$(CXX) -Xcompiler -fPIC $(COMMON_FLAGS) -# mex may invoke an older gcc that is too liberal with -Wuninitalized -MATLAB_CXXFLAGS := $(CXXFLAGS) -Wno-uninitialized -LINKFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) - -USE_PKG_CONFIG ?= 0 -ifeq ($(USE_PKG_CONFIG), 1) - PKG_CONFIG := $(shell pkg-config opencv --libs) -else - PKG_CONFIG := -endif -LDFLAGS += $(foreach librarydir,$(LIBRARY_DIRS),-L$(librarydir)) $(PKG_CONFIG) \ - $(foreach library,$(LIBRARIES),-l$(library)) -PYTHON_LDFLAGS := $(LDFLAGS) $(foreach library,$(PYTHON_LIBRARIES),-l$(library)) - -# 'superclean' target recursively* deletes all files ending with an extension -# in $(SUPERCLEAN_EXTS) below. This may be useful if you've built older -# versions of Caffe that do not place all generated files in a location known -# to the 'clean' target. -# -# 'supercleanlist' will list the files to be deleted by make superclean. -# -# * Recursive with the exception that symbolic links are never followed, per the -# default behavior of 'find'. -SUPERCLEAN_EXTS := .so .a .o .bin .testbin .pb.cc .pb.h _pb2.py .cuo - -# Set the sub-targets of the 'everything' target. -EVERYTHING_TARGETS := all py$(PROJECT) test warn lint -# Only build matcaffe as part of "everything" if MATLAB_DIR is specified. 
-ifneq ($(MATLAB_DIR),) - EVERYTHING_TARGETS += mat$(PROJECT) -endif - -############################## -# Define build targets -############################## -.PHONY: all lib test clean docs linecount lint lintclean tools examples $(DIST_ALIASES) \ - py mat py$(PROJECT) mat$(PROJECT) proto runtest \ - superclean supercleanlist supercleanfiles warn everything - -all: lib tools examples - -lib: $(STATIC_NAME) $(DYNAMIC_NAME) - -everything: $(EVERYTHING_TARGETS) - -linecount: - cloc --read-lang-def=$(PROJECT).cloc \ - src/$(PROJECT) include/$(PROJECT) tools examples \ - python matlab - -lint: $(EMPTY_LINT_REPORT) - -lintclean: - @ $(RM) -r $(LINT_OUTPUT_DIR) $(EMPTY_LINT_REPORT) $(NONEMPTY_LINT_REPORT) - -docs: $(DOXYGEN_OUTPUT_DIR) - @ cd ./docs ; ln -sfn ../$(DOXYGEN_OUTPUT_DIR)/html doxygen - -$(DOXYGEN_OUTPUT_DIR): $(DOXYGEN_CONFIG_FILE) $(DOXYGEN_SOURCES) - $(DOXYGEN_COMMAND) $(DOXYGEN_CONFIG_FILE) - -$(EMPTY_LINT_REPORT): $(LINT_OUTPUTS) | $(BUILD_DIR) - @ cat $(LINT_OUTPUTS) > $@ - @ if [ -s "$@" ]; then \ - cat $@; \ - mv $@ $(NONEMPTY_LINT_REPORT); \ - echo "Found one or more lint errors."; \ - exit 1; \ - fi; \ - $(RM) $(NONEMPTY_LINT_REPORT); \ - echo "No lint errors!"; - -$(LINT_OUTPUTS): $(LINT_OUTPUT_DIR)/%.lint.txt : % $(LINT_SCRIPT) | $(LINT_OUTPUT_DIR) - @ mkdir -p $(dir $@) - @ python $(LINT_SCRIPT) $< 2>&1 \ - | grep -v "^Done processing " \ - | grep -v "^Total errors found: 0" \ - > $@ \ - || true - -test: $(TEST_ALL_BIN) $(TEST_ALL_DYNLINK_BIN) $(TEST_BINS) - -tools: $(TOOL_BINS) $(TOOL_BIN_LINKS) - -examples: $(EXAMPLE_BINS) - -py$(PROJECT): py - -py: $(PY$(PROJECT)_SO) $(PROTO_GEN_PY) - -$(PY$(PROJECT)_SO): $(PY$(PROJECT)_SRC) $(PY$(PROJECT)_HXX) | $(DYNAMIC_NAME) - @ echo CXX/LD -o $@ $< - $(Q)$(CXX) -shared -o $@ $(PY$(PROJECT)_SRC) \ - -o $@ $(LINKFLAGS) -l$(PROJECT) $(PYTHON_LDFLAGS) \ - -Wl,-rpath,$(ORIGIN)/../../build/lib - -mat$(PROJECT): mat - -mat: $(MAT$(PROJECT)_SO) - -$(MAT$(PROJECT)_SO): $(MAT$(PROJECT)_SRC) $(STATIC_NAME) - @ if [ 
-z "$(MATLAB_DIR)" ]; then \ - echo "MATLAB_DIR must be specified in $(CONFIG_FILE)" \ - "to build mat$(PROJECT)."; \ - exit 1; \ - fi - @ echo MEX $< - $(Q)$(MATLAB_DIR)/bin/mex $(MAT$(PROJECT)_SRC) \ - CXX="$(CXX)" \ - CXXFLAGS="\$$CXXFLAGS $(MATLAB_CXXFLAGS)" \ - CXXLIBS="\$$CXXLIBS $(STATIC_LINK_COMMAND) $(LDFLAGS)" -output $@ - @ if [ -f "$(PROJECT)_.d" ]; then \ - mv -f $(PROJECT)_.d $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d}; \ - fi - -runtest: $(TEST_ALL_BIN) - $(TOOL_BUILD_DIR)/caffe - $(TEST_ALL_BIN) $(TEST_GPUID) --gtest_shuffle $(TEST_FILTER) - -pytest: py - cd python; python -m unittest discover -s caffe/test - -mattest: mat - cd matlab; $(MATLAB_DIR)/bin/matlab -nodisplay -r 'caffe.run_tests(), exit()' - -warn: $(EMPTY_WARN_REPORT) - -$(EMPTY_WARN_REPORT): $(ALL_WARNS) | $(BUILD_DIR) - @ cat $(ALL_WARNS) > $@ - @ if [ -s "$@" ]; then \ - cat $@; \ - mv $@ $(NONEMPTY_WARN_REPORT); \ - echo "Compiler produced one or more warnings."; \ - exit 1; \ - fi; \ - $(RM) $(NONEMPTY_WARN_REPORT); \ - echo "No compiler warnings!"; - -$(ALL_WARNS): %.o.$(WARNS_EXT) : %.o - -$(BUILD_DIR_LINK): $(BUILD_DIR)/.linked - -# Create a target ".linked" in this BUILD_DIR to tell Make that the "build" link -# is currently correct, then delete the one in the OTHER_BUILD_DIR in case it -# exists and $(DEBUG) is toggled later. 
-$(BUILD_DIR)/.linked: - @ mkdir -p $(BUILD_DIR) - @ $(RM) $(OTHER_BUILD_DIR)/.linked - @ $(RM) -r $(BUILD_DIR_LINK) - @ ln -s $(BUILD_DIR) $(BUILD_DIR_LINK) - @ touch $@ - -$(ALL_BUILD_DIRS): | $(BUILD_DIR_LINK) - @ mkdir -p $@ - -$(DYNAMIC_NAME): $(OBJS) | $(LIB_BUILD_DIR) - @ echo LD -o $@ - $(Q)$(CXX) -shared -o $@ $(OBJS) $(LINKFLAGS) $(LDFLAGS) $(DYNAMIC_FLAGS) - -$(STATIC_NAME): $(OBJS) | $(LIB_BUILD_DIR) - @ echo AR -o $@ - $(Q)ar rcs $@ $(OBJS) - -$(BUILD_DIR)/%.o: %.cpp | $(ALL_BUILD_DIRS) - @ echo CXX $< - $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ - || (cat $@.$(WARNS_EXT); exit 1) - @ cat $@.$(WARNS_EXT) - -$(PROTO_BUILD_DIR)/%.pb.o: $(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_GEN_HEADER) \ - | $(PROTO_BUILD_DIR) - @ echo CXX $< - $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ - || (cat $@.$(WARNS_EXT); exit 1) - @ cat $@.$(WARNS_EXT) - -$(BUILD_DIR)/cuda/%.o: %.cu | $(ALL_BUILD_DIRS) - @ echo NVCC $< - $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -M $< -o ${@:.o=.d} \ - -odir $(@D) - $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -c $< -o $@ 2> $@.$(WARNS_EXT) \ - || (cat $@.$(WARNS_EXT); exit 1) - @ cat $@.$(WARNS_EXT) - -$(TEST_ALL_BIN): $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \ - | $(DYNAMIC_NAME) $(TEST_BIN_DIR) - @ echo CXX/LD -o $@ $< - $(Q)$(CXX) $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \ - -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib - -$(TEST_CU_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CU_BUILD_DIR)/%.o \ - $(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR) - @ echo LD $< - $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \ - -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib - -$(TEST_CXX_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CXX_BUILD_DIR)/%.o \ - $(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR) - @ echo LD $< - $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \ - -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib - -# Target for extension-less symlinks to tool 
binaries with extension '*.bin'. -$(TOOL_BUILD_DIR)/%: $(TOOL_BUILD_DIR)/%.bin | $(TOOL_BUILD_DIR) - @ $(RM) $@ - @ ln -s $(abspath $<) $@ - -$(TOOL_BINS): %.bin : %.o | $(DYNAMIC_NAME) - @ echo CXX/LD -o $@ - $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \ - -Wl,-rpath,$(ORIGIN)/../lib - -$(EXAMPLE_BINS): %.bin : %.o | $(DYNAMIC_NAME) - @ echo CXX/LD -o $@ - $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \ - -Wl,-rpath,$(ORIGIN)/../../lib - -proto: $(PROTO_GEN_CC) $(PROTO_GEN_HEADER) - -$(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_BUILD_DIR)/%.pb.h : \ - $(PROTO_SRC_DIR)/%.proto | $(PROTO_BUILD_DIR) - @ echo PROTOC $< - $(Q)protoc --proto_path=$(PROTO_SRC_DIR) --cpp_out=$(PROTO_BUILD_DIR) $< - -$(PY_PROTO_BUILD_DIR)/%_pb2.py : $(PROTO_SRC_DIR)/%.proto \ - $(PY_PROTO_INIT) | $(PY_PROTO_BUILD_DIR) - @ echo PROTOC \(python\) $< - $(Q)protoc --proto_path=$(PROTO_SRC_DIR) --python_out=$(PY_PROTO_BUILD_DIR) $< - -$(PY_PROTO_INIT): | $(PY_PROTO_BUILD_DIR) - touch $(PY_PROTO_INIT) - -clean: - @- $(RM) -rf $(ALL_BUILD_DIRS) - @- $(RM) -rf $(OTHER_BUILD_DIR) - @- $(RM) -rf $(BUILD_DIR_LINK) - @- $(RM) -rf $(DISTRIBUTE_DIR) - @- $(RM) $(PY$(PROJECT)_SO) - @- $(RM) $(MAT$(PROJECT)_SO) - -supercleanfiles: - $(eval SUPERCLEAN_FILES := $(strip \ - $(foreach ext,$(SUPERCLEAN_EXTS), $(shell find . 
-name '*$(ext)' \ - -not -path './data/*')))) - -supercleanlist: supercleanfiles - @ \ - if [ -z "$(SUPERCLEAN_FILES)" ]; then \ - echo "No generated files found."; \ - else \ - echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \ - fi - -superclean: clean supercleanfiles - @ \ - if [ -z "$(SUPERCLEAN_FILES)" ]; then \ - echo "No generated files found."; \ - else \ - echo "Deleting the following generated files:"; \ - echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \ - $(RM) $(SUPERCLEAN_FILES); \ - fi - -$(DIST_ALIASES): $(DISTRIBUTE_DIR) - -$(DISTRIBUTE_DIR): all py | $(DISTRIBUTE_SUBDIRS) - # add include - cp -r include $(DISTRIBUTE_DIR)/ - mkdir -p $(DISTRIBUTE_DIR)/include/caffe/proto - cp $(PROTO_GEN_HEADER_SRCS) $(DISTRIBUTE_DIR)/include/caffe/proto - # add tool and example binaries - cp $(TOOL_BINS) $(DISTRIBUTE_DIR)/bin - cp $(EXAMPLE_BINS) $(DISTRIBUTE_DIR)/bin - # add libraries - cp $(STATIC_NAME) $(DISTRIBUTE_DIR)/lib - cp $(DYNAMIC_NAME) $(DISTRIBUTE_DIR)/lib - # add python - it's not the standard way, indeed... - cp -r python $(DISTRIBUTE_DIR)/python - --include $(DEPS) diff --git a/Makefile_REMOTE_61727 b/Makefile_REMOTE_61727 deleted file mode 100644 index 2b918ffd26d..00000000000 --- a/Makefile_REMOTE_61727 +++ /dev/null @@ -1,631 +0,0 @@ -PROJECT := caffe - -CONFIG_FILE := Makefile.config -# Explicitly check for the config file, otherwise make -k will proceed anyway. -ifeq ($(wildcard $(CONFIG_FILE)),) -$(error $(CONFIG_FILE) not found. See $(CONFIG_FILE).example.) -endif -include $(CONFIG_FILE) - -BUILD_DIR_LINK := $(BUILD_DIR) -ifeq ($(RELEASE_BUILD_DIR),) - RELEASE_BUILD_DIR := .$(BUILD_DIR)_release -endif -ifeq ($(DEBUG_BUILD_DIR),) - DEBUG_BUILD_DIR := .$(BUILD_DIR)_debug -endif - -DEBUG ?= 0 -ifeq ($(DEBUG), 1) - BUILD_DIR := $(DEBUG_BUILD_DIR) - OTHER_BUILD_DIR := $(RELEASE_BUILD_DIR) -else - BUILD_DIR := $(RELEASE_BUILD_DIR) - OTHER_BUILD_DIR := $(DEBUG_BUILD_DIR) -endif - -# All of the directories containing code. 
-SRC_DIRS := $(shell find * -type d -exec bash -c "find {} -maxdepth 1 \ - \( -name '*.cpp' -o -name '*.proto' \) | grep -q ." \; -print) - -# The target shared library name -LIB_BUILD_DIR := $(BUILD_DIR)/lib -STATIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).a -DYNAMIC_NAME := $(LIB_BUILD_DIR)/lib$(PROJECT).so - -############################## -# Get all source files -############################## -# CXX_SRCS are the source files excluding the test ones. -CXX_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cpp" -name "*.cpp") -# CU_SRCS are the cuda source files -CU_SRCS := $(shell find src/$(PROJECT) ! -name "test_*.cu" -name "*.cu") -# TEST_SRCS are the test source files -TEST_MAIN_SRC := src/$(PROJECT)/test/test_caffe_main.cpp -TEST_SRCS := $(shell find src/$(PROJECT) -name "test_*.cpp") -TEST_SRCS := $(filter-out $(TEST_MAIN_SRC), $(TEST_SRCS)) -TEST_CU_SRCS := $(shell find src/$(PROJECT) -name "test_*.cu") -GTEST_SRC := src/gtest/gtest-all.cpp -# TOOL_SRCS are the source files for the tool binaries -TOOL_SRCS := $(shell find tools -name "*.cpp") -# EXAMPLE_SRCS are the source files for the example binaries -EXAMPLE_SRCS := $(shell find examples -name "*.cpp") -# BUILD_INCLUDE_DIR contains any generated header files we want to include. -BUILD_INCLUDE_DIR := $(BUILD_DIR)/src -# PROTO_SRCS are the protocol buffer definitions -PROTO_SRC_DIR := src/$(PROJECT)/proto -PROTO_SRCS := $(wildcard $(PROTO_SRC_DIR)/*.proto) -# PROTO_BUILD_DIR will contain the .cc and obj files generated from -# PROTO_SRCS; PROTO_BUILD_INCLUDE_DIR will contain the .h header files -PROTO_BUILD_DIR := $(BUILD_DIR)/$(PROTO_SRC_DIR) -PROTO_BUILD_INCLUDE_DIR := $(BUILD_INCLUDE_DIR)/$(PROJECT)/proto -# NONGEN_CXX_SRCS includes all source/header files except those generated -# automatically (e.g., by proto). 
-NONGEN_CXX_SRCS := $(shell find \ - src/$(PROJECT) \ - include/$(PROJECT) \ - python/$(PROJECT) \ - matlab/+$(PROJECT)/private \ - examples \ - tools \ - -name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh") -LINT_SCRIPT := scripts/cpp_lint.py -LINT_OUTPUT_DIR := $(BUILD_DIR)/.lint -LINT_EXT := lint.txt -LINT_OUTPUTS := $(addsuffix .$(LINT_EXT), $(addprefix $(LINT_OUTPUT_DIR)/, $(NONGEN_CXX_SRCS))) -EMPTY_LINT_REPORT := $(BUILD_DIR)/.$(LINT_EXT) -NONEMPTY_LINT_REPORT := $(BUILD_DIR)/$(LINT_EXT) -# PY$(PROJECT)_SRC is the python wrapper for $(PROJECT) -PY$(PROJECT)_SRC := python/$(PROJECT)/_$(PROJECT).cpp -PY$(PROJECT)_SO := python/$(PROJECT)/_$(PROJECT).so -PY$(PROJECT)_HXX := include/$(PROJECT)/python_layer.hpp -# MAT$(PROJECT)_SRC is the mex entrance point of matlab package for $(PROJECT) -MAT$(PROJECT)_SRC := matlab/+$(PROJECT)/private/$(PROJECT)_.cpp -ifneq ($(MATLAB_DIR),) - MAT_SO_EXT := $(shell $(MATLAB_DIR)/bin/mexext) -endif -MAT$(PROJECT)_SO := matlab/+$(PROJECT)/private/$(PROJECT)_.$(MAT_SO_EXT) - -############################## -# Derive generated files -############################## -# The generated files for protocol buffers -PROTO_GEN_HEADER_SRCS := $(addprefix $(PROTO_BUILD_DIR)/, \ - $(notdir ${PROTO_SRCS:.proto=.pb.h})) -PROTO_GEN_HEADER := $(addprefix $(PROTO_BUILD_INCLUDE_DIR)/, \ - $(notdir ${PROTO_SRCS:.proto=.pb.h})) -PROTO_GEN_CC := $(addprefix $(BUILD_DIR)/, ${PROTO_SRCS:.proto=.pb.cc}) -PY_PROTO_BUILD_DIR := python/$(PROJECT)/proto -PY_PROTO_INIT := python/$(PROJECT)/proto/__init__.py -PROTO_GEN_PY := $(foreach file,${PROTO_SRCS:.proto=_pb2.py}, \ - $(PY_PROTO_BUILD_DIR)/$(notdir $(file))) -# The objects corresponding to the source files -# These objects will be linked into the final shared library, so we -# exclude the tool, example, and test objects. 
-CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o}) -CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o}) -PROTO_OBJS := ${PROTO_GEN_CC:.cc=.o} -OBJS := $(PROTO_OBJS) $(CXX_OBJS) $(CU_OBJS) -# tool, example, and test objects -TOOL_OBJS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o}) -TOOL_BUILD_DIR := $(BUILD_DIR)/tools -TEST_CXX_BUILD_DIR := $(BUILD_DIR)/src/$(PROJECT)/test -TEST_CU_BUILD_DIR := $(BUILD_DIR)/cuda/src/$(PROJECT)/test -TEST_CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o}) -TEST_CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o}) -TEST_OBJS := $(TEST_CXX_OBJS) $(TEST_CU_OBJS) -GTEST_OBJ := $(addprefix $(BUILD_DIR)/, ${GTEST_SRC:.cpp=.o}) -EXAMPLE_OBJS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o}) -# Output files for automatic dependency generation -DEPS := ${CXX_OBJS:.o=.d} ${CU_OBJS:.o=.d} ${TEST_CXX_OBJS:.o=.d} \ - ${TEST_CU_OBJS:.o=.d} $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d} -# tool, example, and test bins -TOOL_BINS := ${TOOL_OBJS:.o=.bin} -EXAMPLE_BINS := ${EXAMPLE_OBJS:.o=.bin} -# symlinks to tool bins without the ".bin" extension -TOOL_BIN_LINKS := ${TOOL_BINS:.bin=} -# Put the test binaries in build/test for convenience. -TEST_BIN_DIR := $(BUILD_DIR)/test -TEST_CU_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \ - $(foreach obj,$(TEST_CU_OBJS),$(basename $(notdir $(obj)))))) -TEST_CXX_BINS := $(addsuffix .testbin,$(addprefix $(TEST_BIN_DIR)/, \ - $(foreach obj,$(TEST_CXX_OBJS),$(basename $(notdir $(obj)))))) -TEST_BINS := $(TEST_CXX_BINS) $(TEST_CU_BINS) -# TEST_ALL_BIN is the test binary that links caffe dynamically. 
-TEST_ALL_BIN := $(TEST_BIN_DIR)/test_all.testbin - -############################## -# Derive compiler warning dump locations -############################## -WARNS_EXT := warnings.txt -CXX_WARNS := $(addprefix $(BUILD_DIR)/, ${CXX_SRCS:.cpp=.o.$(WARNS_EXT)}) -CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${CU_SRCS:.cu=.o.$(WARNS_EXT)}) -TOOL_WARNS := $(addprefix $(BUILD_DIR)/, ${TOOL_SRCS:.cpp=.o.$(WARNS_EXT)}) -EXAMPLE_WARNS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o.$(WARNS_EXT)}) -TEST_WARNS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o.$(WARNS_EXT)}) -TEST_CU_WARNS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o.$(WARNS_EXT)}) -ALL_CXX_WARNS := $(CXX_WARNS) $(TOOL_WARNS) $(EXAMPLE_WARNS) $(TEST_WARNS) -ALL_CU_WARNS := $(CU_WARNS) $(TEST_CU_WARNS) -ALL_WARNS := $(ALL_CXX_WARNS) $(ALL_CU_WARNS) - -EMPTY_WARN_REPORT := $(BUILD_DIR)/.$(WARNS_EXT) -NONEMPTY_WARN_REPORT := $(BUILD_DIR)/$(WARNS_EXT) - -############################## -# Derive include and lib directories -############################## -CUDA_INCLUDE_DIR := $(CUDA_DIR)/include - -CUDA_LIB_DIR := -# add /lib64 only if it exists -ifneq ("$(wildcard $(CUDA_DIR)/lib64)","") - CUDA_LIB_DIR += $(CUDA_DIR)/lib64 -endif -CUDA_LIB_DIR += $(CUDA_DIR)/lib - -INCLUDE_DIRS += $(BUILD_INCLUDE_DIR) ./src ./include -ifneq ($(CPU_ONLY), 1) - INCLUDE_DIRS += $(CUDA_INCLUDE_DIR) - LIBRARY_DIRS += $(CUDA_LIB_DIR) - LIBRARIES := cudart cublas curand -endif -LIBRARIES += glog gflags protobuf leveldb snappy \ - lmdb boost_system hdf5_hl hdf5 m \ - opencv_core opencv_highgui opencv_imgproc #opencv_imgcodecs -PYTHON_LIBRARIES := boost_python python2.7 -WARNINGS := -Wall -Wno-sign-compare - -############################## -# Set build directories -############################## - -DISTRIBUTE_DIR ?= distribute -DISTRIBUTE_SUBDIRS := $(DISTRIBUTE_DIR)/bin $(DISTRIBUTE_DIR)/lib -DIST_ALIASES := dist -ifneq ($(strip $(DISTRIBUTE_DIR)),distribute) - DIST_ALIASES += distribute -endif - -ALL_BUILD_DIRS := $(sort 
$(BUILD_DIR) $(addprefix $(BUILD_DIR)/, $(SRC_DIRS)) \ - $(addprefix $(BUILD_DIR)/cuda/, $(SRC_DIRS)) \ - $(LIB_BUILD_DIR) $(TEST_BIN_DIR) $(PY_PROTO_BUILD_DIR) $(LINT_OUTPUT_DIR) \ - $(DISTRIBUTE_SUBDIRS) $(PROTO_BUILD_INCLUDE_DIR)) - -############################## -# Set directory for Doxygen-generated documentation -############################## -DOXYGEN_CONFIG_FILE ?= ./.Doxyfile -# should be the same as OUTPUT_DIRECTORY in the .Doxyfile -DOXYGEN_OUTPUT_DIR ?= ./doxygen -DOXYGEN_COMMAND ?= doxygen -# All the files that might have Doxygen documentation. -DOXYGEN_SOURCES := $(shell find \ - src/$(PROJECT) \ - include/$(PROJECT) \ - python/ \ - matlab/ \ - examples \ - tools \ - -name "*.cpp" -or -name "*.hpp" -or -name "*.cu" -or -name "*.cuh" -or \ - -name "*.py" -or -name "*.m") -DOXYGEN_SOURCES += $(DOXYGEN_CONFIG_FILE) - - -############################## -# Configure build -############################## - -# Determine platform -UNAME := $(shell uname -s) -ifeq ($(UNAME), Linux) - LINUX := 1 -else ifeq ($(UNAME), Darwin) - OSX := 1 -endif - -# Linux -ifeq ($(LINUX), 1) - CXX ?= /usr/bin/g++ - GCCVERSION := $(shell $(CXX) -dumpversion | cut -f1,2 -d.) - # older versions of gcc are too dumb to build boost with -Wuninitalized - ifeq ($(shell echo $(GCCVERSION) \< 4.6 | bc), 1) - WARNINGS += -Wno-uninitialized - endif - # boost::thread is reasonably called boost_thread (compare OS X) - # We will also explicitly add stdc++ to the link target. 
- LIBRARIES += boost_thread stdc++ -endif - -# OS X: -# clang++ instead of g++ -# libstdc++ for NVCC compatibility on OS X >= 10.9 with CUDA < 7.0 -ifeq ($(OSX), 1) - CXX := /usr/bin/clang++ - ifneq ($(CPU_ONLY), 1) - CUDA_VERSION := $(shell $(CUDA_DIR)/bin/nvcc -V | grep -o 'release \d' | grep -o '\d') - ifeq ($(shell echo $(CUDA_VERSION) \< 7.0 | bc), 1) - CXXFLAGS += -stdlib=libstdc++ - LINKFLAGS += -stdlib=libstdc++ - endif - # clang throws this warning for cuda headers - WARNINGS += -Wno-unneeded-internal-declaration - endif - # gtest needs to use its own tuple to not conflict with clang - COMMON_FLAGS += -DGTEST_USE_OWN_TR1_TUPLE=1 - # boost::thread is called boost_thread-mt to mark multithreading on OS X - LIBRARIES += boost_thread-mt - # we need to explicitly ask for the rpath to be obeyed - DYNAMIC_FLAGS := -install_name @rpath/libcaffe.so - ORIGIN := @loader_path -else - ORIGIN := \$$ORIGIN -endif - -# Custom compiler -ifdef CUSTOM_CXX - CXX := $(CUSTOM_CXX) -endif - -# Static linking -ifneq (,$(findstring clang++,$(CXX))) - STATIC_LINK_COMMAND := -Wl,-force_load $(STATIC_NAME) -else ifneq (,$(findstring g++,$(CXX))) - STATIC_LINK_COMMAND := -Wl,--whole-archive $(STATIC_NAME) -Wl,--no-whole-archive -else - # The following line must not be indented with a tab, since we are not inside a target - $(error Cannot static link with the $(CXX) compiler) -endif - -# Debugging -ifeq ($(DEBUG), 1) - COMMON_FLAGS += -DDEBUG -g -O0 - NVCCFLAGS += -G -else - COMMON_FLAGS += -DNDEBUG -O2 -endif - -# cuDNN acceleration configuration. 
-ifeq ($(USE_CUDNN), 1) - LIBRARIES += cudnn - COMMON_FLAGS += -DUSE_CUDNN -endif - -# CPU-only configuration -ifeq ($(CPU_ONLY), 1) - OBJS := $(PROTO_OBJS) $(CXX_OBJS) - TEST_OBJS := $(TEST_CXX_OBJS) - TEST_BINS := $(TEST_CXX_BINS) - ALL_WARNS := $(ALL_CXX_WARNS) - TEST_FILTER := --gtest_filter="-*GPU*" - COMMON_FLAGS += -DCPU_ONLY -endif - -# Python layer support -ifeq ($(WITH_PYTHON_LAYER), 1) - COMMON_FLAGS += -DWITH_PYTHON_LAYER - LIBRARIES += $(PYTHON_LIBRARIES) -endif - -# BLAS configuration (default = ATLAS) -BLAS ?= atlas -ifeq ($(BLAS), mkl) - # MKL - LIBRARIES += mkl_rt - COMMON_FLAGS += -DUSE_MKL - MKL_DIR ?= /opt/intel/mkl - BLAS_INCLUDE ?= $(MKL_DIR)/include - BLAS_LIB ?= $(MKL_DIR)/lib $(MKL_DIR)/lib/intel64 -else ifeq ($(BLAS), open) - # OpenBLAS - LIBRARIES += openblas -else - # ATLAS - ifeq ($(LINUX), 1) - ifeq ($(BLAS), atlas) - # Linux simply has cblas and atlas - LIBRARIES += cblas atlas - endif - else ifeq ($(OSX), 1) - # OS X packages atlas as the vecLib framework - LIBRARIES += cblas - # 10.10 has accelerate while 10.9 has veclib - XCODE_CLT_VER := $(shell pkgutil --pkg-info=com.apple.pkg.CLTools_Executables | grep -o 'version: 6') - ifneq (,$(findstring version: 6,$(XCODE_CLT_VER))) - BLAS_INCLUDE ?= /System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/ - LDFLAGS += -framework Accelerate - else - BLAS_INCLUDE ?= /System/Library/Frameworks/vecLib.framework/Versions/Current/Headers/ - LDFLAGS += -framework vecLib - endif - endif -endif -INCLUDE_DIRS += $(BLAS_INCLUDE) -LIBRARY_DIRS += $(BLAS_LIB) - -LIBRARY_DIRS += $(LIB_BUILD_DIR) - -# Automatic dependency generation (nvcc is handled separately) -CXXFLAGS += -MMD -MP - -# Complete build flags. 
-COMMON_FLAGS += $(foreach includedir,$(INCLUDE_DIRS),-I$(includedir)) -CXXFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) -NVCCFLAGS += -ccbin=$(CXX) -Xcompiler -fPIC $(COMMON_FLAGS) -# mex may invoke an older gcc that is too liberal with -Wuninitalized -MATLAB_CXXFLAGS := $(CXXFLAGS) -Wno-uninitialized -LINKFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) - -USE_PKG_CONFIG ?= 0 -ifeq ($(USE_PKG_CONFIG), 1) - PKG_CONFIG := $(shell pkg-config opencv --libs) -else - PKG_CONFIG := -endif -LDFLAGS += $(foreach librarydir,$(LIBRARY_DIRS),-L$(librarydir)) $(PKG_CONFIG) \ - $(foreach library,$(LIBRARIES),-l$(library)) -PYTHON_LDFLAGS := $(LDFLAGS) $(foreach library,$(PYTHON_LIBRARIES),-l$(library)) - -# 'superclean' target recursively* deletes all files ending with an extension -# in $(SUPERCLEAN_EXTS) below. This may be useful if you've built older -# versions of Caffe that do not place all generated files in a location known -# to the 'clean' target. -# -# 'supercleanlist' will list the files to be deleted by make superclean. -# -# * Recursive with the exception that symbolic links are never followed, per the -# default behavior of 'find'. -SUPERCLEAN_EXTS := .so .a .o .bin .testbin .pb.cc .pb.h _pb2.py .cuo - -# Set the sub-targets of the 'everything' target. -EVERYTHING_TARGETS := all py$(PROJECT) test warn lint -# Only build matcaffe as part of "everything" if MATLAB_DIR is specified. 
-ifneq ($(MATLAB_DIR),) - EVERYTHING_TARGETS += mat$(PROJECT) -endif - -############################## -# Define build targets -############################## -.PHONY: all test clean docs linecount lint lintclean tools examples $(DIST_ALIASES) \ - py mat py$(PROJECT) mat$(PROJECT) proto runtest \ - superclean supercleanlist supercleanfiles warn everything - -all: $(STATIC_NAME) $(DYNAMIC_NAME) tools examples - -everything: $(EVERYTHING_TARGETS) - -linecount: - cloc --read-lang-def=$(PROJECT).cloc \ - src/$(PROJECT) include/$(PROJECT) tools examples \ - python matlab - -lint: $(EMPTY_LINT_REPORT) - -lintclean: - @ $(RM) -r $(LINT_OUTPUT_DIR) $(EMPTY_LINT_REPORT) $(NONEMPTY_LINT_REPORT) - -docs: $(DOXYGEN_OUTPUT_DIR) - @ cd ./docs ; ln -sfn ../$(DOXYGEN_OUTPUT_DIR)/html doxygen - -$(DOXYGEN_OUTPUT_DIR): $(DOXYGEN_CONFIG_FILE) $(DOXYGEN_SOURCES) - $(DOXYGEN_COMMAND) $(DOXYGEN_CONFIG_FILE) - -$(EMPTY_LINT_REPORT): $(LINT_OUTPUTS) | $(BUILD_DIR) - @ cat $(LINT_OUTPUTS) > $@ - @ if [ -s "$@" ]; then \ - cat $@; \ - mv $@ $(NONEMPTY_LINT_REPORT); \ - echo "Found one or more lint errors."; \ - exit 1; \ - fi; \ - $(RM) $(NONEMPTY_LINT_REPORT); \ - echo "No lint errors!"; - -$(LINT_OUTPUTS): $(LINT_OUTPUT_DIR)/%.lint.txt : % $(LINT_SCRIPT) | $(LINT_OUTPUT_DIR) - @ mkdir -p $(dir $@) - @ python $(LINT_SCRIPT) $< 2>&1 \ - | grep -v "^Done processing " \ - | grep -v "^Total errors found: 0" \ - > $@ \ - || true - -test: $(TEST_ALL_BIN) $(TEST_ALL_DYNLINK_BIN) $(TEST_BINS) - -tools: $(TOOL_BINS) $(TOOL_BIN_LINKS) - -examples: $(EXAMPLE_BINS) - -py$(PROJECT): py - -py: $(PY$(PROJECT)_SO) $(PROTO_GEN_PY) - -$(PY$(PROJECT)_SO): $(PY$(PROJECT)_SRC) $(PY$(PROJECT)_HXX) | $(DYNAMIC_NAME) - @ echo CXX/LD -o $@ $< - $(Q)$(CXX) -shared -o $@ $(PY$(PROJECT)_SRC) \ - -o $@ $(LINKFLAGS) -l$(PROJECT) $(PYTHON_LDFLAGS) \ - -Wl,-rpath,$(ORIGIN)/../../build/lib - -mat$(PROJECT): mat - -mat: $(MAT$(PROJECT)_SO) - -$(MAT$(PROJECT)_SO): $(MAT$(PROJECT)_SRC) $(STATIC_NAME) - @ if [ -z 
"$(MATLAB_DIR)" ]; then \ - echo "MATLAB_DIR must be specified in $(CONFIG_FILE)" \ - "to build mat$(PROJECT)."; \ - exit 1; \ - fi - @ echo MEX $< - $(Q)$(MATLAB_DIR)/bin/mex $(MAT$(PROJECT)_SRC) \ - CXX="$(CXX)" \ - CXXFLAGS="\$$CXXFLAGS $(MATLAB_CXXFLAGS)" \ - CXXLIBS="\$$CXXLIBS $(STATIC_LINK_COMMAND) $(LDFLAGS)" -output $@ - @ if [ -f "$(PROJECT)_.d" ]; then \ - mv -f $(PROJECT)_.d $(BUILD_DIR)/${MAT$(PROJECT)_SO:.$(MAT_SO_EXT)=.d}; \ - fi - -runtest: $(TEST_ALL_BIN) - $(TOOL_BUILD_DIR)/caffe - $(TEST_ALL_BIN) $(TEST_GPUID) --gtest_shuffle $(TEST_FILTER) - -pytest: py - cd python; python -m unittest discover -s caffe/test - -mattest: mat - cd matlab; $(MATLAB_DIR)/bin/matlab -nodisplay -r 'caffe.run_tests(), exit()' - -warn: $(EMPTY_WARN_REPORT) - -$(EMPTY_WARN_REPORT): $(ALL_WARNS) | $(BUILD_DIR) - @ cat $(ALL_WARNS) > $@ - @ if [ -s "$@" ]; then \ - cat $@; \ - mv $@ $(NONEMPTY_WARN_REPORT); \ - echo "Compiler produced one or more warnings."; \ - exit 1; \ - fi; \ - $(RM) $(NONEMPTY_WARN_REPORT); \ - echo "No compiler warnings!"; - -$(ALL_WARNS): %.o.$(WARNS_EXT) : %.o - -$(BUILD_DIR_LINK): $(BUILD_DIR)/.linked - -# Create a target ".linked" in this BUILD_DIR to tell Make that the "build" link -# is currently correct, then delete the one in the OTHER_BUILD_DIR in case it -# exists and $(DEBUG) is toggled later. 
-$(BUILD_DIR)/.linked: - @ mkdir -p $(BUILD_DIR) - @ $(RM) $(OTHER_BUILD_DIR)/.linked - @ $(RM) -r $(BUILD_DIR_LINK) - @ ln -s $(BUILD_DIR) $(BUILD_DIR_LINK) - @ touch $@ - -$(ALL_BUILD_DIRS): | $(BUILD_DIR_LINK) - @ mkdir -p $@ - -$(DYNAMIC_NAME): $(OBJS) | $(LIB_BUILD_DIR) - @ echo LD -o $@ - $(Q)$(CXX) -shared -o $@ $(OBJS) $(LINKFLAGS) $(LDFLAGS) $(DYNAMIC_FLAGS) - -$(STATIC_NAME): $(OBJS) | $(LIB_BUILD_DIR) - @ echo AR -o $@ - $(Q)ar rcs $@ $(OBJS) - -$(BUILD_DIR)/%.o: %.cpp | $(ALL_BUILD_DIRS) - @ echo CXX $< - $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ - || (cat $@.$(WARNS_EXT); exit 1) - @ cat $@.$(WARNS_EXT) - -$(PROTO_BUILD_DIR)/%.pb.o: $(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_GEN_HEADER) \ - | $(PROTO_BUILD_DIR) - @ echo CXX $< - $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ - || (cat $@.$(WARNS_EXT); exit 1) - @ cat $@.$(WARNS_EXT) - -$(BUILD_DIR)/cuda/%.o: %.cu | $(ALL_BUILD_DIRS) - @ echo NVCC $< - $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -M $< -o ${@:.o=.d} \ - -odir $(@D) - $(Q)$(CUDA_DIR)/bin/nvcc $(NVCCFLAGS) $(CUDA_ARCH) -c $< -o $@ 2> $@.$(WARNS_EXT) \ - || (cat $@.$(WARNS_EXT); exit 1) - @ cat $@.$(WARNS_EXT) - -$(TEST_ALL_BIN): $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \ - | $(DYNAMIC_NAME) $(TEST_BIN_DIR) - @ echo CXX/LD -o $@ $< - $(Q)$(CXX) $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \ - -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib - -$(TEST_CU_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CU_BUILD_DIR)/%.o \ - $(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR) - @ echo LD $< - $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \ - -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib - -$(TEST_CXX_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CXX_BUILD_DIR)/%.o \ - $(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR) - @ echo LD $< - $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \ - -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(PROJECT) -Wl,-rpath,$(ORIGIN)/../lib - -# Target for extension-less symlinks to tool 
binaries with extension '*.bin'. -$(TOOL_BUILD_DIR)/%: $(TOOL_BUILD_DIR)/%.bin | $(TOOL_BUILD_DIR) - @ $(RM) $@ - @ ln -s $(abspath $<) $@ - -$(TOOL_BINS): %.bin : %.o | $(DYNAMIC_NAME) - @ echo CXX/LD -o $@ - $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \ - -Wl,-rpath,$(ORIGIN)/../lib - -$(EXAMPLE_BINS): %.bin : %.o | $(DYNAMIC_NAME) - @ echo CXX/LD -o $@ - $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(PROJECT) $(LDFLAGS) \ - -Wl,-rpath,$(ORIGIN)/../../lib - -proto: $(PROTO_GEN_CC) $(PROTO_GEN_HEADER) - -$(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_BUILD_DIR)/%.pb.h : \ - $(PROTO_SRC_DIR)/%.proto | $(PROTO_BUILD_DIR) - @ echo PROTOC $< - $(Q)protoc --proto_path=$(PROTO_SRC_DIR) --cpp_out=$(PROTO_BUILD_DIR) $< - -$(PY_PROTO_BUILD_DIR)/%_pb2.py : $(PROTO_SRC_DIR)/%.proto \ - $(PY_PROTO_INIT) | $(PY_PROTO_BUILD_DIR) - @ echo PROTOC \(python\) $< - $(Q)protoc --proto_path=$(PROTO_SRC_DIR) --python_out=$(PY_PROTO_BUILD_DIR) $< - -$(PY_PROTO_INIT): | $(PY_PROTO_BUILD_DIR) - touch $(PY_PROTO_INIT) - -clean: - @- $(RM) -rf $(ALL_BUILD_DIRS) - @- $(RM) -rf $(OTHER_BUILD_DIR) - @- $(RM) -rf $(BUILD_DIR_LINK) - @- $(RM) -rf $(DISTRIBUTE_DIR) - @- $(RM) $(PY$(PROJECT)_SO) - @- $(RM) $(MAT$(PROJECT)_SO) - -supercleanfiles: - $(eval SUPERCLEAN_FILES := $(strip \ - $(foreach ext,$(SUPERCLEAN_EXTS), $(shell find . 
-name '*$(ext)' \ - -not -path './data/*')))) - -supercleanlist: supercleanfiles - @ \ - if [ -z "$(SUPERCLEAN_FILES)" ]; then \ - echo "No generated files found."; \ - else \ - echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \ - fi - -superclean: clean supercleanfiles - @ \ - if [ -z "$(SUPERCLEAN_FILES)" ]; then \ - echo "No generated files found."; \ - else \ - echo "Deleting the following generated files:"; \ - echo $(SUPERCLEAN_FILES) | tr ' ' '\n'; \ - $(RM) $(SUPERCLEAN_FILES); \ - fi - -$(DIST_ALIASES): $(DISTRIBUTE_DIR) - -$(DISTRIBUTE_DIR): all py | $(DISTRIBUTE_SUBDIRS) - # add include - cp -r include $(DISTRIBUTE_DIR)/ - mkdir -p $(DISTRIBUTE_DIR)/include/caffe/proto - cp $(PROTO_GEN_HEADER_SRCS) $(DISTRIBUTE_DIR)/include/caffe/proto - # add tool and example binaries - cp $(TOOL_BINS) $(DISTRIBUTE_DIR)/bin - cp $(EXAMPLE_BINS) $(DISTRIBUTE_DIR)/bin - # add libraries - cp $(STATIC_NAME) $(DISTRIBUTE_DIR)/lib - cp $(DYNAMIC_NAME) $(DISTRIBUTE_DIR)/lib - # add python - it's not the standard way, indeed... 
- cp -r python $(DISTRIBUTE_DIR)/python - --include $(DEPS) diff --git a/cmake/ConfigGen.cmake b/cmake/ConfigGen.cmake index 8b259965359..566d6ca0aa7 100644 --- a/cmake/ConfigGen.cmake +++ b/cmake/ConfigGen.cmake @@ -56,18 +56,6 @@ function(caffe_generate_export_configs) list(APPEND Caffe_DEFINITIONS -DCPU_ONLY) endif() - if(USE_OPENCV) - list(APPEND Caffe_DEFINITIONS -DUSE_OPENCV) - endif() - - if(USE_LMDB) - list(APPEND Caffe_DEFINITIONS -DUSE_LMDB) - endif() - - if(USE_LEVELDB) - list(APPEND Caffe_DEFINITIONS -DUSE_LEVELDB) - endif() - if(NOT HAVE_CUDNN) set(HAVE_CUDNN FALSE) else() diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake index d68d7bfba66..7c86dd55a30 100644 --- a/cmake/Dependencies.cmake +++ b/cmake/Dependencies.cmake @@ -29,27 +29,19 @@ include_directories(SYSTEM ${HDF5_INCLUDE_DIRS} ${HDF5_HL_INCLUDE_DIR}) list(APPEND Caffe_LINKER_LIBS ${HDF5_LIBRARIES}) # ---[ LMDB -if(USE_LMDB) - find_package(LMDB REQUIRED) - include_directories(SYSTEM ${LMDB_INCLUDE_DIR}) - list(APPEND Caffe_LINKER_LIBS ${LMDB_LIBRARIES}) - add_definitions(-DUSE_LMDB) -endif() +find_package(LMDB REQUIRED) +include_directories(SYSTEM ${LMDB_INCLUDE_DIR}) +list(APPEND Caffe_LINKER_LIBS ${LMDB_LIBRARIES}) # ---[ LevelDB -if(USE_LEVELDB) - find_package(LevelDB REQUIRED) - include_directories(SYSTEM ${LevelDB_INCLUDE}) - list(APPEND Caffe_LINKER_LIBS ${LevelDB_LIBRARIES}) - add_definitions(-DUSE_LEVELDB) -endif() +find_package(LevelDB REQUIRED) +include_directories(SYSTEM ${LevelDB_INCLUDE}) +list(APPEND Caffe_LINKER_LIBS ${LevelDB_LIBRARIES}) # ---[ Snappy -if(USE_LEVELDB) - find_package(Snappy REQUIRED) - include_directories(SYSTEM ${Snappy_INCLUDE_DIR}) - list(APPEND Caffe_LINKER_LIBS ${Snappy_LIBRARIES}) -endif() +find_package(Snappy REQUIRED) +include_directories(SYSTEM ${Snappy_INCLUDE_DIR}) +list(APPEND Caffe_LINKER_LIBS ${Snappy_LIBRARIES}) # ---[ CUDA include(cmake/Cuda.cmake) @@ -65,16 +57,13 @@ if(NOT HAVE_CUDA) endif() # ---[ OpenCV -if(USE_OPENCV) - 
find_package(OpenCV QUIET COMPONENTS core highgui imgproc imgcodecs) - if(NOT OpenCV_FOUND) # if not OpenCV 3.x, then imgcodecs are not found - find_package(OpenCV REQUIRED COMPONENTS core highgui imgproc) - endif() - include_directories(SYSTEM ${OpenCV_INCLUDE_DIRS}) - list(APPEND Caffe_LINKER_LIBS ${OpenCV_LIBS}) - message(STATUS "OpenCV found (${OpenCV_CONFIG_PATH})") - add_definitions(-DUSE_OPENCV) +find_package(OpenCV QUIET COMPONENTS core highgui imgproc imgcodecs) +if(NOT OpenCV_FOUND) # if not OpenCV 3.x, then imgcodecs are not found + find_package(OpenCV REQUIRED COMPONENTS core highgui imgproc) endif() +include_directories(SYSTEM ${OpenCV_INCLUDE_DIRS}) +list(APPEND Caffe_LINKER_LIBS ${OpenCV_LIBS}) +message(STATUS "OpenCV found (${OpenCV_CONFIG_PATH})") # ---[ BLAS if(NOT APPLE) diff --git a/cmake/Summary.cmake b/cmake/Summary.cmake index 3d12e81a130..e094ac0040e 100644 --- a/cmake/Summary.cmake +++ b/cmake/Summary.cmake @@ -114,9 +114,6 @@ function(caffe_print_configuration_summary) caffe_status(" BUILD_matlab : ${BUILD_matlab}") caffe_status(" BUILD_docs : ${BUILD_docs}") caffe_status(" CPU_ONLY : ${CPU_ONLY}") - caffe_status(" USE_LMDB : ${USE_LMDB}") - caffe_status(" USE_LEVELDB : ${USE_LEVELDB}") - caffe_status(" USE_OPENCV : ${USE_OPENCV}") caffe_status("") caffe_status("Dependencies:") caffe_status(" BLAS : " APPLE THEN "Yes (vecLib)" ELSE "Yes (${BLAS})") @@ -124,16 +121,10 @@ function(caffe_print_configuration_summary) caffe_status(" glog : Yes") caffe_status(" gflags : Yes") caffe_status(" protobuf : " PROTOBUF_FOUND THEN "Yes (ver. ${PROTOBUF_VERSION})" ELSE "No" ) - if(USE_LMDB) - caffe_status(" lmdb : " LMDB_FOUND THEN "Yes (ver. ${LMDB_VERSION})" ELSE "No") - endif() - if(USE_LEVELDB) - caffe_status(" LevelDB : " LEVELDB_FOUND THEN "Yes (ver. ${LEVELDB_VERSION})" ELSE "No") - caffe_status(" Snappy : " SNAPPY_FOUND THEN "Yes (ver. ${Snappy_VERSION})" ELSE "No" ) - endif() - if(USE_OPENCV) - caffe_status(" OpenCV : Yes (ver. 
${OpenCV_VERSION})") - endif() + caffe_status(" lmdb : " LMDB_FOUND THEN "Yes (ver. ${LMDB_VERSION})" ELSE "No") + caffe_status(" Snappy : " SNAPPY_FOUND THEN "Yes (ver. ${Snappy_VERSION})" ELSE "No" ) + caffe_status(" LevelDB : " LEVELDB_FOUND THEN "Yes (ver. ${LEVELDB_VERSION})" ELSE "No") + caffe_status(" OpenCV : Yes (ver. ${OpenCV_VERSION})") caffe_status(" CUDA : " HAVE_CUDA THEN "Yes (ver. ${CUDA_VERSION})" ELSE "No" ) caffe_status("") if(HAVE_CUDA) @@ -174,3 +165,4 @@ function(caffe_print_configuration_summary) caffe_status(" Install path : ${CMAKE_INSTALL_PREFIX}") caffe_status("") endfunction() + diff --git a/cmake/Templates/CaffeConfig.cmake.in b/cmake/Templates/CaffeConfig.cmake.in index 73f57ac2d74..8f23742e52e 100644 --- a/cmake/Templates/CaffeConfig.cmake.in +++ b/cmake/Templates/CaffeConfig.cmake.in @@ -17,24 +17,22 @@ # Caffe_HAVE_CUDNN - signals about cuDNN support -# OpenCV dependency (optional) +# OpenCV dependency -if(@USE_OPENCV@) - if(NOT OpenCV_FOUND) - set(Caffe_OpenCV_CONFIG_PATH "@OpenCV_CONFIG_PATH@") - if(Caffe_OpenCV_CONFIG_PATH) - get_filename_component(Caffe_OpenCV_CONFIG_PATH ${Caffe_OpenCV_CONFIG_PATH} ABSOLUTE) +if(NOT OpenCV_FOUND) + set(Caffe_OpenCV_CONFIG_PATH "@OpenCV_CONFIG_PATH@") + if(Caffe_OpenCV_CONFIG_PATH) + get_filename_component(Caffe_OpenCV_CONFIG_PATH ${Caffe_OpenCV_CONFIG_PATH} ABSOLUTE) - if(EXISTS ${Caffe_OpenCV_CONFIG_PATH} AND NOT TARGET opencv_core) - message(STATUS "Caffe: using OpenCV config from ${Caffe_OpenCV_CONFIG_PATH}") - include(${Caffe_OpenCV_CONFIG_PATH}/OpenCVModules.cmake) - endif() - - else() - find_package(OpenCV REQUIRED) + if(EXISTS ${Caffe_OpenCV_CONFIG_PATH} AND NOT TARGET opencv_core) + message(STATUS "Caffe: using OpenCV config from ${Caffe_OpenCV_CONFIG_PATH}") + include(${Caffe_OpenCV_CONFIG_PATH}/OpenCVModules.cmake) endif() - unset(Caffe_OpenCV_CONFIG_PATH) + + else() + find_package(OpenCV REQUIRED) endif() + unset(Caffe_OpenCV_CONFIG_PATH) endif() # Compute paths diff --git 
a/cmake/Templates/caffe_config.h.in b/cmake/Templates/caffe_config.h.in index 9302022d7da..6039e8f6b21 100644 --- a/cmake/Templates/caffe_config.h.in +++ b/cmake/Templates/caffe_config.h.in @@ -30,8 +30,3 @@ /* Matlab */ #cmakedefine HAVE_MATLAB - -/* IO libraries */ -#cmakedefine USE_OPENCV -#cmakedefine USE_LMDB -#cmakedefine USE_LEVELDB diff --git a/docs/installation.md b/docs/installation.md index 89a8c71c71a..d535c6d093d 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -17,19 +17,16 @@ When updating Caffe, it's best to `make clean` before re-compiling. ## Prerequisites -Caffe has several dependencies: +Caffe has several dependencies. * [CUDA](https://developer.nvidia.com/cuda-zone) is required for GPU mode. * library version 7.0 and the latest driver version are recommended, but 6.* is fine too * 5.5, and 5.0 are compatible but considered legacy * [BLAS](http://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms) via ATLAS, MKL, or OpenBLAS. * [Boost](http://www.boost.org/) >= 1.55 -* `protobuf`, `glog`, `gflags`, `hdf5` - -Optional dependencies: - * [OpenCV](http://opencv.org/) >= 2.4 including 3.0 -* IO libraries: `lmdb`, `leveldb` (note: leveldb requires `snappy`) +* `protobuf`, `glog`, `gflags` +* IO libraries `hdf5`, `leveldb`, `snappy`, `lmdb` Pycaffe and Matcaffe interfaces have their own natural needs. diff --git a/docs/multigpu.md b/docs/multigpu.md deleted file mode 100644 index 01cfb8938b5..00000000000 --- a/docs/multigpu.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: Multi-GPU Usage, Hardware Configuration Assumptions, and Performance ---- - -# Multi-GPU Usage - -Currently Multi-GPU is only supported via the C/C++ paths and only for training. - -The GPUs to be used for training can be set with the "-gpu" flag on the command line to the 'caffe' tool. e.g. "build/tools/caffe train --solver=models/bvlc_alexnet/solver.prototxt --gpu=0,1" will train on GPUs 0 and 1. 
- -**NOTE**: each GPU runs the batchsize specified in your train_val.prototxt. So if you go from 1 GPU to 2 GPU, your effective batchsize will double. e.g. if your train_val.prototxt specified a batchsize of 256, if you run 2 GPUs your effective batch size is now 512. So you need to adjust the batchsize when running multiple GPUs and/or adjust your solver params, specifically learning rate. - -# Hardware Configuration Assumptions - -The current implementation uses a tree reduction strategy. e.g. if there are 4 GPUs in the system, 0:1, 2:3 will exchange gradients, then 0:2 (top of the tree) will exchange gradients, 0 will calculate -updated model, 0\-\>2, and then 0\-\>1, 2\-\>3. - -For best performance, P2P DMA access between devices is needed. Without P2P access, for example crossing PCIe root complex, data is copied through host and effective exchange bandwidth is greatly reduced. - -Current implementation has a "soft" assumption that the devices being used are homogeneous. In practice, any devices of the same general class should work together, but performance and total size is limited by the smallest device being used. e.g. if you combine a TitanX and a GTX980, peformance will be limited by the 980. Mixing vastly different levels of boards, e.g. Kepler and Fermi, is not supported. - -"nvidia-smi topo -m" will show you the connectivity matrix. You can do P2P through PCIe bridges, but not across socket level links at this time, e.g. across CPU sockets on a multi-socket motherboard. - -# Scaling Performance - -Performance is **heavily** dependent on the PCIe topology of the system, the configuration of the neural network you are training, and the speed of each of the layers. Systems like the DIGITS DevBox have an optimized PCIe topology (X99-E WS chipset). In general, scaling on 2 GPUs tends to be ~1.8X on average for networks like AlexNet, CaffeNet, VGG, GoogleNet. 4 GPUs begins to have falloff in scaling. 
Generally with "weak scaling" where the batchsize increases with the number of GPUs you will see 3.5x scaling or so. With "strong scaling", the system can become communication bound, especially with layer performance optimizations like those in [cuDNNv3](http://nvidia.com/cudnn), and you will likely see closer to mid 2.x scaling in performance. Networks that have heavy computation compared to the number of parameters tend to have the best scaling performance. \ No newline at end of file diff --git a/examples/cpp_classification/classification.cpp b/examples/cpp_classification/classification.cpp index de48fb692c8..dc8b863f53f 100644 --- a/examples/cpp_classification/classification.cpp +++ b/examples/cpp_classification/classification.cpp @@ -1,9 +1,7 @@ #include -#ifdef USE_OPENCV #include #include #include -#endif // USE_OPENCV #include #include #include @@ -11,7 +9,6 @@ #include #include -#ifdef USE_OPENCV using namespace caffe; // NOLINT(build/namespaces) using std::string; @@ -258,8 +255,3 @@ int main(int argc, char** argv) { << p.first << "\"" << std::endl; } } -#else -int main(int argc, char** argv) { - LOG(FATAL) << "This example requires OpenCV; compile with USE_OPENCV."; -} -#endif // USE_OPENCV diff --git a/examples/mnist/convert_mnist_data.cpp b/examples/mnist/convert_mnist_data.cpp index 8f29bafde85..54443f11dd3 100644 --- a/examples/mnist/convert_mnist_data.cpp +++ b/examples/mnist/convert_mnist_data.cpp @@ -9,13 +9,9 @@ #include #include #include - -#if defined(USE_LEVELDB) && defined(USE_LMDB) #include #include #include -#endif - #include #include @@ -24,8 +20,6 @@ #include "caffe/proto/caffe.pb.h" -#if defined(USE_LEVELDB) && defined(USE_LMDB) - using namespace caffe; // NOLINT(build/namespaces) using std::string; @@ -202,9 +196,3 @@ int main(int argc, char** argv) { } return 0; } -#else -int main(int argc, char** argv) { - LOG(FATAL) << "This example requires LevelDB and LMDB; " << - "compile with USE_LEVELDB and USE_LMDB."; -} -#endif // USE_LEVELDB 
and USE_LMDB diff --git a/examples/mnist/lenet_solver.prototxt b/examples/mnist/lenet_solver.prototxt index c4c7d34c478..2dfbc834f41 100644 --- a/examples/mnist/lenet_solver.prototxt +++ b/examples/mnist/lenet_solver.prototxt @@ -7,7 +7,7 @@ test_iter: 100 # Carry out testing every 500 training iterations. test_interval: 500 # The base learning rate, momentum and the weight decay of the network. -base_lr: 0.001 +base_lr: 0.01 momentum: 0.9 weight_decay: 0.0005 # The learning rate policy diff --git a/examples/mnist/lenet_train_test.prototxt.orig b/examples/mnist/lenet_train_test.prototxt.orig deleted file mode 100644 index b18fc26cfd8..00000000000 --- a/examples/mnist/lenet_train_test.prototxt.orig +++ /dev/null @@ -1,168 +0,0 @@ -name: "LeNet" -layer { - name: "mnist" - type: "Data" - top: "data" - top: "label" - include { - phase: TRAIN - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/mnist/mnist_train_lmdb" - batch_size: 64 - backend: LMDB - } -} -layer { - name: "mnist" - type: "Data" - top: "data" - top: "label" - include { - phase: TEST - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/mnist/mnist_test_lmdb" - batch_size: 100 - backend: LMDB - } -} -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2" - type: "Pooling" - 
bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "ip2" - type: "InnerProduct" - bottom: "ip1" - top: "ip2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "accuracy" - type: "Accuracy" - bottom: "ip2" - bottom: "label" - top: "accuracy" - include { - phase: TEST - } -} -layer { - name: "loss" - type: "SoftmaxWithLoss" - bottom: "ip2" - bottom: "label" - top: "loss" -} diff --git a/examples/mnist/lenet_train_test_BACKUP_8918.prototxt b/examples/mnist/lenet_train_test_BACKUP_8918.prototxt deleted file mode 100644 index 2ab137326bc..00000000000 --- a/examples/mnist/lenet_train_test_BACKUP_8918.prototxt +++ /dev/null @@ -1,180 +0,0 @@ -name: "LeNet" -layer { - name: "mnist" - type: "Data" - top: "data" - top: "label" - include { - phase: TRAIN - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/mnist/mnist_train_lmdb" - batch_size: 64 - backend: LMDB - } -} -layer { - name: "mnist" - type: "Data" - top: "data" - top: "label" - include { - phase: TEST - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/mnist/mnist_test_lmdb" - batch_size: 100 - backend: LMDB - } -} -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1" - 
type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "ip2" - type: "InnerProduct" - bottom: "ip1" - top: "ip2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21:examples/mnist/lenet_train_test.prototxt - num_output: 10 -======= - num_output: 16 ->>>>>>> new data and new feature dimension:examples/triplet/3d_triplet_train_test.prototxt - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "accuracy" - type: "Accuracy" - bottom: "ip2" - bottom: "label" - top: "accuracy" - include { - phase: TEST - } -} -layer { - name: "loss" - type: "SoftmaxWithLoss" - bottom: "ip2" - bottom: "label" - top: "loss" -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21:examples/mnist/lenet_train_test.prototxt -======= - triplet_loss_param { - margin: 0.01 - losstype: 1 - num_triplets: 3 - } ->>>>>>> new data and new feature dimension:examples/triplet/3d_triplet_train_test.prototxt -} diff --git a/examples/mnist/lenet_train_test_BASE_8918.prototxt b/examples/mnist/lenet_train_test_BASE_8918.prototxt deleted file mode 100644 index 
4c766ebe0e4..00000000000 --- a/examples/mnist/lenet_train_test_BASE_8918.prototxt +++ /dev/null @@ -1,181 +0,0 @@ -name: "3d_triplet_train_test" -layer { - name: "data" - type: "Data" - top: "data" - top: "sim" - include { - phase: TRAIN - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/triplet/3d_triplet_train_leveldb" - batch_size: 250 - } -} -layer { - name: "data" - type: "Data" - top: "data" - top: "sim" - include { - phase: TEST - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/triplet/3d_triplet_test_leveldb" - batch_size: 250 - } -} -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 16 - kernel_size: 8 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "pool1" - top: "pool1" -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 7 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "relu2" - type: "ReLU" - bottom: "pool2" - top: "pool2" -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 256 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: 
"relu3" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip1" - top: "feat" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 4 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "loss" - type: "TripletLoss" - bottom: "feat" - bottom: "sim" - top: "loss" - triplet_loss_param { - margin: 0.02 - losstype: 1 - num_triplets: 3 - } -} diff --git a/examples/mnist/lenet_train_test_LOCAL_8918.prototxt b/examples/mnist/lenet_train_test_LOCAL_8918.prototxt deleted file mode 100644 index b18fc26cfd8..00000000000 --- a/examples/mnist/lenet_train_test_LOCAL_8918.prototxt +++ /dev/null @@ -1,168 +0,0 @@ -name: "LeNet" -layer { - name: "mnist" - type: "Data" - top: "data" - top: "label" - include { - phase: TRAIN - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/mnist/mnist_train_lmdb" - batch_size: 64 - backend: LMDB - } -} -layer { - name: "mnist" - type: "Data" - top: "data" - top: "label" - include { - phase: TEST - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/mnist/mnist_test_lmdb" - batch_size: 100 - backend: LMDB - } -} -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} 
-layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 500 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "ip2" - type: "InnerProduct" - bottom: "ip1" - top: "ip2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 10 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "accuracy" - type: "Accuracy" - bottom: "ip2" - bottom: "label" - top: "accuracy" - include { - phase: TEST - } -} -layer { - name: "loss" - type: "SoftmaxWithLoss" - bottom: "ip2" - bottom: "label" - top: "loss" -} diff --git a/examples/mnist/lenet_train_test_REMOTE_8918.prototxt b/examples/mnist/lenet_train_test_REMOTE_8918.prototxt deleted file mode 100644 index 52398c3876d..00000000000 --- a/examples/mnist/lenet_train_test_REMOTE_8918.prototxt +++ /dev/null @@ -1,181 +0,0 @@ -name: "3d_triplet_train_test" -layer { - name: "data" - type: "Data" - top: "data" - top: "sim" - include { - phase: TRAIN - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/triplet/3d_triplet_train_leveldb" - batch_size: 250 - } -} -layer { - name: "data" - type: "Data" - top: "data" - top: "sim" - include { - phase: TEST - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/triplet/3d_triplet_test_leveldb" - batch_size: 250 - } -} -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 16 - kernel_size: 8 - stride: 1 - weight_filler { - type: "xavier" 
- } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "pool1" - top: "pool1" -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 7 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "relu2" - type: "ReLU" - bottom: "pool2" - top: "pool2" -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 256 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu3" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip1" - top: "feat" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 16 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "loss" - type: "TripletLoss" - bottom: "feat" - bottom: "sim" - top: "loss" - triplet_loss_param { - margin: 0.01 - losstype: 1 - num_triplets: 3 - } -} diff --git a/examples/siamese/convert_mnist_siamese_data.cpp b/examples/siamese/convert_mnist_siamese_data.cpp index 2a563b6bbf8..8008b4439c5 100644 --- a/examples/siamese/convert_mnist_siamese_data.cpp +++ b/examples/siamese/convert_mnist_siamese_data.cpp @@ -1,3 +1,4 @@ +// // This script converts the MNIST dataset to the leveldb format used // by caffe to train 
siamese network. // Usage: @@ -9,14 +10,12 @@ #include "glog/logging.h" #include "google/protobuf/text_format.h" +#include "leveldb/db.h" #include "stdint.h" #include "caffe/proto/caffe.pb.h" #include "caffe/util/math_functions.hpp" -#ifdef USE_LEVELDB -#include "leveldb/db.h" - uint32_t swap_endian(uint32_t val) { val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); return (val << 16) | (val >> 16); @@ -73,43 +72,37 @@ void convert_dataset(const char* image_filename, const char* label_filename, char label_i; char label_j; - char label_k; - char* pixels = new char[3 * rows * cols]; + char* pixels = new char[2 * rows * cols]; const int kMaxKeyLength = 10; char key[kMaxKeyLength]; std::string value; caffe::Datum datum; - datum.set_channels(3); // one channel for each image in the pair + datum.set_channels(2); // one channel for each image in the pair datum.set_height(rows); datum.set_width(cols); LOG(INFO) << "A total of " << num_items << " items."; LOG(INFO) << "Rows: " << rows << " Cols: " << cols; for (int itemid = 0; itemid < num_items; ++itemid) { - int i = caffe::caffe_rng_rand() % num_items; // pick triplet groups + int i = caffe::caffe_rng_rand() % num_items; // pick a random pair int j = caffe::caffe_rng_rand() % num_items; - int k = caffe::caffe_rng_rand() % num_items; read_image(&image_file, &label_file, i, rows, cols, pixels, &label_i); read_image(&image_file, &label_file, j, rows, cols, pixels + (rows * cols), &label_j); - read_image(&image_file, &label_file, k, rows, cols, - pixels + (2 * rows * cols), &label_k); - - datum.set_data(pixels, 3*rows*cols); - if (label_i == label_j && label_i != label_k) { + datum.set_data(pixels, 2*rows*cols); + if (label_i == label_j) { datum.set_label(1); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", itemid); - db->Put(leveldb::WriteOptions(), std::string(key), value); } else { - itemid--; datum.set_label(0); } + datum.SerializeToString(&value); + snprintf(key, kMaxKeyLength, "%08d", 
itemid); + db->Put(leveldb::WriteOptions(), std::string(key), value); } delete db; - delete pixels; + delete [] pixels; } int main(int argc, char** argv) { @@ -128,8 +121,3 @@ int main(int argc, char** argv) { } return 0; } -#else -int main(int argc, char** argv) { - LOG(FATAL) << "This example requires LevelDB; compile with USE_LEVELDB."; -} -#endif // USE_LEVELDB diff --git a/examples/siamese/lfw_siamese.prototxt.orig b/examples/siamese/lfw_siamese.prototxt.orig deleted file mode 100644 index b76f3e6bcd6..00000000000 --- a/examples/siamese/lfw_siamese.prototxt.orig +++ /dev/null @@ -1,117 +0,0 @@ -name: "lfw_siamese" -input: "data" -<<<<<<< 86eaaa954fbb3c3cfd1d225c98e16c38af7b34a9:examples/siamese/lfw_siamese.prototxt -input_dim: 10000 -======= -input_dim: 2760 ->>>>>>> rearrange the training samples selection codes:examples/triplet/3d_triplet.prototxt -input_dim: 1 -input_dim: 150 -input_dim: 80 -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 500 - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "ip2" - type: "InnerProduct" - bottom: "ip1" - top: "ip2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - 
inner_product_param { - num_output: 10 - } -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip2" - top: "feat" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 2 - } -} diff --git a/examples/siamese/mnist_siamese.prototxt.orig b/examples/siamese/mnist_siamese.prototxt.orig deleted file mode 100644 index e496b6716b4..00000000000 --- a/examples/siamese/mnist_siamese.prototxt.orig +++ /dev/null @@ -1,126 +0,0 @@ -name: "mnist_siamese" -input: "data" -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21:examples/siamese/mnist_siamese.prototxt -input_shape { - dim: 10000 - dim: 1 - dim: 28 - dim: 28 -} -======= -input_dim: 6480 -input_dim: 1 -input_dim: 64 -input_dim: 64 ->>>>>>> new data and new feature dimension:examples/triplet/3d_triplet.prototxt -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 20 - kernel_size: 5 - stride: 1 - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - convolution_param { - num_output: 50 - kernel_size: 5 - stride: 1 - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 500 - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "ip2" - type: "InnerProduct" - bottom: "ip1" - top: "ip2" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { - num_output: 10 - } -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip2" 
- top: "feat" - param { - lr_mult: 1 - } - param { - lr_mult: 2 - } - inner_product_param { -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21:examples/siamese/mnist_siamese.prototxt - num_output: 2 -======= - num_output: 16 ->>>>>>> new data and new feature dimension:examples/triplet/3d_triplet.prototxt - } -} diff --git a/examples/siamese/mnist_siamese_solver.prototxt.orig b/examples/siamese/mnist_siamese_solver.prototxt.orig deleted file mode 100644 index 008115e5ff2..00000000000 --- a/examples/siamese/mnist_siamese_solver.prototxt.orig +++ /dev/null @@ -1,37 +0,0 @@ -# The train/test net protocol buffer definition -net: "examples/siamese/mnist_siamese_train_test.prototxt" -# test_iter specifies how many forward passes the test should carry out. -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21:examples/siamese/mnist_siamese_solver.prototxt -# In the case of MNIST, we have test batch size 100 and 100 test iterations, -# covering the full 10,000 testing images. -======= -# In the case of 3d database, we have test batch size 250 and 250 test iterations: 50*(2+3)=250, -# covering the full 6480 testing images:162*4*10=6480. ->>>>>>> new data and new feature dimension:examples/triplet/3d_triplet_solver.prototxt -test_iter: 100 -# Carry out testing every 500 training iterations. -test_interval: 500 -# The base learning rate, momentum and the weight decay of the network. 
-base_lr: 0.01 -momentum: 0.9 -weight_decay: 0.0000 -# The learning rate policy -lr_policy: "inv" -gamma: 0.0001 -power: 0.75 -# Display every 100 iterations -display: 100 -# The maximum number of iterations -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21:examples/siamese/mnist_siamese_solver.prototxt -max_iter: 50000 -# snapshot intermediate results -snapshot: 5000 -snapshot_prefix: "examples/siamese/mnist_siamese" -======= -max_iter: 40000 -# snapshot intermediate results -snapshot: 5000 -snapshot_prefix: "examples/triplet/3d_triplet" ->>>>>>> new data and new feature dimension:examples/triplet/3d_triplet_solver.prototxt -# solver mode: CPU or GPU -solver_mode: GPU diff --git a/examples/triplet/3d_triplet.prototxt b/examples/triplet/3d_triplet.prototxt index 6865d67a6c3..076e0be5040 100644 --- a/examples/triplet/3d_triplet.prototxt +++ b/examples/triplet/3d_triplet.prototxt @@ -1,6 +1,6 @@ name: "3d_triplet" input: "data" -input_dim: 276 +input_dim: 9720 input_dim: 1 input_dim: 64 input_dim: 64 @@ -105,6 +105,6 @@ layer { lr_mult: 2 } inner_product_param { - num_output: 4 + num_output: 3 } } diff --git a/examples/triplet/3d_triplet_solver.prototxt b/examples/triplet/3d_triplet_solver.prototxt index e5719a8f96b..eea97da7603 100644 --- a/examples/triplet/3d_triplet_solver.prototxt +++ b/examples/triplet/3d_triplet_solver.prototxt @@ -1,11 +1,11 @@ # The train/test net protocol buffer definition net: "examples/triplet/3d_triplet_train_test.prototxt" # test_iter specifies how many forward passes the test should carry out. -# In the case of 3d database, we have test batch size 23 and 23 test iterations, -# covering the full 276 testing images. -test_iter: 23 +# In the case of 3d database, we have test batch size 250 and 250 test iterations: 50*(2+3)=250, +# covering the full 9720 testing images:162*6*10=9720. +test_iter: 100 # Carry out testing every 500 training iterations. 
-test_interval: 23 +test_interval: 100 # The base learning rate, momentum and the weight decay of the network. base_lr: 0.001 momentum: 0.9 @@ -17,9 +17,9 @@ power: 0.75 # Display every 100 iterations display: 100 # The maximum number of iterations -max_iter: 1000 +max_iter: 80000 # snapshot intermediate results -snapshot: 200 +snapshot: 5000 snapshot_prefix: "examples/triplet/3d_triplet" # solver mode: CPU or GPU solver_mode: CPU diff --git a/examples/triplet/3d_triplet_train_test.prototxt b/examples/triplet/3d_triplet_train_test.prototxt index 1ac185aa2cc..60637b1a66b 100644 --- a/examples/triplet/3d_triplet_train_test.prototxt +++ b/examples/triplet/3d_triplet_train_test.prototxt @@ -158,7 +158,7 @@ layer { lr_mult: 2 } inner_product_param { - num_output: 4 + num_output: 3 weight_filler { type: "xavier" } diff --git a/examples/triplet/3d_triplet_train_test.prototxt.orig b/examples/triplet/3d_triplet_train_test.prototxt.orig deleted file mode 100644 index f79629bda21..00000000000 --- a/examples/triplet/3d_triplet_train_test.prototxt.orig +++ /dev/null @@ -1,184 +0,0 @@ -name: "3d_triplet_train_test" -layer { - name: "data" - type: "Data" - top: "data" - top: "sim" - include { - phase: TRAIN - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/triplet/3d_triplet_train_leveldb" - batch_size: 250 - } -} -layer { - name: "data" - type: "Data" - top: "data" - top: "sim" - include { - phase: TEST - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/triplet/3d_triplet_test_leveldb" - batch_size: 250 - } -} -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 16 - kernel_size: 8 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1" - type: "Pooling" - bottom: "conv1" - top: "pool1" - 
pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "pool1" - top: "pool1" -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 7 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "relu2" - type: "ReLU" - bottom: "pool2" - top: "pool2" -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 256 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu3" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip1" - top: "feat" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 4 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "loss" - type: "TripletLoss" - bottom: "feat" - bottom: "sim" - top: "loss" - triplet_loss_param { - margin: 1 -<<<<<<< 8bea48cb77e890cda255da58b142f3a402196328 -======= - losstype: 0 - num_triplets: 3 ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - } -} diff --git a/examples/triplet/convert_3d_triplet_data.cpp b/examples/triplet/convert_3d_triplet_data.cpp index 2147e1ae1ca..943efd9965f 100644 --- a/examples/triplet/convert_3d_triplet_data.cpp +++ b/examples/triplet/convert_3d_triplet_data.cpp @@ -18,8 +18,7 @@ uint32_t swap_endian(uint32_t val) { void read_image(std::ifstream* image_file, 
std::ifstream* label_file, uint32_t index, uint32_t rows, uint32_t cols, char* pixels, char* label_temp, signed char* label, int rgb_use) { - if (rgb_use == 0) - { + if (rgb_use == 0) { image_file->seekg(index * rows * cols + 16); image_file->read(pixels, rows * cols); label_file->seekg(index * 4 + 8); @@ -37,7 +36,8 @@ void read_image(std::ifstream* image_file, std::ifstream* label_file, } void convert_dataset(const char* image_filename, const char* label_filename, - const char* db_filename, const char* class_number, const char* rgb_use) { + const char* db_filename, + const char* class_number, const char* rgb_use) { int rgb_use1 = atoi(rgb_use); int class_num = atoi(class_number); // Open files diff --git a/examples/triplet/convert_3d_triplet_data.cpp.orig b/examples/triplet/convert_3d_triplet_data.cpp.orig deleted file mode 100644 index 9d523f2309c..00000000000 --- a/examples/triplet/convert_3d_triplet_data.cpp.orig +++ /dev/null @@ -1,255 +0,0 @@ -// Usage: -// convert_3d_data input_image_file input_label_file output_db_file -#include // NOLINT(readability/streams) -#include -#include -#include "caffe/proto/caffe.pb.h" -#include "caffe/util/math_functions.hpp" -#include "glog/logging.h" -#include "google/protobuf/text_format.h" -#include "leveldb/db.h" -#include "stdint.h" - -uint32_t swap_endian(uint32_t val) { - val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); - return (val << 16) | (val >> 16); -} - -void read_image(std::ifstream* image_file, std::ifstream* label_file, - uint32_t index, uint32_t rows, uint32_t cols, -<<<<<<< fe02a6fcce001ba81c2e792eb5325a24a5ea888a - char* pixels, char* label_temp, signed char* label) { - image_file->seekg(index * rows * cols + 16); - image_file->read(pixels, rows * cols); - label_file->seekg(index * 4 + 8); - label_file->read(label_temp, 4); - for (int i = 0; i < 4; i++) - *(label+i) = (signed char)*(label_temp+i); -} - -void convert_dataset(const char* image_filename, const char* label_filename, - const char* 
db_filename, const char* class_number) { -======= - char* pixels, char* label_temp, signed char* label, int rgb_use) { - if (rgb_use == 0) - { - image_file->seekg(index * rows * cols + 16); - image_file->read(pixels, rows * cols); - label_file->seekg(index * 4 + 8); - label_file->read(label_temp, 4); - for (int i = 0; i < 4; i++) - *(label+i) = (signed char)*(label_temp+i); - } else { - image_file->seekg(3 * index * rows * cols + 16); - image_file->read(pixels, 3 * rows * cols); - label_file->seekg(index * 4 + 8); - label_file->read(label_temp, 4); - for (int i = 0; i < 4; i++) - *(label+i) = (signed char)*(label_temp+i); - } -} - -void convert_dataset(const char* image_filename, const char* label_filename, - const char* db_filename, const char* class_number, const char* rgb_use) { - int rgb_use1 = atoi(rgb_use); ->>>>>>> update triplet data generation codes for RGB using - int class_num = atoi(class_number); - // Open files - std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); - std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); - CHECK(image_file) << "Unable to open file " << image_filename; - CHECK(label_file) << "Unable to open file " << label_filename; - // Read the magic and the meta data - uint32_t magic; - uint32_t num_items; - uint32_t num_labels; - uint32_t rows; - uint32_t cols; - - image_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - CHECK_EQ(magic, 2051) << "Incorrect image file magic."; - label_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - CHECK_EQ(magic, 2050) << "Incorrect label file magic."; - image_file.read(reinterpret_cast(&num_items), 4); - num_items = swap_endian(num_items); - label_file.read(reinterpret_cast(&num_labels), 4); - num_labels = swap_endian(num_labels); - CHECK_EQ(num_items, num_labels); - image_file.read(reinterpret_cast(&rows), 4); - rows = swap_endian(rows); - image_file.read(reinterpret_cast(&cols), 4); - cols = swap_endian(cols); 
- - // Open leveldb - leveldb::DB* db; - leveldb::Options options; - options.create_if_missing = true; - options.error_if_exists = true; - leveldb::Status status = leveldb::DB::Open( - options, db_filename, &db); - CHECK(status.ok()) << "Failed to open leveldb " << db_filename - << ". Is it already existing?"; - - char* label_temp = new char[4]; // label for unsigned char* - signed char* label_i = new signed char[4]; // label for triplet - signed char* label_j = new signed char[4]; - signed char* label_k = new signed char[4]; - signed char* label_l = new signed char[4]; // label for pair wise - signed char* label_m = new signed char[4]; - char* pixels1 = new char[rows * cols]; - char* pixels2 = new char[rows * cols]; - char* pixels3 = new char[rows * cols]; - char* pixels4 = new char[rows * cols]; - char* pixels5 = new char[rows * cols]; - const int kMaxKeyLength = 10; - char key[kMaxKeyLength]; - std::string value; - caffe::Datum datum; - if (rgb_use1 == 0) - datum.set_channels(1); - else - datum.set_channels(3); - datum.set_height(rows); - datum.set_width(cols); - LOG(INFO) << "A total of " << num_items << " items."; - LOG(INFO) << "Rows: " << rows << " Cols: " << cols; - int counter = 0; - for (unsigned int times = 0; times < 10; ++times) { - // iteration in the samples of all class - for (unsigned int itemid = 0; itemid < num_items/class_num; ++itemid) { - // iteration in the samples in one class - for (unsigned int class_ind = 0; class_ind < class_num; ++class_ind) { - // use reference sample one by one at each iteration - int i = itemid % num_items + class_ind*num_items/class_num; - int j = caffe::caffe_rng_rand() % num_items; // pick triplet groups - int k = caffe::caffe_rng_rand() % num_items; - int l = caffe::caffe_rng_rand() % num_items; // pick pair wise groups - int m = caffe::caffe_rng_rand() % num_items; - read_image(&image_file, &label_file, i, rows, cols, // read triplet - pixels1, label_temp, label_i); - read_image(&image_file, &label_file, j, 
rows, cols, - pixels2, label_temp, label_j); - read_image(&image_file, &label_file, k, rows, cols, - pixels3, label_temp, label_k); - read_image(&image_file, &label_file, l, rows, cols, // read pair wise - pixels4, label_temp, label_l); - read_image(&image_file, &label_file, m, rows, cols, - pixels5, label_temp, label_m); - - bool pair_pass = false; - bool triplet1_pass = false; - bool triplet2_pass = false; - bool triplet3_class_same = false; - bool triplet3_pass = false; - - int ij_diff_x = static_cast(*(label_i+1)-*(label_j+1)); - int ij_diff_y = static_cast(*(label_i+2)-*(label_j+2)); - int ij_diff_z = static_cast(*(label_i+3)-*(label_j+3)); - int im_diff_x = static_cast(*(label_i+1)-*(label_m+1)); - int im_diff_y = static_cast(*(label_i+2)-*(label_m+2)); - int im_diff_z = static_cast(*(label_i+3)-*(label_m+3)); - - int ij_x = ij_diff_x*ij_diff_x; - int ij_y = ij_diff_y*ij_diff_y; - int ij_z = ij_diff_z*ij_diff_z; - int im_x = im_diff_x*im_diff_x; - int im_y = im_diff_y*im_diff_y; - int im_z = im_diff_z*im_diff_z; - - float dist_ij = std::sqrt(ij_x + ij_y + ij_z); - float dist_im = std::sqrt(im_x + im_y + im_z); - if (*label_i == *label_j && dist_ij < 100/3 && dist_ij != 0) - pair_pass = true; - if (pair_pass && (*label_i != *label_k)) - triplet1_pass = true; - if (pair_pass && (*label_i != *label_l)) - triplet2_pass = true; - if (pair_pass && (*label_i == *label_m)) - triplet3_class_same = true; - if (triplet3_class_same && dist_im > 100/3) - triplet3_pass = true; - if (pair_pass && triplet1_pass && triplet2_pass && triplet3_pass) { -<<<<<<< fe02a6fcce001ba81c2e792eb5325a24a5ea888a - datum.set_data(pixels1, rows*cols); // set data - datum.set_label(int(*label_i)); -======= - datum.set_data(pixels1, db_size); // set data - datum.set_label(static_cast(*label_i)); ->>>>>>> update triplet data generation codes for RGB using - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", counter); - db->Put(leveldb::WriteOptions(), std::string(key), 
value); - counter++; -<<<<<<< fe02a6fcce001ba81c2e792eb5325a24a5ea888a - datum.set_data(pixels2, rows*cols); // set data - datum.set_label(int(*label_j)); -======= - datum.set_data(pixels2, db_size); // set data - datum.set_label(static_cast(*label_j)); ->>>>>>> update triplet data generation codes for RGB using - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", counter); - db->Put(leveldb::WriteOptions(), std::string(key), value); - counter++; -<<<<<<< fe02a6fcce001ba81c2e792eb5325a24a5ea888a - datum.set_data(pixels3, rows*cols); // set data - datum.set_label(int(*label_k)); -======= - datum.set_data(pixels3, db_size); // set data - datum.set_label(static_cast(*label_k)); ->>>>>>> update triplet data generation codes for RGB using - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", counter); - db->Put(leveldb::WriteOptions(), std::string(key), value); - counter++; -<<<<<<< fe02a6fcce001ba81c2e792eb5325a24a5ea888a - datum.set_data(pixels4, rows*cols); // set data - datum.set_label(int(*label_l)); -======= - datum.set_data(pixels4, db_size); // set data - datum.set_label(static_cast(*label_l)); ->>>>>>> update triplet data generation codes for RGB using - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", counter); - db->Put(leveldb::WriteOptions(), std::string(key), value); - counter++; -<<<<<<< fe02a6fcce001ba81c2e792eb5325a24a5ea888a - datum.set_data(pixels5, rows*cols); // set data - datum.set_label(int(*label_m)); -======= - datum.set_data(pixels5, db_size); // set data - datum.set_label(static_cast(*label_m)); ->>>>>>> update triplet data generation codes for RGB using - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", counter); - db->Put(leveldb::WriteOptions(), std::string(key), value); - counter++; - } else { - class_ind--; - } - } // iteration in the samples of all class - } // iteration in the samples in one class - } // iteration in times - delete db; - delete 
pixels1; - delete pixels2; - delete pixels3; - delete pixels4; - delete pixels5; -} - -int main(int argc, char** argv) { - if (argc != 5) { - printf("This script converts the images dataset to the leveldb format used\n" - "by caffe to train a triplet network.\n" - "Usage:\n" - " convert_3d_data input_image_file input_label_file " - "output_db_file class_number\n"); - } else { - google::InitGoogleLogging(argv[0]); - convert_dataset(argv[1], argv[2], argv[3], argv[4]); - } - return 0; -} diff --git a/examples/triplet/convert_3d_triplet_data.cpp.orig.orig b/examples/triplet/convert_3d_triplet_data.cpp.orig.orig deleted file mode 100644 index 7b6979f39c3..00000000000 --- a/examples/triplet/convert_3d_triplet_data.cpp.orig.orig +++ /dev/null @@ -1,275 +0,0 @@ -// Usage: -// convert_3d_data input_image_file input_label_file output_db_file -#include // NOLINT(readability/streams) -#include -#include "caffe/proto/caffe.pb.h" -#include "caffe/util/math_functions.hpp" -#include "glog/logging.h" -#include "google/protobuf/text_format.h" -#include "leveldb/db.h" -#include "math.h" -#include "stdint.h" - -uint32_t swap_endian(uint32_t val) { - val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); - return (val << 16) | (val >> 16); -} - -void read_image(std::ifstream* image_file, std::ifstream* label_file, - uint32_t index, uint32_t rows, uint32_t cols, -<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 - char* pixels, char* label_temp, signed char* label) { - image_file->seekg(index * rows * cols + 16); - image_file->read(pixels, rows * cols); - label_file->seekg(index * 4 + 8); - label_file->read(label_temp, 4); - for (int i = 0; i < 4; i++) - *(label+i) = (signed char)*(label_temp+i); -} - -void convert_dataset(const char* image_filename, const char* label_filename, - const char* db_filename, const char* class_number) { -======= - char* pixels, char* label_temp, signed char* label, int rgb_use) { - if (rgb_use == 0) { - image_file->seekg(index * rows * cols + 16); - 
image_file->read(pixels, rows * cols); - label_file->seekg(index * 4 + 8); - label_file->read(label_temp, 4); - for (int i = 0; i < 4; i++) - *(label+i) = (signed char)*(label_temp+i); - } else { - image_file->seekg(3 * index * rows * cols + 16); - image_file->read(pixels, 3 * rows * cols); - label_file->seekg(index * 4 + 8); - label_file->read(label_temp, 4); - for (int i = 0; i < 4; i++) - *(label+i) = (signed char)*(label_temp+i); - } -} - -void convert_dataset(const char* image_filename, const char* label_filename, - const char* db_filename, const char* class_number, - const char* rgb_use) { - int rgb_use1 = atoi(rgb_use); ->>>>>>> new data and new feature dimension - int class_num = atoi(class_number); - // Open files - std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); - std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); - CHECK(image_file) << "Unable to open file " << image_filename; - CHECK(label_file) << "Unable to open file " << label_filename; - // Read the magic and the meta data - uint32_t magic; - uint32_t num_items; - uint32_t num_labels; - uint32_t rows; - uint32_t cols; - - image_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - CHECK_EQ(magic, 2051) << "Incorrect image file magic."; - label_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - CHECK_EQ(magic, 2050) << "Incorrect label file magic."; - image_file.read(reinterpret_cast(&num_items), 4); - num_items = swap_endian(num_items); - label_file.read(reinterpret_cast(&num_labels), 4); - num_labels = swap_endian(num_labels); - CHECK_EQ(num_items, num_labels); - image_file.read(reinterpret_cast(&rows), 4); - rows = swap_endian(rows); - image_file.read(reinterpret_cast(&cols), 4); - cols = swap_endian(cols); - - // Open leveldb - leveldb::DB* db; - leveldb::Options options; - options.create_if_missing = true; - options.error_if_exists = true; - leveldb::Status status = leveldb::DB::Open( - options, 
db_filename, &db); - CHECK(status.ok()) << "Failed to open leveldb " << db_filename - << ". Is it already existing?"; - - char* label_temp = new char[4]; // label for unsigned char* - signed char* label_i = new signed char[4]; // label for triplet - signed char* label_j = new signed char[4]; - signed char* label_k = new signed char[4]; - signed char* label_l = new signed char[4]; // label for pair wise - signed char* label_m = new signed char[4]; -<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 - char* pixels1 = new char[rows * cols]; - char* pixels2 = new char[rows * cols]; - char* pixels3 = new char[rows * cols]; - char* pixels4 = new char[rows * cols]; - char* pixels5 = new char[rows * cols]; -======= - int db_size; - if (rgb_use1 == 0) - db_size = rows * cols; - else - db_size = 3 * rows * cols; - char* pixels1 = new char[db_size]; - char* pixels2 = new char[db_size]; - char* pixels3 = new char[db_size]; - char* pixels4 = new char[db_size]; - char* pixels5 = new char[db_size]; ->>>>>>> new data and new feature dimension - const int kMaxKeyLength = 10; - char key[kMaxKeyLength]; - std::string value; - caffe::Datum datum; - datum.set_channels(1); - datum.set_height(rows); - datum.set_width(cols); - LOG(INFO) << "A total of " << num_items << " items."; - LOG(INFO) << "Rows: " << rows << " Cols: " << cols; - int counter = 0; -<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 - for (unsigned int times = 0; times < 5; ++times) { -======= - for (unsigned int times = 0; times < 10; ++times) { ->>>>>>> new data and new feature dimension - // iteration in the samples of all class - for (unsigned int itemid = 0; itemid < num_items/class_num; ++itemid) { - // iteration in the samples in one class - for (unsigned int class_ind = 0; class_ind < class_num; ++class_ind) { - // use reference sample one by one at each iteration - int i = itemid % num_items + class_ind*num_items/class_num; - int j = caffe::caffe_rng_rand() % num_items; // pick triplet groups - int k = 
caffe::caffe_rng_rand() % num_items; - int l = caffe::caffe_rng_rand() % num_items; // pick pair wise groups - int m = caffe::caffe_rng_rand() % num_items; - read_image(&image_file, &label_file, i, rows, cols, // read triplet -<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 - pixels1, label_temp, label_i); - read_image(&image_file, &label_file, j, rows, cols, - pixels2, label_temp, label_j); - read_image(&image_file, &label_file, k, rows, cols, - pixels3, label_temp, label_k); - read_image(&image_file, &label_file, l, rows, cols, // read pair wise - pixels4, label_temp, label_l); - read_image(&image_file, &label_file, m, rows, cols, - pixels5, label_temp, label_m); -======= - pixels1, label_temp, label_i, rgb_use1); - read_image(&image_file, &label_file, j, rows, cols, - pixels2, label_temp, label_j, rgb_use1); - read_image(&image_file, &label_file, k, rows, cols, - pixels3, label_temp, label_k, rgb_use1); - read_image(&image_file, &label_file, l, rows, cols, // read pair wise - pixels4, label_temp, label_l, rgb_use1); - read_image(&image_file, &label_file, m, rows, cols, - pixels5, label_temp, label_m, rgb_use1); ->>>>>>> new data and new feature dimension - - bool pair_pass = false; - bool triplet1_pass = false; - bool triplet2_pass = false; - bool triplet3_class_same = false; - bool triplet3_pass = false; - - int ij_diff_x = static_cast(*(label_i+1)-*(label_j+1)); - int ij_diff_y = static_cast(*(label_i+2)-*(label_j+2)); - int ij_diff_z = static_cast(*(label_i+3)-*(label_j+3)); - int im_diff_x = static_cast(*(label_i+1)-*(label_m+1)); - int im_diff_y = static_cast(*(label_i+2)-*(label_m+2)); - int im_diff_z = static_cast(*(label_i+3)-*(label_m+3)); - - int ij_x = ij_diff_x*ij_diff_x; - int ij_y = ij_diff_y*ij_diff_y; - int ij_z = ij_diff_z*ij_diff_z; - int im_x = im_diff_x*im_diff_x; - int im_y = im_diff_y*im_diff_y; - int im_z = im_diff_z*im_diff_z; - - float dist_ij = std::sqrt(ij_x + ij_y + ij_z); - float dist_im = std::sqrt(im_x + im_y + im_z); -<<<<<<< 
3aaa859dba8ca4f7a962d285da0d762656b6d444 - if (*label_i == *label_j && dist_ij < 100/2) -======= - if (*label_i == *label_j && dist_ij < 100/3 && dist_ij != 0) ->>>>>>> new data and new feature dimension - pair_pass = true; - if (pair_pass && (*label_i != *label_k)) - triplet1_pass = true; - if (pair_pass && (*label_i != *label_l)) - triplet2_pass = true; - if (pair_pass && (*label_i == *label_m)) - triplet3_class_same = true; -<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 - if (triplet3_class_same && dist_im > 100*sqrt(2)) -======= - if (triplet3_class_same && dist_im > 100/3 && dist_im < 100) ->>>>>>> new data and new feature dimension - triplet3_pass = true; - if (pair_pass && triplet1_pass && triplet2_pass && triplet3_pass) { - datum.set_data(pixels1, rows*cols); // set data - datum.set_label(static_cast(*label_i)); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", counter); - db->Put(leveldb::WriteOptions(), std::string(key), value); - counter++; - datum.set_data(pixels2, rows*cols); // set data - datum.set_label(static_cast(*label_j)); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", counter); - db->Put(leveldb::WriteOptions(), std::string(key), value); - counter++; - datum.set_data(pixels3, rows*cols); // set data - datum.set_label(static_cast(*label_k)); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", counter); - db->Put(leveldb::WriteOptions(), std::string(key), value); - counter++; - datum.set_data(pixels4, rows*cols); // set data - datum.set_label(static_cast(*label_l)); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", counter); - db->Put(leveldb::WriteOptions(), std::string(key), value); - counter++; - datum.set_data(pixels5, rows*cols); // set data - datum.set_label(static_cast(*label_m)); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", counter); - db->Put(leveldb::WriteOptions(), std::string(key), value); - counter++; - } 
else { - class_ind--; - } - } // iteration in the samples of all class - } // iteration in the samples in one class - } // iteration in times - delete db; - delete pixels1; - delete pixels2; - delete pixels3; - delete pixels4; - delete pixels5; -} - -int main(int argc, char** argv) { -<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 - if (argc != 5) { -======= - if (argc != 6) { ->>>>>>> new data and new feature dimension - printf("This script converts the dataset to the leveldb format used\n" - "by caffe to train a triplet network.\n" - "Usage:\n" - " convert_3d_data input_image_file input_label_file " -<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 - "output_db_file class_number\n"); - } else { - google::InitGoogleLogging(argv[0]); - convert_dataset(argv[1], argv[2], argv[3], argv[4]); -======= - "output_db_file class_number rgb_use \n"); - } else { - google::InitGoogleLogging(argv[0]); - convert_dataset(argv[1], argv[2], argv[3], argv[4], argv[5]); ->>>>>>> new data and new feature dimension - } - return 0; -} diff --git a/examples/triplet/convert_lfw_triplet_data.cpp.orig b/examples/triplet/convert_lfw_triplet_data.cpp.orig deleted file mode 100644 index 64c1db06e02..00000000000 --- a/examples/triplet/convert_lfw_triplet_data.cpp.orig +++ /dev/null @@ -1,148 +0,0 @@ -// This script converts the MNIST dataset to the leveldb format used -// by caffe to train siamese network. 
-// Usage: -// convert_mnist_data input_image_file input_label_file output_db_file -// The MNIST dataset could be downloaded at -// http://yann.lecun.com/exdb/mnist/ -#include // NOLINT(readability/streams) -#include - -#include "glog/logging.h" -#include "google/protobuf/text_format.h" -#include "leveldb/db.h" -#include "stdint.h" - -#include "caffe/proto/caffe.pb.h" -#include "caffe/util/math_functions.hpp" - -uint32_t swap_endian(uint32_t val) { - val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); - return (val << 16) | (val >> 16); -} - -void read_image(std::ifstream* image_file, std::ifstream* label_file, - uint32_t index, uint32_t rows, uint32_t cols, - char* pixels, char* label) { - image_file->seekg(index * rows * cols + 16); - image_file->read(pixels, rows * cols); - label_file->seekg(index + 8); - label_file->read(label, 1); -} - -void convert_dataset(const char* image_filename, const char* label_filename, - const char* db_filename) { - // Open files - std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); - std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); - CHECK(image_file) << "Unable to open file " << image_filename; - CHECK(label_file) << "Unable to open file " << label_filename; - // Read the magic and the meta data - uint32_t magic; - uint32_t num_items; - uint32_t num_labels; - uint32_t rows; - uint32_t cols; - - image_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - label_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - image_file.read(reinterpret_cast(&num_items), 4); - num_items = swap_endian(num_items); - label_file.read(reinterpret_cast(&num_labels), 4); - num_labels = swap_endian(num_labels); - CHECK_EQ(num_items, num_labels); - image_file.read(reinterpret_cast(&rows), 4); - rows = swap_endian(rows); - image_file.read(reinterpret_cast(&cols), 4); - cols = swap_endian(cols); - - // Open leveldb - leveldb::DB* db; - leveldb::Options options; - 
options.create_if_missing = true; - options.error_if_exists = true; - leveldb::Status status = leveldb::DB::Open( - options, db_filename, &db); - CHECK(status.ok()) << "Failed to open leveldb " << db_filename - << ". Is it already existing?"; - - char label_i; - char label_j; - char label_k; -<<<<<<< 85bc5292fc9e9df8aeb2b6b6dfc9b0b274ee4db0:examples/triplet/convert_lfw_triplet_data.cpp - char* pixels = new char[3 * rows * cols]; -======= - char label_l; // label for pair wise - char label_m; - char* pixels = new char[5 * rows * cols]; ->>>>>>> ready for a review on triplet training using the 3D data, net work definition is the same as described in paper:examples/triplet/convert_3d_triplet_data.cpp - const int kMaxKeyLength = 10; - char key[kMaxKeyLength]; - std::string value; - - caffe::Datum datum; - datum.set_channels(3); // one channel for each image in the pair - datum.set_height(rows); - datum.set_width(cols); - LOG(INFO) << "A total of " << num_items << " items."; - LOG(INFO) << "Rows: " << rows << " Cols: " << cols; - for (int itemid = 0; itemid < num_items; ++itemid) { - // pick triplet groups - int i = caffe::caffe_rng_rand() % num_items; - int j = caffe::caffe_rng_rand() % num_items; - int k = caffe::caffe_rng_rand() % num_items; -<<<<<<< 85bc5292fc9e9df8aeb2b6b6dfc9b0b274ee4db0:examples/triplet/convert_lfw_triplet_data.cpp -======= - // pick pair wise groups - int l = caffe::caffe_rng_rand() % num_items; - int m = caffe::caffe_rng_rand() % num_items; - // read triplet groups ->>>>>>> ready for a review on triplet training using the 3D data, net work definition is the same as described in paper:examples/triplet/convert_3d_triplet_data.cpp - read_image(&image_file, &label_file, i, rows, cols, - pixels, &label_i); - read_image(&image_file, &label_file, j, rows, cols, - pixels + (rows * cols), &label_j); - read_image(&image_file, &label_file, k, rows, cols, - pixels + (2 * rows * cols), &label_k); -<<<<<<< 
85bc5292fc9e9df8aeb2b6b6dfc9b0b274ee4db0:examples/triplet/convert_lfw_triplet_data.cpp -======= - // read pair wise groups - read_image(&image_file, &label_file, l, rows, cols, - pixels + (3 * rows * cols), &label_l); - read_image(&image_file, &label_file, m, rows, cols, - pixels + (4 * rows * cols), &label_m); ->>>>>>> ready for a review on triplet training using the 3D data, net work definition is the same as described in paper:examples/triplet/convert_3d_triplet_data.cpp - - datum.set_data(pixels, 3*rows*cols); - if (label_i == label_j && label_i != label_k) { - datum.set_label(1); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", itemid); - db->Put(leveldb::WriteOptions(), std::string(key), value); - } else { - itemid--; - datum.set_label(0); - } - } - - delete db; - delete pixels; -} - -int main(int argc, char** argv) { - if (argc != 4) { - printf("This script converts the MNIST dataset to the leveldb format used\n" - "by caffe to train a siamese network.\n" - "Usage:\n" - " convert_mnist_data input_image_file input_label_file " - "output_db_file\n" - "The MNIST dataset could be downloaded at\n" - " http://yann.lecun.com/exdb/mnist/\n" - "You should gunzip them after downloading.\n"); - } else { - google::InitGoogleLogging(argv[0]); - convert_dataset(argv[1], argv[2], argv[3]); - } - return 0; -} diff --git a/examples/triplet/convert_mnist_triplet_data.cpp.orig b/examples/triplet/convert_mnist_triplet_data.cpp.orig deleted file mode 100644 index 93ee55c67f0..00000000000 --- a/examples/triplet/convert_mnist_triplet_data.cpp.orig +++ /dev/null @@ -1,150 +0,0 @@ -// This script converts the MNIST dataset to the leveldb format used -// by caffe to train siamese network. 
-// Usage: -// convert_mnist_data input_image_file input_label_file output_db_file -// The MNIST dataset could be downloaded at -// http://yann.lecun.com/exdb/mnist/ -#include // NOLINT(readability/streams) -#include - -#include "glog/logging.h" -#include "google/protobuf/text_format.h" -#include "leveldb/db.h" -#include "stdint.h" - -#include "caffe/proto/caffe.pb.h" -#include "caffe/util/math_functions.hpp" - -uint32_t swap_endian(uint32_t val) { - val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); - return (val << 16) | (val >> 16); -} - -void read_image(std::ifstream* image_file, std::ifstream* label_file, - uint32_t index, uint32_t rows, uint32_t cols, - char* pixels, char* label) { - image_file->seekg(index * rows * cols + 16); - image_file->read(pixels, rows * cols); - label_file->seekg(index + 8); - label_file->read(label, 1); -} - -void convert_dataset(const char* image_filename, const char* label_filename, - const char* db_filename) { - // Open files - std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); - std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); - CHECK(image_file) << "Unable to open file " << image_filename; - CHECK(label_file) << "Unable to open file " << label_filename; - // Read the magic and the meta data - uint32_t magic; - uint32_t num_items; - uint32_t num_labels; - uint32_t rows; - uint32_t cols; - - image_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - CHECK_EQ(magic, 2051) << "Incorrect image file magic."; - label_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - CHECK_EQ(magic, 2049) << "Incorrect label file magic."; - image_file.read(reinterpret_cast(&num_items), 4); - num_items = swap_endian(num_items); - label_file.read(reinterpret_cast(&num_labels), 4); - num_labels = swap_endian(num_labels); - CHECK_EQ(num_items, num_labels); - image_file.read(reinterpret_cast(&rows), 4); - rows = swap_endian(rows); - 
image_file.read(reinterpret_cast(&cols), 4); - cols = swap_endian(cols); - - // Open leveldb - leveldb::DB* db; - leveldb::Options options; - options.create_if_missing = true; - options.error_if_exists = true; - leveldb::Status status = leveldb::DB::Open( - options, db_filename, &db); - CHECK(status.ok()) << "Failed to open leveldb " << db_filename - << ". Is it already existing?"; - - char label_i; - char label_j; - char label_k; -<<<<<<< 85bc5292fc9e9df8aeb2b6b6dfc9b0b274ee4db0 - char* pixels = new char[3 * rows * cols]; -======= - char label_l; // label for pair wise - char label_m; - char* pixels = new char[5 * rows * cols]; ->>>>>>> ready for a review on triplet training using the 3D data, net work definition is the same as described in paper - const int kMaxKeyLength = 10; - char key[kMaxKeyLength]; - std::string value; - - caffe::Datum datum; - datum.set_channels(3); // one channel for each image in the pair - datum.set_height(rows); - datum.set_width(cols); - LOG(INFO) << "A total of " << num_items << " items."; - LOG(INFO) << "Rows: " << rows << " Cols: " << cols; - for (int itemid = 0; itemid < num_items; ++itemid) { - // pick triplet groups - int i = caffe::caffe_rng_rand() % num_items; - int j = caffe::caffe_rng_rand() % num_items; - int k = caffe::caffe_rng_rand() % num_items; -<<<<<<< 85bc5292fc9e9df8aeb2b6b6dfc9b0b274ee4db0 -======= - // pick pair wise groups - int l = caffe::caffe_rng_rand() % num_items; - int m = caffe::caffe_rng_rand() % num_items; - // read triplet groups ->>>>>>> ready for a review on triplet training using the 3D data, net work definition is the same as described in paper - read_image(&image_file, &label_file, i, rows, cols, - pixels, &label_i); - read_image(&image_file, &label_file, j, rows, cols, - pixels + (rows * cols), &label_j); - read_image(&image_file, &label_file, k, rows, cols, - pixels + (2 * rows * cols), &label_k); -<<<<<<< 85bc5292fc9e9df8aeb2b6b6dfc9b0b274ee4db0 -======= - // pair wise groups - 
read_image(&image_file, &label_file, l, rows, cols, - pixels + (3 * rows * cols), &label_l); - read_image(&image_file, &label_file, m, rows, cols, - pixels + (4 * rows * cols), &label_m); ->>>>>>> ready for a review on triplet training using the 3D data, net work definition is the same as described in paper - - datum.set_data(pixels, 3*rows*cols); - if (label_i == label_j && label_i != label_k) { - datum.set_label(1); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", itemid); - db->Put(leveldb::WriteOptions(), std::string(key), value); - } else { - itemid--; - datum.set_label(0); - } - } - - delete db; - delete pixels; -} - -int main(int argc, char** argv) { - if (argc != 4) { - printf("This script converts the MNIST dataset to the leveldb format used\n" - "by caffe to train a siamese network.\n" - "Usage:\n" - " convert_mnist_data input_image_file input_label_file " - "output_db_file\n" - "The MNIST dataset could be downloaded at\n" - " http://yann.lecun.com/exdb/mnist/\n" - "You should gunzip them after downloading.\n"); - } else { - google::InitGoogleLogging(argv[0]); - convert_dataset(argv[1], argv[2], argv[3]); - } - return 0; -} diff --git a/examples/triplet/create_3d_triplet.sh b/examples/triplet/create_3d_triplet.sh index e2f65ea414c..0fadd9b7e09 100755 --- a/examples/triplet/create_3d_triplet.sh +++ b/examples/triplet/create_3d_triplet.sh @@ -13,12 +13,12 @@ $EXAMPLES/convert_3d_triplet_data.bin \ $DATA/binary_image_train \ $DATA/binary_label_train \ ./examples/triplet/3d_triplet_train_leveldb \ - 4 \ + 6 \ 0 $EXAMPLES/convert_3d_triplet_data.bin \ $DATA/binary_image_test \ $DATA/binary_label_test \ ./examples/triplet/3d_triplet_test_leveldb \ - 4 \ + 6 \ 0 echo "Done." 
diff --git a/examples/triplet/create_3d_triplet.sh.orig b/examples/triplet/create_3d_triplet.sh.orig deleted file mode 100755 index f634f552d3a..00000000000 --- a/examples/triplet/create_3d_triplet.sh.orig +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env sh -# This script converts the mnist data into leveldb format. - -EXAMPLES=./build/examples/triplet -DATA=./data/linemod - -echo "Creating leveldb..." - -rm -rf ./examples/triplet/3d_triplet_train_leveldb -rm -rf ./examples/triplet/3d_triplet_test_leveldb - -$EXAMPLES/convert_3d_triplet_data.bin \ - $DATA/binary_image_train \ - $DATA/binary_label_train \ - ./examples/triplet/3d_triplet_train_leveldb \ -<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 - 4 -======= - 4 \ - 0 ->>>>>>> new data and new feature dimension -$EXAMPLES/convert_3d_triplet_data.bin \ - $DATA/binary_image_test \ - $DATA/binary_label_test \ - ./examples/triplet/3d_triplet_test_leveldb \ -<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 - 4 - -======= - 4 \ - 0 ->>>>>>> new data and new feature dimension -echo "Done." diff --git a/examples/triplet/lfw_triplet_solver.prototxt.orig b/examples/triplet/lfw_triplet_solver.prototxt.orig deleted file mode 100644 index 678ee05e150..00000000000 --- a/examples/triplet/lfw_triplet_solver.prototxt.orig +++ /dev/null @@ -1,40 +0,0 @@ -# The train/test net protocol buffer definition -net: "examples/triplet/lfw_triplet_train_test.prototxt" -# test_iter specifies how many forward passes the test should carry out. -<<<<<<< 86eaaa954fbb3c3cfd1d225c98e16c38af7b34a9:examples/triplet/lfw_triplet_solver.prototxt -# In the case of lfw, we have test batch size 100 and 100 test iterations, -# covering the full 10,000 testing images. -test_iter: 100 -# Carry out testing every 500 training iterations. -test_interval: 500 -======= -# In the case of 3d database, we have test batch size 23 and 23 test iterations, -# covering the full 2760 testing images. -test_iter: 100 -# Carry out testing every 500 training iterations. 
-test_interval: 100 ->>>>>>> rearrange the training samples selection codes:examples/triplet/3d_triplet_solver.prototxt -# The base learning rate, momentum and the weight decay of the network. -base_lr: 0.01 -momentum: 0.9 -weight_decay: 0.0000 -# The learning rate policy -lr_policy: "inv" -gamma: 0.0001 -power: 0.75 -# Display every 100 iterations -display: 100 -# The maximum number of iterations -<<<<<<< 86eaaa954fbb3c3cfd1d225c98e16c38af7b34a9:examples/triplet/lfw_triplet_solver.prototxt -max_iter: 50000 -# snapshot intermediate results -snapshot: 5000 -snapshot_prefix: "examples/triplet/lfw_triplet" -======= -max_iter: 20000 -# snapshot intermediate results -snapshot: 2000 -snapshot_prefix: "examples/triplet/3d_triplet" ->>>>>>> rearrange the training samples selection codes:examples/triplet/3d_triplet_solver.prototxt -# solver mode: CPU or GPU -solver_mode: CPU diff --git a/examples/triplet/mnist_orpe_train_test.prototxt.orig b/examples/triplet/mnist_orpe_train_test.prototxt.orig deleted file mode 100644 index f1a24f5a341..00000000000 --- a/examples/triplet/mnist_orpe_train_test.prototxt.orig +++ /dev/null @@ -1,184 +0,0 @@ -name: "3d_triplet_train_test" -layer { - name: "data" - type: "Data" - top: "data" - top: "sim" - include { - phase: TRAIN - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/triplet/3d_triplet_train_leveldb" - batch_size: 250 - } -} -layer { - name: "data" - type: "Data" - top: "data" - top: "sim" - include { - phase: TEST - } - transform_param { - scale: 0.00390625 - } - data_param { - source: "examples/triplet/3d_triplet_test_leveldb" - batch_size: 250 - } -} -layer { - name: "conv1" - type: "Convolution" - bottom: "data" - top: "conv1" - param { - name: "conv1_w" - lr_mult: 1 - } - param { - name: "conv1_b" - lr_mult: 2 - } - convolution_param { - num_output: 16 - kernel_size: 8 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool1" - 
type: "Pooling" - bottom: "conv1" - top: "pool1" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "relu1" - type: "ReLU" - bottom: "pool1" - top: "pool1" -} -layer { - name: "conv2" - type: "Convolution" - bottom: "pool1" - top: "conv2" - param { - name: "conv2_w" - lr_mult: 1 - } - param { - name: "conv2_b" - lr_mult: 2 - } - convolution_param { - num_output: 7 - kernel_size: 5 - stride: 1 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "pool2" - type: "Pooling" - bottom: "conv2" - top: "pool2" - pooling_param { - pool: MAX - kernel_size: 2 - stride: 2 - } -} -layer { - name: "relu2" - type: "ReLU" - bottom: "pool2" - top: "pool2" -} -layer { - name: "ip1" - type: "InnerProduct" - bottom: "pool2" - top: "ip1" - param { - name: "ip1_w" - lr_mult: 1 - } - param { - name: "ip1_b" - lr_mult: 2 - } - inner_product_param { - num_output: 256 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "relu3" - type: "ReLU" - bottom: "ip1" - top: "ip1" -} -layer { - name: "feat" - type: "InnerProduct" - bottom: "ip1" - top: "feat" - param { - name: "feat_w" - lr_mult: 1 - } - param { - name: "feat_b" - lr_mult: 2 - } - inner_product_param { - num_output: 4 - weight_filler { - type: "xavier" - } - bias_filler { - type: "constant" - } - } -} -layer { - name: "loss" - type: "TripletLoss" - bottom: "feat" - bottom: "sim" - top: "loss" - triplet_loss_param { - margin: 1 -<<<<<<< 027f1805a9555ff938185eb2ccb9a7599d35ba39:examples/triplet/mnist_orpe_train_test.prototxt -======= - losstype: 0 - num_triplets: 3 ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise:examples/triplet/3d_triplet_train_test.prototxt - } -} diff --git a/examples/triplet/readme.md.orig b/examples/triplet/readme.md.orig deleted file mode 100644 index b08d30d07f0..00000000000 --- a/examples/triplet/readme.md.orig +++ /dev/null @@ -1,243 
+0,0 @@ ---- -title: Triplet Network Tutorial -description: Train and test a triplet network on MNIST data. -category: example -include_in_docs: true -layout: default -priority: 100 ---- - -# Triplet Network Training with Caffe -This example shows how you can use weight sharing and a contrastive loss -function to learn a model using a triplet network in Caffe. - -We will assume that you have caffe successfully compiled. If not, please refer -to the [Installation page](../../installation.html). This example builds on the -[MNIST tutorial](mnist.html) so it would be a good idea to read that before -continuing. - -*The guide specifies all paths and assumes all commands are executed from the -root caffe directory* - -## Prepare Datasets - -You will first need to download and convert the data from the MNIST -website. To do this, simply run the following commands: - - ./data/mnist/get_mnist.sh - ./examples/triplet/create_mnist_triplet.sh - -After running the script there should be two datasets, -`./examples/triplet/mnist_triplet_train_leveldb`, and -`./examples/triplet/mnist_triplet_test_leveldb`. - -## The Model -First, we will define the model that we want to train using the triplet network. -We will use the convolutional net defined in -`./examples/triplet/mnist_triplet.prototxt`. This model is almost -exactly the same as the [LeNet model](mnist.html), the only difference is that -we have replaced the top layers that produced probabilities over the 10 digit -classes with a linear "feature" layer that produces a 2 dimensional vector. - - layers { - name: "feat" - type: INNER_PRODUCT - bottom: "ip2" - top: "feat" - blobs_lr: 1 - blobs_lr: 2 - inner_product_param { - num_output: 2 - } - } - -## Define the triplet Network - -In this section we will define the triplet network used for training. The -resulting network is defined in -`./examples/triplet/mnist_triplet_train_test.prototxt`. 
- -<<<<<<< f6d5f83312a008c4a1e252b2495b64b12cbe93b7 -### Reading in the Triplet Data - -We start with a data layer that reads from the LevelDB database we created -earlier. Each entry in this database contains the image data for a triplet of -images (`triplet_data`) and the label (`sim`) is not nessesary in our method. - - layers { - name: "triplet_data" - type: DATA - top: "triplet_data" -======= -### Reading in the Pair Data - -We start with a data layer that reads from the LevelDB database we created -earlier. Each entry in this database contains the image data for a pair of -images (`pair_data`) and a binary label saying if they belong to the same class -or different classes (`sim`). - - layers { - name: "pair_data" - type: DATA - top: "pair_data" ->>>>>>> triplet data generation and network update - top: "sim" - data_param { - source: "examples/triplet/mnist-triplet-train-leveldb" - scale: 0.00390625 - batch_size: 64 - } - include: { phase: TRAIN } - } - -<<<<<<< f6d5f83312a008c4a1e252b2495b64b12cbe93b7 -In order to pack a triplet of images into the same blob in the database we pack one -image per channel. We want to be able to work with these three images separately, -so we add a slice layer after the data layer. This takes the `triplet_data` and -slices it along the channel dimension so that we have a single image in `data` -and its positive image in `data_pos.` & its negative image in `data_neg.` - - layers { - name: "slice_triplet" - type: SLICE - bottom: "triplet_data" - top: "data" - top: "data_pos" - top: "data_neg" - slice_param { - slice_dim: 1 - slice_point: 1 - slice_point: 2 - } - } - -### Building the First part of the triplet Net -======= -In order to pack a pair of images into the same blob in the database we pack one -image per channel. We want to be able to work with these two images separately, -so we add a slice layer after the data layer. 
This takes the `pair_data` and -slices it along the channel dimension so that we have a single image in `data` -and its paired image in `data_p.` - - layers { - name: "slice_pair" - type: SLICE - bottom: "pair_data" - top: "data" - top: "data_p" - slice_param { - slice_dim: 1 - slice_point: 1 - } - } - -### Building the First Side of the triplet Net ->>>>>>> triplet data generation and network update - -Now we can specify the first side of the triplet net. This side operates on -`data` and produces `feat`. Starting from the net in -`./examples/triplet/mnist_triplet.prototxt` we add default weight fillers. Then -we name the parameters of the convolutional and inner product layers. Naming the -<<<<<<< f6d5f83312a008c4a1e252b2495b64b12cbe93b7 -parameters allows Caffe to share the parameters between layers on three channels of -======= -parameters allows Caffe to share the parameters between layers on both sides of ->>>>>>> triplet data generation and network update -the triplet net. In the definition this looks like: - - ... - param: "conv1_w" - param: "conv1_b" - ... - param: "conv2_w" - param: "conv2_b" - ... - param: "ip1_w" - param: "ip1_b" - ... - param: "ip2_w" - param: "ip2_b" - ... - -### Building the Second Side of the triplet Net - -Now we need to create the second path that operates on `data_pos` and produces -`feat_pos`. This path is exactly the same as the first. So we can just copy and -paste it. Then we change the name of each layer, input, and output by appending -`_pos` to differentiate the "paired" layers from the originals. - -### Building the Third Side of the triplet Net - -Now we need to create the second path that operates on `data_neg` and produces -`feat_neg`. This path is exactly the same as the first. So we can just copy and -paste it. Then we change the name of each layer, input, and output by appending -`_neg` to differentiate the "paired" layers from the originals. 
- -<<<<<<< f6d5f83312a008c4a1e252b2495b64b12cbe93b7 -### Adding the Triplet Loss Function - -To train the network we will optimize a triplet loss function proposed in: -This cost function is implemented with the `TRIPLET_LOSS` layer: -======= -### Adding the Contrastive Loss Function - -To train the network we will optimize a contrastive loss function proposed in: -Raia Hadsell, Sumit Chopra, and Yann LeCun "Dimensionality Reduction by Learning -an Invariant Mapping". This loss function encourages matching pairs to be close -together in feature space while pushing non-matching pairs apart. This cost -function is implemented with the `TRIPLET_LOSS` layer: ->>>>>>> triplet data generation and network update - - layers { - name: "loss" - type: TRIPLET_LOSS - triplet_loss_param { -<<<<<<< f6d5f83312a008c4a1e252b2495b64b12cbe93b7 - margin: 0.2 - } - bottom: "feat" - bottom: "feat_pos" - bottom: "feat_neg" -======= - margin: 1.0 - } - bottom: "feat" - bottom: "feat_pos" - bottom: "feat_neg" ->>>>>>> triplet data generation and network update - bottom: "sim" - top: "loss" - } - -## Define the Solver - -Nothing special needs to be done to the solver besides pointing it at the -correct model file. The solver is defined in -`./examples/triplet/mnist_triplet_solver.prototxt`. - -## Training and Testing the Model - -Training the model is simple after you have written the network definition -protobuf and solver protobuf files. 
Simply run -`./examples/triplet/train_mnist_triplet.sh`: - - ./examples/triplet/train_mnist_triplet.sh - -# Plotting the results - -First, we can draw the model and triplet networks by running the following -commands that draw the DAGs defined in the .prototxt files: - - ./python/draw_net.py \ - ./examples/triplet/mnist_triplet.prototxt \ - ./examples/triplet/mnist_triplet.png - - ./python/draw_net.py \ - ./examples/triplet/mnist_triplet_train_test.prototxt \ - ./examples/triplet/mnist_triplet_train_test.png - -Second, we can load the learned model and plot the features using the iPython -notebook: - - ipython notebook ./examples/triplet/mnist_triplet.ipynb - diff --git a/include/caffe/common_layers.hpp b/include/caffe/common_layers.hpp index 89bab8d6f3a..8e64b3e5dc5 100644 --- a/include/caffe/common_layers.hpp +++ b/include/caffe/common_layers.hpp @@ -85,7 +85,7 @@ class ConcatLayer : public Layer { const vector*>& top); virtual inline const char* type() const { return "Concat"; } - virtual inline int MinBottomBlobs() const { return 1; } + virtual inline int MinBottomBlobs() const { return 2; } virtual inline int ExactNumTopBlobs() const { return 1; } protected: @@ -625,7 +625,7 @@ class SliceLayer : public Layer { virtual inline const char* type() const { return "Slice"; } virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int MinTopBlobs() const { return 1; } + virtual inline int MinTopBlobs() const { return 2; } protected: virtual void Forward_cpu(const vector*>& bottom, diff --git a/include/caffe/data_layers.hpp b/include/caffe/data_layers.hpp index 90fd0d19917..552d814131e 100644 --- a/include/caffe/data_layers.hpp +++ b/include/caffe/data_layers.hpp @@ -4,6 +4,7 @@ #include #include #include + #include "hdf5.h" #include "caffe/blob.hpp" @@ -274,10 +275,8 @@ class MemoryDataLayer : public BaseDataLayer { virtual inline int ExactNumTopBlobs() const { return 2; } virtual void AddDatumVector(const vector& datum_vector); -#ifdef 
USE_OPENCV virtual void AddMatVector(const vector& mat_vector, const vector& labels); -#endif // USE_OPENCV // Reset should accept const pointers, but can't, because the memory // will be given to Blob, which is mutable diff --git a/include/caffe/data_layers.hpp.orig b/include/caffe/data_layers.hpp.orig deleted file mode 100644 index 362bbcde53d..00000000000 --- a/include/caffe/data_layers.hpp.orig +++ /dev/null @@ -1,431 +0,0 @@ -#ifndef CAFFE_DATA_LAYERS_HPP_ -#define CAFFE_DATA_LAYERS_HPP_ - -#include -#include -#include -<<<<<<< c90df3f9e2c00643425be1c4caf1b62cdf7bc398 -<<<<<<< ca976a2b520c8024cc7f41a048d2c1abe73f6022 -======= - -#include "boost/scoped_ptr.hpp" ->>>>>>> macro define in upgrade_proto -======= - ->>>>>>> add initiate class name of triplet loss layer -#include "hdf5.h" - -#include "caffe/blob.hpp" -#include "caffe/common.hpp" -<<<<<<< c90df3f9e2c00643425be1c4caf1b62cdf7bc398 -======= -#include "caffe/data_reader.hpp" ->>>>>>> add initiate class name of triplet loss layer -#include "caffe/data_transformer.hpp" -#include "caffe/filler.hpp" -#include "caffe/internal_thread.hpp" -#include "caffe/layer.hpp" -#include "caffe/net.hpp" -#include "caffe/proto/caffe.pb.h" -<<<<<<< c90df3f9e2c00643425be1c4caf1b62cdf7bc398 -======= -#include "caffe/util/blocking_queue.hpp" ->>>>>>> add initiate class name of triplet loss layer -#include "caffe/util/db.hpp" - -namespace caffe { - -/** - * @brief Provides base for data layers that feed blobs to the Net. - * - * TODO(dox): thorough documentation for Forward and proto params. - */ -template -class BaseDataLayer : public Layer { - public: - explicit BaseDataLayer(const LayerParameter& param); - virtual ~BaseDataLayer() {} - // LayerSetUp: implements common data layer setup functionality, and calls - // DataLayerSetUp to do special data layer setup for individual layer types. - // This method may not be overridden except by the BasePrefetchingDataLayer. 
- virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); -<<<<<<< c90df3f9e2c00643425be1c4caf1b62cdf7bc398 -======= - // Data layers should be shared by multiple solvers in parallel - virtual inline bool ShareInParallel() const { return true; } ->>>>>>> add initiate class name of triplet loss layer - virtual void DataLayerSetUp(const vector*>& bottom, - const vector*>& top) {} - // Data layers have no bottoms, so reshaping is trivial. - virtual void Reshape(const vector*>& bottom, - const vector*>& top) {} - - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) {} - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) {} - - protected: - TransformationParameter transform_param_; - shared_ptr > data_transformer_; - bool output_labels_; -}; - -template -<<<<<<< c90df3f9e2c00643425be1c4caf1b62cdf7bc398 -class BasePrefetchingDataLayer : - public BaseDataLayer, public InternalThread { - public: - explicit BasePrefetchingDataLayer(const LayerParameter& param) - : BaseDataLayer(param) {} -======= -class Batch { - public: - Blob data_, label_; -}; - -template -class BasePrefetchingDataLayer : - public BaseDataLayer, public InternalThread { - public: -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 - explicit BasePrefetchingDataLayer(const LayerParameter& param); -======= - explicit BasePrefetchingDataLayer(const LayerParameter& param) - : BaseDataLayer(param) {} -<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da ->>>>>>> macro define in upgrade_proto ->>>>>>> add initiate class name of triplet loss layer -======= - virtual ~BasePrefetchingDataLayer() {} ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update - // LayerSetUp: implements common data layer setup functionality, and calls - // DataLayerSetUp to do special data layer setup for individual layer types. - // This method may not be overridden. 
- void LayerSetUp(const vector*>& bottom, - const vector*>& top); - - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - -<<<<<<< c90df3f9e2c00643425be1c4caf1b62cdf7bc398 - virtual void CreatePrefetchThread(); - virtual void JoinPrefetchThread(); - // The thread's function - virtual void InternalThreadEntry() {} - - protected: - Blob prefetch_data_; - Blob prefetch_label_; -======= - // Prefetches batches (asynchronously if to GPU memory) - static const int PREFETCH_COUNT = 3; - - protected: - virtual void InternalThreadEntry(); - virtual void load_batch(Batch* batch) = 0; - - Batch prefetch_[PREFETCH_COUNT]; - BlockingQueue*> prefetch_free_; - BlockingQueue*> prefetch_full_; - ->>>>>>> add initiate class name of triplet loss layer - Blob transformed_data_; -}; - -template -class DataLayer : public BasePrefetchingDataLayer { - public: -<<<<<<< c90df3f9e2c00643425be1c4caf1b62cdf7bc398 - explicit DataLayer(const LayerParameter& param) - : BasePrefetchingDataLayer(param) {} - virtual ~DataLayer(); - virtual void DataLayerSetUp(const vector*>& bottom, - const vector*>& top); - -======= - explicit DataLayer(const LayerParameter& param); - virtual ~DataLayer(); - virtual void DataLayerSetUp(const vector*>& bottom, - const vector*>& top); - // DataLayer uses DataReader instead for sharing for parallelism - virtual inline bool ShareInParallel() const { return false; } ->>>>>>> add initiate class name of triplet loss layer - virtual inline const char* type() const { return "Data"; } - virtual inline int ExactNumBottomBlobs() const { return 0; } - virtual inline int MinTopBlobs() const { return 1; } - virtual inline int MaxTopBlobs() const { return 2; } - - protected: -<<<<<<< c90df3f9e2c00643425be1c4caf1b62cdf7bc398 - virtual void InternalThreadEntry(); - - shared_ptr db_; - shared_ptr cursor_; -======= - virtual void load_batch(Batch* batch); - - DataReader reader_; ->>>>>>> 
add initiate class name of triplet loss layer -}; - -/** - * @brief Provides data to the Net generated by a Filler. - * - * TODO(dox): thorough documentation for Forward and proto params. - */ -template -class DummyDataLayer : public Layer { - public: - explicit DummyDataLayer(const LayerParameter& param) - : Layer(param) {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); -<<<<<<< c90df3f9e2c00643425be1c4caf1b62cdf7bc398 -======= - // Data layers should be shared by multiple solvers in parallel - virtual inline bool ShareInParallel() const { return true; } ->>>>>>> add initiate class name of triplet loss layer - // Data layers have no bottoms, so reshaping is trivial. - virtual void Reshape(const vector*>& bottom, - const vector*>& top) {} - - virtual inline const char* type() const { return "DummyData"; } - virtual inline int ExactNumBottomBlobs() const { return 0; } - virtual inline int MinTopBlobs() const { return 1; } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) {} - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) {} - - vector > > fillers_; - vector refill_; -}; - -/** - * @brief Provides data to the Net from HDF5 files. - * - * TODO(dox): thorough documentation for Forward and proto params. - */ -template -class HDF5DataLayer : public Layer { - public: - explicit HDF5DataLayer(const LayerParameter& param) - : Layer(param) {} - virtual ~HDF5DataLayer(); - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); -<<<<<<< c90df3f9e2c00643425be1c4caf1b62cdf7bc398 -======= - // Data layers should be shared by multiple solvers in parallel - virtual inline bool ShareInParallel() const { return true; } ->>>>>>> add initiate class name of triplet loss layer - // Data layers have no bottoms, so reshaping is trivial. 
- virtual void Reshape(const vector*>& bottom, - const vector*>& top) {} - - virtual inline const char* type() const { return "HDF5Data"; } - virtual inline int ExactNumBottomBlobs() const { return 0; } - virtual inline int MinTopBlobs() const { return 1; } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) {} - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) {} - virtual void LoadHDF5FileData(const char* filename); - - std::vector hdf_filenames_; - unsigned int num_files_; - unsigned int current_file_; - hsize_t current_row_; - std::vector > > hdf_blobs_; - std::vector data_permutation_; - std::vector file_permutation_; -}; - -/** - * @brief Write blobs to disk as HDF5 files. - * - * TODO(dox): thorough documentation for Forward and proto params. - */ -template -class HDF5OutputLayer : public Layer { - public: - explicit HDF5OutputLayer(const LayerParameter& param) - : Layer(param), file_opened_(false) {} - virtual ~HDF5OutputLayer(); - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); -<<<<<<< c90df3f9e2c00643425be1c4caf1b62cdf7bc398 -======= - // Data layers should be shared by multiple solvers in parallel - virtual inline bool ShareInParallel() const { return true; } ->>>>>>> add initiate class name of triplet loss layer - // Data layers have no bottoms, so reshaping is trivial. 
- virtual void Reshape(const vector*>& bottom, - const vector*>& top) {} - - virtual inline const char* type() const { return "HDF5Output"; } - // TODO: no limit on the number of blobs - virtual inline int ExactNumBottomBlobs() const { return 2; } - virtual inline int ExactNumTopBlobs() const { return 0; } - - inline std::string file_name() const { return file_name_; } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void SaveBlobs(); - - bool file_opened_; - std::string file_name_; - hid_t file_id_; - Blob data_blob_; - Blob label_blob_; -}; - -/** - * @brief Provides data to the Net from image files. - * - * TODO(dox): thorough documentation for Forward and proto params. - */ -template -class ImageDataLayer : public BasePrefetchingDataLayer { - public: - explicit ImageDataLayer(const LayerParameter& param) - : BasePrefetchingDataLayer(param) {} - virtual ~ImageDataLayer(); - virtual void DataLayerSetUp(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "ImageData"; } - virtual inline int ExactNumBottomBlobs() const { return 0; } - virtual inline int ExactNumTopBlobs() const { return 2; } - - protected: - shared_ptr prefetch_rng_; - virtual void ShuffleImages(); -<<<<<<< c90df3f9e2c00643425be1c4caf1b62cdf7bc398 - virtual void InternalThreadEntry(); -======= - virtual void load_batch(Batch* batch); ->>>>>>> add initiate class name of triplet loss layer - - vector > lines_; - int lines_id_; -}; - -/** - * @brief Provides data to the Net from memory. - * - * TODO(dox): thorough documentation for Forward and proto params. 
- */ -template -class MemoryDataLayer : public BaseDataLayer { - public: - explicit MemoryDataLayer(const LayerParameter& param) - : BaseDataLayer(param), has_new_data_(false) {} - virtual void DataLayerSetUp(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "MemoryData"; } - virtual inline int ExactNumBottomBlobs() const { return 0; } - virtual inline int ExactNumTopBlobs() const { return 2; } - - virtual void AddDatumVector(const vector& datum_vector); -<<<<<<< c90df3f9e2c00643425be1c4caf1b62cdf7bc398 -#ifdef USE_OPENCV - virtual void AddMatVector(const vector& mat_vector, - const vector& labels); -#endif // USE_OPENCV -======= - virtual void AddMatVector(const vector& mat_vector, - const vector& labels); ->>>>>>> add initiate class name of triplet loss layer - - // Reset should accept const pointers, but can't, because the memory - // will be given to Blob, which is mutable - void Reset(Dtype* data, Dtype* label, int n); - void set_batch_size(int new_size); - - int batch_size() { return batch_size_; } - int channels() { return channels_; } - int height() { return height_; } - int width() { return width_; } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - - int batch_size_, channels_, height_, width_, size_; - Dtype* data_; - Dtype* labels_; - int n_; - size_t pos_; - Blob added_data_; - Blob added_label_; - bool has_new_data_; -}; - -/** - * @brief Provides data to the Net from windows of images files, specified - * by a window data file. - * - * TODO(dox): thorough documentation for Forward and proto params. 
- */ -template -class WindowDataLayer : public BasePrefetchingDataLayer { - public: - explicit WindowDataLayer(const LayerParameter& param) - : BasePrefetchingDataLayer(param) {} - virtual ~WindowDataLayer(); - virtual void DataLayerSetUp(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "WindowData"; } - virtual inline int ExactNumBottomBlobs() const { return 0; } - virtual inline int ExactNumTopBlobs() const { return 2; } - - protected: - virtual unsigned int PrefetchRand(); -<<<<<<< c90df3f9e2c00643425be1c4caf1b62cdf7bc398 - virtual void InternalThreadEntry(); -======= - virtual void load_batch(Batch* batch); ->>>>>>> add initiate class name of triplet loss layer - - shared_ptr prefetch_rng_; - vector > > image_database_; - enum WindowField { IMAGE_INDEX, LABEL, OVERLAP, X1, Y1, X2, Y2, NUM }; - vector > fg_windows_; - vector > bg_windows_; - Blob data_mean_; - vector mean_values_; - bool has_mean_file_; - bool has_mean_values_; - bool cache_images_; - vector > image_database_cache_; -}; - -} // namespace caffe - -#endif // CAFFE_DATA_LAYERS_HPP_ diff --git a/include/caffe/data_layers.hpp.orig.orig b/include/caffe/data_layers.hpp.orig.orig deleted file mode 100644 index da0118d2f9b..00000000000 --- a/include/caffe/data_layers.hpp.orig.orig +++ /dev/null @@ -1,365 +0,0 @@ -#ifndef CAFFE_DATA_LAYERS_HPP_ -#define CAFFE_DATA_LAYERS_HPP_ - -#include -#include -#include - -#include "hdf5.h" - -#include "caffe/blob.hpp" -#include "caffe/common.hpp" -#include "caffe/data_reader.hpp" -#include "caffe/data_transformer.hpp" -#include "caffe/filler.hpp" -#include "caffe/internal_thread.hpp" -#include "caffe/layer.hpp" -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -#include "caffe/net.hpp" ->>>>>>> triplet data generation and network update -#include "caffe/proto/caffe.pb.h" -#include "caffe/util/blocking_queue.hpp" -#include "caffe/util/db.hpp" - -namespace caffe { - -/** - * @brief Provides base for data 
layers that feed blobs to the Net. - * - * TODO(dox): thorough documentation for Forward and proto params. - */ -template -class BaseDataLayer : public Layer { - public: - explicit BaseDataLayer(const LayerParameter& param); -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= - virtual ~BaseDataLayer() {} ->>>>>>> triplet data generation and network update - // LayerSetUp: implements common data layer setup functionality, and calls - // DataLayerSetUp to do special data layer setup for individual layer types. - // This method may not be overridden except by the BasePrefetchingDataLayer. - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - // Data layers should be shared by multiple solvers in parallel - virtual inline bool ShareInParallel() const { return true; } - virtual void DataLayerSetUp(const vector*>& bottom, - const vector*>& top) {} - // Data layers have no bottoms, so reshaping is trivial. - virtual void Reshape(const vector*>& bottom, - const vector*>& top) {} - - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) {} - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) {} - - protected: - TransformationParameter transform_param_; - shared_ptr > data_transformer_; - bool output_labels_; -}; - -template -class Batch { - public: - Blob data_, label_; -}; - -template -class BasePrefetchingDataLayer : - public BaseDataLayer, public InternalThread { - public: -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -<<<<<<< 4d8130bab0c7311e241e40f504e5fdbb4999bd84 -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 ->>>>>>> triplet data generation and network update - explicit BasePrefetchingDataLayer(const LayerParameter& param); -======= - explicit BasePrefetchingDataLayer(const LayerParameter& param) - : BaseDataLayer(param) {} -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 ->>>>>>> macro define in upgrade_proto -======= - 
virtual ~BasePrefetchingDataLayer() {} ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update - // LayerSetUp: implements common data layer setup functionality, and calls - // DataLayerSetUp to do special data layer setup for individual layer types. - // This method may not be overridden. - void LayerSetUp(const vector*>& bottom, - const vector*>& top); - - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - - // Prefetches batches (asynchronously if to GPU memory) - static const int PREFETCH_COUNT = 3; - - protected: - virtual void InternalThreadEntry(); - virtual void load_batch(Batch* batch) = 0; - - Batch prefetch_[PREFETCH_COUNT]; - BlockingQueue*> prefetch_free_; - BlockingQueue*> prefetch_full_; - - Blob transformed_data_; -}; - -template -class DataLayer : public BasePrefetchingDataLayer { - public: - explicit DataLayer(const LayerParameter& param); - virtual ~DataLayer(); - virtual void DataLayerSetUp(const vector*>& bottom, - const vector*>& top); - // DataLayer uses DataReader instead for sharing for parallelism - virtual inline bool ShareInParallel() const { return false; } - virtual inline const char* type() const { return "Data"; } - virtual inline int ExactNumBottomBlobs() const { return 0; } - virtual inline int MinTopBlobs() const { return 1; } - virtual inline int MaxTopBlobs() const { return 2; } - - protected: - virtual void load_batch(Batch* batch); - - DataReader reader_; -}; - -/** - * @brief Provides data to the Net generated by a Filler. - * - * TODO(dox): thorough documentation for Forward and proto params. 
- */ -template -class DummyDataLayer : public Layer { - public: - explicit DummyDataLayer(const LayerParameter& param) - : Layer(param) {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - // Data layers should be shared by multiple solvers in parallel - virtual inline bool ShareInParallel() const { return true; } - // Data layers have no bottoms, so reshaping is trivial. - virtual void Reshape(const vector*>& bottom, - const vector*>& top) {} - - virtual inline const char* type() const { return "DummyData"; } - virtual inline int ExactNumBottomBlobs() const { return 0; } - virtual inline int MinTopBlobs() const { return 1; } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) {} - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) {} - - vector > > fillers_; - vector refill_; -}; - -/** - * @brief Provides data to the Net from HDF5 files. - * - * TODO(dox): thorough documentation for Forward and proto params. - */ -template -class HDF5DataLayer : public Layer { - public: - explicit HDF5DataLayer(const LayerParameter& param) - : Layer(param) {} - virtual ~HDF5DataLayer(); - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - // Data layers should be shared by multiple solvers in parallel - virtual inline bool ShareInParallel() const { return true; } - // Data layers have no bottoms, so reshaping is trivial. 
- virtual void Reshape(const vector*>& bottom, - const vector*>& top) {} - - virtual inline const char* type() const { return "HDF5Data"; } - virtual inline int ExactNumBottomBlobs() const { return 0; } - virtual inline int MinTopBlobs() const { return 1; } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) {} - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) {} - virtual void LoadHDF5FileData(const char* filename); - - std::vector hdf_filenames_; - unsigned int num_files_; - unsigned int current_file_; - hsize_t current_row_; - std::vector > > hdf_blobs_; - std::vector data_permutation_; - std::vector file_permutation_; -}; - -/** - * @brief Write blobs to disk as HDF5 files. - * - * TODO(dox): thorough documentation for Forward and proto params. - */ -template -class HDF5OutputLayer : public Layer { - public: - explicit HDF5OutputLayer(const LayerParameter& param) - : Layer(param), file_opened_(false) {} - virtual ~HDF5OutputLayer(); - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - // Data layers should be shared by multiple solvers in parallel - virtual inline bool ShareInParallel() const { return true; } - // Data layers have no bottoms, so reshaping is trivial. 
- virtual void Reshape(const vector*>& bottom, - const vector*>& top) {} - - virtual inline const char* type() const { return "HDF5Output"; } - // TODO: no limit on the number of blobs - virtual inline int ExactNumBottomBlobs() const { return 2; } - virtual inline int ExactNumTopBlobs() const { return 0; } - - inline std::string file_name() const { return file_name_; } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void SaveBlobs(); - - bool file_opened_; - std::string file_name_; - hid_t file_id_; - Blob data_blob_; - Blob label_blob_; -}; - -/** - * @brief Provides data to the Net from image files. - * - * TODO(dox): thorough documentation for Forward and proto params. - */ -template -class ImageDataLayer : public BasePrefetchingDataLayer { - public: - explicit ImageDataLayer(const LayerParameter& param) - : BasePrefetchingDataLayer(param) {} - virtual ~ImageDataLayer(); - virtual void DataLayerSetUp(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "ImageData"; } - virtual inline int ExactNumBottomBlobs() const { return 0; } - virtual inline int ExactNumTopBlobs() const { return 2; } - - protected: - shared_ptr prefetch_rng_; - virtual void ShuffleImages(); - virtual void load_batch(Batch* batch); - - vector > lines_; - int lines_id_; -}; - -/** - * @brief Provides data to the Net from memory. - * - * TODO(dox): thorough documentation for Forward and proto params. 
- */ -template -class MemoryDataLayer : public BaseDataLayer { - public: - explicit MemoryDataLayer(const LayerParameter& param) - : BaseDataLayer(param), has_new_data_(false) {} - virtual void DataLayerSetUp(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "MemoryData"; } - virtual inline int ExactNumBottomBlobs() const { return 0; } - virtual inline int ExactNumTopBlobs() const { return 2; } - - virtual void AddDatumVector(const vector& datum_vector); - virtual void AddMatVector(const vector& mat_vector, - const vector& labels); - - // Reset should accept const pointers, but can't, because the memory - // will be given to Blob, which is mutable - void Reset(Dtype* data, Dtype* label, int n); - void set_batch_size(int new_size); - - int batch_size() { return batch_size_; } - int channels() { return channels_; } - int height() { return height_; } - int width() { return width_; } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - - int batch_size_, channels_, height_, width_, size_; - Dtype* data_; - Dtype* labels_; - int n_; - size_t pos_; - Blob added_data_; - Blob added_label_; - bool has_new_data_; -}; - -/** - * @brief Provides data to the Net from windows of images files, specified - * by a window data file. - * - * TODO(dox): thorough documentation for Forward and proto params. 
- */ -template -class WindowDataLayer : public BasePrefetchingDataLayer { - public: - explicit WindowDataLayer(const LayerParameter& param) - : BasePrefetchingDataLayer(param) {} - virtual ~WindowDataLayer(); - virtual void DataLayerSetUp(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "WindowData"; } - virtual inline int ExactNumBottomBlobs() const { return 0; } - virtual inline int ExactNumTopBlobs() const { return 2; } - - protected: - virtual unsigned int PrefetchRand(); - virtual void load_batch(Batch* batch); - - shared_ptr prefetch_rng_; - vector > > image_database_; - enum WindowField { IMAGE_INDEX, LABEL, OVERLAP, X1, Y1, X2, Y2, NUM }; - vector > fg_windows_; - vector > bg_windows_; - Blob data_mean_; - vector mean_values_; - bool has_mean_file_; - bool has_mean_values_; - bool cache_images_; - vector > image_database_cache_; -}; - -} // namespace caffe - -#endif // CAFFE_DATA_LAYERS_HPP_ diff --git a/include/caffe/data_layers.hpp.orig.orig.orig b/include/caffe/data_layers.hpp.orig.orig.orig deleted file mode 100644 index 4059642d86e..00000000000 --- a/include/caffe/data_layers.hpp.orig.orig.orig +++ /dev/null @@ -1,464 +0,0 @@ -#ifndef CAFFE_DATA_LAYERS_HPP_ -#define CAFFE_DATA_LAYERS_HPP_ - -#include -#include -#include -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< ca976a2b520c8024cc7f41a048d2c1abe73f6022 -======= - -#include "boost/scoped_ptr.hpp" ->>>>>>> macro define in upgrade_proto -======= - ->>>>>>> add 3d network training param -#include "hdf5.h" - -#include "caffe/blob.hpp" -#include "caffe/common.hpp" -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= -#include "caffe/data_reader.hpp" ->>>>>>> add 3d network training param -#include "caffe/data_transformer.hpp" -#include "caffe/filler.hpp" -#include "caffe/internal_thread.hpp" -#include "caffe/layer.hpp" -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -#include "caffe/net.hpp" -#include "caffe/proto/caffe.pb.h" 
-======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -#include "caffe/net.hpp" ->>>>>>> triplet data generation and network update -#include "caffe/proto/caffe.pb.h" -#include "caffe/util/blocking_queue.hpp" ->>>>>>> add 3d network training param -#include "caffe/util/db.hpp" - -namespace caffe { - -/** - * @brief Provides base for data layers that feed blobs to the Net. - * - * TODO(dox): thorough documentation for Forward and proto params. - */ -template -class BaseDataLayer : public Layer { - public: - explicit BaseDataLayer(const LayerParameter& param); -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f - virtual ~BaseDataLayer() {} -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= - virtual ~BaseDataLayer() {} ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param - // LayerSetUp: implements common data layer setup functionality, and calls - // DataLayerSetUp to do special data layer setup for individual layer types. - // This method may not be overridden except by the BasePrefetchingDataLayer. - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= - // Data layers should be shared by multiple solvers in parallel - virtual inline bool ShareInParallel() const { return true; } ->>>>>>> add 3d network training param - virtual void DataLayerSetUp(const vector*>& bottom, - const vector*>& top) {} - // Data layers have no bottoms, so reshaping is trivial. 
- virtual void Reshape(const vector*>& bottom, - const vector*>& top) {} - - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) {} - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) {} - - protected: - TransformationParameter transform_param_; - shared_ptr > data_transformer_; - bool output_labels_; -}; - -template -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -class BasePrefetchingDataLayer : - public BaseDataLayer, public InternalThread { - public: - explicit BasePrefetchingDataLayer(const LayerParameter& param) - : BaseDataLayer(param) {} -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 -======= -======= -======= ->>>>>>> add 3d network training param -class Batch { - public: - Blob data_, label_; -}; - -template -class BasePrefetchingDataLayer : - public BaseDataLayer, public InternalThread { - public: -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -<<<<<<< 4d8130bab0c7311e241e40f504e5fdbb4999bd84 -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param - explicit BasePrefetchingDataLayer(const LayerParameter& param); -======= - explicit BasePrefetchingDataLayer(const LayerParameter& param) - : BaseDataLayer(param) {} -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da ->>>>>>> macro define in upgrade_proto ->>>>>>> add initiate class name of triplet loss layer -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 ->>>>>>> macro define in upgrade_proto ->>>>>>> add 3d network training param -======= - virtual ~BasePrefetchingDataLayer() {} ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f ->>>>>>> 
triplet data generation and network update -======= ->>>>>>> add 3d network training param - // LayerSetUp: implements common data layer setup functionality, and calls - // DataLayerSetUp to do special data layer setup for individual layer types. - // This method may not be overridden. - void LayerSetUp(const vector*>& bottom, - const vector*>& top); - - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f - virtual void CreatePrefetchThread(); - virtual void JoinPrefetchThread(); - // The thread's function - virtual void InternalThreadEntry() {} - - protected: - Blob prefetch_data_; - Blob prefetch_label_; -======= - // Prefetches batches (asynchronously if to GPU memory) - static const int PREFETCH_COUNT = 3; - - protected: - virtual void InternalThreadEntry(); - virtual void load_batch(Batch* batch) = 0; - - Batch prefetch_[PREFETCH_COUNT]; - BlockingQueue*> prefetch_free_; - BlockingQueue*> prefetch_full_; - ->>>>>>> add 3d network training param - Blob transformed_data_; -}; - -template -class DataLayer : public BasePrefetchingDataLayer { - public: -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f - explicit DataLayer(const LayerParameter& param) - : BasePrefetchingDataLayer(param) {} - virtual ~DataLayer(); - virtual void DataLayerSetUp(const vector*>& bottom, - const vector*>& top); - -======= - explicit DataLayer(const LayerParameter& param); - virtual ~DataLayer(); - virtual void DataLayerSetUp(const vector*>& bottom, - const vector*>& top); - // DataLayer uses DataReader instead for sharing for parallelism - virtual inline bool ShareInParallel() const { return false; } ->>>>>>> add 3d network training param - virtual inline const char* type() const { return "Data"; } - virtual inline int ExactNumBottomBlobs() const { return 0; } - virtual inline int MinTopBlobs() const { return 1; } - virtual inline int 
MaxTopBlobs() const { return 2; } - - protected: -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f - virtual void InternalThreadEntry(); - - shared_ptr db_; - shared_ptr cursor_; -======= - virtual void load_batch(Batch* batch); - - DataReader reader_; ->>>>>>> add 3d network training param -}; - -/** - * @brief Provides data to the Net generated by a Filler. - * - * TODO(dox): thorough documentation for Forward and proto params. - */ -template -class DummyDataLayer : public Layer { - public: - explicit DummyDataLayer(const LayerParameter& param) - : Layer(param) {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= - // Data layers should be shared by multiple solvers in parallel - virtual inline bool ShareInParallel() const { return true; } ->>>>>>> add 3d network training param - // Data layers have no bottoms, so reshaping is trivial. - virtual void Reshape(const vector*>& bottom, - const vector*>& top) {} - - virtual inline const char* type() const { return "DummyData"; } - virtual inline int ExactNumBottomBlobs() const { return 0; } - virtual inline int MinTopBlobs() const { return 1; } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) {} - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) {} - - vector > > fillers_; - vector refill_; -}; - -/** - * @brief Provides data to the Net from HDF5 files. - * - * TODO(dox): thorough documentation for Forward and proto params. 
- */ -template -class HDF5DataLayer : public Layer { - public: - explicit HDF5DataLayer(const LayerParameter& param) - : Layer(param) {} - virtual ~HDF5DataLayer(); - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= - // Data layers should be shared by multiple solvers in parallel - virtual inline bool ShareInParallel() const { return true; } ->>>>>>> add 3d network training param - // Data layers have no bottoms, so reshaping is trivial. - virtual void Reshape(const vector*>& bottom, - const vector*>& top) {} - - virtual inline const char* type() const { return "HDF5Data"; } - virtual inline int ExactNumBottomBlobs() const { return 0; } - virtual inline int MinTopBlobs() const { return 1; } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) {} - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) {} - virtual void LoadHDF5FileData(const char* filename); - - std::vector hdf_filenames_; - unsigned int num_files_; - unsigned int current_file_; - hsize_t current_row_; - std::vector > > hdf_blobs_; - std::vector data_permutation_; - std::vector file_permutation_; -}; - -/** - * @brief Write blobs to disk as HDF5 files. - * - * TODO(dox): thorough documentation for Forward and proto params. 
- */ -template -class HDF5OutputLayer : public Layer { - public: - explicit HDF5OutputLayer(const LayerParameter& param) - : Layer(param), file_opened_(false) {} - virtual ~HDF5OutputLayer(); - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= - // Data layers should be shared by multiple solvers in parallel - virtual inline bool ShareInParallel() const { return true; } ->>>>>>> add 3d network training param - // Data layers have no bottoms, so reshaping is trivial. - virtual void Reshape(const vector*>& bottom, - const vector*>& top) {} - - virtual inline const char* type() const { return "HDF5Output"; } - // TODO: no limit on the number of blobs - virtual inline int ExactNumBottomBlobs() const { return 2; } - virtual inline int ExactNumTopBlobs() const { return 0; } - - inline std::string file_name() const { return file_name_; } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void SaveBlobs(); - - bool file_opened_; - std::string file_name_; - hid_t file_id_; - Blob data_blob_; - Blob label_blob_; -}; - -/** - * @brief Provides data to the Net from image files. - * - * TODO(dox): thorough documentation for Forward and proto params. 
- */ -template -class ImageDataLayer : public BasePrefetchingDataLayer { - public: - explicit ImageDataLayer(const LayerParameter& param) - : BasePrefetchingDataLayer(param) {} - virtual ~ImageDataLayer(); - virtual void DataLayerSetUp(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "ImageData"; } - virtual inline int ExactNumBottomBlobs() const { return 0; } - virtual inline int ExactNumTopBlobs() const { return 2; } - - protected: - shared_ptr prefetch_rng_; - virtual void ShuffleImages(); -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f - virtual void InternalThreadEntry(); -======= - virtual void load_batch(Batch* batch); ->>>>>>> add 3d network training param - - vector > lines_; - int lines_id_; -}; - -/** - * @brief Provides data to the Net from memory. - * - * TODO(dox): thorough documentation for Forward and proto params. - */ -template -class MemoryDataLayer : public BaseDataLayer { - public: - explicit MemoryDataLayer(const LayerParameter& param) - : BaseDataLayer(param), has_new_data_(false) {} - virtual void DataLayerSetUp(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "MemoryData"; } - virtual inline int ExactNumBottomBlobs() const { return 0; } - virtual inline int ExactNumTopBlobs() const { return 2; } - - virtual void AddDatumVector(const vector& datum_vector); -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -#ifdef USE_OPENCV - virtual void AddMatVector(const vector& mat_vector, - const vector& labels); -#endif // USE_OPENCV -======= - virtual void AddMatVector(const vector& mat_vector, - const vector& labels); ->>>>>>> add 3d network training param - - // Reset should accept const pointers, but can't, because the memory - // will be given to Blob, which is mutable - void Reset(Dtype* data, Dtype* label, int n); - void set_batch_size(int new_size); - - int batch_size() { return batch_size_; } - int channels() { return channels_; } - int 
height() { return height_; } - int width() { return width_; } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - - int batch_size_, channels_, height_, width_, size_; - Dtype* data_; - Dtype* labels_; - int n_; - size_t pos_; - Blob added_data_; - Blob added_label_; - bool has_new_data_; -}; - -/** - * @brief Provides data to the Net from windows of images files, specified - * by a window data file. - * - * TODO(dox): thorough documentation for Forward and proto params. - */ -template -class WindowDataLayer : public BasePrefetchingDataLayer { - public: - explicit WindowDataLayer(const LayerParameter& param) - : BasePrefetchingDataLayer(param) {} - virtual ~WindowDataLayer(); - virtual void DataLayerSetUp(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "WindowData"; } - virtual inline int ExactNumBottomBlobs() const { return 0; } - virtual inline int ExactNumTopBlobs() const { return 2; } - - protected: - virtual unsigned int PrefetchRand(); -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f - virtual void InternalThreadEntry(); -======= - virtual void load_batch(Batch* batch); ->>>>>>> add 3d network training param - - shared_ptr prefetch_rng_; - vector > > image_database_; - enum WindowField { IMAGE_INDEX, LABEL, OVERLAP, X1, Y1, X2, Y2, NUM }; - vector > fg_windows_; - vector > bg_windows_; - Blob data_mean_; - vector mean_values_; - bool has_mean_file_; - bool has_mean_values_; - bool cache_images_; - vector > image_database_cache_; -}; - -} // namespace caffe - -#endif // CAFFE_DATA_LAYERS_HPP_ diff --git a/include/caffe/data_transformer.hpp.orig b/include/caffe/data_transformer.hpp.orig deleted file mode 100644 index 2cf205c6c8c..00000000000 --- a/include/caffe/data_transformer.hpp.orig +++ /dev/null @@ -1,167 +0,0 @@ -#ifndef CAFFE_DATA_TRANSFORMER_HPP -#define CAFFE_DATA_TRANSFORMER_HPP - -#include - -#include "caffe/blob.hpp" -#include "caffe/common.hpp" -#include 
"caffe/proto/caffe.pb.h" - -namespace caffe { - -/** - * @brief Applies common transformations to the input data, such as - * scaling, mirroring, substracting the image mean... - */ -template -class DataTransformer { - public: - explicit DataTransformer(const TransformationParameter& param, Phase phase); - virtual ~DataTransformer() {} - - /** - * @brief Initialize the Random number generations if needed by the - * transformation. - */ - void InitRand(); - - /** - * @brief Applies the transformation defined in the data layer's - * transform_param block to the data. - * - * @param datum - * Datum containing the data to be transformed. - * @param transformed_blob - * This is destination blob. It can be part of top blob's data if - * set_cpu_data() is used. See data_layer.cpp for an example. - */ - void Transform(const Datum& datum, Blob* transformed_blob); - - /** - * @brief Applies the transformation defined in the data layer's - * transform_param block to a vector of Datum. - * - * @param datum_vector - * A vector of Datum containing the data to be transformed. - * @param transformed_blob - * This is destination blob. It can be part of top blob's data if - * set_cpu_data() is used. See memory_layer.cpp for an example. - */ - void Transform(const vector & datum_vector, - Blob* transformed_blob); - -#ifdef USE_OPENCV - /** - * @brief Applies the transformation defined in the data layer's - * transform_param block to a vector of Mat. - * - * @param mat_vector - * A vector of Mat containing the data to be transformed. - * @param transformed_blob - * This is destination blob. It can be part of top blob's data if - * set_cpu_data() is used. See memory_layer.cpp for an example. - */ - void Transform(const vector & mat_vector, - Blob* transformed_blob); - - /** - * @brief Applies the transformation defined in the data layer's - * transform_param block to a cv::Mat - * - * @param cv_img - * cv::Mat containing the data to be transformed. 
- * @param transformed_blob - * This is destination blob. It can be part of top blob's data if - * set_cpu_data() is used. See image_data_layer.cpp for an example. - */ - void Transform(const cv::Mat& cv_img, Blob* transformed_blob); -#endif // USE_OPENCV - - /** - * @brief Applies the same transformation defined in the data layer's - * transform_param block to all the num images in a input_blob. - * - * @param input_blob - * A Blob containing the data to be transformed. It applies the same - * transformation to all the num images in the blob. - * @param transformed_blob - * This is destination blob, it will contain as many images as the - * input blob. It can be part of top blob's data. - */ - void Transform(Blob* input_blob, Blob* transformed_blob); - -<<<<<<< ca976a2b520c8024cc7f41a048d2c1abe73f6022 -<<<<<<< ed5d44f53efe58e4cc2cd299f23c5164cbd7172c -======= ->>>>>>> macro define in upgrade_proto - /** - * @brief Infers the shape of transformed_blob will have when - * the transformation is applied to the data. - * - * @param datum - * Datum containing the data to be transformed. - */ - vector InferBlobShape(const Datum& datum); - /** - * @brief Infers the shape of transformed_blob will have when - * the transformation is applied to the data. - * It uses the first element to infer the shape of the blob. - * - * @param datum_vector - * A vector of Datum containing the data to be transformed. - */ - vector InferBlobShape(const vector & datum_vector); - /** - * @brief Infers the shape of transformed_blob will have when - * the transformation is applied to the data. - * It uses the first element to infer the shape of the blob. - * - * @param mat_vector - * A vector of Mat containing the data to be transformed. 
- */ -<<<<<<< ca976a2b520c8024cc7f41a048d2c1abe73f6022 -#ifdef USE_OPENCV -======= ->>>>>>> macro define in upgrade_proto - vector InferBlobShape(const vector & mat_vector); - /** - * @brief Infers the shape of transformed_blob will have when - * the transformation is applied to the data. - * - * @param cv_img - * cv::Mat containing the data to be transformed. - */ - vector InferBlobShape(const cv::Mat& cv_img); -<<<<<<< ca976a2b520c8024cc7f41a048d2c1abe73f6022 -#endif // USE_OPENCV - -======= ->>>>>>> New triplet loss layer added(beta1 version-no test source files) -======= - ->>>>>>> macro define in upgrade_proto - protected: - /** - * @brief Generates a random integer from Uniform({0, 1, ..., n-1}). - * - * @param n - * The upperbound (exclusive) value of the random number. - * @return - * A uniformly random integer value from ({0, 1, ..., n-1}). - */ - virtual int Rand(int n); - - void Transform(const Datum& datum, Dtype* transformed_data); - // Tranformation parameters - TransformationParameter param_; - - - shared_ptr rng_; - Phase phase_; - Blob data_mean_; - vector mean_values_; -}; - -} // namespace caffe - -#endif // CAFFE_DATA_TRANSFORMER_HPP_ diff --git a/include/caffe/filler.hpp b/include/caffe/filler.hpp index ff3542e1f99..888f4a4ba3b 100644 --- a/include/caffe/filler.hpp +++ b/include/caffe/filler.hpp @@ -208,6 +208,60 @@ class MSRAFiller : public Filler { } }; +/*! +@brief Fills a Blob with coefficients for bilinear interpolation. + +A common use case is with the DeconvolutionLayer acting as upsampling. +You can upsample a feature map with shape of (B, C, H, W) by any integer factor +using the following proto. 
+\code +layer { + name: "upsample", type: "Deconvolution" + bottom: "{{bottom_name}}" top: "{{top_name}}" + convolution_param { + kernel_size: {{2 * factor - factor % 2}} stride: {{factor}} + num_output: {{C}} group: {{C}} + pad: {{ceil((factor - 1) / 2.)}} + weight_filler: { type: "bilinear" } bias_term: false + } + param { lr_mult: 0 decay_mult: 0 } +} +\endcode +Please use this by replacing `{{}}` with your values. By specifying +`num_output: {{C}} group: {{C}}`, it behaves as +channel-wise convolution. The filter shape of this deconvolution layer will be +(C, 1, K, K) where K is `kernel_size`, and this filler will set a (K, K) +interpolation kernel for every channel of the filter identically. The resulting +shape of the top feature map will be (B, C, factor * H, factor * W). +Note that the learning rate and the +weight decay are set to 0 in order to keep coefficient values of bilinear +interpolation unchanged during training. If you apply this to an image, this +operation is equivalent to the following call in Python with Scikit.Image. +\code{.py} +out = skimage.transform.rescale(img, factor, mode='constant', cval=0) +\endcode + */ +template +class BilinearFiller : public Filler { + public: + explicit BilinearFiller(const FillerParameter& param) + : Filler(param) {} + virtual void Fill(Blob* blob) { + CHECK_EQ(blob->num_axes(), 4) << "Blob must be 4 dim."; + CHECK_EQ(blob->width(), blob->height()) << "Filter must be square"; + Dtype* data = blob->mutable_cpu_data(); + int f = ceil(blob->width() / 2.); + float c = (2 * f - 1 - f % 2) / (2. * f); + for (int i = 0; i < blob->count(); ++i) { + float x = i % blob->width(); + float y = (i / blob->width()) % blob->height(); + data[i] = (1 - fabs(x / f - c)) * (1 - fabs(y / f - c)); + } + CHECK_EQ(this->filler_param_.sparse(), -1) + << "Sparsity not supported by this Filler."; + } +}; + /** * @brief Get a specific filler from the specification given in FillerParameter. 
* @@ -229,6 +283,8 @@ Filler* GetFiller(const FillerParameter& param) { return new XavierFiller(param); } else if (type == "msra") { return new MSRAFiller(param); + } else if (type == "bilinear") { + return new BilinearFiller(param); } else { CHECK(false) << "Unknown filler name: " << param.type(); } diff --git a/include/caffe/filler.hpp.orig b/include/caffe/filler.hpp.orig deleted file mode 100644 index 006d994f75f..00000000000 --- a/include/caffe/filler.hpp.orig +++ /dev/null @@ -1,291 +0,0 @@ -// Fillers are random number generators that fills a blob using the specified -// algorithm. The expectation is that they are only going to be used during -// initialization time and will not involve any GPUs. - -#ifndef CAFFE_FILLER_HPP -#define CAFFE_FILLER_HPP - -#include - -#include "caffe/blob.hpp" -#include "caffe/common.hpp" -#include "caffe/proto/caffe.pb.h" -#include "caffe/syncedmem.hpp" -#include "caffe/util/math_functions.hpp" - -namespace caffe { - -/// @brief Fills a Blob with constant or randomly-generated data. -template -class Filler { - public: - explicit Filler(const FillerParameter& param) : filler_param_(param) {} - virtual ~Filler() {} - virtual void Fill(Blob* blob) = 0; - protected: - FillerParameter filler_param_; -}; // class Filler - - -/// @brief Fills a Blob with constant values @f$ x = 0 @f$. -template -class ConstantFiller : public Filler { - public: - explicit ConstantFiller(const FillerParameter& param) - : Filler(param) {} - virtual void Fill(Blob* blob) { - Dtype* data = blob->mutable_cpu_data(); - const int count = blob->count(); - const Dtype value = this->filler_param_.value(); - CHECK(count); - for (int i = 0; i < count; ++i) { - data[i] = value; - } - CHECK_EQ(this->filler_param_.sparse(), -1) - << "Sparsity not supported by this Filler."; - } -}; - -/// @brief Fills a Blob with uniformly distributed values @f$ x\sim U(a, b) @f$. 
-template -class UniformFiller : public Filler { - public: - explicit UniformFiller(const FillerParameter& param) - : Filler(param) {} - virtual void Fill(Blob* blob) { - CHECK(blob->count()); - caffe_rng_uniform(blob->count(), Dtype(this->filler_param_.min()), - Dtype(this->filler_param_.max()), blob->mutable_cpu_data()); - CHECK_EQ(this->filler_param_.sparse(), -1) - << "Sparsity not supported by this Filler."; - } -}; - -/// @brief Fills a Blob with Gaussian-distributed values @f$ x = a @f$. -template -class GaussianFiller : public Filler { - public: - explicit GaussianFiller(const FillerParameter& param) - : Filler(param) {} - virtual void Fill(Blob* blob) { - Dtype* data = blob->mutable_cpu_data(); - CHECK(blob->count()); - caffe_rng_gaussian(blob->count(), Dtype(this->filler_param_.mean()), - Dtype(this->filler_param_.std()), blob->mutable_cpu_data()); - int sparse = this->filler_param_.sparse(); - CHECK_GE(sparse, -1); - if (sparse >= 0) { - // Sparse initialization is implemented for "weight" blobs; i.e. matrices. - // These have num == channels == 1; width is number of inputs; height is - // number of outputs. The 'sparse' variable specifies the mean number - // of non-zero input weights for a given output. - CHECK_GE(blob->num_axes(), 1); - const int num_outputs = blob->shape(0); - Dtype non_zero_probability = Dtype(sparse) / Dtype(num_outputs); - rand_vec_.reset(new SyncedMemory(blob->count() * sizeof(int))); - int* mask = reinterpret_cast(rand_vec_->mutable_cpu_data()); - caffe_rng_bernoulli(blob->count(), non_zero_probability, mask); - for (int i = 0; i < blob->count(); ++i) { - data[i] *= mask[i]; - } - } - } - - protected: - shared_ptr rand_vec_; -}; - -/** @brief Fills a Blob with values @f$ x \in [0, 1] @f$ - * such that @f$ \forall i \sum_j x_{ij} = 1 @f$. 
- */ -template -class PositiveUnitballFiller : public Filler { - public: - explicit PositiveUnitballFiller(const FillerParameter& param) - : Filler(param) {} - virtual void Fill(Blob* blob) { - Dtype* data = blob->mutable_cpu_data(); - DCHECK(blob->count()); - caffe_rng_uniform(blob->count(), 0, 1, blob->mutable_cpu_data()); - // We expect the filler to not be called very frequently, so we will - // just use a simple implementation - int dim = blob->count() / blob->num(); - CHECK(dim); - for (int i = 0; i < blob->num(); ++i) { - Dtype sum = 0; - for (int j = 0; j < dim; ++j) { - sum += data[i * dim + j]; - } - for (int j = 0; j < dim; ++j) { - data[i * dim + j] /= sum; - } - } - CHECK_EQ(this->filler_param_.sparse(), -1) - << "Sparsity not supported by this Filler."; - } -}; - -/** - * @brief Fills a Blob with values @f$ x \sim U(-a, +a) @f$ where @f$ a @f$ - * is set inversely proportional to the number of incoming nodes. - * - * A Filler based on the paper [Bengio and Glorot 2010]: Understanding - * the difficulty of training deep feedforward neuralnetworks, but does not - * use the fan_out value. - * - * It fills the incoming matrix by randomly sampling uniform data from - * [-scale, scale] where scale = sqrt(3 / fan_in) where fan_in is the number - * of input nodes. You should make sure the input blob has shape (num, a, b, c) - * where a * b * c = fan_in. - * - * TODO(dox): make notation in above comment consistent with rest & use LaTeX. 
- */ -template -class XavierFiller : public Filler { - public: - explicit XavierFiller(const FillerParameter& param) - : Filler(param) {} - virtual void Fill(Blob* blob) { - CHECK(blob->count()); - int fan_in = blob->count() / blob->num(); - Dtype scale = sqrt(Dtype(3) / fan_in); - caffe_rng_uniform(blob->count(), -scale, scale, - blob->mutable_cpu_data()); - CHECK_EQ(this->filler_param_.sparse(), -1) - << "Sparsity not supported by this Filler."; - } -}; - -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46:include/caffe/filler.hpp -/** - * @brief Fills a Blob with values @f$ x \sim N(0, \sigma^2) @f$ where - * @f$ \sigma^2 @f$ is set inversely proportional to number of incoming - * nodes, outgoing nodes, or their average. - * - * A Filler based on the paper [He, Zhang, Ren and Sun 2015]: Specifically - * accounts for ReLU nonlinearities. - * - * Aside: for another perspective on the scaling factor, see the derivation of - * [Saxe, McClelland, and Ganguli 2013 (v3)]. - * - * It fills the incoming matrix by randomly sampling Gaussian data with std = - * sqrt(2 / n) where n is the fan_in, fan_out, or their average, depending on - * the variance_norm option. You should make sure the input blob has shape (num, - * a, b, c) where a * b * c = fan_in and num * b * c = fan_out. Note that this - * is currently not the case for inner product layers. 
- */ -template -class MSRAFiller : public Filler { - public: - explicit MSRAFiller(const FillerParameter& param) - : Filler(param) {} - virtual void Fill(Blob* blob) { - CHECK(blob->count()); - int fan_in = blob->count() / blob->num(); - int fan_out = blob->count() / blob->channels(); - Dtype n = fan_in; // default to fan_in - if (this->filler_param_.variance_norm() == - FillerParameter_VarianceNorm_AVERAGE) { - n = (fan_in + fan_out) / Dtype(2); - } else if (this->filler_param_.variance_norm() == - FillerParameter_VarianceNorm_FAN_OUT) { - n = fan_out; - } - Dtype std = sqrt(Dtype(2) / n); - caffe_rng_gaussian(blob->count(), Dtype(0), std, - blob->mutable_cpu_data()); -======= - -/*! -@brief Fills a Blob with coefficients for bilinear interpolation. - -A common use case is with the DeconvolutionLayer acting as upsampling. -You can upsample a feature map with shape of (B, C, H, W) by any integer factor -using the following proto. -\code -layer { - name: "upsample", type: "Deconvolution" - bottom: "{{bottom_name}}" top: "{{top_name}}" - convolution_param { - kernel_size: {{2 * factor - factor % 2}} stride: {{factor}} - num_output: {{C}} group: {{C}} - pad: {{ceil((factor - 1) / 2.)}} - weight_filler: { type: "bilinear" } bias_term: false - } - param { lr_mult: 0 decay_mult: 0 } -} -\endcode -Please use this by replacing `{{}}` with your values. By specifying -`num_output: {{C}} group: {{C}}`, it behaves as -channel-wise convolution. The filter shape of this deconvolution layer will be -(C, 1, K, K) where K is `kernel_size`, and this filler will set a (K, K) -interpolation kernel for every channel of the filter identically. The resulting -shape of the top feature map will be (B, C, factor * H, factor * W). -Note that the learning rate and the -weight decay are set to 0 in order to keep coefficient values of bilinear -interpolation unchanged during training. If you apply this to an image, this -operation is equivalent to the following call in Python with Scikit.Image. 
-\code{.py} -out = skimage.transform.rescale(img, factor, mode='constant', cval=0) -\endcode - */ -template -class BilinearFiller : public Filler { - public: - explicit BilinearFiller(const FillerParameter& param) - : Filler(param) {} - virtual void Fill(Blob* blob) { - CHECK_EQ(blob->num_axes(), 4) << "Blob must be 4 dim."; - CHECK_EQ(blob->width(), blob->height()) << "Filter must be square"; - Dtype* data = blob->mutable_cpu_data(); - int f = ceil(blob->width() / 2.); - float c = (2 * f - 1 - f % 2) / (2. * f); - for (int i = 0; i < blob->count(); ++i) { - float x = i % blob->width(); - float y = (i / blob->width()) % blob->height(); - data[i] = (1 - fabs(x / f - c)) * (1 - fabs(y / f - c)); - } ->>>>>>> triplet data generation and network update:include/caffe/filler.hpp.orig - CHECK_EQ(this->filler_param_.sparse(), -1) - << "Sparsity not supported by this Filler."; - } -}; - -/** - * @brief Get a specific filler from the specification given in FillerParameter. - * - * Ideally this would be replaced by a factory pattern, but we will leave it - * this way for now. 
- */ -template -Filler* GetFiller(const FillerParameter& param) { - const std::string& type = param.type(); - if (type == "constant") { - return new ConstantFiller(param); - } else if (type == "gaussian") { - return new GaussianFiller(param); - } else if (type == "positive_unitball") { - return new PositiveUnitballFiller(param); - } else if (type == "uniform") { - return new UniformFiller(param); - } else if (type == "xavier") { - return new XavierFiller(param); -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 - } else if (type == "msra") { - return new MSRAFiller(param); -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46:include/caffe/filler.hpp -======= - } else if (type == "bilinear") { - return new BilinearFiller(param); -======= ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update:include/caffe/filler.hpp.orig - } else { - CHECK(false) << "Unknown filler name: " << param.type(); - } - return (Filler*)(NULL); -} - -} // namespace caffe - -#endif // CAFFE_FILLER_HPP_ diff --git a/include/caffe/filler.hpp.orig.orig b/include/caffe/filler.hpp.orig.orig deleted file mode 100644 index 3b69e97f81c..00000000000 --- a/include/caffe/filler.hpp.orig.orig +++ /dev/null @@ -1,336 +0,0 @@ -// Fillers are random number generators that fills a blob using the specified -// algorithm. The expectation is that they are only going to be used during -// initialization time and will not involve any GPUs. - -#ifndef CAFFE_FILLER_HPP -#define CAFFE_FILLER_HPP - -#include - -#include "caffe/blob.hpp" -#include "caffe/common.hpp" -#include "caffe/proto/caffe.pb.h" -#include "caffe/syncedmem.hpp" -#include "caffe/util/math_functions.hpp" - -namespace caffe { - -/// @brief Fills a Blob with constant or randomly-generated data. 
-template -class Filler { - public: - explicit Filler(const FillerParameter& param) : filler_param_(param) {} - virtual ~Filler() {} - virtual void Fill(Blob* blob) = 0; - protected: - FillerParameter filler_param_; -}; // class Filler - - -/// @brief Fills a Blob with constant values @f$ x = 0 @f$. -template -class ConstantFiller : public Filler { - public: - explicit ConstantFiller(const FillerParameter& param) - : Filler(param) {} - virtual void Fill(Blob* blob) { - Dtype* data = blob->mutable_cpu_data(); - const int count = blob->count(); - const Dtype value = this->filler_param_.value(); - CHECK(count); - for (int i = 0; i < count; ++i) { - data[i] = value; - } - CHECK_EQ(this->filler_param_.sparse(), -1) - << "Sparsity not supported by this Filler."; - } -}; - -/// @brief Fills a Blob with uniformly distributed values @f$ x\sim U(a, b) @f$. -template -class UniformFiller : public Filler { - public: - explicit UniformFiller(const FillerParameter& param) - : Filler(param) {} - virtual void Fill(Blob* blob) { - CHECK(blob->count()); - caffe_rng_uniform(blob->count(), Dtype(this->filler_param_.min()), - Dtype(this->filler_param_.max()), blob->mutable_cpu_data()); - CHECK_EQ(this->filler_param_.sparse(), -1) - << "Sparsity not supported by this Filler."; - } -}; - -/// @brief Fills a Blob with Gaussian-distributed values @f$ x = a @f$. -template -class GaussianFiller : public Filler { - public: - explicit GaussianFiller(const FillerParameter& param) - : Filler(param) {} - virtual void Fill(Blob* blob) { - Dtype* data = blob->mutable_cpu_data(); - CHECK(blob->count()); - caffe_rng_gaussian(blob->count(), Dtype(this->filler_param_.mean()), - Dtype(this->filler_param_.std()), blob->mutable_cpu_data()); - int sparse = this->filler_param_.sparse(); - CHECK_GE(sparse, -1); - if (sparse >= 0) { - // Sparse initialization is implemented for "weight" blobs; i.e. matrices. - // These have num == channels == 1; width is number of inputs; height is - // number of outputs. 
The 'sparse' variable specifies the mean number - // of non-zero input weights for a given output. - CHECK_GE(blob->num_axes(), 1); - const int num_outputs = blob->shape(0); - Dtype non_zero_probability = Dtype(sparse) / Dtype(num_outputs); - rand_vec_.reset(new SyncedMemory(blob->count() * sizeof(int))); - int* mask = reinterpret_cast(rand_vec_->mutable_cpu_data()); - caffe_rng_bernoulli(blob->count(), non_zero_probability, mask); - for (int i = 0; i < blob->count(); ++i) { - data[i] *= mask[i]; - } - } - } - - protected: - shared_ptr rand_vec_; -}; - -/** @brief Fills a Blob with values @f$ x \in [0, 1] @f$ - * such that @f$ \forall i \sum_j x_{ij} = 1 @f$. - */ -template -class PositiveUnitballFiller : public Filler { - public: - explicit PositiveUnitballFiller(const FillerParameter& param) - : Filler(param) {} - virtual void Fill(Blob* blob) { - Dtype* data = blob->mutable_cpu_data(); - DCHECK(blob->count()); - caffe_rng_uniform(blob->count(), 0, 1, blob->mutable_cpu_data()); - // We expect the filler to not be called very frequently, so we will - // just use a simple implementation - int dim = blob->count() / blob->num(); - CHECK(dim); - for (int i = 0; i < blob->num(); ++i) { - Dtype sum = 0; - for (int j = 0; j < dim; ++j) { - sum += data[i * dim + j]; - } - for (int j = 0; j < dim; ++j) { - data[i * dim + j] /= sum; - } - } - CHECK_EQ(this->filler_param_.sparse(), -1) - << "Sparsity not supported by this Filler."; - } -}; - -/** -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 - * @brief Fills a Blob with values @f$ x \sim U(-a, +a) @f$ where @f$ a @f$ is - * set inversely proportional to number of incoming nodes, outgoing - * nodes, or their average. - * - * A Filler based on the paper [Bengio and Glorot 2010]: Understanding - * the difficulty of training deep feedforward neuralnetworks. 
- * - * It fills the incoming matrix by randomly sampling uniform data from [-scale, - * scale] where scale = sqrt(3 / n) where n is the fan_in, fan_out, or their - * average, depending on the variance_norm option. You should make sure the - * input blob has shape (num, a, b, c) where a * b * c = fan_in and num * b * c - * = fan_out. Note that this is currently not the case for inner product layers. -======= - * @brief Fills a Blob with values @f$ x \sim U(-a, +a) @f$ where @f$ a @f$ - * is set inversely proportional to the number of incoming nodes. - * - * A Filler based on the paper [Bengio and Glorot 2010]: Understanding - * the difficulty of training deep feedforward neuralnetworks, but does not - * use the fan_out value. - * - * It fills the incoming matrix by randomly sampling uniform data from - * [-scale, scale] where scale = sqrt(3 / fan_in) where fan_in is the number - * of input nodes. You should make sure the input blob has shape (num, a, b, c) - * where a * b * c = fan_in. ->>>>>>> triplet data generation and network update - * - * TODO(dox): make notation in above comment consistent with rest & use LaTeX. 
- */ -template -class XavierFiller : public Filler { - public: - explicit XavierFiller(const FillerParameter& param) - : Filler(param) {} - virtual void Fill(Blob* blob) { - CHECK(blob->count()); - int fan_in = blob->count() / blob->num(); -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 - int fan_out = blob->count() / blob->channels(); - Dtype n = fan_in; // default to fan_in - if (this->filler_param_.variance_norm() == - FillerParameter_VarianceNorm_AVERAGE) { - n = (fan_in + fan_out) / Dtype(2); - } else if (this->filler_param_.variance_norm() == - FillerParameter_VarianceNorm_FAN_OUT) { - n = fan_out; - } - Dtype scale = sqrt(Dtype(3) / n); -======= - Dtype scale = sqrt(Dtype(3) / fan_in); ->>>>>>> triplet data generation and network update - caffe_rng_uniform(blob->count(), -scale, scale, - blob->mutable_cpu_data()); - CHECK_EQ(this->filler_param_.sparse(), -1) - << "Sparsity not supported by this Filler."; - } -}; - -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -/** - * @brief Fills a Blob with values @f$ x \sim N(0, \sigma^2) @f$ where - * @f$ \sigma^2 @f$ is set inversely proportional to number of incoming - * nodes, outgoing nodes, or their average. - * - * A Filler based on the paper [He, Zhang, Ren and Sun 2015]: Specifically - * accounts for ReLU nonlinearities. - * - * Aside: for another perspective on the scaling factor, see the derivation of - * [Saxe, McClelland, and Ganguli 2013 (v3)]. - * - * It fills the incoming matrix by randomly sampling Gaussian data with std = - * sqrt(2 / n) where n is the fan_in, fan_out, or their average, depending on - * the variance_norm option. You should make sure the input blob has shape (num, - * a, b, c) where a * b * c = fan_in and num * b * c = fan_out. Note that this - * is currently not the case for inner product layers. 
- */ -template -class MSRAFiller : public Filler { - public: - explicit MSRAFiller(const FillerParameter& param) - : Filler(param) {} - virtual void Fill(Blob* blob) { - CHECK(blob->count()); - int fan_in = blob->count() / blob->num(); - int fan_out = blob->count() / blob->channels(); - Dtype n = fan_in; // default to fan_in - if (this->filler_param_.variance_norm() == - FillerParameter_VarianceNorm_AVERAGE) { - n = (fan_in + fan_out) / Dtype(2); - } else if (this->filler_param_.variance_norm() == - FillerParameter_VarianceNorm_FAN_OUT) { - n = fan_out; - } - Dtype std = sqrt(Dtype(2) / n); - caffe_rng_gaussian(blob->count(), Dtype(0), std, - blob->mutable_cpu_data()); - CHECK_EQ(this->filler_param_.sparse(), -1) - << "Sparsity not supported by this Filler."; - } -}; -<<<<<<< 4d8130bab0c7311e241e40f504e5fdbb4999bd84 -======= ->>>>>>> triplet data generation and network update - -/*! -@brief Fills a Blob with coefficients for bilinear interpolation. - -A common use case is with the DeconvolutionLayer acting as upsampling. -You can upsample a feature map with shape of (B, C, H, W) by any integer factor -using the following proto. -\code -layer { - name: "upsample", type: "Deconvolution" - bottom: "{{bottom_name}}" top: "{{top_name}}" - convolution_param { - kernel_size: {{2 * factor - factor % 2}} stride: {{factor}} - num_output: {{C}} group: {{C}} - pad: {{ceil((factor - 1) / 2.)}} - weight_filler: { type: "bilinear" } bias_term: false - } - param { lr_mult: 0 decay_mult: 0 } -} -\endcode -Please use this by replacing `{{}}` with your values. By specifying -`num_output: {{C}} group: {{C}}`, it behaves as -channel-wise convolution. The filter shape of this deconvolution layer will be -(C, 1, K, K) where K is `kernel_size`, and this filler will set a (K, K) -interpolation kernel for every channel of the filter identically. The resulting -shape of the top feature map will be (B, C, factor * H, factor * W). 
-Note that the learning rate and the -weight decay are set to 0 in order to keep coefficient values of bilinear -interpolation unchanged during training. If you apply this to an image, this -operation is equivalent to the following call in Python with Scikit.Image. -\code{.py} -out = skimage.transform.rescale(img, factor, mode='constant', cval=0) -\endcode - */ -template -class BilinearFiller : public Filler { - public: - explicit BilinearFiller(const FillerParameter& param) - : Filler(param) {} - virtual void Fill(Blob* blob) { - CHECK_EQ(blob->num_axes(), 4) << "Blob must be 4 dim."; - CHECK_EQ(blob->width(), blob->height()) << "Filter must be square"; - Dtype* data = blob->mutable_cpu_data(); - int f = ceil(blob->width() / 2.); - float c = (2 * f - 1 - f % 2) / (2. * f); - for (int i = 0; i < blob->count(); ++i) { - float x = i % blob->width(); - float y = (i / blob->width()) % blob->height(); - data[i] = (1 - fabs(x / f - c)) * (1 - fabs(y / f - c)); - } - CHECK_EQ(this->filler_param_.sparse(), -1) - << "Sparsity not supported by this Filler."; - } -}; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= ->>>>>>> macro define in upgrade_proto -======= ->>>>>>> triplet data generation and network update - -/** - * @brief Get a specific filler from the specification given in FillerParameter. - * - * Ideally this would be replaced by a factory pattern, but we will leave it - * this way for now. 
- */ -template -Filler* GetFiller(const FillerParameter& param) { - const std::string& type = param.type(); - if (type == "constant") { - return new ConstantFiller(param); - } else if (type == "gaussian") { - return new GaussianFiller(param); - } else if (type == "positive_unitball") { - return new PositiveUnitballFiller(param); - } else if (type == "uniform") { - return new UniformFiller(param); - } else if (type == "xavier") { - return new XavierFiller(param); -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 - } else if (type == "msra") { - return new MSRAFiller(param); -<<<<<<< 4d8130bab0c7311e241e40f504e5fdbb4999bd84 - } else if (type == "bilinear") { - return new BilinearFiller(param); -======= ->>>>>>> macro define in upgrade_proto -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 - } else if (type == "msra") { - return new MSRAFiller(param); - } else if (type == "bilinear") { - return new BilinearFiller(param); -======= ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update - } else { - CHECK(false) << "Unknown filler name: " << param.type(); - } - return (Filler*)(NULL); -} - -} // namespace caffe - -#endif // CAFFE_FILLER_HPP_ diff --git a/include/caffe/loss_layers.hpp b/include/caffe/loss_layers.hpp index 0a9547749bd..0a513ae12a4 100644 --- a/include/caffe/loss_layers.hpp +++ b/include/caffe/loss_layers.hpp @@ -216,8 +216,6 @@ class ContrastiveLossLayer : public LossLayer { Blob summer_vec_; // tmp storage for gpu forward pass }; -<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da -======= template class TripletLossLayer : public LossLayer { public: @@ -285,7 +283,6 @@ class TripletLossLayer : public LossLayer { Blob summer_vec_; // tmp storage for gpu forward pass }; ->>>>>>> triplet data generation and network update /** * @brief Computes the Euclidean (L2) loss @f$ * E = \frac{1}{2N} \sum\limits_{n=1}^N \left| \left| \hat{y}_n - y_n diff --git a/include/caffe/loss_layers.hpp.orig 
b/include/caffe/loss_layers.hpp.orig deleted file mode 100644 index 87c019d763b..00000000000 --- a/include/caffe/loss_layers.hpp.orig +++ /dev/null @@ -1,848 +0,0 @@ -#ifndef CAFFE_LOSS_LAYERS_HPP_ -#define CAFFE_LOSS_LAYERS_HPP_ - -#include -#include -#include - -#include "caffe/blob.hpp" -#include "caffe/common.hpp" -#include "caffe/layer.hpp" -#include "caffe/neuron_layers.hpp" -#include "caffe/proto/caffe.pb.h" - -namespace caffe { - -const float kLOG_THRESHOLD = 1e-20; - -/** - * @brief Computes the classification accuracy for a one-of-many - * classification task. - */ -template -class AccuracyLayer : public Layer { - public: - /** - * @param param provides AccuracyParameter accuracy_param, - * with AccuracyLayer options: - * - top_k (\b optional, default 1). - * Sets the maximum rank @f$ k @f$ at which a prediction is considered - * correct. For example, if @f$ k = 5 @f$, a prediction is counted - * correct if the correct label is among the top 5 predicted labels. - */ - explicit AccuracyLayer(const LayerParameter& param) - : Layer(param) {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "Accuracy"; } - virtual inline int ExactNumBottomBlobs() const { return 2; } - - // If there are two top blobs, then the second blob will contain - // accuracies per class. - virtual inline int MinTopBlobs() const { return 1; } - virtual inline int MaxTopBlos() const { return 2; } - - protected: - /** - * @param bottom input Blob vector (length 2) - * -# @f$ (N \times C \times H \times W) @f$ - * the predictions @f$ x @f$, a Blob with values in - * @f$ [-\infty, +\infty] @f$ indicating the predicted score for each of - * the @f$ K = CHW @f$ classes. 
Each @f$ x_n @f$ is mapped to a predicted - * label @f$ \hat{l}_n @f$ given by its maximal index: - * @f$ \hat{l}_n = \arg\max\limits_k x_{nk} @f$ - * -# @f$ (N \times 1 \times 1 \times 1) @f$ - * the labels @f$ l @f$, an integer-valued Blob with values - * @f$ l_n \in [0, 1, 2, ..., K - 1] @f$ - * indicating the correct class label among the @f$ K @f$ classes - * @param top output Blob vector (length 1) - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * the computed accuracy: @f$ - * \frac{1}{N} \sum\limits_{n=1}^N \delta\{ \hat{l}_n = l_n \} - * @f$, where @f$ - * \delta\{\mathrm{condition}\} = \left\{ - * \begin{array}{lr} - * 1 & \mbox{if condition} \\ - * 0 & \mbox{otherwise} - * \end{array} \right. - * @f$ - */ - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - - - /// @brief Not implemented -- AccuracyLayer cannot be used as a loss. - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - for (int i = 0; i < propagate_down.size(); ++i) { - if (propagate_down[i]) { NOT_IMPLEMENTED; } - } - } - - int label_axis_, outer_num_, inner_num_; - - int top_k_; - - /// Whether to ignore instances with a certain label. - bool has_ignore_label_; - /// The label indicating that an instance should be ignored. - int ignore_label_; - /// Keeps counts of the number of samples per class. - Blob nums_buffer_; -}; - -/** - * @brief An interface for Layer%s that take two Blob%s as input -- usually - * (1) predictions and (2) ground-truth labels -- and output a - * singleton Blob representing the loss. - * - * LossLayers are typically only capable of backpropagating to their first input - * -- the predictions. 
- */ -template -class LossLayer : public Layer { - public: - explicit LossLayer(const LayerParameter& param) - : Layer(param) {} - virtual void LayerSetUp( - const vector*>& bottom, const vector*>& top); - virtual void Reshape( - const vector*>& bottom, const vector*>& top); - - virtual inline int ExactNumBottomBlobs() const { return 2; } - - /** - * @brief For convenience and backwards compatibility, instruct the Net to - * automatically allocate a single top Blob for LossLayers, into which - * they output their singleton loss, (even if the user didn't specify - * one in the prototxt, etc.). - */ - virtual inline bool AutoTopBlobs() const { return true; } - virtual inline int ExactNumTopBlobs() const { return 1; } - /** - * We usually cannot backpropagate to the labels; ignore force_backward for - * these inputs. - */ - virtual inline bool AllowForceBackward(const int bottom_index) const { - return bottom_index != 1; - } -}; - -/** - * @brief Computes the contrastive loss @f$ - * E = \frac{1}{2N} \sum\limits_{n=1}^N \left(y\right) d + - * \left(1-y\right) \max \left(margin-d, 0\right)^2 - * @f$ where @f$ - * d = \left| \left| a_n - b_n \right| \right|_2 @f$. This can be - * used to train siamese networks. - * - * @param bottom input Blob vector (length 3) - * -# @f$ (N \times C \times 1 \times 1) @f$ - * the features @f$ a \in [-\infty, +\infty]@f$ - * -# @f$ (N \times C \times 1 \times 1) @f$ - * the features @f$ b \in [-\infty, +\infty]@f$ - * -# @f$ (N \times 1 \times 1 \times 1) @f$ - * the binary similarity @f$ s \in [0, 1]@f$ - * @param top output Blob vector (length 1) - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * the computed contrastive loss: @f$ E = - * \frac{1}{2N} \sum\limits_{n=1}^N \left(y\right) d + - * \left(1-y\right) \max \left(margin-d, 0\right)^2 - * @f$ where @f$ - * d = \left| \left| a_n - b_n \right| \right|_2 @f$. - * This can be used to train siamese networks. 
- */ -template -class ContrastiveLossLayer : public LossLayer { - public: - explicit ContrastiveLossLayer(const LayerParameter& param) - : LossLayer(param), diff_() {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - - virtual inline int ExactNumBottomBlobs() const { return 3; } - virtual inline const char* type() const { return "ContrastiveLoss"; } - /** - * Unlike most loss layers, in the ContrastiveLossLayer we can backpropagate - * to the first two inputs. - */ - virtual inline bool AllowForceBackward(const int bottom_index) const { - return bottom_index != 2; - } - - protected: - /// @copydoc ContrastiveLossLayer - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - - /** - * @brief Computes the Contrastive error gradient w.r.t. the inputs. - * - * Computes the gradients with respect to the two input vectors (bottom[0] and - * bottom[1]), but not the similarity label (bottom[2]). - * - * @param top output Blob vector (length 1), providing the error gradient with - * respect to the outputs - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, - * as @f$ \lambda @f$ is the coefficient of this layer's output - * @f$\ell_i@f$ in the overall Net loss - * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence - * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. - * (*Assuming that this top Blob is not used as a bottom (input) by any - * other layer of the Net.) - * @param propagate_down see Layer::Backward. 
- * @param bottom input Blob vector (length 2) - * -# @f$ (N \times C \times 1 \times 1) @f$ - * the features @f$a@f$; Backward fills their diff with - * gradients if propagate_down[0] - * -# @f$ (N \times C \times 1 \times 1) @f$ - * the features @f$b@f$; Backward fills their diff with gradients if - * propagate_down[1] - */ - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - - Blob diff_; // cached for backward pass - Blob dist_sq_; // cached for backward pass - Blob diff_sq_; // tmp storage for gpu forward pass - Blob summer_vec_; // tmp storage for gpu forward pass -}; - -template -class TripletLossLayer : public LossLayer { - public: - explicit TripletLossLayer(const LayerParameter& param) - : LossLayer(param), diff_() {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - virtual inline int ExactNumBottomBlobs() const { return 4; } -======= - virtual inline int ExactNumBottomBlobs() const { return 2; } ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - virtual inline const char* type() const { return "TripletLoss"; } - /** - * Unlike most loss layers, in the TripletLossLayer we can backpropagate - * to the first three inputs. - */ - virtual inline bool AllowForceBackward(const int bottom_index) const { -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - return bottom_index != 3; -======= - return bottom_index != 1; ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - } - - protected: - /// @copydoc TripletLossLayer - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - - /** - * @brief Computes the Triplet error gradient w.r.t. 
the inputs. - * - * Computes the gradients with respect to the two input vectors (bottom[0] and - * bottom[1]), but not the similarity label (bottom[2]). - * - * @param top output Blob vector (length 1), providing the error gradient with - * respect to the outputs - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, - * as @f$ \lambda @f$ is the coefficient of this layer's output - * @f$\ell_i@f$ in the overall Net loss - * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence - * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. - * (*Assuming that this top Blob is not used as a bottom (input) by any - * other layer of the Net.) - * @param propagate_down see Layer::Backward. - * @param bottom input Blob vector (length 2) - * -# @f$ (N \times C \times 1 \times 1) @f$ - * the features @f$a@f$; Backward fills their diff with - * gradients if propagate_down[0] - * -# @f$ (N \times C \times 1 \times 1) @f$ - * the features @f$b@f$; Backward fills their diff with gradients if - * propagate_down[1] - */ - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - - Blob diff_; // cached for backward pass - Blob diff_pos; - Blob diff_neg; - Blob dist_sq_; // cached for backward pass - Blob dist_sq_pos; - Blob dist_sq_neg; - Blob diff_sq_; // tmp storage for gpu forward pass - Blob diff_sq_pos; - Blob diff_sq_neg; - Blob summer_vec_; // tmp storage for gpu forward pass -}; - -/** - * @brief Computes the Euclidean (L2) loss @f$ - * E = \frac{1}{2N} \sum\limits_{n=1}^N \left| \left| \hat{y}_n - y_n - * \right| \right|_2^2 @f$ for real-valued regression tasks. 
- * - * @param bottom input Blob vector (length 2) - * -# @f$ (N \times C \times H \times W) @f$ - * the predictions @f$ \hat{y} \in [-\infty, +\infty]@f$ - * -# @f$ (N \times C \times H \times W) @f$ - * the targets @f$ y \in [-\infty, +\infty]@f$ - * @param top output Blob vector (length 1) - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * the computed Euclidean loss: @f$ E = - * \frac{1}{2n} \sum\limits_{n=1}^N \left| \left| \hat{y}_n - y_n - * \right| \right|_2^2 @f$ - * - * This can be used for least-squares regression tasks. An InnerProductLayer - * input to a EuclideanLossLayer exactly formulates a linear least squares - * regression problem. With non-zero weight decay the problem becomes one of - * ridge regression -- see src/caffe/test/test_sgd_solver.cpp for a concrete - * example wherein we check that the gradients computed for a Net with exactly - * this structure match hand-computed gradient formulas for ridge regression. - * - * (Note: Caffe, and SGD in general, is certainly \b not the best way to solve - * linear least squares problems! We use it only as an instructive example.) - */ -template -class EuclideanLossLayer : public LossLayer { - public: - explicit EuclideanLossLayer(const LayerParameter& param) - : LossLayer(param), diff_() {} - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "EuclideanLoss"; } - /** - * Unlike most loss layers, in the EuclideanLossLayer we can backpropagate - * to both inputs -- override to return true and always allow force_backward. - */ - virtual inline bool AllowForceBackward(const int bottom_index) const { - return true; - } - - protected: - /// @copydoc EuclideanLossLayer - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - - /** - * @brief Computes the Euclidean error gradient w.r.t. the inputs. 
- * - * Unlike other children of LossLayer, EuclideanLossLayer \b can compute - * gradients with respect to the label inputs bottom[1] (but still only will - * if propagate_down[1] is set, due to being produced by learnable parameters - * or if force_backward is set). In fact, this layer is "commutative" -- the - * result is the same regardless of the order of the two bottoms. - * - * @param top output Blob vector (length 1), providing the error gradient with - * respect to the outputs - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, - * as @f$ \lambda @f$ is the coefficient of this layer's output - * @f$\ell_i@f$ in the overall Net loss - * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence - * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. - * (*Assuming that this top Blob is not used as a bottom (input) by any - * other layer of the Net.) - * @param propagate_down see Layer::Backward. - * @param bottom input Blob vector (length 2) - * -# @f$ (N \times C \times H \times W) @f$ - * the predictions @f$\hat{y}@f$; Backward fills their diff with - * gradients @f$ - * \frac{\partial E}{\partial \hat{y}} = - * \frac{1}{n} \sum\limits_{n=1}^N (\hat{y}_n - y_n) - * @f$ if propagate_down[0] - * -# @f$ (N \times C \times H \times W) @f$ - * the targets @f$y@f$; Backward fills their diff with gradients - * @f$ \frac{\partial E}{\partial y} = - * \frac{1}{n} \sum\limits_{n=1}^N (y_n - \hat{y}_n) - * @f$ if propagate_down[1] - */ - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - - Blob diff_; -}; - -/** - * @brief Computes the hinge loss for a one-of-many classification task. 
- * - * @param bottom input Blob vector (length 2) - * -# @f$ (N \times C \times H \times W) @f$ - * the predictions @f$ t @f$, a Blob with values in - * @f$ [-\infty, +\infty] @f$ indicating the predicted score for each of - * the @f$ K = CHW @f$ classes. In an SVM, @f$ t @f$ is the result of - * taking the inner product @f$ X^T W @f$ of the D-dimensional features - * @f$ X \in \mathcal{R}^{D \times N} @f$ and the learned hyperplane - * parameters @f$ W \in \mathcal{R}^{D \times K} @f$, so a Net with just - * an InnerProductLayer (with num_output = D) providing predictions to a - * HingeLossLayer and no other learnable parameters or losses is - * equivalent to an SVM. - * -# @f$ (N \times 1 \times 1 \times 1) @f$ - * the labels @f$ l @f$, an integer-valued Blob with values - * @f$ l_n \in [0, 1, 2, ..., K - 1] @f$ - * indicating the correct class label among the @f$ K @f$ classes - * @param top output Blob vector (length 1) - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * the computed hinge loss: @f$ E = - * \frac{1}{N} \sum\limits_{n=1}^N \sum\limits_{k=1}^K - * [\max(0, 1 - \delta\{l_n = k\} t_{nk})] ^ p - * @f$, for the @f$ L^p @f$ norm - * (defaults to @f$ p = 1 @f$, the L1 norm; L2 norm, as in L2-SVM, - * is also available), and @f$ - * \delta\{\mathrm{condition}\} = \left\{ - * \begin{array}{lr} - * 1 & \mbox{if condition} \\ - * -1 & \mbox{otherwise} - * \end{array} \right. - * @f$ - * - * In an SVM, @f$ t \in \mathcal{R}^{N \times K} @f$ is the result of taking - * the inner product @f$ X^T W @f$ of the features - * @f$ X \in \mathcal{R}^{D \times N} @f$ - * and the learned hyperplane parameters - * @f$ W \in \mathcal{R}^{D \times K} @f$. So, a Net with just an - * InnerProductLayer (with num_output = @f$k@f$) providing predictions to a - * HingeLossLayer is equivalent to an SVM (assuming it has no other learned - * outside the InnerProductLayer and no other losses outside the - * HingeLossLayer). 
- */ -template -class HingeLossLayer : public LossLayer { - public: - explicit HingeLossLayer(const LayerParameter& param) - : LossLayer(param) {} - - virtual inline const char* type() const { return "HingeLoss"; } - - protected: - /// @copydoc HingeLossLayer - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - - /** - * @brief Computes the hinge loss error gradient w.r.t. the predictions. - * - * Gradients cannot be computed with respect to the label inputs (bottom[1]), - * so this method ignores bottom[1] and requires !propagate_down[1], crashing - * if propagate_down[1] is set. - * - * @param top output Blob vector (length 1), providing the error gradient with - * respect to the outputs - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, - * as @f$ \lambda @f$ is the coefficient of this layer's output - * @f$\ell_i@f$ in the overall Net loss - * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence - * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. - * (*Assuming that this top Blob is not used as a bottom (input) by any - * other layer of the Net.) - * @param propagate_down see Layer::Backward. - * propagate_down[1] must be false as we can't compute gradients with - * respect to the labels. - * @param bottom input Blob vector (length 2) - * -# @f$ (N \times C \times H \times W) @f$ - * the predictions @f$t@f$; Backward computes diff - * @f$ \frac{\partial E}{\partial t} @f$ - * -# @f$ (N \times 1 \times 1 \times 1) @f$ - * the labels -- ignored as we can't compute their error gradients - */ - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); -}; - -/** - * @brief A generalization of MultinomialLogisticLossLayer that takes an - * "information gain" (infogain) matrix specifying the "value" of all label - * pairs. 
- * - * Equivalent to the MultinomialLogisticLossLayer if the infogain matrix is the - * identity. - * - * @param bottom input Blob vector (length 2-3) - * -# @f$ (N \times C \times H \times W) @f$ - * the predictions @f$ \hat{p} @f$, a Blob with values in - * @f$ [0, 1] @f$ indicating the predicted probability of each of the - * @f$ K = CHW @f$ classes. Each prediction vector @f$ \hat{p}_n @f$ - * should sum to 1 as in a probability distribution: @f$ - * \forall n \sum\limits_{k=1}^K \hat{p}_{nk} = 1 @f$. - * -# @f$ (N \times 1 \times 1 \times 1) @f$ - * the labels @f$ l @f$, an integer-valued Blob with values - * @f$ l_n \in [0, 1, 2, ..., K - 1] @f$ - * indicating the correct class label among the @f$ K @f$ classes - * -# @f$ (1 \times 1 \times K \times K) @f$ - * (\b optional) the infogain matrix @f$ H @f$. This must be provided as - * the third bottom blob input if not provided as the infogain_mat in the - * InfogainLossParameter. If @f$ H = I @f$, this layer is equivalent to the - * MultinomialLogisticLossLayer. - * @param top output Blob vector (length 1) - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * the computed infogain multinomial logistic loss: @f$ E = - * \frac{-1}{N} \sum\limits_{n=1}^N H_{l_n} \log(\hat{p}_n) = - * \frac{-1}{N} \sum\limits_{n=1}^N \sum\limits_{k=1}^{K} H_{l_n,k} - * \log(\hat{p}_{n,k}) - * @f$, where @f$ H_{l_n} @f$ denotes row @f$l_n@f$ of @f$H@f$. - */ -template -class InfogainLossLayer : public LossLayer { - public: - explicit InfogainLossLayer(const LayerParameter& param) - : LossLayer(param), infogain_() {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - - // InfogainLossLayer takes 2-3 bottom Blobs; if there are 3 the third should - // be the infogain matrix. (Otherwise the infogain matrix is loaded from a - // file specified by LayerParameter.) 
- virtual inline int ExactNumBottomBlobs() const { return -1; } - virtual inline int MinBottomBlobs() const { return 2; } - virtual inline int MaxBottomBlobs() const { return 3; } - - virtual inline const char* type() const { return "InfogainLoss"; } - - protected: - /// @copydoc InfogainLossLayer - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - - /** - * @brief Computes the infogain loss error gradient w.r.t. the predictions. - * - * Gradients cannot be computed with respect to the label inputs (bottom[1]), - * so this method ignores bottom[1] and requires !propagate_down[1], crashing - * if propagate_down[1] is set. (The same applies to the infogain matrix, if - * provided as bottom[2] rather than in the layer_param.) - * - * @param top output Blob vector (length 1), providing the error gradient - * with respect to the outputs - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, - * as @f$ \lambda @f$ is the coefficient of this layer's output - * @f$\ell_i@f$ in the overall Net loss - * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence - * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. - * (*Assuming that this top Blob is not used as a bottom (input) by any - * other layer of the Net.) - * @param propagate_down see Layer::Backward. 
- * propagate_down[1] must be false as we can't compute gradients with - * respect to the labels (similarly for propagate_down[2] and the - * infogain matrix, if provided as bottom[2]) - * @param bottom input Blob vector (length 2-3) - * -# @f$ (N \times C \times H \times W) @f$ - * the predictions @f$ \hat{p} @f$; Backward computes diff - * @f$ \frac{\partial E}{\partial \hat{p}} @f$ - * -# @f$ (N \times 1 \times 1 \times 1) @f$ - * the labels -- ignored as we can't compute their error gradients - * -# @f$ (1 \times 1 \times K \times K) @f$ - * (\b optional) the information gain matrix -- ignored as its error - * gradient computation is not implemented. - */ - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - - Blob infogain_; -}; - -/** - * @brief Computes the multinomial logistic loss for a one-of-many - * classification task, directly taking a predicted probability - * distribution as input. - * - * When predictions are not already a probability distribution, you should - * instead use the SoftmaxWithLossLayer, which maps predictions to a - * distribution using the SoftmaxLayer, before computing the multinomial - * logistic loss. The SoftmaxWithLossLayer should be preferred over separate - * SoftmaxLayer + MultinomialLogisticLossLayer - * as its gradient computation is more numerically stable. - * - * @param bottom input Blob vector (length 2) - * -# @f$ (N \times C \times H \times W) @f$ - * the predictions @f$ \hat{p} @f$, a Blob with values in - * @f$ [0, 1] @f$ indicating the predicted probability of each of the - * @f$ K = CHW @f$ classes. Each prediction vector @f$ \hat{p}_n @f$ - * should sum to 1 as in a probability distribution: @f$ - * \forall n \sum\limits_{k=1}^K \hat{p}_{nk} = 1 @f$. 
- * -# @f$ (N \times 1 \times 1 \times 1) @f$ - * the labels @f$ l @f$, an integer-valued Blob with values - * @f$ l_n \in [0, 1, 2, ..., K - 1] @f$ - * indicating the correct class label among the @f$ K @f$ classes - * @param top output Blob vector (length 1) - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * the computed multinomial logistic loss: @f$ E = - * \frac{-1}{N} \sum\limits_{n=1}^N \log(\hat{p}_{n,l_n}) - * @f$ - */ -template -class MultinomialLogisticLossLayer : public LossLayer { - public: - explicit MultinomialLogisticLossLayer(const LayerParameter& param) - : LossLayer(param) {} - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "MultinomialLogisticLoss"; } - - protected: - /// @copydoc MultinomialLogisticLossLayer - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - - /** - * @brief Computes the multinomial logistic loss error gradient w.r.t. the - * predictions. - * - * Gradients cannot be computed with respect to the label inputs (bottom[1]), - * so this method ignores bottom[1] and requires !propagate_down[1], crashing - * if propagate_down[1] is set. - * - * @param top output Blob vector (length 1), providing the error gradient with - * respect to the outputs - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, - * as @f$ \lambda @f$ is the coefficient of this layer's output - * @f$\ell_i@f$ in the overall Net loss - * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence - * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. - * (*Assuming that this top Blob is not used as a bottom (input) by any - * other layer of the Net.) - * @param propagate_down see Layer::Backward. - * propagate_down[1] must be false as we can't compute gradients with - * respect to the labels. 
- * @param bottom input Blob vector (length 2) - * -# @f$ (N \times C \times H \times W) @f$ - * the predictions @f$ \hat{p} @f$; Backward computes diff - * @f$ \frac{\partial E}{\partial \hat{p}} @f$ - * -# @f$ (N \times 1 \times 1 \times 1) @f$ - * the labels -- ignored as we can't compute their error gradients - */ - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); -}; - -/** - * @brief Computes the cross-entropy (logistic) loss @f$ - * E = \frac{-1}{n} \sum\limits_{n=1}^N \left[ - * p_n \log \hat{p}_n + - * (1 - p_n) \log(1 - \hat{p}_n) - * \right] - * @f$, often used for predicting targets interpreted as probabilities. - * - * This layer is implemented rather than separate - * SigmoidLayer + CrossEntropyLayer - * as its gradient computation is more numerically stable. - * At test time, this layer can be replaced simply by a SigmoidLayer. - * - * @param bottom input Blob vector (length 2) - * -# @f$ (N \times C \times H \times W) @f$ - * the scores @f$ x \in [-\infty, +\infty]@f$, - * which this layer maps to probability predictions - * @f$ \hat{p}_n = \sigma(x_n) \in [0, 1] @f$ - * using the sigmoid function @f$ \sigma(.) @f$ (see SigmoidLayer). 
- * -# @f$ (N \times C \times H \times W) @f$ - * the targets @f$ y \in [0, 1] @f$ - * @param top output Blob vector (length 1) - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * the computed cross-entropy loss: @f$ - * E = \frac{-1}{n} \sum\limits_{n=1}^N \left[ - * p_n \log \hat{p}_n + (1 - p_n) \log(1 - \hat{p}_n) - * \right] - * @f$ - */ -template -class SigmoidCrossEntropyLossLayer : public LossLayer { - public: - explicit SigmoidCrossEntropyLossLayer(const LayerParameter& param) - : LossLayer(param), - sigmoid_layer_(new SigmoidLayer(param)), - sigmoid_output_(new Blob()) {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "SigmoidCrossEntropyLoss"; } - - protected: - /// @copydoc SigmoidCrossEntropyLossLayer - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - - /** - * @brief Computes the sigmoid cross-entropy loss error gradient w.r.t. the - * predictions. - * - * Gradients cannot be computed with respect to the target inputs (bottom[1]), - * so this method ignores bottom[1] and requires !propagate_down[1], crashing - * if propagate_down[1] is set. - * - * @param top output Blob vector (length 1), providing the error gradient with - * respect to the outputs - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, - * as @f$ \lambda @f$ is the coefficient of this layer's output - * @f$\ell_i@f$ in the overall Net loss - * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence - * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. - * (*Assuming that this top Blob is not used as a bottom (input) by any - * other layer of the Net.) - * @param propagate_down see Layer::Backward. - * propagate_down[1] must be false as gradient computation with respect - * to the targets is not implemented. 
- * @param bottom input Blob vector (length 2) - * -# @f$ (N \times C \times H \times W) @f$ - * the predictions @f$x@f$; Backward computes diff - * @f$ \frac{\partial E}{\partial x} = - * \frac{1}{n} \sum\limits_{n=1}^N (\hat{p}_n - p_n) - * @f$ - * -# @f$ (N \times 1 \times 1 \times 1) @f$ - * the labels -- ignored as we can't compute their error gradients - */ - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - - /// The internal SigmoidLayer used to map predictions to probabilities. - shared_ptr > sigmoid_layer_; - /// sigmoid_output stores the output of the SigmoidLayer. - shared_ptr > sigmoid_output_; - /// bottom vector holder to call the underlying SigmoidLayer::Forward - vector*> sigmoid_bottom_vec_; - /// top vector holder to call the underlying SigmoidLayer::Forward - vector*> sigmoid_top_vec_; -}; - -// Forward declare SoftmaxLayer for use in SoftmaxWithLossLayer. -template class SoftmaxLayer; - -/** - * @brief Computes the multinomial logistic loss for a one-of-many - * classification task, passing real-valued predictions through a - * softmax to get a probability distribution over classes. - * - * This layer should be preferred over separate - * SoftmaxLayer + MultinomialLogisticLossLayer - * as its gradient computation is more numerically stable. - * At test time, this layer can be replaced simply by a SoftmaxLayer. - * - * @param bottom input Blob vector (length 2) - * -# @f$ (N \times C \times H \times W) @f$ - * the predictions @f$ x @f$, a Blob with values in - * @f$ [-\infty, +\infty] @f$ indicating the predicted score for each of - * the @f$ K = CHW @f$ classes. This layer maps these scores to a - * probability distribution over classes using the softmax function - * @f$ \hat{p}_{nk} = \exp(x_{nk}) / - * \left[\sum_{k'} \exp(x_{nk'})\right] @f$ (see SoftmaxLayer). 
- * -# @f$ (N \times 1 \times 1 \times 1) @f$ - * the labels @f$ l @f$, an integer-valued Blob with values - * @f$ l_n \in [0, 1, 2, ..., K - 1] @f$ - * indicating the correct class label among the @f$ K @f$ classes - * @param top output Blob vector (length 1) - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * the computed cross-entropy classification loss: @f$ E = - * \frac{-1}{N} \sum\limits_{n=1}^N \log(\hat{p}_{n,l_n}) - * @f$, for softmax output class probabilites @f$ \hat{p} @f$ - */ -template -class SoftmaxWithLossLayer : public LossLayer { - public: - /** - * @param param provides LossParameter loss_param, with options: - * - ignore_label (optional) - * Specify a label value that should be ignored when computing the loss. - * - normalize (optional, default true) - * If true, the loss is normalized by the number of (nonignored) labels - * present; otherwise the loss is simply summed over spatial locations. - */ - explicit SoftmaxWithLossLayer(const LayerParameter& param) - : LossLayer(param) {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "SoftmaxWithLoss"; } - virtual inline int ExactNumTopBlobs() const { return -1; } - virtual inline int MinTopBlobs() const { return 1; } - virtual inline int MaxTopBlobs() const { return 2; } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - /** - * @brief Computes the softmax loss error gradient w.r.t. the predictions. - * - * Gradients cannot be computed with respect to the label inputs (bottom[1]), - * so this method ignores bottom[1] and requires !propagate_down[1], crashing - * if propagate_down[1] is set. 
- * - * @param top output Blob vector (length 1), providing the error gradient with - * respect to the outputs - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, - * as @f$ \lambda @f$ is the coefficient of this layer's output - * @f$\ell_i@f$ in the overall Net loss - * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence - * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. - * (*Assuming that this top Blob is not used as a bottom (input) by any - * other layer of the Net.) - * @param propagate_down see Layer::Backward. - * propagate_down[1] must be false as we can't compute gradients with - * respect to the labels. - * @param bottom input Blob vector (length 2) - * -# @f$ (N \times C \times H \times W) @f$ - * the predictions @f$ x @f$; Backward computes diff - * @f$ \frac{\partial E}{\partial x} @f$ - * -# @f$ (N \times 1 \times 1 \times 1) @f$ - * the labels -- ignored as we can't compute their error gradients - */ - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - - - /// The internal SoftmaxLayer used to map predictions to a distribution. - shared_ptr > softmax_layer_; - /// prob stores the output probability predictions from the SoftmaxLayer. - Blob prob_; - /// bottom vector holder used in call to the underlying SoftmaxLayer::Forward - vector*> softmax_bottom_vec_; - /// top vector holder used in call to the underlying SoftmaxLayer::Forward - vector*> softmax_top_vec_; - /// Whether to ignore instances with a certain label. - bool has_ignore_label_; - /// The label indicating that an instance should be ignored. - int ignore_label_; - /// Whether to normalize the loss by the total number of values present - /// (otherwise just by the batch size). 
- bool normalize_; - - int softmax_axis_, outer_num_, inner_num_; -}; - -} // namespace caffe - -#endif // CAFFE_LOSS_LAYERS_HPP_ diff --git a/include/caffe/loss_layers.hpp.orig.orig b/include/caffe/loss_layers.hpp.orig.orig deleted file mode 100644 index 26b506a7662..00000000000 --- a/include/caffe/loss_layers.hpp.orig.orig +++ /dev/null @@ -1,880 +0,0 @@ -#ifndef CAFFE_LOSS_LAYERS_HPP_ -#define CAFFE_LOSS_LAYERS_HPP_ - -#include -#include -#include - -#include "caffe/blob.hpp" -#include "caffe/common.hpp" -#include "caffe/layer.hpp" -#include "caffe/neuron_layers.hpp" -#include "caffe/proto/caffe.pb.h" - -namespace caffe { - -const float kLOG_THRESHOLD = 1e-20; - -/** - * @brief Computes the classification accuracy for a one-of-many - * classification task. - */ -template -class AccuracyLayer : public Layer { - public: - /** - * @param param provides AccuracyParameter accuracy_param, - * with AccuracyLayer options: - * - top_k (\b optional, default 1). - * Sets the maximum rank @f$ k @f$ at which a prediction is considered - * correct. For example, if @f$ k = 5 @f$, a prediction is counted - * correct if the correct label is among the top 5 predicted labels. - */ - explicit AccuracyLayer(const LayerParameter& param) - : Layer(param) {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "Accuracy"; } - virtual inline int ExactNumBottomBlobs() const { return 2; } - - // If there are two top blobs, then the second blob will contain - // accuracies per class. 
- virtual inline int MinTopBlobs() const { return 1; } - virtual inline int MaxTopBlos() const { return 2; } - - protected: - /** - * @param bottom input Blob vector (length 2) - * -# @f$ (N \times C \times H \times W) @f$ - * the predictions @f$ x @f$, a Blob with values in - * @f$ [-\infty, +\infty] @f$ indicating the predicted score for each of - * the @f$ K = CHW @f$ classes. Each @f$ x_n @f$ is mapped to a predicted - * label @f$ \hat{l}_n @f$ given by its maximal index: - * @f$ \hat{l}_n = \arg\max\limits_k x_{nk} @f$ - * -# @f$ (N \times 1 \times 1 \times 1) @f$ - * the labels @f$ l @f$, an integer-valued Blob with values - * @f$ l_n \in [0, 1, 2, ..., K - 1] @f$ - * indicating the correct class label among the @f$ K @f$ classes - * @param top output Blob vector (length 1) - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * the computed accuracy: @f$ - * \frac{1}{N} \sum\limits_{n=1}^N \delta\{ \hat{l}_n = l_n \} - * @f$, where @f$ - * \delta\{\mathrm{condition}\} = \left\{ - * \begin{array}{lr} - * 1 & \mbox{if condition} \\ - * 0 & \mbox{otherwise} - * \end{array} \right. - * @f$ - */ - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - - - /// @brief Not implemented -- AccuracyLayer cannot be used as a loss. - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - for (int i = 0; i < propagate_down.size(); ++i) { - if (propagate_down[i]) { NOT_IMPLEMENTED; } - } - } - - int label_axis_, outer_num_, inner_num_; - - int top_k_; - - /// Whether to ignore instances with a certain label. - bool has_ignore_label_; - /// The label indicating that an instance should be ignored. - int ignore_label_; - /// Keeps counts of the number of samples per class. - Blob nums_buffer_; -}; - -/** - * @brief An interface for Layer%s that take two Blob%s as input -- usually - * (1) predictions and (2) ground-truth labels -- and output a - * singleton Blob representing the loss. 
- * - * LossLayers are typically only capable of backpropagating to their first input - * -- the predictions. - */ -template -class LossLayer : public Layer { - public: - explicit LossLayer(const LayerParameter& param) - : Layer(param) {} - virtual void LayerSetUp( - const vector*>& bottom, const vector*>& top); - virtual void Reshape( - const vector*>& bottom, const vector*>& top); - - virtual inline int ExactNumBottomBlobs() const { return 2; } - - /** - * @brief For convenience and backwards compatibility, instruct the Net to - * automatically allocate a single top Blob for LossLayers, into which - * they output their singleton loss, (even if the user didn't specify - * one in the prototxt, etc.). - */ - virtual inline bool AutoTopBlobs() const { return true; } - virtual inline int ExactNumTopBlobs() const { return 1; } - /** - * We usually cannot backpropagate to the labels; ignore force_backward for - * these inputs. - */ - virtual inline bool AllowForceBackward(const int bottom_index) const { - return bottom_index != 1; - } -}; - -/** - * @brief Computes the contrastive loss @f$ - * E = \frac{1}{2N} \sum\limits_{n=1}^N \left(y\right) d + - * \left(1-y\right) \max \left(margin-d, 0\right)^2 - * @f$ where @f$ - * d = \left| \left| a_n - b_n \right| \right|_2 @f$. This can be - * used to train siamese networks. - * - * @param bottom input Blob vector (length 3) - * -# @f$ (N \times C \times 1 \times 1) @f$ - * the features @f$ a \in [-\infty, +\infty]@f$ - * -# @f$ (N \times C \times 1 \times 1) @f$ - * the features @f$ b \in [-\infty, +\infty]@f$ - * -# @f$ (N \times 1 \times 1 \times 1) @f$ - * the binary similarity @f$ s \in [0, 1]@f$ - * @param top output Blob vector (length 1) - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * the computed contrastive loss: @f$ E = - * \frac{1}{2N} \sum\limits_{n=1}^N \left(y\right) d + - * \left(1-y\right) \max \left(margin-d, 0\right)^2 - * @f$ where @f$ - * d = \left| \left| a_n - b_n \right| \right|_2 @f$. 
- * This can be used to train siamese networks. - */ -template -class ContrastiveLossLayer : public LossLayer { - public: - explicit ContrastiveLossLayer(const LayerParameter& param) - : LossLayer(param), diff_() {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - - virtual inline int ExactNumBottomBlobs() const { return 3; } - virtual inline const char* type() const { return "ContrastiveLoss"; } - /** - * Unlike most loss layers, in the ContrastiveLossLayer we can backpropagate - * to the first two inputs. - */ - virtual inline bool AllowForceBackward(const int bottom_index) const { - return bottom_index != 2; - } - - protected: - /// @copydoc ContrastiveLossLayer - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - - /** - * @brief Computes the Contrastive error gradient w.r.t. the inputs. - * - * Computes the gradients with respect to the two input vectors (bottom[0] and - * bottom[1]), but not the similarity label (bottom[2]). - * - * @param top output Blob vector (length 1), providing the error gradient with - * respect to the outputs - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, - * as @f$ \lambda @f$ is the coefficient of this layer's output - * @f$\ell_i@f$ in the overall Net loss - * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence - * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. - * (*Assuming that this top Blob is not used as a bottom (input) by any - * other layer of the Net.) - * @param propagate_down see Layer::Backward. 
- * @param bottom input Blob vector (length 2) - * -# @f$ (N \times C \times 1 \times 1) @f$ - * the features @f$a@f$; Backward fills their diff with - * gradients if propagate_down[0] - * -# @f$ (N \times C \times 1 \times 1) @f$ - * the features @f$b@f$; Backward fills their diff with gradients if - * propagate_down[1] - */ - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - - Blob diff_; // cached for backward pass - Blob dist_sq_; // cached for backward pass - Blob diff_sq_; // tmp storage for gpu forward pass - Blob summer_vec_; // tmp storage for gpu forward pass -}; - -template -class TripletLossLayer : public LossLayer { - public: - explicit TripletLossLayer(const LayerParameter& param) - : LossLayer(param), diff_() {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - -<<<<<<< 8bea48cb77e890cda255da58b142f3a402196328 -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -<<<<<<< 9a40f059e6e96f739ad502aae165c4bfdbf1f369 - virtual inline int ExactNumBottomBlobs() const { return 2; } -======= - virtual inline int ExactNumBottomBlobs() const { return 4; } ->>>>>>> New triplet loss layer added(beta1 version-no test source files) -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 - virtual inline int ExactNumBottomBlobs() const { return 2; } -======= - virtual inline int ExactNumBottomBlobs() const { return 4; } ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update -======= -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - virtual inline int ExactNumBottomBlobs() const { return 4; } -======= - virtual inline int ExactNumBottomBlobs() const { return 2; } ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise ->>>>>>> No sclice layer version which could forward a set of triplets 
together with 1 pair wise - virtual inline const char* type() const { return "TripletLoss"; } - /** - * Unlike most loss layers, in the TripletLossLayer we can backpropagate - * to the first three inputs. - */ - virtual inline bool AllowForceBackward(const int bottom_index) const { -<<<<<<< 8bea48cb77e890cda255da58b142f3a402196328 -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -<<<<<<< 9a40f059e6e96f739ad502aae165c4bfdbf1f369 - return bottom_index != 1; -======= - return bottom_index != 3; ->>>>>>> New triplet loss layer added(beta1 version-no test source files) -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 - return bottom_index != 1; -======= - return bottom_index != 3; ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update -======= -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - return bottom_index != 3; -======= - return bottom_index != 1; ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - } - - protected: - /// @copydoc TripletLossLayer - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - - /** - * @brief Computes the Triplet error gradient w.r.t. the inputs. - * - * Computes the gradients with respect to the two input vectors (bottom[0] and - * bottom[1]), but not the similarity label (bottom[2]). - * - * @param top output Blob vector (length 1), providing the error gradient with - * respect to the outputs - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, - * as @f$ \lambda @f$ is the coefficient of this layer's output - * @f$\ell_i@f$ in the overall Net loss - * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence - * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. 
- * (*Assuming that this top Blob is not used as a bottom (input) by any - * other layer of the Net.) - * @param propagate_down see Layer::Backward. - * @param bottom input Blob vector (length 2) - * -# @f$ (N \times C \times 1 \times 1) @f$ - * the features @f$a@f$; Backward fills their diff with - * gradients if propagate_down[0] - * -# @f$ (N \times C \times 1 \times 1) @f$ - * the features @f$b@f$; Backward fills their diff with gradients if - * propagate_down[1] - */ - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - - Blob diff_; // cached for backward pass - Blob diff_pos; - Blob diff_neg; - Blob dist_sq_; // cached for backward pass - Blob dist_sq_pos; - Blob dist_sq_neg; - Blob diff_sq_; // tmp storage for gpu forward pass - Blob diff_sq_pos; - Blob diff_sq_neg; - Blob summer_vec_; // tmp storage for gpu forward pass -}; - -/** - * @brief Computes the Euclidean (L2) loss @f$ - * E = \frac{1}{2N} \sum\limits_{n=1}^N \left| \left| \hat{y}_n - y_n - * \right| \right|_2^2 @f$ for real-valued regression tasks. - * - * @param bottom input Blob vector (length 2) - * -# @f$ (N \times C \times H \times W) @f$ - * the predictions @f$ \hat{y} \in [-\infty, +\infty]@f$ - * -# @f$ (N \times C \times H \times W) @f$ - * the targets @f$ y \in [-\infty, +\infty]@f$ - * @param top output Blob vector (length 1) - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * the computed Euclidean loss: @f$ E = - * \frac{1}{2n} \sum\limits_{n=1}^N \left| \left| \hat{y}_n - y_n - * \right| \right|_2^2 @f$ - * - * This can be used for least-squares regression tasks. An InnerProductLayer - * input to a EuclideanLossLayer exactly formulates a linear least squares - * regression problem. 
With non-zero weight decay the problem becomes one of - * ridge regression -- see src/caffe/test/test_sgd_solver.cpp for a concrete - * example wherein we check that the gradients computed for a Net with exactly - * this structure match hand-computed gradient formulas for ridge regression. - * - * (Note: Caffe, and SGD in general, is certainly \b not the best way to solve - * linear least squares problems! We use it only as an instructive example.) - */ -template -class EuclideanLossLayer : public LossLayer { - public: - explicit EuclideanLossLayer(const LayerParameter& param) - : LossLayer(param), diff_() {} - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "EuclideanLoss"; } - /** - * Unlike most loss layers, in the EuclideanLossLayer we can backpropagate - * to both inputs -- override to return true and always allow force_backward. - */ - virtual inline bool AllowForceBackward(const int bottom_index) const { - return true; - } - - protected: - /// @copydoc EuclideanLossLayer - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - - /** - * @brief Computes the Euclidean error gradient w.r.t. the inputs. - * - * Unlike other children of LossLayer, EuclideanLossLayer \b can compute - * gradients with respect to the label inputs bottom[1] (but still only will - * if propagate_down[1] is set, due to being produced by learnable parameters - * or if force_backward is set). In fact, this layer is "commutative" -- the - * result is the same regardless of the order of the two bottoms. 
- * - * @param top output Blob vector (length 1), providing the error gradient with - * respect to the outputs - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, - * as @f$ \lambda @f$ is the coefficient of this layer's output - * @f$\ell_i@f$ in the overall Net loss - * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence - * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. - * (*Assuming that this top Blob is not used as a bottom (input) by any - * other layer of the Net.) - * @param propagate_down see Layer::Backward. - * @param bottom input Blob vector (length 2) - * -# @f$ (N \times C \times H \times W) @f$ - * the predictions @f$\hat{y}@f$; Backward fills their diff with - * gradients @f$ - * \frac{\partial E}{\partial \hat{y}} = - * \frac{1}{n} \sum\limits_{n=1}^N (\hat{y}_n - y_n) - * @f$ if propagate_down[0] - * -# @f$ (N \times C \times H \times W) @f$ - * the targets @f$y@f$; Backward fills their diff with gradients - * @f$ \frac{\partial E}{\partial y} = - * \frac{1}{n} \sum\limits_{n=1}^N (y_n - \hat{y}_n) - * @f$ if propagate_down[1] - */ - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - - Blob diff_; -}; - -/** - * @brief Computes the hinge loss for a one-of-many classification task. - * - * @param bottom input Blob vector (length 2) - * -# @f$ (N \times C \times H \times W) @f$ - * the predictions @f$ t @f$, a Blob with values in - * @f$ [-\infty, +\infty] @f$ indicating the predicted score for each of - * the @f$ K = CHW @f$ classes. 
In an SVM, @f$ t @f$ is the result of - * taking the inner product @f$ X^T W @f$ of the D-dimensional features - * @f$ X \in \mathcal{R}^{D \times N} @f$ and the learned hyperplane - * parameters @f$ W \in \mathcal{R}^{D \times K} @f$, so a Net with just - * an InnerProductLayer (with num_output = D) providing predictions to a - * HingeLossLayer and no other learnable parameters or losses is - * equivalent to an SVM. - * -# @f$ (N \times 1 \times 1 \times 1) @f$ - * the labels @f$ l @f$, an integer-valued Blob with values - * @f$ l_n \in [0, 1, 2, ..., K - 1] @f$ - * indicating the correct class label among the @f$ K @f$ classes - * @param top output Blob vector (length 1) - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * the computed hinge loss: @f$ E = - * \frac{1}{N} \sum\limits_{n=1}^N \sum\limits_{k=1}^K - * [\max(0, 1 - \delta\{l_n = k\} t_{nk})] ^ p - * @f$, for the @f$ L^p @f$ norm - * (defaults to @f$ p = 1 @f$, the L1 norm; L2 norm, as in L2-SVM, - * is also available), and @f$ - * \delta\{\mathrm{condition}\} = \left\{ - * \begin{array}{lr} - * 1 & \mbox{if condition} \\ - * -1 & \mbox{otherwise} - * \end{array} \right. - * @f$ - * - * In an SVM, @f$ t \in \mathcal{R}^{N \times K} @f$ is the result of taking - * the inner product @f$ X^T W @f$ of the features - * @f$ X \in \mathcal{R}^{D \times N} @f$ - * and the learned hyperplane parameters - * @f$ W \in \mathcal{R}^{D \times K} @f$. So, a Net with just an - * InnerProductLayer (with num_output = @f$k@f$) providing predictions to a - * HingeLossLayer is equivalent to an SVM (assuming it has no other learned - * outside the InnerProductLayer and no other losses outside the - * HingeLossLayer). 
- */ -template -class HingeLossLayer : public LossLayer { - public: - explicit HingeLossLayer(const LayerParameter& param) - : LossLayer(param) {} - - virtual inline const char* type() const { return "HingeLoss"; } - - protected: - /// @copydoc HingeLossLayer - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - - /** - * @brief Computes the hinge loss error gradient w.r.t. the predictions. - * - * Gradients cannot be computed with respect to the label inputs (bottom[1]), - * so this method ignores bottom[1] and requires !propagate_down[1], crashing - * if propagate_down[1] is set. - * - * @param top output Blob vector (length 1), providing the error gradient with - * respect to the outputs - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, - * as @f$ \lambda @f$ is the coefficient of this layer's output - * @f$\ell_i@f$ in the overall Net loss - * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence - * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. - * (*Assuming that this top Blob is not used as a bottom (input) by any - * other layer of the Net.) - * @param propagate_down see Layer::Backward. - * propagate_down[1] must be false as we can't compute gradients with - * respect to the labels. - * @param bottom input Blob vector (length 2) - * -# @f$ (N \times C \times H \times W) @f$ - * the predictions @f$t@f$; Backward computes diff - * @f$ \frac{\partial E}{\partial t} @f$ - * -# @f$ (N \times 1 \times 1 \times 1) @f$ - * the labels -- ignored as we can't compute their error gradients - */ - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); -}; - -/** - * @brief A generalization of MultinomialLogisticLossLayer that takes an - * "information gain" (infogain) matrix specifying the "value" of all label - * pairs. 
- * - * Equivalent to the MultinomialLogisticLossLayer if the infogain matrix is the - * identity. - * - * @param bottom input Blob vector (length 2-3) - * -# @f$ (N \times C \times H \times W) @f$ - * the predictions @f$ \hat{p} @f$, a Blob with values in - * @f$ [0, 1] @f$ indicating the predicted probability of each of the - * @f$ K = CHW @f$ classes. Each prediction vector @f$ \hat{p}_n @f$ - * should sum to 1 as in a probability distribution: @f$ - * \forall n \sum\limits_{k=1}^K \hat{p}_{nk} = 1 @f$. - * -# @f$ (N \times 1 \times 1 \times 1) @f$ - * the labels @f$ l @f$, an integer-valued Blob with values - * @f$ l_n \in [0, 1, 2, ..., K - 1] @f$ - * indicating the correct class label among the @f$ K @f$ classes - * -# @f$ (1 \times 1 \times K \times K) @f$ - * (\b optional) the infogain matrix @f$ H @f$. This must be provided as - * the third bottom blob input if not provided as the infogain_mat in the - * InfogainLossParameter. If @f$ H = I @f$, this layer is equivalent to the - * MultinomialLogisticLossLayer. - * @param top output Blob vector (length 1) - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * the computed infogain multinomial logistic loss: @f$ E = - * \frac{-1}{N} \sum\limits_{n=1}^N H_{l_n} \log(\hat{p}_n) = - * \frac{-1}{N} \sum\limits_{n=1}^N \sum\limits_{k=1}^{K} H_{l_n,k} - * \log(\hat{p}_{n,k}) - * @f$, where @f$ H_{l_n} @f$ denotes row @f$l_n@f$ of @f$H@f$. - */ -template -class InfogainLossLayer : public LossLayer { - public: - explicit InfogainLossLayer(const LayerParameter& param) - : LossLayer(param), infogain_() {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - - // InfogainLossLayer takes 2-3 bottom Blobs; if there are 3 the third should - // be the infogain matrix. (Otherwise the infogain matrix is loaded from a - // file specified by LayerParameter.) 
- virtual inline int ExactNumBottomBlobs() const { return -1; } - virtual inline int MinBottomBlobs() const { return 2; } - virtual inline int MaxBottomBlobs() const { return 3; } - - virtual inline const char* type() const { return "InfogainLoss"; } - - protected: - /// @copydoc InfogainLossLayer - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - - /** - * @brief Computes the infogain loss error gradient w.r.t. the predictions. - * - * Gradients cannot be computed with respect to the label inputs (bottom[1]), - * so this method ignores bottom[1] and requires !propagate_down[1], crashing - * if propagate_down[1] is set. (The same applies to the infogain matrix, if - * provided as bottom[2] rather than in the layer_param.) - * - * @param top output Blob vector (length 1), providing the error gradient - * with respect to the outputs - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, - * as @f$ \lambda @f$ is the coefficient of this layer's output - * @f$\ell_i@f$ in the overall Net loss - * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence - * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. - * (*Assuming that this top Blob is not used as a bottom (input) by any - * other layer of the Net.) - * @param propagate_down see Layer::Backward. 
- * propagate_down[1] must be false as we can't compute gradients with - * respect to the labels (similarly for propagate_down[2] and the - * infogain matrix, if provided as bottom[2]) - * @param bottom input Blob vector (length 2-3) - * -# @f$ (N \times C \times H \times W) @f$ - * the predictions @f$ \hat{p} @f$; Backward computes diff - * @f$ \frac{\partial E}{\partial \hat{p}} @f$ - * -# @f$ (N \times 1 \times 1 \times 1) @f$ - * the labels -- ignored as we can't compute their error gradients - * -# @f$ (1 \times 1 \times K \times K) @f$ - * (\b optional) the information gain matrix -- ignored as its error - * gradient computation is not implemented. - */ - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - - Blob infogain_; -}; - -/** - * @brief Computes the multinomial logistic loss for a one-of-many - * classification task, directly taking a predicted probability - * distribution as input. - * - * When predictions are not already a probability distribution, you should - * instead use the SoftmaxWithLossLayer, which maps predictions to a - * distribution using the SoftmaxLayer, before computing the multinomial - * logistic loss. The SoftmaxWithLossLayer should be preferred over separate - * SoftmaxLayer + MultinomialLogisticLossLayer - * as its gradient computation is more numerically stable. - * - * @param bottom input Blob vector (length 2) - * -# @f$ (N \times C \times H \times W) @f$ - * the predictions @f$ \hat{p} @f$, a Blob with values in - * @f$ [0, 1] @f$ indicating the predicted probability of each of the - * @f$ K = CHW @f$ classes. Each prediction vector @f$ \hat{p}_n @f$ - * should sum to 1 as in a probability distribution: @f$ - * \forall n \sum\limits_{k=1}^K \hat{p}_{nk} = 1 @f$. 
- * -# @f$ (N \times 1 \times 1 \times 1) @f$ - * the labels @f$ l @f$, an integer-valued Blob with values - * @f$ l_n \in [0, 1, 2, ..., K - 1] @f$ - * indicating the correct class label among the @f$ K @f$ classes - * @param top output Blob vector (length 1) - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * the computed multinomial logistic loss: @f$ E = - * \frac{-1}{N} \sum\limits_{n=1}^N \log(\hat{p}_{n,l_n}) - * @f$ - */ -template -class MultinomialLogisticLossLayer : public LossLayer { - public: - explicit MultinomialLogisticLossLayer(const LayerParameter& param) - : LossLayer(param) {} - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "MultinomialLogisticLoss"; } - - protected: - /// @copydoc MultinomialLogisticLossLayer - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - - /** - * @brief Computes the multinomial logistic loss error gradient w.r.t. the - * predictions. - * - * Gradients cannot be computed with respect to the label inputs (bottom[1]), - * so this method ignores bottom[1] and requires !propagate_down[1], crashing - * if propagate_down[1] is set. - * - * @param top output Blob vector (length 1), providing the error gradient with - * respect to the outputs - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, - * as @f$ \lambda @f$ is the coefficient of this layer's output - * @f$\ell_i@f$ in the overall Net loss - * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence - * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. - * (*Assuming that this top Blob is not used as a bottom (input) by any - * other layer of the Net.) - * @param propagate_down see Layer::Backward. - * propagate_down[1] must be false as we can't compute gradients with - * respect to the labels. 
- * @param bottom input Blob vector (length 2) - * -# @f$ (N \times C \times H \times W) @f$ - * the predictions @f$ \hat{p} @f$; Backward computes diff - * @f$ \frac{\partial E}{\partial \hat{p}} @f$ - * -# @f$ (N \times 1 \times 1 \times 1) @f$ - * the labels -- ignored as we can't compute their error gradients - */ - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); -}; - -/** - * @brief Computes the cross-entropy (logistic) loss @f$ - * E = \frac{-1}{n} \sum\limits_{n=1}^N \left[ - * p_n \log \hat{p}_n + - * (1 - p_n) \log(1 - \hat{p}_n) - * \right] - * @f$, often used for predicting targets interpreted as probabilities. - * - * This layer is implemented rather than separate - * SigmoidLayer + CrossEntropyLayer - * as its gradient computation is more numerically stable. - * At test time, this layer can be replaced simply by a SigmoidLayer. - * - * @param bottom input Blob vector (length 2) - * -# @f$ (N \times C \times H \times W) @f$ - * the scores @f$ x \in [-\infty, +\infty]@f$, - * which this layer maps to probability predictions - * @f$ \hat{p}_n = \sigma(x_n) \in [0, 1] @f$ - * using the sigmoid function @f$ \sigma(.) @f$ (see SigmoidLayer). 
- * -# @f$ (N \times C \times H \times W) @f$ - * the targets @f$ y \in [0, 1] @f$ - * @param top output Blob vector (length 1) - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * the computed cross-entropy loss: @f$ - * E = \frac{-1}{n} \sum\limits_{n=1}^N \left[ - * p_n \log \hat{p}_n + (1 - p_n) \log(1 - \hat{p}_n) - * \right] - * @f$ - */ -template -class SigmoidCrossEntropyLossLayer : public LossLayer { - public: - explicit SigmoidCrossEntropyLossLayer(const LayerParameter& param) - : LossLayer(param), - sigmoid_layer_(new SigmoidLayer(param)), - sigmoid_output_(new Blob()) {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "SigmoidCrossEntropyLoss"; } - - protected: - /// @copydoc SigmoidCrossEntropyLossLayer - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - - /** - * @brief Computes the sigmoid cross-entropy loss error gradient w.r.t. the - * predictions. - * - * Gradients cannot be computed with respect to the target inputs (bottom[1]), - * so this method ignores bottom[1] and requires !propagate_down[1], crashing - * if propagate_down[1] is set. - * - * @param top output Blob vector (length 1), providing the error gradient with - * respect to the outputs - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, - * as @f$ \lambda @f$ is the coefficient of this layer's output - * @f$\ell_i@f$ in the overall Net loss - * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence - * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. - * (*Assuming that this top Blob is not used as a bottom (input) by any - * other layer of the Net.) - * @param propagate_down see Layer::Backward. - * propagate_down[1] must be false as gradient computation with respect - * to the targets is not implemented. 
- * @param bottom input Blob vector (length 2) - * -# @f$ (N \times C \times H \times W) @f$ - * the predictions @f$x@f$; Backward computes diff - * @f$ \frac{\partial E}{\partial x} = - * \frac{1}{n} \sum\limits_{n=1}^N (\hat{p}_n - p_n) - * @f$ - * -# @f$ (N \times 1 \times 1 \times 1) @f$ - * the labels -- ignored as we can't compute their error gradients - */ - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - - /// The internal SigmoidLayer used to map predictions to probabilities. - shared_ptr > sigmoid_layer_; - /// sigmoid_output stores the output of the SigmoidLayer. - shared_ptr > sigmoid_output_; - /// bottom vector holder to call the underlying SigmoidLayer::Forward - vector*> sigmoid_bottom_vec_; - /// top vector holder to call the underlying SigmoidLayer::Forward - vector*> sigmoid_top_vec_; -}; - -// Forward declare SoftmaxLayer for use in SoftmaxWithLossLayer. -template class SoftmaxLayer; - -/** - * @brief Computes the multinomial logistic loss for a one-of-many - * classification task, passing real-valued predictions through a - * softmax to get a probability distribution over classes. - * - * This layer should be preferred over separate - * SoftmaxLayer + MultinomialLogisticLossLayer - * as its gradient computation is more numerically stable. - * At test time, this layer can be replaced simply by a SoftmaxLayer. - * - * @param bottom input Blob vector (length 2) - * -# @f$ (N \times C \times H \times W) @f$ - * the predictions @f$ x @f$, a Blob with values in - * @f$ [-\infty, +\infty] @f$ indicating the predicted score for each of - * the @f$ K = CHW @f$ classes. This layer maps these scores to a - * probability distribution over classes using the softmax function - * @f$ \hat{p}_{nk} = \exp(x_{nk}) / - * \left[\sum_{k'} \exp(x_{nk'})\right] @f$ (see SoftmaxLayer). 
- * -# @f$ (N \times 1 \times 1 \times 1) @f$ - * the labels @f$ l @f$, an integer-valued Blob with values - * @f$ l_n \in [0, 1, 2, ..., K - 1] @f$ - * indicating the correct class label among the @f$ K @f$ classes - * @param top output Blob vector (length 1) - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * the computed cross-entropy classification loss: @f$ E = - * \frac{-1}{N} \sum\limits_{n=1}^N \log(\hat{p}_{n,l_n}) - * @f$, for softmax output class probabilites @f$ \hat{p} @f$ - */ -template -class SoftmaxWithLossLayer : public LossLayer { - public: - /** - * @param param provides LossParameter loss_param, with options: - * - ignore_label (optional) - * Specify a label value that should be ignored when computing the loss. - * - normalize (optional, default true) - * If true, the loss is normalized by the number of (nonignored) labels - * present; otherwise the loss is simply summed over spatial locations. - */ - explicit SoftmaxWithLossLayer(const LayerParameter& param) - : LossLayer(param) {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "SoftmaxWithLoss"; } - virtual inline int ExactNumTopBlobs() const { return -1; } - virtual inline int MinTopBlobs() const { return 1; } - virtual inline int MaxTopBlobs() const { return 2; } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - /** - * @brief Computes the softmax loss error gradient w.r.t. the predictions. - * - * Gradients cannot be computed with respect to the label inputs (bottom[1]), - * so this method ignores bottom[1] and requires !propagate_down[1], crashing - * if propagate_down[1] is set. 
- * - * @param top output Blob vector (length 1), providing the error gradient with - * respect to the outputs - * -# @f$ (1 \times 1 \times 1 \times 1) @f$ - * This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$, - * as @f$ \lambda @f$ is the coefficient of this layer's output - * @f$\ell_i@f$ in the overall Net loss - * @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence - * @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$. - * (*Assuming that this top Blob is not used as a bottom (input) by any - * other layer of the Net.) - * @param propagate_down see Layer::Backward. - * propagate_down[1] must be false as we can't compute gradients with - * respect to the labels. - * @param bottom input Blob vector (length 2) - * -# @f$ (N \times C \times H \times W) @f$ - * the predictions @f$ x @f$; Backward computes diff - * @f$ \frac{\partial E}{\partial x} @f$ - * -# @f$ (N \times 1 \times 1 \times 1) @f$ - * the labels -- ignored as we can't compute their error gradients - */ - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - - - /// The internal SoftmaxLayer used to map predictions to a distribution. - shared_ptr > softmax_layer_; - /// prob stores the output probability predictions from the SoftmaxLayer. - Blob prob_; - /// bottom vector holder used in call to the underlying SoftmaxLayer::Forward - vector*> softmax_bottom_vec_; - /// top vector holder used in call to the underlying SoftmaxLayer::Forward - vector*> softmax_top_vec_; - /// Whether to ignore instances with a certain label. - bool has_ignore_label_; - /// The label indicating that an instance should be ignored. - int ignore_label_; - /// Whether to normalize the loss by the total number of values present - /// (otherwise just by the batch size). 
- bool normalize_; - - int softmax_axis_, outer_num_, inner_num_; -}; - -} // namespace caffe - -#endif // CAFFE_LOSS_LAYERS_HPP_ diff --git a/include/caffe/python_layer.hpp b/include/caffe/python_layer.hpp index 7449c40b8c6..c43c1e8a91b 100644 --- a/include/caffe/python_layer.hpp +++ b/include/caffe/python_layer.hpp @@ -18,21 +18,13 @@ class PythonLayer : public Layer { virtual void LayerSetUp(const vector*>& bottom, const vector*>& top) { - try { - self_.attr("setup")(bottom, top); - } catch (bp::error_already_set) { - PyErr_Print(); - throw; - } + self_.attr("param_str") = bp::str( + this->layer_param_.python_param().param_str()); + self_.attr("setup")(bottom, top); } virtual void Reshape(const vector*>& bottom, const vector*>& top) { - try { - self_.attr("reshape")(bottom, top); - } catch (bp::error_already_set) { - PyErr_Print(); - throw; - } + self_.attr("reshape")(bottom, top); } virtual inline bool ShareInParallel() const { @@ -44,21 +36,11 @@ class PythonLayer : public Layer { protected: virtual void Forward_cpu(const vector*>& bottom, const vector*>& top) { - try { - self_.attr("forward")(bottom, top); - } catch (bp::error_already_set) { - PyErr_Print(); - throw; - } + self_.attr("forward")(bottom, top); } virtual void Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - try { - self_.attr("backward")(top, propagate_down, bottom); - } catch (bp::error_already_set) { - PyErr_Print(); - throw; - } + self_.attr("backward")(top, propagate_down, bottom); } private: diff --git a/include/caffe/python_layer.hpp.orig.orig b/include/caffe/python_layer.hpp.orig.orig deleted file mode 100644 index 11d27f9740c..00000000000 --- a/include/caffe/python_layer.hpp.orig.orig +++ /dev/null @@ -1,74 +0,0 @@ -#ifndef CAFFE_PYTHON_LAYER_HPP_ -#define CAFFE_PYTHON_LAYER_HPP_ - -#include -#include - -#include "caffe/layer.hpp" - -namespace bp = boost::python; - -namespace caffe { - -template -class PythonLayer : public Layer { - public: - 
PythonLayer(PyObject* self, const LayerParameter& param) - : Layer(param), self_(bp::handle<>(bp::borrowed(self))) { } - - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top) { - try { - self_.attr("setup")(bottom, top); - } catch (bp::error_already_set) { - PyErr_Print(); - throw; - } - } - - virtual void Reshape(const vector*>& bottom, - const vector*>& top) { - try { - self_.attr("reshape")(bottom, top); - } catch (bp::error_already_set) { - PyErr_Print(); - throw; - } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= - } - - virtual inline bool ShareInParallel() const { - return this->layer_param_.python_param().share_in_parallel(); ->>>>>>> triplet data generation and network update - } - - virtual inline const char* type() const { return "Python"; } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top) { - try { - self_.attr("forward")(bottom, top); - } catch (bp::error_already_set) { - PyErr_Print(); - throw; - } - } - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - try { - self_.attr("backward")(top, propagate_down, bottom); - } catch (bp::error_already_set) { - PyErr_Print(); - throw; - } - } - - private: - bp::object self_; -}; - -} // namespace caffe - -#endif diff --git a/include/caffe/solver.hpp b/include/caffe/solver.hpp index 2ecf539baef..7f92ffe7b99 100644 --- a/include/caffe/solver.hpp +++ b/include/caffe/solver.hpp @@ -82,8 +82,6 @@ class Solver { callbacks_.push_back(value); } - void CheckSnapshotWritePermissions(); - protected: // Make and apply the update value for the current iteration. 
virtual void ApplyUpdate() = 0; diff --git a/include/caffe/util/db_leveldb.hpp b/include/caffe/util/db_leveldb.hpp index e9fa0d32b66..10623554b67 100644 --- a/include/caffe/util/db_leveldb.hpp +++ b/include/caffe/util/db_leveldb.hpp @@ -1,4 +1,3 @@ -#ifdef USE_LEVELDB #ifndef CAFFE_UTIL_DB_LEVELDB_HPP #define CAFFE_UTIL_DB_LEVELDB_HPP @@ -72,4 +71,3 @@ class LevelDB : public DB { } // namespace caffe #endif // CAFFE_UTIL_DB_LEVELDB_HPP -#endif // USE_LEVELDB diff --git a/include/caffe/util/db_lmdb.hpp b/include/caffe/util/db_lmdb.hpp index 4e1568ace50..cc7c90afc4c 100644 --- a/include/caffe/util/db_lmdb.hpp +++ b/include/caffe/util/db_lmdb.hpp @@ -1,4 +1,3 @@ -#ifdef USE_LMDB #ifndef CAFFE_UTIL_DB_LMDB_HPP #define CAFFE_UTIL_DB_LMDB_HPP @@ -90,4 +89,3 @@ class LMDB : public DB { } // namespace caffe #endif // CAFFE_UTIL_DB_LMDB_HPP -#endif // USE_LMDB diff --git a/include/caffe/util/io.hpp b/include/caffe/util/io.hpp index 6070b4c7f3a..c0938ad0625 100644 --- a/include/caffe/util/io.hpp +++ b/include/caffe/util/io.hpp @@ -120,7 +120,6 @@ inline bool ReadImageToDatum(const string& filename, const int label, bool DecodeDatumNative(Datum* datum); bool DecodeDatum(Datum* datum, bool is_color); -#ifdef USE_OPENCV cv::Mat ReadImageToCVMat(const string& filename, const int height, const int width, const bool is_color); @@ -136,7 +135,6 @@ cv::Mat DecodeDatumToCVMatNative(const Datum& datum); cv::Mat DecodeDatumToCVMat(const Datum& datum, bool is_color); void CVMatToDatum(const cv::Mat& cv_img, Datum* datum); -#endif // USE_OPENCV } // namespace caffe diff --git a/include/caffe/vision_layers.hpp.orig b/include/caffe/vision_layers.hpp.orig deleted file mode 100644 index 014ebc22dc7..00000000000 --- a/include/caffe/vision_layers.hpp.orig +++ /dev/null @@ -1,549 +0,0 @@ -#ifndef CAFFE_VISION_LAYERS_HPP_ -#define CAFFE_VISION_LAYERS_HPP_ - -#include -#include -#include - -#include "caffe/blob.hpp" -#include "caffe/common.hpp" -#include "caffe/common_layers.hpp" -#include 
"caffe/data_layers.hpp" -#include "caffe/layer.hpp" -#include "caffe/loss_layers.hpp" -#include "caffe/neuron_layers.hpp" -#include "caffe/proto/caffe.pb.h" - -namespace caffe { - -/** - * @brief Abstract base class that factors out the BLAS code common to - * ConvolutionLayer and DeconvolutionLayer. - */ -template -class BaseConvolutionLayer : public Layer { - public: - explicit BaseConvolutionLayer(const LayerParameter& param) - : Layer(param) {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - - virtual inline int MinBottomBlobs() const { return 1; } - virtual inline int MinTopBlobs() const { return 1; } - virtual inline bool EqualNumBottomTopBlobs() const { return true; } - - protected: - // Helper functions that abstract away the column buffer and gemm arguments. - // The last argument in forward_cpu_gemm is so that we can skip the im2col if - // we just called weight_cpu_gemm with the same input. - void forward_cpu_gemm(const Dtype* input, const Dtype* weights, - Dtype* output, bool skip_im2col = false); - void forward_cpu_bias(Dtype* output, const Dtype* bias); - void backward_cpu_gemm(const Dtype* input, const Dtype* weights, - Dtype* output); - void weight_cpu_gemm(const Dtype* input, const Dtype* output, Dtype* - weights); - void backward_cpu_bias(Dtype* bias, const Dtype* input); - -#ifndef CPU_ONLY - void forward_gpu_gemm(const Dtype* col_input, const Dtype* weights, - Dtype* output, bool skip_im2col = false); - void forward_gpu_bias(Dtype* output, const Dtype* bias); - void backward_gpu_gemm(const Dtype* input, const Dtype* weights, - Dtype* col_output); - void weight_gpu_gemm(const Dtype* col_input, const Dtype* output, Dtype* - weights); - void backward_gpu_bias(Dtype* bias, const Dtype* input); -#endif - - /// @brief The spatial dimensions of the input. 
- inline int input_shape(int i) { - return (*bottom_shape_)[channel_axis_ + i]; - } - // reverse_dimensions should return true iff we are implementing deconv, so - // that conv helpers know which dimensions are which. - virtual bool reverse_dimensions() = 0; - // Compute height_out_ and width_out_ from other parameters. - virtual void compute_output_shape() = 0; - -<<<<<<< 6af90386551ae96a1468f15dc0bfc8ac566ea6d9 - /// @brief The spatial dimensions of a filter kernel. - Blob kernel_shape_; - /// @brief The spatial dimensions of the stride. - Blob stride_; - /// @brief The spatial dimensions of the padding. - Blob pad_; - /// @brief The spatial dimensions of the convolution input. - Blob conv_input_shape_; - /// @brief The spatial dimensions of the col_buffer. - vector col_buffer_shape_; - /// @brief The spatial dimensions of the output. - vector output_shape_; - const vector* bottom_shape_; - - int num_spatial_axes_; - int bottom_dim_; - int top_dim_; - - int channel_axis_; -======= - int kernel_h_, kernel_w_; - int stride_h_, stride_w_; ->>>>>>> triplet data generation and network update - int num_; - int channels_; - int pad_h_, pad_w_; - int height_, width_; - int group_; - int num_output_; - int height_out_, width_out_; - bool bias_term_; - bool is_1x1_; - - private: - // wrap im2col/col2im so we don't have to remember the (long) argument lists - inline void conv_im2col_cpu(const Dtype* data, Dtype* col_buff) { - im2col_cpu(data, conv_in_channels_, conv_in_height_, conv_in_width_, - kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, col_buff); - } - inline void conv_col2im_cpu(const Dtype* col_buff, Dtype* data) { - col2im_cpu(col_buff, conv_in_channels_, conv_in_height_, conv_in_width_, - kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, data); - } -#ifndef CPU_ONLY - inline void conv_im2col_gpu(const Dtype* data, Dtype* col_buff) { - im2col_gpu(data, conv_in_channels_, conv_in_height_, conv_in_width_, - kernel_h_, kernel_w_, pad_h_, pad_w_, 
stride_h_, stride_w_, col_buff); - } - inline void conv_col2im_gpu(const Dtype* col_buff, Dtype* data) { - col2im_gpu(col_buff, conv_in_channels_, conv_in_height_, conv_in_width_, - kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, data); - } -#endif - - int conv_out_channels_; - int conv_in_channels_; - int conv_out_spatial_dim_; - int conv_in_height_; - int conv_in_width_; - int kernel_dim_; - int weight_offset_; - int col_offset_; - int output_offset_; - - Blob col_buffer_; - Blob bias_multiplier_; -}; - -/** - * @brief Convolves the input image with a bank of learned filters, - * and (optionally) adds biases. - * - * Caffe convolves by reduction to matrix multiplication. This achieves - * high-throughput and generality of input and filter dimensions but comes at - * the cost of memory for matrices. This makes use of efficiency in BLAS. - * - * The input is "im2col" transformed to a channel K' x H x W data matrix - * for multiplication with the N x K' x H x W filter matrix to yield a - * N' x H x W output matrix that is then "col2im" restored. K' is the - * input channel * kernel height * kernel width dimension of the unrolled - * inputs so that the im2col matrix has a column for each input region to - * be filtered. col2im restores the output spatial structure by rolling up - * the output channel N' columns of the output matrix. - */ -template -class ConvolutionLayer : public BaseConvolutionLayer { - public: - /** - * @param param provides ConvolutionParameter convolution_param, - * with ConvolutionLayer options: - * - num_output. The number of filters. - * - kernel_size / kernel_h / kernel_w. The filter dimensions, given by - * kernel_size for square filters or kernel_h and kernel_w for rectangular - * filters. - * - stride / stride_h / stride_w (\b optional, default 1). The filter - * stride, given by stride_size for equal dimensions or stride_h and stride_w - * for different strides. By default the convolution is dense with stride 1. 
- * - pad / pad_h / pad_w (\b optional, default 0). The zero-padding for - * convolution, given by pad for equal dimensions or pad_h and pad_w for - * different padding. Input padding is computed implicitly instead of - * actually padding. - * - group (\b optional, default 1). The number of filter groups. Group - * convolution is a method for reducing parameterization by selectively - * connecting input and output channels. The input and output channel dimensions must be divisible - * by the number of groups. For group @f$ \geq 1 @f$, the - * convolutional filters' input and output channels are separated s.t. each - * group takes 1 / group of the input channels and makes 1 / group of the - * output channels. Concretely 4 input channels, 8 output channels, and - * 2 groups separate input channels 1-2 and output channels 1-4 into the - * first group and input channels 3-4 and output channels 5-8 into the second - * group. - * - bias_term (\b optional, default true). Whether to have a bias. - * - engine: convolution has CAFFE (matrix multiplication) and CUDNN (library - * kernels + stream parallelism) engines. - */ - explicit ConvolutionLayer(const LayerParameter& param) - : BaseConvolutionLayer(param) {} - - virtual inline const char* type() const { return "Convolution"; } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual inline bool reverse_dimensions() { return false; } - virtual void compute_output_shape(); -}; - -/** - * @brief Convolve the input with a bank of learned filters, and (optionally) - * add biases, treating filters and convolution parameters in the - * opposite sense as ConvolutionLayer. 
- * - * ConvolutionLayer computes each output value by dotting an input window with - * a filter; DeconvolutionLayer multiplies each input value by a filter - * elementwise, and sums over the resulting output windows. In other words, - * DeconvolutionLayer is ConvolutionLayer with the forward and backward passes - * reversed. DeconvolutionLayer reuses ConvolutionParameter for its - * parameters, but they take the opposite sense as in ConvolutionLayer (so - * padding is removed from the output rather than added to the input, and - * stride results in upsampling rather than downsampling). - */ -template -class DeconvolutionLayer : public BaseConvolutionLayer { - public: - explicit DeconvolutionLayer(const LayerParameter& param) - : BaseConvolutionLayer(param) {} - - virtual inline const char* type() const { return "Deconvolution"; } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual inline bool reverse_dimensions() { return true; } - virtual void compute_output_shape(); -}; - -#ifdef USE_CUDNN -/* - * @brief cuDNN implementation of ConvolutionLayer. - * Fallback to ConvolutionLayer for CPU mode. - * - * cuDNN accelerates convolution through forward kernels for filtering and bias - * plus backward kernels for the gradient w.r.t. the filters, biases, and - * inputs. Caffe + cuDNN further speeds up the computation through forward - * parallelism across groups and backward parallelism across gradients. - * - * The CUDNN engine does not have memory overhead for matrix buffers. 
For many - * input and filter regimes the CUDNN engine is faster than the CAFFE engine, - * but for fully-convolutional models and large inputs the CAFFE engine can be - * faster as long as it fits in memory. -*/ -template -class CuDNNConvolutionLayer : public ConvolutionLayer { - public: - explicit CuDNNConvolutionLayer(const LayerParameter& param) - : ConvolutionLayer(param), handles_setup_(false) {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - virtual ~CuDNNConvolutionLayer(); - - protected: - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - - bool handles_setup_; - cudnnHandle_t* handle_; - cudaStream_t* stream_; - vector bottom_descs_, top_descs_; - cudnnTensorDescriptor_t bias_desc_; - cudnnFilterDescriptor_t filter_desc_; - vector conv_descs_; - int bottom_offset_, top_offset_, weight_offset_, bias_offset_; - size_t workspaceSizeInBytes; - void *workspace; -}; -#endif - -/** - * @brief A helper for image operations that rearranges image regions into - * column vectors. Used by ConvolutionLayer to perform convolution - * by matrix multiplication. - * - * TODO(dox): thorough documentation for Forward, Backward, and proto params. 
- */ -template -class Im2colLayer : public Layer { - public: - explicit Im2colLayer(const LayerParameter& param) - : Layer(param) {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "Im2col"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { return 1; } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - - int kernel_h_, kernel_w_; - int stride_h_, stride_w_; - int channels_; - int height_, width_; - int pad_h_, pad_w_; -}; - -// Forward declare PoolingLayer and SplitLayer for use in LRNLayer. -template class PoolingLayer; -template class SplitLayer; - -/** - * @brief Normalize the input in a local region across or within feature maps. - * - * TODO(dox): thorough documentation for Forward, Backward, and proto params. 
- */ -template -class LRNLayer : public Layer { - public: - explicit LRNLayer(const LayerParameter& param) - : Layer(param) {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "LRN"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { return 1; } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - - virtual void CrossChannelForward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void CrossChannelForward_gpu(const vector*>& bottom, - const vector*>& top); - virtual void WithinChannelForward(const vector*>& bottom, - const vector*>& top); - virtual void CrossChannelBackward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void CrossChannelBackward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void WithinChannelBackward(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - - int size_; - int pre_pad_; - Dtype alpha_; - Dtype beta_; - Dtype k_; - int num_; - int channels_; - int height_; - int width_; - - // Fields used for normalization ACROSS_CHANNELS - // scale_ stores the intermediate summing results - Blob scale_; - - // Fields used for normalization WITHIN_CHANNEL - shared_ptr > split_layer_; - vector*> split_top_vec_; - shared_ptr > square_layer_; - Blob square_input_; - Blob square_output_; - vector*> square_bottom_vec_; - vector*> square_top_vec_; - shared_ptr > pool_layer_; - Blob pool_output_; - vector*> 
pool_top_vec_; - shared_ptr > power_layer_; - Blob power_output_; - vector*> power_top_vec_; - shared_ptr > product_layer_; - Blob product_input_; - vector*> product_bottom_vec_; -}; - - -/** - * @brief Pools the input image by taking the max, average, etc. within regions. - * - * TODO(dox): thorough documentation for Forward, Backward, and proto params. - */ -template -class PoolingLayer : public Layer { - public: - explicit PoolingLayer(const LayerParameter& param) - : Layer(param) {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "Pooling"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int MinTopBlobs() const { return 1; } - // MAX POOL layers can output an extra top blob for the mask; - // others can only output the pooled inputs. - virtual inline int MaxTopBlobs() const { - return (this->layer_param_.pooling_param().pool() == - PoolingParameter_PoolMethod_MAX) ? 2 : 1; - } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - - int kernel_h_, kernel_w_; - int stride_h_, stride_w_; - int pad_h_, pad_w_; - int channels_; - int height_, width_; - int pooled_height_, pooled_width_; - bool global_pooling_; - Blob rand_idx_; - Blob max_idx_; -}; - -#ifdef USE_CUDNN -/* - * @brief cuDNN implementation of PoolingLayer. - * Fallback to PoolingLayer for CPU mode. 
-*/ -template -class CuDNNPoolingLayer : public PoolingLayer { - public: - explicit CuDNNPoolingLayer(const LayerParameter& param) - : PoolingLayer(param), handles_setup_(false) {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - virtual ~CuDNNPoolingLayer(); - // Currently, cuDNN does not support the extra top blob. - virtual inline int MinTopBlobs() const { return -1; } - virtual inline int ExactNumTopBlobs() const { return 1; } - - protected: - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - - bool handles_setup_; - cudnnHandle_t handle_; - cudnnTensorDescriptor_t bottom_desc_, top_desc_; - cudnnPoolingDescriptor_t pooling_desc_; - cudnnPoolingMode_t mode_; -}; -#endif - -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 -/** - * @brief Does spatial pyramid pooling on the input image - * by taking the max, average, etc. within regions - * so that the result vector of different sized - * images are of the same size. 
- */ -template -class SPPLayer : public Layer { - public: - explicit SPPLayer(const LayerParameter& param) - : Layer(param) {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "SPP"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { return 1; } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - // calculates the kernel and stride dimensions for the pooling layer, - // returns a correctly configured LayerParameter for a PoolingLayer - virtual LayerParameter GetPoolingParam(const int pyramid_level, - const int bottom_h, const int bottom_w, const SPPParameter spp_param); - - int pyramid_height_; - int bottom_h_, bottom_w_; - int num_; - int channels_; - int kernel_h_, kernel_w_; - int pad_h_, pad_w_; - bool reshaped_first_time_; - - /// the internal Split layer that feeds the pooling layers - shared_ptr > split_layer_; - /// top vector holder used in call to the underlying SplitLayer::Forward - vector*> split_top_vec_; - /// bottom vector holder used in call to the underlying PoolingLayer::Forward - vector*>*> pooling_bottom_vecs_; - /// the internal Pooling layers of different kernel sizes - vector > > pooling_layers_; - /// top vector holders used in call to the underlying PoolingLayer::Forward - vector*>*> pooling_top_vecs_; - /// pooling_outputs stores the outputs of the PoolingLayers - vector*> pooling_outputs_; - /// the internal Flatten layers that the Pooling layers feed into - vector*> flatten_layers_; - /// top vector holders used in call to the underlying FlattenLayer::Forward - vector*>*> flatten_top_vecs_; - /// flatten_outputs stores the outputs of the FlattenLayers - vector*> flatten_outputs_; - /// 
bottom vector holder used in call to the underlying ConcatLayer::Forward - vector*> concat_bottom_vec_; - /// the internal Concat layers that the Flatten layers feed into - shared_ptr > concat_layer_; -}; - -======= ->>>>>>> triplet data generation and network update -} // namespace caffe - -#endif // CAFFE_VISION_LAYERS_HPP_ diff --git a/include/caffe/vision_layers.hpp.orig.orig b/include/caffe/vision_layers.hpp.orig.orig deleted file mode 100644 index 800a71a26f9..00000000000 --- a/include/caffe/vision_layers.hpp.orig.orig +++ /dev/null @@ -1,555 +0,0 @@ -#ifndef CAFFE_VISION_LAYERS_HPP_ -#define CAFFE_VISION_LAYERS_HPP_ - -#include -#include -#include - -#include "caffe/blob.hpp" -#include "caffe/common.hpp" -#include "caffe/common_layers.hpp" -#include "caffe/data_layers.hpp" -#include "caffe/layer.hpp" -#include "caffe/loss_layers.hpp" -#include "caffe/neuron_layers.hpp" -#include "caffe/proto/caffe.pb.h" - -namespace caffe { - -/** - * @brief Abstract base class that factors out the BLAS code common to - * ConvolutionLayer and DeconvolutionLayer. - */ -template -class BaseConvolutionLayer : public Layer { - public: - explicit BaseConvolutionLayer(const LayerParameter& param) - : Layer(param) {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - - virtual inline int MinBottomBlobs() const { return 1; } - virtual inline int MinTopBlobs() const { return 1; } - virtual inline bool EqualNumBottomTopBlobs() const { return true; } - - protected: - // Helper functions that abstract away the column buffer and gemm arguments. - // The last argument in forward_cpu_gemm is so that we can skip the im2col if - // we just called weight_cpu_gemm with the same input. 
- void forward_cpu_gemm(const Dtype* input, const Dtype* weights, - Dtype* output, bool skip_im2col = false); - void forward_cpu_bias(Dtype* output, const Dtype* bias); - void backward_cpu_gemm(const Dtype* input, const Dtype* weights, - Dtype* output); - void weight_cpu_gemm(const Dtype* input, const Dtype* output, Dtype* - weights); - void backward_cpu_bias(Dtype* bias, const Dtype* input); - -#ifndef CPU_ONLY - void forward_gpu_gemm(const Dtype* col_input, const Dtype* weights, - Dtype* output, bool skip_im2col = false); - void forward_gpu_bias(Dtype* output, const Dtype* bias); - void backward_gpu_gemm(const Dtype* input, const Dtype* weights, - Dtype* col_output); - void weight_gpu_gemm(const Dtype* col_input, const Dtype* output, Dtype* - weights); - void backward_gpu_bias(Dtype* bias, const Dtype* input); -#endif - - // reverse_dimensions should return true iff we are implementing deconv, so - // that conv helpers know which dimensions are which. - virtual bool reverse_dimensions() = 0; - // Compute height_out_ and width_out_ from other parameters. 
- virtual void compute_output_shape() = 0; - - int kernel_h_, kernel_w_; - int stride_h_, stride_w_; - int num_; - int channels_; - int pad_h_, pad_w_; - int height_, width_; - int group_; - int num_output_; - int height_out_, width_out_; - bool bias_term_; - bool is_1x1_; - - private: - // wrap im2col/col2im so we don't have to remember the (long) argument lists - inline void conv_im2col_cpu(const Dtype* data, Dtype* col_buff) { - im2col_cpu(data, conv_in_channels_, conv_in_height_, conv_in_width_, - kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, col_buff); - } - inline void conv_col2im_cpu(const Dtype* col_buff, Dtype* data) { - col2im_cpu(col_buff, conv_in_channels_, conv_in_height_, conv_in_width_, - kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, data); - } -#ifndef CPU_ONLY - inline void conv_im2col_gpu(const Dtype* data, Dtype* col_buff) { - im2col_gpu(data, conv_in_channels_, conv_in_height_, conv_in_width_, - kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, col_buff); - } - inline void conv_col2im_gpu(const Dtype* col_buff, Dtype* data) { - col2im_gpu(col_buff, conv_in_channels_, conv_in_height_, conv_in_width_, - kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, data); - } -#endif - - int conv_out_channels_; - int conv_in_channels_; - int conv_out_spatial_dim_; - int conv_in_height_; - int conv_in_width_; - int kernel_dim_; - int weight_offset_; - int col_offset_; - int output_offset_; - - Blob col_buffer_; - Blob bias_multiplier_; -}; - -/** - * @brief Convolves the input image with a bank of learned filters, - * and (optionally) adds biases. - * - * Caffe convolves by reduction to matrix multiplication. This achieves - * high-throughput and generality of input and filter dimensions but comes at - * the cost of memory for matrices. This makes use of efficiency in BLAS. 
- * - * The input is "im2col" transformed to a channel K' x H x W data matrix - * for multiplication with the N x K' x H x W filter matrix to yield a - * N' x H x W output matrix that is then "col2im" restored. K' is the - * input channel * kernel height * kernel width dimension of the unrolled - * inputs so that the im2col matrix has a column for each input region to - * be filtered. col2im restores the output spatial structure by rolling up - * the output channel N' columns of the output matrix. - */ -template -class ConvolutionLayer : public BaseConvolutionLayer { - public: - /** - * @param param provides ConvolutionParameter convolution_param, - * with ConvolutionLayer options: - * - num_output. The number of filters. - * - kernel_size / kernel_h / kernel_w. The filter dimensions, given by - * kernel_size for square filters or kernel_h and kernel_w for rectangular - * filters. - * - stride / stride_h / stride_w (\b optional, default 1). The filter - * stride, given by stride_size for equal dimensions or stride_h and stride_w - * for different strides. By default the convolution is dense with stride 1. - * - pad / pad_h / pad_w (\b optional, default 0). The zero-padding for - * convolution, given by pad for equal dimensions or pad_h and pad_w for - * different padding. Input padding is computed implicitly instead of - * actually padding. - * - group (\b optional, default 1). The number of filter groups. Group - * convolution is a method for reducing parameterization by selectively - * connecting input and output channels. The input and output channel dimensions must be divisible - * by the number of groups. For group @f$ \geq 1 @f$, the - * convolutional filters' input and output channels are separated s.t. each - * group takes 1 / group of the input channels and makes 1 / group of the - * output channels. 
Concretely 4 input channels, 8 output channels, and - * 2 groups separate input channels 1-2 and output channels 1-4 into the - * first group and input channels 3-4 and output channels 5-8 into the second - * group. - * - bias_term (\b optional, default true). Whether to have a bias. - * - engine: convolution has CAFFE (matrix multiplication) and CUDNN (library - * kernels + stream parallelism) engines. - */ - explicit ConvolutionLayer(const LayerParameter& param) - : BaseConvolutionLayer(param) {} - - virtual inline const char* type() const { return "Convolution"; } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual inline bool reverse_dimensions() { return false; } - virtual void compute_output_shape(); -}; - -/** - * @brief Convolve the input with a bank of learned filters, and (optionally) - * add biases, treating filters and convolution parameters in the - * opposite sense as ConvolutionLayer. - * - * ConvolutionLayer computes each output value by dotting an input window with - * a filter; DeconvolutionLayer multiplies each input value by a filter - * elementwise, and sums over the resulting output windows. In other words, - * DeconvolutionLayer is ConvolutionLayer with the forward and backward passes - * reversed. DeconvolutionLayer reuses ConvolutionParameter for its - * parameters, but they take the opposite sense as in ConvolutionLayer (so - * padding is removed from the output rather than added to the input, and - * stride results in upsampling rather than downsampling). 
- */ -template -class DeconvolutionLayer : public BaseConvolutionLayer { - public: - explicit DeconvolutionLayer(const LayerParameter& param) - : BaseConvolutionLayer(param) {} - - virtual inline const char* type() const { return "Deconvolution"; } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual inline bool reverse_dimensions() { return true; } - virtual void compute_output_shape(); -}; - -#ifdef USE_CUDNN -/* - * @brief cuDNN implementation of ConvolutionLayer. - * Fallback to ConvolutionLayer for CPU mode. - * - * cuDNN accelerates convolution through forward kernels for filtering and bias - * plus backward kernels for the gradient w.r.t. the filters, biases, and - * inputs. Caffe + cuDNN further speeds up the computation through forward - * parallelism across groups and backward parallelism across gradients. - * - * The CUDNN engine does not have memory overhead for matrix buffers. For many - * input and filter regimes the CUDNN engine is faster than the CAFFE engine, - * but for fully-convolutional models and large inputs the CAFFE engine can be - * faster as long as it fits in memory. 
-*/ -template -class CuDNNConvolutionLayer : public ConvolutionLayer { - public: - explicit CuDNNConvolutionLayer(const LayerParameter& param) - : ConvolutionLayer(param), handles_setup_(false) {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - virtual ~CuDNNConvolutionLayer(); - - protected: - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - - bool handles_setup_; - cudnnHandle_t* handle_; - cudaStream_t* stream_; - vector bottom_descs_, top_descs_; - cudnnTensorDescriptor_t bias_desc_; - cudnnFilterDescriptor_t filter_desc_; - vector conv_descs_; - int bottom_offset_, top_offset_, weight_offset_, bias_offset_; - size_t workspaceSizeInBytes; - void *workspace; -}; -#endif - -/** - * @brief A helper for image operations that rearranges image regions into - * column vectors. Used by ConvolutionLayer to perform convolution - * by matrix multiplication. - * - * TODO(dox): thorough documentation for Forward, Backward, and proto params. 
- */ -template -class Im2colLayer : public Layer { - public: - explicit Im2colLayer(const LayerParameter& param) - : Layer(param) {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "Im2col"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { return 1; } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - - int kernel_h_, kernel_w_; - int stride_h_, stride_w_; - int channels_; - int height_, width_; - int pad_h_, pad_w_; -}; - -// Forward declare PoolingLayer and SplitLayer for use in LRNLayer. -template class PoolingLayer; -template class SplitLayer; - -/** - * @brief Normalize the input in a local region across or within feature maps. - * - * TODO(dox): thorough documentation for Forward, Backward, and proto params. 
- */ -template -class LRNLayer : public Layer { - public: - explicit LRNLayer(const LayerParameter& param) - : Layer(param) {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "LRN"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int ExactNumTopBlobs() const { return 1; } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - - virtual void CrossChannelForward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void CrossChannelForward_gpu(const vector*>& bottom, - const vector*>& top); - virtual void WithinChannelForward(const vector*>& bottom, - const vector*>& top); - virtual void CrossChannelBackward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void CrossChannelBackward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void WithinChannelBackward(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - - int size_; - int pre_pad_; - Dtype alpha_; - Dtype beta_; - Dtype k_; - int num_; - int channels_; - int height_; - int width_; - - // Fields used for normalization ACROSS_CHANNELS - // scale_ stores the intermediate summing results - Blob scale_; - - // Fields used for normalization WITHIN_CHANNEL - shared_ptr > split_layer_; - vector*> split_top_vec_; - shared_ptr > square_layer_; - Blob square_input_; - Blob square_output_; - vector*> square_bottom_vec_; - vector*> square_top_vec_; - shared_ptr > pool_layer_; - Blob pool_output_; - vector*> 
pool_top_vec_; - shared_ptr > power_layer_; - Blob power_output_; - vector*> power_top_vec_; - shared_ptr > product_layer_; - Blob product_input_; - vector*> product_bottom_vec_; -}; - - -/** - * @brief Pools the input image by taking the max, average, etc. within regions. - * - * TODO(dox): thorough documentation for Forward, Backward, and proto params. - */ -template -class PoolingLayer : public Layer { - public: - explicit PoolingLayer(const LayerParameter& param) - : Layer(param) {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "Pooling"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int MinTopBlobs() const { return 1; } - // MAX POOL layers can output an extra top blob for the mask; - // others can only output the pooled inputs. - virtual inline int MaxTopBlobs() const { - return (this->layer_param_.pooling_param().pool() == - PoolingParameter_PoolMethod_MAX) ? 2 : 1; - } - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - - int kernel_h_, kernel_w_; - int stride_h_, stride_w_; - int pad_h_, pad_w_; - int channels_; - int height_, width_; - int pooled_height_, pooled_width_; - bool global_pooling_; - Blob rand_idx_; - Blob max_idx_; -}; - -#ifdef USE_CUDNN -/* - * @brief cuDNN implementation of PoolingLayer. - * Fallback to PoolingLayer for CPU mode. 
-*/ -template -class CuDNNPoolingLayer : public PoolingLayer { - public: - explicit CuDNNPoolingLayer(const LayerParameter& param) - : PoolingLayer(param), handles_setup_(false) {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - virtual ~CuDNNPoolingLayer(); - // Currently, cuDNN does not support the extra top blob. - virtual inline int MinTopBlobs() const { return -1; } - virtual inline int ExactNumTopBlobs() const { return 1; } - - protected: - virtual void Forward_gpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - - bool handles_setup_; - cudnnHandle_t handle_; - cudnnTensorDescriptor_t bottom_desc_, top_desc_; - cudnnPoolingDescriptor_t pooling_desc_; - cudnnPoolingMode_t mode_; -}; -#endif - -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 ->>>>>>> triplet data generation and network update -/** - * @brief Does spatial pyramid pooling on the input image - * by taking the max, average, etc. within regions - * so that the result vector of different sized - * images are of the same size. 
- */ -template -class SPPLayer : public Layer { - public: - explicit SPPLayer(const LayerParameter& param) - : Layer(param) {} - virtual void LayerSetUp(const vector*>& bottom, - const vector*>& top); - virtual void Reshape(const vector*>& bottom, - const vector*>& top); - - virtual inline const char* type() const { return "SPP"; } - virtual inline int ExactNumBottomBlobs() const { return 1; } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -<<<<<<< 4d8130bab0c7311e241e40f504e5fdbb4999bd84 - virtual inline int ExactNumTopBlobs() const { return 1; } -======= - virtual inline int MinTopBlobs() const { return 1; } - // MAX POOL layers can output an extra top blob for the mask; - // others can only output the pooled inputs. - virtual inline int MaxTopBlobs() const { - return (this->layer_param_.pooling_param().pool() == - PoolingParameter_PoolMethod_MAX) ? 2 : 1; - } ->>>>>>> macro define in upgrade_proto -======= - virtual inline int ExactNumTopBlobs() const { return 1; } ->>>>>>> triplet data generation and network update - - protected: - virtual void Forward_cpu(const vector*>& bottom, - const vector*>& top); - virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom); - // calculates the kernel and stride dimensions for the pooling layer, - // returns a correctly configured LayerParameter for a PoolingLayer - virtual LayerParameter GetPoolingParam(const int pyramid_level, - const int bottom_h, const int bottom_w, const SPPParameter spp_param); - - int pyramid_height_; - int bottom_h_, bottom_w_; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -<<<<<<< 4d8130bab0c7311e241e40f504e5fdbb4999bd84 -======= ->>>>>>> triplet data generation and network update - int num_; - int channels_; - int kernel_h_, kernel_w_; - int pad_h_, pad_w_; - bool reshaped_first_time_; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= - int channels_; - int kernel_h_, kernel_w_; - int pad_h_, pad_w_; ->>>>>>> macro define in 
upgrade_proto -======= ->>>>>>> triplet data generation and network update - - /// the internal Split layer that feeds the pooling layers - shared_ptr > split_layer_; - /// top vector holder used in call to the underlying SplitLayer::Forward - vector*> split_top_vec_; - /// bottom vector holder used in call to the underlying PoolingLayer::Forward - vector*>*> pooling_bottom_vecs_; - /// the internal Pooling layers of different kernel sizes - vector > > pooling_layers_; - /// top vector holders used in call to the underlying PoolingLayer::Forward - vector*>*> pooling_top_vecs_; - /// pooling_outputs stores the outputs of the PoolingLayers - vector*> pooling_outputs_; - /// the internal Flatten layers that the Pooling layers feed into - vector*> flatten_layers_; - /// top vector holders used in call to the underlying FlattenLayer::Forward - vector*>*> flatten_top_vecs_; - /// flatten_outputs stores the outputs of the FlattenLayers - vector*> flatten_outputs_; - /// bottom vector holder used in call to the underlying ConcatLayer::Forward - vector*> concat_bottom_vec_; - /// the internal Concat layers that the Flatten layers feed into - shared_ptr > concat_layer_; -}; - -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -======= ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update -} // namespace caffe - -#endif // CAFFE_VISION_LAYERS_HPP_ diff --git a/python/caffe/__init__.py b/python/caffe/__init__.py index ccda1bcae4f..6cc44e729f4 100644 --- a/python/caffe/__init__.py +++ b/python/caffe/__init__.py @@ -1,4 +1,4 @@ -from .pycaffe import Net, SGDSolver, NesterovSolver, AdaGradSolver, RMSPropSolver, AdaDeltaSolver, AdamSolver +from .pycaffe import Net, SGDSolver from ._caffe import set_mode_cpu, set_mode_gpu, set_device, Layer, get_solver, layer_type_list from .proto.caffe_pb2 import TRAIN, TEST from .classifier import Classifier diff --git a/python/caffe/_caffe.cpp b/python/caffe/_caffe.cpp index 
ccd5776ac40..cc49f60ab13 100644 --- a/python/caffe/_caffe.cpp +++ b/python/caffe/_caffe.cpp @@ -297,15 +297,6 @@ BOOST_PYTHON_MODULE(_caffe) { bp::class_, bp::bases >, shared_ptr >, boost::noncopyable>( "AdaGradSolver", bp::init()); - bp::class_, bp::bases >, - shared_ptr >, boost::noncopyable>( - "RMSPropSolver", bp::init()); - bp::class_, bp::bases >, - shared_ptr >, boost::noncopyable>( - "AdaDeltaSolver", bp::init()); - bp::class_, bp::bases >, - shared_ptr >, boost::noncopyable>( - "AdamSolver", bp::init()); bp::def("get_solver", &GetSolverFromFile, bp::return_value_policy()); diff --git a/python/caffe/io.py b/python/caffe/io.py index 0cad7211291..fc96266085f 100644 --- a/python/caffe/io.py +++ b/python/caffe/io.py @@ -329,7 +329,7 @@ def resize_image(im, new_dims, interp_order=1): return ret else: # ndimage interpolates anything but more slowly. - scale = tuple(np.array(new_dims, dtype=float) / np.array(im.shape[:2])) + scale = tuple(np.array(new_dims) / np.array(im.shape[:2])) resized_im = zoom(im, scale + (1,), order=interp_order) return resized_im.astype(np.float32) diff --git a/python/caffe/net_spec.py b/python/caffe/net_spec.py index 93fc01927db..77a0e0070ae 100644 --- a/python/caffe/net_spec.py +++ b/python/caffe/net_spec.py @@ -56,14 +56,8 @@ def to_proto(*tops): def assign_proto(proto, name, val): """Assign a Python object to a protobuf message, based on the Python type (in recursive fashion). Lists become repeated fields/messages, dicts - become messages, and other types are assigned directly. 
For convenience, - repeated fields whose values are not lists are converted to single-element - lists; e.g., `my_repeated_int_field=3` is converted to - `my_repeated_int_field=[3]`.""" - - is_repeated_field = hasattr(getattr(proto, name), 'extend') - if is_repeated_field and not isinstance(val, list): - val = [val] + become messages, and other types are assigned directly.""" + if isinstance(val, list): if isinstance(val[0], dict): for item in val: diff --git a/python/caffe/pycaffe.py b/python/caffe/pycaffe.py index 8ea24da4fdd..4f980a92c38 100644 --- a/python/caffe/pycaffe.py +++ b/python/caffe/pycaffe.py @@ -10,8 +10,7 @@ from itertools import zip_longest as izip_longest import numpy as np -from ._caffe import Net, SGDSolver, NesterovSolver, AdaGradSolver, \ - RMSPropSolver, AdaDeltaSolver, AdamSolver +from ._caffe import Net, SGDSolver import caffe.io # We directly update methods from Net here (rather than using composition or diff --git a/python/caffe/test/test_layer_type_list.py b/python/caffe/test/test_layer_type_list.py index 47f4cf6d008..7edc80df069 100644 --- a/python/caffe/test/test_layer_type_list.py +++ b/python/caffe/test/test_layer_type_list.py @@ -5,7 +5,6 @@ class TestLayerTypeList(unittest.TestCase): def test_standard_types(self): - #removing 'Data' from list for type_name in ['Data', 'Convolution', 'InnerProduct']: self.assertIn(type_name, caffe.layer_type_list(), '%s not in layer_type_list()' % type_name) diff --git a/python/caffe/test/test_net_spec.py.orig b/python/caffe/test/test_net_spec.py.orig deleted file mode 100644 index 84d50797e02..00000000000 --- a/python/caffe/test/test_net_spec.py.orig +++ /dev/null @@ -1,88 +0,0 @@ -import unittest -import tempfile -import caffe -from caffe import layers as L -from caffe import params as P - -def lenet(batch_size): - n = caffe.NetSpec() - n.data, n.label = L.DummyData(shape=[dict(dim=[batch_size, 1, 28, 28]), - dict(dim=[batch_size, 1, 1, 1])], - transform_param=dict(scale=1./255), ntop=2) - n.conv1 
= L.Convolution(n.data, kernel_size=5, num_output=20, - weight_filler=dict(type='xavier')) - n.pool1 = L.Pooling(n.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX) - n.conv2 = L.Convolution(n.pool1, kernel_size=5, num_output=50, - weight_filler=dict(type='xavier')) - n.pool2 = L.Pooling(n.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX) - n.ip1 = L.InnerProduct(n.pool2, num_output=500, - weight_filler=dict(type='xavier')) - n.relu1 = L.ReLU(n.ip1, in_place=True) - n.ip2 = L.InnerProduct(n.relu1, num_output=10, - weight_filler=dict(type='xavier')) - n.loss = L.SoftmaxWithLoss(n.ip2, n.label) - return n.to_proto() - -def anon_lenet(batch_size): - data, label = L.DummyData(shape=[dict(dim=[batch_size, 1, 28, 28]), - dict(dim=[batch_size, 1, 1, 1])], - transform_param=dict(scale=1./255), ntop=2) - conv1 = L.Convolution(data, kernel_size=5, num_output=20, - weight_filler=dict(type='xavier')) - pool1 = L.Pooling(conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX) - conv2 = L.Convolution(pool1, kernel_size=5, num_output=50, - weight_filler=dict(type='xavier')) - pool2 = L.Pooling(conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX) - ip1 = L.InnerProduct(pool2, num_output=500, - weight_filler=dict(type='xavier')) - relu1 = L.ReLU(ip1, in_place=True) - ip2 = L.InnerProduct(relu1, num_output=10, - weight_filler=dict(type='xavier')) - loss = L.SoftmaxWithLoss(ip2, label) - return loss.to_proto() - -def silent_net(): - n = caffe.NetSpec() -<<<<<<< b30868495fbae44b9556c621a319178d919bf562:python/caffe/test/test_net_spec.py -======= -<<<<<<< 273512afc2a5d2c56027b96ffeda45809d92328e - n.data, n.data2 = L.DummyData(shape=dict(dim=3), ntop=2) -======= ->>>>>>> add initiate class name of triplet loss layer:python/caffe/test/test_net_spec.py.orig - n.data, n.data2 = L.DummyData(shape=[dict(dim=[3]), dict(dim=[4, 2])], - ntop=2) - n.silence_data = L.Silence(n.data, ntop=0) - n.silence_data2 = L.Silence(n.data2, ntop=0) - return n.to_proto() - -class 
TestNetSpec(unittest.TestCase): - def load_net(self, net_proto): - f = tempfile.NamedTemporaryFile(mode='w+', delete=False) - f.write(str(net_proto)) - f.close() - return caffe.Net(f.name, caffe.TEST) - - def test_lenet(self): - """Construct and build the Caffe version of LeNet.""" - - net_proto = lenet(50) - # check that relu is in-place - self.assertEqual(net_proto.layer[6].bottom, - net_proto.layer[6].top) - net = self.load_net(net_proto) - # check that all layers are present - self.assertEqual(len(net.layers), 9) - - # now the check the version with automatically-generated layer names - net_proto = anon_lenet(50) - self.assertEqual(net_proto.layer[6].bottom, - net_proto.layer[6].top) - net = self.load_net(net_proto) - self.assertEqual(len(net.layers), 9) - - def test_zero_tops(self): - """Test net construction for top-less layers.""" - - net_proto = silent_net() - net = self.load_net(net_proto) - self.assertEqual(len(net.forward()), 0) diff --git a/python/caffe/test/test_python_layer.py.orig b/python/caffe/test/test_python_layer.py.orig deleted file mode 100644 index 722539428e1..00000000000 --- a/python/caffe/test/test_python_layer.py.orig +++ /dev/null @@ -1,153 +0,0 @@ -import unittest -import tempfile -import os -import six - -import caffe - - -class SimpleLayer(caffe.Layer): - """A layer that just multiplies by ten""" - - def setup(self, bottom, top): - pass - - def reshape(self, bottom, top): - top[0].reshape(*bottom[0].data.shape) - - def forward(self, bottom, top): - top[0].data[...] = 10 * bottom[0].data - - def backward(self, top, propagate_down, bottom): - bottom[0].diff[...] 
= 10 * top[0].diff - - -class ExceptionLayer(caffe.Layer): - """A layer for checking exceptions from Python""" - - def setup(self, bottom, top): - raise RuntimeError - -<<<<<<< 0edd44bede38fe9ded0a0f91ed29fbdab54557a5 -class ParameterLayer(caffe.Layer): - """A layer that just multiplies by ten""" - - def setup(self, bottom, top): - self.blobs.add_blob(1) - self.blobs[0].data[0] = 0 - - def reshape(self, bottom, top): - top[0].reshape(*bottom[0].data.shape) - - def forward(self, bottom, top): - pass - - def backward(self, top, propagate_down, bottom): - self.blobs[0].diff[0] = 1 -======= ->>>>>>> [pytest] check that Python receives (correct) exceptions from Python layers - -def python_net_file(): - with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f: - f.write("""name: 'pythonnet' force_backward: true - input: 'data' input_shape { dim: 10 dim: 9 dim: 8 } - layer { type: 'Python' name: 'one' bottom: 'data' top: 'one' - python_param { module: 'test_python_layer' layer: 'SimpleLayer' } } - layer { type: 'Python' name: 'two' bottom: 'one' top: 'two' - python_param { module: 'test_python_layer' layer: 'SimpleLayer' } } - layer { type: 'Python' name: 'three' bottom: 'two' top: 'three' - python_param { module: 'test_python_layer' layer: 'SimpleLayer' } }""") - return f.name - - -def exception_net_file(): -<<<<<<< 0edd44bede38fe9ded0a0f91ed29fbdab54557a5 - with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f: -======= - with tempfile.NamedTemporaryFile(delete=False) as f: ->>>>>>> [pytest] check that Python receives (correct) exceptions from Python layers - f.write("""name: 'pythonnet' force_backward: true - input: 'data' input_shape { dim: 10 dim: 9 dim: 8 } - layer { type: 'Python' name: 'layer' bottom: 'data' top: 'top' - python_param { module: 'test_python_layer' layer: 'ExceptionLayer' } } - """) - return f.name - - -<<<<<<< 0edd44bede38fe9ded0a0f91ed29fbdab54557a5 -def parameter_net_file(): - with tempfile.NamedTemporaryFile(mode='w+', 
delete=False) as f: - f.write("""name: 'pythonnet' force_backward: true - input: 'data' input_shape { dim: 10 dim: 9 dim: 8 } - layer { type: 'Python' name: 'layer' bottom: 'data' top: 'top' - python_param { module: 'test_python_layer' layer: 'ParameterLayer' } } - """) - return f.name - - -======= ->>>>>>> [pytest] check that Python receives (correct) exceptions from Python layers -class TestPythonLayer(unittest.TestCase): - def setUp(self): - net_file = python_net_file() - self.net = caffe.Net(net_file, caffe.TRAIN) - os.remove(net_file) - - def test_forward(self): - x = 8 - self.net.blobs['data'].data[...] = x - self.net.forward() - for y in self.net.blobs['three'].data.flat: - self.assertEqual(y, 10**3 * x) - - def test_backward(self): - x = 7 - self.net.blobs['three'].diff[...] = x - self.net.backward() - for y in self.net.blobs['data'].diff.flat: - self.assertEqual(y, 10**3 * x) - - def test_reshape(self): - s = 4 - self.net.blobs['data'].reshape(s, s, s, s) - self.net.forward() - for blob in six.itervalues(self.net.blobs): - for d in blob.data.shape: - self.assertEqual(s, d) - - def test_exception(self): - net_file = exception_net_file() - self.assertRaises(RuntimeError, caffe.Net, net_file, caffe.TEST) - os.remove(net_file) -<<<<<<< 0edd44bede38fe9ded0a0f91ed29fbdab54557a5 - - def test_parameter(self): - net_file = parameter_net_file() - net = caffe.Net(net_file, caffe.TRAIN) - # Test forward and backward - net.forward() - net.backward() - layer = net.layers[list(net._layer_names).index('layer')] - self.assertEqual(layer.blobs[0].data[0], 0) - self.assertEqual(layer.blobs[0].diff[0], 1) - layer.blobs[0].data[0] += layer.blobs[0].diff[0] - self.assertEqual(layer.blobs[0].data[0], 1) - - # Test saving and loading - h, caffemodel_file = tempfile.mkstemp() - net.save(caffemodel_file) - layer.blobs[0].data[0] = -1 - self.assertEqual(layer.blobs[0].data[0], -1) - net.copy_from(caffemodel_file) - self.assertEqual(layer.blobs[0].data[0], 1) - 
os.remove(caffemodel_file) - - # Test weight sharing - net2 = caffe.Net(net_file, caffe.TRAIN) - net2.share_with(net) - layer = net.layers[list(net2._layer_names).index('layer')] - self.assertEqual(layer.blobs[0].data[0], 1) - - os.remove(net_file) -======= ->>>>>>> [pytest] check that Python receives (correct) exceptions from Python layers diff --git a/scripts/travis/travis_build_and_test.sh b/scripts/travis/travis_build_and_test.sh index 174f1ee5a0a..9ba737e28a9 100755 --- a/scripts/travis/travis_build_and_test.sh +++ b/scripts/travis/travis_build_and_test.sh @@ -1,6 +1,5 @@ #!/bin/bash -# Script called by Travis to build and test Caffe. -# Travis CI tests are CPU-only for lack of compatible hardware. +# Script called by Travis to do a CPU-only build of and test Caffe. set -e MAKE="make --jobs=$NUM_THREADS --keep-going" @@ -16,12 +15,7 @@ if $WITH_CMAKE; then if [ "$PYTHON_VERSION" = "3" ]; then PYTHON_ARGS="$PYTHON_ARGS -Dpython_version=3 -DBOOST_LIBRARYDIR=$CONDA_DIR/lib/" fi - if $WITH_IO; then - IO_ARGS="-DUSE_OPENCV=ON -DUSE_LMDB=ON -DUSE_LEVELDB=ON" - else - IO_ARGS="-DUSE_OPENCV=OFF -DUSE_LMDB=OFF -DUSE_LEVELDB=OFF" - fi - cmake -DBUILD_python=ON -DCMAKE_BUILD_TYPE=Release $CPU_ONLY $PYTHON_ARGS -DCMAKE_INCLUDE_PATH="$CONDA_DIR/include/" -DCMAKE_LIBRARY_PATH="$CONDA_DIR/lib/" $IO_ARGS .. + cmake -DBUILD_python=ON -DCMAKE_BUILD_TYPE=Release $CPU_ONLY $PYTHON_ARGS -DCMAKE_INCLUDE_PATH="$CONDA_DIR/include/" -DCMAKE_LIBRARY_PATH="$CONDA_DIR/lib/" .. $MAKE $MAKE pytest if ! $WITH_CUDA; then @@ -34,11 +28,6 @@ else if ! $WITH_CUDA; then export CPU_ONLY=1 fi - if $WITH_IO; then - export USE_LMDB=1 - export USE_LEVELDB=1 - export USE_OPENCV=1 - fi $MAKE all test pycaffe warn lint || true if ! 
$WITH_CUDA; then $MAKE runtest diff --git a/scripts/travis/travis_setup_makefile_config.sh b/scripts/travis/travis_setup_makefile_config.sh index 83aacf11fb0..1440be2af8b 100755 --- a/scripts/travis/travis_setup_makefile_config.sh +++ b/scripts/travis/travis_setup_makefile_config.sh @@ -11,12 +11,6 @@ if $WITH_CUDA; then echo "CUDA_ARCH := $GENCODE" >> Makefile.config fi -# Remove IO library settings from Makefile.config -# to avoid conflicts with CI configuration -sed -i -e '/USE_LMDB/d' Makefile.config -sed -i -e '/USE_LEVELDB/d' Makefile.config -sed -i -e '/USE_OPENCV/d' Makefile.config - cat << 'EOF' >> Makefile.config # Travis' nvcc doesn't like newer boost versions NVCCFLAGS := -Xcudafe --diag_suppress=cc_clobber_ignored -Xcudafe --diag_suppress=useless_using_declaration -Xcudafe --diag_suppress=set_but_not_used diff --git a/src/caffe/data_transformer.cpp b/src/caffe/data_transformer.cpp index e5a8af55f27..4666d9bd881 100644 --- a/src/caffe/data_transformer.cpp +++ b/src/caffe/data_transformer.cpp @@ -1,6 +1,4 @@ -#ifdef USE_OPENCV #include -#endif // USE_OPENCV #include #include @@ -126,46 +124,12 @@ void DataTransformer::Transform(const Datum& datum, } } - template void DataTransformer::Transform(const Datum& datum, Blob* transformed_blob) { -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD // If datum is encoded, decoded and transform the cv::image. if (datum.encoded()) { -<<<<<<< fa941056111876e03082cdc5695d75339ed24ed9 -#ifdef USE_OPENCV - CHECK(!(param_.force_color() && param_.force_gray())) -======= -<<<<<<< HEAD -<<<<<<< HEAD CHECK(!(param_.force_color() && param_.force_gray())) -======= -<<<<<<< HEAD - CHECK(!param_.force_color() && !param_.force_gray()) ->>>>>>> 011aef0... restore -======= - CHECK(!(param_.force_color() && param_.force_gray())) ->>>>>>> d2acfed... fixed _force_color check, fixes #2635 -======= - // If datum is encoded, decoded and transform the cv::image. 
- if (datum.encoded()) { -<<<<<<< HEAD - CHECK(!param_.force_color() && !param_.force_gray()) ->>>>>>> 011aef0... restore -<<<<<<< HEAD ->>>>>>> 00341b2... triplet data generation and network update -<<<<<<< bd900fc47efb4f9fe6c0698d66ca08f7a5c1ed58 ->>>>>>> triplet data generation and network update -======= -======= -======= - CHECK(!(param_.force_color() && param_.force_gray())) ->>>>>>> d2acfed... fixed _force_color check, fixes #2635 ->>>>>>> 1882ac9... add initiate class name of triplet loss layer ->>>>>>> add initiate class name of triplet loss layer << "cannot set both force_color and force_gray"; cv::Mat cv_img; if (param_.force_color() || param_.force_gray()) { @@ -176,9 +140,6 @@ void DataTransformer::Transform(const Datum& datum, } // Transform the cv::image into blob. return Transform(cv_img, transformed_blob); -#else - LOG(FATAL) << "Encoded datum requires OpenCV; compile with USE_OPENCV."; -#endif // USE_OPENCV } else { if (param_.force_color() || param_.force_gray()) { LOG(ERROR) << "force_color and force_gray only for encoded datum"; @@ -186,17 +147,11 @@ void DataTransformer::Transform(const Datum& datum, } const int crop_size = param_.crop_size(); -<<<<<<< HEAD -======= ->>>>>>> 083f61b... New triplet loss layer added(beta1 version-no test source files) -======= ->>>>>>> 011aef0... restore -======= ->>>>>>> 4d8130b... New triplet loss layer added(beta1 version-no test source files) const int datum_channels = datum.channels(); const int datum_height = datum.height(); const int datum_width = datum.width(); + // Check dimensions. 
const int channels = transformed_blob->channels(); const int height = transformed_blob->height(); const int width = transformed_blob->width(); @@ -207,8 +162,6 @@ void DataTransformer::Transform(const Datum& datum, CHECK_LE(width, datum_width); CHECK_GE(num, 1); - const int crop_size = param_.crop_size(); - if (crop_size) { CHECK_EQ(crop_size, height); CHECK_EQ(crop_size, width); @@ -241,7 +194,6 @@ void DataTransformer::Transform(const vector & datum_vector, } } -#ifdef USE_OPENCV template void DataTransformer::Transform(const vector & mat_vector, Blob* transformed_blob) { @@ -265,10 +217,12 @@ void DataTransformer::Transform(const vector & mat_vector, template void DataTransformer::Transform(const cv::Mat& cv_img, Blob* transformed_blob) { + const int crop_size = param_.crop_size(); const int img_channels = cv_img.channels(); const int img_height = cv_img.rows; const int img_width = cv_img.cols; + // Check dimensions. const int channels = transformed_blob->channels(); const int height = transformed_blob->height(); const int width = transformed_blob->width(); @@ -281,7 +235,6 @@ void DataTransformer::Transform(const cv::Mat& cv_img, CHECK(cv_img.depth() == CV_8U) << "Image data type must be unsigned byte"; - const int crop_size = param_.crop_size(); const Dtype scale = param_.scale(); const bool do_mirror = param_.mirror() && Rand(2); const bool has_mean_file = param_.has_mean_file(); @@ -362,16 +315,27 @@ void DataTransformer::Transform(const cv::Mat& cv_img, } } } -#endif // USE_OPENCV template void DataTransformer::Transform(Blob* input_blob, Blob* transformed_blob) { + const int crop_size = param_.crop_size(); const int input_num = input_blob->num(); const int input_channels = input_blob->channels(); const int input_height = input_blob->height(); const int input_width = input_blob->width(); + if (transformed_blob->count() == 0) { + // Initialize transformed_blob with the right shape. 
+ if (crop_size) { + transformed_blob->Reshape(input_num, input_channels, + crop_size, crop_size); + } else { + transformed_blob->Reshape(input_num, input_channels, + input_height, input_width); + } + } + const int num = transformed_blob->num(); const int channels = transformed_blob->channels(); const int height = transformed_blob->height(); @@ -383,7 +347,7 @@ void DataTransformer::Transform(Blob* input_blob, CHECK_GE(input_height, height); CHECK_GE(input_width, width); - const int crop_size = param_.crop_size(); + const Dtype scale = param_.scale(); const bool do_mirror = param_.mirror() && Rand(2); const bool has_mean_file = param_.has_mean_file(); @@ -465,44 +429,10 @@ void DataTransformer::Transform(Blob* input_blob, } } -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD template vector DataTransformer::InferBlobShape(const Datum& datum) { if (datum.encoded()) { -#ifdef USE_OPENCV - CHECK(!(param_.force_color() && param_.force_gray())) -<<<<<<< fa941056111876e03082cdc5695d75339ed24ed9 -======= -======= - CHECK(!param_.force_color() && !param_.force_gray()) ->>>>>>> 011aef0... restore -======= CHECK(!(param_.force_color() && param_.force_gray())) -<<<<<<< HEAD ->>>>>>> d2acfed... fixed _force_color check, fixes #2635 -======= -======= -template -vector DataTransformer::InferBlobShape(const Datum& datum) { - if (datum.encoded()) { -<<<<<<< HEAD - CHECK(!param_.force_color() && !param_.force_gray()) ->>>>>>> 011aef0... restore -<<<<<<< HEAD ->>>>>>> 00341b2... triplet data generation and network update -<<<<<<< bd900fc47efb4f9fe6c0698d66ca08f7a5c1ed58 ->>>>>>> triplet data generation and network update -======= -======= -======= - CHECK(!(param_.force_color() && param_.force_gray())) ->>>>>>> d2acfed... fixed _force_color check, fixes #2635 ->>>>>>> 1882ac9... 
add initiate class name of triplet loss layer ->>>>>>> add initiate class name of triplet loss layer << "cannot set both force_color and force_gray"; cv::Mat cv_img; if (param_.force_color() || param_.force_gray()) { @@ -513,10 +443,8 @@ vector DataTransformer::InferBlobShape(const Datum& datum) { } // InferBlobShape using the cv::image. return InferBlobShape(cv_img); -#else - LOG(FATAL) << "Encoded datum requires OpenCV; compile with USE_OPENCV."; -#endif // USE_OPENCV } + const int crop_size = param_.crop_size(); const int datum_channels = datum.channels(); const int datum_height = datum.height(); @@ -546,7 +474,6 @@ vector DataTransformer::InferBlobShape( return shape; } -#ifdef USE_OPENCV template vector DataTransformer::InferBlobShape(const cv::Mat& cv_img) { const int crop_size = param_.crop_size(); @@ -577,15 +504,7 @@ vector DataTransformer::InferBlobShape( shape[0] = num; return shape; } -#endif // USE_OPENCV - -<<<<<<< HEAD -======= ->>>>>>> 083f61b... New triplet loss layer added(beta1 version-no test source files) -======= ->>>>>>> 011aef0... restore -======= ->>>>>>> 4d8130b... 
New triplet loss layer added(beta1 version-no test source files) + template void DataTransformer::InitRand() { const bool needs_rand = param_.mirror() || diff --git a/src/caffe/data_transformer.cpp.orig b/src/caffe/data_transformer.cpp.orig deleted file mode 100644 index d621d704c8e..00000000000 --- a/src/caffe/data_transformer.cpp.orig +++ /dev/null @@ -1,687 +0,0 @@ -#ifdef USE_OPENCV -#include -#endif // USE_OPENCV - -#include -#include - -#include "caffe/data_transformer.hpp" -#include "caffe/util/io.hpp" -#include "caffe/util/math_functions.hpp" -#include "caffe/util/rng.hpp" - -namespace caffe { - -template -DataTransformer::DataTransformer(const TransformationParameter& param, - Phase phase) - : param_(param), phase_(phase) { - // check if we want to use mean_file - if (param_.has_mean_file()) { - CHECK_EQ(param_.mean_value_size(), 0) << - "Cannot specify mean_file and mean_value at the same time"; - const string& mean_file = param.mean_file(); -<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da - if (Caffe::root_solver()) { - LOG(INFO) << "Loading mean file from: " << mean_file; - } -======= -<<<<<<< HEAD - LOG(INFO) << "Loading mean file from: " << mean_file; -======= - if (Caffe::root_solver()) { - LOG(INFO) << "Loading mean file from: " << mean_file; - } ->>>>>>> 0dbadac... 
triplet data generation and network update ->>>>>>> triplet data generation and network update - BlobProto blob_proto; - ReadProtoFromBinaryFileOrDie(mean_file.c_str(), &blob_proto); - data_mean_.FromProto(blob_proto); - } - // check if we want to use mean_value - if (param_.mean_value_size() > 0) { - CHECK(param_.has_mean_file() == false) << - "Cannot specify mean_file and mean_value at the same time"; - for (int c = 0; c < param_.mean_value_size(); ++c) { - mean_values_.push_back(param_.mean_value(c)); - } - } -} - -template -void DataTransformer::Transform(const Datum& datum, - Dtype* transformed_data) { - const string& data = datum.data(); - const int datum_channels = datum.channels(); - const int datum_height = datum.height(); - const int datum_width = datum.width(); - - const int crop_size = param_.crop_size(); - const Dtype scale = param_.scale(); - const bool do_mirror = param_.mirror() && Rand(2); - const bool has_mean_file = param_.has_mean_file(); - const bool has_uint8 = data.size() > 0; - const bool has_mean_values = mean_values_.size() > 0; - - CHECK_GT(datum_channels, 0); - CHECK_GE(datum_height, crop_size); - CHECK_GE(datum_width, crop_size); - - Dtype* mean = NULL; - if (has_mean_file) { - CHECK_EQ(datum_channels, data_mean_.channels()); - CHECK_EQ(datum_height, data_mean_.height()); - CHECK_EQ(datum_width, data_mean_.width()); - mean = data_mean_.mutable_cpu_data(); - } - if (has_mean_values) { - CHECK(mean_values_.size() == 1 || mean_values_.size() == datum_channels) << - "Specify either 1 mean_value or as many as channels: " << datum_channels; - if (datum_channels > 1 && mean_values_.size() == 1) { - // Replicate the mean_value for simplicity - for (int c = 1; c < datum_channels; ++c) { - mean_values_.push_back(mean_values_[0]); - } - } - } - - int height = datum_height; - int width = datum_width; - - int h_off = 0; - int w_off = 0; - if (crop_size) { - height = crop_size; - width = crop_size; - // We only do random crop when we do training. 
- if (phase_ == TRAIN) { - h_off = Rand(datum_height - crop_size + 1); - w_off = Rand(datum_width - crop_size + 1); - } else { - h_off = (datum_height - crop_size) / 2; - w_off = (datum_width - crop_size) / 2; - } - } - - Dtype datum_element; - int top_index, data_index; - for (int c = 0; c < datum_channels; ++c) { - for (int h = 0; h < height; ++h) { - for (int w = 0; w < width; ++w) { - data_index = (c * datum_height + h_off + h) * datum_width + w_off + w; - if (do_mirror) { - top_index = (c * height + h) * width + (width - 1 - w); - } else { - top_index = (c * height + h) * width + w; - } - if (has_uint8) { - datum_element = - static_cast(static_cast(data[data_index])); - } else { - datum_element = datum.float_data(data_index); - } - if (has_mean_file) { - transformed_data[top_index] = - (datum_element - mean[data_index]) * scale; - } else { - if (has_mean_values) { - transformed_data[top_index] = - (datum_element - mean_values_[c]) * scale; - } else { - transformed_data[top_index] = datum_element * scale; - } - } - } - } - } -} - - -template -void DataTransformer::Transform(const Datum& datum, - Blob* transformed_blob) { -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da -<<<<<<< HEAD -<<<<<<< HEAD -======= -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 ->>>>>>> 0dbadac... triplet data generation and network update ->>>>>>> triplet data generation and network update - // If datum is encoded, decoded and transform the cv::image. - if (datum.encoded()) { -<<<<<<< fa941056111876e03082cdc5695d75339ed24ed9 -#ifdef USE_OPENCV - CHECK(!(param_.force_color() && param_.force_gray())) -======= -<<<<<<< HEAD -<<<<<<< HEAD - CHECK(!(param_.force_color() && param_.force_gray())) -======= -<<<<<<< HEAD - CHECK(!param_.force_color() && !param_.force_gray()) ->>>>>>> 011aef0... restore -======= - CHECK(!(param_.force_color() && param_.force_gray())) ->>>>>>> d2acfed... 
fixed _force_color check, fixes #2635 -======= - // If datum is encoded, decoded and transform the cv::image. - if (datum.encoded()) { -<<<<<<< HEAD - CHECK(!param_.force_color() && !param_.force_gray()) ->>>>>>> 011aef0... restore -<<<<<<< HEAD ->>>>>>> 00341b2... triplet data generation and network update -<<<<<<< bd900fc47efb4f9fe6c0698d66ca08f7a5c1ed58 ->>>>>>> triplet data generation and network update -======= -======= -======= - CHECK(!(param_.force_color() && param_.force_gray())) ->>>>>>> d2acfed... fixed _force_color check, fixes #2635 -<<<<<<< HEAD ->>>>>>> 1882ac9... add initiate class name of triplet loss layer -<<<<<<< ca976a2b520c8024cc7f41a048d2c1abe73f6022 ->>>>>>> add initiate class name of triplet loss layer -======= -======= -======= - // If datum is encoded, decoded and transform the cv::image. - if (datum.encoded()) { - CHECK(!param_.force_color() && !param_.force_gray()) ->>>>>>> 80a07dd... macro define in upgrade_proto ->>>>>>> 08d5d6d... macro define in upgrade_proto ->>>>>>> macro define in upgrade_proto - << "cannot set both force_color and force_gray"; - cv::Mat cv_img; - if (param_.force_color() || param_.force_gray()) { - // If force_color then decode in color otherwise decode in gray. - cv_img = DecodeDatumToCVMat(datum, param_.force_color()); - } else { - cv_img = DecodeDatumToCVMatNative(datum); - } - // Transform the cv::image into blob. - return Transform(cv_img, transformed_blob); -#else - LOG(FATAL) << "Encoded datum requires OpenCV; compile with USE_OPENCV."; -#endif // USE_OPENCV - } else { - if (param_.force_color() || param_.force_gray()) { - LOG(ERROR) << "force_color and force_gray only for encoded datum"; - } - } - - const int crop_size = param_.crop_size(); -<<<<<<< HEAD -<<<<<<< HEAD -======= -<<<<<<< HEAD ->>>>>>> 083f61b... New triplet loss layer added(beta1 version-no test source files) -======= -<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da ->>>>>>> 011aef0... restore -======= ->>>>>>> 4d8130b... 
New triplet loss layer added(beta1 version-no test source files) -======= ->>>>>>> 80a07dd... macro define in upgrade_proto -======= ->>>>>>> triplet data generation and network update ->>>>>>> 0dbadac... triplet data generation and network update ->>>>>>> triplet data generation and network update - const int datum_channels = datum.channels(); - const int datum_height = datum.height(); - const int datum_width = datum.width(); - - // Check dimensions. - const int channels = transformed_blob->channels(); - const int height = transformed_blob->height(); - const int width = transformed_blob->width(); - const int num = transformed_blob->num(); - - CHECK_EQ(channels, datum_channels); - CHECK_LE(height, datum_height); - CHECK_LE(width, datum_width); - CHECK_GE(num, 1); - - if (crop_size) { - CHECK_EQ(crop_size, height); - CHECK_EQ(crop_size, width); - } else { - CHECK_EQ(datum_height, height); - CHECK_EQ(datum_width, width); - } - - Dtype* transformed_data = transformed_blob->mutable_cpu_data(); - Transform(datum, transformed_data); -} - -template -void DataTransformer::Transform(const vector & datum_vector, - Blob* transformed_blob) { - const int datum_num = datum_vector.size(); - const int num = transformed_blob->num(); - const int channels = transformed_blob->channels(); - const int height = transformed_blob->height(); - const int width = transformed_blob->width(); - - CHECK_GT(datum_num, 0) << "There is no datum to add"; - CHECK_LE(datum_num, num) << - "The size of datum_vector must be no greater than transformed_blob->num()"; - Blob uni_blob(1, channels, height, width); - for (int item_id = 0; item_id < datum_num; ++item_id) { - int offset = transformed_blob->offset(item_id); - uni_blob.set_cpu_data(transformed_blob->mutable_cpu_data() + offset); - Transform(datum_vector[item_id], &uni_blob); - } -} - -#ifdef USE_OPENCV -template -void DataTransformer::Transform(const vector & mat_vector, - Blob* transformed_blob) { - const int mat_num = mat_vector.size(); - const 
int num = transformed_blob->num(); - const int channels = transformed_blob->channels(); - const int height = transformed_blob->height(); - const int width = transformed_blob->width(); - - CHECK_GT(mat_num, 0) << "There is no MAT to add"; - CHECK_EQ(mat_num, num) << - "The size of mat_vector must be equals to transformed_blob->num()"; - Blob uni_blob(1, channels, height, width); - for (int item_id = 0; item_id < mat_num; ++item_id) { - int offset = transformed_blob->offset(item_id); - uni_blob.set_cpu_data(transformed_blob->mutable_cpu_data() + offset); - Transform(mat_vector[item_id], &uni_blob); - } -} - -template -void DataTransformer::Transform(const cv::Mat& cv_img, - Blob* transformed_blob) { - const int crop_size = param_.crop_size(); - const int img_channels = cv_img.channels(); - const int img_height = cv_img.rows; - const int img_width = cv_img.cols; - - // Check dimensions. - const int channels = transformed_blob->channels(); - const int height = transformed_blob->height(); - const int width = transformed_blob->width(); - const int num = transformed_blob->num(); - - CHECK_EQ(channels, img_channels); - CHECK_LE(height, img_height); - CHECK_LE(width, img_width); - CHECK_GE(num, 1); - - CHECK(cv_img.depth() == CV_8U) << "Image data type must be unsigned byte"; - - const Dtype scale = param_.scale(); - const bool do_mirror = param_.mirror() && Rand(2); - const bool has_mean_file = param_.has_mean_file(); - const bool has_mean_values = mean_values_.size() > 0; - - CHECK_GT(img_channels, 0); - CHECK_GE(img_height, crop_size); - CHECK_GE(img_width, crop_size); - - Dtype* mean = NULL; - if (has_mean_file) { - CHECK_EQ(img_channels, data_mean_.channels()); - CHECK_EQ(img_height, data_mean_.height()); - CHECK_EQ(img_width, data_mean_.width()); - mean = data_mean_.mutable_cpu_data(); - } - if (has_mean_values) { - CHECK(mean_values_.size() == 1 || mean_values_.size() == img_channels) << - "Specify either 1 mean_value or as many as channels: " << img_channels; - if 
(img_channels > 1 && mean_values_.size() == 1) { - // Replicate the mean_value for simplicity - for (int c = 1; c < img_channels; ++c) { - mean_values_.push_back(mean_values_[0]); - } - } - } - - int h_off = 0; - int w_off = 0; - cv::Mat cv_cropped_img = cv_img; - if (crop_size) { - CHECK_EQ(crop_size, height); - CHECK_EQ(crop_size, width); - // We only do random crop when we do training. - if (phase_ == TRAIN) { - h_off = Rand(img_height - crop_size + 1); - w_off = Rand(img_width - crop_size + 1); - } else { - h_off = (img_height - crop_size) / 2; - w_off = (img_width - crop_size) / 2; - } - cv::Rect roi(w_off, h_off, crop_size, crop_size); - cv_cropped_img = cv_img(roi); - } else { - CHECK_EQ(img_height, height); - CHECK_EQ(img_width, width); - } - - CHECK(cv_cropped_img.data); - - Dtype* transformed_data = transformed_blob->mutable_cpu_data(); - int top_index; - for (int h = 0; h < height; ++h) { - const uchar* ptr = cv_cropped_img.ptr(h); - int img_index = 0; - for (int w = 0; w < width; ++w) { - for (int c = 0; c < img_channels; ++c) { - if (do_mirror) { - top_index = (c * height + h) * width + (width - 1 - w); - } else { - top_index = (c * height + h) * width + w; - } - // int top_index = (c * height + h) * width + w; - Dtype pixel = static_cast(ptr[img_index++]); - if (has_mean_file) { - int mean_index = (c * img_height + h_off + h) * img_width + w_off + w; - transformed_data[top_index] = - (pixel - mean[mean_index]) * scale; - } else { - if (has_mean_values) { - transformed_data[top_index] = - (pixel - mean_values_[c]) * scale; - } else { - transformed_data[top_index] = pixel * scale; - } - } - } - } - } -} -#endif // USE_OPENCV - -template -void DataTransformer::Transform(Blob* input_blob, - Blob* transformed_blob) { - const int crop_size = param_.crop_size(); - const int input_num = input_blob->num(); - const int input_channels = input_blob->channels(); - const int input_height = input_blob->height(); - const int input_width = input_blob->width(); - - if 
(transformed_blob->count() == 0) { - // Initialize transformed_blob with the right shape. - if (crop_size) { - transformed_blob->Reshape(input_num, input_channels, - crop_size, crop_size); - } else { - transformed_blob->Reshape(input_num, input_channels, - input_height, input_width); - } - } - - const int num = transformed_blob->num(); - const int channels = transformed_blob->channels(); - const int height = transformed_blob->height(); - const int width = transformed_blob->width(); - const int size = transformed_blob->count(); - - CHECK_LE(input_num, num); - CHECK_EQ(input_channels, channels); - CHECK_GE(input_height, height); - CHECK_GE(input_width, width); - - - const Dtype scale = param_.scale(); - const bool do_mirror = param_.mirror() && Rand(2); - const bool has_mean_file = param_.has_mean_file(); - const bool has_mean_values = mean_values_.size() > 0; - - int h_off = 0; - int w_off = 0; - if (crop_size) { - CHECK_EQ(crop_size, height); - CHECK_EQ(crop_size, width); - // We only do random crop when we do training. 
- if (phase_ == TRAIN) { - h_off = Rand(input_height - crop_size + 1); - w_off = Rand(input_width - crop_size + 1); - } else { - h_off = (input_height - crop_size) / 2; - w_off = (input_width - crop_size) / 2; - } - } else { - CHECK_EQ(input_height, height); - CHECK_EQ(input_width, width); - } - - Dtype* input_data = input_blob->mutable_cpu_data(); - if (has_mean_file) { - CHECK_EQ(input_channels, data_mean_.channels()); - CHECK_EQ(input_height, data_mean_.height()); - CHECK_EQ(input_width, data_mean_.width()); - for (int n = 0; n < input_num; ++n) { - int offset = input_blob->offset(n); - caffe_sub(data_mean_.count(), input_data + offset, - data_mean_.cpu_data(), input_data + offset); - } - } - - if (has_mean_values) { - CHECK(mean_values_.size() == 1 || mean_values_.size() == input_channels) << - "Specify either 1 mean_value or as many as channels: " << input_channels; - if (mean_values_.size() == 1) { - caffe_add_scalar(input_blob->count(), -(mean_values_[0]), input_data); - } else { - for (int n = 0; n < input_num; ++n) { - for (int c = 0; c < input_channels; ++c) { - int offset = input_blob->offset(n, c); - caffe_add_scalar(input_height * input_width, -(mean_values_[c]), - input_data + offset); - } - } - } - } - - Dtype* transformed_data = transformed_blob->mutable_cpu_data(); - - for (int n = 0; n < input_num; ++n) { - int top_index_n = n * channels; - int data_index_n = n * channels; - for (int c = 0; c < channels; ++c) { - int top_index_c = (top_index_n + c) * height; - int data_index_c = (data_index_n + c) * input_height + h_off; - for (int h = 0; h < height; ++h) { - int top_index_h = (top_index_c + h) * width; - int data_index_h = (data_index_c + h) * input_width + w_off; - if (do_mirror) { - int top_index_w = top_index_h + width - 1; - for (int w = 0; w < width; ++w) { - transformed_data[top_index_w-w] = input_data[data_index_h + w]; - } - } else { - for (int w = 0; w < width; ++w) { - transformed_data[top_index_h + w] = input_data[data_index_h + w]; - 
} - } - } - } - } - if (scale != Dtype(1)) { - DLOG(INFO) << "Scale: " << scale; - caffe_scal(size, scale, transformed_data); - } -} - -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da -<<<<<<< HEAD -<<<<<<< HEAD -======= -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 ->>>>>>> 0dbadac... triplet data generation and network update ->>>>>>> triplet data generation and network update -template -vector DataTransformer::InferBlobShape(const Datum& datum) { - if (datum.encoded()) { -#ifdef USE_OPENCV - CHECK(!(param_.force_color() && param_.force_gray())) -<<<<<<< fa941056111876e03082cdc5695d75339ed24ed9 -======= -======= - CHECK(!param_.force_color() && !param_.force_gray()) ->>>>>>> 011aef0... restore -======= - CHECK(!(param_.force_color() && param_.force_gray())) -<<<<<<< HEAD ->>>>>>> d2acfed... fixed _force_color check, fixes #2635 -======= -======= -template -vector DataTransformer::InferBlobShape(const Datum& datum) { - if (datum.encoded()) { -<<<<<<< HEAD - CHECK(!param_.force_color() && !param_.force_gray()) ->>>>>>> 011aef0... restore -<<<<<<< HEAD ->>>>>>> 00341b2... triplet data generation and network update -<<<<<<< bd900fc47efb4f9fe6c0698d66ca08f7a5c1ed58 ->>>>>>> triplet data generation and network update -======= -======= -======= - CHECK(!(param_.force_color() && param_.force_gray())) ->>>>>>> d2acfed... fixed _force_color check, fixes #2635 -<<<<<<< HEAD ->>>>>>> 1882ac9... add initiate class name of triplet loss layer -<<<<<<< ca976a2b520c8024cc7f41a048d2c1abe73f6022 ->>>>>>> add initiate class name of triplet loss layer -======= -======= -======= -template -vector DataTransformer::InferBlobShape(const Datum& datum) { - if (datum.encoded()) { - CHECK(!param_.force_color() && !param_.force_gray()) ->>>>>>> 80a07dd... macro define in upgrade_proto ->>>>>>> 08d5d6d... 
macro define in upgrade_proto ->>>>>>> macro define in upgrade_proto - << "cannot set both force_color and force_gray"; - cv::Mat cv_img; - if (param_.force_color() || param_.force_gray()) { - // If force_color then decode in color otherwise decode in gray. - cv_img = DecodeDatumToCVMat(datum, param_.force_color()); - } else { - cv_img = DecodeDatumToCVMatNative(datum); - } - // InferBlobShape using the cv::image. - return InferBlobShape(cv_img); -#else - LOG(FATAL) << "Encoded datum requires OpenCV; compile with USE_OPENCV."; -#endif // USE_OPENCV - } - const int crop_size = param_.crop_size(); - const int datum_channels = datum.channels(); - const int datum_height = datum.height(); - const int datum_width = datum.width(); - // Check dimensions. - CHECK_GT(datum_channels, 0); - CHECK_GE(datum_height, crop_size); - CHECK_GE(datum_width, crop_size); - // Build BlobShape. - vector shape(4); - shape[0] = 1; - shape[1] = datum_channels; - shape[2] = (crop_size)? crop_size: datum_height; - shape[3] = (crop_size)? crop_size: datum_width; - return shape; -} - -template -vector DataTransformer::InferBlobShape( - const vector & datum_vector) { - const int num = datum_vector.size(); - CHECK_GT(num, 0) << "There is no datum to in the vector"; - // Use first datum in the vector to InferBlobShape. - vector shape = InferBlobShape(datum_vector[0]); - // Adjust num to the size of the vector. - shape[0] = num; - return shape; -} - -#ifdef USE_OPENCV -template -vector DataTransformer::InferBlobShape(const cv::Mat& cv_img) { - const int crop_size = param_.crop_size(); - const int img_channels = cv_img.channels(); - const int img_height = cv_img.rows; - const int img_width = cv_img.cols; - // Check dimensions. - CHECK_GT(img_channels, 0); - CHECK_GE(img_height, crop_size); - CHECK_GE(img_width, crop_size); - // Build BlobShape. - vector shape(4); - shape[0] = 1; - shape[1] = img_channels; - shape[2] = (crop_size)? crop_size: img_height; - shape[3] = (crop_size)? 
crop_size: img_width; - return shape; -} - -template -vector DataTransformer::InferBlobShape( - const vector & mat_vector) { - const int num = mat_vector.size(); - CHECK_GT(num, 0) << "There is no cv_img to in the vector"; - // Use first cv_img in the vector to InferBlobShape. - vector shape = InferBlobShape(mat_vector[0]); - // Adjust num to the size of the vector. - shape[0] = num; - return shape; -} -#endif // USE_OPENCV - -<<<<<<< HEAD -<<<<<<< HEAD -======= -<<<<<<< HEAD ->>>>>>> 083f61b... New triplet loss layer added(beta1 version-no test source files) -======= -<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da ->>>>>>> 011aef0... restore -======= ->>>>>>> 4d8130b... New triplet loss layer added(beta1 version-no test source files) -======= ->>>>>>> 80a07dd... macro define in upgrade_proto -======= ->>>>>>> triplet data generation and network update ->>>>>>> 0dbadac... triplet data generation and network update ->>>>>>> triplet data generation and network update -template -void DataTransformer::InitRand() { - const bool needs_rand = param_.mirror() || - (phase_ == TRAIN && param_.crop_size()); - if (needs_rand) { - const unsigned int rng_seed = caffe_rng_rand(); - rng_.reset(new Caffe::RNG(rng_seed)); - } else { - rng_.reset(); - } -} - -template -int DataTransformer::Rand(int n) { - CHECK(rng_); - CHECK_GT(n, 0); - caffe::rng_t* rng = - static_cast(rng_->generator()); - return ((*rng)() % n); -} - -INSTANTIATE_CLASS(DataTransformer); - -} // namespace caffe diff --git a/src/caffe/layers/base_data_layer.cpp b/src/caffe/layers/base_data_layer.cpp index 26a1118282f..b90bd4e0caf 100644 --- a/src/caffe/layers/base_data_layer.cpp +++ b/src/caffe/layers/base_data_layer.cpp @@ -1,7 +1,9 @@ +#include #include #include #include "caffe/data_layers.hpp" +#include "caffe/net.hpp" #include "caffe/util/io.hpp" namespace caffe { @@ -27,56 +29,96 @@ void BaseDataLayer::LayerSetUp(const vector*>& bottom, DataLayerSetUp(bottom, top); } +template 
+BasePrefetchingDataLayer::BasePrefetchingDataLayer( + const LayerParameter& param) + : BaseDataLayer(param), + prefetch_free_(), prefetch_full_() { + for (int i = 0; i < PREFETCH_COUNT; ++i) { + prefetch_free_.push(&prefetch_[i]); + } +} + template void BasePrefetchingDataLayer::LayerSetUp( const vector*>& bottom, const vector*>& top) { BaseDataLayer::LayerSetUp(bottom, top); - // Now, start the prefetch thread. Before calling prefetch, we make two - // cpu_data calls so that the prefetch thread does not accidentally make - // simultaneous cudaMalloc calls when the main thread is running. In some - // GPUs this seems to cause failures if we do not so. - this->prefetch_data_.mutable_cpu_data(); - if (this->output_labels_) { - this->prefetch_label_.mutable_cpu_data(); + // Before starting the prefetch thread, we make cpu_data and gpu_data + // calls so that the prefetch thread does not accidentally make simultaneous + // cudaMalloc calls when the main thread is running. In some GPUs this + // seems to cause failures if we do not so. 
+ for (int i = 0; i < PREFETCH_COUNT; ++i) { + prefetch_[i].data_.mutable_cpu_data(); + if (this->output_labels_) { + prefetch_[i].label_.mutable_cpu_data(); + } } +#ifndef CPU_ONLY + if (Caffe::mode() == Caffe::GPU) { + for (int i = 0; i < PREFETCH_COUNT; ++i) { + prefetch_[i].data_.mutable_gpu_data(); + if (this->output_labels_) { + prefetch_[i].label_.mutable_gpu_data(); + } + } + } +#endif DLOG(INFO) << "Initializing prefetch"; - this->CreatePrefetchThread(); + this->data_transformer_->InitRand(); + StartInternalThread(); DLOG(INFO) << "Prefetch initialized."; } template -void BasePrefetchingDataLayer::CreatePrefetchThread() { - this->data_transformer_->InitRand(); - CHECK(StartInternalThread()) << "Thread execution failed"; -} +void BasePrefetchingDataLayer::InternalThreadEntry() { +#ifndef CPU_ONLY + cudaStream_t stream; + if (Caffe::mode() == Caffe::GPU) { + CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); + } +#endif -template -void BasePrefetchingDataLayer::JoinPrefetchThread() { - CHECK(WaitForInternalThreadToExit()) << "Thread joining failed"; + try { + while (!must_stop()) { + Batch* batch = prefetch_free_.pop(); + load_batch(batch); +#ifndef CPU_ONLY + if (Caffe::mode() == Caffe::GPU) { + batch->data_.data().get()->async_gpu_push(stream); + CUDA_CHECK(cudaStreamSynchronize(stream)); + } +#endif + prefetch_full_.push(batch); + } + } catch (boost::thread_interrupted&) { + // Interrupted exception is expected on shutdown + } +#ifndef CPU_ONLY + if (Caffe::mode() == Caffe::GPU) { + CUDA_CHECK(cudaStreamDestroy(stream)); + } +#endif } template void BasePrefetchingDataLayer::Forward_cpu( const vector*>& bottom, const vector*>& top) { - // First, join the thread - JoinPrefetchThread(); - DLOG(INFO) << "Thread joined"; + Batch* batch = prefetch_full_.pop("Data layer prefetch queue empty"); // Reshape to loaded data. 
- top[0]->ReshapeLike(prefetch_data_); + top[0]->ReshapeLike(batch->data_); // Copy the data - caffe_copy(prefetch_data_.count(), prefetch_data_.cpu_data(), + caffe_copy(batch->data_.count(), batch->data_.cpu_data(), top[0]->mutable_cpu_data()); DLOG(INFO) << "Prefetch copied"; if (this->output_labels_) { // Reshape to loaded labels. - top[1]->ReshapeLike(prefetch_label_); + top[1]->ReshapeLike(batch->label_); // Copy the labels. - caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(), - top[1]->mutable_cpu_data()); + caffe_copy(batch->label_.count(), batch->label_.cpu_data(), + top[1]->mutable_cpu_data()); } - // Start a new prefetch thread - DLOG(INFO) << "CreatePrefetchThread"; - CreatePrefetchThread(); + + prefetch_free_.push(batch); } #ifdef CPU_ONLY diff --git a/src/caffe/layers/base_data_layer.cpp.orig.orig b/src/caffe/layers/base_data_layer.cpp.orig.orig deleted file mode 100644 index 72e4909aafc..00000000000 --- a/src/caffe/layers/base_data_layer.cpp.orig.orig +++ /dev/null @@ -1,179 +0,0 @@ -#include -#include -#include - -#include "caffe/data_layers.hpp" -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -#include "caffe/net.hpp" ->>>>>>> triplet data generation and network update -#include "caffe/util/io.hpp" - -namespace caffe { - -template -BaseDataLayer::BaseDataLayer(const LayerParameter& param) - : Layer(param), - transform_param_(param.transform_param()) { -} - -template -void BaseDataLayer::LayerSetUp(const vector*>& bottom, - const vector*>& top) { - if (top.size() == 1) { - output_labels_ = false; - } else { - output_labels_ = true; - } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 - data_transformer_.reset( - new DataTransformer(transform_param_, this->phase_)); - data_transformer_->InitRand(); - // The subclasses should setup the size of bottom and top - DataLayerSetUp(bottom, top); -<<<<<<< 4d8130bab0c7311e241e40f504e5fdbb4999bd84 -======= - // The subclasses should setup the size of bottom and top - 
DataLayerSetUp(bottom, top); - data_transformer_.reset( - new DataTransformer(transform_param_, this->phase_)); - data_transformer_->InitRand(); ->>>>>>> triplet data generation and network update -} - -template -BasePrefetchingDataLayer::BasePrefetchingDataLayer( - const LayerParameter& param) - : BaseDataLayer(param), - prefetch_free_(), prefetch_full_() { - for (int i = 0; i < PREFETCH_COUNT; ++i) { - prefetch_free_.push(&prefetch_[i]); - } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= ->>>>>>> macro define in upgrade_proto -======= ->>>>>>> triplet data generation and network update -} - -template -void BasePrefetchingDataLayer::LayerSetUp( - const vector*>& bottom, const vector*>& top) { - BaseDataLayer::LayerSetUp(bottom, top); - // Before starting the prefetch thread, we make cpu_data and gpu_data - // calls so that the prefetch thread does not accidentally make simultaneous - // cudaMalloc calls when the main thread is running. In some GPUs this - // seems to cause failures if we do not so. 
- for (int i = 0; i < PREFETCH_COUNT; ++i) { - prefetch_[i].data_.mutable_cpu_data(); - if (this->output_labels_) { - prefetch_[i].label_.mutable_cpu_data(); - } - } -#ifndef CPU_ONLY - if (Caffe::mode() == Caffe::GPU) { - for (int i = 0; i < PREFETCH_COUNT; ++i) { - prefetch_[i].data_.mutable_gpu_data(); - if (this->output_labels_) { - prefetch_[i].label_.mutable_gpu_data(); - } - } - } -#endif - DLOG(INFO) << "Initializing prefetch"; - this->data_transformer_->InitRand(); - StartInternalThread(); - DLOG(INFO) << "Prefetch initialized."; -} - -template -void BasePrefetchingDataLayer::InternalThreadEntry() { -#ifndef CPU_ONLY - cudaStream_t stream; - if (Caffe::mode() == Caffe::GPU) { - CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); - } -#endif - - try { - while (!must_stop()) { - Batch* batch = prefetch_free_.pop(); - load_batch(batch); -#ifndef CPU_ONLY - if (Caffe::mode() == Caffe::GPU) { - batch->data_.data().get()->async_gpu_push(stream); - CUDA_CHECK(cudaStreamSynchronize(stream)); - } -#endif - prefetch_full_.push(batch); - } - } catch (boost::thread_interrupted&) { - // Interrupted exception is expected on shutdown - } -#ifndef CPU_ONLY - if (Caffe::mode() == Caffe::GPU) { - CUDA_CHECK(cudaStreamDestroy(stream)); - } -#endif -} - -template -void BasePrefetchingDataLayer::Forward_cpu( - const vector*>& bottom, const vector*>& top) { - Batch* batch = prefetch_full_.pop("Data layer prefetch queue empty"); - // Reshape to loaded data. 
-<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -<<<<<<< 4d8130bab0c7311e241e40f504e5fdbb4999bd84 - top[0]->ReshapeLike(batch->data_); -======= - top[0]->ReshapeLike(prefetch_data_); ->>>>>>> macro define in upgrade_proto -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 - top[0]->ReshapeLike(batch->data_); -======= - top[0]->Reshape(this->prefetch_data_.num(), this->prefetch_data_.channels(), - this->prefetch_data_.height(), this->prefetch_data_.width()); ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update - // Copy the data - caffe_copy(batch->data_.count(), batch->data_.cpu_data(), - top[0]->mutable_cpu_data()); - DLOG(INFO) << "Prefetch copied"; - if (this->output_labels_) { -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 - // Reshape to loaded labels. -<<<<<<< 4d8130bab0c7311e241e40f504e5fdbb4999bd84 -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 - // Reshape to loaded labels. ->>>>>>> triplet data generation and network update - top[1]->ReshapeLike(batch->label_); - // Copy the labels. - caffe_copy(batch->label_.count(), batch->label_.cpu_data(), - top[1]->mutable_cpu_data()); -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 - top[1]->ReshapeLike(prefetch_label_); - // Copy the labels. 
- caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(), - top[1]->mutable_cpu_data()); ->>>>>>> macro define in upgrade_proto -======= - caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(), - top[1]->mutable_cpu_data()); ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update - } - - prefetch_free_.push(batch); -} - -#ifdef CPU_ONLY -STUB_GPU_FORWARD(BasePrefetchingDataLayer, Forward); -#endif - -INSTANTIATE_CLASS(BaseDataLayer); -INSTANTIATE_CLASS(BasePrefetchingDataLayer); - -} // namespace caffe diff --git a/src/caffe/layers/base_data_layer.cu b/src/caffe/layers/base_data_layer.cu index 9335a5bc9a9..ff6e412aba6 100644 --- a/src/caffe/layers/base_data_layer.cu +++ b/src/caffe/layers/base_data_layer.cu @@ -7,22 +7,23 @@ namespace caffe { template void BasePrefetchingDataLayer::Forward_gpu( const vector*>& bottom, const vector*>& top) { - // First, join the thread - JoinPrefetchThread(); + Batch* batch = prefetch_full_.pop("Data layer prefetch queue empty"); // Reshape to loaded data. - top[0]->ReshapeLike(this->prefetch_data_); + top[0]->ReshapeLike(batch->data_); // Copy the data - caffe_copy(prefetch_data_.count(), prefetch_data_.cpu_data(), + caffe_copy(batch->data_.count(), batch->data_.gpu_data(), top[0]->mutable_gpu_data()); if (this->output_labels_) { // Reshape to loaded labels. - top[1]->ReshapeLike(prefetch_label_); + top[1]->ReshapeLike(batch->label_); // Copy the labels. - caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(), + caffe_copy(batch->label_.count(), batch->label_.gpu_data(), top[1]->mutable_gpu_data()); } - // Start a new prefetch thread - CreatePrefetchThread(); + // Ensure the copy is synchronous wrt the host, so that the next batch isn't + // copied in meanwhile. 
+ CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault)); + prefetch_free_.push(batch); } INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer); diff --git a/src/caffe/layers/base_data_layer.cu.orig.orig b/src/caffe/layers/base_data_layer.cu.orig.orig deleted file mode 100644 index ef504680e82..00000000000 --- a/src/caffe/layers/base_data_layer.cu.orig.orig +++ /dev/null @@ -1,60 +0,0 @@ -#include - -#include "caffe/data_layers.hpp" - -namespace caffe { - -template -void BasePrefetchingDataLayer::Forward_gpu( - const vector*>& bottom, const vector*>& top) { - Batch* batch = prefetch_full_.pop("Data layer prefetch queue empty"); - // Reshape to loaded data. -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -<<<<<<< 4d8130bab0c7311e241e40f504e5fdbb4999bd84 - top[0]->ReshapeLike(batch->data_); -======= - top[0]->ReshapeLike(this->prefetch_data_); ->>>>>>> macro define in upgrade_proto -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 - top[0]->ReshapeLike(batch->data_); -======= - top[0]->Reshape(this->prefetch_data_.num(), this->prefetch_data_.channels(), - this->prefetch_data_.height(), this->prefetch_data_.width()); ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update - // Copy the data - caffe_copy(batch->data_.count(), batch->data_.gpu_data(), - top[0]->mutable_gpu_data()); - if (this->output_labels_) { -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 - // Reshape to loaded labels. -<<<<<<< 4d8130bab0c7311e241e40f504e5fdbb4999bd84 -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 - // Reshape to loaded labels. ->>>>>>> triplet data generation and network update - top[1]->ReshapeLike(batch->label_); - // Copy the labels. - caffe_copy(batch->label_.count(), batch->label_.gpu_data(), -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 - top[1]->ReshapeLike(prefetch_label_); - // Copy the labels. 
- caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(), ->>>>>>> macro define in upgrade_proto -======= - caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(), ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update - top[1]->mutable_gpu_data()); - } - // Ensure the copy is synchronous wrt the host, so that the next batch isn't - // copied in meanwhile. - CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault)); - prefetch_free_.push(batch); -} - -INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer); - -} // namespace caffe diff --git a/src/caffe/layers/concat_layer.cpp b/src/caffe/layers/concat_layer.cpp index 86b500de859..95fba105b9a 100644 --- a/src/caffe/layers/concat_layer.cpp +++ b/src/caffe/layers/concat_layer.cpp @@ -48,16 +48,11 @@ void ConcatLayer::Reshape(const vector*>& bottom, } top[0]->Reshape(top_shape); CHECK_EQ(bottom_count_sum, top[0]->count()); - if (bottom.size() == 1) { - top[0]->ShareData(*bottom[0]); - top[0]->ShareDiff(*bottom[0]); - } } template void ConcatLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { - if (bottom.size() == 1) { return; } Dtype* top_data = top[0]->mutable_cpu_data(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); @@ -77,7 +72,6 @@ void ConcatLayer::Forward_cpu(const vector*>& bottom, template void ConcatLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - if (bottom.size() == 1) { return; } const Dtype* top_diff = top[0]->cpu_diff(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); diff --git a/src/caffe/layers/concat_layer.cu b/src/caffe/layers/concat_layer.cu index 8f2e85d8f52..3c64c7ef224 100644 --- a/src/caffe/layers/concat_layer.cu +++ b/src/caffe/layers/concat_layer.cu @@ -53,15 +53,16 @@ void ConcatLayer::Backward_gpu(const vector*>& top, const int top_concat_axis = top[0]->shape(concat_axis_); const bool kForward 
= false; for (int i = 0; i < bottom.size(); ++i) { - if (!propagate_down[i]) { continue; } - Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); const int bottom_concat_axis = bottom[i]->shape(concat_axis_); - const int bottom_concat_size = bottom_concat_axis * concat_input_size_; - const int nthreads = bottom_concat_size * num_concats_; - Concat // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - nthreads, top_diff, kForward, num_concats_, concat_input_size_, - top_concat_axis, bottom_concat_axis, offset_concat_axis, bottom_diff); + if (propagate_down[i]) { + Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); + const int bottom_concat_size = bottom_concat_axis * concat_input_size_; + const int nthreads = bottom_concat_size * num_concats_; + Concat // NOLINT_NEXT_LINE(whitespace/operators) + <<>>( + nthreads, top_diff, kForward, num_concats_, concat_input_size_, + top_concat_axis, bottom_concat_axis, offset_concat_axis, bottom_diff); + } offset_concat_axis += bottom_concat_axis; } } diff --git a/src/caffe/layers/concat_layer.cu.orig b/src/caffe/layers/concat_layer.cu.orig deleted file mode 100644 index d3ce34709ec..00000000000 --- a/src/caffe/layers/concat_layer.cu.orig +++ /dev/null @@ -1,96 +0,0 @@ -#include - -#include "caffe/layer.hpp" -#include "caffe/util/math_functions.hpp" -#include "caffe/vision_layers.hpp" - -namespace caffe { - -template -__global__ void Concat(const int nthreads, const Dtype* in_data, - const bool forward, const int num_concats, const int concat_size, - const int top_concat_axis, const int bottom_concat_axis, - const int offset_concat_axis, Dtype* out_data) { - CUDA_KERNEL_LOOP(index, nthreads) { - const int total_concat_size = concat_size * bottom_concat_axis; - const int concat_num = index / total_concat_size; - const int concat_index = index % total_concat_size; - const int top_index = concat_index + - (concat_num * top_concat_axis + offset_concat_axis) * concat_size; - if (forward) { - out_data[top_index] = in_data[index]; - } else 
{ - out_data[index] = in_data[top_index]; - } - } -} - -template -void ConcatLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { - Dtype* top_data = top[0]->mutable_gpu_data(); - int offset_concat_axis = 0; - const int top_concat_axis = top[0]->shape(concat_axis_); - const bool kForward = true; - for (int i = 0; i < bottom.size(); ++i) { - const Dtype* bottom_data = bottom[i]->gpu_data(); - const int bottom_concat_axis = bottom[i]->shape(concat_axis_); - const int bottom_concat_size = bottom_concat_axis * concat_input_size_; - const int nthreads = bottom_concat_size * num_concats_; - Concat // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - nthreads, bottom_data, kForward, num_concats_, concat_input_size_, - top_concat_axis, bottom_concat_axis, offset_concat_axis, top_data); - offset_concat_axis += bottom_concat_axis; - } -} - -template -void ConcatLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - const Dtype* top_diff = top[0]->gpu_diff(); - int offset_concat_axis = 0; - const int top_concat_axis = top[0]->shape(concat_axis_); - const bool kForward = false; - for (int i = 0; i < bottom.size(); ++i) { - if (!propagate_down[i]) { continue; } - Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); - const int bottom_concat_axis = bottom[i]->shape(concat_axis_); -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46:src/caffe/layers/concat_layer.cu -======= -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 - if (propagate_down[i]) { - Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); - const int bottom_concat_size = bottom_concat_axis * concat_input_size_; - const int nthreads = bottom_concat_size * num_concats_; - Concat // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - nthreads, top_diff, kForward, num_concats_, concat_input_size_, - top_concat_axis, bottom_concat_axis, offset_concat_axis, bottom_diff); -======= - for (int n = 0; n < num_concats_; ++n) 
{ - caffe_copy(bottom_concat_axis * concat_input_size_, top_diff + - (n * top_concat_axis + offset_concat_axis) * concat_input_size_, - bottom_diff + n * bottom_concat_axis * concat_input_size_); ->>>>>>> triplet data generation and network update - } -======= ->>>>>>> triplet data generation and network update:src/caffe/layers/concat_layer.cu.orig - const int bottom_concat_size = bottom_concat_axis * concat_input_size_; - const int nthreads = bottom_concat_size * num_concats_; - Concat // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - nthreads, top_diff, kForward, num_concats_, concat_input_size_, - top_concat_axis, bottom_concat_axis, offset_concat_axis, bottom_diff); -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46:src/caffe/layers/concat_layer.cu -======= ->>>>>>> restore ->>>>>>> triplet data generation and network update:src/caffe/layers/concat_layer.cu.orig - offset_concat_axis += bottom_concat_axis; - } -} - -INSTANTIATE_LAYER_GPU_FUNCS(ConcatLayer); - -} // namespace caffe diff --git a/src/caffe/layers/concat_layer.cu.orig.orig b/src/caffe/layers/concat_layer.cu.orig.orig deleted file mode 100644 index 35acd1f833a..00000000000 --- a/src/caffe/layers/concat_layer.cu.orig.orig +++ /dev/null @@ -1,101 +0,0 @@ -#include - -#include "caffe/layer.hpp" -#include "caffe/util/math_functions.hpp" -#include "caffe/vision_layers.hpp" - -namespace caffe { - -template -__global__ void Concat(const int nthreads, const Dtype* in_data, - const bool forward, const int num_concats, const int concat_size, - const int top_concat_axis, const int bottom_concat_axis, - const int offset_concat_axis, Dtype* out_data) { - CUDA_KERNEL_LOOP(index, nthreads) { - const int total_concat_size = concat_size * bottom_concat_axis; - const int concat_num = index / total_concat_size; - const int concat_index = index % total_concat_size; - const int top_index = concat_index + - (concat_num * top_concat_axis + offset_concat_axis) * concat_size; - if (forward) { - out_data[top_index] = 
in_data[index]; - } else { - out_data[index] = in_data[top_index]; - } - } -} - -template -void ConcatLayer::Forward_gpu(const vector*>& bottom, - const vector*>& top) { - if (bottom.size() == 1) { return; } - Dtype* top_data = top[0]->mutable_gpu_data(); - int offset_concat_axis = 0; - const int top_concat_axis = top[0]->shape(concat_axis_); - const bool kForward = true; - for (int i = 0; i < bottom.size(); ++i) { - const Dtype* bottom_data = bottom[i]->gpu_data(); - const int bottom_concat_axis = bottom[i]->shape(concat_axis_); - const int bottom_concat_size = bottom_concat_axis * concat_input_size_; - const int nthreads = bottom_concat_size * num_concats_; - Concat // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - nthreads, bottom_data, kForward, num_concats_, concat_input_size_, - top_concat_axis, bottom_concat_axis, offset_concat_axis, top_data); - offset_concat_axis += bottom_concat_axis; - } -} - -template -void ConcatLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - if (bottom.size() == 1) { return; } - const Dtype* top_diff = top[0]->gpu_diff(); - int offset_concat_axis = 0; - const int top_concat_axis = top[0]->shape(concat_axis_); - const bool kForward = false; - for (int i = 0; i < bottom.size(); ++i) { - const int bottom_concat_axis = bottom[i]->shape(concat_axis_); -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -<<<<<<< 4d8130bab0c7311e241e40f504e5fdbb4999bd84 -======= -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 ->>>>>>> triplet data generation and network update - if (propagate_down[i]) { - Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); - const int bottom_concat_size = bottom_concat_axis * concat_input_size_; - const int nthreads = bottom_concat_size * num_concats_; - Concat // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - nthreads, top_diff, kForward, num_concats_, concat_input_size_, - top_concat_axis, bottom_concat_axis, 
offset_concat_axis, bottom_diff); -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -======= - for (int n = 0; n < num_concats_; ++n) { - caffe_copy(bottom_concat_axis * concat_input_size_, top_diff + - (n * top_concat_axis + offset_concat_axis) * concat_input_size_, - bottom_diff + n * bottom_concat_axis * concat_input_size_); ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update - } -======= - const int bottom_concat_size = bottom_concat_axis * concat_input_size_; - const int nthreads = bottom_concat_size * num_concats_; - Concat // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - nthreads, top_diff, kForward, num_concats_, concat_input_size_, - top_concat_axis, bottom_concat_axis, offset_concat_axis, bottom_diff); -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 ->>>>>>> macro define in upgrade_proto -======= ->>>>>>> restore ->>>>>>> triplet data generation and network update - offset_concat_axis += bottom_concat_axis; - } -} - -INSTANTIATE_LAYER_GPU_FUNCS(ConcatLayer); - -} // namespace caffe diff --git a/src/caffe/layers/data_layer.cpp b/src/caffe/layers/data_layer.cpp index fdf228bb269..0932d9feff3 100644 --- a/src/caffe/layers/data_layer.cpp +++ b/src/caffe/layers/data_layer.cpp @@ -1,6 +1,5 @@ -#ifdef USE_OPENCV #include -#endif // USE_OPENCV + #include #include @@ -12,93 +11,85 @@ #include "caffe/proto/caffe.pb.h" #include "caffe/util/benchmark.hpp" #include "caffe/util/io.hpp" -#include "caffe/util/math_functions.hpp" -#include "caffe/util/rng.hpp" namespace caffe { template -DataLayer::~DataLayer() { - this->JoinPrefetchThread(); +DataLayer::DataLayer(const LayerParameter& param) + : BasePrefetchingDataLayer(param), + reader_(param) { +} + +template +DataLayer::~DataLayer() { + this->StopInternalThread(); } template void DataLayer::DataLayerSetUp(const vector*>& bottom, const vector*>& top) { - // Initialize DB - db_.reset(db::GetDB(this->layer_param_.data_param().backend())); - 
db_->Open(this->layer_param_.data_param().source(), db::READ); - cursor_.reset(db_->NewCursor()); + const int batch_size = this->layer_param_.data_param().batch_size(); + // Read a data point, and use it to initialize the top blob. + Datum& datum = *(reader_.full().peek()); - // Check if we should randomly skip a few data points - if (this->layer_param_.data_param().rand_skip()) { - unsigned int skip = caffe_rng_rand() % - this->layer_param_.data_param().rand_skip(); - LOG(INFO) << "Skipping first " << skip << " data points."; - while (skip-- > 0) { - cursor_->Next(); - } - } - // Read a data point, to initialize the prefetch and top blobs. - Datum datum; - datum.ParseFromString(cursor_->value()); // Use data_transformer to infer the expected blob shape from datum. vector top_shape = this->data_transformer_->InferBlobShape(datum); this->transformed_data_.Reshape(top_shape); // Reshape top[0] and prefetch_data according to the batch_size. - top_shape[0] = this->layer_param_.data_param().batch_size(); - this->prefetch_data_.Reshape(top_shape); - top[0]->ReshapeLike(this->prefetch_data_); - + top_shape[0] = batch_size; + top[0]->Reshape(top_shape); + for (int i = 0; i < this->PREFETCH_COUNT; ++i) { + this->prefetch_[i].data_.Reshape(top_shape); + } LOG(INFO) << "output data size: " << top[0]->num() << "," << top[0]->channels() << "," << top[0]->height() << "," << top[0]->width(); // label if (this->output_labels_) { - vector label_shape(1, this->layer_param_.data_param().batch_size()); + vector label_shape(1, batch_size); top[1]->Reshape(label_shape); - this->prefetch_label_.Reshape(label_shape); + for (int i = 0; i < this->PREFETCH_COUNT; ++i) { + this->prefetch_[i].label_.Reshape(label_shape); + } } } -// This function is used to create a thread that prefetches the data. 
-template -void DataLayer::InternalThreadEntry() { +// This function is called on prefetch thread +template +void DataLayer::load_batch(Batch* batch) { CPUTimer batch_timer; batch_timer.Start(); double read_time = 0; double trans_time = 0; CPUTimer timer; - CHECK(this->prefetch_data_.count()); + CHECK(batch->data_.count()); CHECK(this->transformed_data_.count()); // Reshape according to the first datum of each batch // on single input batches allows for inputs of varying dimension. const int batch_size = this->layer_param_.data_param().batch_size(); - Datum datum; - datum.ParseFromString(cursor_->value()); + Datum& datum = *(reader_.full().peek()); // Use data_transformer to infer the expected blob shape from datum. vector top_shape = this->data_transformer_->InferBlobShape(datum); this->transformed_data_.Reshape(top_shape); - // Reshape prefetch_data according to the batch_size. + // Reshape batch according to the batch_size. top_shape[0] = batch_size; - this->prefetch_data_.Reshape(top_shape); + batch->data_.Reshape(top_shape); - Dtype* top_data = this->prefetch_data_.mutable_cpu_data(); + Dtype* top_data = batch->data_.mutable_cpu_data(); Dtype* top_label = NULL; // suppress warnings about uninitialized variables if (this->output_labels_) { - top_label = this->prefetch_label_.mutable_cpu_data(); + top_label = batch->label_.mutable_cpu_data(); } - timer.Start(); for (int item_id = 0; item_id < batch_size; ++item_id) { + timer.Start(); // get a datum - Datum datum; - datum.ParseFromString(cursor_->value()); + Datum& datum = *(reader_.full().pop("Waiting for data")); read_time += timer.MicroSeconds(); timer.Start(); // Apply data transformations (mirror, scale, crop...) - int offset = this->prefetch_data_.offset(item_id); + int offset = batch->data_.offset(item_id); this->transformed_data_.set_cpu_data(top_data + offset); this->data_transformer_->Transform(datum, &(this->transformed_data_)); // Copy label. 
@@ -106,13 +97,8 @@ void DataLayer::InternalThreadEntry() { top_label[item_id] = datum.label(); } trans_time += timer.MicroSeconds(); - timer.Start(); - // go to the next item. - cursor_->Next(); - if (!cursor_->valid()) { - DLOG(INFO) << "Restarting data prefetching from start."; - cursor_->SeekToFirst(); - } + + reader_.free().push(const_cast(&datum)); } timer.Stop(); batch_timer.Stop(); diff --git a/src/caffe/layers/data_layer.cpp.orig.orig b/src/caffe/layers/data_layer.cpp.orig.orig deleted file mode 100644 index 9bfd1c95be7..00000000000 --- a/src/caffe/layers/data_layer.cpp.orig.orig +++ /dev/null @@ -1,234 +0,0 @@ -#include - -#include - -#include -#include - -#include "caffe/common.hpp" -#include "caffe/data_layers.hpp" -#include "caffe/layer.hpp" -#include "caffe/proto/caffe.pb.h" -#include "caffe/util/benchmark.hpp" -#include "caffe/util/io.hpp" - -namespace caffe { - -template -DataLayer::DataLayer(const LayerParameter& param) - : BasePrefetchingDataLayer(param), - reader_(param) { -} - -template -DataLayer::~DataLayer() { - this->StopInternalThread(); -} - -template -void DataLayer::DataLayerSetUp(const vector*>& bottom, - const vector*>& top) { - const int batch_size = this->layer_param_.data_param().batch_size(); - // Read a data point, and use it to initialize the top blob. - Datum& datum = *(reader_.full().peek()); - -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -<<<<<<< 9a40f059e6e96f739ad502aae165c4bfdbf1f369 -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 ->>>>>>> triplet data generation and network update - // Use data_transformer to infer the expected blob shape from datum. - vector top_shape = this->data_transformer_->InferBlobShape(datum); - this->transformed_data_.Reshape(top_shape); - // Reshape top[0] and prefetch_data according to the batch_size. 
- top_shape[0] = batch_size; - top[0]->Reshape(top_shape); - for (int i = 0; i < this->PREFETCH_COUNT; ++i) { - this->prefetch_[i].data_.Reshape(top_shape); -======= - // Check if we should randomly skip a few data points - if (this->layer_param_.data_param().rand_skip()) { - unsigned int skip = caffe_rng_rand() % - this->layer_param_.data_param().rand_skip(); - LOG(INFO) << "Skipping first " << skip << " data points."; - while (skip-- > 0) { - cursor_->Next(); - } - } - // Read a data point, and use it to initialize the top blob. - Datum datum; - datum.ParseFromString(cursor_->value()); - - bool force_color = this->layer_param_.data_param().force_encoded_color(); - if ((force_color && DecodeDatum(&datum, true)) || - DecodeDatumNative(&datum)) { - LOG(INFO) << "Decoding Datum"; - } - // image - int crop_size = this->layer_param_.transform_param().crop_size(); - if (crop_size > 0) { - top[0]->Reshape(this->layer_param_.data_param().batch_size(), - datum.channels(), crop_size, crop_size); - this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(), - datum.channels(), crop_size, crop_size); - this->transformed_data_.Reshape(1, datum.channels(), crop_size, crop_size); - } else { - top[0]->Reshape( - this->layer_param_.data_param().batch_size(), datum.channels(), - datum.height(), datum.width()); - this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(), - datum.channels(), datum.height(), datum.width()); - this->transformed_data_.Reshape(1, datum.channels(), - datum.height(), datum.width()); -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 ->>>>>>> New triplet loss layer added(beta1 version-no test source files) -======= ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update - } - LOG(INFO) << "output data size: " << top[0]->num() << "," - << top[0]->channels() << "," << top[0]->height() << "," - << top[0]->width(); - // label - if (this->output_labels_) { - vector label_shape(1, 
batch_size); - top[1]->Reshape(label_shape); - for (int i = 0; i < this->PREFETCH_COUNT; ++i) { - this->prefetch_[i].label_.Reshape(label_shape); - } - } -} - -// This function is called on prefetch thread -template -void DataLayer::load_batch(Batch* batch) { - CPUTimer batch_timer; - batch_timer.Start(); - double read_time = 0; - double trans_time = 0; - CPUTimer timer; - CHECK(batch->data_.count()); - CHECK(this->transformed_data_.count()); - - // Reshape on single input batches for inputs of varying dimension. - const int batch_size = this->layer_param_.data_param().batch_size(); -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -<<<<<<< 9a40f059e6e96f739ad502aae165c4bfdbf1f369 -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 ->>>>>>> triplet data generation and network update - Datum& datum = *(reader_.full().peek()); - // Use data_transformer to infer the expected blob shape from datum. - vector top_shape = this->data_transformer_->InferBlobShape(datum); - this->transformed_data_.Reshape(top_shape); - // Reshape batch according to the batch_size. 
- top_shape[0] = batch_size; - batch->data_.Reshape(top_shape); -======= - const int crop_size = this->layer_param_.transform_param().crop_size(); - bool force_color = this->layer_param_.data_param().force_encoded_color(); - if (batch_size == 1 && crop_size == 0) { - Datum datum; - datum.ParseFromString(cursor_->value()); - if (datum.encoded()) { - if (force_color) { - DecodeDatum(&datum, true); - } else { - DecodeDatumNative(&datum); - } - } - this->prefetch_data_.Reshape(1, datum.channels(), - datum.height(), datum.width()); - this->transformed_data_.Reshape(1, datum.channels(), - datum.height(), datum.width()); - } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 ->>>>>>> New triplet loss layer added(beta1 version-no test source files) -======= ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update - - Dtype* top_data = batch->data_.mutable_cpu_data(); - Dtype* top_label = NULL; // suppress warnings about uninitialized variables - - if (this->output_labels_) { - top_label = batch->label_.mutable_cpu_data(); - } - for (int item_id = 0; item_id < batch_size; ++item_id) { - timer.Start(); -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -<<<<<<< 9a40f059e6e96f739ad502aae165c4bfdbf1f369 -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 ->>>>>>> triplet data generation and network update - // get a datum - Datum& datum = *(reader_.full().pop("Waiting for data")); -======= - // get a blob - Datum datum; - datum.ParseFromString(cursor_->value()); - - cv::Mat cv_img; - if (datum.encoded()) { - if (force_color) { - cv_img = DecodeDatumToCVMat(datum, true); - } else { - cv_img = DecodeDatumToCVMatNative(datum); - } - if (cv_img.channels() != this->transformed_data_.channels()) { - LOG(WARNING) << "Your dataset contains encoded images with mixed " - << "channel sizes. 
Consider adding a 'force_color' flag to the " - << "model definition, or rebuild your dataset using " - << "convert_imageset."; - } - } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 ->>>>>>> New triplet loss layer added(beta1 version-no test source files) -======= ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update - read_time += timer.MicroSeconds(); - timer.Start(); - - // Apply data transformations (mirror, scale, crop...) - int offset = batch->data_.offset(item_id); - this->transformed_data_.set_cpu_data(top_data + offset); - if (datum.encoded()) { - this->data_transformer_->Transform(cv_img, &(this->transformed_data_)); - } else { - this->data_transformer_->Transform(datum, &(this->transformed_data_)); - } - if (this->output_labels_) { - top_label[item_id] = datum.label(); - } - trans_time += timer.MicroSeconds(); -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -<<<<<<< 9a40f059e6e96f739ad502aae165c4bfdbf1f369 -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 ->>>>>>> triplet data generation and network update - - reader_.free().push(const_cast(&datum)); -======= - // go to the next iter - cursor_->Next(); - if (!cursor_->valid()) { - DLOG(INFO) << "Restarting data prefetching from start."; - cursor_->SeekToFirst(); - } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 ->>>>>>> New triplet loss layer added(beta1 version-no test source files) -======= ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update - } - batch_timer.Stop(); - DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms."; - DLOG(INFO) << " Read time: " << read_time / 1000 << " ms."; - DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms."; -} - -INSTANTIATE_CLASS(DataLayer); -REGISTER_LAYER_CLASS(Data); - -} // namespace caffe diff --git a/src/caffe/layers/image_data_layer.cpp b/src/caffe/layers/image_data_layer.cpp index fa3a66a5133..223ba3a75ca 100644 
--- a/src/caffe/layers/image_data_layer.cpp +++ b/src/caffe/layers/image_data_layer.cpp @@ -1,4 +1,3 @@ -#ifdef USE_OPENCV #include #include // NOLINT(readability/streams) @@ -18,7 +17,7 @@ namespace caffe { template ImageDataLayer::~ImageDataLayer() { - this->JoinPrefetchThread(); + this->StopInternalThread(); } template @@ -71,8 +70,10 @@ void ImageDataLayer::DataLayerSetUp(const vector*>& bottom, const int batch_size = this->layer_param_.image_data_param().batch_size(); CHECK_GT(batch_size, 0) << "Positive batch size required"; top_shape[0] = batch_size; - this->prefetch_data_.Reshape(top_shape); - top[0]->ReshapeLike(this->prefetch_data_); + for (int i = 0; i < this->PREFETCH_COUNT; ++i) { + this->prefetch_[i].data_.Reshape(top_shape); + } + top[0]->Reshape(top_shape); LOG(INFO) << "output data size: " << top[0]->num() << "," << top[0]->channels() << "," << top[0]->height() << "," @@ -80,7 +81,9 @@ void ImageDataLayer::DataLayerSetUp(const vector*>& bottom, // label vector label_shape(1, batch_size); top[1]->Reshape(label_shape); - this->prefetch_label_.Reshape(label_shape); + for (int i = 0; i < this->PREFETCH_COUNT; ++i) { + this->prefetch_[i].label_.Reshape(label_shape); + } } template @@ -90,15 +93,15 @@ void ImageDataLayer::ShuffleImages() { shuffle(lines_.begin(), lines_.end(), prefetch_rng); } -// This function is used to create a thread that prefetches the data. 
+// This function is called on prefetch thread template -void ImageDataLayer::InternalThreadEntry() { +void ImageDataLayer::load_batch(Batch* batch) { CPUTimer batch_timer; batch_timer.Start(); double read_time = 0; double trans_time = 0; CPUTimer timer; - CHECK(this->prefetch_data_.count()); + CHECK(batch->data_.count()); CHECK(this->transformed_data_.count()); ImageDataParameter image_data_param = this->layer_param_.image_data_param(); const int batch_size = image_data_param.batch_size(); @@ -115,12 +118,12 @@ void ImageDataLayer::InternalThreadEntry() { // Use data_transformer to infer the expected blob shape from a cv_img. vector top_shape = this->data_transformer_->InferBlobShape(cv_img); this->transformed_data_.Reshape(top_shape); - // Reshape prefetch_data according to the batch_size. + // Reshape batch according to the batch_size. top_shape[0] = batch_size; - this->prefetch_data_.Reshape(top_shape); + batch->data_.Reshape(top_shape); - Dtype* prefetch_data = this->prefetch_data_.mutable_cpu_data(); - Dtype* prefetch_label = this->prefetch_label_.mutable_cpu_data(); + Dtype* prefetch_data = batch->data_.mutable_cpu_data(); + Dtype* prefetch_label = batch->label_.mutable_cpu_data(); // datum scales const int lines_size = lines_.size(); @@ -134,7 +137,7 @@ void ImageDataLayer::InternalThreadEntry() { read_time += timer.MicroSeconds(); timer.Start(); // Apply transformations (mirror, crop...) 
to the image - int offset = this->prefetch_data_.offset(item_id); + int offset = batch->data_.offset(item_id); this->transformed_data_.set_cpu_data(prefetch_data + offset); this->data_transformer_->Transform(cv_img, &(this->transformed_data_)); trans_time += timer.MicroSeconds(); @@ -161,4 +164,3 @@ INSTANTIATE_CLASS(ImageDataLayer); REGISTER_LAYER_CLASS(ImageData); } // namespace caffe -#endif // USE_OPENCV diff --git a/src/caffe/layers/image_data_layer.cpp.orig.orig b/src/caffe/layers/image_data_layer.cpp.orig.orig deleted file mode 100644 index 851fc635fb8..00000000000 --- a/src/caffe/layers/image_data_layer.cpp.orig.orig +++ /dev/null @@ -1,213 +0,0 @@ -#include - -#include // NOLINT(readability/streams) -#include // NOLINT(readability/streams) -#include -#include -#include - -#include "caffe/data_layers.hpp" -#include "caffe/layer.hpp" -#include "caffe/util/benchmark.hpp" -#include "caffe/util/io.hpp" -#include "caffe/util/math_functions.hpp" -#include "caffe/util/rng.hpp" - -namespace caffe { - -template -ImageDataLayer::~ImageDataLayer() { - this->StopInternalThread(); -} - -template -void ImageDataLayer::DataLayerSetUp(const vector*>& bottom, - const vector*>& top) { - const int new_height = this->layer_param_.image_data_param().new_height(); - const int new_width = this->layer_param_.image_data_param().new_width(); - const bool is_color = this->layer_param_.image_data_param().is_color(); - string root_folder = this->layer_param_.image_data_param().root_folder(); - - CHECK((new_height == 0 && new_width == 0) || - (new_height > 0 && new_width > 0)) << "Current implementation requires " - "new_height and new_width to be set at the same time."; - // Read the file with filenames and labels - const string& source = this->layer_param_.image_data_param().source(); - LOG(INFO) << "Opening file " << source; - std::ifstream infile(source.c_str()); - string filename; - int label; - while (infile >> filename >> label) { - lines_.push_back(std::make_pair(filename, 
label)); - } - - if (this->layer_param_.image_data_param().shuffle()) { - // randomly shuffle data - LOG(INFO) << "Shuffling data"; - const unsigned int prefetch_rng_seed = caffe_rng_rand(); - prefetch_rng_.reset(new Caffe::RNG(prefetch_rng_seed)); - ShuffleImages(); - } - LOG(INFO) << "A total of " << lines_.size() << " images."; - - lines_id_ = 0; - // Check if we would need to randomly skip a few data points - if (this->layer_param_.image_data_param().rand_skip()) { - unsigned int skip = caffe_rng_rand() % - this->layer_param_.image_data_param().rand_skip(); - LOG(INFO) << "Skipping first " << skip << " data points."; - CHECK_GT(lines_.size(), skip) << "Not enough points to skip"; - lines_id_ = skip; - } - // Read an image, and use it to initialize the top blob. - cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, - new_height, new_width, is_color); -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -<<<<<<< 9a40f059e6e96f739ad502aae165c4bfdbf1f369 -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 ->>>>>>> triplet data generation and network update - CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first; - // Use data_transformer to infer the expected blob shape from a cv_image. - vector top_shape = this->data_transformer_->InferBlobShape(cv_img); - this->transformed_data_.Reshape(top_shape); - // Reshape prefetch_data and top[0] according to the batch_size. 
- const int batch_size = this->layer_param_.image_data_param().batch_size(); - CHECK_GT(batch_size, 0) << "Positive batch size required"; - top_shape[0] = batch_size; - for (int i = 0; i < this->PREFETCH_COUNT; ++i) { - this->prefetch_[i].data_.Reshape(top_shape); - } - top[0]->Reshape(top_shape); - -======= - const int channels = cv_img.channels(); - const int height = cv_img.rows; - const int width = cv_img.cols; - // image - const int crop_size = this->layer_param_.transform_param().crop_size(); - const int batch_size = this->layer_param_.image_data_param().batch_size(); - if (crop_size > 0) { - top[0]->Reshape(batch_size, channels, crop_size, crop_size); - this->prefetch_data_.Reshape(batch_size, channels, crop_size, crop_size); - this->transformed_data_.Reshape(1, channels, crop_size, crop_size); - } else { - top[0]->Reshape(batch_size, channels, height, width); - this->prefetch_data_.Reshape(batch_size, channels, height, width); - this->transformed_data_.Reshape(1, channels, height, width); - } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 ->>>>>>> New triplet loss layer added(beta1 version-no test source files) -======= ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update - LOG(INFO) << "output data size: " << top[0]->num() << "," - << top[0]->channels() << "," << top[0]->height() << "," - << top[0]->width(); - // label - vector label_shape(1, batch_size); - top[1]->Reshape(label_shape); - for (int i = 0; i < this->PREFETCH_COUNT; ++i) { - this->prefetch_[i].label_.Reshape(label_shape); - } -} - -template -void ImageDataLayer::ShuffleImages() { - caffe::rng_t* prefetch_rng = - static_cast(prefetch_rng_->generator()); - shuffle(lines_.begin(), lines_.end(), prefetch_rng); -} - -// This function is called on prefetch thread -template -void ImageDataLayer::load_batch(Batch* batch) { - CPUTimer batch_timer; - batch_timer.Start(); - double read_time = 0; - double trans_time = 0; - CPUTimer timer; - 
CHECK(batch->data_.count()); - CHECK(this->transformed_data_.count()); - ImageDataParameter image_data_param = this->layer_param_.image_data_param(); - const int batch_size = image_data_param.batch_size(); - const int new_height = image_data_param.new_height(); - const int new_width = image_data_param.new_width(); - const int crop_size = this->layer_param_.transform_param().crop_size(); - const bool is_color = image_data_param.is_color(); - string root_folder = image_data_param.root_folder(); - -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -<<<<<<< 9a40f059e6e96f739ad502aae165c4bfdbf1f369 -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 ->>>>>>> triplet data generation and network update - // Reshape according to the first image of each batch - // on single input batches allows for inputs of varying dimension. - cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, - new_height, new_width, is_color); - CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first; - // Use data_transformer to infer the expected blob shape from a cv_img. - vector top_shape = this->data_transformer_->InferBlobShape(cv_img); - this->transformed_data_.Reshape(top_shape); - // Reshape batch according to the batch_size. - top_shape[0] = batch_size; - batch->data_.Reshape(top_shape); -======= - // Reshape on single input batches for inputs of varying dimension. 
- if (batch_size == 1 && crop_size == 0 && new_height == 0 && new_width == 0) { - cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, - 0, 0, is_color); - this->prefetch_data_.Reshape(1, cv_img.channels(), - cv_img.rows, cv_img.cols); - this->transformed_data_.Reshape(1, cv_img.channels(), - cv_img.rows, cv_img.cols); - } -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 ->>>>>>> New triplet loss layer added(beta1 version-no test source files) -======= ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update - - Dtype* prefetch_data = batch->data_.mutable_cpu_data(); - Dtype* prefetch_label = batch->label_.mutable_cpu_data(); - - // datum scales - const int lines_size = lines_.size(); - for (int item_id = 0; item_id < batch_size; ++item_id) { - // get a blob - timer.Start(); - CHECK_GT(lines_size, lines_id_); - cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, - new_height, new_width, is_color); - CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first; - read_time += timer.MicroSeconds(); - timer.Start(); - // Apply transformations (mirror, crop...) to the image - int offset = batch->data_.offset(item_id); - this->transformed_data_.set_cpu_data(prefetch_data + offset); - this->data_transformer_->Transform(cv_img, &(this->transformed_data_)); - trans_time += timer.MicroSeconds(); - - prefetch_label[item_id] = lines_[lines_id_].second; - // go to the next iter - lines_id_++; - if (lines_id_ >= lines_size) { - // We have reached the end. Restart from the first. 
- DLOG(INFO) << "Restarting data prefetching from start."; - lines_id_ = 0; - if (this->layer_param_.image_data_param().shuffle()) { - ShuffleImages(); - } - } - } - batch_timer.Stop(); - DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms."; - DLOG(INFO) << " Read time: " << read_time / 1000 << " ms."; - DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms."; -} - -INSTANTIATE_CLASS(ImageDataLayer); -REGISTER_LAYER_CLASS(ImageData); - -} // namespace caffe diff --git a/src/caffe/layers/memory_data_layer.cpp b/src/caffe/layers/memory_data_layer.cpp index 2370aa04d3b..42de4198bc4 100644 --- a/src/caffe/layers/memory_data_layer.cpp +++ b/src/caffe/layers/memory_data_layer.cpp @@ -1,6 +1,4 @@ -#ifdef USE_OPENCV #include -#endif // USE_OPENCV #include @@ -55,7 +53,6 @@ void MemoryDataLayer::AddDatumVector(const vector& datum_vector) { has_new_data_ = true; } -#ifdef USE_OPENCV template void MemoryDataLayer::AddMatVector(const vector& mat_vector, const vector& labels) { @@ -79,7 +76,6 @@ void MemoryDataLayer::AddMatVector(const vector& mat_vector, Reset(top_data, top_label, num); has_new_data_ = true; } -#endif // USE_OPENCV template void MemoryDataLayer::Reset(Dtype* data, Dtype* labels, int n) { diff --git a/src/caffe/layers/slice_layer.cpp b/src/caffe/layers/slice_layer.cpp index 0a059ae88fe..e4418c9cf9c 100644 --- a/src/caffe/layers/slice_layer.cpp +++ b/src/caffe/layers/slice_layer.cpp @@ -67,16 +67,11 @@ void SliceLayer::Reshape(const vector*>& bottom, } } CHECK_EQ(count, bottom[0]->count()); - if (top.size() == 1) { - top[0]->ShareData(*bottom[0]); - top[0]->ShareDiff(*bottom[0]); - } } template void SliceLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { - if (top.size() == 1) { return; } int offset_slice_axis = 0; const Dtype* bottom_data = bottom[0]->cpu_data(); const int bottom_slice_axis = bottom[0]->shape(slice_axis_); @@ -97,7 +92,7 @@ void SliceLayer::Forward_cpu(const vector*>& bottom, template void 
SliceLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - if (!propagate_down[0] || top.size() == 1) { return; } + if (!propagate_down[0]) { return; } int offset_slice_axis = 0; Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); const int bottom_slice_axis = bottom[0]->shape(slice_axis_); diff --git a/src/caffe/layers/slice_layer.cu b/src/caffe/layers/slice_layer.cu index e8dc6cd98fc..796841d3f52 100644 --- a/src/caffe/layers/slice_layer.cu +++ b/src/caffe/layers/slice_layer.cu @@ -28,7 +28,6 @@ __global__ void Slice(const int nthreads, const Dtype* in_data, template void SliceLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { - if (top.size() == 1) { return; } int offset_slice_axis = 0; const Dtype* bottom_data = bottom[0]->gpu_data(); const int bottom_slice_axis = bottom[0]->shape(slice_axis_); @@ -49,7 +48,7 @@ void SliceLayer::Forward_gpu(const vector*>& bottom, template void SliceLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { - if (!propagate_down[0] || top.size() == 1) { return; } + if (!propagate_down[0]) { return; } int offset_slice_axis = 0; Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int bottom_slice_axis = bottom[0]->shape(slice_axis_); diff --git a/src/caffe/layers/tile_layer.cu b/src/caffe/layers/tile_layer.cu index 3af8e2eb72f..7fd3bc47d0f 100644 --- a/src/caffe/layers/tile_layer.cu +++ b/src/caffe/layers/tile_layer.cu @@ -6,17 +6,45 @@ namespace caffe { +template +__global__ void Tile(const int nthreads, const Dtype* bottom_data, + const int tile_size, const int num_tiles, const int bottom_tile_axis, + Dtype* top_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int d = index % tile_size; + const int b = (index / tile_size / num_tiles) % bottom_tile_axis; + const int n = index / tile_size / num_tiles / bottom_tile_axis; + const int bottom_index = (n * bottom_tile_axis + b) * tile_size + d; + top_data[index] = 
bottom_data[bottom_index]; + } +} + template void TileLayer::Forward_gpu( const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); - for (int i = 0; i < outer_dim_; ++i) { - for (int t = 0; t < tiles_; ++t) { - caffe_copy(inner_dim_, bottom_data, top_data); - top_data += inner_dim_; + const int bottom_tile_axis = bottom[0]->shape(axis_); + const int nthreads = top[0]->count(); + Tile // NOLINT_NEXT_LINE(whitespace/operators) + <<>>( + nthreads, bottom_data, inner_dim_, tiles_, bottom_tile_axis, top_data); +} + +template +__global__ void TileBackward(const int nthreads, const Dtype* top_diff, + const int tile_size, const int num_tiles, const int bottom_tile_axis, + Dtype* bottom_diff) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int d = index % tile_size; + const int b = (index / tile_size) % bottom_tile_axis; + const int n = index / tile_size / bottom_tile_axis; + bottom_diff[index] = 0; + int top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d; + for (int t = 0; t < num_tiles; ++t) { + bottom_diff[index] += top_diff[top_index]; + top_index += bottom_tile_axis * tile_size; } - bottom_data += inner_dim_; } } @@ -26,15 +54,12 @@ void TileLayer::Backward_gpu(const vector*>& top, if (!propagate_down[0]) { return; } const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - for (int i = 0; i < outer_dim_; ++i) { - caffe_copy(inner_dim_, top_diff, bottom_diff); - top_diff += inner_dim_; - for (int t = 1; t < tiles_; ++t) { - caffe_gpu_axpy(inner_dim_, Dtype(1), top_diff, bottom_diff); - top_diff += inner_dim_; - } - bottom_diff += inner_dim_; - } + const int bottom_tile_axis = bottom[0]->shape(axis_); + const int tile_size = inner_dim_ / bottom_tile_axis; + const int nthreads = bottom[0]->count(); + TileBackward // NOLINT_NEXT_LINE(whitespace/operators) + <<>>( + nthreads, top_diff, tile_size, tiles_, bottom_tile_axis, 
bottom_diff); } INSTANTIATE_LAYER_GPU_FUNCS(TileLayer); diff --git a/src/caffe/layers/triplet_loss_layer.cpp b/src/caffe/layers/triplet_loss_layer.cpp index 613fa703676..7d11ff59ce4 100644 --- a/src/caffe/layers/triplet_loss_layer.cpp +++ b/src/caffe/layers/triplet_loss_layer.cpp @@ -94,9 +94,9 @@ void TripletLossLayer::Forward_cpu( // a b is a similar pair for pair wise // loss accumulated by the pair wise part loss += dist_sq_pos.cpu_data()[i]; - dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; - dist_sq_.mutable_cpu_data()[i] += margin; for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; + dist_sq_.mutable_cpu_data()[i] += margin; // Loss component calculated from negative part caffe_sub( dim, @@ -268,10 +268,10 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, diff_pos.cpu_data() + (j*dim), Dtype(0.0), bout + ((2 + num_triplets)*j + i)*dim); - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; // Loss component calculated from negative part caffe_sub( dim, @@ -319,10 +319,10 @@ void TripletLossLayer::Backward_cpu(const vector*>& top, diff_pos.cpu_data() + (j*dim), Dtype(0.0), bout + ((2 + num_triplets)*j + i)*dim); - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; // the num_triplets triplet part for (int triplet = 0; triplet < num_triplets; ++triplet) { + dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; + dist_sq_.mutable_cpu_data()[j] += margin; // Loss component calculated from negative part caffe_sub( dim, diff --git a/src/caffe/layers/triplet_loss_layer.cpp.orig b/src/caffe/layers/triplet_loss_layer.cpp.orig 
deleted file mode 100644 index 066d7967652..00000000000 --- a/src/caffe/layers/triplet_loss_layer.cpp.orig +++ /dev/null @@ -1,472 +0,0 @@ -#include -#include - -#include "caffe/layer.hpp" -#include "caffe/loss_layers.hpp" -#include "caffe/util/io.hpp" -#include "caffe/util/math_functions.hpp" - -namespace caffe { - -template -void TripletLossLayer::LayerSetUp( - const vector*>& bottom, const vector*>& top) { - LossLayer::LayerSetUp(bottom, top); - // number of triplet in a batch - int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); - // dimension of each descriptor - int dim = bottom[0]->count()/bottom[0]->num(); - CHECK_EQ(bottom[0]->channels(), dim); - CHECK_EQ(bottom[0]->height(), 1); - CHECK_EQ(bottom[0]->width(), 1); - CHECK_EQ(bottom[1]->channels(), 1); - CHECK_EQ(bottom[1]->height(), 1); - CHECK_EQ(bottom[1]->width(), 1); - // In each set, we have: - // the descriptor of reference sample, closest sample, and negative samples - // number of sets in the whole batch - int num_set = bottom[0]->num()/(2 + num_triplets); - dist_sq_.Reshape(num_set, 1, 1, 1); - diff_pos.Reshape(num_set, dim, 1, 1); - dist_sq_pos.Reshape(num_set, 1, 1, 1); - diff_neg.Reshape(num_set, dim, 1, 1); - dist_sq_neg.Reshape(num_set, 1, 1, 1); - // vector of ones used to sum along channels - summer_vec_.Reshape(bottom[0]->channels(), 1, 1, 1); - for (int i = 0; i < bottom[0]->channels(); ++i) - summer_vec_.mutable_cpu_data()[i] = Dtype(1); -} - -template -void TripletLossLayer::Forward_cpu( - const vector*>& bottom, - const vector*>& top) { - Dtype margin = this->layer_param_.triplet_loss_param().margin(); - Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); - int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); - CHECK_EQ(bottom[0]->num()%(2 + num_triplets), 0); - Dtype loss(0.0); - int dim = bottom[0]->count()/bottom[0]->num(); - int num_set = bottom[0]->num()/(2 + num_triplets); - if (losstype == 0) { - for (int i = 0; i < 
num_set; ++i) { - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive - diff_pos.mutable_cpu_data() + i*dim); // reference-pose_close - // Loss component calculated from reference and close one - dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(dim, - diff_pos.cpu_data() + i*dim, diff_pos.cpu_data() + i*dim); - // a b is a similar pair for pair wise - // loss accumulated by the pair wise part - loss += dist_sq_pos.cpu_data()[i]; - for (int triplet = 0; triplet < num_triplets; ++triplet) { - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i]; - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + i*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + i*dim, diff_neg.cpu_data() + i*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg.cpu_data()[i]; - // loss accumulated accumulated by the triplet part - loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); - } - } - loss = loss / static_cast(num_set) / Dtype(2); - top[0]->mutable_cpu_data()[0] = loss; - } else { - for (int i = 0; i < num_set; ++i) { - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive - diff_pos.mutable_cpu_data() + i*dim); // reference-pose_close - // Loss component calculated from reference and close one - dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(dim, - diff_pos.cpu_data() + i*dim, diff_pos.cpu_data() + i*dim); - // a b is a similar pair for pair wise - // loss accumulated by the pair wise 
part - loss += dist_sq_pos.cpu_data()[i]; -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - for (int triplet = 0; triplet < num_triplets; ++triplet) { - dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; - dist_sq_.mutable_cpu_data()[i] += margin; -======= - dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; - dist_sq_.mutable_cpu_data()[i] += margin; - for (int triplet = 0; triplet < num_triplets; ++triplet) { ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + i*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + i*dim, diff_neg.cpu_data() + i*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[i] = 1 - \ - dist_sq_neg.cpu_data()[i] / dist_sq_.mutable_cpu_data()[i]; - // loss accumulated accumulated by the triplet part - loss += std::max(dist_sq_.cpu_data()[i], Dtype(0.0)); - } - } - loss = loss / static_cast(num_set) / Dtype(2); - top[0]->mutable_cpu_data()[0] = loss; - } -} - -template -void TripletLossLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - Dtype margin = this->layer_param_.triplet_loss_param().margin(); - Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); - int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); - int dim = bottom[0]->count()/bottom[0]->num(); - int num_set = bottom[0]->num()/(2 + num_triplets); - if (losstype == 0) { - // BP for feat1(extracted from reference) - for (int i = 0; i < 1; ++i) { - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; 
j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - // the pair part - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; - // Loss component calculated from negative part - if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - // similar pair in triplet - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(1.0), - bout + (2 + num_triplets)*j*dim); - // dissimilar pair in triplet - caffe_cpu_axpby( - dim, - -alpha, - diff_neg.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 -======= - } - } - } - } - // BP for feat2(extracted from the closest sample) - for (int i = 1; i < 2; ++i) { - if (propagate_down[0]) { - const Dtype sign = -1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - // the pair part - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + 
num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; - if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - // similar pair in triplet - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - } - } - } - } -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - // BP for feat2(extracted from the closest sample) - for (int i = 1; i < 2; ++i) { - if (propagate_down[0]) { - const Dtype sign = -1; -======= - // BP for negative feature used in the num_triplets triplet part - for (int i = 2; i < 2 + num_triplets; ++i) { - if (propagate_down[0]) { - const Dtype sign = 1; ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - // the pair part - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + j*dim); // 
reference-negative - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; - if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - // similar pair in triplet - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - // BP for negative feature used in the num_triplets triplet part - for (int i = 2; i < 2 + num_triplets; ++i) { - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); -======= ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + i)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; - if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - // dissimilar pairs - caffe_cpu_axpby( - dim, - alpha, - diff_neg.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - } else { - caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } else { - for (int i = 
0; i < 1; ++i) { - // BP for data1(feat1) - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - // the pair part - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; -======= - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] = 1 - \ - dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; - // loss accumulated accumulated by the triplet part - if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - caffe_cpu_axpby( - dim, - alpha*dist_sq_neg.mutable_cpu_data()[j]\ - /((dist_sq_pos.mutable_cpu_data()[j]+margin)\ - *(dist_sq_pos.mutable_cpu_data()[j]+margin)), - diff_pos.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - caffe_cpu_axpby( - dim, - -alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), - diff_neg.cpu_data() + (j*dim), - 
Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - for (int i = 1; i < 2; ++i) { - // BP for positive data(feat2) - if (propagate_down[0]) { - const Dtype sign = -1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - // the pair part - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; -======= - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] = 1 - \ - dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; - // loss accumulated accumulated by the triplet part - if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - caffe_cpu_axpby( - dim, - alpha*dist_sq_neg.mutable_cpu_data()[j]\ - /((dist_sq_pos.mutable_cpu_data()[j]+margin)\ - *(dist_sq_pos.mutable_cpu_data()[j]+margin)), - diff_pos.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - 
for (int i = 2; i < 2 + num_triplets; ++i) { - // BP for negative data(feat3) - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + i)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] = 1 - \ - dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; - // loss accumulated accumulated by the triplet part - if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - caffe_cpu_axpby( - dim, - alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), - diff_neg.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - } else { - caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } -} - -#ifdef CPU_ONLY -STUB_GPU(TripletLossLayer); -#endif - -INSTANTIATE_CLASS(TripletLossLayer); -REGISTER_LAYER_CLASS(TripletLoss); - -} // namespace caffe diff --git a/src/caffe/layers/triplet_loss_layer.cpp.orig.orig b/src/caffe/layers/triplet_loss_layer.cpp.orig.orig deleted file mode 100644 index 6d30c7f1878..00000000000 --- a/src/caffe/layers/triplet_loss_layer.cpp.orig.orig +++ /dev/null @@ -1,492 +0,0 @@ -#include -#include - -#include "caffe/layer.hpp" -#include "caffe/loss_layers.hpp" -#include "caffe/util/io.hpp" -#include "caffe/util/math_functions.hpp" - -namespace caffe { - -template -void TripletLossLayer::LayerSetUp( - const vector*>& bottom, const vector*>& 
top) { - LossLayer::LayerSetUp(bottom, top); - // number of triplet in a batch - int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); - // dimension of each descriptor - int dim = bottom[0]->count()/bottom[0]->num(); - CHECK_EQ(bottom[0]->channels(), dim); - CHECK_EQ(bottom[0]->height(), 1); - CHECK_EQ(bottom[0]->width(), 1); - CHECK_EQ(bottom[1]->channels(), 1); - CHECK_EQ(bottom[1]->height(), 1); - CHECK_EQ(bottom[1]->width(), 1); - // In each set, we have: - // the descriptor of reference sample, closest sample, and negative samples - // number of sets in the whole batch - int num_set = bottom[0]->num()/(2 + num_triplets); - dist_sq_.Reshape(num_set, 1, 1, 1); - diff_pos.Reshape(num_set, dim, 1, 1); - dist_sq_pos.Reshape(num_set, 1, 1, 1); - diff_neg.Reshape(num_set, dim, 1, 1); - dist_sq_neg.Reshape(num_set, 1, 1, 1); - // vector of ones used to sum along channels - summer_vec_.Reshape(bottom[0]->channels(), 1, 1, 1); - for (int i = 0; i < bottom[0]->channels(); ++i) - summer_vec_.mutable_cpu_data()[i] = Dtype(1); -} - -template -void TripletLossLayer::Forward_cpu( - const vector*>& bottom, - const vector*>& top) { - Dtype margin = this->layer_param_.triplet_loss_param().margin(); - Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); - int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); - CHECK_EQ(bottom[0]->num()%(2 + num_triplets), 0); - Dtype loss(0.0); - int dim = bottom[0]->count()/bottom[0]->num(); - int num_set = bottom[0]->num()/(2 + num_triplets); - if (losstype == 0) { - for (int i = 0; i < num_set; ++i) { - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive - diff_pos.mutable_cpu_data() + i*dim); // reference-pose_close - // Loss component calculated from reference and close one - dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(dim, - diff_pos.cpu_data() + i*dim, diff_pos.cpu_data() + 
i*dim); - // a b is a similar pair for pair wise - // loss accumulated by the pair wise part - loss += dist_sq_pos.cpu_data()[i]; - for (int triplet = 0; triplet < num_triplets; ++triplet) { - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i]; - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + i*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + i*dim, diff_neg.cpu_data() + i*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg.cpu_data()[i]; - // loss accumulated accumulated by the triplet part - loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); - } - } - loss = loss / static_cast(num_set) / Dtype(2); - top[0]->mutable_cpu_data()[0] = loss; - } else { - for (int i = 0; i < num_set; ++i) { - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive - diff_pos.mutable_cpu_data() + i*dim); // reference-pose_close - // Loss component calculated from reference and close one - dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(dim, - diff_pos.cpu_data() + i*dim, diff_pos.cpu_data() + i*dim); - // a b is a similar pair for pair wise - // loss accumulated by the pair wise part - loss += dist_sq_pos.cpu_data()[i]; -<<<<<<< 8bea48cb77e890cda255da58b142f3a402196328 - dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; - dist_sq_.mutable_cpu_data()[i] += margin; - for (int triplet = 0; triplet < num_triplets; ++triplet) { -======= -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - for (int triplet = 0; triplet < num_triplets; ++triplet) { - 
dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; - dist_sq_.mutable_cpu_data()[i] += margin; -======= - dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; - dist_sq_.mutable_cpu_data()[i] += margin; - for (int triplet = 0; triplet < num_triplets; ++triplet) { ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + i*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + i*dim, diff_neg.cpu_data() + i*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[i] = 1 - \ - dist_sq_neg.cpu_data()[i] / dist_sq_.mutable_cpu_data()[i]; - // loss accumulated accumulated by the triplet part - loss += std::max(dist_sq_.cpu_data()[i], Dtype(0.0)); - } - } - loss = loss / static_cast(num_set) / Dtype(2); - top[0]->mutable_cpu_data()[0] = loss; - } -} - -template -void TripletLossLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - Dtype margin = this->layer_param_.triplet_loss_param().margin(); - Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); - int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); - int dim = bottom[0]->count()/bottom[0]->num(); - int num_set = bottom[0]->num()/(2 + num_triplets); - if (losstype == 0) { - // BP for feat1(extracted from reference) - for (int i = 0; i < 1; ++i) { - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = 
bottom[0]->mutable_cpu_diff(); - // the pair part - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; - // Loss component calculated from negative part - if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - // similar pair in triplet - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(1.0), - bout + (2 + num_triplets)*j*dim); - // dissimilar pair in triplet - caffe_cpu_axpby( - dim, - -alpha, - diff_neg.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 -======= - } - } - } - } - // BP for feat2(extracted from the closest sample) - for (int i = 1; i < 2; ++i) { - if (propagate_down[0]) { - const Dtype sign = -1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - // the pair part - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - 
bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; - if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - // similar pair in triplet - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - } - } - } - } -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - // BP for feat2(extracted from the closest sample) - for (int i = 1; i < 2; ++i) { - if (propagate_down[0]) { - const Dtype sign = -1; -======= - // BP for negative feature used in the num_triplets triplet part - for (int i = 2; i < 2 + num_triplets; ++i) { - if (propagate_down[0]) { - const Dtype sign = 1; ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - // the pair part - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - // Triplet loss 
accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; - if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - // similar pair in triplet - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - // BP for negative feature used in the num_triplets triplet part - for (int i = 2; i < 2 + num_triplets; ++i) { - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); -======= ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + i)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; - if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - // dissimilar pairs - caffe_cpu_axpby( - dim, - alpha, - diff_neg.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - } else { - caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } else { - for (int i = 0; i < 1; ++i) { - // BP for 
data1(feat1) - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - // the pair part - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); -<<<<<<< 8bea48cb77e890cda255da58b142f3a402196328 - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { -======= -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; -======= - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] = 1 - \ - dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; - // loss accumulated accumulated by the triplet part - if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - caffe_cpu_axpby( - dim, - 
alpha*dist_sq_neg.mutable_cpu_data()[j]\ - /((dist_sq_pos.mutable_cpu_data()[j]+margin)\ - *(dist_sq_pos.mutable_cpu_data()[j]+margin)), - diff_pos.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - caffe_cpu_axpby( - dim, - -alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), - diff_neg.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - for (int i = 1; i < 2; ++i) { - // BP for positive data(feat2) - if (propagate_down[0]) { - const Dtype sign = -1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - // the pair part - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); -<<<<<<< 8bea48cb77e890cda255da58b142f3a402196328 - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { -======= -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; -======= - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - 
diff_neg.mutable_cpu_data() + j*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] = 1 - \ - dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; - // loss accumulated accumulated by the triplet part - if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - caffe_cpu_axpby( - dim, - alpha*dist_sq_neg.mutable_cpu_data()[j]\ - /((dist_sq_pos.mutable_cpu_data()[j]+margin)\ - *(dist_sq_pos.mutable_cpu_data()[j]+margin)), - diff_pos.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - for (int i = 2; i < 2 + num_triplets; ++i) { - // BP for negative data(feat3) - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + i)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] = 1 - \ - dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; - // loss accumulated accumulated by the triplet part - if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - caffe_cpu_axpby( - dim, - alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), - diff_neg.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - } else { - caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j 
+ i)*dim); - } - } - } - } - } -} - -#ifdef CPU_ONLY -STUB_GPU(TripletLossLayer); -#endif - -INSTANTIATE_CLASS(TripletLossLayer); -REGISTER_LAYER_CLASS(TripletLoss); - -} // namespace caffe diff --git a/src/caffe/layers/triplet_loss_layer.cu.orig b/src/caffe/layers/triplet_loss_layer.cu similarity index 90% rename from src/caffe/layers/triplet_loss_layer.cu.orig rename to src/caffe/layers/triplet_loss_layer.cu index 32dc48aafc2..cd1fbb1201b 100644 --- a/src/caffe/layers/triplet_loss_layer.cu.orig +++ b/src/caffe/layers/triplet_loss_layer.cu @@ -30,11 +30,7 @@ void TripletLossLayer::Forward_gpu( dim, diff_pos.gpu_data() + i*dim, diff_pos.gpu_data() + i*dim, -<<<<<<< 362a69f0c62096aa4083a981cab52469e40874d0 dist_sq_pos.mutable_cpu_data() + i); -======= - dist_sq_pos.mutable_gpu_data() + i); ->>>>>>> add initiate class name of triplet loss layer // a b is a similar pair for pair wise // loss accumulated by the pair wise part loss += dist_sq_pos.gpu_data()[i]; @@ -52,11 +48,7 @@ void TripletLossLayer::Forward_gpu( dim, diff_neg.gpu_data() + i*dim, diff_neg.gpu_data() + i*dim, -<<<<<<< 362a69f0c62096aa4083a981cab52469e40874d0 dist_sq_neg.mutable_cpu_data() + i); -======= - dist_sq_neg.mutable_gpu_data() + i); ->>>>>>> add initiate class name of triplet loss layer // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[i] -= dist_sq_neg.gpu_data()[i]; // loss accumulated accumulated by the triplet part @@ -77,11 +69,7 @@ void TripletLossLayer::Forward_gpu( dim, diff_pos.gpu_data() + i*dim, diff_pos.gpu_data() + i*dim, -<<<<<<< 362a69f0c62096aa4083a981cab52469e40874d0 dist_sq_pos.mutable_cpu_data() + i); -======= - dist_sq_pos.mutable_gpu_data() + i); ->>>>>>> add initiate class name of triplet loss layer // a b is a similar pair for pair wise // loss accumulated by the pair wise part loss += dist_sq_pos.gpu_data()[i]; @@ -98,11 +86,7 @@ void TripletLossLayer::Forward_gpu( dim, diff_neg.gpu_data() + i*dim, diff_neg.gpu_data() + i*dim, 
-<<<<<<< 362a69f0c62096aa4083a981cab52469e40874d0 dist_sq_neg.mutable_cpu_data() + i); -======= - dist_sq_neg.mutable_gpu_data() + i); ->>>>>>> add initiate class name of triplet loss layer // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[i] = 1 - \ dist_sq_neg.gpu_data()[i] / dist_sq_.mutable_gpu_data()[i]; @@ -150,11 +134,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, -<<<<<<< 362a69f0c62096aa4083a981cab52469e40874d0 dist_sq_neg.mutable_cpu_data() + j); -======= - dist_sq_neg.mutable_gpu_data() + j); ->>>>>>> add initiate class name of triplet loss layer // Triplet loss accumulation // a and negative[triplet] is a similar pair for triplet dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; @@ -210,11 +190,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, -<<<<<<< 362a69f0c62096aa4083a981cab52469e40874d0 dist_sq_neg.mutable_cpu_data() + j); -======= - dist_sq_neg.mutable_gpu_data() + j); ->>>>>>> add initiate class name of triplet loss layer // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { @@ -250,11 +226,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, -<<<<<<< 362a69f0c62096aa4083a981cab52469e40874d0 dist_sq_neg.mutable_cpu_data() + j); -======= - dist_sq_neg.mutable_gpu_data() + j); ->>>>>>> add initiate class name of triplet loss layer // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { @@ -301,11 +273,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, -<<<<<<< 
362a69f0c62096aa4083a981cab52469e40874d0 dist_sq_neg.mutable_cpu_data() + j); -======= - dist_sq_neg.mutable_gpu_data() + j); ->>>>>>> add initiate class name of triplet loss layer // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] = 1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; @@ -359,11 +327,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, -<<<<<<< 362a69f0c62096aa4083a981cab52469e40874d0 dist_sq_neg.mutable_cpu_data() + j); -======= - dist_sq_neg.mutable_gpu_data() + j); ->>>>>>> add initiate class name of triplet loss layer // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] = 1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; @@ -402,11 +366,7 @@ void TripletLossLayer::Backward_gpu(const vector*>& top, dim, diff_neg.gpu_data() + j*dim, diff_neg.gpu_data() + j*dim, -<<<<<<< 362a69f0c62096aa4083a981cab52469e40874d0 dist_sq_neg.mutable_cpu_data() + j); -======= - dist_sq_neg.mutable_gpu_data() + j); ->>>>>>> add initiate class name of triplet loss layer // a and negative[triplet] is a dissimilar pair for triplet dist_sq_.mutable_gpu_data()[j] = 1 - \ dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; diff --git a/src/caffe/layers/triplet_loss_layer.cu.orig.orig b/src/caffe/layers/triplet_loss_layer.cu.orig.orig deleted file mode 100644 index 0911362a47f..00000000000 --- a/src/caffe/layers/triplet_loss_layer.cu.orig.orig +++ /dev/null @@ -1,764 +0,0 @@ -#include -#include - -#include "caffe/layer.hpp" -<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c -#include "caffe/util/io.hpp" -#include "caffe/util/math_functions.hpp" -#include "caffe/vision_layers.hpp" -======= -#include "caffe/vision_layers.hpp" -#include "caffe/util/io.hpp" -#include "caffe/util/math_functions.hpp" ->>>>>>> GPU version added - -namespace caffe { - -template -void TripletLossLayer::Forward_gpu( - 
const vector*>& bottom, - const vector*>& top) { -<<<<<<< 8bea48cb77e890cda255da58b142f3a402196328 -<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c -======= ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - Dtype margin = this->layer_param_.triplet_loss_param().margin(); - Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); - int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); - CHECK_EQ(bottom[0]->num()%(2 + num_triplets), 0); - Dtype loss(0.0); - int dim = bottom[0]->count()/bottom[0]->num(); - int num_set = bottom[0]->num()/(2 + num_triplets); - if (losstype == 0) { - for (int i = 0; i < num_set; ++i) { -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - caffe_gpu_sub( -======= - caffe_sub( ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - dim, - bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->gpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive - diff_pos.mutable_gpu_data() + i*dim); // reference-pose_close -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - caffe_gpu_dot( - dim, - diff_pos.gpu_data() + i*dim, - diff_pos.gpu_data() + i*dim, - dist_sq_pos.mutable_cpu_data() + i); -======= - caffe_gpu_powx( - dim, - diff_pos.mutable_gpu_data() + i*dim, // reference-pose_close - Dtype(2), - diff_sq_pos.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_pos.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_pos.mutable_gpu_data() + i); // \Sum (reference-pose_close)^2 ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - // a b is a similar pair for pair wise - // loss accumulated by the pair wise part - loss += dist_sq_pos.gpu_data()[i]; - for (int triplet = 0; 
triplet < num_triplets; ++triplet) { - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.gpu_data()[i]; - // Loss component calculated from negative part -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - caffe_gpu_sub( -======= - caffe_sub( ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - dim, - bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->gpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, - diff_neg.mutable_gpu_data() + i*dim); // reference-negative -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - caffe_gpu_dot( - dim, - diff_neg.gpu_data() + i*dim, - diff_neg.gpu_data() + i*dim, - dist_sq_neg.mutable_cpu_data() + i); -======= - caffe_gpu_powx( - dim, - diff_neg.mutable_gpu_data() + i*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + i); // \Sum (reference-pose_close)^2 ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_gpu_data()[i] -= dist_sq_neg.gpu_data()[i]; - // loss accumulated accumulated by the triplet part - loss += std::max(margin + dist_sq_.gpu_data()[i], Dtype(0.0)); - } - } - loss = loss / static_cast(num_set) / Dtype(2); - top[0]->mutable_gpu_data()[0] = loss; - } else { - for (int i = 0; i < num_set; ++i) { -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - caffe_gpu_sub( -======= - caffe_sub( ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - dim, - bottom[0]->gpu_data() + (2 + 
num_triplets)*i*dim, // reference - bottom[0]->gpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive - diff_pos.mutable_gpu_data() + i*dim); // reference-pose_close - // Loss component calculated from reference and close one -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - caffe_gpu_dot( - dim, - diff_pos.gpu_data() + i*dim, - diff_pos.gpu_data() + i*dim, - dist_sq_pos.mutable_cpu_data() + i); - // a b is a similar pair for pair wise - // loss accumulated by the pair wise part - loss += dist_sq_pos.gpu_data()[i]; - for (int triplet = 0; triplet < num_triplets; ++triplet) { - dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.mutable_gpu_data()[i]; - dist_sq_.mutable_gpu_data()[i] += margin; - // Loss component calculated from negative part - caffe_gpu_sub( -======= - caffe_gpu_powx( - dim, - diff_pos.mutable_gpu_data() + i*dim, // reference-pose_close - Dtype(2), - diff_sq_pos.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_pos.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_pos.mutable_gpu_data() + i); // \Sum (reference-pose_close)^2 - // a b is a similar pair for pair wise - // loss accumulated by the pair wise part - loss += dist_sq_pos.gpu_data()[i]; - dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.mutable_gpu_data()[i]; - dist_sq_.mutable_gpu_data()[i] += margin; - for (int triplet = 0; triplet < num_triplets; ++triplet) { - // Loss component calculated from negative part - caffe_sub( ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - dim, - bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->gpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, - diff_neg.mutable_gpu_data() + i*dim); // reference-negative -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - caffe_gpu_dot( - dim, - 
diff_neg.gpu_data() + i*dim, - diff_neg.gpu_data() + i*dim, - dist_sq_neg.mutable_cpu_data() + i); -======= - caffe_gpu_powx( - dim, - diff_neg.mutable_gpu_data() + i*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + i); // \Sum (reference-pose_close)^2 ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_gpu_data()[i] = 1 - \ - dist_sq_neg.gpu_data()[i] / dist_sq_.mutable_gpu_data()[i]; - // loss accumulated accumulated by the triplet part - loss += std::max(dist_sq_.gpu_data()[i], Dtype(0.0)); - } - } - loss = loss / static_cast(num_set) / Dtype(2); - top[0]->mutable_gpu_data()[0] = loss; - } -<<<<<<< 8bea48cb77e890cda255da58b142f3a402196328 -======= - int count = bottom[0]->count(); - caffe_gpu_sub( - count, - bottom[0]->gpu_data(), // a - bottom[1]->gpu_data(), // b - diff_pos.mutable_gpu_data()); // a_i-b_i - caffe_gpu_sub( - count, - bottom[0]->gpu_data(), // a - bottom[2]->gpu_data(), // c - diff_neg.mutable_gpu_data()); // a_i-c_i - caffe_gpu_powx( - count, - diff_pos.mutable_gpu_data(), // a_i-b_i - Dtype(2), - diff_sq_pos.mutable_gpu_data()); // (a_i-b_i)^2 - caffe_gpu_powx( - count, - diff_neg.mutable_gpu_data(), // a_i-c_i - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (a_i-c_i)^2 - const int channels = bottom[0]->channels(); - Dtype margin = this->layer_param_.triplet_loss_param().margin(); - Dtype loss(0.0); - // Loss component calculated from ab - for (int i = 0; i < bottom[0]->num(); ++i) { - /*dist_sq_pos.mutable_gpu_data()[i] = caffe_gpu_dot(channels, - diff_pos.gpu_data() + (i*channels), 
diff_pos.gpu_data() + (i*channels));*/ - // ab is a similar pair - dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.gpu_data()[i]; - // Loss component calculated from ac - /*dist_sq_neg.mutable_gpu_data()[i] = caffe_gpu_dot(channels, - diff_neg.gpu_data() + (i*channels), diff_neg.gpu_data() + (i*channels));*/ - // ac is a dissimilar pair - dist_sq_.mutable_gpu_data()[i] -= dist_sq_neg.gpu_data()[i]; - loss += std::max(margin + dist_sq_.gpu_data()[i], Dtype(0.0)); - } - loss = loss / static_cast(bottom[0]->num()) / Dtype(2); - top[0]->mutable_gpu_data()[0] = loss; ->>>>>>> GPU version added -======= ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise -} - -template -void TripletLossLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - Dtype margin = this->layer_param_.triplet_loss_param().margin(); -<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c - Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); - int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); - int dim = bottom[0]->count()/bottom[0]->num(); - int num_set = bottom[0]->num()/(2 + num_triplets); - if (losstype == 0) { - // BP for feat1(extracted from reference) - for (int i = 0; i < 1; ++i) { - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->gpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_gpu_diff(); - // the pair part - caffe_gpu_axpby( - dim, - alpha, - diff_pos.gpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - caffe_gpu_sub( -======= - caffe_sub( ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - dim, - bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // 
reference - bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_gpu_data() + j*dim); // reference-negative -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - caffe_gpu_dot( - dim, - diff_neg.gpu_data() + j*dim, - diff_neg.gpu_data() + j*dim, - dist_sq_neg.mutable_cpu_data() + j); -======= - caffe_gpu_powx( - dim, - diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; - // Loss component calculated from negative part - if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { - // similar pair in triplet - caffe_gpu_axpby( - dim, - alpha, - diff_pos.gpu_data() + (j*dim), - Dtype(1.0), - bout + (2 + num_triplets)*j*dim); - // dissimilar pair in triplet - caffe_gpu_axpby( - dim, - -alpha, - diff_neg.gpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - // BP for feat2(extracted from the closest sample) - for (int i = 1; i < 2; ++i) { - if (propagate_down[0]) { - const Dtype sign = -1; - const Dtype alpha = sign * top[0]->gpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_gpu_diff(); - // the pair part - caffe_gpu_axpby( - dim, - alpha, - diff_pos.gpu_data() + (j*dim), - 
Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - caffe_gpu_sub( -======= - caffe_sub( ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - dim, - bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_gpu_data() + j*dim); // reference-negative - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - caffe_gpu_dot( - dim, - diff_neg.gpu_data() + j*dim, - diff_neg.gpu_data() + j*dim, - dist_sq_neg.mutable_cpu_data() + j); -======= - caffe_gpu_powx( - dim, - diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; - if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { - // similar pair in triplet - caffe_gpu_axpby( - dim, - alpha, - diff_pos.gpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - // BP for negative feature used in the num_triplets triplet part - for (int i = 2; i < 2 + num_triplets; ++i) { - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * 
top[0]->gpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_gpu_diff(); -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - caffe_gpu_sub( -======= - caffe_sub( ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - dim, - bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->gpu_data() + ((2 + num_triplets)*j + i)*dim, - diff_neg.mutable_gpu_data() + j*dim); // reference-negative - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - caffe_gpu_dot( - dim, - diff_neg.gpu_data() + j*dim, - diff_neg.gpu_data() + j*dim, - dist_sq_neg.mutable_cpu_data() + j); -======= - caffe_gpu_powx( - dim, - diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; - if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { - // dissimilar pairs - caffe_gpu_axpby( - dim, - alpha, - diff_neg.gpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - } else { -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - caffe_gpu_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); -======= - caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); ->>>>>>> No sclice layer version which could 
forward a set of triplets together with 1 pair wise - } - } - } - } - } else { - for (int i = 0; i < 1; ++i) { - // BP for data1(feat1) - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->gpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_gpu_diff(); - // the pair part - caffe_gpu_axpby( - dim, - alpha, - diff_pos.gpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; - dist_sq_.mutable_gpu_data()[j] += margin; - // Loss component calculated from negative part - caffe_gpu_sub( -======= - dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; - dist_sq_.mutable_gpu_data()[j] += margin; - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - // Loss component calculated from negative part - caffe_sub( ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - dim, - bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_gpu_data() + j*dim); // reference-negative -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - caffe_gpu_dot( - dim, - diff_neg.gpu_data() + j*dim, - diff_neg.gpu_data() + j*dim, - dist_sq_neg.mutable_cpu_data() + j); -======= - caffe_gpu_powx( - dim, - diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - 
dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_gpu_data()[j] = 1 - \ - dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; - // loss accumulated accumulated by the triplet part - if ((dist_sq_.gpu_data()[j]) > Dtype(0.0)) { - caffe_gpu_axpby( - dim, - alpha*dist_sq_neg.mutable_gpu_data()[j]\ - /((dist_sq_pos.mutable_gpu_data()[j]+margin)\ - *(dist_sq_pos.mutable_gpu_data()[j]+margin)), - diff_pos.gpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - caffe_gpu_axpby( - dim, - -alpha/(dist_sq_pos.mutable_gpu_data()[j] + margin), - diff_neg.gpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - for (int i = 1; i < 2; ++i) { - // BP for positive data(feat2) - if (propagate_down[0]) { - const Dtype sign = -1; - const Dtype alpha = sign * top[0]->gpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_gpu_diff(); - // the pair part - caffe_gpu_axpby( - dim, - alpha, - diff_pos.gpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; - dist_sq_.mutable_gpu_data()[j] += margin; - // Loss component calculated from negative part - caffe_gpu_sub( -======= - dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; - dist_sq_.mutable_gpu_data()[j] += margin; - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - // Loss component calculated from negative part - caffe_sub( ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair 
wise - dim, - bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_gpu_data() + j*dim); // reference-negative -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - caffe_gpu_dot( - dim, - diff_neg.gpu_data() + j*dim, - diff_neg.gpu_data() + j*dim, - dist_sq_neg.mutable_cpu_data() + j); -======= - caffe_gpu_powx( - dim, - diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_gpu_data()[j] = 1 - \ - dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; - // loss accumulated accumulated by the triplet part - if ((dist_sq_.gpu_data()[j]) > Dtype(0.0)) { - caffe_gpu_axpby( - dim, - alpha*dist_sq_neg.mutable_gpu_data()[j]\ - /((dist_sq_pos.mutable_gpu_data()[j]+margin)\ - *(dist_sq_pos.mutable_gpu_data()[j]+margin)), - diff_pos.gpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - for (int i = 2; i < 2 + num_triplets; ++i) { - // BP for negative data(feat3) - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->gpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_gpu_diff(); - dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; - dist_sq_.mutable_gpu_data()[j] += margin; - // Loss component calculated from negative part -<<<<<<< 
cc0e95492e5b127808b90dc9698f4969fd706b03 - caffe_gpu_sub( -======= - caffe_sub( ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - dim, - bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->gpu_data() + ((2 + num_triplets)*j + i)*dim, - diff_neg.mutable_gpu_data() + j*dim); // reference-negative -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - caffe_gpu_dot( - dim, - diff_neg.gpu_data() + j*dim, - diff_neg.gpu_data() + j*dim, - dist_sq_neg.mutable_cpu_data() + j); -======= - caffe_gpu_powx( - dim, - diff_neg.mutable_gpu_data() + j*dim, // reference-pose_close - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (reference-pose_close)^2 - // Loss component calculated from reference and close one - caffe_gpu_gemv( - CblasNoTrans, - num_set, - bottom[0]->channels(), - Dtype(1.0), - diff_sq_neg.gpu_data(), // (reference-pose_close)^2 - summer_vec_.gpu_data(), - Dtype(0.0), - dist_sq_neg.mutable_gpu_data() + j); // \Sum (reference-pose_close)^2 ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_gpu_data()[j] = 1 - \ - dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; - // loss accumulated accumulated by the triplet part - if ((dist_sq_.gpu_data()[j]) > Dtype(0.0)) { - caffe_gpu_axpby( - dim, - alpha/(dist_sq_pos.mutable_gpu_data()[j] + margin), - diff_neg.gpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - } else { -<<<<<<< cc0e95492e5b127808b90dc9698f4969fd706b03 - caffe_gpu_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); -======= -<<<<<<< 8bea48cb77e890cda255da58b142f3a402196328 -// there must be further check to ensure the gradient calc - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->gpu_diff()[0] / - static_cast(bottom[0]->num()); - int num = bottom[0]->num(); - int channels = 
bottom[0]->channels(); - for (int j = 0; j < num; ++j) { - Dtype* bout = bottom[0]->mutable_gpu_diff(); - if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { - // similar pairs - caffe_gpu_axpby( - channels, - alpha, - diff_pos.gpu_data() + (j*channels), - Dtype(0.0), - bout + (j*channels)); - // dissimilar pairs - caffe_gpu_axpby( - channels, - -alpha, - diff_neg.gpu_data() + (j*channels), - Dtype(1.0), - bout + (j*channels)); - } else { - caffe_set(channels, Dtype(0), bout + (j*channels)); - } - } - } - for (int i = 1; i < 3; ++i) { -// there must be further check to ensure the gradient calc - if (propagate_down[i]) { - const Dtype sign = (i == 1) ? -1 : 1; - const Dtype alpha = sign * top[0]->gpu_diff()[0] / - static_cast(bottom[i]->num()); - int num = bottom[i]->num(); - int channels = bottom[i]->channels(); - for (int j = 0; j < num; ++j) { - Dtype* bout = bottom[i]->mutable_gpu_diff(); - if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { - if (i == 1) { - // similar pairs - caffe_gpu_axpby( - channels, - alpha, - diff_pos.gpu_data() + (j*channels), - Dtype(0.0), - bout + (j*channels)); - } - else { - // dissimilar pairs - caffe_gpu_axpby( - channels, - alpha, - diff_neg.gpu_data() + (j*channels), - Dtype(0.0), - bout + (j*channels)); - } - } else { - caffe_set(channels, Dtype(0), bout + (j*channels)); ->>>>>>> GPU version added -======= - caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - } - } - } - } -<<<<<<< 8bea48cb77e890cda255da58b142f3a402196328 -<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c -======= ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - } -======= ->>>>>>> GPU version added -} - -#ifdef CPU_ONLY -STUB_GPU(TripletLossLayer); -#endif - -INSTANTIATE_CLASS(TripletLossLayer); 
-REGISTER_LAYER_CLASS(TripletLoss); - -} // namespace caffe diff --git a/src/caffe/layers/triplet_loss_layer_BACKUP_62802.cpp b/src/caffe/layers/triplet_loss_layer_BACKUP_62802.cpp deleted file mode 100644 index 5fca131cef5..00000000000 --- a/src/caffe/layers/triplet_loss_layer_BACKUP_62802.cpp +++ /dev/null @@ -1,423 +0,0 @@ -#include -#include - -#include "caffe/layer.hpp" -#include "caffe/loss_layers.hpp" -#include "caffe/util/io.hpp" -#include "caffe/util/math_functions.hpp" - -namespace caffe { - -template -void TripletLossLayer::LayerSetUp( - const vector*>& bottom, const vector*>& top) { - LossLayer::LayerSetUp(bottom, top); - // number of triplet in a batch - int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); - // dimension of each descriptor - int dim = bottom[0]->count()/bottom[0]->num(); - CHECK_EQ(bottom[0]->channels(), dim); - CHECK_EQ(bottom[0]->height(), 1); - CHECK_EQ(bottom[0]->width(), 1); - CHECK_EQ(bottom[1]->channels(), 1); - CHECK_EQ(bottom[1]->height(), 1); - CHECK_EQ(bottom[1]->width(), 1); - // In each set, we have: - // the descriptor of reference sample, closest sample, and negative samples - // number of sets in the whole batch - int num_set = bottom[0]->num()/(2 + num_triplets); - dist_sq_.Reshape(num_set, 1, 1, 1); - diff_pos.Reshape(num_set, dim, 1, 1); - dist_sq_pos.Reshape(num_set, 1, 1, 1); - diff_neg.Reshape(num_set, dim, 1, 1); - dist_sq_neg.Reshape(num_set, 1, 1, 1); - // vector of ones used to sum along channels - summer_vec_.Reshape(bottom[0]->channels(), 1, 1, 1); - for (int i = 0; i < bottom[0]->channels(); ++i) - summer_vec_.mutable_cpu_data()[i] = Dtype(1); -} - -template -void TripletLossLayer::Forward_cpu( - const vector*>& bottom, - const vector*>& top) { - Dtype margin = this->layer_param_.triplet_loss_param().margin(); - Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); - int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); - CHECK_EQ(bottom[0]->num()%(2 
+ num_triplets), 0); - Dtype loss(0.0); - int dim = bottom[0]->count()/bottom[0]->num(); - int num_set = bottom[0]->num()/(2 + num_triplets); - if (losstype == 0) { - for (int i = 0; i < num_set; ++i) { - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive - diff_pos.mutable_cpu_data() + i*dim); // reference-pose_close - // Loss component calculated from reference and close one - dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(dim, - diff_pos.cpu_data() + i*dim, diff_pos.cpu_data() + i*dim); - // a b is a similar pair for pair wise - // loss accumulated by the pair wise part - loss += dist_sq_pos.cpu_data()[i]; - for (int triplet = 0; triplet < num_triplets; ++triplet) { - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i]; - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + i*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + i*dim, diff_neg.cpu_data() + i*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg.cpu_data()[i]; - // loss accumulated accumulated by the triplet part - loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); - } -<<<<<<< 027f1805a9555ff938185eb2ccb9a7599d35ba39 -======= - } - loss = loss / static_cast(num_set) / Dtype(2); - top[0]->mutable_cpu_data()[0] = loss; - } else { - for (int i = 0; i < num_set; ++i) { - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive - diff_pos.mutable_cpu_data() + i*dim); // reference-pose_close - // Loss 
component calculated from reference and close one - dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(dim, - diff_pos.cpu_data() + i*dim, diff_pos.cpu_data() + i*dim); - // a b is a similar pair for pair wise - // loss accumulated by the pair wise part - loss += dist_sq_pos.cpu_data()[i]; - dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; - dist_sq_.mutable_cpu_data()[i] += margin; - for (int triplet = 0; triplet < num_triplets; ++triplet) { - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + i*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + i*dim, diff_neg.cpu_data() + i*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[i] = 1 - \ - dist_sq_neg.cpu_data()[i] / dist_sq_.mutable_cpu_data()[i]; - // loss accumulated accumulated by the triplet part - loss += std::max(dist_sq_.cpu_data()[i], Dtype(0.0)); - } ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - } - loss = loss / static_cast(num_set) / Dtype(2); - top[0]->mutable_cpu_data()[0] = loss; -} - -template -void TripletLossLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - Dtype margin = this->layer_param_.triplet_loss_param().margin(); - Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); - int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); - int dim = bottom[0]->count()/bottom[0]->num(); - int num_set = bottom[0]->num()/(2 + num_triplets); - if (losstype == 0) { - // BP for feat1(extracted from reference) - for (int i = 0; i < 1; ++i) { - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / -<<<<<<< 
027f1805a9555ff938185eb2ccb9a7599d35ba39 - static_cast(bottom[i]->num()); - int num = bottom[i]->num(); - int channels = bottom[i]->channels(); - for (int j = 0; j < num; ++j) { - Dtype* bout = bottom[i]->mutable_cpu_diff(); - if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - // similar pairs -======= - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - // the pair part - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; - // Loss component calculated from negative part - if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - // similar pair in triplet - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(1.0), - bout + (2 + num_triplets)*j*dim); - // dissimilar pair in triplet - caffe_cpu_axpby( - dim, - -alpha, - diff_neg.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - // BP for feat2(extracted from the closest sample) - for (int i = 1; i < 2; ++i) { - if (propagate_down[0]) { - const Dtype sign = -1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - 
// the pair part - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; - if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - // similar pair in triplet - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - // BP for negative feature used in the num_triplets triplet part - for (int i = 2; i < 2 + num_triplets; ++i) { - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + i)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] -= 
dist_sq_neg.cpu_data()[j]; - if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - // dissimilar pairs ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - caffe_cpu_axpby( - dim, - alpha, - diff_neg.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - } else { - caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } else { - for (int i = 0; i < 1; ++i) { - // BP for data1(feat1) - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - // the pair part - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); -<<<<<<< 027f1805a9555ff938185eb2ccb9a7599d35ba39 - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; -======= - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] = 1 - \ - dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; - // loss accumulated 
accumulated by the triplet part - if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - caffe_cpu_axpby( - dim, - alpha*dist_sq_neg.mutable_cpu_data()[j]\ - /((dist_sq_pos.mutable_cpu_data()[j]+margin)\ - *(dist_sq_pos.mutable_cpu_data()[j]+margin)), - diff_pos.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - caffe_cpu_axpby( - dim, - -alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), - diff_neg.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - for (int i = 1; i < 2; ++i) { - // BP for positive data(feat2) - if (propagate_down[0]) { - const Dtype sign = -1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - // the pair part - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); -<<<<<<< 027f1805a9555ff938185eb2ccb9a7599d35ba39 - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; -======= - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] 
= 1 - \ - dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; - // loss accumulated accumulated by the triplet part - if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - caffe_cpu_axpby( - dim, - alpha*dist_sq_neg.mutable_cpu_data()[j]\ - /((dist_sq_pos.mutable_cpu_data()[j]+margin)\ - *(dist_sq_pos.mutable_cpu_data()[j]+margin)), - diff_pos.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - for (int i = 2; i < 2 + num_triplets; ++i) { - // BP for negative data(feat3) - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + i)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] = 1 - \ - dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; - // loss accumulated accumulated by the triplet part - if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - caffe_cpu_axpby( - dim, - alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), - diff_neg.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - } else { - caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } -} - -#ifdef CPU_ONLY -STUB_GPU(TripletLossLayer); -#endif - -INSTANTIATE_CLASS(TripletLossLayer); -REGISTER_LAYER_CLASS(TripletLoss); - -} // namespace caffe diff --git a/src/caffe/layers/triplet_loss_layer_BASE_62802.cpp 
b/src/caffe/layers/triplet_loss_layer_BASE_62802.cpp deleted file mode 100644 index a4e6402c76a..00000000000 --- a/src/caffe/layers/triplet_loss_layer_BASE_62802.cpp +++ /dev/null @@ -1,298 +0,0 @@ -#include -#include - -#include "caffe/layer.hpp" -#include "caffe/loss_layers.hpp" -#include "caffe/util/io.hpp" -#include "caffe/util/math_functions.hpp" - -namespace caffe { - -template -void TripletLossLayer::LayerSetUp( - const vector*>& bottom, const vector*>& top) { - LossLayer::LayerSetUp(bottom, top); - CHECK_EQ(bottom[0]->channels(), bottom[1]->channels()); - CHECK_EQ(bottom[0]->channels(), bottom[2]->channels()); - CHECK_EQ(bottom[0]->channels(), bottom[3]->channels()); - CHECK_EQ(bottom[0]->channels(), bottom[4]->channels()); - CHECK_EQ(bottom[0]->height(), 1); - CHECK_EQ(bottom[0]->width(), 1); - CHECK_EQ(bottom[1]->height(), 1); - CHECK_EQ(bottom[1]->width(), 1); - CHECK_EQ(bottom[2]->height(), 1); - CHECK_EQ(bottom[2]->width(), 1); - CHECK_EQ(bottom[3]->height(), 1); - CHECK_EQ(bottom[3]->width(), 1); - CHECK_EQ(bottom[4]->height(), 1); - CHECK_EQ(bottom[4]->width(), 1); - CHECK_EQ(bottom[5]->channels(), 1); - CHECK_EQ(bottom[5]->height(), 1); - CHECK_EQ(bottom[5]->width(), 1); - diff_pos.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); - diff_neg.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); - diff_par.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); - diff_sq_pos.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); - diff_sq_neg.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); - diff_sq_par.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); - dist_sq_.Reshape(bottom[0]->num(), 1, 1, 1); - dist_sq_pos.Reshape(bottom[0]->num(), 1, 1, 1); - dist_sq_neg.Reshape(bottom[0]->num(), 1, 1, 1); - dist_sq_par.Reshape(bottom[0]->num(), 1, 1, 1); - // vector of ones used to sum along channels - summer_vec_.Reshape(bottom[0]->channels(), 1, 1, 1); - for (int i = 0; i < bottom[0]->channels(); ++i) - 
summer_vec_.mutable_cpu_data()[i] = Dtype(1); -} - -template -void TripletLossLayer::Forward_cpu( - const vector*>& bottom, - const vector*>& top) { - int count = bottom[0]->count(); - caffe_sub( - count, - bottom[0]->cpu_data(), // a - bottom[1]->cpu_data(), // b - diff_pos.mutable_cpu_data()); // a_i-b_i: positive - caffe_sub( - count, - bottom[0]->cpu_data(), // a - bottom[2]->cpu_data(), // c - diff_neg.mutable_cpu_data()); // a_i-c_i: negative - caffe_sub( - count, - bottom[3]->cpu_data(), // d - bottom[4]->cpu_data(), // e - diff_par.mutable_cpu_data()); // d_i-e_i: pair wise - const int channels = bottom[0]->channels(); - Dtype margin = this->layer_param_.triplet_loss_param().margin(); - Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); - Dtype loss(0.0); - if (losstype == 0) { - for (int i = 0; i < bottom[0]->num(); ++i) { - // Triplet loss accumulation - // Loss component calculated from a and b - dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(channels, - diff_pos.cpu_data() + (i*channels), diff_pos.cpu_data() + (i*channels)); - // a b is a similar pair for triplet - dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i]; - // Loss component calculated from a and c - dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(channels, - diff_neg.cpu_data() + (i*channels), diff_neg.cpu_data() + (i*channels)); - // a c is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg.cpu_data()[i]; - // loss accumulated accumulated by the triplet part - loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); - // Pair wise loss accumulation - // Loss component calculated from d and e - dist_sq_par.mutable_cpu_data()[i] = caffe_cpu_dot(channels, - diff_par.cpu_data() + (i*channels), diff_par.cpu_data() + (i*channels)); - // d e is a similar pair for pair wise - // loss accumulated by the pair wise part - loss += dist_sq_par.cpu_data()[i]; - } - loss = loss / static_cast(bottom[0]->num()) / Dtype(2); - 
top[0]->mutable_cpu_data()[0] = loss; - } else { - for (int i = 0; i < bottom[0]->num(); ++i) { - // softTriplet loss accumulation - // Loss component calculated from a and b - dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(channels, - diff_pos.cpu_data() + (i*channels), diff_pos.cpu_data() + (i*channels)); - // a b is a similar pair for triplet - dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i]; - dist_sq_.mutable_cpu_data()[i] += margin; - // Loss component calculated from a and c - dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(channels, - diff_neg.cpu_data() + (i*channels), diff_neg.cpu_data() + (i*channels)); - // a c is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[i] = 1 - \ -dist_sq_neg.cpu_data()[i] / dist_sq_.mutable_cpu_data()[i]; - // loss accumulated accumulated by the triplet part - loss += std::max(dist_sq_.cpu_data()[i], Dtype(0.0)); - // Pair wise loss accumulation - // Loss component calculated from d and e - dist_sq_par.mutable_cpu_data()[i] = caffe_cpu_dot(channels, - diff_par.cpu_data() + (i*channels), diff_par.cpu_data() + (i*channels)); - // d e is a similar pair for pair wise - // loss accumulated by the pair wise part - loss += dist_sq_par.cpu_data()[i]; - } - loss = loss / static_cast(bottom[0]->num()) / Dtype(2); - top[0]->mutable_cpu_data()[0] = loss; - } -} - -template -void TripletLossLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - Dtype margin = this->layer_param_.triplet_loss_param().margin(); - Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); - if (losstype == 0) { - // BP for feat1 - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(bottom[0]->num()); - int num = bottom[0]->num(); - int channels = bottom[0]->channels(); - for (int j = 0; j < num; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - // 
similar pairs - caffe_cpu_axpby( - channels, - alpha, - diff_pos.cpu_data() + (j*channels), - Dtype(0.0), - bout + (j*channels)); - // dissimilar pairs - caffe_cpu_axpby( - channels, - -alpha, - diff_neg.cpu_data() + (j*channels), - Dtype(1.0), - bout + (j*channels)); - } else { - caffe_set(channels, Dtype(0), bout + (j*channels)); - } - } - } - // BP for feat2 and feat3 - for (int i = 1; i < 3; ++i) { - if (propagate_down[i]) { - const Dtype sign = (i == 1) ? -1 : 1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(bottom[i]->num()); - int num = bottom[i]->num(); - int channels = bottom[i]->channels(); - for (int j = 0; j < num; ++j) { - Dtype* bout = bottom[i]->mutable_cpu_diff(); - if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - if (i == 1) { - // similar pairs - caffe_cpu_axpby( - channels, - alpha, - diff_pos.cpu_data() + (j*channels), - Dtype(0.0), - bout + (j*channels)); - } else { - // dissimilar pairs - caffe_cpu_axpby( - channels, - alpha, - diff_neg.cpu_data() + (j*channels), - Dtype(0.0), - bout + (j*channels)); - } - } else { - caffe_set(channels, Dtype(0), bout + (j*channels)); - } - } - } - } - } else { - // BP for data1(feat1) - if (propagate_down[0]) { - const Dtype alpha = top[0]->cpu_diff()[0] / - static_cast(bottom[0]->num()); - int num = bottom[0]->num(); - int channels = bottom[0]->channels(); - for (int j = 0; j < num; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - caffe_cpu_axpby( - channels, - alpha*dist_sq_neg.mutable_cpu_data()[j]\ -/((dist_sq_pos.mutable_cpu_data()[j]+margin)\ -*(dist_sq_pos.mutable_cpu_data()[j]+margin)), - diff_pos.cpu_data() + (j*channels), - Dtype(0.0), - bout + (j*channels)); - caffe_cpu_axpby( - channels, - -alpha*(dist_sq_pos.mutable_cpu_data()[j] + margin)\ -/((dist_sq_pos.mutable_cpu_data()[j] + margin)\ -*(dist_sq_pos.mutable_cpu_data()[j] + margin)), - diff_neg.cpu_data() + (j*channels), - Dtype(1.0), - bout + (j*channels)); - } 
else { - caffe_set(channels, Dtype(0), bout + (j*channels)); - } - } - } - // BP for positive data(feat2) - if (propagate_down[1]) { - const Dtype alpha = top[0]->cpu_diff()[0] / - static_cast(bottom[1]->num()); - int num = bottom[1]->num(); - int channels = bottom[1]->channels(); - for (int j = 0; j < num; ++j) { - Dtype* bout = bottom[1]->mutable_cpu_diff(); - if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - caffe_cpu_axpby( - channels, - -alpha*dist_sq_neg.mutable_cpu_data()[j]\ -/((dist_sq_pos.mutable_cpu_data()[j] + margin)\ -*(dist_sq_pos.mutable_cpu_data()[j] + margin)), - diff_pos.cpu_data() + (j*channels), - Dtype(0.0), - bout + (j*channels)); - } else { - caffe_set(channels, Dtype(0), bout + (j*channels)); - } - } - } - // BP for negative data(feat3) - if (propagate_down[2]) { - const Dtype alpha = top[0]->cpu_diff()[0] / - static_cast(bottom[2]->num()); - int num = bottom[2]->num(); - int channels = bottom[2]->channels(); - for (int j = 0; j < num; ++j) { - Dtype* bout = bottom[2]->mutable_cpu_diff(); - if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - caffe_cpu_axpby( - channels, - alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), - diff_neg.cpu_data() + (j*channels), - Dtype(0.0), - bout + (j*channels)); - } else { - caffe_set(channels, Dtype(0), bout + (j*channels)); - } - } - } - } - // pair wise back - for (int i = 3; i < 5; ++i) { - if (propagate_down[i]) { - const Dtype sign = (i == 3) ? 
1 : -1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(bottom[i]->num()); - int num = bottom[i]->num(); - int channels = bottom[i]->channels(); - for (int j = 0; j < num; ++j) { - Dtype* bout = bottom[i]->mutable_cpu_diff(); // similar pairs - caffe_cpu_axpby( - channels, - alpha, - diff_par.cpu_data() + (j*channels), - Dtype(0.0), - bout + (j*channels)); - } - } - } -} - -#ifdef CPU_ONLY -STUB_GPU(TripletLossLayer); -#endif - -INSTANTIATE_CLASS(TripletLossLayer); -REGISTER_LAYER_CLASS(TripletLoss); - -} // namespace caffe diff --git a/src/caffe/layers/triplet_loss_layer_LOCAL_62802.cpp b/src/caffe/layers/triplet_loss_layer_LOCAL_62802.cpp deleted file mode 100644 index add25687899..00000000000 --- a/src/caffe/layers/triplet_loss_layer_LOCAL_62802.cpp +++ /dev/null @@ -1,261 +0,0 @@ -#include -#include - -#include "caffe/layer.hpp" -#include "caffe/loss_layers.hpp" -#include "caffe/util/io.hpp" -#include "caffe/util/math_functions.hpp" - -namespace caffe { - -template -void TripletLossLayer::LayerSetUp( - const vector*>& bottom, const vector*>& top) { - LossLayer::LayerSetUp(bottom, top); - // number of triplet in a batch - int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); - // dimension of each descriptor - int dim = bottom[0]->count()/bottom[0]->num(); - CHECK_EQ(bottom[0]->channels(), dim); - CHECK_EQ(bottom[0]->height(), 1); - CHECK_EQ(bottom[0]->width(), 1); - CHECK_EQ(bottom[1]->channels(), 1); - CHECK_EQ(bottom[1]->height(), 1); - CHECK_EQ(bottom[1]->width(), 1); - // In each set, we have: - // the descriptor of reference sample, closest sample, and negative samples - // number of sets in the whole batch - int num_set = bottom[0]->num()/(2 + num_triplets); - dist_sq_.Reshape(num_set, 1, 1, 1); - diff_pos.Reshape(num_set, dim, 1, 1); - dist_sq_pos.Reshape(num_set, 1, 1, 1); - diff_neg.Reshape(num_set, dim, 1, 1); - dist_sq_neg.Reshape(num_set, 1, 1, 1); - // vector of ones used to sum along channels - 
summer_vec_.Reshape(bottom[0]->channels(), 1, 1, 1); - for (int i = 0; i < bottom[0]->channels(); ++i) - summer_vec_.mutable_cpu_data()[i] = Dtype(1); -} - -template -void TripletLossLayer::Forward_cpu( - const vector*>& bottom, - const vector*>& top) { - Dtype margin = this->layer_param_.triplet_loss_param().margin(); - Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); - int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); - CHECK_EQ(bottom[0]->num()%(2 + num_triplets), 0); - Dtype loss(0.0); - int dim = bottom[0]->count()/bottom[0]->num(); - int num_set = bottom[0]->num()/(2 + num_triplets); - if (losstype == 0) { - for (int i = 0; i < num_set; ++i) { - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive - diff_pos.mutable_cpu_data() + i*dim); // reference-pose_close - // Loss component calculated from reference and close one - dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(dim, - diff_pos.cpu_data() + i*dim, diff_pos.cpu_data() + i*dim); - // a b is a similar pair for pair wise - // loss accumulated by the pair wise part - loss += dist_sq_pos.cpu_data()[i]; - for (int triplet = 0; triplet < num_triplets; ++triplet) { - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i]; - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + i*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + i*dim, diff_neg.cpu_data() + i*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg.cpu_data()[i]; - // loss accumulated accumulated by the triplet part - 
loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); - } - } - loss = loss / static_cast(num_set) / Dtype(2); - top[0]->mutable_cpu_data()[0] = loss; -} - -template -void TripletLossLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - Dtype margin = this->layer_param_.triplet_loss_param().margin(); - Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); - int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); - int dim = bottom[0]->count()/bottom[0]->num(); - int num_set = bottom[0]->num()/(2 + num_triplets); - if (losstype == 0) { - // BP for feat1(extracted from reference) - for (int i = 0; i < 1; ++i) { - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(bottom[i]->num()); - int num = bottom[i]->num(); - int channels = bottom[i]->channels(); - for (int j = 0; j < num; ++j) { - Dtype* bout = bottom[i]->mutable_cpu_diff(); - if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - // similar pairs - caffe_cpu_axpby( - dim, - alpha, - diff_neg.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - } else { - caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } else { - for (int i = 0; i < 1; ++i) { - // BP for data1(feat1) - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - // the pair part - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; - // Loss component calculated from negative part - caffe_sub( - dim, - 
bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] = 1 - \ - dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; - // loss accumulated accumulated by the triplet part - if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - caffe_cpu_axpby( - dim, - alpha*dist_sq_neg.mutable_cpu_data()[j]\ - /((dist_sq_pos.mutable_cpu_data()[j]+margin)\ - *(dist_sq_pos.mutable_cpu_data()[j]+margin)), - diff_pos.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - caffe_cpu_axpby( - dim, - -alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), - diff_neg.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - for (int i = 1; i < 2; ++i) { - // BP for positive data(feat2) - if (propagate_down[0]) { - const Dtype sign = -1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - // the pair part - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + 
j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] = 1 - \ - dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; - // loss accumulated accumulated by the triplet part - if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - caffe_cpu_axpby( - dim, - alpha*dist_sq_neg.mutable_cpu_data()[j]\ - /((dist_sq_pos.mutable_cpu_data()[j]+margin)\ - *(dist_sq_pos.mutable_cpu_data()[j]+margin)), - diff_pos.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - for (int i = 2; i < 2 + num_triplets; ++i) { - // BP for negative data(feat3) - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + i)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] = 1 - \ - dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; - // loss accumulated accumulated by the triplet part - if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - caffe_cpu_axpby( - dim, - alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), - diff_neg.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - } else { - caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } -} - -#ifdef CPU_ONLY -STUB_GPU(TripletLossLayer); -#endif - -INSTANTIATE_CLASS(TripletLossLayer); 
-REGISTER_LAYER_CLASS(TripletLoss); - -} // namespace caffe diff --git a/src/caffe/layers/triplet_loss_layer_REMOTE_62802.cpp b/src/caffe/layers/triplet_loss_layer_REMOTE_62802.cpp deleted file mode 100644 index 613fa703676..00000000000 --- a/src/caffe/layers/triplet_loss_layer_REMOTE_62802.cpp +++ /dev/null @@ -1,397 +0,0 @@ -#include -#include - -#include "caffe/layer.hpp" -#include "caffe/loss_layers.hpp" -#include "caffe/util/io.hpp" -#include "caffe/util/math_functions.hpp" - -namespace caffe { - -template -void TripletLossLayer::LayerSetUp( - const vector*>& bottom, const vector*>& top) { - LossLayer::LayerSetUp(bottom, top); - // number of triplet in a batch - int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); - // dimension of each descriptor - int dim = bottom[0]->count()/bottom[0]->num(); - CHECK_EQ(bottom[0]->channels(), dim); - CHECK_EQ(bottom[0]->height(), 1); - CHECK_EQ(bottom[0]->width(), 1); - CHECK_EQ(bottom[1]->channels(), 1); - CHECK_EQ(bottom[1]->height(), 1); - CHECK_EQ(bottom[1]->width(), 1); - // In each set, we have: - // the descriptor of reference sample, closest sample, and negative samples - // number of sets in the whole batch - int num_set = bottom[0]->num()/(2 + num_triplets); - dist_sq_.Reshape(num_set, 1, 1, 1); - diff_pos.Reshape(num_set, dim, 1, 1); - dist_sq_pos.Reshape(num_set, 1, 1, 1); - diff_neg.Reshape(num_set, dim, 1, 1); - dist_sq_neg.Reshape(num_set, 1, 1, 1); - // vector of ones used to sum along channels - summer_vec_.Reshape(bottom[0]->channels(), 1, 1, 1); - for (int i = 0; i < bottom[0]->channels(); ++i) - summer_vec_.mutable_cpu_data()[i] = Dtype(1); -} - -template -void TripletLossLayer::Forward_cpu( - const vector*>& bottom, - const vector*>& top) { - Dtype margin = this->layer_param_.triplet_loss_param().margin(); - Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); - int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); - CHECK_EQ(bottom[0]->num()%(2 
+ num_triplets), 0); - Dtype loss(0.0); - int dim = bottom[0]->count()/bottom[0]->num(); - int num_set = bottom[0]->num()/(2 + num_triplets); - if (losstype == 0) { - for (int i = 0; i < num_set; ++i) { - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive - diff_pos.mutable_cpu_data() + i*dim); // reference-pose_close - // Loss component calculated from reference and close one - dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(dim, - diff_pos.cpu_data() + i*dim, diff_pos.cpu_data() + i*dim); - // a b is a similar pair for pair wise - // loss accumulated by the pair wise part - loss += dist_sq_pos.cpu_data()[i]; - for (int triplet = 0; triplet < num_triplets; ++triplet) { - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i]; - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + i*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + i*dim, diff_neg.cpu_data() + i*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg.cpu_data()[i]; - // loss accumulated accumulated by the triplet part - loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); - } - } - loss = loss / static_cast(num_set) / Dtype(2); - top[0]->mutable_cpu_data()[0] = loss; - } else { - for (int i = 0; i < num_set; ++i) { - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive - diff_pos.mutable_cpu_data() + i*dim); // reference-pose_close - // Loss component calculated from reference and close one - 
dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(dim, - diff_pos.cpu_data() + i*dim, diff_pos.cpu_data() + i*dim); - // a b is a similar pair for pair wise - // loss accumulated by the pair wise part - loss += dist_sq_pos.cpu_data()[i]; - dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; - dist_sq_.mutable_cpu_data()[i] += margin; - for (int triplet = 0; triplet < num_triplets; ++triplet) { - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + i*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + i*dim, diff_neg.cpu_data() + i*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[i] = 1 - \ - dist_sq_neg.cpu_data()[i] / dist_sq_.mutable_cpu_data()[i]; - // loss accumulated accumulated by the triplet part - loss += std::max(dist_sq_.cpu_data()[i], Dtype(0.0)); - } - } - loss = loss / static_cast(num_set) / Dtype(2); - top[0]->mutable_cpu_data()[0] = loss; - } -} - -template -void TripletLossLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - Dtype margin = this->layer_param_.triplet_loss_param().margin(); - Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); - int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); - int dim = bottom[0]->count()/bottom[0]->num(); - int num_set = bottom[0]->num()/(2 + num_triplets); - if (losstype == 0) { - // BP for feat1(extracted from reference) - for (int i = 0; i < 1; ++i) { - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - // the pair part - caffe_cpu_axpby( - dim, - alpha, - 
diff_pos.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; - // Loss component calculated from negative part - if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - // similar pair in triplet - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(1.0), - bout + (2 + num_triplets)*j*dim); - // dissimilar pair in triplet - caffe_cpu_axpby( - dim, - -alpha, - diff_neg.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - // BP for feat2(extracted from the closest sample) - for (int i = 1; i < 2; ++i) { - if (propagate_down[0]) { - const Dtype sign = -1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - // the pair part - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - // Triplet loss 
accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; - if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - // similar pair in triplet - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - // BP for negative feature used in the num_triplets triplet part - for (int i = 2; i < 2 + num_triplets; ++i) { - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + i)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; - if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - // dissimilar pairs - caffe_cpu_axpby( - dim, - alpha, - diff_neg.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - } else { - caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } else { - for (int i = 0; i < 1; ++i) { - // BP for data1(feat1) - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] 
/ - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - // the pair part - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] = 1 - \ - dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; - // loss accumulated accumulated by the triplet part - if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - caffe_cpu_axpby( - dim, - alpha*dist_sq_neg.mutable_cpu_data()[j]\ - /((dist_sq_pos.mutable_cpu_data()[j]+margin)\ - *(dist_sq_pos.mutable_cpu_data()[j]+margin)), - diff_pos.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - caffe_cpu_axpby( - dim, - -alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), - diff_neg.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - for (int i = 1; i < 2; ++i) { - // BP for positive data(feat2) - if (propagate_down[0]) { - const Dtype sign = -1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - // the pair part - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - 
dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] = 1 - \ - dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; - // loss accumulated accumulated by the triplet part - if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - caffe_cpu_axpby( - dim, - alpha*dist_sq_neg.mutable_cpu_data()[j]\ - /((dist_sq_pos.mutable_cpu_data()[j]+margin)\ - *(dist_sq_pos.mutable_cpu_data()[j]+margin)), - diff_pos.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - for (int i = 2; i < 2 + num_triplets; ++i) { - // BP for negative data(feat3) - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + i)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - 
dist_sq_.mutable_cpu_data()[j] = 1 - \ - dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; - // loss accumulated accumulated by the triplet part - if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - caffe_cpu_axpby( - dim, - alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), - diff_neg.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - } else { - caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } -} - -#ifdef CPU_ONLY -STUB_GPU(TripletLossLayer); -#endif - -INSTANTIATE_CLASS(TripletLossLayer); -REGISTER_LAYER_CLASS(TripletLoss); - -} // namespace caffe diff --git a/src/caffe/layers/window_data_layer.cpp b/src/caffe/layers/window_data_layer.cpp index f8db61c9258..f637f2ec6d4 100644 --- a/src/caffe/layers/window_data_layer.cpp +++ b/src/caffe/layers/window_data_layer.cpp @@ -1,4 +1,3 @@ -#ifdef USE_OPENCV #include #include @@ -469,4 +468,3 @@ INSTANTIATE_CLASS(WindowDataLayer); REGISTER_LAYER_CLASS(WindowData); } // namespace caffe -#endif // USE_OPENCV diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp index 00eb1d01ab4..89d14013dc9 100644 --- a/src/caffe/net.cpp +++ b/src/caffe/net.cpp @@ -98,8 +98,13 @@ void Net::Init(const NetParameter& in_param) { << "propagate_down param must be specified " << "either 0 or bottom_size times "; } - layers_.push_back(LayerRegistry::CreateLayer(layer_param)); ->>>>>>> triplet data generation and network update + if (share_from_root) { + LOG(INFO) << "Sharing layer " << layer_param.name() << " from root net"; + layers_.push_back(root_net_->layers_[layer_id]); + layers_[layer_id]->SetShared(true); + } else { + layers_.push_back(LayerRegistry::CreateLayer(layer_param)); + } layer_names_.push_back(layer_param.name()); if (Caffe::root_solver()) { LOG(INFO) << "Creating Layer " << layer_param.name(); @@ -805,11 +810,12 @@ void Net::Backward() { BackwardFromTo(layers_.size() - 1, 0); if (debug_info_) { Dtype asum_data = 0, asum_diff = 0, sumsq_data = 0, sumsq_diff = 0; 
- for (int i = 0; i < learnable_params_.size(); ++i) { - asum_data += learnable_params_[i]->asum_data(); - asum_diff += learnable_params_[i]->asum_diff(); - sumsq_data += learnable_params_[i]->sumsq_data(); - sumsq_diff += learnable_params_[i]->sumsq_diff(); + for (int i = 0; i < params_.size(); ++i) { + if (param_owners_[i] >= 0) { continue; } + asum_data += params_[i]->asum_data(); + asum_diff += params_[i]->asum_diff(); + sumsq_data += params_[i]->sumsq_data(); + sumsq_diff += params_[i]->sumsq_diff(); } const Dtype l2norm_data = std::sqrt(sumsq_data); const Dtype l2norm_diff = std::sqrt(sumsq_diff); diff --git a/src/caffe/net.cpp.orig.orig b/src/caffe/net.cpp.orig.orig deleted file mode 100644 index 05e20ba27a4..00000000000 --- a/src/caffe/net.cpp.orig.orig +++ /dev/null @@ -1,856 +0,0 @@ -#include -#include -#include -#include -#include -#include - -#include "caffe/common.hpp" -#include "caffe/layer.hpp" -#include "caffe/net.hpp" -#include "caffe/proto/caffe.pb.h" -#include "caffe/util/insert_splits.hpp" -#include "caffe/util/io.hpp" -#include "caffe/util/math_functions.hpp" -#include "caffe/util/upgrade_proto.hpp" - -#include "caffe/test/test_caffe_main.hpp" - -namespace caffe { - -template -Net::Net(const NetParameter& param) { - Init(param); -} - -template -Net::Net(const string& param_file, Phase phase) { - NetParameter param; - ReadNetParamsFromTextFileOrDie(param_file, ¶m); - param.mutable_state()->set_phase(phase); - Init(param); -} - -template -void Net::Init(const NetParameter& in_param) { - // Set phase from the state. - phase_ = in_param.state().phase(); - // Filter layers based on their include/exclude rules and - // the current NetState. - NetParameter filtered_param; - FilterNet(in_param, &filtered_param); - LOG(INFO) << "Initializing net from parameters: " << std::endl - << filtered_param.DebugString(); - // Create a copy of filtered_param with splits added where necessary. 
- NetParameter param; - InsertSplits(filtered_param, ¶m); - // Basically, build all the layers and set up their connections. - name_ = param.name(); - map blob_name_to_idx; - set available_blobs; - CHECK(param.input_dim_size() == 0 || param.input_shape_size() == 0) - << "Must specify either input_shape OR deprecated input_dim, not both."; - if (param.input_dim_size() > 0) { - // Deprecated 4D dimensions. - CHECK_EQ(param.input_size() * 4, param.input_dim_size()) - << "Incorrect input blob dimension specifications."; - } else { - CHECK_EQ(param.input_size(), param.input_shape_size()) - << "Exactly one input_shape must be specified per input."; - } - memory_used_ = 0; - // set the input blobs - for (int input_id = 0; input_id < param.input_size(); ++input_id) { - const int layer_id = -1; // inputs have fake layer ID -1 - AppendTop(param, layer_id, input_id, &available_blobs, &blob_name_to_idx); - } - DLOG(INFO) << "Memory required for data: " << memory_used_ * sizeof(Dtype); - // For each layer, set up its input and output - bottom_vecs_.resize(param.layer_size()); - top_vecs_.resize(param.layer_size()); - bottom_id_vecs_.resize(param.layer_size()); - param_id_vecs_.resize(param.layer_size()); - top_id_vecs_.resize(param.layer_size()); - bottom_need_backward_.resize(param.layer_size()); - for (int layer_id = 0; layer_id < param.layer_size(); ++layer_id) { - // Inherit phase from net if unset. - if (!param.layer(layer_id).has_phase()) { - param.mutable_layer(layer_id)->set_phase(phase_); - } - // Setup layer. 
- const LayerParameter& layer_param = param.layer(layer_id); - if (layer_param.propagate_down_size() > 0) { - CHECK_EQ(layer_param.propagate_down_size(), - layer_param.bottom_size()) - << "propagate_down param must be specified " - << "either 0 or bottom_size times "; - } - layers_.push_back(LayerRegistry::CreateLayer(layer_param)); -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update - layer_names_.push_back(layer_param.name()); - LOG(INFO) << "Creating Layer " << layer_param.name(); - bool need_backward = false; - - // Figure out this layer's input and output - for (int bottom_id = 0; bottom_id < layer_param.bottom_size(); - ++bottom_id) { - const int blob_id = AppendBottom(param, layer_id, bottom_id, - &available_blobs, &blob_name_to_idx); - // If a blob needs backward, this layer should provide it. - need_backward |= blob_need_backward_[blob_id]; - } - int num_top = layer_param.top_size(); - for (int top_id = 0; top_id < num_top; ++top_id) { - AppendTop(param, layer_id, top_id, &available_blobs, &blob_name_to_idx); - } - // If the layer specifies that AutoTopBlobs() -> true and the LayerParameter - // specified fewer than the required number (as specified by - // ExactNumTopBlobs() or MinTopBlobs()), allocate them here. - Layer* layer = layers_[layer_id].get(); - if (layer->AutoTopBlobs()) { - const int needed_num_top = - std::max(layer->MinTopBlobs(), layer->ExactNumTopBlobs()); - for (; num_top < needed_num_top; ++num_top) { - // Add "anonymous" top blobs -- do not modify available_blobs or - // blob_name_to_idx as we don't want these blobs to be usable as input - // to other layers. - AppendTop(param, layer_id, num_top, NULL, NULL); - } - } - // After this layer is connected, set it up. 
- LOG(INFO) << "Setting up " << layer_names_[layer_id]; - layers_[layer_id]->SetUp(bottom_vecs_[layer_id], top_vecs_[layer_id]); - for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) { - if (blob_loss_weights_.size() <= top_id_vecs_[layer_id][top_id]) { - blob_loss_weights_.resize(top_id_vecs_[layer_id][top_id] + 1, Dtype(0)); - } - blob_loss_weights_[top_id_vecs_[layer_id][top_id]] = layer->loss(top_id); - LOG(INFO) << "Top shape: " << top_vecs_[layer_id][top_id]->shape_string(); - if (layer->loss(top_id)) { - LOG(INFO) << " with loss weight " << layer->loss(top_id); - } - memory_used_ += top_vecs_[layer_id][top_id]->count(); - } - DLOG(INFO) << "Memory required for data: " << memory_used_ * sizeof(Dtype); - const int param_size = layer_param.param_size(); - const int num_param_blobs = layers_[layer_id]->blobs().size(); - CHECK_LE(param_size, num_param_blobs) - << "Too many params specified for layer " << layer_param.name(); - ParamSpec default_param_spec; - for (int param_id = 0; param_id < num_param_blobs; ++param_id) { - const ParamSpec* param_spec = (param_id < param_size) ? - &layer_param.param(param_id) : &default_param_spec; - const bool param_need_backward = param_spec->lr_mult() > 0; - need_backward |= param_need_backward; - layers_[layer_id]->set_param_propagate_down(param_id, - param_need_backward); - } - for (int param_id = 0; param_id < num_param_blobs; ++param_id) { - AppendParam(param, layer_id, param_id); - } - // Finally, set the backward flag - layer_need_backward_.push_back(need_backward); - if (need_backward) { - for (int top_id = 0; top_id < top_id_vecs_[layer_id].size(); ++top_id) { - blob_need_backward_[top_id_vecs_[layer_id][top_id]] = true; - } - } - } - // Go through the net backwards to determine which blobs contribute to the - // loss. We can skip backward computation for blobs that don't contribute - // to the loss. 
- // Also checks if all bottom blobs don't need backward computation (possible - // because the skip_propagate_down param) and so we can skip bacward - // computation for the entire layer - set blobs_under_loss; - set blobs_skip_backp; - for (int layer_id = layers_.size() - 1; layer_id >= 0; --layer_id) { - bool layer_contributes_loss = false; - bool layer_skip_propagate_down = true; - for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) { - const string& blob_name = blob_names_[top_id_vecs_[layer_id][top_id]]; - if (layers_[layer_id]->loss(top_id) || - (blobs_under_loss.find(blob_name) != blobs_under_loss.end())) { - layer_contributes_loss = true; - } - if (blobs_skip_backp.find(blob_name) == blobs_skip_backp.end()) { - layer_skip_propagate_down = false; - } - if (layer_contributes_loss && !layer_skip_propagate_down) - break; - } - // If this layer can skip backward computation, also all his bottom blobs - // don't need backpropagation - if (layer_need_backward_[layer_id] && layer_skip_propagate_down) { - layer_need_backward_[layer_id] = false; - for (int bottom_id = 0; bottom_id < bottom_vecs_[layer_id].size(); - ++bottom_id) { - bottom_need_backward_[layer_id][bottom_id] = false; - } - } - if (!layer_contributes_loss) { layer_need_backward_[layer_id] = false; } - if (layer_need_backward_[layer_id]) { - LOG(INFO) << layer_names_[layer_id] << " needs backward computation."; - } else { - LOG(INFO) << layer_names_[layer_id] - << " does not need backward computation."; - } - for (int bottom_id = 0; bottom_id < bottom_vecs_[layer_id].size(); - ++bottom_id) { - if (layer_contributes_loss) { - const string& blob_name = - blob_names_[bottom_id_vecs_[layer_id][bottom_id]]; - blobs_under_loss.insert(blob_name); - } else { - bottom_need_backward_[layer_id][bottom_id] = false; - } - if (!bottom_need_backward_[layer_id][bottom_id]) { - const string& blob_name = - blob_names_[bottom_id_vecs_[layer_id][bottom_id]]; - blobs_skip_backp.insert(blob_name); - } - } - 
} - // Handle force_backward if needed. - if (param.force_backward()) { - for (int layer_id = 0; layer_id < layers_.size(); ++layer_id) { - layer_need_backward_[layer_id] = true; - for (int bottom_id = 0; - bottom_id < bottom_need_backward_[layer_id].size(); ++bottom_id) { - bottom_need_backward_[layer_id][bottom_id] = - bottom_need_backward_[layer_id][bottom_id] || - layers_[layer_id]->AllowForceBackward(bottom_id); - blob_need_backward_[bottom_id_vecs_[layer_id][bottom_id]] = - blob_need_backward_[bottom_id_vecs_[layer_id][bottom_id]] || - bottom_need_backward_[layer_id][bottom_id]; - } - for (int param_id = 0; param_id < layers_[layer_id]->blobs().size(); - ++param_id) { - layers_[layer_id]->set_param_propagate_down(param_id, true); - } - } - } - // In the end, all remaining blobs are considered output blobs. - for (set::iterator it = available_blobs.begin(); - it != available_blobs.end(); ++it) { - LOG(INFO) << "This network produces output " << *it; - net_output_blobs_.push_back(blobs_[blob_name_to_idx[*it]].get()); - net_output_blob_indices_.push_back(blob_name_to_idx[*it]); - } - for (size_t blob_id = 0; blob_id < blob_names_.size(); ++blob_id) { - blob_names_index_[blob_names_[blob_id]] = blob_id; - } - for (size_t layer_id = 0; layer_id < layer_names_.size(); ++layer_id) { - layer_names_index_[layer_names_[layer_id]] = layer_id; - } - GetLearningRateAndWeightDecay(); - debug_info_ = param.debug_info(); - LOG(INFO) << "Network initialization done."; - LOG(INFO) << "Memory required for data: " << memory_used_ * sizeof(Dtype); -} - -template -void Net::FilterNet(const NetParameter& param, - NetParameter* param_filtered) { - NetState net_state(param.state()); - param_filtered->CopyFrom(param); - param_filtered->clear_layer(); - for (int i = 0; i < param.layer_size(); ++i) { - const LayerParameter& layer_param = param.layer(i); - const string& layer_name = layer_param.name(); - CHECK(layer_param.include_size() == 0 || layer_param.exclude_size() == 0) - << 
"Specify either include rules or exclude rules; not both."; - // If no include rules are specified, the layer is included by default and - // only excluded if it meets one of the exclude rules. - bool layer_included = (layer_param.include_size() == 0); - for (int j = 0; layer_included && j < layer_param.exclude_size(); ++j) { - if (StateMeetsRule(net_state, layer_param.exclude(j), layer_name)) { - layer_included = false; - } - } - for (int j = 0; !layer_included && j < layer_param.include_size(); ++j) { - if (StateMeetsRule(net_state, layer_param.include(j), layer_name)) { - layer_included = true; - } - } - if (layer_included) { - param_filtered->add_layer()->CopyFrom(layer_param); - } - } -} - -template -bool Net::StateMeetsRule(const NetState& state, - const NetStateRule& rule, const string& layer_name) { - // Check whether the rule is broken due to phase. - if (rule.has_phase()) { - if (rule.phase() != state.phase()) { - LOG(INFO) << "The NetState phase (" << state.phase() - << ") differed from the phase (" << rule.phase() - << ") specified by a rule in layer " << layer_name; - return false; - } - } - // Check whether the rule is broken due to min level. - if (rule.has_min_level()) { - if (state.level() < rule.min_level()) { - LOG(INFO) << "The NetState level (" << state.level() - << ") is above the min_level (" << rule.min_level() - << ") specified by a rule in layer " << layer_name; - return false; - } - } - // Check whether the rule is broken due to max level. - if (rule.has_max_level()) { - if (state.level() > rule.max_level()) { - LOG(INFO) << "The NetState level (" << state.level() - << ") is above the max_level (" << rule.max_level() - << ") specified by a rule in layer " << layer_name; - return false; - } - } - // Check whether the rule is broken due to stage. The NetState must - // contain ALL of the rule's stages to meet it. - for (int i = 0; i < rule.stage_size(); ++i) { - // Check that the NetState contains the rule's ith stage. 
- bool has_stage = false; - for (int j = 0; !has_stage && j < state.stage_size(); ++j) { - if (rule.stage(i) == state.stage(j)) { has_stage = true; } - } - if (!has_stage) { - LOG(INFO) << "The NetState did not contain stage '" << rule.stage(i) - << "' specified by a rule in layer " << layer_name; - return false; - } - } - // Check whether the rule is broken due to not_stage. The NetState must - // contain NONE of the rule's not_stages to meet it. - for (int i = 0; i < rule.not_stage_size(); ++i) { - // Check that the NetState contains the rule's ith not_stage. - bool has_stage = false; - for (int j = 0; !has_stage && j < state.stage_size(); ++j) { - if (rule.not_stage(i) == state.stage(j)) { has_stage = true; } - } - if (has_stage) { - LOG(INFO) << "The NetState contained a not_stage '" << rule.not_stage(i) - << "' specified by a rule in layer " << layer_name; - return false; - } - } - return true; -} - -// Helper for Net::Init: add a new input or top blob to the net. (Inputs have -// layer_id == -1, tops have layer_id >= 0.) -template -void Net::AppendTop(const NetParameter& param, const int layer_id, - const int top_id, set* available_blobs, - map* blob_name_to_idx) { - shared_ptr layer_param((layer_id >= 0) ? - (new LayerParameter(param.layer(layer_id))) : NULL); - const string& blob_name = layer_param ? - (layer_param->top_size() > top_id ? 
- layer_param->top(top_id) : "(automatic)") : param.input(top_id); - // Check if we are doing in-place computation - if (blob_name_to_idx && layer_param && layer_param->bottom_size() > top_id && - blob_name == layer_param->bottom(top_id)) { - // In-place computation - LOG(INFO) << layer_param->name() << " -> " << blob_name << " (in-place)"; - top_vecs_[layer_id].push_back(blobs_[(*blob_name_to_idx)[blob_name]].get()); - top_id_vecs_[layer_id].push_back((*blob_name_to_idx)[blob_name]); - } else if (blob_name_to_idx && - blob_name_to_idx->find(blob_name) != blob_name_to_idx->end()) { - // If we are not doing in-place computation but have duplicated blobs, - // raise an error. - LOG(FATAL) << "Duplicate blobs produced by multiple sources."; - } else { - // Normal output. - if (layer_param) { - LOG(INFO) << layer_param->name() << " -> " << blob_name; - } else { - LOG(INFO) << "Input " << top_id << " -> " << blob_name; - } - shared_ptr > blob_pointer(new Blob()); - const int blob_id = blobs_.size(); - blobs_.push_back(blob_pointer); - blob_names_.push_back(blob_name); - blob_need_backward_.push_back(false); - if (blob_name_to_idx) { (*blob_name_to_idx)[blob_name] = blob_id; } - if (layer_id == -1) { - // Set the (explicitly specified) dimensions of the input blob. - if (param.input_dim_size() > 0) { - blob_pointer->Reshape(param.input_dim(top_id * 4), - param.input_dim(top_id * 4 + 1), - param.input_dim(top_id * 4 + 2), - param.input_dim(top_id * 4 + 3)); - } else { - blob_pointer->Reshape(param.input_shape(top_id)); - } - net_input_blob_indices_.push_back(blob_id); - net_input_blobs_.push_back(blob_pointer.get()); - } else { - top_id_vecs_[layer_id].push_back(blob_id); - top_vecs_[layer_id].push_back(blob_pointer.get()); - } - } - if (available_blobs) { available_blobs->insert(blob_name); } -} - -// Helper for Net::Init: add a new bottom blob to the net. 
-template -int Net::AppendBottom(const NetParameter& param, const int layer_id, - const int bottom_id, set* available_blobs, - map* blob_name_to_idx) { - const LayerParameter& layer_param = param.layer(layer_id); - const string& blob_name = layer_param.bottom(bottom_id); - if (available_blobs->find(blob_name) == available_blobs->end()) { - LOG(FATAL) << "Unknown blob input " << blob_name - << " (at index " << bottom_id << ") to layer " << layer_id; - } - const int blob_id = (*blob_name_to_idx)[blob_name]; - LOG(INFO) << layer_names_[layer_id] << " <- " << blob_name; - bottom_vecs_[layer_id].push_back(blobs_[blob_id].get()); - bottom_id_vecs_[layer_id].push_back(blob_id); - available_blobs->erase(blob_name); - bool propagate_down = true; - // Check if the backpropagation on bottom_id should be skipped - if (layer_param.propagate_down_size() > 0) - propagate_down = layer_param.propagate_down(bottom_id); - const bool need_backward = blob_need_backward_[blob_id] && - propagate_down; - bottom_need_backward_[layer_id].push_back(need_backward); - return blob_id; -} - -template -void Net::AppendParam(const NetParameter& param, const int layer_id, - const int param_id) { - const LayerParameter& layer_param = layers_[layer_id]->layer_param(); - const int param_size = layer_param.param_size(); - string param_name = - (param_size > param_id) ? 
layer_param.param(param_id).name() : ""; - if (param_name.size()) { - param_display_names_.push_back(param_name); - } else { - ostringstream param_display_name; - param_display_name << param_id; - param_display_names_.push_back(param_display_name.str()); - } - const int net_param_id = params_.size(); - params_.push_back(layers_[layer_id]->blobs()[param_id]); - param_id_vecs_[layer_id].push_back(net_param_id); - param_layer_indices_.push_back(make_pair(layer_id, param_id)); - if (!param_size || !param_name.size() || (param_name.size() && - param_names_index_.find(param_name) == param_names_index_.end())) { - // This layer "owns" this parameter blob -- it is either anonymous - // (i.e., not given a param_name) or explicitly given a name that we - // haven't already seen. - param_owners_.push_back(-1); - if (param_name.size()) { - param_names_index_[param_name] = net_param_id; - } - } else { - // Named param blob with name we've seen before: share params - const int owner_net_param_id = param_names_index_[param_name]; - param_owners_.push_back(owner_net_param_id); - const pair& owner_index = - param_layer_indices_[owner_net_param_id]; - const int owner_layer_id = owner_index.first; - const int owner_param_id = owner_index.second; - LOG(INFO) << "Sharing parameters '" << param_name << "' owned by " - << "layer '" << layer_names_[owner_layer_id] << "', param " - << "index " << owner_param_id; - Blob* this_blob = layers_[layer_id]->blobs()[param_id].get(); - Blob* owner_blob = - layers_[owner_layer_id]->blobs()[owner_param_id].get(); - const int param_size = layer_param.param_size(); - if (param_size > param_id && (layer_param.param(param_id).share_mode() == - ParamSpec_DimCheckMode_PERMISSIVE)) { - // Permissive dimension checking -- only check counts are the same. - CHECK_EQ(this_blob->count(), owner_blob->count()) - << "Shared parameter blobs must have the same count."; - } else { - // Strict dimension checking -- all dims must be the same. 
- CHECK(this_blob->shape() == owner_blob->shape()); - } - layers_[layer_id]->blobs()[param_id]->ShareData( - *layers_[owner_layer_id]->blobs()[owner_param_id]); - } -} - -template -void Net::GetLearningRateAndWeightDecay() { - LOG(INFO) << "Collecting Learning Rate and Weight Decay."; - ParamSpec default_param_spec; - for (int i = 0; i < layers_.size(); ++i) { - vector > >& layer_blobs = layers_[i]->blobs(); - for (int j = 0; j < layer_blobs.size(); ++j) { - const ParamSpec* param_spec = - (layers_[i]->layer_param().param_size() > j) ? - &layers_[i]->layer_param().param(j) : &default_param_spec; - params_lr_.push_back(param_spec->lr_mult()); - params_weight_decay_.push_back(param_spec->decay_mult()); - } - } -} - -template -Dtype Net::ForwardFromTo(int start, int end) { - CHECK_GE(start, 0); - CHECK_LT(end, layers_.size()); - Dtype loss = 0; - if (debug_info_) { - for (int i = 0; i < net_input_blobs_.size(); ++i) { - InputDebugInfo(i); - } - } - for (int i = start; i <= end; ++i) { - // LOG(ERROR) << "Forwarding " << layer_names_[i]; - Dtype layer_loss = layers_[i]->Forward(bottom_vecs_[i], top_vecs_[i]); - loss += layer_loss; - if (debug_info_) { ForwardDebugInfo(i); } - } - return loss; -} - -template -Dtype Net::ForwardFrom(int start) { - return ForwardFromTo(start, layers_.size() - 1); -} - -template -Dtype Net::ForwardTo(int end) { - return ForwardFromTo(0, end); -} - -template -const vector*>& Net::ForwardPrefilled(Dtype* loss) { - if (loss != NULL) { - *loss = ForwardFromTo(0, layers_.size() - 1); - } else { - ForwardFromTo(0, layers_.size() - 1); - } - return net_output_blobs_; -} - -template -const vector*>& Net::Forward( - const vector*> & bottom, Dtype* loss) { - // Copy bottom to internal bottom - for (int i = 0; i < bottom.size(); ++i) { - net_input_blobs_[i]->CopyFrom(*bottom[i]); - } - return ForwardPrefilled(loss); -} - -template -string Net::Forward(const string& input_blob_protos, Dtype* loss) { - BlobProtoVector blob_proto_vec; - if 
(net_input_blobs_.size()) { - blob_proto_vec.ParseFromString(input_blob_protos); - CHECK_EQ(blob_proto_vec.blobs_size(), net_input_blobs_.size()) - << "Incorrect input size."; - for (int i = 0; i < blob_proto_vec.blobs_size(); ++i) { - net_input_blobs_[i]->FromProto(blob_proto_vec.blobs(i)); - } - } - ForwardPrefilled(loss); - blob_proto_vec.Clear(); - for (int i = 0; i < net_output_blobs_.size(); ++i) { - net_output_blobs_[i]->ToProto(blob_proto_vec.add_blobs()); - } - string output; - blob_proto_vec.SerializeToString(&output); - return output; -} - -template -void Net::BackwardFromTo(int start, int end) { - CHECK_GE(end, 0); - CHECK_LT(start, layers_.size()); - for (int i = start; i >= end; --i) { - if (layer_need_backward_[i]) { - layers_[i]->Backward( - top_vecs_[i], bottom_need_backward_[i], bottom_vecs_[i]); - if (debug_info_) { BackwardDebugInfo(i); } - } - } -} - -template -void Net::InputDebugInfo(const int input_id) { - const Blob& blob = *net_input_blobs_[input_id]; - const string& blob_name = blob_names_[net_input_blob_indices_[input_id]]; - const Dtype data_abs_val_mean = blob.asum_data() / blob.count(); - LOG(INFO) << " [Forward] " - << "Input " << blob_name << " data: " << data_abs_val_mean; -} - -template -void Net::ForwardDebugInfo(const int layer_id) { - for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) { - const Blob& blob = *top_vecs_[layer_id][top_id]; - const string& blob_name = blob_names_[top_id_vecs_[layer_id][top_id]]; - const Dtype data_abs_val_mean = blob.asum_data() / blob.count(); - LOG(INFO) << " [Forward] " - << "Layer " << layer_names_[layer_id] << ", top blob " << blob_name - << " data: " << data_abs_val_mean; - } - for (int param_id = 0; param_id < layers_[layer_id]->blobs().size(); - ++param_id) { - const Blob& blob = *layers_[layer_id]->blobs()[param_id]; - const int net_param_id = param_id_vecs_[layer_id][param_id]; - const string& blob_name = param_display_names_[net_param_id]; - const Dtype data_abs_val_mean 
= blob.asum_data() / blob.count(); - LOG(INFO) << " [Forward] " - << "Layer " << layer_names_[layer_id] << ", param blob " << blob_name - << " data: " << data_abs_val_mean; - } -} - -template -void Net::BackwardDebugInfo(const int layer_id) { - const vector*>& bottom_vec = bottom_vecs_[layer_id]; - for (int bottom_id = 0; bottom_id < bottom_vec.size(); ++bottom_id) { - if (!bottom_need_backward_[layer_id][bottom_id]) { continue; } - const Blob& blob = *bottom_vec[bottom_id]; - const string& blob_name = blob_names_[bottom_id_vecs_[layer_id][bottom_id]]; - const Dtype diff_abs_val_mean = blob.asum_diff() / blob.count(); - LOG(INFO) << " [Backward] " - << "Layer " << layer_names_[layer_id] << ", bottom blob " << blob_name - << " diff: " << diff_abs_val_mean; - } - for (int param_id = 0; param_id < layers_[layer_id]->blobs().size(); - ++param_id) { - if (!layers_[layer_id]->param_propagate_down(param_id)) { continue; } - const Blob& blob = *layers_[layer_id]->blobs()[param_id]; - const Dtype diff_abs_val_mean = blob.asum_diff() / blob.count(); - LOG(INFO) << " [Backward] " - << "Layer " << layer_names_[layer_id] << ", param blob " << param_id - << " diff: " << diff_abs_val_mean; - } -} - -template -void Net::UpdateDebugInfo(const int param_id) { - const Blob& blob = *params_[param_id]; - const int param_owner = param_owners_[param_id]; - const string& layer_name = layer_names_[param_layer_indices_[param_id].first]; - const string& param_display_name = param_display_names_[param_id]; - const Dtype diff_abs_val_mean = blob.asum_diff() / blob.count(); - if (param_owner < 0) { - const Dtype data_abs_val_mean = blob.asum_data() / blob.count(); - LOG(INFO) << " [Update] Layer " << layer_name - << ", param " << param_display_name - << " data: " << data_abs_val_mean << "; diff: " << diff_abs_val_mean; - } else { - const string& owner_layer_name = - layer_names_[param_layer_indices_[param_owner].first]; - LOG(INFO) << " [Update] Layer " << layer_name - << ", param blob " << 
param_display_name - << " (owned by layer " << owner_layer_name << ", " - << "param " << param_display_names_[param_owners_[param_id]] << ")" - << " diff: " << diff_abs_val_mean; - } -} - -template -void Net::ShareTrainedLayersWith(const Net* other) { - int num_source_layers = other->layers().size(); - for (int i = 0; i < num_source_layers; ++i) { - Layer* source_layer = other->layers()[i].get(); - const string& source_layer_name = other->layer_names()[i]; - int target_layer_id = 0; - while (target_layer_id != layer_names_.size() && - layer_names_[target_layer_id] != source_layer_name) { - ++target_layer_id; - } - if (target_layer_id == layer_names_.size()) { - DLOG(INFO) << "Ignoring source layer " << source_layer_name; - continue; - } - DLOG(INFO) << "Copying source layer " << source_layer_name; - vector > >& target_blobs = - layers_[target_layer_id]->blobs(); - CHECK_EQ(target_blobs.size(), source_layer->blobs().size()) - << "Incompatible number of blobs for layer " << source_layer_name; - for (int j = 0; j < target_blobs.size(); ++j) { - Blob* source_blob = source_layer->blobs()[j].get(); - CHECK(target_blobs[j]->shape() == source_blob->shape()); - target_blobs[j]->ShareData(*source_blob); - } - } -} - -template -void Net::BackwardFrom(int start) { - BackwardFromTo(start, 0); -} - -template -void Net::BackwardTo(int end) { - BackwardFromTo(layers_.size() - 1, end); -} - -template -void Net::Backward() { - BackwardFromTo(layers_.size() - 1, 0); - if (debug_info_) { - Dtype asum_data = 0, asum_diff = 0, sumsq_data = 0, sumsq_diff = 0; - for (int i = 0; i < params_.size(); ++i) { - if (param_owners_[i] >= 0) { continue; } - asum_data += params_[i]->asum_data(); - asum_diff += params_[i]->asum_diff(); - sumsq_data += params_[i]->sumsq_data(); - sumsq_diff += params_[i]->sumsq_diff(); - } - const Dtype l2norm_data = std::sqrt(sumsq_data); - const Dtype l2norm_diff = std::sqrt(sumsq_diff); - LOG(ERROR) << " [Backward] All net params (data, diff): " - << "L1 norm = (" 
<< asum_data << ", " << asum_diff << "); " - << "L2 norm = (" << l2norm_data << ", " << l2norm_diff << ")"; - } -} - -template -void Net::Reshape() { - for (int i = 0; i < layers_.size(); ++i) { - layers_[i]->Reshape(bottom_vecs_[i], top_vecs_[i]); - } -} - -template -void Net::CopyTrainedLayersFrom(const NetParameter& param) { - int num_source_layers = param.layer_size(); - for (int i = 0; i < num_source_layers; ++i) { - const LayerParameter& source_layer = param.layer(i); - const string& source_layer_name = source_layer.name(); - int target_layer_id = 0; - while (target_layer_id != layer_names_.size() && - layer_names_[target_layer_id] != source_layer_name) { - ++target_layer_id; - } - if (target_layer_id == layer_names_.size()) { - DLOG(INFO) << "Ignoring source layer " << source_layer_name; - continue; - } - DLOG(INFO) << "Copying source layer " << source_layer_name; - vector > >& target_blobs = - layers_[target_layer_id]->blobs(); - CHECK_EQ(target_blobs.size(), source_layer.blobs_size()) - << "Incompatible number of blobs for layer " << source_layer_name; - for (int j = 0; j < target_blobs.size(); ++j) { - const bool kReshape = false; - target_blobs[j]->FromProto(source_layer.blobs(j), kReshape); - } - } -} - -template -void Net::CopyTrainedLayersFrom(const string trained_filename) { - NetParameter param; - ReadNetParamsFromBinaryFileOrDie(trained_filename, ¶m); - CopyTrainedLayersFrom(param); -} - -template -void Net::ToProto(NetParameter* param, bool write_diff) const { - param->Clear(); - param->set_name(name_); - // Add bottom and top - for (int i = 0; i < net_input_blob_indices_.size(); ++i) { - param->add_input(blob_names_[net_input_blob_indices_[i]]); - } - DLOG(INFO) << "Serializing " << layers_.size() << " layers"; - for (int i = 0; i < layers_.size(); ++i) { - LayerParameter* layer_param = param->add_layer(); - for (int j = 0; j < bottom_id_vecs_[i].size(); ++j) { - layer_param->add_bottom(blob_names_[bottom_id_vecs_[i][j]]); - } - for (int j = 0; j 
< top_id_vecs_[i].size(); ++j) { - layer_param->add_top(blob_names_[top_id_vecs_[i][j]]); - } - layers_[i]->ToProto(layer_param, write_diff); - } -} - -template -void Net::Update() { - // First, accumulate the diffs of any shared parameters into their owner's - // diff. (Assumes that the learning rate, weight decay, etc. have already been - // accounted for in the current diff.) - for (int i = 0; i < params_.size(); ++i) { - if (param_owners_[i] < 0) { continue; } - if (debug_info_) { UpdateDebugInfo(i); } - const int count = params_[i]->count(); - const Dtype* this_diff; - Dtype* owner_diff; - switch (Caffe::mode()) { - case Caffe::CPU: - this_diff = params_[i]->cpu_diff(); - owner_diff = params_[param_owners_[i]]->mutable_cpu_diff(); - caffe_add(count, this_diff, owner_diff, owner_diff); - break; - case Caffe::GPU: -#ifndef CPU_ONLY - this_diff = params_[i]->gpu_diff(); - owner_diff = params_[param_owners_[i]]->mutable_gpu_diff(); - caffe_gpu_add(count, this_diff, owner_diff, owner_diff); -#else - NO_GPU; -#endif - break; - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } - } - // Now, update the owned parameters. 
- for (int i = 0; i < params_.size(); ++i) { - if (param_owners_[i] >= 0) { continue; } - if (debug_info_) { UpdateDebugInfo(i); } - params_[i]->Update(); - } -} - -template -bool Net::has_blob(const string& blob_name) const { - return blob_names_index_.find(blob_name) != blob_names_index_.end(); -} - -template -const shared_ptr > Net::blob_by_name( - const string& blob_name) const { - shared_ptr > blob_ptr; - if (has_blob(blob_name)) { - blob_ptr = blobs_[blob_names_index_.find(blob_name)->second]; - } else { - blob_ptr.reset((Blob*)(NULL)); - LOG(WARNING) << "Unknown blob name " << blob_name; - } - return blob_ptr; -} - -template -bool Net::has_layer(const string& layer_name) const { - return layer_names_index_.find(layer_name) != layer_names_index_.end(); -} - -template -const shared_ptr > Net::layer_by_name( - const string& layer_name) const { - shared_ptr > layer_ptr; - if (has_layer(layer_name)) { - layer_ptr = layers_[layer_names_index_.find(layer_name)->second]; - } else { - layer_ptr.reset((Layer*)(NULL)); - LOG(WARNING) << "Unknown layer name " << layer_name; - } - return layer_ptr; -} - -INSTANTIATE_CLASS(Net); - -} // namespace caffe diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index 4a128eba8f9..331d31ce8f8 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -11,6 +11,8 @@ message BlobProto { optional BlobShape shape = 7; repeated float data = 5 [packed = true]; repeated float diff = 6 [packed = true]; + repeated double double_data = 8 [packed = true]; + repeated double double_diff = 9 [packed = true]; // 4D dimensions -- deprecated. Use "shape" instead. optional int32 num = 1 [default = 0]; @@ -189,6 +191,11 @@ message SolverParameter { // whether to snapshot diff in the results or not. Snapshotting diff will help // debugging but the final protocol buffer size will be much larger. 
optional bool snapshot_diff = 16 [default = false]; + enum SnapshotFormat { + HDF5 = 0; + BINARYPROTO = 1; + } + optional SnapshotFormat snapshot_format = 37 [default = BINARYPROTO]; // the mode solver will use: 0 for CPU and 1 for GPU. Use GPU in default. enum SolverMode { CPU = 0; @@ -207,10 +214,19 @@ message SolverParameter { SGD = 0; NESTEROV = 1; ADAGRAD = 2; + RMSPROP = 3; + ADADELTA = 4; + ADAM = 5; } optional SolverType solver_type = 30 [default = SGD]; - // numerical stability for AdaGrad + // numerical stability for RMSProp, AdaGrad and AdaDelta and Adam optional float delta = 31 [default = 1e-8]; + // parameters for the Adam solver + optional float momentum2 = 39 [default = 0.999]; + + // RMSProp decay value + // MeanSquare(t) = rms_decay*MeanSquare(t-1) + (1-rms_decay)*SquareGradient(t) + optional float rms_decay = 38; // If true, print information about the state of the net that may help with // debugging learning problems. @@ -341,6 +357,7 @@ message LayerParameter { optional DropoutParameter dropout_param = 108; optional DummyDataParameter dummy_data_param = 109; optional EltwiseParameter eltwise_param = 110; + optional EmbedParameter embed_param = 137; optional ExpParameter exp_param = 111; optional FlattenParameter flatten_param = 135; optional HDF5DataParameter hdf5_data_param = 112; @@ -366,11 +383,9 @@ message LayerParameter { optional SliceParameter slice_param = 126; optional TanHParameter tanh_param = 127; optional ThresholdParameter threshold_param = 128; + optional TileParameter tile_param = 138; optional WindowDataParameter window_data_param = 129; -<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da -======= - optional TripletLossParameter triplet_loss_param = 137; ->>>>>>> triplet data generation and network update + optional TripletLossParameter triplet_loss_param = 139; } // Message that stores parameters used to apply transformation @@ -454,8 +469,6 @@ message ContrastiveLossParameter { optional bool legacy_version = 2 [default = 
false]; } -<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da -======= message TripletLossParameter { //margin for negative triplet optional float margin = 1 [default = 1.0]; @@ -463,7 +476,6 @@ message TripletLossParameter { optional uint32 num_triplets = 3 [default = 3]; } ->>>>>>> triplet data generation and network update message ConvolutionParameter { optional uint32 num_output = 1; // The number of outputs for the layer optional bool bias_term = 2 [default = true]; // whether to have bias terms @@ -502,6 +514,7 @@ message DataParameter { // to avoid all asynchronous sgd clients to start at the same point. The skip // point would be set as rand_skip * rand(0,1). Note that rand_skip should not // be larger than the number of keys in the database. + // DEPRECATED. Each solver accesses a different subset of the database. optional uint32 rand_skip = 7 [default = 0]; optional DB backend = 8 [default = LEVELDB]; // DEPRECATED. See TransformationParameter. For data pre-processing, we can do @@ -517,6 +530,9 @@ message DataParameter { optional bool mirror = 6 [default = false]; // Force the encoded image to have 3 color channels optional bool force_encoded_color = 9 [default = false]; + // Prefetch queue (Number of batches to prefetch to host memory, increase if + // data access bandwidth varies). + optional uint32 prefetch = 10 [default = 4]; } message DropoutParameter { @@ -748,6 +764,15 @@ message PowerParameter { message PythonParameter { optional string module = 1; optional string layer = 2; + // This value is set to the attribute `param_str` of the `PythonLayer` object + // in Python before calling the `setup()` method. This could be a number, + // string, dictionary in Python dict format, JSON, etc. You may parse this + // string in `setup` method and use it in `forward` and `backward`. + optional string param_str = 3 [default = '']; + // Whether this PythonLayer is shared among worker solvers during data parallelism. 
+ // If true, each worker solver sequentially run forward from this layer. + // This value should be set true if you are using it as a data layer. + optional bool share_in_parallel = 4 [default = false]; } // Message that stores parameters used by ReductionLayer @@ -1012,6 +1037,7 @@ message V1LayerParameter { SPLIT = 22; SLICE = 33; TANH = 23; + TRIPLET_LOSS = 40; WINDOW_DATA = 24; THRESHOLD = 31; @@ -1058,6 +1084,7 @@ message V1LayerParameter { optional TransformationParameter transform_param = 36; optional LossParameter loss_param = 42; optional V0LayerParameter layer = 1; + optional TripletLossParameter triplet_loss_param = 43; } // DEPRECATED: V0LayerParameter is the old way of specifying layer parameters diff --git a/src/caffe/proto/caffe.proto.orig b/src/caffe/proto/caffe.proto.orig deleted file mode 100644 index c6021592c33..00000000000 --- a/src/caffe/proto/caffe.proto.orig +++ /dev/null @@ -1,1165 +0,0 @@ -syntax = "proto2"; - -package caffe; - -// Specifies the shape (dimensions) of a Blob. -message BlobShape { - repeated int64 dim = 1 [packed = true]; -} - -message BlobProto { - optional BlobShape shape = 7; - repeated float data = 5 [packed = true]; - repeated float diff = 6 [packed = true]; - repeated double double_data = 8 [packed = true]; - repeated double double_diff = 9 [packed = true]; - - // 4D dimensions -- deprecated. Use "shape" instead. - optional int32 num = 1 [default = 0]; - optional int32 channels = 2 [default = 0]; - optional int32 height = 3 [default = 0]; - optional int32 width = 4 [default = 0]; -} - -// The BlobProtoVector is simply a way to pass multiple blobproto instances -// around. -message BlobProtoVector { - repeated BlobProto blobs = 1; -} - -message Datum { - optional int32 channels = 1; - optional int32 height = 2; - optional int32 width = 3; - // the actual image data, in bytes - optional bytes data = 4; - optional int32 label = 5; - // Optionally, the datum could also hold float data. 
- repeated float float_data = 6; - // If true data contains an encoded image that need to be decoded - optional bool encoded = 7 [default = false]; -} - -message FillerParameter { - // The filler type. - optional string type = 1 [default = 'constant']; - optional float value = 2 [default = 0]; // the value in constant filler - optional float min = 3 [default = 0]; // the min value in uniform filler - optional float max = 4 [default = 1]; // the max value in uniform filler - optional float mean = 5 [default = 0]; // the mean value in Gaussian filler - optional float std = 6 [default = 1]; // the std value in Gaussian filler - // The expected number of non-zero output weights for a given input in - // Gaussian filler -- the default -1 means don't perform sparsification. - optional int32 sparse = 7 [default = -1]; - // Normalize the filler variance by fan_in, fan_out, or their average. - // Applies to 'xavier' and 'msra' fillers. - enum VarianceNorm { - FAN_IN = 0; - FAN_OUT = 1; - AVERAGE = 2; - } - optional VarianceNorm variance_norm = 8 [default = FAN_IN]; -} - -message NetParameter { - optional string name = 1; // consider giving the network a name - // The input blobs to the network. - repeated string input = 3; - // The shape of the input blobs. - repeated BlobShape input_shape = 8; - - // 4D input dimensions -- deprecated. Use "shape" instead. - // If specified, for each input blob there should be four - // values specifying the num, channels, height and width of the input blob. - // Thus, there should be a total of (4 * #input) numbers. - repeated int32 input_dim = 4; - - // Whether the network will force every layer to carry out backward operation. - // If set False, then whether to carry out backward is determined - // automatically according to the net structure and learning rates. - optional bool force_backward = 5 [default = false]; - // The current "state" of the network, including the phase, level, and stage. 
- // Some layers may be included/excluded depending on this state and the states - // specified in the layers' include and exclude fields. - optional NetState state = 6; - - // Print debugging information about results while running Net::Forward, - // Net::Backward, and Net::Update. - optional bool debug_info = 7 [default = false]; - - // The layers that make up the net. Each of their configurations, including - // connectivity and behavior, is specified as a LayerParameter. - repeated LayerParameter layer = 100; // ID 100 so layers are printed last. - - // DEPRECATED: use 'layer' instead. - repeated V1LayerParameter layers = 2; -} - -// NOTE -// Update the next available ID when you add a new SolverParameter field. -// -// SolverParameter next available ID: 37 (last added: iter_size) -message SolverParameter { - ////////////////////////////////////////////////////////////////////////////// - // Specifying the train and test networks - // - // Exactly one train net must be specified using one of the following fields: - // train_net_param, train_net, net_param, net - // One or more test nets may be specified using any of the following fields: - // test_net_param, test_net, net_param, net - // If more than one test net field is specified (e.g., both net and - // test_net are specified), they will be evaluated in the field order given - // above: (1) test_net_param, (2) test_net, (3) net_param/net. - // A test_iter must be specified for each test_net. - // A test_level and/or a test_stage may also be specified for each test_net. - ////////////////////////////////////////////////////////////////////////////// - - // Proto filename for the train net, possibly combined with one or more - // test nets. - optional string net = 24; - // Inline train net param, possibly combined with one or more test nets. - optional NetParameter net_param = 25; - - optional string train_net = 1; // Proto filename for the train net. 
- repeated string test_net = 2; // Proto filenames for the test nets. - optional NetParameter train_net_param = 21; // Inline train net params. - repeated NetParameter test_net_param = 22; // Inline test net params. - - // The states for the train/test nets. Must be unspecified or - // specified once per net. - // - // By default, all states will have solver = true; - // train_state will have phase = TRAIN, - // and all test_state's will have phase = TEST. - // Other defaults are set according to the NetState defaults. - optional NetState train_state = 26; - repeated NetState test_state = 27; - - // The number of iterations for each test net. - repeated int32 test_iter = 3; - - // The number of iterations between two testing phases. - optional int32 test_interval = 4 [default = 0]; - optional bool test_compute_loss = 19 [default = false]; - // If true, run an initial test pass before the first iteration, - // ensuring memory availability and printing the starting value of the loss. - optional bool test_initialization = 32 [default = true]; - optional float base_lr = 5; // The base learning rate - // the number of iterations between displaying info. If display = 0, no info - // will be displayed. - optional int32 display = 6; - // Display the loss averaged over the last average_loss iterations - optional int32 average_loss = 33 [default = 1]; - optional int32 max_iter = 7; // the maximum number of iterations - // accumulate gradients over `iter_size` x `batch_size` instances - optional int32 iter_size = 36 [default = 1]; - optional string lr_policy = 8; // The learning rate decay policy. - optional float gamma = 9; // The parameter to compute the learning rate. - optional float power = 10; // The parameter to compute the learning rate. - optional float momentum = 11; // The momentum value. - optional float weight_decay = 12; // The weight decay. 
- // regularization types supported: L1 and L2 - // controlled by weight_decay - optional string regularization_type = 29 [default = "L2"]; - // the stepsize for learning rate policy "step" - optional int32 stepsize = 13; - // the stepsize for learning rate policy "multistep" - repeated int32 stepvalue = 34; - - // Set clip_gradients to >= 0 to clip parameter gradients to that L2 norm, - // whenever their actual L2 norm is larger. - optional float clip_gradients = 35 [default = -1]; - - optional int32 snapshot = 14 [default = 0]; // The snapshot interval - optional string snapshot_prefix = 15; // The prefix for the snapshot. - // whether to snapshot diff in the results or not. Snapshotting diff will help - // debugging but the final protocol buffer size will be much larger. - optional bool snapshot_diff = 16 [default = false]; - enum SnapshotFormat { - HDF5 = 0; - BINARYPROTO = 1; - } - optional SnapshotFormat snapshot_format = 37 [default = BINARYPROTO]; - // the mode solver will use: 0 for CPU and 1 for GPU. Use GPU in default. - enum SolverMode { - CPU = 0; - GPU = 1; - } - optional SolverMode solver_mode = 17 [default = GPU]; - // the device_id will that be used in GPU mode. Use device_id = 0 in default. - optional int32 device_id = 18 [default = 0]; - // If non-negative, the seed with which the Solver will initialize the Caffe - // random number generator -- useful for reproducible results. Otherwise, - // (and by default) initialize using a seed derived from the system clock. 
- optional int64 random_seed = 20 [default = -1]; - - // Solver type - enum SolverType { - SGD = 0; - NESTEROV = 1; - ADAGRAD = 2; - RMSPROP = 3; - ADADELTA = 4; - ADAM = 5; - } - optional SolverType solver_type = 30 [default = SGD]; - // numerical stability for RMSProp, AdaGrad and AdaDelta and Adam - optional float delta = 31 [default = 1e-8]; - // parameters for the Adam solver - optional float momentum2 = 39 [default = 0.999]; - - // RMSProp decay value - // MeanSquare(t) = rms_decay*MeanSquare(t-1) + (1-rms_decay)*SquareGradient(t) - optional float rms_decay = 38; - - // If true, print information about the state of the net that may help with - // debugging learning problems. - optional bool debug_info = 23 [default = false]; - - // If false, don't save a snapshot after training finishes. - optional bool snapshot_after_train = 28 [default = true]; -} - -// A message that stores the solver snapshots -message SolverState { - optional int32 iter = 1; // The current iteration - optional string learned_net = 2; // The file that stores the learned net. - repeated BlobProto history = 3; // The history for sgd solvers - optional int32 current_step = 4 [default = 0]; // The current step for learning rate -} - -enum Phase { - TRAIN = 0; - TEST = 1; -} - -message NetState { - optional Phase phase = 1 [default = TEST]; - optional int32 level = 2 [default = 0]; - repeated string stage = 3; -} - -message NetStateRule { - // Set phase to require the NetState have a particular phase (TRAIN or TEST) - // to meet this rule. - optional Phase phase = 1; - - // Set the minimum and/or maximum levels in which the layer should be used. - // Leave undefined to meet the rule regardless of level. - optional int32 min_level = 2; - optional int32 max_level = 3; - - // Customizable sets of stages to include or exclude. - // The net must have ALL of the specified stages and NONE of the specified - // "not_stage"s to meet the rule. 
- // (Use multiple NetStateRules to specify conjunctions of stages.) - repeated string stage = 4; - repeated string not_stage = 5; -} - -// Specifies training parameters (multipliers on global learning constants, -// and the name and other settings used for weight sharing). -message ParamSpec { - // The names of the parameter blobs -- useful for sharing parameters among - // layers, but never required otherwise. To share a parameter between two - // layers, give it a (non-empty) name. - optional string name = 1; - - // Whether to require shared weights to have the same shape, or just the same - // count -- defaults to STRICT if unspecified. - optional DimCheckMode share_mode = 2; - enum DimCheckMode { - // STRICT (default) requires that num, channels, height, width each match. - STRICT = 0; - // PERMISSIVE requires only the count (num*channels*height*width) to match. - PERMISSIVE = 1; - } - - // The multiplier on the global learning rate for this parameter. - optional float lr_mult = 3 [default = 1.0]; - - // The multiplier on the global weight decay for this parameter. - optional float decay_mult = 4 [default = 1.0]; -} - -// NOTE -// Update the next available ID when you add a new LayerParameter field. -// -<<<<<<< 7a85de9cf8ac6f0416eca95c1b991f127b8b7917 -// LayerParameter next available layer-specific ID: 137 (last added: reduction_param) -======= -// LayerParameter next available layer-specific ID: 139 (last added: tile_param) ->>>>>>> Add TileLayer -message LayerParameter { - optional string name = 1; // the layer name - optional string type = 2; // the layer type - repeated string bottom = 3; // the name of each bottom blob - repeated string top = 4; // the name of each top blob - - // The train / test phase for computation. - optional Phase phase = 10; - - // The amount of weight to assign each top blob in the objective. - // Each layer assigns a default value, usually of either 0 or 1, - // to each top blob. 
- repeated float loss_weight = 5; - - // Specifies training parameters (multipliers on global learning constants, - // and the name and other settings used for weight sharing). - repeated ParamSpec param = 6; - - // The blobs containing the numeric parameters of the layer. - repeated BlobProto blobs = 7; - - // Specifies on which bottoms the backpropagation should be skipped. - // The size must be either 0 or equal to the number of bottoms. - repeated bool propagate_down = 11; - - // Rules controlling whether and when a layer is included in the network, - // based on the current NetState. You may specify a non-zero number of rules - // to include OR exclude, but not both. If no include or exclude rules are - // specified, the layer is always included. If the current NetState meets - // ANY (i.e., one or more) of the specified rules, the layer is - // included/excluded. - repeated NetStateRule include = 8; - repeated NetStateRule exclude = 9; - - // Parameters for data pre-processing. - optional TransformationParameter transform_param = 100; - - // Parameters shared by loss layers. - optional LossParameter loss_param = 101; - - // Layer type-specific parameters. - // - // Note: certain layers may have more than one computational engine - // for their implementation. These layers include an Engine type and - // engine parameter for selecting the implementation. - // The default for the engine is set by the ENGINE switch at compile-time. 
- optional AccuracyParameter accuracy_param = 102; - optional ArgMaxParameter argmax_param = 103; - optional ConcatParameter concat_param = 104; - optional ContrastiveLossParameter contrastive_loss_param = 105; - optional ConvolutionParameter convolution_param = 106; - optional DataParameter data_param = 107; - optional DropoutParameter dropout_param = 108; - optional DummyDataParameter dummy_data_param = 109; - optional EltwiseParameter eltwise_param = 110; - optional EmbedParameter embed_param = 137; - optional ExpParameter exp_param = 111; - optional FlattenParameter flatten_param = 135; - optional HDF5DataParameter hdf5_data_param = 112; - optional HDF5OutputParameter hdf5_output_param = 113; - optional HingeLossParameter hinge_loss_param = 114; - optional ImageDataParameter image_data_param = 115; - optional InfogainLossParameter infogain_loss_param = 116; - optional InnerProductParameter inner_product_param = 117; - optional LogParameter log_param = 134; - optional LRNParameter lrn_param = 118; - optional MemoryDataParameter memory_data_param = 119; - optional MVNParameter mvn_param = 120; - optional PoolingParameter pooling_param = 121; - optional PowerParameter power_param = 122; - optional PReLUParameter prelu_param = 131; - optional PythonParameter python_param = 130; - optional ReductionParameter reduction_param = 136; - optional ReLUParameter relu_param = 123; - optional ReshapeParameter reshape_param = 133; - optional SigmoidParameter sigmoid_param = 124; - optional SoftmaxParameter softmax_param = 125; - optional SPPParameter spp_param = 132; - optional SliceParameter slice_param = 126; - optional TanHParameter tanh_param = 127; - optional ThresholdParameter threshold_param = 128; - optional TileParameter tile_param = 138; - optional WindowDataParameter window_data_param = 129; - optional TripletLossParameter triplet_loss_param = 137; -} - -// Message that stores parameters used to apply transformation -// to the data layer's data -message 
TransformationParameter { - // For data pre-processing, we can do simple scaling and subtracting the - // data mean, if provided. Note that the mean subtraction is always carried - // out before scaling. - optional float scale = 1 [default = 1]; - // Specify if we want to randomly mirror data. - optional bool mirror = 2 [default = false]; - // Specify if we would like to randomly crop an image. - optional uint32 crop_size = 3 [default = 0]; - // mean_file and mean_value cannot be specified at the same time - optional string mean_file = 4; - // if specified can be repeated once (would substract it from all the channels) - // or can be repeated the same number of times as channels - // (would subtract them from the corresponding channel) - repeated float mean_value = 5; - // Force the decoded image to have 3 color channels. - optional bool force_color = 6 [default = false]; - // Force the decoded image to have 1 color channels. - optional bool force_gray = 7 [default = false]; -} - -// Message that stores parameters shared by loss layers -message LossParameter { - // If specified, ignore instances with the given label. - optional int32 ignore_label = 1; - // If true, normalize each batch across all instances (including spatial - // dimesions, but not ignored instances); else, divide by batch size only. - optional bool normalize = 2 [default = true]; -} - -// Messages that store parameters used by individual layer types follow, in -// alphabetical order. - -message AccuracyParameter { - // When computing accuracy, count as correct by comparing the true label to - // the top k scoring classes. By default, only compare to the top scoring - // class (i.e. argmax). - optional uint32 top_k = 1 [default = 1]; - - // The "label" axis of the prediction blob, whose argmax corresponds to the - // predicted label -- may be negative to index from the end (e.g., -1 for the - // last axis). 
For example, if axis == 1 and the predictions are - // (N x C x H x W), the label blob is expected to contain N*H*W ground truth - // labels with integer values in {0, 1, ..., C-1}. - optional int32 axis = 2 [default = 1]; - - // If specified, ignore instances with the given label. - optional int32 ignore_label = 3; -} - -message ArgMaxParameter { - // If true produce pairs (argmax, maxval) - optional bool out_max_val = 1 [default = false]; - optional uint32 top_k = 2 [default = 1]; -} - -message ConcatParameter { - // The axis along which to concatenate -- may be negative to index from the - // end (e.g., -1 for the last axis). Other axes must have the - // same dimension for all the bottom blobs. - // By default, ConcatLayer concatenates blobs along the "channels" axis (1). - optional int32 axis = 2 [default = 1]; - - // DEPRECATED: alias for "axis" -- does not support negative indexing. - optional uint32 concat_dim = 1 [default = 1]; -} - -message ContrastiveLossParameter { - // margin for dissimilar pair - optional float margin = 1 [default = 1.0]; - // The first implementation of this cost did not exactly match the cost of - // Hadsell et al 2006 -- using (margin - d^2) instead of (margin - d)^2. - // legacy_version = false (the default) uses (margin - d)^2 as proposed in the - // Hadsell paper. New models should probably use this version. - // legacy_version = true uses (margin - d^2). 
This is kept to support / - // reproduce existing models and results - optional bool legacy_version = 2 [default = false]; -} - -message TripletLossParameter { - //margin for negative triplet - optional float margin = 1 [default = 1.0]; - optional uint32 losstype = 2 [default = 1]; - optional uint32 num_triplets = 3 [default = 3]; -} - -message ConvolutionParameter { - optional uint32 num_output = 1; // The number of outputs for the layer - optional bool bias_term = 2 [default = true]; // whether to have bias terms - // Pad, kernel size, and stride are all given as a single value for equal - // dimensions in height and width or as Y, X pairs. - optional uint32 pad = 3 [default = 0]; // The padding size (equal in Y, X) - optional uint32 pad_h = 9 [default = 0]; // The padding height - optional uint32 pad_w = 10 [default = 0]; // The padding width - optional uint32 kernel_size = 4; // The kernel size (square) - optional uint32 kernel_h = 11; // The kernel height - optional uint32 kernel_w = 12; // The kernel width - optional uint32 group = 5 [default = 1]; // The group size for group conv - optional uint32 stride = 6 [default = 1]; // The stride (equal in Y, X) - optional uint32 stride_h = 13; // The stride height - optional uint32 stride_w = 14; // The stride width - optional FillerParameter weight_filler = 7; // The filler for the weight - optional FillerParameter bias_filler = 8; // The filler for the bias - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 15 [default = DEFAULT]; -} - -message DataParameter { - enum DB { - LEVELDB = 0; - LMDB = 1; - } - // Specify the data source. - optional string source = 1; - // Specify the batch size. - optional uint32 batch_size = 4 [default = 1]; - // The rand_skip variable is for the data layer to skip a few data points - // to avoid all asynchronous sgd clients to start at the same point. The skip - // point would be set as rand_skip * rand(0,1). 
Note that rand_skip should not - // be larger than the number of keys in the database. - // DEPRECATED. Each solver accesses a different subset of the database. - optional uint32 rand_skip = 7 [default = 0]; - optional DB backend = 8 [default = LEVELDB]; - // DEPRECATED. See TransformationParameter. For data pre-processing, we can do - // simple scaling and subtracting the data mean, if provided. Note that the - // mean subtraction is always carried out before scaling. - optional float scale = 2 [default = 1]; - optional string mean_file = 3; - // DEPRECATED. See TransformationParameter. Specify if we would like to randomly - // crop an image. - optional uint32 crop_size = 5 [default = 0]; - // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror - // data. - optional bool mirror = 6 [default = false]; - // Force the encoded image to have 3 color channels - optional bool force_encoded_color = 9 [default = false]; - // Prefetch queue (Number of batches to prefetch to host memory, increase if - // data access bandwidth varies). - optional uint32 prefetch = 10 [default = 4]; -} - -message DropoutParameter { - optional float dropout_ratio = 1 [default = 0.5]; // dropout ratio -} - -// DummyDataLayer fills any number of arbitrarily shaped blobs with random -// (or constant) data generated by "Fillers" (see "message FillerParameter"). -message DummyDataParameter { - // This layer produces N >= 1 top blobs. DummyDataParameter must specify 1 or N - // shape fields, and 0, 1 or N data_fillers. - // - // If 0 data_fillers are specified, ConstantFiller with a value of 0 is used. - // If 1 data_filler is specified, it is applied to all top blobs. If N are - // specified, the ith is applied to the ith top blob. - repeated FillerParameter data_filler = 1; - repeated BlobShape shape = 6; - - // 4D dimensions -- deprecated. Use "shape" instead. 
- repeated uint32 num = 2; - repeated uint32 channels = 3; - repeated uint32 height = 4; - repeated uint32 width = 5; -} - -message EltwiseParameter { - enum EltwiseOp { - PROD = 0; - SUM = 1; - MAX = 2; - } - optional EltwiseOp operation = 1 [default = SUM]; // element-wise operation - repeated float coeff = 2; // blob-wise coefficient for SUM operation - - // Whether to use an asymptotically slower (for >2 inputs) but stabler method - // of computing the gradient for the PROD operation. (No effect for SUM op.) - optional bool stable_prod_grad = 3 [default = true]; -} - -message ExpParameter { - // ExpLayer computes outputs y = base ^ (shift + scale * x), for base > 0. - // Or if base is set to the default (-1), base is set to e, - // so y = exp(shift + scale * x). - optional float base = 1 [default = -1.0]; - optional float scale = 2 [default = 1.0]; - optional float shift = 3 [default = 0.0]; -} - -/// Message that stores parameters used by FlattenLayer -message FlattenParameter { - // The first axis to flatten: all preceding axes are retained in the output. - // May be negative to index from the end (e.g., -1 for the last axis). - optional int32 axis = 1 [default = 1]; - - // The last axis to flatten: all following axes are retained in the output. - // May be negative to index from the end (e.g., the default -1 for the last - // axis). - optional int32 end_axis = 2 [default = -1]; -} - -// Message that stores parameters used by HDF5DataLayer -message HDF5DataParameter { - // Specify the data source. - optional string source = 1; - // Specify the batch size. - optional uint32 batch_size = 2; - - // Specify whether to shuffle the data. - // If shuffle == true, the ordering of the HDF5 files is shuffled, - // and the ordering of data within any given HDF5 file is shuffled, - // but data between different files are not interleaved; all of a file's - // data are output (in a random order) before moving onto another file. 
- optional bool shuffle = 3 [default = false]; -} - -message HDF5OutputParameter { - optional string file_name = 1; -} - -message HingeLossParameter { - enum Norm { - L1 = 1; - L2 = 2; - } - // Specify the Norm to use L1 or L2 - optional Norm norm = 1 [default = L1]; -} - -message ImageDataParameter { - // Specify the data source. - optional string source = 1; - // Specify the batch size. - optional uint32 batch_size = 4 [default = 1]; - // The rand_skip variable is for the data layer to skip a few data points - // to avoid all asynchronous sgd clients to start at the same point. The skip - // point would be set as rand_skip * rand(0,1). Note that rand_skip should not - // be larger than the number of keys in the database. - optional uint32 rand_skip = 7 [default = 0]; - // Whether or not ImageLayer should shuffle the list of files at every epoch. - optional bool shuffle = 8 [default = false]; - // It will also resize images if new_height or new_width are not zero. - optional uint32 new_height = 9 [default = 0]; - optional uint32 new_width = 10 [default = 0]; - // Specify if the images are color or gray - optional bool is_color = 11 [default = true]; - // DEPRECATED. See TransformationParameter. For data pre-processing, we can do - // simple scaling and subtracting the data mean, if provided. Note that the - // mean subtraction is always carried out before scaling. - optional float scale = 2 [default = 1]; - optional string mean_file = 3; - // DEPRECATED. See TransformationParameter. Specify if we would like to randomly - // crop an image. - optional uint32 crop_size = 5 [default = 0]; - // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror - // data. - optional bool mirror = 6 [default = false]; - optional string root_folder = 12 [default = ""]; -} - -message InfogainLossParameter { - // Specify the infogain matrix source. 
- optional string source = 1; -} - -message InnerProductParameter { - optional uint32 num_output = 1; // The number of outputs for the layer - optional bool bias_term = 2 [default = true]; // whether to have bias terms - optional FillerParameter weight_filler = 3; // The filler for the weight - optional FillerParameter bias_filler = 4; // The filler for the bias - - // The first axis to be lumped into a single inner product computation; - // all preceding axes are retained in the output. - // May be negative to index from the end (e.g., -1 for the last axis). - optional int32 axis = 5 [default = 1]; -} - -// Message that stores parameters used by LogLayer -message LogParameter { - // LogLayer computes outputs y = log_base(shift + scale * x), for base > 0. - // Or if base is set to the default (-1), base is set to e, - // so y = ln(shift + scale * x) = log_e(shift + scale * x) - optional float base = 1 [default = -1.0]; - optional float scale = 2 [default = 1.0]; - optional float shift = 3 [default = 0.0]; -} - -// Message that stores parameters used by LRNLayer -message LRNParameter { - optional uint32 local_size = 1 [default = 5]; - optional float alpha = 2 [default = 1.]; - optional float beta = 3 [default = 0.75]; - enum NormRegion { - ACROSS_CHANNELS = 0; - WITHIN_CHANNEL = 1; - } - optional NormRegion norm_region = 4 [default = ACROSS_CHANNELS]; - optional float k = 5 [default = 1.]; -} - -message MemoryDataParameter { - optional uint32 batch_size = 1; - optional uint32 channels = 2; - optional uint32 height = 3; - optional uint32 width = 4; -} - -message MVNParameter { - // This parameter can be set to false to normalize mean only - optional bool normalize_variance = 1 [default = true]; - - // This parameter can be set to true to perform DNN-like MVN - optional bool across_channels = 2 [default = false]; - - // Epsilon for not dividing by zero while normalizing variance - optional float eps = 3 [default = 1e-9]; -} - -message PoolingParameter { - enum 
PoolMethod { - MAX = 0; - AVE = 1; - STOCHASTIC = 2; - } - optional PoolMethod pool = 1 [default = MAX]; // The pooling method - // Pad, kernel size, and stride are all given as a single value for equal - // dimensions in height and width or as Y, X pairs. - optional uint32 pad = 4 [default = 0]; // The padding size (equal in Y, X) - optional uint32 pad_h = 9 [default = 0]; // The padding height - optional uint32 pad_w = 10 [default = 0]; // The padding width - optional uint32 kernel_size = 2; // The kernel size (square) - optional uint32 kernel_h = 5; // The kernel height - optional uint32 kernel_w = 6; // The kernel width - optional uint32 stride = 3 [default = 1]; // The stride (equal in Y, X) - optional uint32 stride_h = 7; // The stride height - optional uint32 stride_w = 8; // The stride width - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 11 [default = DEFAULT]; - // If global_pooling then it will pool over the size of the bottom by doing - // kernel_h = bottom->height and kernel_w = bottom->width - optional bool global_pooling = 12 [default = false]; -} - -message PowerParameter { - // PowerLayer computes outputs y = (shift + scale * x) ^ power. - optional float power = 1 [default = 1.0]; - optional float scale = 2 [default = 1.0]; - optional float shift = 3 [default = 0.0]; -} - -message PythonParameter { - optional string module = 1; - optional string layer = 2; - // This value is set to the attribute `param_str` of the `PythonLayer` object - // in Python before calling the `setup()` method. This could be a number, - // string, dictionary in Python dict format, JSON, etc. You may parse this - // string in `setup` method and use it in `forward` and `backward`. - optional string param_str = 3 [default = '']; - // Whether this PythonLayer is shared among worker solvers during data parallelism. - // If true, each worker solver sequentially run forward from this layer. 
- // This value should be set true if you are using it as a data layer. - optional bool share_in_parallel = 4 [default = false]; -} - -// Message that stores parameters used by ReductionLayer -message ReductionParameter { - enum ReductionOp { - SUM = 1; - ASUM = 2; - SUMSQ = 3; - MEAN = 4; - } - - optional ReductionOp operation = 1 [default = SUM]; // reduction operation - - // The first axis to reduce to a scalar -- may be negative to index from the - // end (e.g., -1 for the last axis). - // (Currently, only reduction along ALL "tail" axes is supported; reduction - // of axis M through N, where N < num_axes - 1, is unsupported.) - // Suppose we have an n-axis bottom Blob with shape: - // (d0, d1, d2, ..., d(m-1), dm, d(m+1), ..., d(n-1)). - // If axis == m, the output Blob will have shape - // (d0, d1, d2, ..., d(m-1)), - // and the ReductionOp operation is performed (d0 * d1 * d2 * ... * d(m-1)) - // times, each including (dm * d(m+1) * ... * d(n-1)) individual data. - // If axis == 0 (the default), the output Blob always has the empty shape - // (count 1), performing reduction across the entire input -- - // often useful for creating new loss functions. - optional int32 axis = 2 [default = 0]; - - optional float coeff = 3 [default = 1.0]; // coefficient for output -} - -// Message that stores parameters used by ReLULayer -message ReLUParameter { - // Allow non-zero slope for negative inputs to speed up optimization - // Described in: - // Maas, A. L., Hannun, A. Y., & Ng, A. Y. (2013). Rectifier nonlinearities - // improve neural network acoustic models. In ICML Workshop on Deep Learning - // for Audio, Speech, and Language Processing. - optional float negative_slope = 1 [default = 0]; - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 2 [default = DEFAULT]; -} - -message ReshapeParameter { - // Specify the output dimensions. 
If some of the dimensions are set to 0, - // the corresponding dimension from the bottom layer is used (unchanged). - // Exactly one dimension may be set to -1, in which case its value is - // inferred from the count of the bottom blob and the remaining dimensions. - // For example, suppose we want to reshape a 2D blob "input" with shape 2 x 8: - // - // layer { - // type: "Reshape" bottom: "input" top: "output" - // reshape_param { ... } - // } - // - // If "input" is 2D with shape 2 x 8, then the following reshape_param - // specifications are all equivalent, producing a 3D blob "output" with shape - // 2 x 2 x 4: - // - // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } - // reshape_param { shape { dim: 0 dim: 2 dim: 4 } } - // reshape_param { shape { dim: 0 dim: 2 dim: -1 } } - // reshape_param { shape { dim: -1 dim: 0 dim: 2 } } - // - optional BlobShape shape = 1; - - // axis and num_axes control the portion of the bottom blob's shape that are - // replaced by (included in) the reshape. By default (axis == 0 and - // num_axes == -1), the entire bottom blob shape is included in the reshape, - // and hence the shape field must specify the entire output shape. - // - // axis may be non-zero to retain some portion of the beginning of the input - // shape (and may be negative to index from the end; e.g., -1 to begin the - // reshape after the last axis, including nothing in the reshape, - // -2 to include only the last axis, etc.). - // - // For example, suppose "input" is a 2D blob with shape 2 x 8. - // Then the following ReshapeLayer specifications are all equivalent, - // producing a blob "output" with shape 2 x 2 x 4: - // - // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } - // reshape_param { shape { dim: 2 dim: 4 } axis: 1 } - // reshape_param { shape { dim: 2 dim: 4 } axis: -3 } - // - // num_axes specifies the extent of the reshape. 
- // If num_axes >= 0 (and axis >= 0), the reshape will be performed only on - // input axes in the range [axis, axis+num_axes]. - // num_axes may also be -1, the default, to include all remaining axes - // (starting from axis). - // - // For example, suppose "input" is a 2D blob with shape 2 x 8. - // Then the following ReshapeLayer specifications are equivalent, - // producing a blob "output" with shape 1 x 2 x 8. - // - // reshape_param { shape { dim: 1 dim: 2 dim: 8 } } - // reshape_param { shape { dim: 1 dim: 2 } num_axes: 1 } - // reshape_param { shape { dim: 1 } num_axes: 0 } - // - // On the other hand, these would produce output blob shape 2 x 1 x 8: - // - // reshape_param { shape { dim: 2 dim: 1 dim: 8 } } - // reshape_param { shape { dim: 1 } axis: 1 num_axes: 0 } - // - optional int32 axis = 2 [default = 0]; - optional int32 num_axes = 3 [default = -1]; -} - -message SigmoidParameter { - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 1 [default = DEFAULT]; -} - -message SliceParameter { - // The axis along which to slice -- may be negative to index from the end - // (e.g., -1 for the last axis). - // By default, SliceLayer concatenates blobs along the "channels" axis (1). - optional int32 axis = 3 [default = 1]; - repeated uint32 slice_point = 2; - - // DEPRECATED: alias for "axis" -- does not support negative indexing. - optional uint32 slice_dim = 1 [default = 1]; -} - -// Message that stores parameters used by SoftmaxLayer, SoftmaxWithLossLayer -message SoftmaxParameter { - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 1 [default = DEFAULT]; - - // The axis along which to perform the softmax -- may be negative to index - // from the end (e.g., -1 for the last axis). - // Any other axes will be evaluated as independent softmaxes. 
- optional int32 axis = 2 [default = 1]; -} - -message TanHParameter { - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 1 [default = DEFAULT]; -} - -// Message that stores parameters used by TileLayer -message TileParameter { - // The index of the axis to tile. - optional int32 axis = 1 [default = 1]; - - // The number of copies (tiles) of the blob to output. - optional int32 tiles = 2; -} - -// Message that stores parameters used by ThresholdLayer -message ThresholdParameter { - optional float threshold = 1 [default = 0]; // Strictly positive values -} - -message WindowDataParameter { - // Specify the data source. - optional string source = 1; - // For data pre-processing, we can do simple scaling and subtracting the - // data mean, if provided. Note that the mean subtraction is always carried - // out before scaling. - optional float scale = 2 [default = 1]; - optional string mean_file = 3; - // Specify the batch size. - optional uint32 batch_size = 4; - // Specify if we would like to randomly crop an image. - optional uint32 crop_size = 5 [default = 0]; - // Specify if we want to randomly mirror data. 
- optional bool mirror = 6 [default = false]; - // Foreground (object) overlap threshold - optional float fg_threshold = 7 [default = 0.5]; - // Background (non-object) overlap threshold - optional float bg_threshold = 8 [default = 0.5]; - // Fraction of batch that should be foreground objects - optional float fg_fraction = 9 [default = 0.25]; - // Amount of contextual padding to add around a window - // (used only by the window_data_layer) - optional uint32 context_pad = 10 [default = 0]; - // Mode for cropping out a detection window - // warp: cropped window is warped to a fixed size and aspect ratio - // square: the tightest square around the window is cropped - optional string crop_mode = 11 [default = "warp"]; - // cache_images: will load all images in memory for faster access - optional bool cache_images = 12 [default = false]; - // append root_folder to locate images - optional string root_folder = 13 [default = ""]; -} - -message SPPParameter { - enum PoolMethod { - MAX = 0; - AVE = 1; - STOCHASTIC = 2; - } - optional uint32 pyramid_height = 1; - optional PoolMethod pool = 2 [default = MAX]; // The pooling method - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 6 [default = DEFAULT]; -} - -// DEPRECATED: use LayerParameter. 
-message V1LayerParameter { - repeated string bottom = 2; - repeated string top = 3; - optional string name = 4; - repeated NetStateRule include = 32; - repeated NetStateRule exclude = 33; - enum LayerType { - NONE = 0; - ABSVAL = 35; - ACCURACY = 1; - ARGMAX = 30; - BNLL = 2; - CONCAT = 3; - CONTRASTIVE_LOSS = 37; - CONVOLUTION = 4; - DATA = 5; - DECONVOLUTION = 39; - DROPOUT = 6; - DUMMY_DATA = 32; - EUCLIDEAN_LOSS = 7; - ELTWISE = 25; - EXP = 38; - FLATTEN = 8; - HDF5_DATA = 9; - HDF5_OUTPUT = 10; - HINGE_LOSS = 28; - IM2COL = 11; - IMAGE_DATA = 12; - INFOGAIN_LOSS = 13; - INNER_PRODUCT = 14; - LRN = 15; - MEMORY_DATA = 29; - MULTINOMIAL_LOGISTIC_LOSS = 16; - MVN = 34; - POOLING = 17; - POWER = 26; - RELU = 18; - SIGMOID = 19; - SIGMOID_CROSS_ENTROPY_LOSS = 27; - SILENCE = 36; - SOFTMAX = 20; - SOFTMAX_LOSS = 21; - SPLIT = 22; - SLICE = 33; - TANH = 23; - TRIPLET_LOSS = 40; - WINDOW_DATA = 24; - THRESHOLD = 31; - - } - optional LayerType type = 5; - repeated BlobProto blobs = 6; - repeated string param = 1001; - repeated DimCheckMode blob_share_mode = 1002; - enum DimCheckMode { - STRICT = 0; - PERMISSIVE = 1; - } - repeated float blobs_lr = 7; - repeated float weight_decay = 8; - repeated float loss_weight = 35; - optional AccuracyParameter accuracy_param = 27; - optional ArgMaxParameter argmax_param = 23; - optional ConcatParameter concat_param = 9; - optional ContrastiveLossParameter contrastive_loss_param = 40; - optional ConvolutionParameter convolution_param = 10; - optional DataParameter data_param = 11; - optional DropoutParameter dropout_param = 12; - optional DummyDataParameter dummy_data_param = 26; - optional EltwiseParameter eltwise_param = 24; - optional ExpParameter exp_param = 41; - optional HDF5DataParameter hdf5_data_param = 13; - optional HDF5OutputParameter hdf5_output_param = 14; - optional HingeLossParameter hinge_loss_param = 29; - optional ImageDataParameter image_data_param = 15; - optional InfogainLossParameter infogain_loss_param = 16; 
- optional InnerProductParameter inner_product_param = 17; - optional LRNParameter lrn_param = 18; - optional MemoryDataParameter memory_data_param = 22; - optional MVNParameter mvn_param = 34; - optional PoolingParameter pooling_param = 19; - optional PowerParameter power_param = 21; - optional ReLUParameter relu_param = 30; - optional SigmoidParameter sigmoid_param = 38; - optional SoftmaxParameter softmax_param = 39; - optional SliceParameter slice_param = 31; - optional TanHParameter tanh_param = 37; - optional ThresholdParameter threshold_param = 25; - optional WindowDataParameter window_data_param = 20; - optional TransformationParameter transform_param = 36; - optional LossParameter loss_param = 42; - optional V0LayerParameter layer = 1; - optional TripletLossParameter triplet_loss_param = 43; -} - -// DEPRECATED: V0LayerParameter is the old way of specifying layer parameters -// in Caffe. We keep this message type around for legacy support. -message V0LayerParameter { - optional string name = 1; // the layer name - optional string type = 2; // the string to specify the layer type - - // Parameters to specify layers with inner products. 
- optional uint32 num_output = 3; // The number of outputs for the layer - optional bool biasterm = 4 [default = true]; // whether to have bias terms - optional FillerParameter weight_filler = 5; // The filler for the weight - optional FillerParameter bias_filler = 6; // The filler for the bias - - optional uint32 pad = 7 [default = 0]; // The padding size - optional uint32 kernelsize = 8; // The kernel size - optional uint32 group = 9 [default = 1]; // The group size for group conv - optional uint32 stride = 10 [default = 1]; // The stride - enum PoolMethod { - MAX = 0; - AVE = 1; - STOCHASTIC = 2; - } - optional PoolMethod pool = 11 [default = MAX]; // The pooling method - optional float dropout_ratio = 12 [default = 0.5]; // dropout ratio - - optional uint32 local_size = 13 [default = 5]; // for local response norm - optional float alpha = 14 [default = 1.]; // for local response norm - optional float beta = 15 [default = 0.75]; // for local response norm - optional float k = 22 [default = 1.]; - - // For data layers, specify the data source - optional string source = 16; - // For data pre-processing, we can do simple scaling and subtracting the - // data mean, if provided. Note that the mean subtraction is always carried - // out before scaling. - optional float scale = 17 [default = 1]; - optional string meanfile = 18; - // For data layers, specify the batch size. - optional uint32 batchsize = 19; - // For data layers, specify if we would like to randomly crop an image. - optional uint32 cropsize = 20 [default = 0]; - // For data layers, specify if we want to randomly mirror data. - optional bool mirror = 21 [default = false]; - - // The blobs containing the numeric parameters of the layer - repeated BlobProto blobs = 50; - // The ratio that is multiplied on the global learning rate. If you want to - // set the learning ratio for one blob, you need to set it for all blobs. 
- repeated float blobs_lr = 51; - // The weight decay that is multiplied on the global weight decay. - repeated float weight_decay = 52; - - // The rand_skip variable is for the data layer to skip a few data points - // to avoid all asynchronous sgd clients to start at the same point. The skip - // point would be set as rand_skip * rand(0,1). Note that rand_skip should not - // be larger than the number of keys in the database. - optional uint32 rand_skip = 53 [default = 0]; - - // Fields related to detection (det_*) - // foreground (object) overlap threshold - optional float det_fg_threshold = 54 [default = 0.5]; - // background (non-object) overlap threshold - optional float det_bg_threshold = 55 [default = 0.5]; - // Fraction of batch that should be foreground objects - optional float det_fg_fraction = 56 [default = 0.25]; - - // optional bool OBSOLETE_can_clobber = 57 [default = true]; - - // Amount of contextual padding to add around a window - // (used only by the window_data_layer) - optional uint32 det_context_pad = 58 [default = 0]; - - // Mode for cropping out a detection window - // warp: cropped window is warped to a fixed size and aspect ratio - // square: the tightest square around the window is cropped - optional string det_crop_mode = 59 [default = "warp"]; - - // For ReshapeLayer, one needs to specify the new dimensions. - optional int32 new_num = 60 [default = 0]; - optional int32 new_channels = 61 [default = 0]; - optional int32 new_height = 62 [default = 0]; - optional int32 new_width = 63 [default = 0]; - - // Whether or not ImageLayer should shuffle the list of files at every epoch. - // It will also resize images if new_height or new_width are not zero. - optional bool shuffle_images = 64 [default = false]; - - // For ConcatLayer, one needs to specify the dimension for concatenation, and - // the other dimensions must be the same for all the bottom blobs. - // By default it will concatenate blobs along the channels dimension. 
- optional uint32 concat_dim = 65 [default = 1]; - - optional HDF5OutputParameter hdf5_output_param = 1001; -} - -message PReLUParameter { - // Parametric ReLU described in K. He et al, Delving Deep into Rectifiers: - // Surpassing Human-Level Performance on ImageNet Classification, 2015. - - // Initial value of a_i. Default is a_i=0.25 for all i. - optional FillerParameter filler = 1; - // Whether or not slope paramters are shared across channels. - optional bool channel_shared = 2 [default = false]; -} diff --git a/src/caffe/proto/caffe.proto.orig.orig b/src/caffe/proto/caffe.proto.orig.orig deleted file mode 100644 index 3a0a3490bd1..00000000000 --- a/src/caffe/proto/caffe.proto.orig.orig +++ /dev/null @@ -1,1190 +0,0 @@ -syntax = "proto2"; - -package caffe; - -// Specifies the shape (dimensions) of a Blob. -message BlobShape { - repeated int64 dim = 1 [packed = true]; -} - -message BlobProto { - optional BlobShape shape = 7; - repeated float data = 5 [packed = true]; - repeated float diff = 6 [packed = true]; - - // 4D dimensions -- deprecated. Use "shape" instead. - optional int32 num = 1 [default = 0]; - optional int32 channels = 2 [default = 0]; - optional int32 height = 3 [default = 0]; - optional int32 width = 4 [default = 0]; -} - -// The BlobProtoVector is simply a way to pass multiple blobproto instances -// around. -message BlobProtoVector { - repeated BlobProto blobs = 1; -} - -message Datum { - optional int32 channels = 1; - optional int32 height = 2; - optional int32 width = 3; - // the actual image data, in bytes - optional bytes data = 4; - optional int32 label = 5; - // Optionally, the datum could also hold float data. - repeated float float_data = 6; - // If true data contains an encoded image that need to be decoded - optional bool encoded = 7 [default = false]; -} - -message FillerParameter { - // The filler type. 
- optional string type = 1 [default = 'constant']; - optional float value = 2 [default = 0]; // the value in constant filler - optional float min = 3 [default = 0]; // the min value in uniform filler - optional float max = 4 [default = 1]; // the max value in uniform filler - optional float mean = 5 [default = 0]; // the mean value in Gaussian filler - optional float std = 6 [default = 1]; // the std value in Gaussian filler - // The expected number of non-zero output weights for a given input in - // Gaussian filler -- the default -1 means don't perform sparsification. - optional int32 sparse = 7 [default = -1]; - // Normalize the filler variance by fan_in, fan_out, or their average. - // Applies to 'xavier' and 'msra' fillers. - enum VarianceNorm { - FAN_IN = 0; - FAN_OUT = 1; - AVERAGE = 2; - } - optional VarianceNorm variance_norm = 8 [default = FAN_IN]; -} - -message NetParameter { - optional string name = 1; // consider giving the network a name - // The input blobs to the network. - repeated string input = 3; - // The shape of the input blobs. - repeated BlobShape input_shape = 8; - - // 4D input dimensions -- deprecated. Use "shape" instead. - // If specified, for each input blob there should be four - // values specifying the num, channels, height and width of the input blob. - // Thus, there should be a total of (4 * #input) numbers. - repeated int32 input_dim = 4; - - // Whether the network will force every layer to carry out backward operation. - // If set False, then whether to carry out backward is determined - // automatically according to the net structure and learning rates. - optional bool force_backward = 5 [default = false]; - // The current "state" of the network, including the phase, level, and stage. - // Some layers may be included/excluded depending on this state and the states - // specified in the layers' include and exclude fields. 
- optional NetState state = 6; - - // Print debugging information about results while running Net::Forward, - // Net::Backward, and Net::Update. - optional bool debug_info = 7 [default = false]; - - // The layers that make up the net. Each of their configurations, including - // connectivity and behavior, is specified as a LayerParameter. - repeated LayerParameter layer = 100; // ID 100 so layers are printed last. - - // DEPRECATED: use 'layer' instead. - repeated V1LayerParameter layers = 2; -} - -// NOTE -// Update the next available ID when you add a new SolverParameter field. -// -// SolverParameter next available ID: 37 (last added: iter_size) -message SolverParameter { - ////////////////////////////////////////////////////////////////////////////// - // Specifying the train and test networks - // - // Exactly one train net must be specified using one of the following fields: - // train_net_param, train_net, net_param, net - // One or more test nets may be specified using any of the following fields: - // test_net_param, test_net, net_param, net - // If more than one test net field is specified (e.g., both net and - // test_net are specified), they will be evaluated in the field order given - // above: (1) test_net_param, (2) test_net, (3) net_param/net. - // A test_iter must be specified for each test_net. - // A test_level and/or a test_stage may also be specified for each test_net. - ////////////////////////////////////////////////////////////////////////////// - - // Proto filename for the train net, possibly combined with one or more - // test nets. - optional string net = 24; - // Inline train net param, possibly combined with one or more test nets. - optional NetParameter net_param = 25; - - optional string train_net = 1; // Proto filename for the train net. - repeated string test_net = 2; // Proto filenames for the test nets. - optional NetParameter train_net_param = 21; // Inline train net params. 
- repeated NetParameter test_net_param = 22; // Inline test net params. - - // The states for the train/test nets. Must be unspecified or - // specified once per net. - // - // By default, all states will have solver = true; - // train_state will have phase = TRAIN, - // and all test_state's will have phase = TEST. - // Other defaults are set according to the NetState defaults. - optional NetState train_state = 26; - repeated NetState test_state = 27; - - // The number of iterations for each test net. - repeated int32 test_iter = 3; - - // The number of iterations between two testing phases. - optional int32 test_interval = 4 [default = 0]; - optional bool test_compute_loss = 19 [default = false]; - // If true, run an initial test pass before the first iteration, - // ensuring memory availability and printing the starting value of the loss. - optional bool test_initialization = 32 [default = true]; - optional float base_lr = 5; // The base learning rate - // the number of iterations between displaying info. If display = 0, no info - // will be displayed. - optional int32 display = 6; - // Display the loss averaged over the last average_loss iterations - optional int32 average_loss = 33 [default = 1]; - optional int32 max_iter = 7; // the maximum number of iterations - // accumulate gradients over `iter_size` x `batch_size` instances - optional int32 iter_size = 36 [default = 1]; - optional string lr_policy = 8; // The learning rate decay policy. - optional float gamma = 9; // The parameter to compute the learning rate. - optional float power = 10; // The parameter to compute the learning rate. - optional float momentum = 11; // The momentum value. - optional float weight_decay = 12; // The weight decay. 
- // regularization types supported: L1 and L2 - // controlled by weight_decay - optional string regularization_type = 29 [default = "L2"]; - // the stepsize for learning rate policy "step" - optional int32 stepsize = 13; - // the stepsize for learning rate policy "multistep" - repeated int32 stepvalue = 34; - - // Set clip_gradients to >= 0 to clip parameter gradients to that L2 norm, - // whenever their actual L2 norm is larger. - optional float clip_gradients = 35 [default = -1]; - - optional int32 snapshot = 14 [default = 0]; // The snapshot interval - optional string snapshot_prefix = 15; // The prefix for the snapshot. - // whether to snapshot diff in the results or not. Snapshotting diff will help - // debugging but the final protocol buffer size will be much larger. - optional bool snapshot_diff = 16 [default = false]; - // the mode solver will use: 0 for CPU and 1 for GPU. Use GPU in default. - enum SolverMode { - CPU = 0; - GPU = 1; - } - optional SolverMode solver_mode = 17 [default = GPU]; - // the device_id will that be used in GPU mode. Use device_id = 0 in default. - optional int32 device_id = 18 [default = 0]; - // If non-negative, the seed with which the Solver will initialize the Caffe - // random number generator -- useful for reproducible results. Otherwise, - // (and by default) initialize using a seed derived from the system clock. - optional int64 random_seed = 20 [default = -1]; - - // Solver type - enum SolverType { - SGD = 0; - NESTEROV = 1; - ADAGRAD = 2; - } - optional SolverType solver_type = 30 [default = SGD]; - // numerical stability for AdaGrad - optional float delta = 31 [default = 1e-8]; - - // If true, print information about the state of the net that may help with - // debugging learning problems. - optional bool debug_info = 23 [default = false]; - - // If false, don't save a snapshot after training finishes. 
- optional bool snapshot_after_train = 28 [default = true]; -} - -// A message that stores the solver snapshots -message SolverState { - optional int32 iter = 1; // The current iteration - optional string learned_net = 2; // The file that stores the learned net. - repeated BlobProto history = 3; // The history for sgd solvers - optional int32 current_step = 4 [default = 0]; // The current step for learning rate -} - -enum Phase { - TRAIN = 0; - TEST = 1; -} - -message NetState { - optional Phase phase = 1 [default = TEST]; - optional int32 level = 2 [default = 0]; - repeated string stage = 3; -} - -message NetStateRule { - // Set phase to require the NetState have a particular phase (TRAIN or TEST) - // to meet this rule. - optional Phase phase = 1; - - // Set the minimum and/or maximum levels in which the layer should be used. - // Leave undefined to meet the rule regardless of level. - optional int32 min_level = 2; - optional int32 max_level = 3; - - // Customizable sets of stages to include or exclude. - // The net must have ALL of the specified stages and NONE of the specified - // "not_stage"s to meet the rule. - // (Use multiple NetStateRules to specify conjunctions of stages.) - repeated string stage = 4; - repeated string not_stage = 5; -} - -// Specifies training parameters (multipliers on global learning constants, -// and the name and other settings used for weight sharing). -message ParamSpec { - // The names of the parameter blobs -- useful for sharing parameters among - // layers, but never required otherwise. To share a parameter between two - // layers, give it a (non-empty) name. - optional string name = 1; - - // Whether to require shared weights to have the same shape, or just the same - // count -- defaults to STRICT if unspecified. - optional DimCheckMode share_mode = 2; - enum DimCheckMode { - // STRICT (default) requires that num, channels, height, width each match. 
- STRICT = 0; - // PERMISSIVE requires only the count (num*channels*height*width) to match. - PERMISSIVE = 1; - } - - // The multiplier on the global learning rate for this parameter. - optional float lr_mult = 3 [default = 1.0]; - - // The multiplier on the global weight decay for this parameter. - optional float decay_mult = 4 [default = 1.0]; -} - -// NOTE -// Update the next available ID when you add a new LayerParameter field. -// -<<<<<<< b30868495fbae44b9556c621a319178d919bf562 -// LayerParameter next available layer-specific ID: 137 (last added: reduction_param) -======= -<<<<<<< 7a85de9cf8ac6f0416eca95c1b991f127b8b7917 -// LayerParameter next available layer-specific ID: 137 (last added: reduction_param) -======= -// LayerParameter next available layer-specific ID: 139 (last added: tile_param) ->>>>>>> Add TileLayer ->>>>>>> add initiate class name of triplet loss layer -message LayerParameter { - optional string name = 1; // the layer name - optional string type = 2; // the layer type - repeated string bottom = 3; // the name of each bottom blob - repeated string top = 4; // the name of each top blob - - // The train / test phase for computation. - optional Phase phase = 10; - - // The amount of weight to assign each top blob in the objective. - // Each layer assigns a default value, usually of either 0 or 1, - // to each top blob. - repeated float loss_weight = 5; - - // Specifies training parameters (multipliers on global learning constants, - // and the name and other settings used for weight sharing). - repeated ParamSpec param = 6; - - // The blobs containing the numeric parameters of the layer. - repeated BlobProto blobs = 7; -<<<<<<< b30868495fbae44b9556c621a319178d919bf562 - -======= - ->>>>>>> add initiate class name of triplet loss layer - // Specifies on which bottoms the backpropagation should be skipped. - // The size must be either 0 or equal to the number of bottoms. 
- repeated bool propagate_down = 11; - - // Rules controlling whether and when a layer is included in the network, - // based on the current NetState. You may specify a non-zero number of rules - // to include OR exclude, but not both. If no include or exclude rules are - // specified, the layer is always included. If the current NetState meets - // ANY (i.e., one or more) of the specified rules, the layer is - // included/excluded. - repeated NetStateRule include = 8; - repeated NetStateRule exclude = 9; - - // Parameters for data pre-processing. - optional TransformationParameter transform_param = 100; - - // Parameters shared by loss layers. - optional LossParameter loss_param = 101; - - // Layer type-specific parameters. - // - // Note: certain layers may have more than one computational engine - // for their implementation. These layers include an Engine type and - // engine parameter for selecting the implementation. - // The default for the engine is set by the ENGINE switch at compile-time. 
- optional AccuracyParameter accuracy_param = 102; - optional ArgMaxParameter argmax_param = 103; - optional ConcatParameter concat_param = 104; - optional ContrastiveLossParameter contrastive_loss_param = 105; - optional ConvolutionParameter convolution_param = 106; - optional DataParameter data_param = 107; - optional DropoutParameter dropout_param = 108; - optional DummyDataParameter dummy_data_param = 109; - optional EltwiseParameter eltwise_param = 110; - optional ExpParameter exp_param = 111; - optional FlattenParameter flatten_param = 135; - optional HDF5DataParameter hdf5_data_param = 112; - optional HDF5OutputParameter hdf5_output_param = 113; - optional HingeLossParameter hinge_loss_param = 114; - optional ImageDataParameter image_data_param = 115; - optional InfogainLossParameter infogain_loss_param = 116; - optional InnerProductParameter inner_product_param = 117; - optional LogParameter log_param = 134; - optional LRNParameter lrn_param = 118; - optional MemoryDataParameter memory_data_param = 119; - optional MVNParameter mvn_param = 120; - optional PoolingParameter pooling_param = 121; - optional PowerParameter power_param = 122; - optional PReLUParameter prelu_param = 131; - optional PythonParameter python_param = 130; - optional ReductionParameter reduction_param = 136; - optional ReLUParameter relu_param = 123; - optional ReshapeParameter reshape_param = 133; - optional SigmoidParameter sigmoid_param = 124; - optional SoftmaxParameter softmax_param = 125; - optional SPPParameter spp_param = 132; - optional SliceParameter slice_param = 126; - optional TanHParameter tanh_param = 127; - optional ThresholdParameter threshold_param = 128; - optional WindowDataParameter window_data_param = 129; -<<<<<<< b30868495fbae44b9556c621a319178d919bf562 -<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da -======= - optional TripletLossParameter triplet_loss_param = 137; ->>>>>>> triplet data generation and network update -======= - optional TripletLossParameter 
triplet_loss_param = 137; ->>>>>>> add initiate class name of triplet loss layer -} - -// Message that stores parameters used to apply transformation -// to the data layer's data -message TransformationParameter { - // For data pre-processing, we can do simple scaling and subtracting the - // data mean, if provided. Note that the mean subtraction is always carried - // out before scaling. - optional float scale = 1 [default = 1]; - // Specify if we want to randomly mirror data. - optional bool mirror = 2 [default = false]; - // Specify if we would like to randomly crop an image. - optional uint32 crop_size = 3 [default = 0]; - // mean_file and mean_value cannot be specified at the same time - optional string mean_file = 4; - // if specified can be repeated once (would substract it from all the channels) - // or can be repeated the same number of times as channels - // (would subtract them from the corresponding channel) - repeated float mean_value = 5; - // Force the decoded image to have 3 color channels. - optional bool force_color = 6 [default = false]; - // Force the decoded image to have 1 color channels. - optional bool force_gray = 7 [default = false]; -} - -// Message that stores parameters shared by loss layers -message LossParameter { - // If specified, ignore instances with the given label. - optional int32 ignore_label = 1; - // If true, normalize each batch across all instances (including spatial - // dimesions, but not ignored instances); else, divide by batch size only. - optional bool normalize = 2 [default = true]; -} - -// Messages that store parameters used by individual layer types follow, in -// alphabetical order. - -message AccuracyParameter { - // When computing accuracy, count as correct by comparing the true label to - // the top k scoring classes. By default, only compare to the top scoring - // class (i.e. argmax). 
- optional uint32 top_k = 1 [default = 1]; - - // The "label" axis of the prediction blob, whose argmax corresponds to the - // predicted label -- may be negative to index from the end (e.g., -1 for the - // last axis). For example, if axis == 1 and the predictions are - // (N x C x H x W), the label blob is expected to contain N*H*W ground truth - // labels with integer values in {0, 1, ..., C-1}. - optional int32 axis = 2 [default = 1]; - - // If specified, ignore instances with the given label. - optional int32 ignore_label = 3; -} - -message ArgMaxParameter { - // If true produce pairs (argmax, maxval) - optional bool out_max_val = 1 [default = false]; - optional uint32 top_k = 2 [default = 1]; -} - -message ConcatParameter { - // The axis along which to concatenate -- may be negative to index from the - // end (e.g., -1 for the last axis). Other axes must have the - // same dimension for all the bottom blobs. - // By default, ConcatLayer concatenates blobs along the "channels" axis (1). - optional int32 axis = 2 [default = 1]; - - // DEPRECATED: alias for "axis" -- does not support negative indexing. - optional uint32 concat_dim = 1 [default = 1]; -} - -message ContrastiveLossParameter { - // margin for dissimilar pair - optional float margin = 1 [default = 1.0]; - // The first implementation of this cost did not exactly match the cost of - // Hadsell et al 2006 -- using (margin - d^2) instead of (margin - d)^2. - // legacy_version = false (the default) uses (margin - d)^2 as proposed in the - // Hadsell paper. New models should probably use this version. - // legacy_version = true uses (margin - d^2). 
This is kept to support / - // reproduce existing models and results - optional bool legacy_version = 2 [default = false]; -} - -<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da -======= -message TripletLossParameter { - //margin for negative triplet - optional float margin = 1 [default = 1.0]; -<<<<<<< 8bea48cb77e890cda255da58b142f3a402196328 -======= - optional uint32 losstype = 2 [default = 1]; - optional uint32 num_triplets = 3 [default = 3]; -<<<<<<< b30868495fbae44b9556c621a319178d919bf562 ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise -} - ->>>>>>> triplet data generation and network update -======= -} - ->>>>>>> add initiate class name of triplet loss layer -message ConvolutionParameter { - optional uint32 num_output = 1; // The number of outputs for the layer - optional bool bias_term = 2 [default = true]; // whether to have bias terms - // Pad, kernel size, and stride are all given as a single value for equal - // dimensions in height and width or as Y, X pairs. 
- optional uint32 pad = 3 [default = 0]; // The padding size (equal in Y, X) - optional uint32 pad_h = 9 [default = 0]; // The padding height - optional uint32 pad_w = 10 [default = 0]; // The padding width - optional uint32 kernel_size = 4; // The kernel size (square) - optional uint32 kernel_h = 11; // The kernel height - optional uint32 kernel_w = 12; // The kernel width - optional uint32 group = 5 [default = 1]; // The group size for group conv - optional uint32 stride = 6 [default = 1]; // The stride (equal in Y, X) - optional uint32 stride_h = 13; // The stride height - optional uint32 stride_w = 14; // The stride width - optional FillerParameter weight_filler = 7; // The filler for the weight - optional FillerParameter bias_filler = 8; // The filler for the bias - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 15 [default = DEFAULT]; -} - -message DataParameter { - enum DB { - LEVELDB = 0; - LMDB = 1; - } - // Specify the data source. - optional string source = 1; - // Specify the batch size. - optional uint32 batch_size = 4 [default = 1]; - // The rand_skip variable is for the data layer to skip a few data points - // to avoid all asynchronous sgd clients to start at the same point. The skip - // point would be set as rand_skip * rand(0,1). Note that rand_skip should not - // be larger than the number of keys in the database. - optional uint32 rand_skip = 7 [default = 0]; - optional DB backend = 8 [default = LEVELDB]; - // DEPRECATED. See TransformationParameter. For data pre-processing, we can do - // simple scaling and subtracting the data mean, if provided. Note that the - // mean subtraction is always carried out before scaling. - optional float scale = 2 [default = 1]; - optional string mean_file = 3; - // DEPRECATED. See TransformationParameter. Specify if we would like to randomly - // crop an image. - optional uint32 crop_size = 5 [default = 0]; - // DEPRECATED. See TransformationParameter. 
Specify if we want to randomly mirror - // data. - optional bool mirror = 6 [default = false]; - // Force the encoded image to have 3 color channels - optional bool force_encoded_color = 9 [default = false]; -} - -message DropoutParameter { - optional float dropout_ratio = 1 [default = 0.5]; // dropout ratio -} - -// DummyDataLayer fills any number of arbitrarily shaped blobs with random -// (or constant) data generated by "Fillers" (see "message FillerParameter"). -message DummyDataParameter { - // This layer produces N >= 1 top blobs. DummyDataParameter must specify 1 or N - // shape fields, and 0, 1 or N data_fillers. - // - // If 0 data_fillers are specified, ConstantFiller with a value of 0 is used. - // If 1 data_filler is specified, it is applied to all top blobs. If N are - // specified, the ith is applied to the ith top blob. - repeated FillerParameter data_filler = 1; - repeated BlobShape shape = 6; - - // 4D dimensions -- deprecated. Use "shape" instead. - repeated uint32 num = 2; - repeated uint32 channels = 3; - repeated uint32 height = 4; - repeated uint32 width = 5; -} - -message EltwiseParameter { - enum EltwiseOp { - PROD = 0; - SUM = 1; - MAX = 2; - } - optional EltwiseOp operation = 1 [default = SUM]; // element-wise operation - repeated float coeff = 2; // blob-wise coefficient for SUM operation - - // Whether to use an asymptotically slower (for >2 inputs) but stabler method - // of computing the gradient for the PROD operation. (No effect for SUM op.) - optional bool stable_prod_grad = 3 [default = true]; -} - -message ExpParameter { - // ExpLayer computes outputs y = base ^ (shift + scale * x), for base > 0. - // Or if base is set to the default (-1), base is set to e, - // so y = exp(shift + scale * x). 
- optional float base = 1 [default = -1.0]; - optional float scale = 2 [default = 1.0]; - optional float shift = 3 [default = 0.0]; -} - -/// Message that stores parameters used by FlattenLayer -message FlattenParameter { - // The first axis to flatten: all preceding axes are retained in the output. - // May be negative to index from the end (e.g., -1 for the last axis). - optional int32 axis = 1 [default = 1]; - - // The last axis to flatten: all following axes are retained in the output. - // May be negative to index from the end (e.g., the default -1 for the last - // axis). - optional int32 end_axis = 2 [default = -1]; -} - -// Message that stores parameters used by HDF5DataLayer -message HDF5DataParameter { - // Specify the data source. - optional string source = 1; - // Specify the batch size. - optional uint32 batch_size = 2; - - // Specify whether to shuffle the data. - // If shuffle == true, the ordering of the HDF5 files is shuffled, - // and the ordering of data within any given HDF5 file is shuffled, - // but data between different files are not interleaved; all of a file's - // data are output (in a random order) before moving onto another file. - optional bool shuffle = 3 [default = false]; -} - -message HDF5OutputParameter { - optional string file_name = 1; -} - -message HingeLossParameter { - enum Norm { - L1 = 1; - L2 = 2; - } - // Specify the Norm to use L1 or L2 - optional Norm norm = 1 [default = L1]; -} - -message ImageDataParameter { - // Specify the data source. - optional string source = 1; - // Specify the batch size. - optional uint32 batch_size = 4 [default = 1]; - // The rand_skip variable is for the data layer to skip a few data points - // to avoid all asynchronous sgd clients to start at the same point. The skip - // point would be set as rand_skip * rand(0,1). Note that rand_skip should not - // be larger than the number of keys in the database. 
- optional uint32 rand_skip = 7 [default = 0]; - // Whether or not ImageLayer should shuffle the list of files at every epoch. - optional bool shuffle = 8 [default = false]; - // It will also resize images if new_height or new_width are not zero. - optional uint32 new_height = 9 [default = 0]; - optional uint32 new_width = 10 [default = 0]; - // Specify if the images are color or gray - optional bool is_color = 11 [default = true]; - // DEPRECATED. See TransformationParameter. For data pre-processing, we can do - // simple scaling and subtracting the data mean, if provided. Note that the - // mean subtraction is always carried out before scaling. - optional float scale = 2 [default = 1]; - optional string mean_file = 3; - // DEPRECATED. See TransformationParameter. Specify if we would like to randomly - // crop an image. - optional uint32 crop_size = 5 [default = 0]; - // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror - // data. - optional bool mirror = 6 [default = false]; - optional string root_folder = 12 [default = ""]; -} - -message InfogainLossParameter { - // Specify the infogain matrix source. - optional string source = 1; -} - -message InnerProductParameter { - optional uint32 num_output = 1; // The number of outputs for the layer - optional bool bias_term = 2 [default = true]; // whether to have bias terms - optional FillerParameter weight_filler = 3; // The filler for the weight - optional FillerParameter bias_filler = 4; // The filler for the bias - - // The first axis to be lumped into a single inner product computation; - // all preceding axes are retained in the output. - // May be negative to index from the end (e.g., -1 for the last axis). - optional int32 axis = 5 [default = 1]; -} - -// Message that stores parameters used by LogLayer -message LogParameter { - // LogLayer computes outputs y = log_base(shift + scale * x), for base > 0. 
- // Or if base is set to the default (-1), base is set to e, - // so y = ln(shift + scale * x) = log_e(shift + scale * x) - optional float base = 1 [default = -1.0]; - optional float scale = 2 [default = 1.0]; - optional float shift = 3 [default = 0.0]; -} - -// Message that stores parameters used by LRNLayer -message LRNParameter { - optional uint32 local_size = 1 [default = 5]; - optional float alpha = 2 [default = 1.]; - optional float beta = 3 [default = 0.75]; - enum NormRegion { - ACROSS_CHANNELS = 0; - WITHIN_CHANNEL = 1; - } - optional NormRegion norm_region = 4 [default = ACROSS_CHANNELS]; - optional float k = 5 [default = 1.]; -} - -message MemoryDataParameter { - optional uint32 batch_size = 1; - optional uint32 channels = 2; - optional uint32 height = 3; - optional uint32 width = 4; -} - -message MVNParameter { - // This parameter can be set to false to normalize mean only - optional bool normalize_variance = 1 [default = true]; - - // This parameter can be set to true to perform DNN-like MVN - optional bool across_channels = 2 [default = false]; - - // Epsilon for not dividing by zero while normalizing variance - optional float eps = 3 [default = 1e-9]; -} - -message PoolingParameter { - enum PoolMethod { - MAX = 0; - AVE = 1; - STOCHASTIC = 2; - } - optional PoolMethod pool = 1 [default = MAX]; // The pooling method - // Pad, kernel size, and stride are all given as a single value for equal - // dimensions in height and width or as Y, X pairs. 
- optional uint32 pad = 4 [default = 0]; // The padding size (equal in Y, X) - optional uint32 pad_h = 9 [default = 0]; // The padding height - optional uint32 pad_w = 10 [default = 0]; // The padding width - optional uint32 kernel_size = 2; // The kernel size (square) - optional uint32 kernel_h = 5; // The kernel height - optional uint32 kernel_w = 6; // The kernel width - optional uint32 stride = 3 [default = 1]; // The stride (equal in Y, X) - optional uint32 stride_h = 7; // The stride height - optional uint32 stride_w = 8; // The stride width - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 11 [default = DEFAULT]; - // If global_pooling then it will pool over the size of the bottom by doing - // kernel_h = bottom->height and kernel_w = bottom->width - optional bool global_pooling = 12 [default = false]; -} - -message PowerParameter { - // PowerLayer computes outputs y = (shift + scale * x) ^ power. - optional float power = 1 [default = 1.0]; - optional float scale = 2 [default = 1.0]; - optional float shift = 3 [default = 0.0]; -} - -message PythonParameter { - optional string module = 1; - optional string layer = 2; -} - -// Message that stores parameters used by ReductionLayer -message ReductionParameter { - enum ReductionOp { - SUM = 1; - ASUM = 2; - SUMSQ = 3; - MEAN = 4; - } - - optional ReductionOp operation = 1 [default = SUM]; // reduction operation - - // The first axis to reduce to a scalar -- may be negative to index from the - // end (e.g., -1 for the last axis). - // (Currently, only reduction along ALL "tail" axes is supported; reduction - // of axis M through N, where N < num_axes - 1, is unsupported.) - // Suppose we have an n-axis bottom Blob with shape: - // (d0, d1, d2, ..., d(m-1), dm, d(m+1), ..., d(n-1)). - // If axis == m, the output Blob will have shape - // (d0, d1, d2, ..., d(m-1)), - // and the ReductionOp operation is performed (d0 * d1 * d2 * ... 
* d(m-1)) - // times, each including (dm * d(m+1) * ... * d(n-1)) individual data. - // If axis == 0 (the default), the output Blob always has the empty shape - // (count 1), performing reduction across the entire input -- - // often useful for creating new loss functions. - optional int32 axis = 2 [default = 0]; - - optional float coeff = 3 [default = 1.0]; // coefficient for output -} - -// Message that stores parameters used by ReductionLayer -message ReductionParameter { - enum ReductionOp { - SUM = 1; - ASUM = 2; - SUMSQ = 3; - MEAN = 4; - } - - optional ReductionOp operation = 1 [default = SUM]; // reduction operation - - // The first axis to reduce to a scalar -- may be negative to index from the - // end (e.g., -1 for the last axis). - // (Currently, only reduction along ALL "tail" axes is supported; reduction - // of axis M through N, where N < num_axes - 1, is unsupported.) - // Suppose we have an n-axis bottom Blob with shape: - // (d0, d1, d2, ..., d(m-1), dm, d(m+1), ..., d(n-1)). - // If axis == m, the output Blob will have shape - // (d0, d1, d2, ..., d(m-1)), - // and the ReductionOp operation is performed (d0 * d1 * d2 * ... * d(m-1)) - // times, each including (dm * d(m+1) * ... * d(n-1)) individual data. - // If axis == 0 (the default), the output Blob always has the empty shape - // (count 1), performing reduction across the entire input -- - // often useful for creating new loss functions. - optional int32 axis = 2 [default = 0]; - - optional float coeff = 3 [default = 1.0]; // coefficient for output -} - -// Message that stores parameters used by ReLULayer -message ReLUParameter { - // Allow non-zero slope for negative inputs to speed up optimization - // Described in: - // Maas, A. L., Hannun, A. Y., & Ng, A. Y. (2013). Rectifier nonlinearities - // improve neural network acoustic models. In ICML Workshop on Deep Learning - // for Audio, Speech, and Language Processing. 
- optional float negative_slope = 1 [default = 0]; - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 2 [default = DEFAULT]; -} - -message ReshapeParameter { - // Specify the output dimensions. If some of the dimensions are set to 0, - // the corresponding dimension from the bottom layer is used (unchanged). - // Exactly one dimension may be set to -1, in which case its value is - // inferred from the count of the bottom blob and the remaining dimensions. - // For example, suppose we want to reshape a 2D blob "input" with shape 2 x 8: - // - // layer { - // type: "Reshape" bottom: "input" top: "output" - // reshape_param { ... } - // } - // - // If "input" is 2D with shape 2 x 8, then the following reshape_param - // specifications are all equivalent, producing a 3D blob "output" with shape - // 2 x 2 x 4: - // - // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } - // reshape_param { shape { dim: 0 dim: 2 dim: 4 } } - // reshape_param { shape { dim: 0 dim: 2 dim: -1 } } - // reshape_param { shape { dim: -1 dim: 0 dim: 2 } } - // - optional BlobShape shape = 1; - - // axis and num_axes control the portion of the bottom blob's shape that are - // replaced by (included in) the reshape. By default (axis == 0 and - // num_axes == -1), the entire bottom blob shape is included in the reshape, - // and hence the shape field must specify the entire output shape. - // - // axis may be non-zero to retain some portion of the beginning of the input - // shape (and may be negative to index from the end; e.g., -1 to begin the - // reshape after the last axis, including nothing in the reshape, - // -2 to include only the last axis, etc.). - // - // For example, suppose "input" is a 2D blob with shape 2 x 8. 
- // Then the following ReshapeLayer specifications are all equivalent, - // producing a blob "output" with shape 2 x 2 x 4: - // - // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } - // reshape_param { shape { dim: 2 dim: 4 } axis: 1 } - // reshape_param { shape { dim: 2 dim: 4 } axis: -3 } - // - // num_axes specifies the extent of the reshape. - // If num_axes >= 0 (and axis >= 0), the reshape will be performed only on - // input axes in the range [axis, axis+num_axes]. - // num_axes may also be -1, the default, to include all remaining axes - // (starting from axis). - // - // For example, suppose "input" is a 2D blob with shape 2 x 8. - // Then the following ReshapeLayer specifications are equivalent, - // producing a blob "output" with shape 1 x 2 x 8. - // - // reshape_param { shape { dim: 1 dim: 2 dim: 8 } } - // reshape_param { shape { dim: 1 dim: 2 } num_axes: 1 } - // reshape_param { shape { dim: 1 } num_axes: 0 } - // - // On the other hand, these would produce output blob shape 2 x 1 x 8: - // - // reshape_param { shape { dim: 2 dim: 1 dim: 8 } } - // reshape_param { shape { dim: 1 } axis: 1 num_axes: 0 } - // - optional int32 axis = 2 [default = 0]; - optional int32 num_axes = 3 [default = -1]; -} - -message SigmoidParameter { - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 1 [default = DEFAULT]; -} - -message SliceParameter { - // The axis along which to slice -- may be negative to index from the end - // (e.g., -1 for the last axis). - // By default, SliceLayer concatenates blobs along the "channels" axis (1). - optional int32 axis = 3 [default = 1]; - repeated uint32 slice_point = 2; - - // DEPRECATED: alias for "axis" -- does not support negative indexing. 
- optional uint32 slice_dim = 1 [default = 1]; -} - -// Message that stores parameters used by SoftmaxLayer, SoftmaxWithLossLayer -message SoftmaxParameter { - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 1 [default = DEFAULT]; - - // The axis along which to perform the softmax -- may be negative to index - // from the end (e.g., -1 for the last axis). - // Any other axes will be evaluated as independent softmaxes. - optional int32 axis = 2 [default = 1]; -} - -message TanHParameter { - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 1 [default = DEFAULT]; -} - -<<<<<<< b30868495fbae44b9556c621a319178d919bf562 -======= -// Message that stores parameters used by TileLayer -message TileParameter { - // The index of the axis to tile. - optional int32 axis = 1 [default = 1]; - - // The number of copies (tiles) of the blob to output. - optional int32 tiles = 2; -} - -// Message that stores parameters used by ThresholdLayer ->>>>>>> add initiate class name of triplet loss layer -message ThresholdParameter { - optional float threshold = 1 [default = 0]; // Strictly positive values -} - -message WindowDataParameter { - // Specify the data source. - optional string source = 1; - // For data pre-processing, we can do simple scaling and subtracting the - // data mean, if provided. Note that the mean subtraction is always carried - // out before scaling. - optional float scale = 2 [default = 1]; - optional string mean_file = 3; - // Specify the batch size. - optional uint32 batch_size = 4; - // Specify if we would like to randomly crop an image. - optional uint32 crop_size = 5 [default = 0]; - // Specify if we want to randomly mirror data. 
- optional bool mirror = 6 [default = false]; - // Foreground (object) overlap threshold - optional float fg_threshold = 7 [default = 0.5]; - // Background (non-object) overlap threshold - optional float bg_threshold = 8 [default = 0.5]; - // Fraction of batch that should be foreground objects - optional float fg_fraction = 9 [default = 0.25]; - // Amount of contextual padding to add around a window - // (used only by the window_data_layer) - optional uint32 context_pad = 10 [default = 0]; - // Mode for cropping out a detection window - // warp: cropped window is warped to a fixed size and aspect ratio - // square: the tightest square around the window is cropped - optional string crop_mode = 11 [default = "warp"]; - // cache_images: will load all images in memory for faster access - optional bool cache_images = 12 [default = false]; - // append root_folder to locate images - optional string root_folder = 13 [default = ""]; -} - -message SPPParameter { - enum PoolMethod { - MAX = 0; - AVE = 1; - STOCHASTIC = 2; - } - optional uint32 pyramid_height = 1; - optional PoolMethod pool = 2 [default = MAX]; // The pooling method - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 6 [default = DEFAULT]; -} - -// DEPRECATED: use LayerParameter. 
-message V1LayerParameter { - repeated string bottom = 2; - repeated string top = 3; - optional string name = 4; - repeated NetStateRule include = 32; - repeated NetStateRule exclude = 33; - enum LayerType { - NONE = 0; - ABSVAL = 35; - ACCURACY = 1; - ARGMAX = 30; - BNLL = 2; - CONCAT = 3; - CONTRASTIVE_LOSS = 37; - CONVOLUTION = 4; - DATA = 5; - DECONVOLUTION = 39; - DROPOUT = 6; - DUMMY_DATA = 32; - EUCLIDEAN_LOSS = 7; - ELTWISE = 25; - EXP = 38; - FLATTEN = 8; - HDF5_DATA = 9; - HDF5_OUTPUT = 10; - HINGE_LOSS = 28; - IM2COL = 11; - IMAGE_DATA = 12; - INFOGAIN_LOSS = 13; - INNER_PRODUCT = 14; - LRN = 15; - MEMORY_DATA = 29; - MULTINOMIAL_LOGISTIC_LOSS = 16; - MVN = 34; - POOLING = 17; - POWER = 26; - RELU = 18; - SIGMOID = 19; - SIGMOID_CROSS_ENTROPY_LOSS = 27; - SILENCE = 36; - SOFTMAX = 20; - SOFTMAX_LOSS = 21; - SPLIT = 22; - SLICE = 33; - TANH = 23; - WINDOW_DATA = 24; - THRESHOLD = 31; - - } - optional LayerType type = 5; - repeated BlobProto blobs = 6; - repeated string param = 1001; - repeated DimCheckMode blob_share_mode = 1002; - enum DimCheckMode { - STRICT = 0; - PERMISSIVE = 1; - } - repeated float blobs_lr = 7; - repeated float weight_decay = 8; - repeated float loss_weight = 35; - optional AccuracyParameter accuracy_param = 27; - optional ArgMaxParameter argmax_param = 23; - optional ConcatParameter concat_param = 9; - optional ContrastiveLossParameter contrastive_loss_param = 40; - optional ConvolutionParameter convolution_param = 10; - optional DataParameter data_param = 11; - optional DropoutParameter dropout_param = 12; - optional DummyDataParameter dummy_data_param = 26; - optional EltwiseParameter eltwise_param = 24; - optional ExpParameter exp_param = 41; - optional HDF5DataParameter hdf5_data_param = 13; - optional HDF5OutputParameter hdf5_output_param = 14; - optional HingeLossParameter hinge_loss_param = 29; - optional ImageDataParameter image_data_param = 15; - optional InfogainLossParameter infogain_loss_param = 16; - optional 
InnerProductParameter inner_product_param = 17; - optional LRNParameter lrn_param = 18; - optional MemoryDataParameter memory_data_param = 22; - optional MVNParameter mvn_param = 34; - optional PoolingParameter pooling_param = 19; - optional PowerParameter power_param = 21; - optional ReLUParameter relu_param = 30; - optional SigmoidParameter sigmoid_param = 38; - optional SoftmaxParameter softmax_param = 39; - optional SliceParameter slice_param = 31; - optional TanHParameter tanh_param = 37; - optional ThresholdParameter threshold_param = 25; - optional WindowDataParameter window_data_param = 20; - optional TransformationParameter transform_param = 36; - optional LossParameter loss_param = 42; - optional V0LayerParameter layer = 1; -} - -// DEPRECATED: V0LayerParameter is the old way of specifying layer parameters -// in Caffe. We keep this message type around for legacy support. -message V0LayerParameter { - optional string name = 1; // the layer name - optional string type = 2; // the string to specify the layer type - - // Parameters to specify layers with inner products. 
- optional uint32 num_output = 3; // The number of outputs for the layer - optional bool biasterm = 4 [default = true]; // whether to have bias terms - optional FillerParameter weight_filler = 5; // The filler for the weight - optional FillerParameter bias_filler = 6; // The filler for the bias - - optional uint32 pad = 7 [default = 0]; // The padding size - optional uint32 kernelsize = 8; // The kernel size - optional uint32 group = 9 [default = 1]; // The group size for group conv - optional uint32 stride = 10 [default = 1]; // The stride - enum PoolMethod { - MAX = 0; - AVE = 1; - STOCHASTIC = 2; - } - optional PoolMethod pool = 11 [default = MAX]; // The pooling method - optional float dropout_ratio = 12 [default = 0.5]; // dropout ratio - - optional uint32 local_size = 13 [default = 5]; // for local response norm - optional float alpha = 14 [default = 1.]; // for local response norm - optional float beta = 15 [default = 0.75]; // for local response norm - optional float k = 22 [default = 1.]; - - // For data layers, specify the data source - optional string source = 16; - // For data pre-processing, we can do simple scaling and subtracting the - // data mean, if provided. Note that the mean subtraction is always carried - // out before scaling. - optional float scale = 17 [default = 1]; - optional string meanfile = 18; - // For data layers, specify the batch size. - optional uint32 batchsize = 19; - // For data layers, specify if we would like to randomly crop an image. - optional uint32 cropsize = 20 [default = 0]; - // For data layers, specify if we want to randomly mirror data. - optional bool mirror = 21 [default = false]; - - // The blobs containing the numeric parameters of the layer - repeated BlobProto blobs = 50; - // The ratio that is multiplied on the global learning rate. If you want to - // set the learning ratio for one blob, you need to set it for all blobs. 
- repeated float blobs_lr = 51; - // The weight decay that is multiplied on the global weight decay. - repeated float weight_decay = 52; - - // The rand_skip variable is for the data layer to skip a few data points - // to avoid all asynchronous sgd clients to start at the same point. The skip - // point would be set as rand_skip * rand(0,1). Note that rand_skip should not - // be larger than the number of keys in the database. - optional uint32 rand_skip = 53 [default = 0]; - - // Fields related to detection (det_*) - // foreground (object) overlap threshold - optional float det_fg_threshold = 54 [default = 0.5]; - // background (non-object) overlap threshold - optional float det_bg_threshold = 55 [default = 0.5]; - // Fraction of batch that should be foreground objects - optional float det_fg_fraction = 56 [default = 0.25]; - - // optional bool OBSOLETE_can_clobber = 57 [default = true]; - - // Amount of contextual padding to add around a window - // (used only by the window_data_layer) - optional uint32 det_context_pad = 58 [default = 0]; - - // Mode for cropping out a detection window - // warp: cropped window is warped to a fixed size and aspect ratio - // square: the tightest square around the window is cropped - optional string det_crop_mode = 59 [default = "warp"]; - - // For ReshapeLayer, one needs to specify the new dimensions. - optional int32 new_num = 60 [default = 0]; - optional int32 new_channels = 61 [default = 0]; - optional int32 new_height = 62 [default = 0]; - optional int32 new_width = 63 [default = 0]; - - // Whether or not ImageLayer should shuffle the list of files at every epoch. - // It will also resize images if new_height or new_width are not zero. - optional bool shuffle_images = 64 [default = false]; - - // For ConcatLayer, one needs to specify the dimension for concatenation, and - // the other dimensions must be the same for all the bottom blobs. - // By default it will concatenate blobs along the channels dimension. 
- optional uint32 concat_dim = 65 [default = 1]; - - optional HDF5OutputParameter hdf5_output_param = 1001; -} - -message PReLUParameter { - // Parametric ReLU described in K. He et al, Delving Deep into Rectifiers: - // Surpassing Human-Level Performance on ImageNet Classification, 2015. - - // Initial value of a_i. Default is a_i=0.25 for all i. - optional FillerParameter filler = 1; - // Whether or not slope paramters are shared across channels. - optional bool channel_shared = 2 [default = false]; -} diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp index b6ebf2736ac..394ec3b3ad7 100644 --- a/src/caffe/solver.cpp +++ b/src/caffe/solver.cpp @@ -55,7 +55,6 @@ void Solver::Init(const SolverParameter& param) { << std::endl << param.DebugString(); param_ = param; CHECK_GE(param_.average_loss(), 1) << "average_loss should be non-negative."; - CheckSnapshotWritePermissions(); if (Caffe::root_solver() && param_.random_seed() >= 0) { Caffe::set_random_seed(param_.random_seed()); } @@ -207,27 +206,19 @@ void Solver::Step(int iters) { while (iter_ < stop_iter) { // zero-init the params - for (int i = 0; i < net_->params().size(); ++i) { - shared_ptr > blob = net_->params()[i]; - switch (Caffe::mode()) { - case Caffe::CPU: - caffe_set(blob->count(), static_cast(0), - blob->mutable_cpu_diff()); - break; - case Caffe::GPU: -#ifndef CPU_ONLY - caffe_gpu_set(blob->count(), static_cast(0), - blob->mutable_gpu_diff()); -#else - NO_GPU; -#endif + net_->ClearParamDiffs(); + if (param_.test_interval() && iter_ % param_.test_interval() == 0 + && (iter_ > 0 || param_.test_initialization()) + && Caffe::root_solver()) { + TestAll(); + if (requested_early_exit_) { + // Break out of the while loop because stop was requested while testing. 
break; } } - if (param_.test_interval() && iter_ % param_.test_interval() == 0 - && (iter_ > 0 || param_.test_initialization())) { - TestAll(); + for (int i = 0; i < callbacks_.size(); ++i) { + callbacks_[i]->on_start(); } const bool display = param_.display() && iter_ % param_.display() == 0; net_->set_debug_info(display && param_.debug_info()); @@ -270,14 +261,22 @@ void Solver::Step(int iters) { } } } + for (int i = 0; i < callbacks_.size(); ++i) { + callbacks_[i]->on_gradients_ready(); + } ApplyUpdate(); // Increment the internal iter_ counter -- its value should always indicate // the number of times the weights have been updated. ++iter_; + SolverAction::Enum request = GetRequestedAction(); + // Save a snapshot if needed. - if (param_.snapshot() && iter_ % param_.snapshot() == 0) { + if ((param_.snapshot() + && iter_ % param_.snapshot() == 0 + && Caffe::root_solver()) || + (request == SolverAction::SNAPSHOT)) { Snapshot(); } if (SolverAction::STOP == request) { @@ -435,42 +434,13 @@ void Solver::Snapshot() { SnapshotSolverState(model_filename); } -template -void Solver::CheckSnapshotWritePermissions() { - if (Caffe::root_solver() && param_.snapshot()) { - CHECK(param_.has_snapshot_prefix()) - << "In solver params, snapshot is specified but snapshot_prefix is not"; - string probe_filename = SnapshotFilename(".tempfile"); - std::ofstream probe_ofs(probe_filename.c_str()); - if (probe_ofs.good()) { - probe_ofs.close(); - std::remove(probe_filename.c_str()); - } else { - LOG(FATAL) << "Cannot write to snapshot prefix '" - << param_.snapshot_prefix() << "'. 
Make sure " - << "that the directory exists and is writeable."; - } - } -} - template string Solver::SnapshotFilename(const string extension) { string filename(param_.snapshot_prefix()); const int kBufferSize = 20; char iter_str_buffer[kBufferSize]; snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_); - filename += iter_str_buffer; - model_filename = filename + ".caffemodel"; - LOG(INFO) << "Snapshotting to " << model_filename; - WriteProtoToBinaryFile(net_param, model_filename.c_str()); - SolverState state; - SnapshotSolverState(&state); - state.set_iter(iter_); - state.set_learned_net(model_filename); - state.set_current_step(current_step_); - snapshot_filename = filename + ".solverstate"; - LOG(INFO) << "Snapshotting solver state to " << snapshot_filename; - WriteProtoToBinaryFile(state, snapshot_filename.c_str()); + return filename + iter_str_buffer + extension; } template @@ -595,12 +565,14 @@ void SGDSolver::ClipGradients() { template void SGDSolver::ApplyUpdate() { + CHECK(Caffe::root_solver()); Dtype rate = GetLearningRate(); if (this->param_.display() && this->iter_ % this->param_.display() == 0) { LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; } ClipGradients(); - for (int param_id = 0; param_id < this->net_->params().size(); ++param_id) { + for (int param_id = 0; param_id < this->net_->learnable_params().size(); + ++param_id) { Normalize(param_id); Regularize(param_id); ComputeUpdateValue(param_id, rate); @@ -612,7 +584,7 @@ template void SGDSolver::Normalize(int param_id) { if (this->param_.iter_size() == 1) { return; } // Scale gradient to counterbalance accumulation. - const vector > >& net_params = this->net_->params(); + const vector*>& net_params = this->net_->learnable_params(); const Dtype accum_normalization = Dtype(1.) 
/ this->param_.iter_size(); switch (Caffe::mode()) { case Caffe::CPU: { @@ -636,7 +608,7 @@ void SGDSolver::Normalize(int param_id) { template void SGDSolver::Regularize(int param_id) { - const vector > >& net_params = this->net_->params(); + const vector*>& net_params = this->net_->learnable_params(); const vector& net_params_weight_decay = this->net_->params_weight_decay(); Dtype weight_decay = this->param_.weight_decay(); @@ -698,7 +670,7 @@ void SGDSolver::Regularize(int param_id) { template void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { - const vector > >& net_params = this->net_->params(); + const vector*>& net_params = this->net_->learnable_params(); const vector& net_params_lr = this->net_->params_lr(); Dtype momentum = this->param_.momentum(); Dtype local_rate = rate * net_params_lr[param_id]; @@ -732,8 +704,27 @@ void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { } template -void SGDSolver::SnapshotSolverState(SolverState* state) { - state->clear_history(); +void SGDSolver::SnapshotSolverState(const string& model_filename) { + switch (this->param_.snapshot_format()) { + case caffe::SolverParameter_SnapshotFormat_BINARYPROTO: + SnapshotSolverStateToBinaryProto(model_filename); + break; + case caffe::SolverParameter_SnapshotFormat_HDF5: + SnapshotSolverStateToHDF5(model_filename); + break; + default: + LOG(FATAL) << "Unsupported snapshot format."; + } +} + +template +void SGDSolver::SnapshotSolverStateToBinaryProto( + const string& model_filename) { + SolverState state; + state.set_iter(this->iter_); + state.set_learned_net(model_filename); + state.set_current_step(this->current_step_); + state.clear_history(); for (int i = 0; i < history_.size(); ++i) { // Add history BlobProto* history_blob = state.add_history(); @@ -741,7 +732,7 @@ void SGDSolver::SnapshotSolverState(SolverState* state) { } string snapshot_filename = Solver::SnapshotFilename(".solverstate"); LOG(INFO) - << "Snapshotting solver state to binary proto file " << 
snapshot_filename; + << "Snapshotting solver state to binary proto file" << snapshot_filename; WriteProtoToBinaryFile(state, snapshot_filename.c_str()); } @@ -791,9 +782,35 @@ void SGDSolver::RestoreSolverStateFromBinaryProto( } } +template +void SGDSolver::RestoreSolverStateFromHDF5(const string& state_file) { + hid_t file_hid = H5Fopen(state_file.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT); + CHECK_GE(file_hid, 0) << "Couldn't open solver state file " << state_file; + this->iter_ = hdf5_load_int(file_hid, "iter"); + if (H5LTfind_dataset(file_hid, "learned_net")) { + string learned_net = hdf5_load_string(file_hid, "learned_net"); + this->net_->CopyTrainedLayersFrom(learned_net); + } + this->current_step_ = hdf5_load_int(file_hid, "current_step"); + hid_t history_hid = H5Gopen2(file_hid, "history", H5P_DEFAULT); + CHECK_GE(history_hid, 0) << "Error reading history from " << state_file; + int state_history_size = hdf5_get_num_links(history_hid); + CHECK_EQ(state_history_size, history_.size()) + << "Incorrect length of history blobs."; + for (int i = 0; i < history_.size(); ++i) { + ostringstream oss; + oss << i; + hdf5_load_nd_dataset(history_hid, oss.str().c_str(), 0, + kMaxBlobAxes, history_[i].get()); + } + H5Gclose(history_hid); + H5Fclose(file_hid); +} + template void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { - const vector > >& net_params = this->net_->params(); + CHECK(Caffe::root_solver()); + const vector*>& net_params = this->net_->learnable_params(); const vector& net_params_lr = this->net_->params_lr(); Dtype momentum = this->param_.momentum(); Dtype local_rate = rate * net_params_lr[param_id]; @@ -853,7 +870,8 @@ void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { template void AdaGradSolver::ComputeUpdateValue(int param_id, Dtype rate) { - const vector > >& net_params = this->net_->params(); + CHECK(Caffe::root_solver()); + const vector*>& net_params = this->net_->learnable_params(); const vector& net_params_lr = 
this->net_->params_lr(); Dtype delta = this->param_.delta(); Dtype local_rate = rate * net_params_lr[param_id]; diff --git a/src/caffe/solver.cpp.orig b/src/caffe/solver.cpp.orig deleted file mode 100644 index 78355cd2812..00000000000 --- a/src/caffe/solver.cpp.orig +++ /dev/null @@ -1,1464 +0,0 @@ -#include - -#include -#include -#include - -#include "hdf5.h" -#include "hdf5_hl.h" - -#include "caffe/net.hpp" -#include "caffe/proto/caffe.pb.h" -#include "caffe/solver.hpp" -#include "caffe/util/hdf5.hpp" -#include "caffe/util/io.hpp" -#include "caffe/util/math_functions.hpp" -#include "caffe/util/upgrade_proto.hpp" - -namespace caffe { - -template -void Solver::SetActionFunction(ActionCallback func) { - action_request_function_ = func; -} - -template -SolverAction::Enum Solver::GetRequestedAction() { - if (action_request_function_) { - // If the external request function has been set, call it. - return action_request_function_(); - } - return SolverAction::NONE; -} - -template -Solver::Solver(const SolverParameter& param, const Solver* root_solver) - : net_(), callbacks_(), root_solver_(root_solver), - requested_early_exit_(false) { - Init(param); -} - -template -Solver::Solver(const string& param_file, const Solver* root_solver) - : net_(), callbacks_(), root_solver_(root_solver), - requested_early_exit_(false) { - SolverParameter param; - ReadProtoFromTextFileOrDie(param_file, ¶m); - Init(param); -} - -template -void Solver::Init(const SolverParameter& param) { - CHECK(Caffe::root_solver() || root_solver_) - << "root_solver_ needs to be set for all non-root solvers"; - LOG_IF(INFO, Caffe::root_solver()) << "Initializing solver from parameters: " - << std::endl << param.DebugString(); - param_ = param; - CHECK_GE(param_.average_loss(), 1) << "average_loss should be non-negative."; - if (Caffe::root_solver() && param_.random_seed() >= 0) { - Caffe::set_random_seed(param_.random_seed()); - } - // Scaffolding code - InitTrainNet(); - if (Caffe::root_solver()) { - 
InitTestNets(); - LOG(INFO) << "Solver scaffolding done."; - } - iter_ = 0; - current_step_ = 0; -} - -template -void Solver::InitTrainNet() { - const int num_train_nets = param_.has_net() + param_.has_net_param() + - param_.has_train_net() + param_.has_train_net_param(); - const string& field_names = "net, net_param, train_net, train_net_param"; - CHECK_GE(num_train_nets, 1) << "SolverParameter must specify a train net " - << "using one of these fields: " << field_names; - CHECK_LE(num_train_nets, 1) << "SolverParameter must not contain more than " - << "one of these fields specifying a train_net: " << field_names; - NetParameter net_param; - if (param_.has_train_net_param()) { - LOG_IF(INFO, Caffe::root_solver()) - << "Creating training net specified in train_net_param."; - net_param.CopyFrom(param_.train_net_param()); - } else if (param_.has_train_net()) { - LOG_IF(INFO, Caffe::root_solver()) - << "Creating training net from train_net file: " << param_.train_net(); - ReadNetParamsFromTextFileOrDie(param_.train_net(), &net_param); - } - if (param_.has_net_param()) { - LOG_IF(INFO, Caffe::root_solver()) - << "Creating training net specified in net_param."; - net_param.CopyFrom(param_.net_param()); - } - if (param_.has_net()) { - LOG_IF(INFO, Caffe::root_solver()) - << "Creating training net from net file: " << param_.net(); - ReadNetParamsFromTextFileOrDie(param_.net(), &net_param); - } - // Set the correct NetState. We start with the solver defaults (lowest - // precedence); then, merge in any NetState specified by the net_param itself; - // finally, merge in any NetState specified by the train_state (highest - // precedence). 
- NetState net_state; - net_state.set_phase(TRAIN); - net_state.MergeFrom(net_param.state()); - net_state.MergeFrom(param_.train_state()); - net_param.mutable_state()->CopyFrom(net_state); - if (Caffe::root_solver()) { - net_.reset(new Net(net_param)); - } else { - net_.reset(new Net(net_param, root_solver_->net_.get())); - } -} - -template -void Solver::InitTestNets() { - CHECK(Caffe::root_solver()); - const bool has_net_param = param_.has_net_param(); - const bool has_net_file = param_.has_net(); - const int num_generic_nets = has_net_param + has_net_file; - CHECK_LE(num_generic_nets, 1) - << "Both net_param and net_file may not be specified."; - const int num_test_net_params = param_.test_net_param_size(); - const int num_test_net_files = param_.test_net_size(); - const int num_test_nets = num_test_net_params + num_test_net_files; - if (num_generic_nets) { - CHECK_GE(param_.test_iter_size(), num_test_nets) - << "test_iter must be specified for each test network."; - } else { - CHECK_EQ(param_.test_iter_size(), num_test_nets) - << "test_iter must be specified for each test network."; - } - // If we have a generic net (specified by net or net_param, rather than - // test_net or test_net_param), we may have an unlimited number of actual - // test networks -- the actual number is given by the number of remaining - // test_iters after any test nets specified by test_net_param and/or test_net - // are evaluated. 
- const int num_generic_net_instances = param_.test_iter_size() - num_test_nets; - const int num_test_net_instances = num_test_nets + num_generic_net_instances; - if (param_.test_state_size()) { - CHECK_EQ(param_.test_state_size(), num_test_net_instances) - << "test_state must be unspecified or specified once per test net."; - } - if (num_test_net_instances) { - CHECK_GT(param_.test_interval(), 0); - } - int test_net_id = 0; - vector sources(num_test_net_instances); - vector net_params(num_test_net_instances); - for (int i = 0; i < num_test_net_params; ++i, ++test_net_id) { - sources[test_net_id] = "test_net_param"; - net_params[test_net_id].CopyFrom(param_.test_net_param(i)); - } - for (int i = 0; i < num_test_net_files; ++i, ++test_net_id) { - sources[test_net_id] = "test_net file: " + param_.test_net(i); - ReadNetParamsFromTextFileOrDie(param_.test_net(i), - &net_params[test_net_id]); - } - const int remaining_test_nets = param_.test_iter_size() - test_net_id; - if (has_net_param) { - for (int i = 0; i < remaining_test_nets; ++i, ++test_net_id) { - sources[test_net_id] = "net_param"; - net_params[test_net_id].CopyFrom(param_.net_param()); - } - } - if (has_net_file) { - for (int i = 0; i < remaining_test_nets; ++i, ++test_net_id) { - sources[test_net_id] = "net file: " + param_.net(); - ReadNetParamsFromTextFileOrDie(param_.net(), &net_params[test_net_id]); - } - } - test_nets_.resize(num_test_net_instances); - for (int i = 0; i < num_test_net_instances; ++i) { - // Set the correct NetState. We start with the solver defaults (lowest - // precedence); then, merge in any NetState specified by the net_param - // itself; finally, merge in any NetState specified by the test_state - // (highest precedence). 
- NetState net_state; - net_state.set_phase(TEST); - net_state.MergeFrom(net_params[i].state()); - if (param_.test_state_size()) { - net_state.MergeFrom(param_.test_state(i)); - } - net_params[i].mutable_state()->CopyFrom(net_state); - LOG(INFO) - << "Creating test net (#" << i << ") specified by " << sources[i]; - if (Caffe::root_solver()) { - test_nets_[i].reset(new Net(net_params[i])); - } else { - test_nets_[i].reset(new Net(net_params[i], - root_solver_->test_nets_[i].get())); - } - test_nets_[i]->set_debug_info(param_.debug_info()); - } -} - -template -void Solver::Step(int iters) { - vector*> bottom_vec; - const int start_iter = iter_; - const int stop_iter = iter_ + iters; - int average_loss = this->param_.average_loss(); - vector losses; - Dtype smoothed_loss = 0; - -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 - while (iter_ < stop_iter) { - // zero-init the params - net_->ClearParamDiffs(); - if (param_.test_interval() && iter_ % param_.test_interval() == 0 - && (iter_ > 0 || param_.test_initialization()) - && Caffe::root_solver()) { - TestAll(); - if (requested_early_exit_) { - // Break out of the while loop because stop was requested while testing. 
- break; - } - } - - for (int i = 0; i < callbacks_.size(); ++i) { - callbacks_[i]->on_start(); -======= - for (; iter_ < stop_iter; ++iter_) { - if (param_.test_interval() && iter_ % param_.test_interval() == 0 - && (iter_ > 0 || param_.test_initialization())) { - TestAll(); ->>>>>>> triplet data generation and network update - } - const bool display = param_.display() && iter_ % param_.display() == 0; - net_->set_debug_info(display && param_.debug_info()); - Dtype loss = net_->ForwardBackward(bottom_vec); - if (losses.size() < average_loss) { - losses.push_back(loss); - int size = losses.size(); - smoothed_loss = (smoothed_loss * (size - 1) + loss) / size; - } else { - int idx = (iter_ - start_iter) % average_loss; - smoothed_loss += (loss - losses[idx]) / average_loss; - losses[idx] = loss; - } - if (display) { - LOG_IF(INFO, Caffe::root_solver()) << "Iteration " << iter_ - << ", loss = " << smoothed_loss; - const vector*>& result = net_->output_blobs(); - int score_index = 0; - for (int j = 0; j < result.size(); ++j) { - const Dtype* result_vec = result[j]->cpu_data(); - const string& output_name = - net_->blob_names()[net_->output_blob_indices()[j]]; - const Dtype loss_weight = - net_->blob_loss_weights()[net_->output_blob_indices()[j]]; - for (int k = 0; k < result[j]->count(); ++k) { - ostringstream loss_msg_stream; - if (loss_weight) { - loss_msg_stream << " (* " << loss_weight - << " = " << loss_weight * result_vec[k] << " loss)"; - } - LOG_IF(INFO, Caffe::root_solver()) << " Train net output #" - << score_index++ << ": " << output_name << " = " - << result_vec[k] << loss_msg_stream.str(); - } - } - } -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 - for (int i = 0; i < callbacks_.size(); ++i) { - callbacks_[i]->on_gradients_ready(); - } - ApplyUpdate(); - - // Increment the internal iter_ counter -- its value should always indicate - // the number of times the weights have been updated. 
- ++iter_; -======= - ComputeUpdateValue(); - net_->Update(); ->>>>>>> triplet data generation and network update - - SolverAction::Enum request = GetRequestedAction(); - - // Save a snapshot if needed. -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 - if ((param_.snapshot() - && iter_ % param_.snapshot() == 0 - && Caffe::root_solver()) || - (request == SolverAction::SNAPSHOT)) { -======= - if (param_.snapshot() && (iter_ + 1) % param_.snapshot() == 0) { ->>>>>>> triplet data generation and network update - Snapshot(); - } - if (SolverAction::STOP == request) { - requested_early_exit_ = true; - // Break out of training loop. - break; - } - } -} - -template -void Solver::Solve(const char* resume_file) { - CHECK(Caffe::root_solver()); - LOG(INFO) << "Solving " << net_->name(); - LOG(INFO) << "Learning Rate Policy: " << param_.lr_policy(); - - // Initialize to false every time we start solving. - requested_early_exit_ = false; - - if (resume_file) { - LOG(INFO) << "Restoring previous solver status from " << resume_file; - Restore(resume_file); - } - - // For a network that is trained by the solver, no bottom or top vecs - // should be given, and we will just provide dummy vecs. - Step(param_.max_iter() - iter_); - // If we haven't already, save a snapshot after optimization, unless - // overridden by setting snapshot_after_train := false - if (param_.snapshot_after_train() - && (!param_.snapshot() || iter_ % param_.snapshot() != 0)) { - Snapshot(); - } - if (requested_early_exit_) { - LOG(INFO) << "Optimization stopped early."; - return; - } - // After the optimization is done, run an additional train and test pass to - // display the train and test loss/outputs if appropriate (based on the - // display and test_interval settings, respectively). 
Unlike in the rest of - // training, for the train net we only run a forward pass as we've already - // updated the parameters "max_iter" times -- this final pass is only done to - // display the loss, which is computed in the forward pass. - if (param_.display() && iter_ % param_.display() == 0) { - Dtype loss; - net_->ForwardPrefilled(&loss); - LOG(INFO) << "Iteration " << iter_ << ", loss = " << loss; - } - if (param_.test_interval() && iter_ % param_.test_interval() == 0) { - TestAll(); - } - LOG(INFO) << "Optimization Done."; -} - -template -void Solver::TestAll() { - for (int test_net_id = 0; - test_net_id < test_nets_.size() && !requested_early_exit_; - ++test_net_id) { - Test(test_net_id); - } -} - -template -void Solver::Test(const int test_net_id) { - CHECK(Caffe::root_solver()); - LOG(INFO) << "Iteration " << iter_ - << ", Testing net (#" << test_net_id << ")"; - CHECK_NOTNULL(test_nets_[test_net_id].get())-> - ShareTrainedLayersWith(net_.get()); - vector test_score; - vector test_score_output_id; - vector*> bottom_vec; - const shared_ptr >& test_net = test_nets_[test_net_id]; - Dtype loss = 0; - for (int i = 0; i < param_.test_iter(test_net_id); ++i) { - SolverAction::Enum request = GetRequestedAction(); - // Check to see if stoppage of testing/training has been requested. - while (request != SolverAction::NONE) { - if (SolverAction::SNAPSHOT == request) { - Snapshot(); - } else if (SolverAction::STOP == request) { - requested_early_exit_ = true; - } - request = GetRequestedAction(); - } - if (requested_early_exit_) { - // break out of test loop. 
- break; - } - - Dtype iter_loss; - const vector*>& result = - test_net->Forward(bottom_vec, &iter_loss); - if (param_.test_compute_loss()) { - loss += iter_loss; - } - if (i == 0) { - for (int j = 0; j < result.size(); ++j) { - const Dtype* result_vec = result[j]->cpu_data(); - for (int k = 0; k < result[j]->count(); ++k) { - test_score.push_back(result_vec[k]); - test_score_output_id.push_back(j); - } - } - } else { - int idx = 0; - for (int j = 0; j < result.size(); ++j) { - const Dtype* result_vec = result[j]->cpu_data(); - for (int k = 0; k < result[j]->count(); ++k) { - test_score[idx++] += result_vec[k]; - } - } - } - } - if (requested_early_exit_) { - LOG(INFO) << "Test interrupted."; - return; - } - if (param_.test_compute_loss()) { - loss /= param_.test_iter(test_net_id); - LOG(INFO) << "Test loss: " << loss; - } - for (int i = 0; i < test_score.size(); ++i) { - const int output_blob_index = - test_net->output_blob_indices()[test_score_output_id[i]]; - const string& output_name = test_net->blob_names()[output_blob_index]; - const Dtype loss_weight = test_net->blob_loss_weights()[output_blob_index]; - ostringstream loss_msg_stream; - const Dtype mean_score = test_score[i] / param_.test_iter(test_net_id); - if (loss_weight) { - loss_msg_stream << " (* " << loss_weight - << " = " << loss_weight * mean_score << " loss)"; - } - LOG(INFO) << " Test net output #" << i << ": " << output_name << " = " - << mean_score << loss_msg_stream.str(); - } -} - -template -void Solver::Snapshot() { - CHECK(Caffe::root_solver()); - string model_filename; - switch (param_.snapshot_format()) { - case caffe::SolverParameter_SnapshotFormat_BINARYPROTO: - model_filename = SnapshotToBinaryProto(); - break; - case caffe::SolverParameter_SnapshotFormat_HDF5: - model_filename = SnapshotToHDF5(); - break; - default: - LOG(FATAL) << "Unsupported snapshot format."; - } - - SnapshotSolverState(model_filename); -} - -template -string Solver::SnapshotFilename(const string extension) { - 
string filename(param_.snapshot_prefix()); - const int kBufferSize = 20; - char iter_str_buffer[kBufferSize]; -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 - snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_); - return filename + iter_str_buffer + extension; -======= - // Add one to iter_ to get the number of iterations that have completed. - snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_ + 1); - filename += iter_str_buffer; - model_filename = filename + ".caffemodel"; - LOG(INFO) << "Snapshotting to " << model_filename; - WriteProtoToBinaryFile(net_param, model_filename.c_str()); - SolverState state; - SnapshotSolverState(&state); - state.set_iter(iter_ + 1); - state.set_learned_net(model_filename); - state.set_current_step(current_step_); - snapshot_filename = filename + ".solverstate"; - LOG(INFO) << "Snapshotting solver state to " << snapshot_filename; - WriteProtoToBinaryFile(state, snapshot_filename.c_str()); ->>>>>>> triplet data generation and network update -} - -template -string Solver::SnapshotToBinaryProto() { - string model_filename = SnapshotFilename(".caffemodel"); - LOG(INFO) << "Snapshotting to binary proto file " << model_filename; - NetParameter net_param; - net_->ToProto(&net_param, param_.snapshot_diff()); - WriteProtoToBinaryFile(net_param, model_filename); - return model_filename; -} - -template -string Solver::SnapshotToHDF5() { - string model_filename = SnapshotFilename(".caffemodel.h5"); - LOG(INFO) << "Snapshotting to HDF5 file " << model_filename; - net_->ToHDF5(model_filename, param_.snapshot_diff()); - return model_filename; -} - -template -void Solver::Restore(const char* state_file) { - CHECK(Caffe::root_solver()); - string state_filename(state_file); - if (state_filename.size() >= 3 && - state_filename.compare(state_filename.size() - 3, 3, ".h5") == 0) { - RestoreSolverStateFromHDF5(state_filename); - } else { - RestoreSolverStateFromBinaryProto(state_filename); - } -} - -// Return the current learning rate. 
The currently implemented learning rate -// policies are as follows: -// - fixed: always return base_lr. -// - step: return base_lr * gamma ^ (floor(iter / step)) -// - exp: return base_lr * gamma ^ iter -// - inv: return base_lr * (1 + gamma * iter) ^ (- power) -// - multistep: similar to step but it allows non uniform steps defined by -// stepvalue -// - poly: the effective learning rate follows a polynomial decay, to be -// zero by the max_iter. return base_lr (1 - iter/max_iter) ^ (power) -// - sigmoid: the effective learning rate follows a sigmod decay -// return base_lr ( 1/(1 + exp(-gamma * (iter - stepsize)))) -// -// where base_lr, max_iter, gamma, step, stepvalue and power are defined -// in the solver parameter protocol buffer, and iter is the current iteration. -template -Dtype SGDSolver::GetLearningRate() { - Dtype rate; - const string& lr_policy = this->param_.lr_policy(); - if (lr_policy == "fixed") { - rate = this->param_.base_lr(); - } else if (lr_policy == "step") { - this->current_step_ = this->iter_ / this->param_.stepsize(); - rate = this->param_.base_lr() * - pow(this->param_.gamma(), this->current_step_); - } else if (lr_policy == "exp") { - rate = this->param_.base_lr() * pow(this->param_.gamma(), this->iter_); - } else if (lr_policy == "inv") { - rate = this->param_.base_lr() * - pow(Dtype(1) + this->param_.gamma() * this->iter_, - - this->param_.power()); - } else if (lr_policy == "multistep") { - if (this->current_step_ < this->param_.stepvalue_size() && - this->iter_ >= this->param_.stepvalue(this->current_step_)) { - this->current_step_++; - LOG(INFO) << "MultiStep Status: Iteration " << - this->iter_ << ", step = " << this->current_step_; - } - rate = this->param_.base_lr() * - pow(this->param_.gamma(), this->current_step_); - } else if (lr_policy == "poly") { - rate = this->param_.base_lr() * pow(Dtype(1.) 
- - (Dtype(this->iter_) / Dtype(this->param_.max_iter())), - this->param_.power()); - } else if (lr_policy == "sigmoid") { - rate = this->param_.base_lr() * (Dtype(1.) / - (Dtype(1.) + exp(-this->param_.gamma() * (Dtype(this->iter_) - - Dtype(this->param_.stepsize()))))); - } else { - LOG(FATAL) << "Unknown learning rate policy: " << lr_policy; - } - return rate; -} - -template -void SGDSolver::PreSolve() { - // Initialize the history - const vector*>& net_params = this->net_->learnable_params(); - history_.clear(); - update_.clear(); - temp_.clear(); - for (int i = 0; i < net_params.size(); ++i) { - const vector& shape = net_params[i]->shape(); - history_.push_back(shared_ptr >(new Blob(shape))); - update_.push_back(shared_ptr >(new Blob(shape))); - temp_.push_back(shared_ptr >(new Blob(shape))); - } -} - -template -void SGDSolver::ClipGradients() { - const Dtype clip_gradients = this->param_.clip_gradients(); - if (clip_gradients < 0) { return; } - const vector*>& net_params = this->net_->learnable_params(); - Dtype sumsq_diff = 0; - for (int i = 0; i < net_params.size(); ++i) { - sumsq_diff += net_params[i]->sumsq_diff(); - } - const Dtype l2norm_diff = std::sqrt(sumsq_diff); - if (l2norm_diff > clip_gradients) { - Dtype scale_factor = clip_gradients / l2norm_diff; - LOG(INFO) << "Gradient clipping: scaling down gradients (L2 norm " - << l2norm_diff << " > " << clip_gradients << ") " - << "by scale factor " << scale_factor; - for (int i = 0; i < net_params.size(); ++i) { - net_params[i]->scale_diff(scale_factor); - } - } -} - -template -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 -void SGDSolver::ApplyUpdate() { - CHECK(Caffe::root_solver()); -======= -void SGDSolver::ComputeUpdateValue() { - const vector > >& net_params = this->net_->params(); - const vector& net_params_lr = this->net_->params_lr(); - const vector& net_params_weight_decay = - this->net_->params_weight_decay(); - // get the learning rate ->>>>>>> triplet data generation and network update - 
Dtype rate = GetLearningRate(); - if (this->param_.display() && this->iter_ % this->param_.display() == 0) { - LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; - } - ClipGradients(); -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 - for (int param_id = 0; param_id < this->net_->learnable_params().size(); - ++param_id) { - Normalize(param_id); - Regularize(param_id); - ComputeUpdateValue(param_id, rate); - } - this->net_->Update(); -} - -template -void SGDSolver::Normalize(int param_id) { - if (this->param_.iter_size() == 1) { return; } - // Scale gradient to counterbalance accumulation. - const vector*>& net_params = this->net_->learnable_params(); - const Dtype accum_normalization = Dtype(1.) / this->param_.iter_size(); - switch (Caffe::mode()) { - case Caffe::CPU: { - caffe_scal(net_params[param_id]->count(), accum_normalization, - net_params[param_id]->mutable_cpu_diff()); - break; - } - case Caffe::GPU: { -#ifndef CPU_ONLY - caffe_gpu_scal(net_params[param_id]->count(), accum_normalization, - net_params[param_id]->mutable_gpu_diff()); -#else - NO_GPU; -#endif - break; - } - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} - -template -void SGDSolver::Regularize(int param_id) { - const vector*>& net_params = this->net_->learnable_params(); - const vector& net_params_weight_decay = - this->net_->params_weight_decay(); -======= - Dtype momentum = this->param_.momentum(); ->>>>>>> triplet data generation and network update - Dtype weight_decay = this->param_.weight_decay(); - string regularization_type = this->param_.regularization_type(); - switch (Caffe::mode()) { - case Caffe::CPU: - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - // Compute the value to history, and then copy them to the blob's diff. 
- Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else if (regularization_type == "L1") { - caffe_cpu_sign(net_params[param_id]->count(), - net_params[param_id]->cpu_data(), - temp_[param_id]->mutable_cpu_data()); - caffe_axpy(net_params[param_id]->count(), - local_decay, - temp_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } - } - - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->cpu_diff(), momentum, - history_[param_id]->mutable_cpu_data()); - // copy - caffe_copy(net_params[param_id]->count(), - history_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } - break; - case Caffe::GPU: -#ifndef CPU_ONLY - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - // Compute the value to history, and then copy them to the blob's diff. 
- Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else if (regularization_type == "L1") { - caffe_gpu_sign(net_params[param_id]->count(), - net_params[param_id]->gpu_data(), - temp_[param_id]->mutable_gpu_data()); - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - temp_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } - } - -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 -template -void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { - const vector*>& net_params = this->net_->learnable_params(); - const vector& net_params_lr = this->net_->params_lr(); - Dtype momentum = this->param_.momentum(); - Dtype local_rate = rate * net_params_lr[param_id]; - // Compute the update to history, then copy it to the parameter diff. 
- switch (Caffe::mode()) { - case Caffe::CPU: { - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->cpu_diff(), momentum, - history_[param_id]->mutable_cpu_data()); - caffe_copy(net_params[param_id]->count(), - history_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - break; - } - case Caffe::GPU: { -#ifndef CPU_ONLY - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->gpu_diff(), momentum, - history_[param_id]->mutable_gpu_data()); - caffe_copy(net_params[param_id]->count(), - history_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); -======= - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->gpu_diff(), momentum, - history_[param_id]->mutable_gpu_data()); - // copy - caffe_copy(net_params[param_id]->count(), - history_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } ->>>>>>> triplet data generation and network update -#else - NO_GPU; -#endif - break; - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} - -template -void SGDSolver::SnapshotSolverState(const string& model_filename) { - switch (this->param_.snapshot_format()) { - case caffe::SolverParameter_SnapshotFormat_BINARYPROTO: - SnapshotSolverStateToBinaryProto(model_filename); - break; - case caffe::SolverParameter_SnapshotFormat_HDF5: - SnapshotSolverStateToHDF5(model_filename); - break; - default: - LOG(FATAL) << "Unsupported snapshot format."; - } -} - -template -void SGDSolver::SnapshotSolverStateToBinaryProto( - const string& model_filename) { - SolverState state; - state.set_iter(this->iter_); - state.set_learned_net(model_filename); - state.set_current_step(this->current_step_); - state.clear_history(); - for (int i = 0; i < history_.size(); ++i) { - // Add history - BlobProto* history_blob = state.add_history(); - history_[i]->ToProto(history_blob); - } - string snapshot_filename = 
Solver::SnapshotFilename(".solverstate"); - LOG(INFO) - << "Snapshotting solver state to binary proto file " << snapshot_filename; - WriteProtoToBinaryFile(state, snapshot_filename.c_str()); -} - -template -void SGDSolver::SnapshotSolverStateToHDF5( - const string& model_filename) { - string snapshot_filename = - Solver::SnapshotFilename(".solverstate.h5"); - LOG(INFO) << "Snapshotting solver state to HDF5 file " << snapshot_filename; - hid_t file_hid = H5Fcreate(snapshot_filename.c_str(), H5F_ACC_TRUNC, - H5P_DEFAULT, H5P_DEFAULT); - CHECK_GE(file_hid, 0) - << "Couldn't open " << snapshot_filename << " to save solver state."; - hdf5_save_int(file_hid, "iter", this->iter_); - hdf5_save_string(file_hid, "learned_net", model_filename); - hdf5_save_int(file_hid, "current_step", this->current_step_); - hid_t history_hid = H5Gcreate2(file_hid, "history", H5P_DEFAULT, H5P_DEFAULT, - H5P_DEFAULT); - CHECK_GE(history_hid, 0) - << "Error saving solver state to " << snapshot_filename << "."; - for (int i = 0; i < history_.size(); ++i) { - ostringstream oss; - oss << i; - hdf5_save_nd_dataset(history_hid, oss.str(), *history_[i]); - } - H5Gclose(history_hid); - H5Fclose(file_hid); -} - -template -void SGDSolver::RestoreSolverStateFromBinaryProto( - const string& state_file) { - SolverState state; - ReadProtoFromBinaryFile(state_file, &state); - this->iter_ = state.iter(); - if (state.has_learned_net()) { - NetParameter net_param; - ReadNetParamsFromBinaryFileOrDie(state.learned_net().c_str(), &net_param); - this->net_->CopyTrainedLayersFrom(net_param); - } - this->current_step_ = state.current_step(); - CHECK_EQ(state.history_size(), history_.size()) - << "Incorrect length of history blobs."; - LOG(INFO) << "SGDSolver: restoring history"; - for (int i = 0; i < history_.size(); ++i) { - history_[i]->FromProto(state.history(i)); - } -} - -template -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 -void SGDSolver::RestoreSolverStateFromHDF5(const string& state_file) { - hid_t 
file_hid = H5Fopen(state_file.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK_GE(file_hid, 0) << "Couldn't open solver state file " << state_file; - this->iter_ = hdf5_load_int(file_hid, "iter"); - if (H5LTfind_dataset(file_hid, "learned_net")) { - string learned_net = hdf5_load_string(file_hid, "learned_net"); - this->net_->CopyTrainedLayersFrom(learned_net); - } - this->current_step_ = hdf5_load_int(file_hid, "current_step"); - hid_t history_hid = H5Gopen2(file_hid, "history", H5P_DEFAULT); - CHECK_GE(history_hid, 0) << "Error reading history from " << state_file; - int state_history_size = hdf5_get_num_links(history_hid); - CHECK_EQ(state_history_size, history_.size()) - << "Incorrect length of history blobs."; - for (int i = 0; i < history_.size(); ++i) { - ostringstream oss; - oss << i; - hdf5_load_nd_dataset(history_hid, oss.str().c_str(), 0, - kMaxBlobAxes, history_[i].get()); - } - H5Gclose(history_hid); - H5Fclose(file_hid); -} - -template -void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { - CHECK(Caffe::root_solver()); - const vector*>& net_params = this->net_->learnable_params(); -======= -void NesterovSolver::ComputeUpdateValue() { - const vector > >& net_params = this->net_->params(); ->>>>>>> triplet data generation and network update - const vector& net_params_lr = this->net_->params_lr(); - const vector& net_params_weight_decay = - this->net_->params_weight_decay(); - // get the learning rate - Dtype rate = this->GetLearningRate(); - if (this->param_.display() && this->iter_ % this->param_.display() == 0) { - LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; - } - SGDSolver::ClipGradients(); - Dtype momentum = this->param_.momentum(); - Dtype weight_decay = this->param_.weight_decay(); - string regularization_type = this->param_.regularization_type(); - switch (Caffe::mode()) { - case Caffe::CPU: - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - // save history momentum for stepping back - 
caffe_copy(net_params[param_id]->count(), - this->history_[param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); - - Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else if (regularization_type == "L1") { - caffe_cpu_sign(net_params[param_id]->count(), - net_params[param_id]->cpu_data(), - this->temp_[param_id]->mutable_cpu_data()); - caffe_axpy(net_params[param_id]->count(), - local_decay, - this->temp_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } - } - - // update history - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->cpu_diff(), momentum, - this->history_[param_id]->mutable_cpu_data()); - - // compute udpate: step back then over step - caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, - this->history_[param_id]->cpu_data(), -momentum, - this->update_[param_id]->mutable_cpu_data()); - - // copy - caffe_copy(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } - break; - case Caffe::GPU: -#ifndef CPU_ONLY - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - // save history momentum for stepping back - caffe_copy(net_params[param_id]->count(), - this->history_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - 
net_params[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else if (regularization_type == "L1") { - caffe_gpu_sign(net_params[param_id]->count(), - net_params[param_id]->gpu_data(), - this->temp_[param_id]->mutable_gpu_data()); - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - this->temp_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } - } - - // update history - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->gpu_diff(), momentum, - this->history_[param_id]->mutable_gpu_data()); - - // compute udpate: step back then over step - caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, - this->history_[param_id]->gpu_data(), -momentum, - this->update_[param_id]->mutable_gpu_data()); - - // copy - caffe_copy(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } -#else - NO_GPU; -#endif - break; - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} - -template -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 -void AdaGradSolver::ComputeUpdateValue(int param_id, Dtype rate) { - CHECK(Caffe::root_solver()); - const vector*>& net_params = this->net_->learnable_params(); -======= -void AdaGradSolver::ComputeUpdateValue() { - const vector > >& net_params = this->net_->params(); ->>>>>>> triplet data generation and network update - const vector& net_params_lr = this->net_->params_lr(); - const vector& net_params_weight_decay = - this->net_->params_weight_decay(); - // get the learning rate - Dtype rate = this->GetLearningRate(); - Dtype delta = this->param_.delta(); - if (this->param_.display() && this->iter_ % this->param_.display() == 0) { - LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; - } - SGDSolver::ClipGradients(); - Dtype weight_decay = this->param_.weight_decay(); - string 
regularization_type = this->param_.regularization_type(); - switch (Caffe::mode()) { - case Caffe::CPU: - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else if (regularization_type == "L1") { - caffe_cpu_sign(net_params[param_id]->count(), - net_params[param_id]->cpu_data(), - this->temp_[param_id]->mutable_cpu_data()); - caffe_axpy(net_params[param_id]->count(), - local_decay, - this->temp_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } - } - - // compute square of gradient in update - caffe_powx(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), Dtype(2), - this->update_[param_id]->mutable_cpu_data()); - - // update history - caffe_add(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), - this->history_[param_id]->cpu_data(), - this->history_[param_id]->mutable_cpu_data()); - - // prepare update - caffe_powx(net_params[param_id]->count(), - this->history_[param_id]->cpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_cpu_data()); - - caffe_add_scalar(net_params[param_id]->count(), - delta, this->update_[param_id]->mutable_cpu_data()); - - caffe_div(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), - this->update_[param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); - - // scale and copy - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->cpu_data(), Dtype(0), - net_params[param_id]->mutable_cpu_diff()); - } - break; - case Caffe::GPU: -#ifndef CPU_ONLY - for (int param_id = 0; 
param_id < net_params.size(); ++param_id) { - Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else if (regularization_type == "L1") { - caffe_gpu_sign(net_params[param_id]->count(), - net_params[param_id]->gpu_data(), - this->temp_[param_id]->mutable_gpu_data()); - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - this->temp_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } - } - - // compute square of gradient in update - caffe_gpu_powx(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), Dtype(2), - this->update_[param_id]->mutable_gpu_data()); - - // update history - caffe_gpu_add(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), - this->history_[param_id]->gpu_data(), - this->history_[param_id]->mutable_gpu_data()); - - // prepare update - caffe_gpu_powx(net_params[param_id]->count(), - this->history_[param_id]->gpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_add_scalar(net_params[param_id]->count(), - delta, this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_div(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), - this->update_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - // scale and copy - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->gpu_data(), Dtype(0), - net_params[param_id]->mutable_gpu_diff()); - } -#else - NO_GPU; -#endif - break; - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} - -template -void RMSPropSolver::ComputeUpdateValue(int param_id, 
Dtype rate) { - const vector*>& net_params = this->net_->learnable_params(); - const vector& net_params_lr = this->net_->params_lr(); - - // get the learning rate - Dtype delta = this->param_.delta(); - Dtype rms_decay = this->param_.rms_decay(); - Dtype local_rate = rate * net_params_lr[param_id]; - - switch (Caffe::mode()) { - case Caffe::CPU: - // compute square of gradient in update - caffe_powx(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), Dtype(2), - this->update_[param_id]->mutable_cpu_data()); - - // update history - caffe_cpu_axpby(net_params[param_id] -> count(), - Dtype(1-rms_decay), this->update_[param_id]->cpu_data(), - rms_decay, this->history_[param_id]-> mutable_cpu_data()); - - // prepare update - caffe_powx(net_params[param_id]->count(), - this->history_[param_id]->cpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_cpu_data()); - - caffe_add_scalar(net_params[param_id]->count(), - delta, this->update_[param_id]->mutable_cpu_data()); - - caffe_div(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), this->update_[param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); - - // scale and copy - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->cpu_data(), Dtype(0), - net_params[param_id]->mutable_cpu_diff()); - break; - case Caffe::GPU: -#ifndef CPU_ONLY - // compute square of gradient in update - caffe_gpu_powx(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), Dtype(2), - this->update_[param_id]->mutable_gpu_data()); - - // update history - caffe_gpu_axpby(net_params[param_id] -> count(), - Dtype(1-rms_decay), this->update_[param_id]->gpu_data(), - rms_decay, this->history_[param_id]-> mutable_gpu_data()); - - // prepare update - caffe_gpu_powx(net_params[param_id]->count(), - this->history_[param_id]->gpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_add_scalar(net_params[param_id]->count(), - delta, 
this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_div(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), this->update_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->gpu_data(), Dtype(0), - net_params[param_id]->mutable_gpu_diff()); -#else - NO_GPU; -#endif - break; - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} - -template -void AdaDeltaSolver::AdaDeltaPreSolve() { - // Add the extra history entries for AdaDelta after those from - // SGDSolver::PreSolve - const vector*>& net_params = this->net_->learnable_params(); - for (int i = 0; i < net_params.size(); ++i) { - const vector& shape = net_params[i]->shape(); - this->history_.push_back( - shared_ptr >(new Blob(shape))); - } -} - -template -void AdaDeltaSolver::ComputeUpdateValue(int param_id, Dtype rate) { - const vector*>& net_params = this->net_->learnable_params(); - const vector& net_params_lr = this->net_->params_lr(); - Dtype delta = this->param_.delta(); - Dtype momentum = this->param_.momentum(); - Dtype local_rate = rate * net_params_lr[param_id]; - size_t update_history_offset = net_params.size(); - switch (Caffe::mode()) { - case Caffe::CPU: { - // compute square of gradient in update - caffe_powx(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), Dtype(2), - this->update_[param_id]->mutable_cpu_data()); - - // update history of gradients - caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, - this->update_[param_id]->cpu_data(), momentum, - this->history_[param_id]->mutable_cpu_data()); - - // add delta to history to guard against dividing by zero later - caffe_set(net_params[param_id]->count(), delta, - this->temp_[param_id]->mutable_cpu_data()); - - caffe_add(net_params[param_id]->count(), - this->temp_[param_id]->cpu_data(), - this->history_[update_history_offset + param_id]->cpu_data(), - 
this->update_[param_id]->mutable_cpu_data()); - - caffe_add(net_params[param_id]->count(), - this->temp_[param_id]->cpu_data(), - this->history_[param_id]->cpu_data(), - this->temp_[param_id]->mutable_cpu_data()); - - // divide history of updates by history of gradients - caffe_div(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), - this->temp_[param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); - - // jointly compute the RMS of both for update and gradient history - caffe_powx(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_cpu_data()); - - // compute the update - caffe_mul(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), - this->update_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - - // compute square of update - caffe_powx(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), Dtype(2), - this->update_[param_id]->mutable_cpu_data()); - - // update history of updates - caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, - this->update_[param_id]->cpu_data(), momentum, - this->history_[update_history_offset + param_id]->mutable_cpu_data()); - - // apply learning rate - caffe_cpu_scale(net_params[param_id]->count(), local_rate, - net_params[param_id]->cpu_diff(), - net_params[param_id]->mutable_cpu_diff()); - break; - } - case Caffe::GPU: { -#ifndef CPU_ONLY - // compute square of gradient in update - caffe_gpu_powx(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), Dtype(2), - this->update_[param_id]->mutable_gpu_data()); - - // update history of gradients - caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, - this->update_[param_id]->gpu_data(), momentum, - this->history_[param_id]->mutable_gpu_data()); - - // add delta to history to guard against dividing by zero later - caffe_gpu_set(net_params[param_id]->count(), delta, - 
this->temp_[param_id]->mutable_gpu_data()); - - caffe_gpu_add(net_params[param_id]->count(), - this->temp_[param_id]->gpu_data(), - this->history_[update_history_offset + param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_add(net_params[param_id]->count(), - this->temp_[param_id]->gpu_data(), - this->history_[param_id]->gpu_data(), - this->temp_[param_id]->mutable_gpu_data()); - - // divide history of updates by history of gradients - caffe_gpu_div(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), - this->temp_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - // jointly compute the RMS of both for update and gradient history - caffe_gpu_powx(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_gpu_data()); - - // compute the update and copy to net_diff - caffe_gpu_mul(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), - this->update_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - - // compute square of update - caffe_gpu_powx(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), Dtype(2), - this->update_[param_id]->mutable_gpu_data()); - - // update history of updates - caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, - this->update_[param_id]->gpu_data(), momentum, - this->history_[update_history_offset + param_id]->mutable_gpu_data()); - - // apply learning rate - caffe_gpu_scale(net_params[param_id]->count(), local_rate, - net_params[param_id]->gpu_diff(), - net_params[param_id]->mutable_gpu_diff()); -#else - NO_GPU; -#endif - break; - } - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} - -template -void AdamSolver::AdamPreSolve() { - // Add the extra history entries for Adam after those from - // SGDSolver::PreSolve - const vector*>& net_params = this->net_->learnable_params(); - for (int i = 0; i < net_params.size(); ++i) { 
- const vector& shape = net_params[i]->shape(); - this->history_.push_back( - shared_ptr >(new Blob(shape))); - } -} - -template -void AdamSolver::ComputeUpdateValue(int param_id, Dtype rate) { - const vector*>& net_params = this->net_->learnable_params(); - const vector& net_params_lr = this->net_->params_lr(); - Dtype local_rate = rate * net_params_lr[param_id]; - const Dtype beta1 = this->param_.momentum(); - const Dtype beta2 = this->param_.momentum2(); - - // we create aliases for convenience - size_t update_history_offset = net_params.size(); - Blob* val_m = this->history_[param_id].get(); - Blob* val_v = this->history_[param_id + update_history_offset].get(); - Blob* val_t = this->temp_[param_id].get(); - - const int t = this->iter_ + 1; - const Dtype correction = std::sqrt(Dtype(1) - pow(beta2, t)) / - (Dtype(1.) - pow(beta1, t)); - const int N = net_params[param_id]->count(); - const Dtype eps_hat = this->param_.delta(); - - switch (Caffe::mode()) { - case Caffe::CPU: { - // update m <- \beta_1 m_{t-1} + (1-\beta_1)g_t - caffe_cpu_axpby(N, Dtype(1)-beta1, - net_params[param_id]->cpu_diff(), beta1, - val_m->mutable_cpu_data()); - - // update v <- \beta_2 m_{t-1} + (1-\beta_2)g_t^2 - caffe_mul(N, - net_params[param_id]->cpu_diff(), - net_params[param_id]->cpu_diff(), - val_t->mutable_cpu_data()); - caffe_cpu_axpby(N, Dtype(1)-beta2, - val_t->cpu_data(), beta2, - val_v->mutable_cpu_data()); - - // set update - caffe_powx(N, - val_v->cpu_data(), Dtype(0.5), - val_t->mutable_cpu_data()); - caffe_add_scalar(N, eps_hat, val_t->mutable_cpu_data()); - caffe_div(N, - val_m->cpu_data(), - val_t->cpu_data(), - val_t->mutable_cpu_data()); - - caffe_cpu_scale(N, local_rate*correction, - val_t->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - break; - } - case Caffe::GPU: { -#ifndef CPU_ONLY - // update m <- \beta_1 m_{t-1} + (1-\beta_1)g_t - caffe_gpu_axpby(N, Dtype(1)-beta1, - net_params[param_id]->gpu_diff(), beta1, - val_m->mutable_gpu_data()); - - // 
update v <- \beta_2 m_{t-1} + (1-\beta_2)g_t^2 - caffe_gpu_mul(N, - net_params[param_id]->gpu_diff(), - net_params[param_id]->gpu_diff(), - val_t->mutable_gpu_data()); - caffe_gpu_axpby(N, Dtype(1)-beta2, - val_t->gpu_data(), beta2, - val_v->mutable_gpu_data()); - - // set update - caffe_gpu_powx(N, - val_v->gpu_data(), Dtype(0.5), - val_t->mutable_gpu_data()); - caffe_gpu_add_scalar(N, eps_hat, - val_t->mutable_gpu_data()); - caffe_gpu_div(N, - val_m->gpu_data(), - val_t->gpu_data(), - val_t->mutable_gpu_data()); - - caffe_gpu_scale(N, local_rate*correction, - val_t->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); -#else - NO_GPU; -#endif - break; - } - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} - -INSTANTIATE_CLASS(Solver); -INSTANTIATE_CLASS(SGDSolver); -INSTANTIATE_CLASS(NesterovSolver); -INSTANTIATE_CLASS(AdaGradSolver); -INSTANTIATE_CLASS(RMSPropSolver); -INSTANTIATE_CLASS(AdaDeltaSolver); -INSTANTIATE_CLASS(AdamSolver); - -} // namespace caffe diff --git a/src/caffe/solver.cpp.orig.orig b/src/caffe/solver.cpp.orig.orig deleted file mode 100644 index 8b6f3c51779..00000000000 --- a/src/caffe/solver.cpp.orig.orig +++ /dev/null @@ -1,804 +0,0 @@ -#include - -#include -#include -#include - -#include "caffe/net.hpp" -#include "caffe/proto/caffe.pb.h" -#include "caffe/solver.hpp" -#include "caffe/util/io.hpp" -#include "caffe/util/math_functions.hpp" -#include "caffe/util/upgrade_proto.hpp" - -namespace caffe { - -template -Solver::Solver(const SolverParameter& param) - : net_() { - Init(param); -} - -template -Solver::Solver(const string& param_file) - : net_() { - SolverParameter param; - ReadProtoFromTextFileOrDie(param_file, ¶m); - Init(param); -} - -template -void Solver::Init(const SolverParameter& param) { - LOG(INFO) << "Initializing solver from parameters: " << std::endl - << param.DebugString(); - param_ = param; - CHECK_GE(param_.average_loss(), 1) << "average_loss should be non-negative."; - if 
(param_.random_seed() >= 0) { - Caffe::set_random_seed(param_.random_seed()); - } - // Scaffolding code - InitTrainNet(); - InitTestNets(); - LOG(INFO) << "Solver scaffolding done."; - iter_ = 0; - current_step_ = 0; -} - -template -void Solver::InitTrainNet() { - const int num_train_nets = param_.has_net() + param_.has_net_param() + - param_.has_train_net() + param_.has_train_net_param(); - const string& field_names = "net, net_param, train_net, train_net_param"; - CHECK_GE(num_train_nets, 1) << "SolverParameter must specify a train net " - << "using one of these fields: " << field_names; - CHECK_LE(num_train_nets, 1) << "SolverParameter must not contain more than " - << "one of these fields specifying a train_net: " << field_names; - NetParameter net_param; - if (param_.has_train_net_param()) { - LOG(INFO) << "Creating training net specified in train_net_param."; - net_param.CopyFrom(param_.train_net_param()); - } else if (param_.has_train_net()) { - LOG(INFO) << "Creating training net from train_net file: " - << param_.train_net(); - ReadNetParamsFromTextFileOrDie(param_.train_net(), &net_param); - } - if (param_.has_net_param()) { - LOG(INFO) << "Creating training net specified in net_param."; - net_param.CopyFrom(param_.net_param()); - } - if (param_.has_net()) { - LOG(INFO) << "Creating training net from net file: " << param_.net(); - ReadNetParamsFromTextFileOrDie(param_.net(), &net_param); - } - // Set the correct NetState. We start with the solver defaults (lowest - // precedence); then, merge in any NetState specified by the net_param itself; - // finally, merge in any NetState specified by the train_state (highest - // precedence). 
- NetState net_state; - net_state.set_phase(TRAIN); - net_state.MergeFrom(net_param.state()); - net_state.MergeFrom(param_.train_state()); - net_param.mutable_state()->CopyFrom(net_state); - net_.reset(new Net(net_param)); -} - -template -void Solver::InitTestNets() { - const bool has_net_param = param_.has_net_param(); - const bool has_net_file = param_.has_net(); - const int num_generic_nets = has_net_param + has_net_file; - CHECK_LE(num_generic_nets, 1) - << "Both net_param and net_file may not be specified."; - const int num_test_net_params = param_.test_net_param_size(); - const int num_test_net_files = param_.test_net_size(); - const int num_test_nets = num_test_net_params + num_test_net_files; - if (num_generic_nets) { - CHECK_GE(param_.test_iter_size(), num_test_nets) - << "test_iter must be specified for each test network."; - } else { - CHECK_EQ(param_.test_iter_size(), num_test_nets) - << "test_iter must be specified for each test network."; - } - // If we have a generic net (specified by net or net_param, rather than - // test_net or test_net_param), we may have an unlimited number of actual - // test networks -- the actual number is given by the number of remaining - // test_iters after any test nets specified by test_net_param and/or test_net - // are evaluated. 
- const int num_generic_net_instances = param_.test_iter_size() - num_test_nets; - const int num_test_net_instances = num_test_nets + num_generic_net_instances; - if (param_.test_state_size()) { - CHECK_EQ(param_.test_state_size(), num_test_net_instances) - << "test_state must be unspecified or specified once per test net."; - } - if (num_test_net_instances) { - CHECK_GT(param_.test_interval(), 0); - } - int test_net_id = 0; - vector sources(num_test_net_instances); - vector net_params(num_test_net_instances); - for (int i = 0; i < num_test_net_params; ++i, ++test_net_id) { - sources[test_net_id] = "test_net_param"; - net_params[test_net_id].CopyFrom(param_.test_net_param(i)); - } - for (int i = 0; i < num_test_net_files; ++i, ++test_net_id) { - sources[test_net_id] = "test_net file: " + param_.test_net(i); - ReadNetParamsFromTextFileOrDie(param_.test_net(i), - &net_params[test_net_id]); - } - const int remaining_test_nets = param_.test_iter_size() - test_net_id; - if (has_net_param) { - for (int i = 0; i < remaining_test_nets; ++i, ++test_net_id) { - sources[test_net_id] = "net_param"; - net_params[test_net_id].CopyFrom(param_.net_param()); - } - } - if (has_net_file) { - for (int i = 0; i < remaining_test_nets; ++i, ++test_net_id) { - sources[test_net_id] = "net file: " + param_.net(); - ReadNetParamsFromTextFileOrDie(param_.net(), &net_params[test_net_id]); - } - } - test_nets_.resize(num_test_net_instances); - for (int i = 0; i < num_test_net_instances; ++i) { - // Set the correct NetState. We start with the solver defaults (lowest - // precedence); then, merge in any NetState specified by the net_param - // itself; finally, merge in any NetState specified by the test_state - // (highest precedence). 
- NetState net_state; - net_state.set_phase(TEST); - net_state.MergeFrom(net_params[i].state()); - if (param_.test_state_size()) { - net_state.MergeFrom(param_.test_state(i)); - } - net_params[i].mutable_state()->CopyFrom(net_state); - LOG(INFO) - << "Creating test net (#" << i << ") specified by " << sources[i]; - test_nets_[i].reset(new Net(net_params[i])); - test_nets_[i]->set_debug_info(param_.debug_info()); - } -} - -template -void Solver::Step(int iters) { - vector*> bottom_vec; - const int start_iter = iter_; - const int stop_iter = iter_ + iters; - int average_loss = this->param_.average_loss(); - vector losses; - Dtype smoothed_loss = 0; - - while (iter_ < stop_iter) { - // zero-init the params - for (int i = 0; i < net_->params().size(); ++i) { - shared_ptr > blob = net_->params()[i]; - switch (Caffe::mode()) { - case Caffe::CPU: - caffe_set(blob->count(), static_cast(0), - blob->mutable_cpu_diff()); - break; - case Caffe::GPU: -#ifndef CPU_ONLY - caffe_gpu_set(blob->count(), static_cast(0), - blob->mutable_gpu_diff()); -#else - NO_GPU; -#endif - break; - } - } - - if (param_.test_interval() && iter_ % param_.test_interval() == 0 - && (iter_ > 0 || param_.test_initialization())) { - TestAll(); - } - - const bool display = param_.display() && iter_ % param_.display() == 0; - net_->set_debug_info(display && param_.debug_info()); - // accumulate the loss and gradient - Dtype loss = 0; - for (int i = 0; i < param_.iter_size(); ++i) { - loss += net_->ForwardBackward(bottom_vec); - } - loss /= param_.iter_size(); - // average the loss across iterations for smoothed reporting - if (losses.size() < average_loss) { - losses.push_back(loss); - int size = losses.size(); - smoothed_loss = (smoothed_loss * (size - 1) + loss) / size; - } else { - int idx = (iter_ - start_iter) % average_loss; - smoothed_loss += (loss - losses[idx]) / average_loss; - losses[idx] = loss; - } - if (display) { - LOG(INFO) << "Iteration " << iter_ << ", loss = " << smoothed_loss; - const 
vector*>& result = net_->output_blobs(); - int score_index = 0; - for (int j = 0; j < result.size(); ++j) { - const Dtype* result_vec = result[j]->cpu_data(); - const string& output_name = - net_->blob_names()[net_->output_blob_indices()[j]]; - const Dtype loss_weight = - net_->blob_loss_weights()[net_->output_blob_indices()[j]]; - for (int k = 0; k < result[j]->count(); ++k) { - ostringstream loss_msg_stream; - if (loss_weight) { - loss_msg_stream << " (* " << loss_weight - << " = " << loss_weight * result_vec[k] << " loss)"; - } - LOG(INFO) << " Train net output #" - << score_index++ << ": " << output_name << " = " - << result_vec[k] << loss_msg_stream.str(); - } - } - } - ApplyUpdate(); - - // Increment the internal iter_ counter -- its value should always indicate - // the number of times the weights have been updated. - ++iter_; - - // Save a snapshot if needed. - if (param_.snapshot() && iter_ % param_.snapshot() == 0) { - Snapshot(); - } - } -} - -template -void Solver::Solve(const char* resume_file) { - LOG(INFO) << "Solving " << net_->name(); - LOG(INFO) << "Learning Rate Policy: " << param_.lr_policy(); - - if (resume_file) { - LOG(INFO) << "Restoring previous solver status from " << resume_file; - Restore(resume_file); - } - - // For a network that is trained by the solver, no bottom or top vecs - // should be given, and we will just provide dummy vecs. - Step(param_.max_iter() - iter_); - // If we haven't already, save a snapshot after optimization, unless - // overridden by setting snapshot_after_train := false - if (param_.snapshot_after_train() - && (!param_.snapshot() || iter_ % param_.snapshot() != 0)) { - Snapshot(); - } - // After the optimization is done, run an additional train and test pass to - // display the train and test loss/outputs if appropriate (based on the - // display and test_interval settings, respectively). 
Unlike in the rest of - // training, for the train net we only run a forward pass as we've already - // updated the parameters "max_iter" times -- this final pass is only done to - // display the loss, which is computed in the forward pass. - if (param_.display() && iter_ % param_.display() == 0) { - Dtype loss; - net_->ForwardPrefilled(&loss); - LOG(INFO) << "Iteration " << iter_ << ", loss = " << loss; - } - if (param_.test_interval() && iter_ % param_.test_interval() == 0) { - TestAll(); - } - LOG(INFO) << "Optimization Done."; -} - - -template -void Solver::TestAll() { - for (int test_net_id = 0; test_net_id < test_nets_.size(); ++test_net_id) { - Test(test_net_id); - } -} - -template -void Solver::Test(const int test_net_id) { - LOG(INFO) << "Iteration " << iter_ - << ", Testing net (#" << test_net_id << ")"; - CHECK_NOTNULL(test_nets_[test_net_id].get())-> - ShareTrainedLayersWith(net_.get()); - vector test_score; - vector test_score_output_id; - vector*> bottom_vec; - const shared_ptr >& test_net = test_nets_[test_net_id]; - Dtype loss = 0; - for (int i = 0; i < param_.test_iter(test_net_id); ++i) { - Dtype iter_loss; - const vector*>& result = - test_net->Forward(bottom_vec, &iter_loss); - if (param_.test_compute_loss()) { - loss += iter_loss; - } - if (i == 0) { - for (int j = 0; j < result.size(); ++j) { - const Dtype* result_vec = result[j]->cpu_data(); - for (int k = 0; k < result[j]->count(); ++k) { - test_score.push_back(result_vec[k]); - test_score_output_id.push_back(j); - } - } - } else { - int idx = 0; - for (int j = 0; j < result.size(); ++j) { - const Dtype* result_vec = result[j]->cpu_data(); - for (int k = 0; k < result[j]->count(); ++k) { - test_score[idx++] += result_vec[k]; - } - } - } - } - if (param_.test_compute_loss()) { - loss /= param_.test_iter(test_net_id); - LOG(INFO) << "Test loss: " << loss; - } - for (int i = 0; i < test_score.size(); ++i) { - const int output_blob_index = - 
test_net->output_blob_indices()[test_score_output_id[i]]; - const string& output_name = test_net->blob_names()[output_blob_index]; - const Dtype loss_weight = test_net->blob_loss_weights()[output_blob_index]; - ostringstream loss_msg_stream; - const Dtype mean_score = test_score[i] / param_.test_iter(test_net_id); - if (loss_weight) { - loss_msg_stream << " (* " << loss_weight - << " = " << loss_weight * mean_score << " loss)"; - } - LOG(INFO) << " Test net output #" << i << ": " << output_name << " = " - << mean_score << loss_msg_stream.str(); - } -} - - -template -void Solver::Snapshot() { - NetParameter net_param; - // For intermediate results, we will also dump the gradient values. - net_->ToProto(&net_param, param_.snapshot_diff()); - string filename(param_.snapshot_prefix()); - string model_filename, snapshot_filename; - const int kBufferSize = 20; - char iter_str_buffer[kBufferSize]; - snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_); - filename += iter_str_buffer; - model_filename = filename + ".caffemodel"; - LOG(INFO) << "Snapshotting to " << model_filename; - WriteProtoToBinaryFile(net_param, model_filename.c_str()); - SolverState state; - SnapshotSolverState(&state); - state.set_iter(iter_); - state.set_learned_net(model_filename); - state.set_current_step(current_step_); - snapshot_filename = filename + ".solverstate"; - LOG(INFO) << "Snapshotting solver state to " << snapshot_filename; - WriteProtoToBinaryFile(state, snapshot_filename.c_str()); -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -} - -template -string Solver::SnapshotToBinaryProto() { - string model_filename = SnapshotFilename(".caffemodel"); - LOG(INFO) << "Snapshotting to binary proto file " << model_filename; - NetParameter net_param; - net_->ToProto(&net_param, param_.snapshot_diff()); - WriteProtoToBinaryFile(net_param, model_filename); - return model_filename; -} - -template -string Solver::SnapshotToHDF5() { - string model_filename = 
SnapshotFilename(".caffemodel.h5"); - LOG(INFO) << "Snapshotting to HDF5 file " << model_filename; - net_->ToHDF5(model_filename, param_.snapshot_diff()); - return model_filename; ->>>>>>> triplet data generation and network update -} - -template -void Solver::Restore(const char* state_file) { - SolverState state; - NetParameter net_param; - ReadProtoFromBinaryFile(state_file, &state); - if (state.has_learned_net()) { - ReadNetParamsFromBinaryFileOrDie(state.learned_net().c_str(), &net_param); - net_->CopyTrainedLayersFrom(net_param); - } - iter_ = state.iter(); - current_step_ = state.current_step(); - RestoreSolverState(state); -} - - -// Return the current learning rate. The currently implemented learning rate -// policies are as follows: -// - fixed: always return base_lr. -// - step: return base_lr * gamma ^ (floor(iter / step)) -// - exp: return base_lr * gamma ^ iter -// - inv: return base_lr * (1 + gamma * iter) ^ (- power) -// - multistep: similar to step but it allows non uniform steps defined by -// stepvalue -// - poly: the effective learning rate follows a polynomial decay, to be -// zero by the max_iter. return base_lr (1 - iter/max_iter) ^ (power) -// - sigmoid: the effective learning rate follows a sigmod decay -// return base_lr ( 1/(1 + exp(-gamma * (iter - stepsize)))) -// -// where base_lr, max_iter, gamma, step, stepvalue and power are defined -// in the solver parameter protocol buffer, and iter is the current iteration. 
-template -Dtype SGDSolver::GetLearningRate() { - Dtype rate; - const string& lr_policy = this->param_.lr_policy(); - if (lr_policy == "fixed") { - rate = this->param_.base_lr(); - } else if (lr_policy == "step") { - this->current_step_ = this->iter_ / this->param_.stepsize(); - rate = this->param_.base_lr() * - pow(this->param_.gamma(), this->current_step_); - } else if (lr_policy == "exp") { - rate = this->param_.base_lr() * pow(this->param_.gamma(), this->iter_); - } else if (lr_policy == "inv") { - rate = this->param_.base_lr() * - pow(Dtype(1) + this->param_.gamma() * this->iter_, - - this->param_.power()); - } else if (lr_policy == "multistep") { - if (this->current_step_ < this->param_.stepvalue_size() && - this->iter_ >= this->param_.stepvalue(this->current_step_)) { - this->current_step_++; - LOG(INFO) << "MultiStep Status: Iteration " << - this->iter_ << ", step = " << this->current_step_; - } - rate = this->param_.base_lr() * - pow(this->param_.gamma(), this->current_step_); - } else if (lr_policy == "poly") { - rate = this->param_.base_lr() * pow(Dtype(1.) - - (Dtype(this->iter_) / Dtype(this->param_.max_iter())), - this->param_.power()); - } else if (lr_policy == "sigmoid") { - rate = this->param_.base_lr() * (Dtype(1.) / - (Dtype(1.) 
+ exp(-this->param_.gamma() * (Dtype(this->iter_) - - Dtype(this->param_.stepsize()))))); - } else { - LOG(FATAL) << "Unknown learning rate policy: " << lr_policy; - } - return rate; -} - -template -void SGDSolver::PreSolve() { - // Initialize the history - const vector > >& net_params = this->net_->params(); - history_.clear(); - update_.clear(); - temp_.clear(); - for (int i = 0; i < net_params.size(); ++i) { - const vector& shape = net_params[i]->shape(); - history_.push_back(shared_ptr >(new Blob(shape))); - update_.push_back(shared_ptr >(new Blob(shape))); - temp_.push_back(shared_ptr >(new Blob(shape))); - } -} - -template -void SGDSolver::ClipGradients() { - const Dtype clip_gradients = this->param_.clip_gradients(); - if (clip_gradients < 0) { return; } - const vector > >& net_params = this->net_->params(); - Dtype sumsq_diff = 0; - for (int i = 0; i < net_params.size(); ++i) { - if (this->net_->param_owners()[i] < 0) { - sumsq_diff += net_params[i]->sumsq_diff(); - } - } - const Dtype l2norm_diff = std::sqrt(sumsq_diff); - if (l2norm_diff > clip_gradients) { - Dtype scale_factor = clip_gradients / l2norm_diff; - LOG(INFO) << "Gradient clipping: scaling down gradients (L2 norm " - << l2norm_diff << " > " << clip_gradients << ") " - << "by scale factor " << scale_factor; - for (int i = 0; i < net_params.size(); ++i) { - if (this->net_->param_owners()[i] < 0) { - net_params[i]->scale_diff(scale_factor); - } - } - } -} - -template -void SGDSolver::ApplyUpdate() { - Dtype rate = GetLearningRate(); - if (this->param_.display() && this->iter_ % this->param_.display() == 0) { - LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; - } - ClipGradients(); - for (int param_id = 0; param_id < this->net_->params().size(); ++param_id) { - Normalize(param_id); - Regularize(param_id); - ComputeUpdateValue(param_id, rate); - } - this->net_->Update(); -} - -template -void SGDSolver::Normalize(int param_id) { - if (this->param_.iter_size() == 1) { return; } - // 
Scale gradient to counterbalance accumulation. - const vector > >& net_params = this->net_->params(); - const Dtype accum_normalization = Dtype(1.) / this->param_.iter_size(); - switch (Caffe::mode()) { - case Caffe::CPU: { - caffe_scal(net_params[param_id]->count(), accum_normalization, - net_params[param_id]->mutable_cpu_diff()); - break; - } - case Caffe::GPU: { -#ifndef CPU_ONLY - caffe_gpu_scal(net_params[param_id]->count(), accum_normalization, - net_params[param_id]->mutable_gpu_diff()); -#else - NO_GPU; -#endif - break; - } - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} - -template -void SGDSolver::Regularize(int param_id) { - const vector > >& net_params = this->net_->params(); - const vector& net_params_weight_decay = - this->net_->params_weight_decay(); - Dtype weight_decay = this->param_.weight_decay(); - string regularization_type = this->param_.regularization_type(); - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - switch (Caffe::mode()) { - case Caffe::CPU: { - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else if (regularization_type == "L1") { - caffe_cpu_sign(net_params[param_id]->count(), - net_params[param_id]->cpu_data(), - temp_[param_id]->mutable_cpu_data()); - caffe_axpy(net_params[param_id]->count(), - local_decay, - temp_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } - } - break; - } - case Caffe::GPU: { -#ifndef CPU_ONLY - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else if (regularization_type == "L1") { - 
caffe_gpu_sign(net_params[param_id]->count(), - net_params[param_id]->gpu_data(), - temp_[param_id]->mutable_gpu_data()); - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - temp_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } - } -#else - NO_GPU; -#endif - break; - } - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} - -template -void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { - const vector > >& net_params = this->net_->params(); - const vector& net_params_lr = this->net_->params_lr(); - Dtype momentum = this->param_.momentum(); - Dtype local_rate = rate * net_params_lr[param_id]; - // Compute the update to history, then copy it to the parameter diff. - switch (Caffe::mode()) { - case Caffe::CPU: { - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->cpu_diff(), momentum, - history_[param_id]->mutable_cpu_data()); - caffe_copy(net_params[param_id]->count(), - history_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - break; - } - case Caffe::GPU: { -#ifndef CPU_ONLY - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->gpu_diff(), momentum, - history_[param_id]->mutable_gpu_data()); - caffe_copy(net_params[param_id]->count(), - history_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); -#else - NO_GPU; -#endif - break; - } - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} - -template -void SGDSolver::SnapshotSolverState(SolverState* state) { - state->clear_history(); - for (int i = 0; i < history_.size(); ++i) { - // Add history - BlobProto* history_blob = state->add_history(); - history_[i]->ToProto(history_blob); - } -} - -template -void SGDSolver::RestoreSolverState(const SolverState& state) { - CHECK_EQ(state.history_size(), history_.size()) - << "Incorrect length of history 
blobs."; - LOG(INFO) << "SGDSolver: restoring history"; - for (int i = 0; i < history_.size(); ++i) { - history_[i]->FromProto(state.history(i)); - } -} - -template -void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { - const vector > >& net_params = this->net_->params(); - const vector& net_params_lr = this->net_->params_lr(); - Dtype momentum = this->param_.momentum(); - Dtype local_rate = rate * net_params_lr[param_id]; - switch (Caffe::mode()) { - case Caffe::CPU: { - // save history momentum for stepping back - caffe_copy(net_params[param_id]->count(), - this->history_[param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); - - // update history - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->cpu_diff(), momentum, - this->history_[param_id]->mutable_cpu_data()); - - // compute update: step back then over step - caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, - this->history_[param_id]->cpu_data(), -momentum, - this->update_[param_id]->mutable_cpu_data()); - - // copy - caffe_copy(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - break; - } - case Caffe::GPU: { -#ifndef CPU_ONLY - // save history momentum for stepping back - caffe_copy(net_params[param_id]->count(), - this->history_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - // update history - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->gpu_diff(), momentum, - this->history_[param_id]->mutable_gpu_data()); - - // compute update: step back then over step - caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, - this->history_[param_id]->gpu_data(), -momentum, - this->update_[param_id]->mutable_gpu_data()); - - // copy - caffe_copy(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); -#else - NO_GPU; -#endif - 
break; - } - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} - -template -void AdaGradSolver::ComputeUpdateValue(int param_id, Dtype rate) { - const vector > >& net_params = this->net_->params(); - const vector& net_params_lr = this->net_->params_lr(); - Dtype delta = this->param_.delta(); - Dtype local_rate = rate * net_params_lr[param_id]; - switch (Caffe::mode()) { - case Caffe::CPU: { - // compute square of gradient in update - caffe_powx(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), Dtype(2), - this->update_[param_id]->mutable_cpu_data()); - - // update history - caffe_add(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), - this->history_[param_id]->cpu_data(), - this->history_[param_id]->mutable_cpu_data()); - - // prepare update - caffe_powx(net_params[param_id]->count(), - this->history_[param_id]->cpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_cpu_data()); - - caffe_add_scalar(net_params[param_id]->count(), - delta, this->update_[param_id]->mutable_cpu_data()); - - caffe_div(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), - this->update_[param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); - - // scale and copy - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->cpu_data(), Dtype(0), - net_params[param_id]->mutable_cpu_diff()); - break; - } - case Caffe::GPU: { -#ifndef CPU_ONLY - // compute square of gradient in update - caffe_gpu_powx(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), Dtype(2), - this->update_[param_id]->mutable_gpu_data()); - - // update history - caffe_gpu_add(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), - this->history_[param_id]->gpu_data(), - this->history_[param_id]->mutable_gpu_data()); - - // prepare update - caffe_gpu_powx(net_params[param_id]->count(), - this->history_[param_id]->gpu_data(), Dtype(0.5), - 
this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_add_scalar(net_params[param_id]->count(), - delta, this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_div(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), - this->update_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - // scale and copy - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->gpu_data(), Dtype(0), - net_params[param_id]->mutable_gpu_diff()); -#else - NO_GPU; -#endif - break; - } - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} - -INSTANTIATE_CLASS(Solver); -INSTANTIATE_CLASS(SGDSolver); -INSTANTIATE_CLASS(NesterovSolver); -INSTANTIATE_CLASS(AdaGradSolver); - -} // namespace caffe diff --git a/src/caffe/test/test_accuracy_layer.cpp b/src/caffe/test/test_accuracy_layer.cpp index ef0e57a37a1..94e529b5eee 100644 --- a/src/caffe/test/test_accuracy_layer.cpp +++ b/src/caffe/test/test_accuracy_layer.cpp @@ -250,6 +250,7 @@ TYPED_TEST(AccuracyLayerTest, TestForwardCPUTopK) { TYPED_TEST(AccuracyLayerTest, TestForwardCPUPerClass) { LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); AccuracyLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_per_class_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_per_class_vec_); @@ -278,16 +279,16 @@ TYPED_TEST(AccuracyLayerTest, TestForwardCPUPerClass) { EXPECT_NEAR(this->blob_top_->data_at(0, 0, 0, 0), num_correct_labels / 100.0, 1e-4); for (int i = 0; i < num_class; ++i) { - TypeParam accuracy_per_class = (num_per_class[i] > 0 ? 
- static_cast(correct_per_class[i]) / num_per_class[i] : 0); EXPECT_NEAR(this->blob_top_per_class_->data_at(i, 0, 0, 0), - accuracy_per_class, 1e-4); + static_cast(correct_per_class[i]) / num_per_class[i], + 1e-4); } } TYPED_TEST(AccuracyLayerTest, TestForwardCPUPerClassWithIgnoreLabel) { LayerParameter layer_param; + Caffe::set_mode(Caffe::CPU); const TypeParam kIgnoreLabelValue = -1; layer_param.mutable_accuracy_param()->set_ignore_label(kIgnoreLabelValue); AccuracyLayer layer(layer_param); @@ -328,10 +329,9 @@ TYPED_TEST(AccuracyLayerTest, TestForwardCPUPerClassWithIgnoreLabel) { EXPECT_NEAR(this->blob_top_->data_at(0, 0, 0, 0), num_correct_labels / TypeParam(count), 1e-4); for (int i = 0; i < 10; ++i) { - TypeParam accuracy_per_class = (num_per_class[i] > 0 ? - static_cast(correct_per_class[i]) / num_per_class[i] : 0); EXPECT_NEAR(this->blob_top_per_class_->data_at(i, 0, 0, 0), - accuracy_per_class, 1e-4); + TypeParam(correct_per_class[i]) / num_per_class[i], + 1e-4); } } diff --git a/src/caffe/test/test_concat_layer.cpp b/src/caffe/test/test_concat_layer.cpp index ccd97eb1d66..088e0a41685 100644 --- a/src/caffe/test/test_concat_layer.cpp +++ b/src/caffe/test/test_concat_layer.cpp @@ -99,19 +99,6 @@ TYPED_TEST(ConcatLayerTest, TestSetupChannelsNegativeIndexing) { EXPECT_EQ(this->blob_top_->width(), this->blob_bottom_0_->width()); } -TYPED_TEST(ConcatLayerTest, TestForwardTrivial) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - ConcatLayer layer(layer_param); - this->blob_bottom_vec_0_.resize(1); - layer.SetUp(this->blob_bottom_vec_0_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_0_, this->blob_top_vec_); - for (int i = 0; i < this->blob_bottom_0_->count(); ++i) { - EXPECT_EQ(this->blob_bottom_0_->cpu_data()[i], - this->blob_top_->cpu_data()[i]); - } -} - TYPED_TEST(ConcatLayerTest, TestForwardNum) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; @@ -167,16 +154,6 @@ TYPED_TEST(ConcatLayerTest, 
TestForwardChannels) { } } -TYPED_TEST(ConcatLayerTest, TestGradientTrivial) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - ConcatLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-2); - this->blob_bottom_vec_0_.resize(1); - checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_0_, - this->blob_top_vec_); -} - TYPED_TEST(ConcatLayerTest, TestGradientNum) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; diff --git a/src/caffe/test/test_data/generate_sample_data.py b/src/caffe/test/test_data/generate_sample_data.py index a9b3c866d6c..3703b41823b 100644 --- a/src/caffe/test/test_data/generate_sample_data.py +++ b/src/caffe/test/test_data/generate_sample_data.py @@ -1,5 +1,5 @@ """ -Generate data used in the HDF5DataLayer test. +Generate data used in the HDF5DataLayer and GradientBasedSolver tests. """ import os import numpy as np @@ -7,6 +7,8 @@ script_dir = os.path.dirname(os.path.abspath(__file__)) +# Generate HDF5DataLayer sample_data.h5 + num_cols = 8 num_rows = 10 height = 6 @@ -75,13 +77,3 @@ with open(script_dir + '/solver_data_list.txt', 'w') as f: f.write(script_dir + '/solver_data.h5\n') -======= -with open(os.path.dirname(__file__) + '/sample_data_list.txt', 'w') as f: - f.write(os.path.dirname(__file__) + '/sample_data.h5\n') - f.write(os.path.dirname(__file__) + '/sample_data_2_gzip.h5\n') ->>>>>>> triplet data generation and network update -======= -with open(script_dir + '/sample_data_list.txt', 'w') as f: - f.write(script_dir + '/sample_data.h5\n') - f.write(script_dir + '/sample_data_2_gzip.h5\n') ->>>>>>> restore diff --git a/src/caffe/test/test_data/generate_sample_data.py.orig.orig b/src/caffe/test/test_data/generate_sample_data.py.orig.orig deleted file mode 100644 index 8e2a6e94175..00000000000 --- a/src/caffe/test/test_data/generate_sample_data.py.orig.orig +++ /dev/null @@ -1,105 +0,0 @@ -""" -Generate data used in the HDF5DataLayer test. 
-""" -import os -import numpy as np -import h5py - -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 -script_dir = os.path.dirname(os.path.abspath(__file__)) - -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -# Generate HDF5DataLayer sample_data.h5 - -======= ->>>>>>> triplet data generation and network update -======= -script_dir = os.path.dirname(os.path.abspath(__file__)) - ->>>>>>> restore ->>>>>>> triplet data generation and network update -num_cols = 8 -num_rows = 10 -height = 6 -width = 5 -total_size = num_cols * num_rows * height * width - -data = np.arange(total_size) -data = data.reshape(num_rows, num_cols, height, width) -data = data.astype('float32') - -# We had a bug where data was copied into label, but the tests weren't -# catching it, so let's make label 1-indexed. -label = 1 + np.arange(num_rows)[:, np.newaxis] -label = label.astype('float32') - -# We add an extra label2 dataset to test HDF5 layer's ability -# to handle arbitrary number of output ("top") Blobs. 
-label2 = label + 1 - -print data -print label - -with h5py.File(script_dir + '/sample_data.h5', 'w') as f: - f['data'] = data - f['label'] = label - f['label2'] = label2 - -with h5py.File(script_dir + '/sample_data_2_gzip.h5', 'w') as f: - f.create_dataset( - 'data', data=data + total_size, - compression='gzip', compression_opts=1 - ) - f.create_dataset( - 'label', data=label, - compression='gzip', compression_opts=1 - ) - f.create_dataset( - 'label2', data=label2, - compression='gzip', compression_opts=1 - ) - -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 -with open(script_dir + '/sample_data_list.txt', 'w') as f: - f.write(script_dir + '/sample_data.h5\n') - f.write(script_dir + '/sample_data_2_gzip.h5\n') -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= - -# Generate GradientBasedSolver solver_data.h5 - -num_cols = 3 -num_rows = 8 -height = 10 -width = 10 - -data = np.random.randn(num_rows, num_cols, height, width) -data = data.reshape(num_rows, num_cols, height, width) -data = data.astype('float32') - -targets = np.random.randn(num_rows, 1) -targets = targets.astype('float32') - -print data -print targets - -with h5py.File(script_dir + '/solver_data.h5', 'w') as f: - f['data'] = data - f['targets'] = targets - -with open(script_dir + '/solver_data_list.txt', 'w') as f: - f.write(script_dir + '/solver_data.h5\n') -======= -with open(os.path.dirname(__file__) + '/sample_data_list.txt', 'w') as f: - f.write(os.path.dirname(__file__) + '/sample_data.h5\n') - f.write(os.path.dirname(__file__) + '/sample_data_2_gzip.h5\n') ->>>>>>> triplet data generation and network update -======= -with open(script_dir + '/sample_data_list.txt', 'w') as f: - f.write(script_dir + '/sample_data.h5\n') - f.write(script_dir + '/sample_data_2_gzip.h5\n') ->>>>>>> restore ->>>>>>> triplet data generation and network update diff --git a/src/caffe/test/test_data_layer.cpp b/src/caffe/test/test_data_layer.cpp index 
9e03954a543..afe2a40d227 100644 --- a/src/caffe/test/test_data_layer.cpp +++ b/src/caffe/test/test_data_layer.cpp @@ -1,4 +1,3 @@ -#ifdef USE_OPENCV #include #include @@ -349,7 +348,6 @@ class DataLayerTest : public MultiDeviceTest { TYPED_TEST_CASE(DataLayerTest, TestDtypesAndDevices); -#ifdef USE_LEVELDB TYPED_TEST(DataLayerTest, TestReadLevelDB) { const bool unique_pixels = false; // all pixels the same; images different this->Fill(unique_pixels, DataParameter_DB_LEVELDB); @@ -387,9 +385,7 @@ TYPED_TEST(DataLayerTest, TestReadCropTestLevelDB) { this->Fill(unique_pixels, DataParameter_DB_LEVELDB); this->TestReadCrop(TEST); } -#endif // USE_LEVELDB -#ifdef USE_LMDB TYPED_TEST(DataLayerTest, TestReadLMDB) { const bool unique_pixels = false; // all pixels the same; images different this->Fill(unique_pixels, DataParameter_DB_LMDB); @@ -428,6 +424,4 @@ TYPED_TEST(DataLayerTest, TestReadCropTestLMDB) { this->TestReadCrop(TEST); } -#endif // USE_LMDB } // namespace caffe -#endif // USE_OPENCV diff --git a/src/caffe/test/test_data_transformer.cpp b/src/caffe/test/test_data_transformer.cpp index 8a1013744e8..16570e20356 100644 --- a/src/caffe/test/test_data_transformer.cpp +++ b/src/caffe/test/test_data_transformer.cpp @@ -1,4 +1,3 @@ -#ifdef USE_OPENCV #include #include @@ -354,4 +353,3 @@ TYPED_TEST(DataTransformTest, TestMeanFile) { } } // namespace caffe -#endif // USE_OPENCV diff --git a/src/caffe/test/test_db.cpp b/src/caffe/test/test_db.cpp index 1b487b14c58..5b2ac230a0b 100644 --- a/src/caffe/test/test_db.cpp +++ b/src/caffe/test/test_db.cpp @@ -1,4 +1,3 @@ -#if defined(USE_LEVELDB) && defined(USE_LMDB) && defined(USE_OPENCV) #include #include "boost/scoped_ptr.hpp" @@ -133,4 +132,3 @@ TYPED_TEST(DBTest, TestWrite) { } } // namespace caffe -#endif // USE_LEVELDB, USE_LMDB and USE_OPENCV diff --git a/src/caffe/test/test_eltwise_layer.cpp b/src/caffe/test/test_eltwise_layer.cpp index 8031f6e9022..be0c1347709 100644 --- a/src/caffe/test/test_eltwise_layer.cpp +++ 
b/src/caffe/test/test_eltwise_layer.cpp @@ -80,7 +80,7 @@ TYPED_TEST(EltwiseLayerTest, TestProd) { const Dtype* in_data_b = this->blob_bottom_b_->cpu_data(); const Dtype* in_data_c = this->blob_bottom_c_->cpu_data(); for (int i = 0; i < count; ++i) { - EXPECT_NEAR(data[i], in_data_a[i] * in_data_b[i] * in_data_c[i], 1e-4); + EXPECT_EQ(data[i], in_data_a[i] * in_data_b[i] * in_data_c[i]); } } @@ -99,7 +99,7 @@ TYPED_TEST(EltwiseLayerTest, TestSum) { const Dtype* in_data_b = this->blob_bottom_b_->cpu_data(); const Dtype* in_data_c = this->blob_bottom_c_->cpu_data(); for (int i = 0; i < count; ++i) { - EXPECT_NEAR(data[i], in_data_a[i] + in_data_b[i] + in_data_c[i], 1e-4); + EXPECT_EQ(data[i], in_data_a[i] + in_data_b[i] + in_data_c[i]); } } diff --git a/src/caffe/test/test_gradient_based_solver.cpp b/src/caffe/test/test_gradient_based_solver.cpp index 809122404a7..7ad7467f86f 100644 --- a/src/caffe/test/test_gradient_based_solver.cpp +++ b/src/caffe/test/test_gradient_based_solver.cpp @@ -25,7 +25,14 @@ class GradientBasedSolverTest : public MultiDeviceTest { protected: GradientBasedSolverTest() : - seed_(1701), num_(4), channels_(3), height_(10), width_(10) {} + seed_(1701), num_(4), channels_(3), height_(10), width_(10), + share_(false) { + input_file_ = new string( + CMAKE_SOURCE_DIR "caffe/test/test_data/solver_data_list.txt" CMAKE_EXT); + } + ~GradientBasedSolverTest() { + delete input_file_; + } string snapshot_prefix_; shared_ptr > solver_; @@ -61,9 +68,10 @@ class GradientBasedSolverTest : public MultiDeviceTest { delta_ = param.delta(); } - void RunLeastSquaresSolver(const Dtype learning_rate, + string RunLeastSquaresSolver(const Dtype learning_rate, const Dtype weight_decay, const Dtype momentum, const int num_iters, - const int iter_size = 1) { + const int iter_size = 1, const int devices = 1, + const bool snapshot = false, const char* from_snapshot = NULL) { ostringstream proto; int device_id = 0; #ifndef CPU_ONLY @@ -77,27 +85,15 @@ class 
GradientBasedSolverTest : public MultiDeviceTest { "base_lr: " << learning_rate << " " "lr_policy: 'fixed' " "iter_size: " << iter_size << " " + "device_id: " << device_id << " " "net_param { " " name: 'TestNetwork' " " layer { " " name: 'data' " - " type: 'DummyData' " - " dummy_data_param { " - " num: " << num_ / iter_size << " " - " channels: " << channels_ << " " - " height: " << height_ << " " - " width: " << width_ << " " - " channels: 1 " - " height: 1 " - " width: 1 " - " data_filler { " - " type: 'constant' " - " value: 1.0 " - " } " - " data_filler { " - " type: 'gaussian' " - " std: 1.0 " - " } " + " type: 'HDF5Data' " + " hdf5_data_param { " + " source: '" << *(this->input_file_) << "' " + " batch_size: " << num_ / iter_size << " " " } " " top: 'data' " " top: 'targets' " @@ -658,6 +654,18 @@ TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverything) { } } +TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.5; + const int kNumIters = 4; + this->share_ = true; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { typedef typename TypeParam::Dtype Dtype; const Dtype kLearningRate = 0.01; @@ -704,18 +712,6 @@ TYPED_TEST(SGDSolverTest, TestSnapshotShare) { } } -======= -TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - const int kIterSize = 2; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} ->>>>>>> restore template class AdaGradSolverTest : public GradientBasedSolverTest { @@ -760,11 +756,24 @@ 
TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateWithEverything) { } } +TYPED_TEST(AdaGradSolverTest, + TestAdaGradLeastSquaresUpdateWithEverythingShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + this->share_ = true; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + TYPED_TEST(AdaGradSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { typedef typename TypeParam::Dtype Dtype; const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.0; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; const int kNumIters = 4; const int kIterSize = 2; this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, @@ -806,8 +815,6 @@ TYPED_TEST(AdaGradSolverTest, TestSnapshotShare) { } } -======= ->>>>>>> restore template class NesterovSolverTest : public GradientBasedSolverTest { @@ -886,6 +893,19 @@ TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithEverything) { } } +TYPED_TEST(NesterovSolverTest, + TestNesterovLeastSquaresUpdateWithEverythingShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + this->share_ = true; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { typedef typename TypeParam::Dtype Dtype; const Dtype kLearningRate = 0.01; @@ -1124,22 +1144,156 @@ TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithEverythingShare) { } } -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a TYPED_TEST(AdamSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { typedef typename TypeParam::Dtype Dtype; const Dtype 
kLearningRate = 0.01; const Dtype kWeightDecay = 0.5; -======= -TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(AdamSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { typedef typename TypeParam::Dtype Dtype; const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.1; ->>>>>>> restore + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + const int kIterSize = 2; + this->share_ = true; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(AdamSolverTest, TestSnapshot) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.9; + const int kNumIters = 4; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(AdamSolverTest, TestSnapshotShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; const Dtype kMomentum = 0.9; const int kNumIters = 4; + this->share_ = true; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +template +class RMSPropSolverTest : public GradientBasedSolverTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + virtual void InitSolver(const SolverParameter& param) { + const Dtype rms_decay = 0.95; + SolverParameter new_param = param; + new_param.set_rms_decay(rms_decay); + this->solver_.reset(new RMSPropSolver(new_param)); + } + virtual SolverParameter_SolverType solver_type() { + return SolverParameter_SolverType_RMSPROP; + } +}; + +TYPED_TEST_CASE(RMSPropSolverTest, TestDtypesAndDevices); + +TYPED_TEST(RMSPropSolverTest, 
TestRMSPropLeastSquaresUpdateWithWeightDecay) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 1.0; + const Dtype kWeightDecay = 0.5; + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay); +} + +TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithRmsDecay) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.0; + const Dtype kMomentum = 0.0; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithEverything) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.0; + const int kNumIters = 4; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(RMSPropSolverTest, + TestRMSPropLeastSquaresUpdateWithEverythingShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.0; + const int kNumIters = 4; + this->share_ = true; + for (int i = 0; i <= kNumIters; ++i) { + this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(RMSPropSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.0; + const int kNumIters = 4; + const int kIterSize = 2; + this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, + kIterSize); +} + +TYPED_TEST(RMSPropSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0.0; + const int kNumIters = 4; 
const int kIterSize = 2; + this->share_ = true; this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, kIterSize); } +TYPED_TEST(RMSPropSolverTest, TestSnapshot) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + +TYPED_TEST(RMSPropSolverTest, TestSnapshotShare) { + typedef typename TypeParam::Dtype Dtype; + const Dtype kLearningRate = 0.01; + const Dtype kWeightDecay = 0.5; + const Dtype kMomentum = 0; + const int kNumIters = 4; + this->share_ = true; + for (int i = 1; i <= kNumIters; ++i) { + this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); + } +} + } // namespace caffe diff --git a/src/caffe/test/test_gradient_based_solver.cpp.orig.orig b/src/caffe/test/test_gradient_based_solver.cpp.orig.orig deleted file mode 100644 index b39955cd038..00000000000 --- a/src/caffe/test/test_gradient_based_solver.cpp.orig.orig +++ /dev/null @@ -1,967 +0,0 @@ -#include -#include -#include -#include - -#include "google/protobuf/text_format.h" - -#include "gtest/gtest.h" - -#include "caffe/common.hpp" -#include "caffe/proto/caffe.pb.h" -#include "caffe/solver.hpp" - -#include "caffe/test/test_caffe_main.hpp" - -using std::ostringstream; - -namespace caffe { - -template -class GradientBasedSolverTest : public MultiDeviceTest { - typedef typename TypeParam::Dtype Dtype; - - protected: - GradientBasedSolverTest() : -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 - seed_(1701), num_(4), channels_(3), height_(10), width_(10) {} -======= -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 - seed_(1701), num_(4), channels_(3), height_(10), width_(10), - share_(false) { - input_file_ = new string( - CMAKE_SOURCE_DIR "caffe/test/test_data/solver_data_list.txt" CMAKE_EXT); - } - 
~GradientBasedSolverTest() { - delete input_file_; - } -======= - seed_(1701), num_(5), channels_(3), height_(10), width_(10) {} ->>>>>>> triplet data generation and network update -======= - seed_(1701), num_(4), channels_(3), height_(10), width_(10) {} ->>>>>>> restore ->>>>>>> triplet data generation and network update - - shared_ptr > solver_; - int seed_; - int num_, channels_, height_, width_; - Dtype delta_; // Stability constant for AdaGrad. - - virtual SolverParameter_SolverType solver_type() = 0; - virtual void InitSolver(const SolverParameter& param) = 0; - - virtual void InitSolverFromProtoString(const string& proto) { - SolverParameter param; - CHECK(google::protobuf::TextFormat::ParseFromString(proto, ¶m)); - // Disable saving a final snapshot so the tests don't pollute the user's - // working directory with useless snapshots. - param.set_snapshot_after_train(false); - // Set the solver_mode according to current Caffe::mode. - switch (Caffe::mode()) { - case Caffe::CPU: - param.set_solver_mode(SolverParameter_SolverMode_CPU); - break; - case Caffe::GPU: - param.set_solver_mode(SolverParameter_SolverMode_GPU); - break; - default: - LOG(FATAL) << "Unknown Caffe mode: " << Caffe::mode(); - } - InitSolver(param); - delta_ = (solver_type() == SolverParameter_SolverType_ADAGRAD) ? 
- param.delta() : 0; - } - -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 - void RunLeastSquaresSolver(const Dtype learning_rate, - const Dtype weight_decay, const Dtype momentum, const int num_iters, - const int iter_size = 1) { -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 - string RunLeastSquaresSolver(const Dtype learning_rate, - const Dtype weight_decay, const Dtype momentum, const int num_iters, - const int iter_size = 1, const int devices = 1, - const bool snapshot = false, const char* from_snapshot = NULL) { -======= - void RunLeastSquaresSolver(const Dtype learning_rate, -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a - const Dtype weight_decay, const Dtype momentum, const int num_iters) { ->>>>>>> triplet data generation and network update -======= - const Dtype weight_decay, const Dtype momentum, const int num_iters, - const int iter_size = 1) { ->>>>>>> restore ->>>>>>> triplet data generation and network update - ostringstream proto; - proto << - "max_iter: " << num_iters << " " - "base_lr: " << learning_rate << " " - "lr_policy: 'fixed' " -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 - "iter_size: " << iter_size << " " -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= - "device_id: " << device_id << " " -======= ->>>>>>> triplet data generation and network update -======= - "iter_size: " << iter_size << " " ->>>>>>> restore ->>>>>>> triplet data generation and network update - "net_param { " - " name: 'TestNetwork' " - " layer { " - " name: 'data' " -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 - " type: 'HDF5Data' " - " hdf5_data_param { " - " source: '" << *(this->input_file_) << "' " - " batch_size: " << num_ / iter_size << " " -======= ->>>>>>> triplet data generation and network update - " type: 'DummyData' " - " dummy_data_param { " - " num: " << num_ / iter_size << " " - " channels: " << channels_ << " 
" - " height: " << height_ << " " - " width: " << width_ << " " - " channels: 1 " - " height: 1 " - " width: 1 " - " data_filler { " - " type: 'constant' " - " value: 1.0 " - " } " - " data_filler { " - " type: 'gaussian' " - " std: 1.0 " - " } " -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update - " } " - " top: 'data' " - " top: 'targets' " - " } " - " layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " inner_product_param { " - " num_output: 1 " - " weight_filler { " - " type: 'gaussian' " - " std: 1.0 " - " } " - " bias_filler { " - " type: 'gaussian' " - " std: 1.0 " - " } " - " } " - " bottom: 'data' " - " top: 'innerprod' " - " } " - " layer { " - " name: 'loss' " - " type: 'EuclideanLoss' " - " bottom: 'innerprod' " - " bottom: 'targets' " - " } " - "} "; - if (weight_decay != 0) { - proto << "weight_decay: " << weight_decay << " "; - } - if (momentum != 0) { - proto << "momentum: " << momentum << " "; - } - Caffe::set_random_seed(this->seed_); - this->InitSolverFromProtoString(proto.str()); - this->solver_->Solve(); - } - - // Compute an update value given the current state of the train net, - // using the analytical formula for the least squares gradient. - // updated_params will store the updated weight and bias results, - // using the blobs' diffs to hold the update values themselves. - void ComputeLeastSquaresUpdate(const Dtype learning_rate, - const Dtype weight_decay, const Dtype momentum, - vector > >* updated_params) { - const int N = num_; - const int D = channels_ * height_ * width_; - - // Run a forward pass, and manually compute the update values from the - // result. 
- Net& net = *this->solver_->net(); - vector*> empty_bottom_vec; - net.Forward(empty_bottom_vec); - ASSERT_TRUE(net.has_blob("data")); - const Blob& data = *net.blob_by_name("data"); - ASSERT_TRUE(net.has_blob("targets")); - const Blob& targets = *net.blob_by_name("targets"); - ASSERT_TRUE(net.has_layer("innerprod")); - const vector > >& param_blobs = - net.layer_by_name("innerprod")->blobs(); - const int num_param_blobs = 2; - ASSERT_EQ(num_param_blobs, param_blobs.size()); - const Blob& weights = *param_blobs[0]; - const Blob& bias = *param_blobs[1]; - ASSERT_EQ(D * N, data.count()); - ASSERT_EQ(N, targets.count()); - ASSERT_EQ(D, weights.count()); - ASSERT_EQ(1, bias.count()); - - updated_params->clear(); - updated_params->resize(num_param_blobs); - for (int i = 0; i < num_param_blobs; ++i) { - (*updated_params)[i].reset(new Blob()); - } - Blob& updated_weights = *(*updated_params)[0]; - updated_weights.ReshapeLike(weights); - Blob& updated_bias = *(*updated_params)[1]; - updated_bias.ReshapeLike(bias); - - for (int i = 0; i <= D; ++i) { - // Compute the derivative with respect to the ith weight (i.e., the ith - // element of the gradient). - Dtype grad = 0; - for (int j = 0; j <= D; ++j) { - // Compute element (i, j) of X^T * X. - Dtype element = 0; - for (int k = 0; k < N; ++k) { - // (i, k) in X^T (== (k, i) in X) times (k, j) in X. - const Dtype element_i = (i == D) ? 1 : data.cpu_data()[k * D + i]; - const Dtype element_j = (j == D) ? 1 : data.cpu_data()[k * D + j]; - element += element_i * element_j; - } - if (j == D) { - grad += element * bias.cpu_data()[0]; - } else { - grad += element * weights.cpu_data()[j]; - } - } - for (int k = 0; k < N; ++k) { - const Dtype element_i = (i == D) ? 1 : data.cpu_data()[k * D + i]; - grad -= element_i * targets.cpu_data()[k]; - } - // Scale the gradient over the N samples. - grad /= N; - // Add the weight decay to the gradient. - grad += weight_decay * - ((i == D) ? 
bias.cpu_data()[0] : weights.cpu_data()[i]); - // Finally, compute update. - const vector > >& history = solver_->history(); - ASSERT_EQ(2, history.size()); // 1 blob for weights, 1 for bias - Dtype update_value = learning_rate * grad; - const Dtype history_value = (i == D) ? - history[1]->cpu_data()[0] : history[0]->cpu_data()[i]; - const Dtype temp = momentum * history_value; - switch (solver_type()) { - case SolverParameter_SolverType_SGD: - update_value += temp; - break; - case SolverParameter_SolverType_NESTEROV: - update_value += temp; - // step back then over-step - update_value = (1 + momentum) * update_value - temp; - break; - case SolverParameter_SolverType_ADAGRAD: - update_value /= std::sqrt(history_value + grad * grad) + delta_; - break; - default: - LOG(FATAL) << "Unknown solver type: " << solver_type(); - } - if (i == D) { - updated_bias.mutable_cpu_diff()[0] = update_value; - updated_bias.mutable_cpu_data()[0] = bias.cpu_data()[0] - update_value; - } else { - updated_weights.mutable_cpu_diff()[i] = update_value; - updated_weights.mutable_cpu_data()[i] = - weights.cpu_data()[i] - update_value; - } - } - } - - void CheckLeastSquaresUpdate( - const vector > >& updated_params) { - const int D = channels_ * height_ * width_; - - const Blob& updated_weights = *updated_params[0]; - const Blob& updated_bias = *updated_params[1]; - - Net& net = *this->solver_->net(); - ASSERT_TRUE(net.has_layer("innerprod")); - const vector > >& param_blobs = - net.layer_by_name("innerprod")->blobs(); - ASSERT_EQ(2, param_blobs.size()); - const Blob& solver_updated_weights = *param_blobs[0]; - ASSERT_EQ(D, solver_updated_weights.count()); - const double kPrecision = 1e-2; - const double kMinPrecision = 1e-7; - for (int i = 0; i < D; ++i) { - const Dtype expected_updated_weight = updated_weights.cpu_data()[i]; - const Dtype solver_updated_weight = solver_updated_weights.cpu_data()[i]; - const Dtype error_margin = std::max(kMinPrecision, kPrecision * - 
std::min(fabs(expected_updated_weight), fabs(solver_updated_weight))); - EXPECT_NEAR(expected_updated_weight, solver_updated_weight, error_margin); - } - const Blob& solver_updated_bias_blob = *param_blobs[1]; - ASSERT_EQ(1, solver_updated_bias_blob.count()); - const Dtype expected_updated_bias = updated_bias.cpu_data()[0]; - const Dtype solver_updated_bias = solver_updated_bias_blob.cpu_data()[0]; - const Dtype error_margin = std::max(kMinPrecision, kPrecision * - std::min(fabs(expected_updated_bias), fabs(solver_updated_bias))); - EXPECT_NEAR(expected_updated_bias, solver_updated_bias, error_margin); - - // Check the solver's history -- should contain the previous update value. - if (solver_type() == SolverParameter_SolverType_SGD) { - const vector > >& history = solver_->history(); - ASSERT_EQ(2, history.size()); - for (int i = 0; i < D; ++i) { - const Dtype expected_history = updated_weights.cpu_diff()[i]; - const Dtype solver_history = history[0]->cpu_data()[i]; - const Dtype error_margin_hist = std::max(kMinPrecision, kPrecision * - std::min(fabs(expected_history), fabs(solver_history))); - EXPECT_NEAR(expected_history, solver_history, error_margin_hist); - } - const Dtype expected_history = updated_bias.cpu_diff()[0]; - const Dtype solver_history = history[1]->cpu_data()[0]; - const Dtype error_margin_hist = std::max(kMinPrecision, kPrecision * - std::min(fabs(expected_history), fabs(solver_history))); - EXPECT_NEAR(expected_history, solver_history, error_margin_hist); - } - } - - void CheckAccumulation(const Dtype kLearningRate, const Dtype kWeightDecay, - const Dtype kMomentum, const int kNumIters, const int kIterSize) { - const double kPrecision = 1e-2; - const double kMinPrecision = 1e-7; - // Solve without accumulation and save parameters. - this->RunLeastSquaresSolver(kLearningRate, kWeightDecay, kMomentum, - kNumIters); - // Save parameters for comparison. 
- Net& net = *this->solver_->net(); - const vector > >& param_blobs = - net.layer_by_name("innerprod")->blobs(); - vector > > noaccum_params(param_blobs.size()); - for (int i = 0; i < param_blobs.size(); ++i) { - noaccum_params[i].reset(new Blob()); - noaccum_params[i]->CopyFrom(*param_blobs[i], false, true); - } - // Solve by equivalent accumulation of gradients over divided batches. - this->RunLeastSquaresSolver(kLearningRate, kWeightDecay, kMomentum, - kNumIters, kIterSize); - Net& net_accum = *this->solver_->net(); - const vector > >& accum_params = - net_accum.layer_by_name("innerprod")->blobs(); - // Compare accumulated parameters against no accumulation standard. - const int D = this->channels_ * this->height_ * this->width_; - for (int i = 0; i < D; ++i) { - const Dtype expected_param = noaccum_params[0]->cpu_data()[i]; - const Dtype accum_param = accum_params[0]->cpu_data()[i]; - const Dtype error_margin = std::max(kMinPrecision, kPrecision * - std::min(fabs(expected_param), fabs(accum_param))); - EXPECT_NEAR(expected_param, accum_param, error_margin); - } - ASSERT_EQ(1, accum_params[1]->count()); - const Dtype expected_bias = noaccum_params[1]->cpu_data()[0]; - const Dtype accum_bias = accum_params[1]->cpu_data()[0]; - const Dtype error_margin = std::max(kMinPrecision, kPrecision * - std::min(fabs(expected_bias), fabs(accum_bias))); - EXPECT_NEAR(expected_bias, accum_bias, error_margin); - } - - // Test that the correct update is computed for a regularized least squares - // problem: - // - // E = (1/(2n)) || X w - y ||^2 + (lambda / 2) || w ||^2 - // \nabla_w E = (1/n) (X^T X w - X^T y) + lambda * w - // - // X \in R^{n x (d+1)} (each example is a row, (d+1)th element is always 1) - // w \in R^{(d+1) x 1} ((d+1)th element is the bias) - // y \in R^{n x 1} - // lambda is weight_decay - // - // TestLeastSquaresUpdate works "inductively", assuming that the solver - // correctly updates the net K (= iter_to_check) times, then given the history - // from the 
Kth update, we compute the (K+1)th update and check that it - // matches the solver's (K+1)th update. - void TestLeastSquaresUpdate(const Dtype learning_rate = 1.0, - const Dtype weight_decay = 0.0, const Dtype momentum = 0.0, - const int iter_to_check = 0) { - // Initialize the solver and run K (= iter_to_check) solver iterations. - RunLeastSquaresSolver(learning_rate, weight_decay, momentum, iter_to_check); - - // Compute the (K+1)th update using the analytic least squares gradient. - vector > > updated_params; - ComputeLeastSquaresUpdate(learning_rate, weight_decay, momentum, - &updated_params); - - // Reinitialize the solver and run K+1 solver iterations. - RunLeastSquaresSolver(learning_rate, weight_decay, momentum, - iter_to_check + 1); - - // Check that the solver's solution matches ours. - CheckLeastSquaresUpdate(updated_params); - } -}; - - -template -class SGDSolverTest : public GradientBasedSolverTest { - typedef typename TypeParam::Dtype Dtype; - - protected: - virtual void InitSolver(const SolverParameter& param) { - this->solver_.reset(new SGDSolver(param)); - } - - virtual SolverParameter_SolverType solver_type() { - return SolverParameter_SolverType_SGD; - } -}; - -TYPED_TEST_CASE(SGDSolverTest, TestDtypesAndDevices); - -TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdate) { - this->TestLeastSquaresUpdate(); -} - -TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateLROneTenth) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - this->TestLeastSquaresUpdate(kLearningRate); -} - -TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithWeightDecay) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 1.0; - const Dtype kWeightDecay = 0.5; - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay); -} - -TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithMomentum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 1.0; - const Dtype kWeightDecay = 0.0; - const Dtype kMomentum = 0.5; - 
const int kNumIters = 1; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 1.0; - const Dtype kWeightDecay = 0.0; - const Dtype kMomentum = 0.5; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverything) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.9; -======= - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.5; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 -TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.5; ->>>>>>> triplet data generation and network update - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 - const Dtype kWeightDecay = 0.1; -======= - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - const int kIterSize = 2; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} -======= 
->>>>>>> triplet data generation and network update - -TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; ->>>>>>> triplet data generation and network update - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - const int kIterSize = 2; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} - -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -TYPED_TEST(SGDSolverTest, TestSnapshot) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(SGDSolverTest, TestSnapshotShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - this->share_ = true; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -======= -TYPED_TEST(SGDSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - const int kIterSize = 2; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} ->>>>>>> restore - ->>>>>>> triplet data generation and network update -template -class AdaGradSolverTest : public GradientBasedSolverTest { - typedef typename TypeParam::Dtype Dtype; - - protected: - virtual void InitSolver(const SolverParameter& param) { - this->solver_.reset(new AdaGradSolver(param)); - } - virtual SolverParameter_SolverType solver_type() { - return 
SolverParameter_SolverType_ADAGRAD; - } -}; - -TYPED_TEST_CASE(AdaGradSolverTest, TestDtypesAndDevices); - -TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdate) { - this->TestLeastSquaresUpdate(); -} - -TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateLROneTenth) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - this->TestLeastSquaresUpdate(kLearningRate); -} - -TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateWithWeightDecay) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 1.0; - const Dtype kWeightDecay = 0.5; - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay); -} - -TYPED_TEST(AdaGradSolverTest, TestAdaGradLeastSquaresUpdateWithEverything) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.0; -======= - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 -TYPED_TEST(AdaGradSolverTest, - TestAdaGradLeastSquaresUpdateWithEverythingShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; ->>>>>>> triplet data generation and network update - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(AdaGradSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.0; -======= - const Dtype 
kWeightDecay = 0.5; - const Dtype kMomentum = 0; -======= -TYPED_TEST(AdaGradSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.0; ->>>>>>> restore - const int kNumIters = 4; - const int kIterSize = 2; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a -======= ->>>>>>> triplet data generation and network update - -TYPED_TEST(AdaGradSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; ->>>>>>> triplet data generation and network update - const int kNumIters = 4; - const int kIterSize = 2; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} - -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -TYPED_TEST(AdaGradSolverTest, TestSnapshot) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(AdaGradSolverTest, TestSnapshotShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - this->share_ = true; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -======= ->>>>>>> restore - ->>>>>>> triplet data generation and network update -template -class NesterovSolverTest : public GradientBasedSolverTest { - typedef typename TypeParam::Dtype Dtype; - - protected: - virtual void InitSolver(const SolverParameter& param) { - 
this->solver_.reset(new NesterovSolver(param)); - } - virtual SolverParameter_SolverType solver_type() { - return SolverParameter_SolverType_NESTEROV; - } -}; - -TYPED_TEST_CASE(NesterovSolverTest, TestDtypesAndDevices); - -TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdate) { - this->TestLeastSquaresUpdate(); -} - -TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateLROneTenth) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.1; - this->TestLeastSquaresUpdate(kLearningRate); -} - -TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithWeightDecay) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 1.0; - const Dtype kWeightDecay = 0.5; - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay); -} - -TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithMomentum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 1.0; - const Dtype kWeightDecay = 0.0; - const Dtype kMomentum = 0.5; - const int kNumIters = 1; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithMomentumMultiIter) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 1.0; - const Dtype kWeightDecay = 0.0; - const Dtype kMomentum = 0.5; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(NesterovSolverTest, TestNesterovLeastSquaresUpdateWithEverything) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 - const Dtype kWeightDecay = 0.1; -======= - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); 
- } -} - -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 -TYPED_TEST(NesterovSolverTest, - TestNesterovLeastSquaresUpdateWithEverythingShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; ->>>>>>> triplet data generation and network update - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.1; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(AdamSolverTest, TestAdamLeastSquaresUpdateWithEverythingShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - this->share_ = true; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a -TYPED_TEST(AdamSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; -======= -TYPED_TEST(NesterovSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.1; ->>>>>>> restore - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - const int kIterSize = 2; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} - -<<<<<<< 
5308d9998ae0b1f97b7b99b33fac968421447f3a -TYPED_TEST(AdamSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - const int kIterSize = 2; - this->share_ = true; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} - -TYPED_TEST(AdamSolverTest, TestSnapshot) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(AdamSolverTest, TestSnapshotShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.9; - const int kNumIters = 4; - this->share_ = true; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -template -class RMSPropSolverTest : public GradientBasedSolverTest { - typedef typename TypeParam::Dtype Dtype; - - protected: - virtual void InitSolver(const SolverParameter& param) { - const Dtype rms_decay = 0.95; - SolverParameter new_param = param; - new_param.set_rms_decay(rms_decay); - this->solver_.reset(new RMSPropSolver(new_param)); - } - virtual SolverParameter_SolverType solver_type() { - return SolverParameter_SolverType_RMSPROP; - } -}; - -TYPED_TEST_CASE(RMSPropSolverTest, TestDtypesAndDevices); - -TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithWeightDecay) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 1.0; - const Dtype kWeightDecay = 0.5; - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay); -} - -TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithRmsDecay) { - typedef typename 
TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.0; - const Dtype kMomentum = 0.0; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(RMSPropSolverTest, TestRMSPropLeastSquaresUpdateWithEverything) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.0; - const int kNumIters = 4; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(RMSPropSolverTest, - TestRMSPropLeastSquaresUpdateWithEverythingShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.0; - const int kNumIters = 4; - this->share_ = true; - for (int i = 0; i <= kNumIters; ++i) { - this->TestLeastSquaresUpdate(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(RMSPropSolverTest, TestLeastSquaresUpdateWithEverythingAccum) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.0; - const int kNumIters = 4; - const int kIterSize = 2; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} - -TYPED_TEST(RMSPropSolverTest, TestLeastSquaresUpdateWithEverythingAccumShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0.0; - const int kNumIters = 4; ->>>>>>> triplet data generation and network update - const int kIterSize = 2; - this->CheckAccumulation(kLearningRate, kWeightDecay, kMomentum, kNumIters, - kIterSize); -} - -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -TYPED_TEST(RMSPropSolverTest, TestSnapshot) { - typedef typename TypeParam::Dtype 
Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -TYPED_TEST(RMSPropSolverTest, TestSnapshotShare) { - typedef typename TypeParam::Dtype Dtype; - const Dtype kLearningRate = 0.01; - const Dtype kWeightDecay = 0.5; - const Dtype kMomentum = 0; - const int kNumIters = 4; - this->share_ = true; - for (int i = 1; i <= kNumIters; ++i) { - this->TestSnapshot(kLearningRate, kWeightDecay, kMomentum, i); - } -} - -======= ->>>>>>> triplet data generation and network update -======= ->>>>>>> restore ->>>>>>> triplet data generation and network update -} // namespace caffe diff --git a/src/caffe/test/test_image_data_layer.cpp b/src/caffe/test/test_image_data_layer.cpp index 481fcef7b27..931a5ebf137 100644 --- a/src/caffe/test/test_image_data_layer.cpp +++ b/src/caffe/test/test_image_data_layer.cpp @@ -1,4 +1,3 @@ -#ifdef USE_OPENCV #include #include #include @@ -178,4 +177,3 @@ TYPED_TEST(ImageDataLayerTest, TestShuffle) { } } // namespace caffe -#endif // USE_OPENCV diff --git a/src/caffe/test/test_io.cpp b/src/caffe/test/test_io.cpp index c2c919e90dc..4ab96311bbc 100644 --- a/src/caffe/test/test_io.cpp +++ b/src/caffe/test/test_io.cpp @@ -1,4 +1,3 @@ -#ifdef USE_OPENCV #include #include #include @@ -421,4 +420,3 @@ TEST_F(IOTest, TestDecodeDatumToCVMatContentNative) { } } // namespace caffe -#endif // USE_OPENCV diff --git a/src/caffe/test/test_layer_factory.cpp b/src/caffe/test/test_layer_factory.cpp index 7d5d39d8b91..c86fafd000c 100644 --- a/src/caffe/test/test_layer_factory.cpp +++ b/src/caffe/test/test_layer_factory.cpp @@ -31,16 +31,12 @@ TYPED_TEST(LayerFactoryTest, TestCreateLayer) { LayerParameter layer_param; // Data layers expect a DB if (iter->first == "Data") { -#ifdef USE_LEVELDB string tmp; MakeTempDir(&tmp); boost::scoped_ptr 
db(db::GetDB(DataParameter_DB_LEVELDB)); db->Open(tmp, db::NEW); db->Close(); layer_param.mutable_data_param()->set_source(tmp); -#else - continue; -#endif // USE_LEVELDB } layer_param.set_type(iter->first); layer = LayerRegistry::CreateLayer(layer_param); diff --git a/src/caffe/test/test_memory_data_layer.cpp b/src/caffe/test/test_memory_data_layer.cpp index 7269a4d441b..a79033f59f1 100644 --- a/src/caffe/test/test_memory_data_layer.cpp +++ b/src/caffe/test/test_memory_data_layer.cpp @@ -1,6 +1,4 @@ -#ifdef USE_OPENCV #include -#endif // USE_OPENCV #include #include @@ -115,7 +113,6 @@ TYPED_TEST(MemoryDataLayerTest, TestForward) { } } -#ifdef USE_OPENCV TYPED_TEST(MemoryDataLayerTest, AddDatumVectorDefaultTransform) { typedef typename TypeParam::Dtype Dtype; @@ -295,5 +292,5 @@ TYPED_TEST(MemoryDataLayerTest, TestSetBatchSize) { } } } -#endif // USE_OPENCV + } // namespace caffe diff --git a/src/caffe/test/test_net.cpp b/src/caffe/test/test_net.cpp index 31890660636..12998d8912f 100644 --- a/src/caffe/test/test_net.cpp +++ b/src/caffe/test/test_net.cpp @@ -315,7 +315,7 @@ class NetTest : public MultiDeviceTest { " type: 'InnerProduct' " " inner_product_param { " " num_output: 10 " - " bias_term: " << bias_term << + " bias_term: " << bias_str << " weight_filler { " " type: 'gaussian' " " std: 10 " @@ -341,7 +341,7 @@ class NetTest : public MultiDeviceTest { " type: 'InnerProduct' " " inner_product_param { " " num_output: 10 " - " bias_term: " << bias_term << + " bias_term: " << bias_str << " weight_filler { " " type: 'gaussian' " " std: 10 " @@ -614,24 +614,6 @@ class NetTest : public MultiDeviceTest { InitNetFromProtoString(proto); } -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -======= ->>>>>>> 011aef0... restore -======= ->>>>>>> 80a07dd... macro define in upgrade_proto -======= -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 -======= ->>>>>>> restore ->>>>>>> 0a85215... 
triplet data generation and network update -======= ->>>>>>> 03cac8c... fixed two bugs with prototext format virtual void InitSkipPropNet(bool test_skip_true) { string proto = "name: 'SkipPropTestNetwork' " @@ -718,88 +700,11 @@ class NetTest : public MultiDeviceTest { " bottom: 'innerproduct' " " bottom: 'label_argmax' "; if (test_skip_true) -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -======= ->>>>>>> 00341b2... triplet data generation and network update -======= ->>>>>>> 1882ac9... add initiate class name of triplet loss layer -======= ->>>>>>> 08d5d6d... macro define in upgrade_proto -======= ->>>>>>> 1f7ef32... add RGB data training as an option in triplet training -======= -======= ->>>>>>> 8f22aea... add initiate class name of triplet loss layer -======= -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a ->>>>>>> 0a85215... triplet data generation and network update ->>>>>>> 0dbadac... triplet data generation and network update proto += " propagate_down: true " " propagate_down: false "; else proto += " propagate_down: true " " propagate_down: true "; -======= - proto += " propagate_down: [true, false] "; - else - proto += " propagate_down: [true, true] "; -<<<<<<< HEAD ->>>>>>> 011aef0... restore -<<<<<<< HEAD -<<<<<<< HEAD -======= ->>>>>>> 1882ac9... add initiate class name of triplet loss layer -======= -======= ->>>>>>> 03cac8c... fixed two bugs with prototext format - proto += " propagate_down: true " - " propagate_down: false "; - else - proto += " propagate_down: true " - " propagate_down: true "; -<<<<<<< HEAD ->>>>>>> 98fb438... fixed two bugs with prototext format -<<<<<<< HEAD -<<<<<<< HEAD -======= ->>>>>>> 00341b2... triplet data generation and network update -======= ->>>>>>> 1882ac9... add initiate class name of triplet loss layer -======= -======= - proto += " propagate_down: [true, false] "; - else - proto += " propagate_down: [true, true] "; ->>>>>>> 80a07dd... 
macro define in upgrade_proto -<<<<<<< HEAD ->>>>>>> 08d5d6d... macro define in upgrade_proto -======= -======= - proto += " propagate_down: true " - " propagate_down: false "; - else - proto += " propagate_down: true " - " propagate_down: true "; ->>>>>>> b266250... fixed two bugs with prototext format -<<<<<<< HEAD ->>>>>>> 1f7ef32... add RGB data training as an option in triplet training -======= -======= ->>>>>>> restore ->>>>>>> 0a85215... triplet data generation and network update -<<<<<<< HEAD ->>>>>>> 0dbadac... triplet data generation and network update -======= -======= ->>>>>>> 03cac8c... fixed two bugs with prototext format ->>>>>>> 8f22aea... add initiate class name of triplet loss layer proto += " top: 'cross_entropy_loss' " " type: 'SigmoidCrossEntropyLoss' " @@ -808,27 +713,6 @@ class NetTest : public MultiDeviceTest { InitNetFromProtoString(proto); } -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -======= ->>>>>>> 083f61b... New triplet loss layer added(beta1 version-no test source files) -======= ->>>>>>> 011aef0... restore -======= ->>>>>>> 4d8130b... New triplet loss layer added(beta1 version-no test source files) -======= ->>>>>>> 80a07dd... macro define in upgrade_proto -======= -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a -======= ->>>>>>> triplet data generation and network update -======= ->>>>>>> restore ->>>>>>> 0a85215... triplet data generation and network update -======= ->>>>>>> 03cac8c... fixed two bugs with prototext format int seed_; shared_ptr > net_; }; @@ -1223,11 +1107,10 @@ TYPED_TEST(NetTest, TestSharedWeightsUpdate) { EXPECT_EQ(this->net_->layer_names()[2], "innerproduct2"); Blob* ip1_weights = this->net_->layers()[1]->blobs()[0].get(); Blob* ip2_weights = this->net_->layers()[2]->blobs()[0].get(); - // Check that data blobs of shared weights share the same location in memory. + // Check that data and diff blobs of shared weights share the same memory + // locations. 
EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); - // Check that diff blobs of shared weights are at different locations in - // memory. (The diffs should be accumulated at update time.) - EXPECT_NE(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); + EXPECT_EQ(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); this->net_->Forward(bottom); this->net_->Backward(); // Compute the expected update as the data minus the two diffs. @@ -1240,11 +1123,7 @@ TYPED_TEST(NetTest, TestSharedWeightsUpdate) { // Make sure the diffs are non-trivial. for (int i = 0; i < count; ++i) { EXPECT_NE(0, ip1_weights->cpu_diff()[i]); - EXPECT_NE(0, ip2_weights->cpu_diff()[i]); - EXPECT_NE(ip1_weights->cpu_diff()[i], ip2_weights->cpu_diff()[i]); } - caffe_axpy(count, Dtype(1), ip2_weights->cpu_diff(), - shared_params.mutable_cpu_diff()); caffe_axpy(count, Dtype(-1), shared_params.cpu_diff(), shared_params.mutable_cpu_data()); const Dtype* expected_updated_params = shared_params.cpu_data(); @@ -1281,8 +1160,8 @@ TYPED_TEST(NetTest, TestSharedWeightsUpdate) { EXPECT_NE(0, ip1_weights->cpu_diff()[i]); EXPECT_NE(0, ip2_weights->cpu_diff()[i]); EXPECT_NE(ip1_weights->cpu_diff()[i], ip2_weights->cpu_diff()[i]); - EXPECT_EQ(ip1_weights->cpu_diff()[i] + ip2_weights->cpu_diff()[i], - shared_params.cpu_diff()[i]); + EXPECT_FLOAT_EQ(ip1_weights->cpu_diff()[i] + ip2_weights->cpu_diff()[i], + shared_params.cpu_diff()[i]); } caffe_axpy(count, Dtype(-1), ip1_weights->cpu_diff(), unshared_params1.mutable_cpu_data()); @@ -1312,11 +1191,10 @@ TYPED_TEST(NetTest, TestSharedWeightsResume) { EXPECT_EQ(this->net_->layer_names()[2], "innerproduct2"); Blob* ip1_weights = this->net_->layers()[1]->blobs()[0].get(); Blob* ip2_weights = this->net_->layers()[2]->blobs()[0].get(); - // Check that data blobs of shared weights share the same location in memory. + // Check that data and diff blobs of shared weights share the same memory + // locations. 
EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); - // Check that diff blobs of shared weights are at different locations in - // memory. (The diffs should be accumulated at update time.) - EXPECT_NE(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); + EXPECT_EQ(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); this->net_->ForwardBackward(bottom); this->net_->Update(); Blob shared_params; @@ -1339,14 +1217,13 @@ TYPED_TEST(NetTest, TestSharedWeightsResume) { ASSERT_FALSE(NULL == ip1_weights); ASSERT_FALSE(NULL == ip2_weights); EXPECT_NE(ip1_weights, ip2_weights); - // Check that data blobs of shared weights share the same location in memory. + // Check that data and diff blobs of shared weights share the same memory + // locations. EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); + EXPECT_EQ(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); for (int i = 0; i < count; ++i) { EXPECT_FLOAT_EQ(shared_params.cpu_data()[i], ip1_weights->cpu_data()[i]); } - // Check that diff blobs of shared weights are at different locations in - // memory. (The diffs should be accumulated at update time.) 
- EXPECT_NE(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); } TYPED_TEST(NetTest, TestParamPropagateDown) { diff --git a/src/caffe/test/test_net.cpp.orig b/src/caffe/test/test_net.cpp.orig deleted file mode 100644 index 2fcfad6ebd1..00000000000 --- a/src/caffe/test/test_net.cpp.orig +++ /dev/null @@ -1,2449 +0,0 @@ -#include -#include -#include - -#include "google/protobuf/text_format.h" - -#include "gtest/gtest.h" - -#include "caffe/common.hpp" -#include "caffe/filler.hpp" -#include "caffe/net.hpp" -#include "caffe/util/math_functions.hpp" - -#include "caffe/test/test_caffe_main.hpp" -#include "caffe/test/test_gradient_check_util.hpp" - -namespace caffe { - -template -class NetTest : public MultiDeviceTest { - typedef typename TypeParam::Dtype Dtype; - - protected: - NetTest() : seed_(1701) {} - - virtual void InitNetFromProtoString(const string& proto) { - NetParameter param; - CHECK(google::protobuf::TextFormat::ParseFromString(proto, ¶m)); - net_.reset(new Net(param)); - } - - virtual void CopyNetBlobs(const bool copy_diff, - vector > >* blobs_copy) { - CHECK(net_); - const vector > >& net_blobs = net_->blobs(); - blobs_copy->clear(); - blobs_copy->resize(net_blobs.size()); - const bool kReshape = true; - for (int i = 0; i < net_blobs.size(); ++i) { - (*blobs_copy)[i].reset(new Blob()); - (*blobs_copy)[i]->CopyFrom(*net_blobs[i], copy_diff, kReshape); - } - } - - virtual void CopyNetParams(const bool copy_diff, - vector > >* params_copy) { - CHECK(net_); - const vector > >& net_params = net_->params(); - params_copy->clear(); - params_copy->resize(net_params.size()); - const bool kReshape = true; - for (int i = 0; i < net_params.size(); ++i) { - (*params_copy)[i].reset(new Blob()); - (*params_copy)[i]->CopyFrom(*net_params[i], copy_diff, kReshape); - } - } - - virtual void InitTinyNet(const bool force_backward = false, - const bool accuracy_layer = false) { - string proto = - "name: 'TinyTestNetwork' " - "layer { " - " name: 'data' " - " type: 'DummyData' " 
- " dummy_data_param { " - " shape { " - " dim: 5 " - " dim: 2 " - " dim: 3 " - " dim: 4 " - " } " - " data_filler { " - " type: 'gaussian' " - " std: 0.01 " - " } " - " shape { " - " dim: 5 " - " } " - " data_filler { " - " type: 'constant' " - " value: 0 " - " } " - " } " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerproduct' " - " type: 'InnerProduct' " - " inner_product_param { " - " num_output: 1000 " - " weight_filler { " - " type: 'gaussian' " - " std: 0.01 " - " } " - " bias_filler { " - " type: 'constant' " - " value: 0 " - " } " - " } " - " param { " - " lr_mult: 1 " - " decay_mult: 1 " - " } " - " param { " - " lr_mult: 2 " - " decay_mult: 0 " - " } " - " bottom: 'data' " - " top: 'innerproduct' " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerproduct' " - " bottom: 'label' " - " top: 'top_loss' " - "} "; - if (accuracy_layer) { - proto += - "layer { " - " name: 'loss' " - " type: 'Accuracy' " - " bottom: 'innerproduct' " - " bottom: 'label' " - " top: 'accuracy' " - "} "; - } - if (force_backward) { - proto += "force_backward: true "; - } - InitNetFromProtoString(proto); - } - - virtual void InitTinyNetEuclidean(const bool force_backward = false) { - string proto = - "name: 'TinyTestEuclidLossNetwork' " - "layer { " - " name: 'data' " - " type: 'DummyData' " - " dummy_data_param { " - " num: 5 " - " channels: 2 " - " height: 3 " - " width: 4 " - " num: 5 " - " channels: 1 " - " height: 1 " - " width: 1 " - " data_filler { " - " type: 'gaussian' " - " std: 0.01 " - " } " - " } " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerproduct' " - " type: 'InnerProduct' " - " inner_product_param { " - " num_output: 1 " - " weight_filler { " - " type: 'gaussian' " - " std: 0.01 " - " } " - " bias_filler { " - " type: 'constant' " - " value: 0 " - " } " - " } " - " param { " - " lr_mult: 1 " - " decay_mult: 1 " - " } " - " param { " - " lr_mult: 2 " - " decay_mult: 0 " - " } 
" - " bottom: 'data' " - " top: 'innerproduct' " - "} " - "layer { " - " name: 'loss' " - " type: 'EuclideanLoss' " - " bottom: 'innerproduct' " - " bottom: 'label' " - "} "; - if (force_backward) { - proto += "force_backward: true "; - } - InitNetFromProtoString(proto); - } - - virtual void InitTrickyNet(Dtype* loss_weight = NULL) { - ostringstream loss_weight_stream; - if (loss_weight) { - loss_weight_stream << " loss_weight: " << *loss_weight << " "; - } - const string& proto = - "name: 'TrickyTestNetwork' " - "layer { " - " name: 'data' " - " type: 'DummyData' " - " dummy_data_param { " - " num: 5 " - " channels: 2 " - " height: 3 " - " width: 4 " - " num: 5 " - " channels: 1 " - " height: 1 " - " width: 1 " - " data_filler { " - " type: 'gaussian' " - " std: 0.01 " - " } " - " } " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerproduct' " - " type: 'InnerProduct' " - " inner_product_param { " - " num_output: 1000 " - " weight_filler { " - " type: 'gaussian' " - " std: 0.01 " - " } " - " bias_filler { " - " type: 'constant' " - " value: 0 " - " } " - " } " - " param { " - " lr_mult: 1 " - " decay_mult: 1 " - " } " - " param { " - " lr_mult: 2 " - " decay_mult: 0 " - " } " - " bottom: 'data' " - " top: 'transformed_data' " - "} " - "layer { " - " name: 'innerproduct' " - " type: 'InnerProduct' " - " inner_product_param { " - " num_output: 1 " - " weight_filler { " - " type: 'gaussian' " - " std: 0.01 " - " } " - " bias_filler { " - " type: 'constant' " - " value: 0 " - " } " - " } " - " param { " - " lr_mult: 1 " - " decay_mult: 1 " - " } " - " param { " - " lr_mult: 2 " - " decay_mult: 0 " - " } " - " bottom: 'label' " - " top: 'transformed_label' " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " + - loss_weight_stream.str() + - " bottom: 'transformed_data' " - " bottom: 'transformed_label' " - "} "; - InitNetFromProtoString(proto); - } - - // loss_weight is the loss weight for the 'EuclideanLoss' layer output. 
- // midnet_loss_weight is the loss weight for the first 'InnerProduct' layer - // output. Should both default to 0.0 if unspecified (i.e., if NULL is - // passed to this function). - virtual void InitUnsharedWeightsNet(const Dtype* loss_weight = NULL, - const Dtype* midnet_loss_weight = NULL, - const bool force_backward = false, const bool bias_term = false, - const Dtype blobs_lr_w1 = 1, const Dtype blobs_lr_b1 = 2, - const Dtype blobs_lr_w2 = 1, const Dtype blobs_lr_b2 = 2) { - string bias_str = bias_term ? "true ":"false "; - ostringstream proto; - proto << "name: 'UnsharedWeightsNetwork' "; - if (force_backward) { - proto << "force_backward: true "; - } - proto << - "layer { " - " name: 'data' " - " type: 'DummyData' " - " dummy_data_param { " - " num: 5 " - " channels: 2 " - " height: 3 " - " width: 4 " - " data_filler { " - " type: 'gaussian' " - " std: 0.01 " - " } " - " } " - " top: 'data' " - "} " - "layer { " - " name: 'innerproduct1' " - " type: 'InnerProduct' " - " inner_product_param { " - " num_output: 10 " - " bias_term: " << bias_str << - " weight_filler { " - " type: 'gaussian' " - " std: 10 " - " } " - " } " - " param { " - " name: 'unsharedweights1' " - " lr_mult: " << blobs_lr_w1 << - " } "; - if (bias_term) { - proto << " param { lr_mult: " << blobs_lr_b1 << " } "; - } - proto << - " bottom: 'data' " - " top: 'innerproduct1' "; - if (midnet_loss_weight) { - proto << " loss_weight: " << *midnet_loss_weight << " "; - } - proto << - "} " - "layer { " - " name: 'innerproduct2' " - " type: 'InnerProduct' " - " inner_product_param { " - " num_output: 10 " - " bias_term: " << bias_str << - " weight_filler { " - " type: 'gaussian' " - " std: 10 " - " } " - " } " - " param { " - " name: 'unsharedweights2' " - " lr_mult: " << blobs_lr_w2 << - " } "; - if (bias_term) { - proto << " param { lr_mult: " << blobs_lr_b2 << " } "; - } - proto << - " bottom: 'data' " - " top: 'innerproduct2' " - "} " - "layer { " - " name: 'loss' " - " type: 'EuclideanLoss' "; 
- if (loss_weight) { - proto << " loss_weight: " << *loss_weight << " "; - } - proto << - " bottom: 'innerproduct1' " - " bottom: 'innerproduct2' " - "} "; - InitNetFromProtoString(proto.str()); - } - - virtual void InitSharedWeightsNet() { - const string& proto = - "name: 'SharedWeightsNetwork' " - "layer { " - " name: 'data' " - " type: 'DummyData' " - " dummy_data_param { " - " num: 5 " - " channels: 2 " - " height: 3 " - " width: 4 " - " data_filler { " - " type: 'gaussian' " - " std: 0.01 " - " } " - " } " - " top: 'data' " - "} " - "layer { " - " name: 'innerproduct1' " - " type: 'InnerProduct' " - " inner_product_param { " - " num_output: 10 " - " bias_term: false " - " weight_filler { " - " type: 'gaussian' " - " std: 10 " - " } " - " } " - " param { name: 'sharedweights' } " - " bottom: 'data' " - " top: 'innerproduct1' " - "} " - "layer { " - " name: 'innerproduct2' " - " type: 'InnerProduct' " - " inner_product_param { " - " num_output: 10 " - " bias_term: false " - " weight_filler { " - " type: 'gaussian' " - " std: 10 " - " } " - " } " - " param { name: 'sharedweights' } " - " bottom: 'data' " - " top: 'innerproduct2' " - "} " - "layer { " - " name: 'loss' " - " type: 'EuclideanLoss' " - " bottom: 'innerproduct1' " - " bottom: 'innerproduct2' " - "} "; - InitNetFromProtoString(proto); - } - - virtual void InitDiffDataUnsharedWeightsNet() { - const string& proto = - "name: 'DiffDataUnsharedWeightsNetwork' " - "layer { " - " name: 'data' " - " type: 'DummyData' " - " dummy_data_param { " - " num: 10 " - " channels: 10 " - " height: 1 " - " width: 1 " - " num: 10 " - " channels: 10 " - " height: 1 " - " width: 1 " - " data_filler { " - " type: 'gaussian' " - " std: 10 " - " } " - " } " - " top: 'data1' " - " top: 'data2' " - "} " - "layer { " - " name: 'innerproduct1' " - " type: 'InnerProduct' " - " inner_product_param { " - " num_output: 10 " - " bias_term: false " - " weight_filler { " - " type: 'constant' " - " value: 0.5 " - " } " - " } " - " param { 
name: 'unsharedweights1' } " - " bottom: 'data1' " - " top: 'innerproduct1' " - "} " - "layer { " - " name: 'innerproduct2' " - " type: 'InnerProduct' " - " inner_product_param { " - " num_output: 10 " - " bias_term: false " - " weight_filler { " - " type: 'constant' " - " value: 0.5 " - " } " - " } " - " param { name: 'unsharedweights2' } " - " bottom: 'innerproduct1' " - " top: 'innerproduct2' " - "} " - "layer { " - " name: 'loss' " - " type: 'EuclideanLoss' " - " bottom: 'data2' " - " bottom: 'innerproduct2' " - "} "; - InitNetFromProtoString(proto); - } - - virtual void InitDiffDataSharedWeightsNet() { - const string& proto = - "name: 'DiffDataSharedWeightsNetwork' " - "layer { " - " name: 'data' " - " type: 'DummyData' " - " dummy_data_param { " - " num: 10 " - " channels: 10 " - " height: 1 " - " width: 1 " - " num: 10 " - " channels: 10 " - " height: 1 " - " width: 1 " - " data_filler { " - " type: 'gaussian' " - " std: 10 " - " } " - " } " - " top: 'data1' " - " top: 'data2' " - "} " - "layer { " - " name: 'innerproduct1' " - " type: 'InnerProduct' " - " inner_product_param { " - " num_output: 10 " - " bias_term: false " - " weight_filler { " - " type: 'constant' " - " value: 0.5 " - " } " - " } " - " param { name: 'sharedweights' } " - " bottom: 'data1' " - " top: 'innerproduct1' " - "} " - "layer { " - " name: 'innerproduct2' " - " type: 'InnerProduct' " - " inner_product_param { " - " num_output: 10 " - " bias_term: false " - " weight_filler { " - " type: 'constant' " - " value: 0.5 " - " } " - " } " - " param { name: 'sharedweights' } " - " bottom: 'innerproduct1' " - " top: 'innerproduct2' " - "} " - "layer { " - " name: 'loss' " - " type: 'EuclideanLoss' " - " bottom: 'data2' " - " bottom: 'innerproduct2' " - "} "; - InitNetFromProtoString(proto); - } - - virtual void InitReshapableNet() { - const string& proto = - "name: 'ReshapableNetwork' " - "input: 'data' " - "input_dim: 1 " - "input_dim: 3 " - "input_dim: 100 " - "input_dim: 100 " - "layer { " 
- " name: 'conv1' " - " type: 'Convolution' " - " bottom: 'data' " - " top: 'conv1' " - " convolution_param { " - " num_output: 5 " - " kernel_size: 3 " - " stride: 2 " - " weight_filler { " - " type: 'gaussian' " - " std: 0.01 " - " } " - " bias_filler { " - " type: 'constant' " - " value: 0.2 " - " } " - " } " - "} " - "layer { " - " name: 'relu1' " - " type: 'ReLU' " - " bottom: 'conv1' " - " top: 'conv1' " - "} " - "layer { " - " name: 'pool1' " - " type: 'Pooling' " - " bottom: 'conv1' " - " top: 'pool1' " - " pooling_param { " - " pool: MAX " - " kernel_size: 2 " - " stride: 2 " - " } " - "} " - "layer { " - " name: 'norm1' " - " type: 'LRN' " - " bottom: 'pool1' " - " top: 'norm1' " - " lrn_param { " - " local_size: 3 " - " } " - "} " - "layer { " - " name: 'softmax' " - " type: 'Softmax' " - " bottom: 'norm1' " - " top: 'softmax' " - "} "; - InitNetFromProtoString(proto); - } - -<<<<<<< HEAD -======= -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 -======= ->>>>>>> restore ->>>>>>> 0dbadac... 
triplet data generation and network update - virtual void InitSkipPropNet(bool test_skip_true) { - string proto = - "name: 'SkipPropTestNetwork' " - "layer { " - " name: 'data' " - " type: 'DummyData' " - " dummy_data_param { " - " shape { " - " dim: 5 " - " dim: 2 " - " dim: 3 " - " dim: 4 " - " } " - " data_filler { " - " type: 'gaussian' " - " std: 0.01 " - " } " - " shape { " - " dim: 5 " - " } " - " data_filler { " - " type: 'constant' " - " value: 0 " - " } " - " } " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'silence' " - " bottom: 'label' " - " type: 'Silence' " - "} " - "layer { " - " name: 'innerproduct' " - " type: 'InnerProduct' " - " inner_product_param { " - " num_output: 1 " - " weight_filler { " - " type: 'gaussian' " - " std: 0.01 " - " } " - " bias_filler { " - " type: 'constant' " - " value: 0 " - " } " - " } " - " param { " - " lr_mult: 1 " - " decay_mult: 1 " - " } " - " param { " - " lr_mult: 2 " - " decay_mult: 0 " - " } " - " bottom: 'data' " - " top: 'innerproduct' " - "} " - "layer { " - " name: 'ip_fake_labels' " - " type: 'InnerProduct' " - " inner_product_param { " - " num_output: 1 " - " weight_filler { " - " type: 'gaussian' " - " std: 0.01 " - " } " - " bias_filler { " - " type: 'constant' " - " value: 0 " - " } " - " } " - " bottom: 'data' " - " top: 'fake_labels' " - "} " - "layer { " - " name: 'argmax' " - " bottom: 'fake_labels' " - " top: 'label_argmax' " - " type: 'ArgMax' " - "} " - "layer { " - " name: 'loss' " - " bottom: 'innerproduct' " - " bottom: 'label_argmax' "; - if (test_skip_true) -<<<<<<< HEAD -<<<<<<< HEAD -======= -======= -<<<<<<< 10b3f6404a1efb4e8f237bf204c88854bea7edb8 ->>>>>>> 8f22aea... add initiate class name of triplet loss layer -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a ->>>>>>> 0dbadac... 
triplet data generation and network update - proto += " propagate_down: true " - " propagate_down: false "; - else - proto += " propagate_down: true " - " propagate_down: true "; -<<<<<<< HEAD -======= -======= - proto += " propagate_down: [true, false] "; - else - proto += " propagate_down: [true, true] "; ->>>>>>> restore -<<<<<<< HEAD ->>>>>>> 0dbadac... triplet data generation and network update -======= -======= - proto += " propagate_down: true " - " propagate_down: false "; - else - proto += " propagate_down: true " - " propagate_down: true "; ->>>>>>> fixed two bugs with prototext format ->>>>>>> 8f22aea... add initiate class name of triplet loss layer - proto += - " top: 'cross_entropy_loss' " - " type: 'SigmoidCrossEntropyLoss' " - " loss_weight: 0.1 " - "} "; - InitNetFromProtoString(proto); - } - -<<<<<<< HEAD -======= -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a -======= ->>>>>>> triplet data generation and network update -======= ->>>>>>> restore ->>>>>>> 0dbadac... triplet data generation and network update - int seed_; - shared_ptr > net_; -}; - -TYPED_TEST_CASE(NetTest, TestDtypesAndDevices); - -TYPED_TEST(NetTest, TestHasBlob) { - this->InitTinyNet(); - EXPECT_TRUE(this->net_->has_blob("data")); - EXPECT_TRUE(this->net_->has_blob("label")); - EXPECT_TRUE(this->net_->has_blob("innerproduct")); - EXPECT_FALSE(this->net_->has_blob("loss")); - EXPECT_TRUE(this->net_->has_blob("top_loss")); -} - -TYPED_TEST(NetTest, TestGetBlob) { - this->InitTinyNet(); - EXPECT_EQ(this->net_->blob_by_name("data"), this->net_->blobs()[0]); - EXPECT_EQ(this->net_->blob_by_name("label"), this->net_->blobs()[1]); - EXPECT_EQ(this->net_->blob_by_name("innerproduct"), this->net_->blobs()[2]); - EXPECT_FALSE(this->net_->blob_by_name("loss")); - EXPECT_EQ(this->net_->blob_by_name("top_loss"), this->net_->blobs()[3]); -} - -TYPED_TEST(NetTest, TestHasLayer) { - this->InitTinyNet(); - EXPECT_TRUE(this->net_->has_layer("data")); - 
EXPECT_TRUE(this->net_->has_layer("innerproduct")); - EXPECT_TRUE(this->net_->has_layer("loss")); - EXPECT_FALSE(this->net_->has_layer("label")); -} - -TYPED_TEST(NetTest, TestGetLayerByName) { - this->InitTinyNet(); - EXPECT_EQ(this->net_->layer_by_name("data"), this->net_->layers()[0]); - EXPECT_EQ(this->net_->layer_by_name("innerproduct"), this->net_->layers()[1]); - EXPECT_EQ(this->net_->layer_by_name("loss"), this->net_->layers()[2]); - EXPECT_FALSE(this->net_->layer_by_name("label")); -} - -TYPED_TEST(NetTest, TestBottomNeedBackward) { - this->InitTinyNet(); - const vector >& bottom_need_backward = - this->net_->bottom_need_backward(); - EXPECT_EQ(3, bottom_need_backward.size()); - EXPECT_EQ(0, bottom_need_backward[0].size()); - EXPECT_EQ(1, bottom_need_backward[1].size()); - EXPECT_EQ(false, bottom_need_backward[1][0]); - EXPECT_EQ(2, bottom_need_backward[2].size()); - EXPECT_EQ(true, bottom_need_backward[2][0]); - EXPECT_EQ(false, bottom_need_backward[2][1]); -} - -TYPED_TEST(NetTest, TestBottomNeedBackwardForce) { - const bool force_backward = true; - this->InitTinyNet(force_backward); - const vector >& bottom_need_backward = - this->net_->bottom_need_backward(); - EXPECT_EQ(3, bottom_need_backward.size()); - EXPECT_EQ(0, bottom_need_backward[0].size()); - EXPECT_EQ(1, bottom_need_backward[1].size()); - EXPECT_EQ(true, bottom_need_backward[1][0]); - EXPECT_EQ(2, bottom_need_backward[2].size()); - EXPECT_EQ(true, bottom_need_backward[2][0]); - EXPECT_EQ(false, bottom_need_backward[2][1]); -} - -TYPED_TEST(NetTest, TestBottomNeedBackwardEuclideanForce) { - const bool force_backward = true; - this->InitTinyNetEuclidean(force_backward); - const vector >& bottom_need_backward = - this->net_->bottom_need_backward(); - EXPECT_EQ(3, bottom_need_backward.size()); - EXPECT_EQ(0, bottom_need_backward[0].size()); - EXPECT_EQ(1, bottom_need_backward[1].size()); - EXPECT_EQ(true, bottom_need_backward[1][0]); - EXPECT_EQ(2, bottom_need_backward[2].size()); - 
EXPECT_EQ(true, bottom_need_backward[2][0]); - EXPECT_EQ(true, bottom_need_backward[2][1]); -} - -TYPED_TEST(NetTest, TestBottomNeedBackwardTricky) { - this->InitTrickyNet(); - const vector >& bottom_need_backward = - this->net_->bottom_need_backward(); - EXPECT_EQ(4, bottom_need_backward.size()); - EXPECT_EQ(0, bottom_need_backward[0].size()); - EXPECT_EQ(1, bottom_need_backward[1].size()); - EXPECT_EQ(false, bottom_need_backward[1][0]); - EXPECT_EQ(1, bottom_need_backward[2].size()); - EXPECT_EQ(false, bottom_need_backward[2][0]); - EXPECT_EQ(2, bottom_need_backward[3].size()); - EXPECT_EQ(true, bottom_need_backward[3][0]); - // The label input to the SoftmaxLossLayer should say it "needs backward" - // since it has weights under it, even though we expect this to cause a crash - // at training/test time. - EXPECT_EQ(true, bottom_need_backward[3][1]); -} - -TYPED_TEST(NetTest, TestLossWeight) { - typedef typename TypeParam::Dtype Dtype; - // First, compute the loss and gradients with no loss_weight specified. - // In this case, the loss weight for the 'EuclideanLoss' layer should default - // to 1. - vector*> bottom; - Caffe::set_random_seed(this->seed_); - const bool kForceBackward = true; - this->InitUnsharedWeightsNet(NULL, NULL, kForceBackward); - const Dtype loss = this->net_->ForwardBackward(bottom); - const bool kCopyDiff = true; - vector > > blob_grads; - this->CopyNetBlobs(kCopyDiff, &blob_grads); - vector > > param_grads; - this->CopyNetParams(kCopyDiff, ¶m_grads); - // Check that the loss is non-trivial, otherwise the test doesn't prove much. 
- const Dtype kMinLossAbsValue = 1e-2; - ASSERT_GE(fabs(loss), kMinLossAbsValue); - const Dtype kErrorMargin = 1e-4; - const int kNumLossWeights = 6; - Dtype kLossWeights[kNumLossWeights] = {2, 0, 1, -1, -2.5, 3.7}; - for (int i = 0; i < kNumLossWeights; ++i) { - Caffe::set_random_seed(this->seed_); - this->InitUnsharedWeightsNet(&kLossWeights[i], NULL, kForceBackward); - const Dtype weighted_loss = this->net_->ForwardBackward(bottom); - const Dtype error_margin = kErrorMargin * fabs(kLossWeights[i]); - EXPECT_NEAR(loss * kLossWeights[i], weighted_loss, error_margin) - << "loss weight = " << kLossWeights[i]; - const vector > >& weighted_blobs = - this->net_->blobs(); - ASSERT_EQ(blob_grads.size(), weighted_blobs.size()); - for (int j = 0; j < blob_grads.size(); ++j) { - ASSERT_EQ(blob_grads[j]->count(), weighted_blobs[j]->count()); - for (int k = 0; k < blob_grads[j]->count(); ++k) { - EXPECT_NEAR(blob_grads[j]->cpu_diff()[k] * kLossWeights[i], - weighted_blobs[j]->cpu_diff()[k], error_margin); - } - } - const vector > >& weighted_params = - this->net_->params(); - ASSERT_EQ(param_grads.size(), weighted_params.size()); - for (int j = 0; j < param_grads.size(); ++j) { - ASSERT_EQ(param_grads[j]->count(), weighted_params[j]->count()); - for (int k = 0; k < param_grads[j]->count(); ++k) { - EXPECT_NEAR(param_grads[j]->cpu_diff()[k] * kLossWeights[i], - weighted_params[j]->cpu_diff()[k], error_margin); - } - } - } -} - -TYPED_TEST(NetTest, TestLossWeightMidNet) { - typedef typename TypeParam::Dtype Dtype; - vector*> bottom; - Caffe::set_random_seed(this->seed_); - const bool kForceBackward = true; - Dtype loss_weight = 0; - Dtype midnet_loss_weight = 1; - this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, - kForceBackward); - const Dtype loss = this->net_->ForwardBackward(bottom); - const bool kCopyDiff = true; - const bool kReshape = true; - Blob data_grad; - data_grad.CopyFrom(*this->net_->blob_by_name("data"), kCopyDiff, kReshape); - // Check that the 
loss is non-trivial, otherwise the test doesn't prove much. - const Dtype kMinLossAbsValue = 1e-2; - ASSERT_GE(fabs(loss), kMinLossAbsValue); - const Dtype kErrorMargin = 1e-4; - const int kNumLossWeights = 6; - Dtype kLossWeights[kNumLossWeights] = {2, 0, 1, -1, -2.5, 3.7}; - for (int i = 0; i < kNumLossWeights; ++i) { - Caffe::set_random_seed(this->seed_); - this->InitUnsharedWeightsNet(&loss_weight, &kLossWeights[i], - kForceBackward); - const Dtype weighted_loss = this->net_->ForwardBackward(bottom); - const Dtype error_margin = kErrorMargin * fabs(kLossWeights[i]); - EXPECT_NEAR(loss * kLossWeights[i], weighted_loss, error_margin) - << "loss weight = " << kLossWeights[i]; - const shared_ptr >& weighted_blob = - this->net_->blob_by_name("data"); - ASSERT_EQ(data_grad.count(), weighted_blob->count()); - for (int j = 0; j < data_grad.count(); ++j) { - EXPECT_NEAR(data_grad.cpu_diff()[j] * kLossWeights[i], - weighted_blob->cpu_diff()[j], error_margin); - } - } -} - -TYPED_TEST(NetTest, TestComboLossWeight) { - typedef typename TypeParam::Dtype Dtype; - vector*> bottom; - Dtype loss_weight; - Dtype midnet_loss_weight; - const bool kForceBackward = true; - const Dtype kErrorMargin = 1e-4; - - // Get the loss and gradients with 'EuclideanLoss' weight 1, - // 'InnerProduct' weight 1. 
- loss_weight = 1; - midnet_loss_weight = 1; - Caffe::set_random_seed(this->seed_); - this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, - kForceBackward); - const Dtype loss = this->net_->ForwardBackward(bottom); - const bool kCopyDiff = true; - vector > > blob_grads; - this->CopyNetBlobs(kCopyDiff, &blob_grads); - vector > > param_grads; - this->CopyNetParams(kCopyDiff, ¶m_grads); - - loss_weight = 2; - midnet_loss_weight = 1; - Caffe::set_random_seed(this->seed_); - this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, - kForceBackward); - const Dtype loss_main_2 = this->net_->ForwardBackward(bottom); - vector > > blob_grads_loss_2; - this->CopyNetBlobs(kCopyDiff, &blob_grads_loss_2); - vector > > param_grads_loss_2; - this->CopyNetParams(kCopyDiff, ¶m_grads_loss_2); - - loss_weight = 3; - midnet_loss_weight = 1; - Caffe::set_random_seed(this->seed_); - this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, - kForceBackward); - const Dtype loss_main_3 = this->net_->ForwardBackward(bottom); - const vector > >& blob_grads_loss_3 = - this->net_->blobs(); - ASSERT_EQ(blob_grads.size(), blob_grads_loss_3.size()); - ASSERT_EQ(blob_grads_loss_2.size(), blob_grads_loss_3.size()); - for (int j = 0; j < blob_grads.size(); ++j) { - const string& blob_name = this->net_->blob_names()[j]; - bool grad_should_change = true; - if (blob_name == "innerproduct1_innerproduct1_0_split_0") { - grad_should_change = false; - } - ASSERT_EQ(blob_grads[j]->count(), blob_grads_loss_3[j]->count()); - ASSERT_EQ(blob_grads_loss_2[j]->count(), blob_grads_loss_3[j]->count()); - for (int k = 0; k < blob_grads[j]->count(); ++k) { - const Dtype grad_diff_2 = blob_grads_loss_2[j]->cpu_diff()[k] - - blob_grads[j]->cpu_diff()[k]; - const Dtype grad_diff_3 = blob_grads_loss_3[j]->cpu_diff()[k] - - blob_grads[j]->cpu_diff()[k]; - if (grad_should_change) { - // Test non-triviality. 
- const Dtype kMinGradDiffAbsValue = 1e-4; - EXPECT_GT(fabs(grad_diff_2), kMinGradDiffAbsValue) << blob_name; - EXPECT_NEAR(2 * grad_diff_2, grad_diff_3, kErrorMargin) << blob_name; - } else { - EXPECT_EQ(0, grad_diff_2) << blob_name; - EXPECT_EQ(0, grad_diff_3) << blob_name; - } - } - } - - loss_weight = 1; - midnet_loss_weight = 2; - Caffe::set_random_seed(this->seed_); - this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, - kForceBackward); - const Dtype loss_midnet_2 = this->net_->ForwardBackward(bottom); - this->CopyNetBlobs(kCopyDiff, &blob_grads_loss_2); - this->CopyNetParams(kCopyDiff, ¶m_grads_loss_2); - - loss_weight = 1; - midnet_loss_weight = 3; - Caffe::set_random_seed(this->seed_); - this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, - kForceBackward); - const Dtype loss_midnet_3 = this->net_->ForwardBackward(bottom); - const vector > >& blob_grads_midnet_loss_3 = - this->net_->blobs(); - ASSERT_EQ(blob_grads.size(), blob_grads_midnet_loss_3.size()); - ASSERT_EQ(blob_grads_loss_2.size(), blob_grads_midnet_loss_3.size()); - const vector& blob_names = this->net_->blob_names(); - for (int j = 0; j < blob_grads.size(); ++j) { - const string& blob_name = blob_names[j]; - bool grad_should_change = false; - if (blob_name == "innerproduct1" || - blob_name == "innerproduct1_innerproduct1_0_split_0" || - blob_name == "data_data_0_split_0" || blob_name == "data") { - grad_should_change = true; - } - ASSERT_EQ(blob_grads[j]->count(), blob_grads_midnet_loss_3[j]->count()); - ASSERT_EQ(blob_grads[j]->count(), blob_grads_loss_2[j]->count()); - for (int k = 0; k < blob_grads[j]->count(); ++k) { - const Dtype grad_diff_2 = blob_grads_loss_2[j]->cpu_diff()[k] - - blob_grads[j]->cpu_diff()[k]; - const Dtype grad_diff_3 = blob_grads_midnet_loss_3[j]->cpu_diff()[k] - - blob_grads[j]->cpu_diff()[k]; - if (grad_should_change) { - // Test non-triviality. 
- const Dtype kMinGradDiffAbsValue = 1e-4; - EXPECT_GT(fabs(grad_diff_2), kMinGradDiffAbsValue) << blob_name; - EXPECT_NEAR(2 * grad_diff_2, grad_diff_3, kErrorMargin) << blob_name; - } else { - EXPECT_EQ(0, grad_diff_2) << blob_name; - EXPECT_EQ(0, grad_diff_3) << blob_name; - } - } - } - - const Dtype kMinLossDiffAbsValue = 1e-4; - - Dtype loss_diff_2 = loss_main_2 - loss; - // Test non-triviality. - EXPECT_GT(fabs(loss_diff_2), kMinLossDiffAbsValue); - Dtype loss_diff_3 = loss_main_3 - loss; - EXPECT_NEAR(2 * loss_diff_2, loss_diff_3, kErrorMargin); - - loss_diff_2 = loss_midnet_2 - loss; - // Test non-triviality. - EXPECT_GT(fabs(loss_diff_2), kMinLossDiffAbsValue); - loss_diff_3 = loss_midnet_3 - loss; - EXPECT_NEAR(2 * loss_diff_2, loss_diff_3, kErrorMargin); -} - -TYPED_TEST(NetTest, TestBackwardWithAccuracyLayer) { - typedef typename TypeParam::Dtype Dtype; - const bool kForceBackward = false; - const bool kAccuracyLayer = true; - this->InitTinyNet(kForceBackward, kAccuracyLayer); - EXPECT_TRUE(this->net_->has_blob("accuracy")); - vector*> bottom; - // Test that we can do Backward even though we have an 'Accuracy' layer. 
- this->net_->ForwardBackward(bottom); -} - -TYPED_TEST(NetTest, TestUnsharedWeightsDataNet) { - typedef typename TypeParam::Dtype Dtype; - this->InitUnsharedWeightsNet(); - vector*> bottom; - Dtype loss; - this->net_->Forward(bottom, &loss); - EXPECT_GT(loss, 0); -} - -TYPED_TEST(NetTest, TestSharedWeightsDataNet) { - typedef typename TypeParam::Dtype Dtype; - this->InitSharedWeightsNet(); - vector*> bottom; - Dtype loss; - this->net_->Forward(bottom, &loss); - EXPECT_FLOAT_EQ(loss, 0); -} - -TYPED_TEST(NetTest, TestUnsharedWeightsDiffNet) { - typedef typename TypeParam::Dtype Dtype; - this->InitUnsharedWeightsNet(); - vector*> bottom; - Net* net = this->net_.get(); - net->Forward(bottom); - net->Backward(); - Layer* ip1_layer = net->layer_by_name("innerproduct1").get(); - Layer* ip2_layer = net->layer_by_name("innerproduct2").get(); - const int count = ip1_layer->blobs()[0]->count(); - const Dtype* grad1 = ip1_layer->blobs()[0]->cpu_diff(); - const Dtype* grad2 = ip2_layer->blobs()[0]->cpu_diff(); - for (int i = 0; i < count; ++i) { - EXPECT_GT(fabs(grad1[i]), 0); - EXPECT_FLOAT_EQ(-1 * grad1[i], grad2[i]); - } -} - -TYPED_TEST(NetTest, TestSharedWeightsDiffNet) { - typedef typename TypeParam::Dtype Dtype; - this->InitSharedWeightsNet(); - vector*> bottom; - Net* net = this->net_.get(); - Dtype loss; - net->Forward(bottom, &loss); - net->Backward(); - EXPECT_FLOAT_EQ(loss, 0); - Layer* ip1_layer = net->layer_by_name("innerproduct1").get(); - Layer* ip2_layer = net->layer_by_name("innerproduct2").get(); - const int count = ip1_layer->blobs()[0]->count(); - const Dtype* grad1 = ip1_layer->blobs()[0]->cpu_diff(); - const Dtype* grad2 = ip2_layer->blobs()[0]->cpu_diff(); - for (int i = 0; i < count; ++i) { - EXPECT_FLOAT_EQ(0, grad1[i]); - EXPECT_FLOAT_EQ(0, grad2[i]); - } -} - -TYPED_TEST(NetTest, TestSharedWeightsUpdate) { - typedef typename TypeParam::Dtype Dtype; - Caffe::set_random_seed(this->seed_); - this->InitDiffDataSharedWeightsNet(); - vector*> bottom; - 
EXPECT_EQ(this->net_->layer_names()[1], "innerproduct1"); - EXPECT_EQ(this->net_->layer_names()[2], "innerproduct2"); - Blob* ip1_weights = this->net_->layers()[1]->blobs()[0].get(); - Blob* ip2_weights = this->net_->layers()[2]->blobs()[0].get(); -<<<<<<< HEAD - // Check that data blobs of shared weights share the same location in memory. - EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); - // Check that diff blobs of shared weights are at different locations in - // memory. (The diffs should be accumulated at update time.) - EXPECT_NE(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); -======= - // Check that data and diff blobs of shared weights share the same memory - // locations. - EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); - EXPECT_EQ(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); ->>>>>>> 0dbadac... triplet data generation and network update - this->net_->Forward(bottom); - this->net_->Backward(); - // Compute the expected update as the data minus the two diffs. - Blob shared_params; - const bool reshape = true; - const bool copy_diff = false; - shared_params.CopyFrom(*ip1_weights, copy_diff, reshape); - shared_params.CopyFrom(*ip1_weights, !copy_diff, reshape); - const int count = ip1_weights->count(); - // Make sure the diffs are non-trivial. - for (int i = 0; i < count; ++i) { - EXPECT_NE(0, ip1_weights->cpu_diff()[i]); -<<<<<<< HEAD - EXPECT_NE(0, ip2_weights->cpu_diff()[i]); - EXPECT_NE(ip1_weights->cpu_diff()[i], ip2_weights->cpu_diff()[i]); - } - caffe_axpy(count, Dtype(1), ip2_weights->cpu_diff(), - shared_params.mutable_cpu_diff()); -======= - } ->>>>>>> 0dbadac... 
triplet data generation and network update - caffe_axpy(count, Dtype(-1), shared_params.cpu_diff(), - shared_params.mutable_cpu_data()); - const Dtype* expected_updated_params = shared_params.cpu_data(); - this->net_->Update(); - const Dtype* actual_updated_params = ip1_weights->cpu_data(); - for (int i = 0; i < count; ++i) { - EXPECT_EQ(expected_updated_params[i], actual_updated_params[i]); - } - // Check that data blobs of shared weights STILL point to the same memory - // location (because ... who knows). - EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); - - Caffe::set_random_seed(this->seed_); - this->InitDiffDataUnsharedWeightsNet(); - EXPECT_EQ(this->net_->layer_names()[1], "innerproduct1"); - EXPECT_EQ(this->net_->layer_names()[2], "innerproduct2"); - ip1_weights = this->net_->layers()[1]->blobs()[0].get(); - ip2_weights = this->net_->layers()[2]->blobs()[0].get(); - // Check that data and diff blobs of unshared weights are at different - // locations in memory. - EXPECT_NE(ip1_weights->cpu_data(), ip2_weights->cpu_data()); - EXPECT_NE(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); - this->net_->Forward(bottom); - this->net_->Backward(); - // Compute the expected update. - Blob unshared_params1; - unshared_params1.CopyFrom(*ip1_weights, copy_diff, reshape); - unshared_params1.CopyFrom(*ip1_weights, !copy_diff, reshape); - Blob unshared_params2; - unshared_params2.CopyFrom(*ip2_weights, copy_diff, reshape); - unshared_params2.CopyFrom(*ip2_weights, !copy_diff, reshape); - // Make sure the diffs are non-trivial and sum to the diff in the shared net. 
- for (int i = 0; i < count; ++i) { - EXPECT_NE(0, ip1_weights->cpu_diff()[i]); - EXPECT_NE(0, ip2_weights->cpu_diff()[i]); - EXPECT_NE(ip1_weights->cpu_diff()[i], ip2_weights->cpu_diff()[i]); -<<<<<<< HEAD - EXPECT_EQ(ip1_weights->cpu_diff()[i] + ip2_weights->cpu_diff()[i], - shared_params.cpu_diff()[i]); -======= - EXPECT_FLOAT_EQ(ip1_weights->cpu_diff()[i] + ip2_weights->cpu_diff()[i], - shared_params.cpu_diff()[i]); ->>>>>>> 0dbadac... triplet data generation and network update - } - caffe_axpy(count, Dtype(-1), ip1_weights->cpu_diff(), - unshared_params1.mutable_cpu_data()); - caffe_axpy(count, Dtype(-1), ip2_weights->cpu_diff(), - unshared_params2.mutable_cpu_data()); - const Dtype* expected_updated_params1 = unshared_params1.cpu_data(); - const Dtype* expected_updated_params2 = unshared_params2.cpu_data(); - this->net_->Update(); - const Dtype* actual_updated_params1 = ip1_weights->cpu_data(); - const Dtype* actual_updated_params2 = ip2_weights->cpu_data(); - for (int i = 0; i < count; ++i) { - EXPECT_EQ(expected_updated_params1[i], actual_updated_params1[i]); - EXPECT_EQ(expected_updated_params2[i], actual_updated_params2[i]); - EXPECT_NE(actual_updated_params1[i], actual_updated_params2[i]); - EXPECT_NE(expected_updated_params, expected_updated_params1); - } -} - -TYPED_TEST(NetTest, TestSharedWeightsResume) { - typedef typename TypeParam::Dtype Dtype; - - // Create a net with weight sharing; Update it once. - Caffe::set_random_seed(this->seed_); - this->InitDiffDataSharedWeightsNet(); - vector*> bottom; - EXPECT_EQ(this->net_->layer_names()[1], "innerproduct1"); - EXPECT_EQ(this->net_->layer_names()[2], "innerproduct2"); - Blob* ip1_weights = this->net_->layers()[1]->blobs()[0].get(); - Blob* ip2_weights = this->net_->layers()[2]->blobs()[0].get(); -<<<<<<< HEAD - // Check that data blobs of shared weights share the same location in memory. 
- EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); - // Check that diff blobs of shared weights are at different locations in - // memory. (The diffs should be accumulated at update time.) - EXPECT_NE(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); -======= - // Check that data and diff blobs of shared weights share the same memory - // locations. - EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); - EXPECT_EQ(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); ->>>>>>> 0dbadac... triplet data generation and network update - this->net_->ForwardBackward(bottom); - this->net_->Update(); - Blob shared_params; - const bool kReshape = true; - const bool kCopyDiff = false; - shared_params.CopyFrom(*ip1_weights, kCopyDiff, kReshape); - const int count = ip1_weights->count(); - - // Write the net to a NetParameter, as in Solver::Snapshot. - NetParameter net_param; - this->net_->ToProto(&net_param); - - // Reinitialize the net and copy parameters from net_param, as in - // Solver::Restore. - Caffe::set_random_seed(this->seed_); - this->InitDiffDataSharedWeightsNet(); - this->net_->CopyTrainedLayersFrom(net_param); - ip1_weights = this->net_->layers()[1]->blobs()[0].get(); - ip2_weights = this->net_->layers()[2]->blobs()[0].get(); - ASSERT_FALSE(NULL == ip1_weights); - ASSERT_FALSE(NULL == ip2_weights); - EXPECT_NE(ip1_weights, ip2_weights); -<<<<<<< HEAD - // Check that data blobs of shared weights share the same location in memory. - EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); - for (int i = 0; i < count; ++i) { - EXPECT_FLOAT_EQ(shared_params.cpu_data()[i], ip1_weights->cpu_data()[i]); - } - // Check that diff blobs of shared weights are at different locations in - // memory. (The diffs should be accumulated at update time.) - EXPECT_NE(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); -======= - // Check that data and diff blobs of shared weights share the same memory - // locations. 
- EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); - EXPECT_EQ(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); - for (int i = 0; i < count; ++i) { - EXPECT_FLOAT_EQ(shared_params.cpu_data()[i], ip1_weights->cpu_data()[i]); - } ->>>>>>> 0dbadac... triplet data generation and network update -} - -TYPED_TEST(NetTest, TestParamPropagateDown) { - typedef typename TypeParam::Dtype Dtype; - vector*> bottom; - const bool kBiasTerm = true, kForceBackward = false; - const Dtype* kLossWeight1 = NULL; - const Dtype* kLossWeight2 = NULL; - - // Run the net with all params learned; check that gradients are non-zero. - Caffe::set_random_seed(this->seed_); - Dtype blobs_lr_w1 = 1, blobs_lr_w2 = 1, blobs_lr_b1 = 2, blobs_lr_b2 = 2; - this->InitUnsharedWeightsNet(kLossWeight1, kLossWeight2, kForceBackward, - kBiasTerm, blobs_lr_w1, blobs_lr_w2, blobs_lr_b1, blobs_lr_b2); - this->net_->Forward(bottom); - this->net_->Backward(); - const vector > >& params = this->net_->params(); - const int num_params = params.size(); - ASSERT_EQ(4, num_params); - const Dtype kNonZeroTestMin = 1e-3; - vector param_asums(params.size()); - for (int i = 0; i < num_params; ++i) { - const Dtype param_asum = - caffe_cpu_asum(params[i]->count(), params[i]->cpu_diff()); - param_asums[i] = param_asum; - EXPECT_GT(param_asum, kNonZeroTestMin); - } - - // Change the learning rates to different non-zero values; should see same - // gradients. 
- Caffe::set_random_seed(this->seed_); - blobs_lr_w1 *= 2, blobs_lr_w2 *= 2, blobs_lr_b1 *= 2, blobs_lr_b2 *= 2; - this->InitUnsharedWeightsNet(kLossWeight1, kLossWeight2, kForceBackward, - kBiasTerm, blobs_lr_w1, blobs_lr_w2, blobs_lr_b1, blobs_lr_b2); - this->net_->Forward(bottom); - this->net_->Backward(); - const vector > >& params2 = this->net_->params(); - ASSERT_EQ(num_params, params2.size()); - for (int i = 0; i < num_params; ++i) { - const Dtype param_asum = - caffe_cpu_asum(params2[i]->count(), params2[i]->cpu_diff()); - EXPECT_FLOAT_EQ(param_asum, param_asums[i]); - } - - // Change a subset of the learning rates to zero; check that we see zero - // gradients for those. - Caffe::set_random_seed(this->seed_); - blobs_lr_w1 = 1, blobs_lr_w2 = 0, blobs_lr_b1 = 0, blobs_lr_b2 = 1; - this->InitUnsharedWeightsNet(kLossWeight1, kLossWeight2, kForceBackward, - kBiasTerm, blobs_lr_w1, blobs_lr_w2, blobs_lr_b1, blobs_lr_b2); - this->net_->Forward(bottom); - this->net_->Backward(); - const vector > >& params3 = this->net_->params(); - ASSERT_EQ(num_params, params3.size()); - for (int i = 0; i < num_params; ++i) { - const Dtype param_asum = - caffe_cpu_asum(params3[i]->count(), params3[i]->cpu_diff()); - if (i == 1 || i == 2) { - EXPECT_FLOAT_EQ(0, param_asum); - } else { - EXPECT_FLOAT_EQ(param_asum, param_asums[i]); - } - } - - // Change the opposite subset of the learning rates to zero. 
- Caffe::set_random_seed(this->seed_); - blobs_lr_w1 = 0, blobs_lr_w2 = 1, blobs_lr_b1 = 1, blobs_lr_b2 = 0; - this->InitUnsharedWeightsNet(kLossWeight1, kLossWeight2, kForceBackward, - kBiasTerm, blobs_lr_w1, blobs_lr_w2, blobs_lr_b1, blobs_lr_b2); - this->net_->Forward(bottom); - this->net_->Backward(); - const vector > >& params4 = this->net_->params(); - ASSERT_EQ(num_params, params4.size()); - for (int i = 0; i < num_params; ++i) { - const Dtype param_asum = - caffe_cpu_asum(params4[i]->count(), params4[i]->cpu_diff()); - if (i == 0 || i == 3) { - EXPECT_FLOAT_EQ(0, param_asum); - } else { - EXPECT_FLOAT_EQ(param_asum, param_asums[i]); - } - } -} - -TYPED_TEST(NetTest, TestFromTo) { - typedef typename TypeParam::Dtype Dtype; - this->InitTinyNet(); - - // Run Forward and Backward, recording the data diff and loss. - Blob data; - data.ReshapeLike(*this->net_->blob_by_name("data")); - this->net_->ForwardPrefilled(); - this->net_->Backward(); - data.CopyFrom(*this->net_->blob_by_name("data"), true, true); - const Dtype *loss_ptr = this->net_->output_blobs()[0]->cpu_data(); - Dtype loss = *loss_ptr; - - // Check that combining partial Forwards gives the same loss. - for (int i = 1; i < this->net_->layers().size(); ++i) { - // Note that we skip layer zero to keep the same data. - this->net_->ForwardFromTo(1, 1); - if (i < this->net_->layers().size() - 1) { - this->net_->ForwardFrom(i + 1); - } - EXPECT_EQ(loss, *loss_ptr); - } - - // Check that combining partial Backwards gives the same data diff. 
- for (int i = 1; i < this->net_->layers().size(); ++i) { - this->net_->BackwardTo(i); - this->net_->BackwardFrom(i - 1); - for (int j = 0; j < data.count(); ++j) { - EXPECT_EQ(data.cpu_diff()[j], - this->net_->blob_by_name("data")->cpu_diff()[j]); - } - } -} - -class FilterNetTest : public ::testing::Test { - protected: - void RunFilterNetTest( - const string& input_param_string, const string& filtered_param_string) { - NetParameter input_param; - CHECK(google::protobuf::TextFormat::ParseFromString( - input_param_string, &input_param)); - NetParameter expected_filtered_param; - CHECK(google::protobuf::TextFormat::ParseFromString( - filtered_param_string, &expected_filtered_param)); - NetParameter actual_filtered_param; - Net::FilterNet(input_param, &actual_filtered_param); - EXPECT_EQ(expected_filtered_param.DebugString(), - actual_filtered_param.DebugString()); - // Also test idempotence. - NetParameter double_filtered_param; - Net::FilterNet(actual_filtered_param, &double_filtered_param); - EXPECT_EQ(actual_filtered_param.DebugString(), - double_filtered_param.DebugString()); - } -}; - -TEST_F(FilterNetTest, TestNoFilter) { - const string& input_proto = - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - "} "; - this->RunFilterNetTest(input_proto, input_proto); -} - -TEST_F(FilterNetTest, TestFilterLeNetTrainTest) { - const string& input_proto = - "name: 'LeNet' " - "layer { " - " name: 'mnist' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - " data_param { " - " source: 'mnist-train-leveldb' " - " batch_size: 64 " - " } " - " transform_param { " - " scale: 0.00390625 " - " } " - " include: { phase: TRAIN } " - "} " - "layer { " - " name: 'mnist' " - " type: 
'Data' " - " top: 'data' " - " top: 'label' " - " data_param { " - " source: 'mnist-test-leveldb' " - " batch_size: 100 " - " } " - " transform_param { " - " scale: 0.00390625 " - " } " - " include: { phase: TEST } " - "} " - "layer { " - " name: 'conv1' " - " type: 'Convolution' " - " bottom: 'data' " - " top: 'conv1' " - " param { " - " lr_mult: 1 " - " } " - " param { " - " lr_mult: 2 " - " } " - " convolution_param { " - " num_output: 20 " - " kernel_size: 5 " - " stride: 1 " - " weight_filler { " - " type: 'xavier' " - " } " - " bias_filler { " - " type: 'constant' " - " } " - " } " - "} " - "layer { " - " name: 'ip1' " - " type: 'InnerProduct' " - " bottom: 'conv1' " - " top: 'ip1' " - " param { " - " lr_mult: 1 " - " } " - " param { " - " lr_mult: 2 " - " } " - " inner_product_param { " - " num_output: 10 " - " weight_filler { " - " type: 'xavier' " - " } " - " bias_filler { " - " type: 'constant' " - " } " - " } " - "} " - "layer { " - " name: 'accuracy' " - " type: 'Accuracy' " - " bottom: 'ip1' " - " bottom: 'label' " - " top: 'accuracy' " - " include: { phase: TEST } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'ip2' " - " bottom: 'label' " - " top: 'loss' " - "} "; - const string input_proto_train = "state: { phase: TRAIN } " + input_proto; - const string input_proto_test = "state: { phase: TEST } " + input_proto; - const string output_proto_train = - "name: 'LeNet' " - "layer { " - " name: 'mnist' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - " data_param { " - " source: 'mnist-train-leveldb' " - " batch_size: 64 " - " } " - " transform_param { " - " scale: 0.00390625 " - " } " - " include: { phase: TRAIN } " - "} " - "layer { " - " name: 'conv1' " - " type: 'Convolution' " - " bottom: 'data' " - " top: 'conv1' " - " param { " - " lr_mult: 1 " - " } " - " param { " - " lr_mult: 2 " - " } " - " convolution_param { " - " num_output: 20 " - " kernel_size: 5 " - " stride: 1 " - " weight_filler { " - " 
type: 'xavier' " - " } " - " bias_filler { " - " type: 'constant' " - " } " - " } " - "} " - "layer { " - " name: 'ip1' " - " type: 'InnerProduct' " - " bottom: 'conv1' " - " top: 'ip1' " - " param { " - " lr_mult: 1 " - " } " - " param { " - " lr_mult: 2 " - " } " - " inner_product_param { " - " num_output: 10 " - " weight_filler { " - " type: 'xavier' " - " } " - " bias_filler { " - " type: 'constant' " - " } " - " } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'ip2' " - " bottom: 'label' " - " top: 'loss' " - "} "; - const string& output_proto_test = - "name: 'LeNet' " - "layer { " - " name: 'mnist' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - " data_param { " - " source: 'mnist-test-leveldb' " - " batch_size: 100 " - " } " - " transform_param { " - " scale: 0.00390625 " - " } " - " include: { phase: TEST } " - "} " - "layer { " - " name: 'conv1' " - " type: 'Convolution' " - " bottom: 'data' " - " top: 'conv1' " - " param { " - " lr_mult: 1 " - " } " - " param { " - " lr_mult: 2 " - " } " - " convolution_param { " - " num_output: 20 " - " kernel_size: 5 " - " stride: 1 " - " weight_filler { " - " type: 'xavier' " - " } " - " bias_filler { " - " type: 'constant' " - " } " - " } " - "} " - "layer { " - " name: 'ip1' " - " type: 'InnerProduct' " - " bottom: 'conv1' " - " top: 'ip1' " - " param { " - " lr_mult: 1 " - " } " - " param { " - " lr_mult: 2 " - " } " - " inner_product_param { " - " num_output: 10 " - " weight_filler { " - " type: 'xavier' " - " } " - " bias_filler { " - " type: 'constant' " - " } " - " } " - "} " - "layer { " - " name: 'accuracy' " - " type: 'Accuracy' " - " bottom: 'ip1' " - " bottom: 'label' " - " top: 'accuracy' " - " include: { phase: TEST } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'ip2' " - " bottom: 'label' " - " top: 'loss' " - "} "; - const string output_proto_train_explicit = - output_proto_train + " state: { phase: TRAIN } "; - const 
string output_proto_test_explicit = - output_proto_test + " state: { phase: TEST } "; - this->RunFilterNetTest(input_proto_train, output_proto_train_explicit); - this->RunFilterNetTest(input_proto_test, output_proto_test_explicit); -} - -TEST_F(FilterNetTest, TestFilterOutByStage) { - const string& input_proto = - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - " include: { stage: 'mystage' } " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - "} "; - const string& output_proto = - "name: 'TestNetwork' " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - "} "; - this->RunFilterNetTest(input_proto, output_proto); -} - -TEST_F(FilterNetTest, TestFilterOutByStage2) { - const string& input_proto = - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " include: { stage: 'mystage' } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - "} "; - const string& output_proto = - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - "} "; - this->RunFilterNetTest(input_proto, output_proto); -} - -TEST_F(FilterNetTest, TestFilterInByStage) { - const string& input_proto = - "state: { stage: 'mystage' } " - "name: 'TestNetwork' " - "layer { " - " 
name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " include: { stage: 'mystage' } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - "} "; - this->RunFilterNetTest(input_proto, input_proto); -} - -TEST_F(FilterNetTest, TestFilterInByStage2) { - const string& input_proto = - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " exclude: { stage: 'mystage' } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - "} "; - this->RunFilterNetTest(input_proto, input_proto); -} - -TEST_F(FilterNetTest, TestFilterOutByMultipleStage) { - const string& input_proto = - "state: { stage: 'mystage' } " - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " include: { stage: 'mystage' stage: 'myotherstage' } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - " include: { stage: 'mystage' } " - "} "; - const string& output_proto = - "state: { stage: 'mystage' } " - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - " include: { stage: 'mystage' } " - "} "; - this->RunFilterNetTest(input_proto, output_proto); -} - -TEST_F(FilterNetTest, TestFilterInByMultipleStage) { - const string& input_proto = - "state: { stage: 
'mystage' } " - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " include: { stage: 'myotherstage' } " - " include: { stage: 'mystage' } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - " include: { stage: 'mystage' } " - "} "; - this->RunFilterNetTest(input_proto, input_proto); -} - -TEST_F(FilterNetTest, TestFilterInByMultipleStage2) { - const string& input_proto = - "state: { stage: 'mystage' stage: 'myotherstage' } " - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " include: { stage: 'mystage' stage: 'myotherstage' } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - " include: { stage: 'mystage' } " - "} "; - this->RunFilterNetTest(input_proto, input_proto); -} - -TEST_F(FilterNetTest, TestFilterInByNotStage) { - const string& input_proto = - "state: { stage: 'mystage' } " - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " include: { not_stage: 'myotherstage' } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - " include: { not_stage: 'myotherstage' } " - "} "; - this->RunFilterNetTest(input_proto, input_proto); -} - -TEST_F(FilterNetTest, TestFilterOutByNotStage) { - const string& input_proto = - "state: { stage: 'mystage' } " - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " 
- " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " include: { not_stage: 'mystage' } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - " include: { not_stage: 'mystage' } " - "} "; - const string& output_proto = - "state: { stage: 'mystage' } " - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} "; - this->RunFilterNetTest(input_proto, output_proto); -} - -TEST_F(FilterNetTest, TestFilterOutByMinLevel) { - const string& input_proto = - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " include: { min_level: 3 } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - "} "; - const string& output_proto = - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - "} "; - this->RunFilterNetTest(input_proto, output_proto); -} - -TEST_F(FilterNetTest, TestFilterOutByMaxLevel) { - const string& input_proto = - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " include: { max_level: -3 } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - "} "; - const string& output_proto = - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer 
{ " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - "} "; - this->RunFilterNetTest(input_proto, output_proto); -} - -TEST_F(FilterNetTest, TestFilterInByMinLevel) { - const string& input_proto = - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " include: { min_level: 0 } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - "} "; - this->RunFilterNetTest(input_proto, input_proto); -} - -TEST_F(FilterNetTest, TestFilterInByMinLevel2) { - const string& input_proto = - "state: { level: 7 } " - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " include: { min_level: 3 } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - "} "; - this->RunFilterNetTest(input_proto, input_proto); -} - -TEST_F(FilterNetTest, TestFilterInByMaxLevel) { - const string& input_proto = - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " include: { max_level: 0 } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - "} "; - this->RunFilterNetTest(input_proto, input_proto); -} - -TEST_F(FilterNetTest, TestFilterInByMaxLevel2) { - const string& input_proto = - "state: { level: -7 } " - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " 
name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " include: { max_level: -3 } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - "} "; - this->RunFilterNetTest(input_proto, input_proto); -} - -TEST_F(FilterNetTest, TestFilterInOutByIncludeMultiRule) { - const string& input_proto = - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " include: { min_level: 2 phase: TRAIN } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - " include: { min_level: 2 phase: TEST } " - "} "; - const string& input_proto_train = - "state: { level: 4 phase: TRAIN } " + input_proto; - const string& input_proto_test = - "state: { level: 4 phase: TEST } " + input_proto; - const string& output_proto_train = - "state: { level: 4 phase: TRAIN } " - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " include: { min_level: 2 phase: TRAIN } " - "} "; - const string& output_proto_test = - "state: { level: 4 phase: TEST } " - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - " include: { min_level: 2 phase: TEST } " - "} "; - this->RunFilterNetTest(input_proto_train, output_proto_train); - this->RunFilterNetTest(input_proto_test, output_proto_test); -} - -TEST_F(FilterNetTest, TestFilterInByIncludeMultiRule) { - const string& input_proto = - "name: 'TestNetwork' " - "layer { " - " name: 'data' 
" - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " include: { min_level: 2 phase: TRAIN } " - " include: { phase: TEST } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - " include: { min_level: 2 phase: TEST } " - " include: { phase: TRAIN } " - "} "; - const string& input_proto_train = - "state: { level: 2 phase: TRAIN } " + input_proto; - const string& input_proto_test = - "state: { level: 2 phase: TEST } " + input_proto; - this->RunFilterNetTest(input_proto_train, input_proto_train); - this->RunFilterNetTest(input_proto_test, input_proto_test); -} - -TEST_F(FilterNetTest, TestFilterInOutByExcludeMultiRule) { - const string& input_proto = - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " exclude: { min_level: 2 phase: TRAIN } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - " exclude: { min_level: 2 phase: TEST } " - "} "; - const string& input_proto_train = - "state: { level: 4 phase: TRAIN } " + input_proto; - const string& input_proto_test = - "state: { level: 4 phase: TEST } " + input_proto; - const string& output_proto_train = - "state: { level: 4 phase: TRAIN } " - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - " exclude: { min_level: 2 phase: TEST } " - "} "; - const string& output_proto_test = - "state: { level: 4 phase: TEST } " - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 
'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " exclude: { min_level: 2 phase: TRAIN } " - "} "; - this->RunFilterNetTest(input_proto_train, output_proto_train); - this->RunFilterNetTest(input_proto_test, output_proto_test); -} - -TYPED_TEST(NetTest, TestReshape) { - typedef typename TypeParam::Dtype Dtype; - // We set up bottom blobs of two different sizes, switch between - // them, and check that forward and backward both run and the results - // are the same. - Caffe::set_random_seed(this->seed_); - Caffe::set_mode(Caffe::CPU); - FillerParameter filler_param; - filler_param.set_std(1); - GaussianFiller filler(filler_param); - Blob blob1(4, 3, 9, 11); - Blob blob2(2, 3, 12, 10); - filler.Fill(&blob1); - filler.Fill(&blob2); - - this->InitReshapableNet(); - Blob* input_blob = this->net_->input_blobs()[0]; - Blob* output_blob = this->net_->output_blobs()[0]; - input_blob->Reshape(blob1.num(), blob1.channels(), blob1.height(), - blob1.width()); - caffe_copy(blob1.count(), blob1.cpu_data(), input_blob->mutable_cpu_data()); - this->net_->ForwardPrefilled(); - // call backward just to make sure it runs - this->net_->Backward(); - Blob output1(output_blob->num(), output_blob->channels(), - output_blob->height(), output_blob->width()); - caffe_copy(output1.count(), output_blob->cpu_data(), - output1.mutable_cpu_data()); - - input_blob->Reshape(blob2.num(), blob2.channels(), blob2.height(), - blob2.width()); - caffe_copy(blob2.count(), blob2.cpu_data(), input_blob->mutable_cpu_data()); - this->net_->ForwardPrefilled(); - this->net_->Backward(); - Blob output2(output_blob->num(), output_blob->channels(), - output_blob->height(), output_blob->width()); - caffe_copy(output2.count(), output_blob->cpu_data(), - output2.mutable_cpu_data()); - - input_blob->Reshape(blob1.num(), blob1.channels(), blob1.height(), - blob1.width()); - caffe_copy(blob1.count(), blob1.cpu_data(), 
input_blob->mutable_cpu_data()); - this->net_->ForwardPrefilled(); - this->net_->Backward(); - for (int i = 0; i < output1.count(); ++i) { - CHECK_EQ(*(output1.cpu_data() + i), *(output_blob->cpu_data() + i)); - } - - input_blob->Reshape(blob2.num(), blob2.channels(), blob2.height(), - blob2.width()); - caffe_copy(blob2.count(), blob2.cpu_data(), input_blob->mutable_cpu_data()); - this->net_->ForwardPrefilled(); - this->net_->Backward(); - for (int i = 0; i < output2.count(); ++i) { - CHECK_EQ(*(output2.cpu_data() + i), *(output_blob->cpu_data() + i)); - } -} - -TYPED_TEST(NetTest, TestSkipPropagateDown) { - // check bottom_need_backward if propagate_down is true - this->InitSkipPropNet(false); - vector vec_layer_need_backward = this->net_->layer_need_backward(); - for (int layer_id = 0; layer_id < this->net_->layers().size(); ++layer_id) { - string layer_name = this->net_->layer_names()[layer_id]; - if (layer_name == "loss") { - // access to bottom_need_backward coresponding to label's blob - bool need_back = this->net_->bottom_need_backward()[layer_id][1]; - // if propagate_down is true, the loss layer will try to - // backpropagate on labels - EXPECT_TRUE(need_back) << "bottom_need_backward should be True"; - } - // layer_need_backward should be True except for data and silence layers - if (layer_name.find("data") != std::string::npos || - layer_name == "silence") { - EXPECT_FALSE(vec_layer_need_backward[layer_id]) - << "layer_need_backward for " << layer_name << " should be False"; - } else { - EXPECT_TRUE(vec_layer_need_backward[layer_id]) - << "layer_need_backward for " << layer_name << " should be True"; - } - } - // check bottom_need_backward if propagat_down is false - this->InitSkipPropNet(true); - vec_layer_need_backward.clear(); - vec_layer_need_backward = this->net_->layer_need_backward(); - for (int layer_id = 0; layer_id < this->net_->layers().size(); ++layer_id) { - string layer_name = this->net_->layer_names()[layer_id]; - if (layer_name == "loss") 
{ - // access to bottom_need_backward coresponding to label's blob - bool need_back = this->net_->bottom_need_backward()[layer_id][1]; - // if propagate_down is false, the loss layer will not try to - // backpropagate on labels - EXPECT_FALSE(need_back) << "bottom_need_backward should be False"; - } - // layer_need_backward should be False except for innerproduct and - // loss layers - if (layer_name == "innerproduct" || layer_name == "loss") { - EXPECT_TRUE(vec_layer_need_backward[layer_id]) - << "layer_need_backward for " << layer_name << " should be True"; - } else { - EXPECT_FALSE(vec_layer_need_backward[layer_id]) - << "layer_need_backward for " << layer_name << " should be False"; - } - } -} - -} // namespace caffe diff --git a/src/caffe/test/test_slice_layer.cpp b/src/caffe/test/test_slice_layer.cpp index 2d2d0fdc005..ccd03646d19 100644 --- a/src/caffe/test/test_slice_layer.cpp +++ b/src/caffe/test/test_slice_layer.cpp @@ -88,21 +88,6 @@ TYPED_TEST(SliceLayerTest, TestSetupChannels) { EXPECT_EQ(this->blob_bottom_->width(), this->blob_top_0_->width()); } -TYPED_TEST(SliceLayerTest, TestTrivialSlice) { - // Test the trivial (single output) "slice" operation -- - // should be the identity. - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - SliceLayer layer(layer_param); - this->blob_top_vec_0_.resize(1); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_0_); - ASSERT_EQ(this->blob_bottom_->shape(), this->blob_top_0_->shape()); - for (int i = 0; i < this->blob_bottom_->count(); ++i) { - EXPECT_EQ(this->blob_bottom_->cpu_data()[i], - this->blob_top_0_->cpu_data()[i]); - } -} - TYPED_TEST(SliceLayerTest, TestSliceAcrossNum) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; @@ -176,18 +161,6 @@ TYPED_TEST(SliceLayerTest, TestSliceAcrossChannels) { } } -TYPED_TEST(SliceLayerTest, TestGradientTrivial) { - // Test the trivial (single output) "slice" operation -- - // should be the identity. 
- typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - SliceLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-3); - this->blob_top_vec_0_.resize(1); - checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, - this->blob_top_vec_0_); -} - TYPED_TEST(SliceLayerTest, TestGradientAcrossNum) { typedef typename TypeParam::Dtype Dtype; // Gradient checks are slow; reduce blob size. diff --git a/src/caffe/test/test_triplet_loss_layer.orig b/src/caffe/test/test_triplet_loss_layer.orig deleted file mode 100644 index 9eccca9e2e5..00000000000 --- a/src/caffe/test/test_triplet_loss_layer.orig +++ /dev/null @@ -1,230 +0,0 @@ -#include -#include -#include -#include -#include - -#include "gtest/gtest.h" - -#include "caffe/blob.hpp" -#include "caffe/common.hpp" -#include "caffe/filler.hpp" -#include "caffe/vision_layers.hpp" - -#include "caffe/test/test_caffe_main.hpp" -#include "caffe/test/test_gradient_check_util.hpp" - -namespace caffe { - -template -class TripletLossLayerTest : public MultiDeviceTest { - typedef typename TypeParam::Dtype Dtype; - - protected: - TripletLossLayerTest() -<<<<<<< 22f42667487341b9aa1d7d1f9854410111b1601c - : blob_bottom_data_(new Blob(50, 1, 1, 1)), - blob_bottom_y_(new Blob(50, 1, 1, 1)), -======= - : blob_bottom_data_i_(new Blob(512, 2, 1, 1)), - blob_bottom_data_j_(new Blob(512, 2, 1, 1)), - blob_bottom_data_k_(new Blob(512, 2, 1, 1)), - blob_bottom_y_(new Blob(512, 1, 1, 1)), ->>>>>>> suit for opencv3.0.0 - blob_top_loss_(new Blob()) { - // fill the values - FillerParameter filler_param; - filler_param.set_min(-1.0); - filler_param.set_max(1.0); // distances~=1.0 to test both sides of margin - UniformFiller filler(filler_param); -<<<<<<< 22f42667487341b9aa1d7d1f9854410111b1601c - filler.Fill(this->blob_bottom_data_); - blob_bottom_vec_.push_back(blob_bottom_data_); -======= - filler.Fill(this->blob_bottom_data_i_); - blob_bottom_vec_.push_back(blob_bottom_data_i_); - filler.Fill(this->blob_bottom_data_j_); - 
blob_bottom_vec_.push_back(blob_bottom_data_j_); - filler.Fill(this->blob_bottom_data_k_); - blob_bottom_vec_.push_back(blob_bottom_data_k_); ->>>>>>> suit for opencv3.0.0 - for (int i = 0; i < blob_bottom_y_->count(); ++i) { - blob_bottom_y_->mutable_cpu_data()[i] = caffe_rng_rand() % 2; // 0 or 1 - } - blob_bottom_vec_.push_back(blob_bottom_y_); - blob_top_vec_.push_back(blob_top_loss_); - } - virtual ~TripletLossLayerTest() { -<<<<<<< 22f42667487341b9aa1d7d1f9854410111b1601c - delete blob_bottom_data_; -======= - delete blob_bottom_data_i_; - delete blob_bottom_data_j_; - delete blob_bottom_data_k_; ->>>>>>> suit for opencv3.0.0 - delete blob_bottom_y_; - delete blob_top_loss_; - } - -<<<<<<< 22f42667487341b9aa1d7d1f9854410111b1601c - Blob* const blob_bottom_data_; -======= - Blob* const blob_bottom_data_i_; - Blob* const blob_bottom_data_j_; - Blob* const blob_bottom_data_k_; ->>>>>>> suit for opencv3.0.0 - Blob* const blob_bottom_y_; - Blob* const blob_top_loss_; - vector*> blob_bottom_vec_; - vector*> blob_top_vec_; -}; - -TYPED_TEST_CASE(TripletLossLayerTest, TestDtypesAndDevices); - -TYPED_TEST(TripletLossLayerTest, TestForward) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - TripletLossLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - // manually compute to compare - const Dtype margin = layer_param.triplet_loss_param().margin(); -<<<<<<< 22f42667487341b9aa1d7d1f9854410111b1601c - const Dtype losstype = 0;//layer_param.triplet_loss_param().losstype(); - const int num_triplets = 3; - const int num_set = this->blob_bottom_data_->num()/(2 + num_triplets); - const int channels = this->blob_bottom_data_->channels(); - Dtype loss(0); - if (losstype == 0) { - for (int i = 0; i < num_set; ++i) { - Dtype dist_par(0); - for (int j = 0; j < channels; ++j) { - Dtype diff_pos = 
this->blob_bottom_data_->cpu_data()[(2+num_triplets)*i*channels+j] - - this->blob_bottom_data_->cpu_data()[((2+num_triplets)*i+1)*channels+j]; - dist_par = diff_pos*diff_pos; - loss += dist_par; - } - for (int triplet = 0; triplet < num_triplets; ++triplet) { - Dtype dist_sq(0); - for (int j = 0; j < channels; ++j) { - Dtype diff_pos = this->blob_bottom_data_->cpu_data()[(2+num_triplets)*i*channels+j] - - this->blob_bottom_data_->cpu_data()[((2+num_triplets)*i+1)*channels+j]; - dist_sq += diff_pos*diff_pos; - Dtype diff_neg = this->blob_bottom_data_->cpu_data()[(2+num_triplets)*i*channels+j] - - this->blob_bottom_data_->cpu_data()[((2+num_triplets)*i+2+triplet)*channels+j]; - dist_sq -= diff_neg*diff_neg; - } - loss += std::max(margin + dist_sq, Dtype(0.0)); - } - } - } /*else { - for (int i = 0; i < num; ++i) { - Dtype dist_sq(0); - Dtype dist_par(0); -======= - const int num = this->blob_bottom_data_i_->num(); - const int channels = this->blob_bottom_data_i_->channels(); - Dtype loss(0); - for (int i = 0; i < num; ++i) { - Dtype dist_sq(0); ->>>>>>> suit for opencv3.0.0 - for (int j = 0; j < channels; ++j) { - Dtype diff_pos = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - - this->blob_bottom_data_j_->cpu_data()[i*channels+j]; - dist_sq += diff_pos*diff_pos; -<<<<<<< 22f42667487341b9aa1d7d1f9854410111b1601c - dist_sq += margin; - Dtype diff_neg = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - - this->blob_bottom_data_k_->cpu_data()[i*channels+j]; - dist_sq = 1 - diff_neg*diff_neg/dist_sq; - Dtype diff_par = this->blob_bottom_data_l_->cpu_data()[i*channels+j] - - this->blob_bottom_data_m_->cpu_data()[i*channels+j]; - dist_par = diff_par*diff_par; - } - loss += std::max(dist_sq, Dtype(0.0)); - loss += dist_par; - } - }*/ - loss /= static_cast(num_set) * Dtype(2); -======= - Dtype diff_neg = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - - this->blob_bottom_data_j_->cpu_data()[i*channels+j]; - dist_sq -= diff_neg*diff_neg; - } - loss += 
std::max(margin + dist_sq, 0.0); - /*if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs - loss += dist_sq; - } else { - Dtype dist = std::max(margin - sqrt(dist_sq), 0.0); - loss += dist*dist; - }*/ - } - loss /= static_cast(num) * Dtype(2); ->>>>>>> suit for opencv3.0.0 - EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); -} - -TYPED_TEST(TripletLossLayerTest, TestGradient) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - TripletLossLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - GradientChecker checker(1e-2, 1e-2, 1701); -<<<<<<< 22f42667487341b9aa1d7d1f9854410111b1601c - // check the gradient for the first 5 bottom layers - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_, 0); -} -======= - // check the gradient for the first two bottom layers - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_, 0); - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_, 1); -} - -/*TYPED_TEST(TripletLossLayerTest, TestForwardLegacy) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - layer_param.mutable_triplet_loss_param()->set_legacy_version(true); - TripletLossLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - // manually compute to compare - const Dtype margin = layer_param.triplet_loss_param().margin(); - const int num = this->blob_bottom_data_i_->num(); - const int channels = this->blob_bottom_data_i_->channels(); - Dtype loss(0); - for (int i = 0; i < num; ++i) { - Dtype dist_sq(0); - for (int j = 0; j < channels; ++j) { - Dtype diff = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - - this->blob_bottom_data_j_->cpu_data()[i*channels+j]; - dist_sq += diff*diff; - } - if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs - loss += dist_sq; - } else 
{ - loss += std::max(margin - dist_sq, Dtype(0.0)); - } - } - loss /= static_cast(num) * Dtype(2); - EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); -} - -TYPED_TEST(TripletLossLayerTest, TestGradientLegacy) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - layer_param.mutable_triplet_loss_param()->set_legacy_version(true); - TripletLossLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - GradientChecker checker(1e-2, 1e-2, 1701); - // check the gradient for the first two bottom layers - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_, 0); - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_, 1); -}*/ - ->>>>>>> suit for opencv3.0.0 -} // namespace caffe diff --git a/src/caffe/test/test_triplet_loss_layer.orig.orig b/src/caffe/test/test_triplet_loss_layer.orig.orig deleted file mode 100644 index 4f1764d2e33..00000000000 --- a/src/caffe/test/test_triplet_loss_layer.orig.orig +++ /dev/null @@ -1,317 +0,0 @@ -#include -#include -#include -#include -#include - -#include "gtest/gtest.h" - -#include "caffe/blob.hpp" -#include "caffe/common.hpp" -#include "caffe/filler.hpp" -#include "caffe/vision_layers.hpp" - -#include "caffe/test/test_caffe_main.hpp" -#include "caffe/test/test_gradient_check_util.hpp" - -namespace caffe { - -template -class TripletLossLayerTest : public MultiDeviceTest { - typedef typename TypeParam::Dtype Dtype; - - protected: - TripletLossLayerTest() -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 -<<<<<<< 22f42667487341b9aa1d7d1f9854410111b1601c -======= -<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da:src/caffe/test/test_triplet_loss_layer.orig -<<<<<<< 80a07dd611590aa6547f3a3b2af35c791f32dfcf ->>>>>>> triplet data generation and network update - : blob_bottom_data_(new Blob(50, 1, 1, 1)), - blob_bottom_y_(new Blob(50, 1, 1, 1)), -======= -======= ->>>>>>> triplet data generation and network 
update:src/caffe/test/test_triplet_loss_layer - : blob_bottom_data_i_(new Blob(512, 2, 1, 1)), - blob_bottom_data_j_(new Blob(512, 2, 1, 1)), - blob_bottom_data_k_(new Blob(512, 2, 1, 1)), - blob_bottom_y_(new Blob(512, 1, 1, 1)), -<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da:src/caffe/test/test_triplet_loss_layer.orig ->>>>>>> suit for opencv3.0.0 -======= ->>>>>>> triplet data generation and network update:src/caffe/test/test_triplet_loss_layer - blob_top_loss_(new Blob()) { - // fill the values - FillerParameter filler_param; - filler_param.set_min(-1.0); - filler_param.set_max(1.0); // distances~=1.0 to test both sides of margin - UniformFiller filler(filler_param); -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 -<<<<<<< 22f42667487341b9aa1d7d1f9854410111b1601c -======= -<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da:src/caffe/test/test_triplet_loss_layer.orig -<<<<<<< 80a07dd611590aa6547f3a3b2af35c791f32dfcf ->>>>>>> triplet data generation and network update - filler.Fill(this->blob_bottom_data_); - blob_bottom_vec_.push_back(blob_bottom_data_); -======= -======= ->>>>>>> triplet data generation and network update:src/caffe/test/test_triplet_loss_layer - filler.Fill(this->blob_bottom_data_i_); - blob_bottom_vec_.push_back(blob_bottom_data_i_); - filler.Fill(this->blob_bottom_data_j_); - blob_bottom_vec_.push_back(blob_bottom_data_j_); - filler.Fill(this->blob_bottom_data_k_); - blob_bottom_vec_.push_back(blob_bottom_data_k_); -<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da:src/caffe/test/test_triplet_loss_layer.orig ->>>>>>> suit for opencv3.0.0 -======= ->>>>>>> triplet data generation and network update:src/caffe/test/test_triplet_loss_layer - for (int i = 0; i < blob_bottom_y_->count(); ++i) { - blob_bottom_y_->mutable_cpu_data()[i] = caffe_rng_rand() % 2; // 0 or 1 - } - blob_bottom_vec_.push_back(blob_bottom_y_); - blob_top_vec_.push_back(blob_top_loss_); - } - virtual ~TripletLossLayerTest() { -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 
-<<<<<<< 22f42667487341b9aa1d7d1f9854410111b1601c -======= -<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da:src/caffe/test/test_triplet_loss_layer.orig -<<<<<<< 80a07dd611590aa6547f3a3b2af35c791f32dfcf ->>>>>>> triplet data generation and network update - delete blob_bottom_data_; -======= - delete blob_bottom_data_i_; - delete blob_bottom_data_j_; - delete blob_bottom_data_k_; ->>>>>>> suit for opencv3.0.0 -======= - delete blob_bottom_data_i_; - delete blob_bottom_data_j_; - delete blob_bottom_data_k_; ->>>>>>> triplet data generation and network update:src/caffe/test/test_triplet_loss_layer - delete blob_bottom_y_; - delete blob_top_loss_; - } - -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 -<<<<<<< 22f42667487341b9aa1d7d1f9854410111b1601c -======= -<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da:src/caffe/test/test_triplet_loss_layer.orig -<<<<<<< 80a07dd611590aa6547f3a3b2af35c791f32dfcf ->>>>>>> triplet data generation and network update - Blob* const blob_bottom_data_; -======= - Blob* const blob_bottom_data_i_; - Blob* const blob_bottom_data_j_; - Blob* const blob_bottom_data_k_; ->>>>>>> suit for opencv3.0.0 -======= - Blob* const blob_bottom_data_i_; - Blob* const blob_bottom_data_j_; - Blob* const blob_bottom_data_k_; ->>>>>>> triplet data generation and network update:src/caffe/test/test_triplet_loss_layer - Blob* const blob_bottom_y_; - Blob* const blob_top_loss_; - vector*> blob_bottom_vec_; - vector*> blob_top_vec_; -}; - -TYPED_TEST_CASE(TripletLossLayerTest, TestDtypesAndDevices); - -TYPED_TEST(TripletLossLayerTest, TestForward) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - TripletLossLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - // manually compute to compare - const Dtype margin = layer_param.triplet_loss_param().margin(); -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 -<<<<<<< 
22f42667487341b9aa1d7d1f9854410111b1601c -======= -<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da:src/caffe/test/test_triplet_loss_layer.orig -<<<<<<< 80a07dd611590aa6547f3a3b2af35c791f32dfcf ->>>>>>> triplet data generation and network update - const Dtype losstype = 0;//layer_param.triplet_loss_param().losstype(); - const int num_triplets = 3; - const int num_set = this->blob_bottom_data_->num()/(2 + num_triplets); - const int channels = this->blob_bottom_data_->channels(); -======= - const int num = this->blob_bottom_data_i_->num(); - const int channels = this->blob_bottom_data_i_->channels(); ->>>>>>> triplet data generation and network update:src/caffe/test/test_triplet_loss_layer - Dtype loss(0); - for (int i = 0; i < num; ++i) { - Dtype dist_sq(0); -<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da:src/caffe/test/test_triplet_loss_layer.orig - Dtype dist_par(0); -======= - const int num = this->blob_bottom_data_i_->num(); - const int channels = this->blob_bottom_data_i_->channels(); - Dtype loss(0); - for (int i = 0; i < num; ++i) { - Dtype dist_sq(0); ->>>>>>> suit for opencv3.0.0 -======= ->>>>>>> triplet data generation and network update:src/caffe/test/test_triplet_loss_layer - for (int j = 0; j < channels; ++j) { - Dtype diff_pos = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - - this->blob_bottom_data_j_->cpu_data()[i*channels+j]; - dist_sq += diff_pos*diff_pos; -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 -<<<<<<< 22f42667487341b9aa1d7d1f9854410111b1601c -======= -<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da:src/caffe/test/test_triplet_loss_layer.orig -<<<<<<< 80a07dd611590aa6547f3a3b2af35c791f32dfcf ->>>>>>> triplet data generation and network update - dist_sq += margin; -======= ->>>>>>> triplet data generation and network update:src/caffe/test/test_triplet_loss_layer - Dtype diff_neg = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - - this->blob_bottom_data_j_->cpu_data()[i*channels+j]; - dist_sq -= diff_neg*diff_neg; - } - 
loss += std::max(margin + dist_sq, 0.0); - /*if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs - loss += dist_sq; - } else { - Dtype dist = std::max(margin - sqrt(dist_sq), 0.0); - loss += dist*dist; - }*/ - } -<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da:src/caffe/test/test_triplet_loss_layer.orig - }*/ - loss /= static_cast(num_set) * Dtype(2); -======= - Dtype diff_neg = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - - this->blob_bottom_data_j_->cpu_data()[i*channels+j]; - dist_sq -= diff_neg*diff_neg; - } - loss += std::max(margin + dist_sq, 0.0); - /*if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs - loss += dist_sq; - } else { - Dtype dist = std::max(margin - sqrt(dist_sq), 0.0); - loss += dist*dist; - }*/ - } - loss /= static_cast(num) * Dtype(2); ->>>>>>> suit for opencv3.0.0 -======= - loss /= static_cast(num) * Dtype(2); ->>>>>>> triplet data generation and network update:src/caffe/test/test_triplet_loss_layer - EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); -} - -TYPED_TEST(TripletLossLayerTest, TestGradient) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - TripletLossLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - GradientChecker checker(1e-2, 1e-2, 1701); -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 -<<<<<<< 22f42667487341b9aa1d7d1f9854410111b1601c -======= -<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da:src/caffe/test/test_triplet_loss_layer.orig -<<<<<<< 80a07dd611590aa6547f3a3b2af35c791f32dfcf ->>>>>>> triplet data generation and network update - // check the gradient for the first 5 bottom layers -======= - // check the gradient for the first two bottom layers ->>>>>>> triplet data generation and network update:src/caffe/test/test_triplet_loss_layer - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_, 0); - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_, 
1); -} - -/*TYPED_TEST(TripletLossLayerTest, TestForwardLegacy) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - layer_param.mutable_triplet_loss_param()->set_legacy_version(true); - TripletLossLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - // manually compute to compare - const Dtype margin = layer_param.triplet_loss_param().margin(); - const int num = this->blob_bottom_data_i_->num(); - const int channels = this->blob_bottom_data_i_->channels(); - Dtype loss(0); - for (int i = 0; i < num; ++i) { - Dtype dist_sq(0); - for (int j = 0; j < channels; ++j) { - Dtype diff = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - - this->blob_bottom_data_j_->cpu_data()[i*channels+j]; - dist_sq += diff*diff; - } - if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs - loss += dist_sq; - } else { - loss += std::max(margin - dist_sq, Dtype(0.0)); - } - } - loss /= static_cast(num) * Dtype(2); - EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); -} -<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da:src/caffe/test/test_triplet_loss_layer.orig -======= - // check the gradient for the first two bottom layers - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_, 0); - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_, 1); -} - -/*TYPED_TEST(TripletLossLayerTest, TestForwardLegacy) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - layer_param.mutable_triplet_loss_param()->set_legacy_version(true); - TripletLossLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - // manually compute to compare - const Dtype margin = layer_param.triplet_loss_param().margin(); - const int num = this->blob_bottom_data_i_->num(); - const int channels = 
this->blob_bottom_data_i_->channels(); - Dtype loss(0); - for (int i = 0; i < num; ++i) { - Dtype dist_sq(0); - for (int j = 0; j < channels; ++j) { - Dtype diff = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - - this->blob_bottom_data_j_->cpu_data()[i*channels+j]; - dist_sq += diff*diff; - } - if (this->blob_bottom_y_->cpu_data()[i]) { // similar pairs - loss += dist_sq; - } else { - loss += std::max(margin - dist_sq, Dtype(0.0)); - } - } - loss /= static_cast(num) * Dtype(2); - EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); -} -======= ->>>>>>> triplet data generation and network update:src/caffe/test/test_triplet_loss_layer - -TYPED_TEST(TripletLossLayerTest, TestGradientLegacy) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - layer_param.mutable_triplet_loss_param()->set_legacy_version(true); - TripletLossLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - GradientChecker checker(1e-2, 1e-2, 1701); - // check the gradient for the first two bottom layers - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_, 0); - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_, 1); -}*/ - -<<<<<<< c1f1dd90b4c8cde3ee05a2b077d0ebe69c9766da:src/caffe/test/test_triplet_loss_layer.orig ->>>>>>> suit for opencv3.0.0 -======= ->>>>>>> triplet data generation and network update:src/caffe/test/test_triplet_loss_layer -} // namespace caffe diff --git a/src/caffe/test/test_upgrade_proto.cpp b/src/caffe/test/test_upgrade_proto.cpp index ee05b151e72..006720231a5 100644 --- a/src/caffe/test/test_upgrade_proto.cpp +++ b/src/caffe/test/test_upgrade_proto.cpp @@ -2892,7 +2892,6 @@ TEST_F(NetUpgradeTest, TestImageNet) { this->RunV1UpgradeTest(expected_v1_proto, expected_v2_proto); } // NOLINT(readability/fn_size) -#ifdef USE_OPENCV TEST_F(NetUpgradeTest, TestUpgradeV1LayerType) { LayerParameter layer_param; shared_ptr > layer; @@ -2907,25 
+2906,16 @@ TEST_F(NetUpgradeTest, TestUpgradeV1LayerType) { layer_param.set_type(v2_layer_type); // Data layers expect a DB if (v2_layer_type == "Data") { - #ifdef USE_LEVELDB string tmp; MakeTempDir(&tmp); boost::scoped_ptr db(db::GetDB(DataParameter_DB_LEVELDB)); db->Open(tmp, db::NEW); db->Close(); layer_param.mutable_data_param()->set_source(tmp); - #else - continue; - #endif // USE_LEVELDB } - #ifndef USE_OPENCV - if (v2_layer_type == "ImageData" || v2_layer_type == "WindowData") { - continue; - } - #endif // !USE_OPENCV layer = LayerRegistry::CreateLayer(layer_param); EXPECT_EQ(v2_layer_type, layer->type()); } } -#endif // USE_OPENCV + } // NOLINT(readability/fn_size) // namespace caffe diff --git a/src/caffe/util/db.cpp b/src/caffe/util/db.cpp index ccda054d881..f55420e9840 100644 --- a/src/caffe/util/db.cpp +++ b/src/caffe/util/db.cpp @@ -8,31 +8,23 @@ namespace caffe { namespace db { DB* GetDB(DataParameter::DB backend) { switch (backend) { -#ifdef USE_LEVELDB case DataParameter_DB_LEVELDB: return new LevelDB(); -#endif // USE_LEVELDB -#ifdef USE_LMDB case DataParameter_DB_LMDB: return new LMDB(); -#endif // USE_LMDB default: LOG(FATAL) << "Unknown database backend"; } } DB* GetDB(const string& backend) { -#ifdef USE_LEVELDB if (backend == "leveldb") { return new LevelDB(); - } -#endif // USE_LEVELDB -#ifdef USE_LMDB - if (backend == "lmdb") { + } else if (backend == "lmdb") { return new LMDB(); + } else { + LOG(FATAL) << "Unknown database backend"; } -#endif // USE_LMDB - LOG(FATAL) << "Unknown database backend"; } } // namespace db diff --git a/src/caffe/util/db_leveldb.cpp b/src/caffe/util/db_leveldb.cpp index f5c4d8a660d..06c46627d31 100644 --- a/src/caffe/util/db_leveldb.cpp +++ b/src/caffe/util/db_leveldb.cpp @@ -1,4 +1,3 @@ -#ifdef USE_LEVELDB #include "caffe/util/db_leveldb.hpp" #include @@ -20,4 +19,3 @@ void LevelDB::Open(const string& source, Mode mode) { } // namespace db } // namespace caffe -#endif // USE_LEVELDB diff --git 
a/src/caffe/util/db_lmdb.cpp b/src/caffe/util/db_lmdb.cpp index 78dd880ac41..a054b796806 100644 --- a/src/caffe/util/db_lmdb.cpp +++ b/src/caffe/util/db_lmdb.cpp @@ -1,4 +1,3 @@ -#ifdef USE_LMDB #include "caffe/util/db_lmdb.hpp" #include @@ -50,4 +49,3 @@ void LMDBTransaction::Put(const string& key, const string& value) { } // namespace db } // namespace caffe -#endif // USE_LMDB diff --git a/src/caffe/util/io.cpp b/src/caffe/util/io.cpp index f2b1dd98423..6f03314202c 100644 --- a/src/caffe/util/io.cpp +++ b/src/caffe/util/io.cpp @@ -3,11 +3,9 @@ #include #include #include -#ifdef USE_OPENCV #include #include #include -#endif // USE_OPENCV #include #include @@ -69,7 +67,6 @@ void WriteProtoToBinaryFile(const Message& proto, const char* filename) { CHECK(proto.SerializeToOstream(&output)); } -#ifdef USE_OPENCV cv::Mat ReadImageToCVMat(const string& filename, const int height, const int width, const bool is_color) { cv::Mat cv_img; @@ -101,7 +98,6 @@ cv::Mat ReadImageToCVMat(const string& filename, cv::Mat ReadImageToCVMat(const string& filename) { return ReadImageToCVMat(filename, 0, 0, true); } - // Do the file extension and encoding match? 
static bool matchExt(const std::string & fn, std::string en) { @@ -115,7 +111,6 @@ static bool matchExt(const std::string & fn, return true; return false; } - bool ReadImageToDatum(const string& filename, const int label, const int height, const int width, const bool is_color, const std::string & encoding, Datum* datum) { @@ -140,7 +135,6 @@ bool ReadImageToDatum(const string& filename, const int label, return false; } } -#endif // USE_OPENCV bool ReadFileToDatum(const string& filename, const int label, Datum* datum) { @@ -162,7 +156,6 @@ bool ReadFileToDatum(const string& filename, const int label, } } -#ifdef USE_OPENCV cv::Mat DecodeDatumToCVMatNative(const Datum& datum) { cv::Mat cv_img; CHECK(datum.encoded()) << "Datum not encoded"; @@ -234,5 +227,6 @@ void CVMatToDatum(const cv::Mat& cv_img, Datum* datum) { } datum->set_data(buffer); } -#endif // USE_OPENCV + + } // namespace caffe diff --git a/src/caffe/util/upgrade_proto.cpp b/src/caffe/util/upgrade_proto.cpp index ac379e50f4f..07b9727a27d 100644 --- a/src/caffe/util/upgrade_proto.cpp +++ b/src/caffe/util/upgrade_proto.cpp @@ -816,6 +816,10 @@ bool UpgradeV1LayerParameter(const V1LayerParameter& v1_layer_param, layer_param->mutable_threshold_param()->CopyFrom( v1_layer_param.threshold_param()); } + if (v1_layer_param.has_triplet_loss_param()) { + layer_param->mutable_triplet_loss_param()->CopyFrom( + v1_layer_param.triplet_loss_param()); + } if (v1_layer_param.has_window_data_param()) { layer_param->mutable_window_data_param()->CopyFrom( v1_layer_param.window_data_param()); @@ -913,6 +917,8 @@ const char* UpgradeV1LayerType(const V1LayerParameter_LayerType type) { return "Slice"; case V1LayerParameter_LayerType_TANH: return "TanH"; + case V1LayerParameter_LayerType_TRIPLET_LOSS: + return "TripletLoss"; case V1LayerParameter_LayerType_WINDOW_DATA: return "WindowData"; case V1LayerParameter_LayerType_THRESHOLD: diff --git a/tools/caffe.cpp b/tools/caffe.cpp index e3f684b5ab3..ff63860a3c1 100644 --- 
a/tools/caffe.cpp +++ b/tools/caffe.cpp @@ -174,7 +174,6 @@ int train() { vector gpus; get_gpus(&gpus); if (gpus.size() == 0) { - LOG(INFO) << "Use CPU."; Caffe::set_mode(Caffe::CPU); } else { ostringstream s; diff --git a/tools/compute_image_mean.cpp b/tools/compute_image_mean.cpp index 2035d515195..b1fc7cae38f 100644 --- a/tools/compute_image_mean.cpp +++ b/tools/compute_image_mean.cpp @@ -24,7 +24,6 @@ DEFINE_string(backend, "lmdb", int main(int argc, char** argv) { ::google::InitGoogleLogging(argv[0]); -#ifdef USE_OPENCV #ifndef GFLAGS_GFLAGS_H_ namespace gflags = google; #endif @@ -116,8 +115,5 @@ int main(int argc, char** argv) { } LOG(INFO) << "mean_value channel [" << c << "]:" << mean_values[c] / dim; } -#else - LOG(FATAL) << "This tool requires OpenCV; compile with USE_OPENCV."; -#endif // USE_OPENCV return 0; } diff --git a/tools/convert_imageset.cpp b/tools/convert_imageset.cpp index e51a2631077..aad1f1fe216 100644 --- a/tools/convert_imageset.cpp +++ b/tools/convert_imageset.cpp @@ -43,7 +43,6 @@ DEFINE_string(encode_type, "", "Optional: What type should we encode the image as ('png','jpg',...)."); int main(int argc, char** argv) { -#ifdef USE_OPENCV ::google::InitGoogleLogging(argv[0]); // Print output to stderr (while still logging) FLAGS_alsologtostderr = 1; @@ -151,8 +150,5 @@ int main(int argc, char** argv) { txn->Commit(); LOG(INFO) << "Processed " << count << " files."; } -#else - LOG(FATAL) << "This tool requires OpenCV; compile with USE_OPENCV."; -#endif // USE_OPENCV return 0; } diff --git a/tools/extract_features.cpp b/tools/extract_features.cpp new file mode 100644 index 00000000000..365dd495bbf --- /dev/null +++ b/tools/extract_features.cpp @@ -0,0 +1,189 @@ +#include // for snprintf +#include +#include + +#include "boost/algorithm/string.hpp" +#include "google/protobuf/text_format.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/net.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/db.hpp" 
+#include "caffe/util/io.hpp" +#include "caffe/vision_layers.hpp" + +using caffe::Blob; +using caffe::Caffe; +using caffe::Datum; +using caffe::Net; +using boost::shared_ptr; +using std::string; +namespace db = caffe::db; + +template +int feature_extraction_pipeline(int argc, char** argv); + +int main(int argc, char** argv) { + return feature_extraction_pipeline(argc, argv); +// return feature_extraction_pipeline(argc, argv); +} + +template +int feature_extraction_pipeline(int argc, char** argv) { + ::google::InitGoogleLogging(argv[0]); + const int num_required_args = 7; + if (argc < num_required_args) { + LOG(ERROR)<< + "This program takes in a trained network and an input data layer, and then" + " extract features of the input data produced by the net.\n" + "Usage: extract_features pretrained_net_param" + " feature_extraction_proto_file extract_feature_blob_name1[,name2,...]" + " save_feature_dataset_name1[,name2,...] num_mini_batches db_type" + " [CPU/GPU] [DEVICE_ID=0]\n" + "Note: you can extract multiple features in one pass by specifying" + " multiple feature blob names and dataset names seperated by ','." + " The names cannot contain white space characters and the number of blobs" + " and datasets must be equal."; + return 1; + } + int arg_pos = num_required_args; + + arg_pos = num_required_args; + if (argc > arg_pos && strcmp(argv[arg_pos], "GPU") == 0) { + LOG(ERROR)<< "Using GPU"; + uint device_id = 0; + if (argc > arg_pos + 1) { + device_id = atoi(argv[arg_pos + 1]); + CHECK_GE(device_id, 0); + } + LOG(ERROR) << "Using Device_id=" << device_id; + Caffe::SetDevice(device_id); + Caffe::set_mode(Caffe::GPU); + } else { + LOG(ERROR) << "Using CPU"; + Caffe::set_mode(Caffe::CPU); + } + + arg_pos = 0; // the name of the executable + std::string pretrained_binary_proto(argv[++arg_pos]); + + // Expected prototxt contains at least one data layer such as + // the layer data_layer_name and one feature blob such as the + // fc7 top blob to extract features. 
+ /* + layers { + name: "data_layer_name" + type: DATA + data_param { + source: "/path/to/your/images/to/extract/feature/images_leveldb" + mean_file: "/path/to/your/image_mean.binaryproto" + batch_size: 128 + crop_size: 227 + mirror: false + } + top: "data_blob_name" + top: "label_blob_name" + } + layers { + name: "drop7" + type: DROPOUT + dropout_param { + dropout_ratio: 0.5 + } + bottom: "fc7" + top: "fc7" + } + */ + std::string feature_extraction_proto(argv[++arg_pos]); + shared_ptr > feature_extraction_net( + new Net(feature_extraction_proto, caffe::TEST)); + feature_extraction_net->CopyTrainedLayersFrom(pretrained_binary_proto); + + std::string extract_feature_blob_names(argv[++arg_pos]); + std::vector blob_names; + boost::split(blob_names, extract_feature_blob_names, boost::is_any_of(",")); + + std::string save_feature_dataset_names(argv[++arg_pos]); + std::vector dataset_names; + boost::split(dataset_names, save_feature_dataset_names, + boost::is_any_of(",")); + CHECK_EQ(blob_names.size(), dataset_names.size()) << + " the number of blob names and dataset names must be equal"; + size_t num_features = blob_names.size(); + + for (size_t i = 0; i < num_features; i++) { + CHECK(feature_extraction_net->has_blob(blob_names[i])) + << "Unknown feature blob name " << blob_names[i] + << " in the network " << feature_extraction_proto; + } + + int num_mini_batches = atoi(argv[++arg_pos]); + + std::vector > feature_dbs; + std::vector > txns; + const char* db_type = argv[++arg_pos]; + for (size_t i = 0; i < num_features; ++i) { + LOG(INFO)<< "Opening dataset " << dataset_names[i]; + shared_ptr db(db::GetDB(db_type)); + db->Open(dataset_names.at(i), db::NEW); + feature_dbs.push_back(db); + shared_ptr txn(db->NewTransaction()); + txns.push_back(txn); + } + + LOG(ERROR)<< "Extacting Features"; + + Datum datum; + const int kMaxKeyStrLength = 100; + char key_str[kMaxKeyStrLength]; + std::vector*> input_vec; + std::vector image_indices(num_features, 0); + for (int batch_index = 
0; batch_index < num_mini_batches; ++batch_index) { + feature_extraction_net->Forward(input_vec); + for (int i = 0; i < num_features; ++i) { + const shared_ptr > feature_blob = feature_extraction_net + ->blob_by_name(blob_names[i]); + int batch_size = feature_blob->num(); + int dim_features = feature_blob->count() / batch_size; + const Dtype* feature_blob_data; + for (int n = 0; n < batch_size; ++n) { + datum.set_height(feature_blob->height()); + datum.set_width(feature_blob->width()); + datum.set_channels(feature_blob->channels()); + datum.clear_data(); + datum.clear_float_data(); + feature_blob_data = feature_blob->cpu_data() + + feature_blob->offset(n); + for (int d = 0; d < dim_features; ++d) { + datum.add_float_data(feature_blob_data[d]); + } + int length = snprintf(key_str, kMaxKeyStrLength, "%010d", + image_indices[i]); + string out; + CHECK(datum.SerializeToString(&out)); + txns.at(i)->Put(std::string(key_str, length), out); + ++image_indices[i]; + if (image_indices[i] % 1000 == 0) { + txns.at(i)->Commit(); + txns.at(i).reset(feature_dbs.at(i)->NewTransaction()); + LOG(ERROR)<< "Extracted features of " << image_indices[i] << + " query images for feature blob " << blob_names[i]; + } + } // for (int n = 0; n < batch_size; ++n) + } // for (int i = 0; i < num_features; ++i) + } // for (int batch_index = 0; batch_index < num_mini_batches; ++batch_index) + // write the last batch + for (int i = 0; i < num_features; ++i) { + if (image_indices[i] % 1000 != 0) { + txns.at(i)->Commit(); + } + LOG(ERROR)<< "Extracted features of " << image_indices[i] << + " query images for feature blob " << blob_names[i]; + feature_dbs.at(i)->Close(); + } + + LOG(ERROR)<< "Successfully extracted the features!"; + return 0; +} + From c6787cd9d4bc51316edb8e99da796b23698dc45c Mon Sep 17 00:00:00 2001 From: Wangyida Date: Tue, 22 Sep 2015 08:08:23 +0800 Subject: [PATCH 81/82] delete orig files --- ...convert_3d_triplet_data.cpp.orig.orig.orig | 431 --- 
.../triplet/create_3d_triplet.sh.orig.orig | 27 - .../triplet_loss_layer.cpp.orig.orig.orig | 626 ---- .../triplet_loss_layer.cu.orig.orig.orig | 728 ----- src/caffe/solver.cpp.orig.orig.orig | 1692 ----------- src/caffe/test/test_im2col_kernel.cu.orig | 210 -- src/caffe/test/test_net.cpp.orig.orig | 2508 ----------------- 7 files changed, 6222 deletions(-) delete mode 100644 examples/triplet/convert_3d_triplet_data.cpp.orig.orig.orig delete mode 100755 examples/triplet/create_3d_triplet.sh.orig.orig delete mode 100644 src/caffe/layers/triplet_loss_layer.cpp.orig.orig.orig delete mode 100644 src/caffe/layers/triplet_loss_layer.cu.orig.orig.orig delete mode 100644 src/caffe/solver.cpp.orig.orig.orig delete mode 100644 src/caffe/test/test_im2col_kernel.cu.orig delete mode 100644 src/caffe/test/test_net.cpp.orig.orig diff --git a/examples/triplet/convert_3d_triplet_data.cpp.orig.orig.orig b/examples/triplet/convert_3d_triplet_data.cpp.orig.orig.orig deleted file mode 100644 index 1c27853a000..00000000000 --- a/examples/triplet/convert_3d_triplet_data.cpp.orig.orig.orig +++ /dev/null @@ -1,431 +0,0 @@ -<<<<<<< 165e1d595232eb2a908f62887bcf2d5e1743ed10 -// Usage: -// convert_3d_data input_image_file input_label_file output_db_file -#include // NOLINT(readability/streams) -#include -#include "caffe/proto/caffe.pb.h" -#include "caffe/util/math_functions.hpp" -#include "glog/logging.h" -#include "google/protobuf/text_format.h" -#include "leveldb/db.h" -#include "math.h" -#include "stdint.h" - -======= -// This script converts the MNIST dataset to the leveldb format used -// by caffe to train siamese network. 
-// Usage: -// convert_mnist_data input_image_file input_label_file output_db_file -// The MNIST dataset could be downloaded at -// http://yann.lecun.com/exdb/mnist/ -#include // NOLINT(readability/streams) -#include - -#include "glog/logging.h" -#include "google/protobuf/text_format.h" -#include "leveldb/db.h" -#include "stdint.h" - -#include "caffe/proto/caffe.pb.h" -#include "caffe/util/math_functions.hpp" - ->>>>>>> add 3d network training param -uint32_t swap_endian(uint32_t val) { - val = ((val << 8) & 0xFF00FF00) | ((val >> 8) & 0xFF00FF); - return (val << 16) | (val >> 16); -} - -void read_image(std::ifstream* image_file, std::ifstream* label_file, - uint32_t index, uint32_t rows, uint32_t cols, -<<<<<<< befbd1e1805c429d9d9e767289933604428356c9 -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= -<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 ->>>>>>> new data and new feature dimension - char* pixels, char* label_temp, signed char* label) { - image_file->seekg(index * rows * cols + 16); - image_file->read(pixels, rows * cols); - label_file->seekg(index * 4 + 8); - label_file->read(label_temp, 4); - for (int i = 0; i < 4; i++) - *(label+i) = (signed char)*(label_temp+i); -<<<<<<< befbd1e1805c429d9d9e767289933604428356c9 -======= -<<<<<<< 165e1d595232eb2a908f62887bcf2d5e1743ed10 -======= -} - -void convert_dataset(const char* image_filename, const char* label_filename, - const char* db_filename, const char* class_number) { -======= ->>>>>>> new data and new feature dimension - char* pixels, char* label_temp, signed char* label, int rgb_use) { - if (rgb_use == 0) { - image_file->seekg(index * rows * cols + 16); - image_file->read(pixels, rows * cols); - label_file->seekg(index * 4 + 8); - label_file->read(label_temp, 4); - for (int i = 0; i < 4; i++) - *(label+i) = (signed char)*(label_temp+i); - } else { - image_file->seekg(3 * index * rows * cols + 16); - image_file->read(pixels, 3 * rows * cols); - label_file->seekg(index * 4 + 8); - 
label_file->read(label_temp, 4); - for (int i = 0; i < 4; i++) - *(label+i) = (signed char)*(label_temp+i); - } ->>>>>>> add 3d network training param -} - -void convert_dataset(const char* image_filename, const char* label_filename, -<<<<<<< befbd1e1805c429d9d9e767289933604428356c9 - const char* db_filename, const char* class_number) { -======= - const char* db_filename, const char* class_number, - const char* rgb_use) { - int rgb_use1 = atoi(rgb_use); ->>>>>>> new data and new feature dimension ->>>>>>> new data and new feature dimension - int class_num = atoi(class_number); -======= - char* pixels, char* label) { - image_file->seekg(index * rows * cols + 16); - image_file->read(pixels, rows * cols); - label_file->seekg(index + 8); - label_file->read(label, 1); -} - -void convert_dataset(const char* image_filename, const char* label_filename, - const char* db_filename) { ->>>>>>> add 3d network training param - // Open files - std::ifstream image_file(image_filename, std::ios::in | std::ios::binary); - std::ifstream label_file(label_filename, std::ios::in | std::ios::binary); - CHECK(image_file) << "Unable to open file " << image_filename; - CHECK(label_file) << "Unable to open file " << label_filename; - // Read the magic and the meta data - uint32_t magic; - uint32_t num_items; - uint32_t num_labels; - uint32_t rows; - uint32_t cols; - - image_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); - CHECK_EQ(magic, 2051) << "Incorrect image file magic."; - label_file.read(reinterpret_cast(&magic), 4); - magic = swap_endian(magic); -<<<<<<< 165e1d595232eb2a908f62887bcf2d5e1743ed10 - CHECK_EQ(magic, 2050) << "Incorrect label file magic."; -======= - CHECK_EQ(magic, 2049) << "Incorrect label file magic."; ->>>>>>> add 3d network training param - image_file.read(reinterpret_cast(&num_items), 4); - num_items = swap_endian(num_items); - label_file.read(reinterpret_cast(&num_labels), 4); - num_labels = swap_endian(num_labels); - CHECK_EQ(num_items, 
num_labels); - image_file.read(reinterpret_cast(&rows), 4); - rows = swap_endian(rows); - image_file.read(reinterpret_cast(&cols), 4); - cols = swap_endian(cols); - - // Open leveldb - leveldb::DB* db; - leveldb::Options options; - options.create_if_missing = true; - options.error_if_exists = true; - leveldb::Status status = leveldb::DB::Open( - options, db_filename, &db); - CHECK(status.ok()) << "Failed to open leveldb " << db_filename - << ". Is it already existing?"; - -<<<<<<< 165e1d595232eb2a908f62887bcf2d5e1743ed10 - char* label_temp = new char[4]; // label for unsigned char* - signed char* label_i = new signed char[4]; // label for triplet - signed char* label_j = new signed char[4]; - signed char* label_k = new signed char[4]; - signed char* label_l = new signed char[4]; // label for pair wise - signed char* label_m = new signed char[4]; -<<<<<<< befbd1e1805c429d9d9e767289933604428356c9 -======= -<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 ->>>>>>> new data and new feature dimension - char* pixels1 = new char[rows * cols]; - char* pixels2 = new char[rows * cols]; - char* pixels3 = new char[rows * cols]; - char* pixels4 = new char[rows * cols]; - char* pixels5 = new char[rows * cols]; -<<<<<<< befbd1e1805c429d9d9e767289933604428356c9 -======= -======= - int db_size; - if (rgb_use1 == 0) - db_size = rows * cols; - else - db_size = 3 * rows * cols; - char* pixels1 = new char[db_size]; - char* pixels2 = new char[db_size]; - char* pixels3 = new char[db_size]; - char* pixels4 = new char[db_size]; - char* pixels5 = new char[db_size]; ->>>>>>> new data and new feature dimension ->>>>>>> new data and new feature dimension - const int kMaxKeyLength = 10; - char key[kMaxKeyLength]; - std::string value; - caffe::Datum datum; - datum.set_channels(1); -======= - char label_i; // label for triplet - char label_j; - char label_k; - char label_l; // label for pair wise - char label_m; - char* pixels = new char[5 * rows * cols]; - const int kMaxKeyLength = 10; - char 
key[kMaxKeyLength]; - std::string value; - - caffe::Datum datum; - datum.set_channels(5); // one channel for each image in the triplet and pair ->>>>>>> add 3d network training param - datum.set_height(rows); - datum.set_width(cols); - LOG(INFO) << "A total of " << num_items << " items."; - LOG(INFO) << "Rows: " << rows << " Cols: " << cols; -<<<<<<< befbd1e1805c429d9d9e767289933604428356c9 -<<<<<<< 165e1d595232eb2a908f62887bcf2d5e1743ed10 -======= ->>>>>>> new data and new feature dimension - int counter = 0; -<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 - for (unsigned int times = 0; times < 5; ++times) { -======= - for (unsigned int times = 0; times < 10; ++times) { ->>>>>>> new data and new feature dimension - // iteration in the samples of all class - for (unsigned int itemid = 0; itemid < num_items/class_num; ++itemid) { - // iteration in the samples in one class - for (unsigned int class_ind = 0; class_ind < class_num; ++class_ind) { - // use reference sample one by one at each iteration - int i = itemid % num_items + class_ind*num_items/class_num; - int j = caffe::caffe_rng_rand() % num_items; // pick triplet groups - int k = caffe::caffe_rng_rand() % num_items; - int l = caffe::caffe_rng_rand() % num_items; // pick pair wise groups - int m = caffe::caffe_rng_rand() % num_items; - read_image(&image_file, &label_file, i, rows, cols, // read triplet -<<<<<<< befbd1e1805c429d9d9e767289933604428356c9 - pixels1, label_temp, label_i); -======= -<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 - pixels1, label_temp, label_i); - read_image(&image_file, &label_file, j, rows, cols, - pixels2, label_temp, label_j); - read_image(&image_file, &label_file, k, rows, cols, - pixels3, label_temp, label_k); - read_image(&image_file, &label_file, l, rows, cols, // read pair wise - pixels4, label_temp, label_l); - read_image(&image_file, &label_file, m, rows, cols, - pixels5, label_temp, label_m); -======= - pixels1, label_temp, label_i, rgb_use1); ->>>>>>> new data and 
new feature dimension - read_image(&image_file, &label_file, j, rows, cols, - pixels2, label_temp, label_j); - read_image(&image_file, &label_file, k, rows, cols, - pixels3, label_temp, label_k); - read_image(&image_file, &label_file, l, rows, cols, // read pair wise - pixels4, label_temp, label_l); - read_image(&image_file, &label_file, m, rows, cols, -<<<<<<< befbd1e1805c429d9d9e767289933604428356c9 - pixels5, label_temp, label_m); -======= - pixels5, label_temp, label_m, rgb_use1); ->>>>>>> new data and new feature dimension ->>>>>>> new data and new feature dimension - - bool pair_pass = false; - bool triplet1_pass = false; - bool triplet2_pass = false; - bool triplet3_class_same = false; - bool triplet3_pass = false; - - int ij_diff_x = static_cast(*(label_i+1)-*(label_j+1)); - int ij_diff_y = static_cast(*(label_i+2)-*(label_j+2)); - int ij_diff_z = static_cast(*(label_i+3)-*(label_j+3)); - int im_diff_x = static_cast(*(label_i+1)-*(label_m+1)); - int im_diff_y = static_cast(*(label_i+2)-*(label_m+2)); - int im_diff_z = static_cast(*(label_i+3)-*(label_m+3)); - - int ij_x = ij_diff_x*ij_diff_x; - int ij_y = ij_diff_y*ij_diff_y; - int ij_z = ij_diff_z*ij_diff_z; - int im_x = im_diff_x*im_diff_x; - int im_y = im_diff_y*im_diff_y; - int im_z = im_diff_z*im_diff_z; - - float dist_ij = std::sqrt(ij_x + ij_y + ij_z); - float dist_im = std::sqrt(im_x + im_y + im_z); -<<<<<<< befbd1e1805c429d9d9e767289933604428356c9 - if (*label_i == *label_j && dist_ij < 100/2) -======= -<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 - if (*label_i == *label_j && dist_ij < 100/2) -======= - if (*label_i == *label_j && dist_ij < 100/3 && dist_ij != 0) ->>>>>>> new data and new feature dimension ->>>>>>> new data and new feature dimension - pair_pass = true; - if (pair_pass && (*label_i != *label_k)) - triplet1_pass = true; - if (pair_pass && (*label_i != *label_l)) - triplet2_pass = true; - if (pair_pass && (*label_i == *label_m)) - triplet3_class_same = true; -<<<<<<< 
3aaa859dba8ca4f7a962d285da0d762656b6d444 - if (triplet3_class_same && dist_im > 100*sqrt(2)) -======= - if (triplet3_class_same && dist_im > 100/3 && dist_im < 100) ->>>>>>> new data and new feature dimension - triplet3_pass = true; - if (pair_pass && triplet1_pass && triplet2_pass && triplet3_pass) { - datum.set_data(pixels1, rows*cols); // set data - datum.set_label(static_cast(*label_i)); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", counter); - db->Put(leveldb::WriteOptions(), std::string(key), value); - counter++; - datum.set_data(pixels2, rows*cols); // set data - datum.set_label(static_cast(*label_j)); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", counter); - db->Put(leveldb::WriteOptions(), std::string(key), value); - counter++; - datum.set_data(pixels3, rows*cols); // set data - datum.set_label(static_cast(*label_k)); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", counter); - db->Put(leveldb::WriteOptions(), std::string(key), value); - counter++; - datum.set_data(pixels4, rows*cols); // set data - datum.set_label(static_cast(*label_l)); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", counter); - db->Put(leveldb::WriteOptions(), std::string(key), value); - counter++; - datum.set_data(pixels5, rows*cols); // set data - datum.set_label(static_cast(*label_m)); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", counter); - db->Put(leveldb::WriteOptions(), std::string(key), value); - counter++; - } else { - class_ind--; - } - } // iteration in the samples of all class - } // iteration in the samples in one class - } // iteration in times - delete db; - delete pixels1; - delete pixels2; - delete pixels3; - delete pixels4; - delete pixels5; -} - -int main(int argc, char** argv) { -<<<<<<< befbd1e1805c429d9d9e767289933604428356c9 -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f - if (argc != 5) { - printf("This script converts the 
images dataset to the leveldb format used\n" -======= -<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 - if (argc != 5) { ->>>>>>> new data and new feature dimension -======= - if (argc != 6) { ->>>>>>> new data and new feature dimension - printf("This script converts the dataset to the leveldb format used\n" ->>>>>>> add 3d network training param - "by caffe to train a triplet network.\n" - "Usage:\n" - " convert_3d_data input_image_file input_label_file " -<<<<<<< befbd1e1805c429d9d9e767289933604428356c9 - "output_db_file class_number\n"); - } else { - google::InitGoogleLogging(argv[0]); -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f - convert_dataset(argv[1], argv[2], argv[3], argv[4]); -======= - convert_dataset(argv[1], argv[2], argv[3], argv[4], argv[5]); -======= - for (int itemid = 0; itemid < num_items; ++itemid) { - // pick triplet groups - int i = caffe::caffe_rng_rand() % num_items; - int j = caffe::caffe_rng_rand() % num_items; - int k = caffe::caffe_rng_rand() % num_items; - // pick pair wise groups - int l = caffe::caffe_rng_rand() % num_items; - int m = caffe::caffe_rng_rand() % num_items; - // read triplet groups - read_image(&image_file, &label_file, i, rows, cols, - pixels, &label_i); - read_image(&image_file, &label_file, j, rows, cols, - pixels + (rows * cols), &label_j); - read_image(&image_file, &label_file, k, rows, cols, - pixels + (2 * rows * cols), &label_k); - // read pair wise groups - read_image(&image_file, &label_file, l, rows, cols, - pixels + (3 * rows * cols), &label_l); - read_image(&image_file, &label_file, m, rows, cols, - pixels + (4 * rows * cols), &label_m); - - datum.set_data(pixels, 5*rows*cols); // set data - if ((label_i == label_j && label_i != label_k) && (label_l == label_m)) { - datum.set_label(1); - datum.SerializeToString(&value); - snprintf(key, kMaxKeyLength, "%08d", itemid); - db->Put(leveldb::WriteOptions(), std::string(key), value); - } else { - itemid--; - datum.set_label(0); - } - } - - delete db; - 
delete pixels; -} - -int main(int argc, char** argv) { - if (argc != 4) { - printf("This script converts the MNIST dataset to the leveldb format used\n" - "by caffe to train a siamese network.\n" - "Usage:\n" - " convert_mnist_data input_image_file input_label_file " - "output_db_file\n" - "The MNIST dataset could be downloaded at\n" - " http://yann.lecun.com/exdb/mnist/\n" - "You should gunzip them after downloading.\n"); - } else { - google::InitGoogleLogging(argv[0]); - convert_dataset(argv[1], argv[2], argv[3]); ->>>>>>> add 3d network training param ->>>>>>> add 3d network training param -======= -<<<<<<< 3aaa859dba8ca4f7a962d285da0d762656b6d444 - "output_db_file class_number\n"); - } else { - google::InitGoogleLogging(argv[0]); - convert_dataset(argv[1], argv[2], argv[3], argv[4]); -======= - "output_db_file class_number rgb_use \n"); - } else { - google::InitGoogleLogging(argv[0]); - convert_dataset(argv[1], argv[2], argv[3], argv[4], argv[5]); ->>>>>>> new data and new feature dimension ->>>>>>> new data and new feature dimension - } - return 0; -} diff --git a/examples/triplet/create_3d_triplet.sh.orig.orig b/examples/triplet/create_3d_triplet.sh.orig.orig deleted file mode 100755 index 2c502fe7f0a..00000000000 --- a/examples/triplet/create_3d_triplet.sh.orig.orig +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env sh -# This script converts the mnist data into leveldb format. - -EXAMPLES=./build/examples/triplet -DATA=./data/linemod - -echo "Creating leveldb..." 
- -rm -rf ./examples/triplet/3d_triplet_train_leveldb -rm -rf ./examples/triplet/3d_triplet_test_leveldb - -$EXAMPLES/convert_3d_triplet_data.bin \ - $DATA/binary_image_train \ - $DATA/binary_label_train \ - ./examples/triplet/3d_triplet_train_leveldb -$EXAMPLES/convert_3d_triplet_data.bin \ - $DATA/binary_image_test \ - $DATA/binary_label_test \ -<<<<<<< befbd1e1805c429d9d9e767289933604428356c9 - ./examples/triplet/3d_triplet_test_leveldb - -======= - ./examples/triplet/3d_triplet_test_leveldb \ - 4 \ - 0 ->>>>>>> new data and new feature dimension -echo "Done." diff --git a/src/caffe/layers/triplet_loss_layer.cpp.orig.orig.orig b/src/caffe/layers/triplet_loss_layer.cpp.orig.orig.orig deleted file mode 100644 index 460042f5ff5..00000000000 --- a/src/caffe/layers/triplet_loss_layer.cpp.orig.orig.orig +++ /dev/null @@ -1,626 +0,0 @@ -#include -#include - -#include "caffe/layer.hpp" -#include "caffe/loss_layers.hpp" -#include "caffe/util/io.hpp" -#include "caffe/util/math_functions.hpp" - -namespace caffe { - -template -void TripletLossLayer::LayerSetUp( - const vector*>& bottom, const vector*>& top) { - LossLayer::LayerSetUp(bottom, top); - // number of triplet in a batch - int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); - // dimension of each descriptor - int dim = bottom[0]->count()/bottom[0]->num(); - CHECK_EQ(bottom[0]->channels(), dim); - CHECK_EQ(bottom[0]->height(), 1); - CHECK_EQ(bottom[0]->width(), 1); - CHECK_EQ(bottom[1]->channels(), 1); - CHECK_EQ(bottom[1]->height(), 1); - CHECK_EQ(bottom[1]->width(), 1); - // In each set, we have: - // the descriptor of reference sample, closest sample, and negative samples - // number of sets in the whole batch - int num_set = bottom[0]->num()/(2 + num_triplets); - dist_sq_.Reshape(num_set, 1, 1, 1); - diff_pos.Reshape(num_set, dim, 1, 1); - dist_sq_pos.Reshape(num_set, 1, 1, 1); - diff_neg.Reshape(num_set, dim, 1, 1); - dist_sq_neg.Reshape(num_set, 1, 1, 1); - // vector of ones used to sum 
along channels - summer_vec_.Reshape(bottom[0]->channels(), 1, 1, 1); - for (int i = 0; i < bottom[0]->channels(); ++i) - summer_vec_.mutable_cpu_data()[i] = Dtype(1); -} - -template -void TripletLossLayer::Forward_cpu( - const vector*>& bottom, - const vector*>& top) { - Dtype margin = this->layer_param_.triplet_loss_param().margin(); - Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); - int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); - CHECK_EQ(bottom[0]->num()%(2 + num_triplets), 0); - Dtype loss(0.0); -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< 46f6a4f657c9a5f4ffb83b7c8540b4fd2b8208bb -======= -<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c ->>>>>>> GPU version added -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param - int dim = bottom[0]->count()/bottom[0]->num(); - int num_set = bottom[0]->num()/(2 + num_triplets); - if (losstype == 0) { - for (int i = 0; i < num_set; ++i) { - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive - diff_pos.mutable_cpu_data() + i*dim); // reference-pose_close - // Loss component calculated from reference and close one - dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(dim, - diff_pos.cpu_data() + i*dim, diff_pos.cpu_data() + i*dim); - // a b is a similar pair for pair wise - // loss accumulated by the pair wise part - loss += dist_sq_pos.cpu_data()[i]; - for (int triplet = 0; triplet < num_triplets; ++triplet) { - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i]; - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference - 
bottom[0]->cpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + i*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + i*dim, diff_neg.cpu_data() + i*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg.cpu_data()[i]; - // loss accumulated accumulated by the triplet part - loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); - } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< 46f6a4f657c9a5f4ffb83b7c8540b4fd2b8208bb -======= -======= -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -======= - ->>>>>>> add 3d network training param - // Loss component calculated from ab - for (int i = 0; i < bottom[0]->num(); ++i) { - dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(channels, - diff_pos.cpu_data() + (i*channels), diff_pos.cpu_data() + (i*channels)); - // ab is a similar pair -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f - dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.cpu_data()[i]; -======= - dist_sq_.mutable_cpu_data()[i] += dist_sq_pos.cpu_data()[i]; ->>>>>>> add 3d network training param - // Loss component calculated from ac - dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(channels, - diff_neg.cpu_data() + (i*channels), diff_neg.cpu_data() + (i*channels)); - // ac is a dissimilar pair - dist_sq_.mutable_cpu_data()[i] -= dist_sq_neg.cpu_data()[i]; - loss += std::max(margin + dist_sq_.cpu_data()[i], Dtype(0.0)); -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f ->>>>>>> GPU version added ->>>>>>> GPU version added -======= ->>>>>>> restore ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param - } - loss = loss / static_cast(num_set) / Dtype(2); - top[0]->mutable_cpu_data()[0] = loss; - } else { - for (int i = 0; i < num_set; ++i) { - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference - 
bottom[0]->cpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive - diff_pos.mutable_cpu_data() + i*dim); // reference-pose_close - // Loss component calculated from reference and close one - dist_sq_pos.mutable_cpu_data()[i] = caffe_cpu_dot(dim, - diff_pos.cpu_data() + i*dim, diff_pos.cpu_data() + i*dim); - // a b is a similar pair for pair wise - // loss accumulated by the pair wise part - loss += dist_sq_pos.cpu_data()[i]; -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f - dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; - dist_sq_.mutable_cpu_data()[i] += margin; - for (int triplet = 0; triplet < num_triplets; ++triplet) { -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -<<<<<<< 8b0f51f43a3aeac04970b675f35321c2608ed301 - for (int triplet = 0; triplet < num_triplets; ++triplet) { - dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; - dist_sq_.mutable_cpu_data()[i] += margin; -======= - dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; - dist_sq_.mutable_cpu_data()[i] += margin; - for (int triplet = 0; triplet < num_triplets; ++triplet) { ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise -======= - for (int triplet = 0; triplet < num_triplets; ++triplet) { - dist_sq_.mutable_cpu_data()[i] = dist_sq_pos.mutable_cpu_data()[i]; - dist_sq_.mutable_cpu_data()[i] += margin; ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + i*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[i] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + i*dim, diff_neg.cpu_data() + i*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[i] = 1 - \ - 
dist_sq_neg.cpu_data()[i] / dist_sq_.mutable_cpu_data()[i]; - // loss accumulated accumulated by the triplet part - loss += std::max(dist_sq_.cpu_data()[i], Dtype(0.0)); - } - } - loss = loss / static_cast(num_set) / Dtype(2); - top[0]->mutable_cpu_data()[0] = loss; - } -} - -template -void TripletLossLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - Dtype margin = this->layer_param_.triplet_loss_param().margin(); -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a ->>>>>>> triplet data generation and network update - Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); - int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); - int dim = bottom[0]->count()/bottom[0]->num(); - int num_set = bottom[0]->num()/(2 + num_triplets); - if (losstype == 0) { - // BP for feat1(extracted from reference) - for (int i = 0; i < 1; ++i) { - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - // the pair part - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - 
dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; - // Loss component calculated from negative part - if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - // similar pair in triplet - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(1.0), - bout + (2 + num_triplets)*j*dim); - // dissimilar pair in triplet - caffe_cpu_axpby( - dim, - -alpha, - diff_neg.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -<<<<<<< 8b0f51f43a3aeac04970b675f35321c2608ed301 -======= -======= ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param - } - } - } - } - // BP for feat2(extracted from the closest sample) - for (int i = 1; i < 2; ++i) { - if (propagate_down[0]) { - const Dtype sign = -1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - // the pair part - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; - if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - // similar pair in triplet - 
caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise -======= ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param - } - } - } - } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -<<<<<<< 8b0f51f43a3aeac04970b675f35321c2608ed301 - // BP for feat2(extracted from the closest sample) - for (int i = 1; i < 2; ++i) { - if (propagate_down[0]) { - const Dtype sign = -1; -======= -======= ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param - // BP for negative feature used in the num_triplets triplet part - for (int i = 2; i < 2 + num_triplets; ++i) { - if (propagate_down[0]) { - const Dtype sign = 1; -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise -======= ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -<<<<<<< 8b0f51f43a3aeac04970b675f35321c2608ed301 - // the pair part - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 
+ num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; - if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - // similar pair in triplet - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - // BP for negative feature used in the num_triplets triplet part - for (int i = 2; i < 2 + num_triplets; ++i) { - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); -======= ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise -======= ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + i)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.cpu_data()[j]; - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] -= dist_sq_neg.cpu_data()[j]; - if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - // dissimilar pairs -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= 
-======= - for (int i = 1; i < 3; ++i) { -// there must be further check to ensure the gradient calc - if (propagate_down[i]) { - const Dtype sign = (i == 2) ? 1 : -1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(bottom[i]->num()); - int num = bottom[i]->num(); - int channels = bottom[i]->channels(); - for (int j = 0; j < num; ++j) { - Dtype* bout = bottom[i]->mutable_cpu_diff(); - if ((margin + dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - // similar pairs ->>>>>>> restore ->>>>>>> triplet data generation and network update - caffe_cpu_axpby( - dim, - alpha, - diff_neg.cpu_data() + (j*dim), - Dtype(0.0), -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a ->>>>>>> triplet data generation and network update - bout + ((2 + num_triplets)*j + i)*dim); - } else { - caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } else { - for (int i = 0; i < 1; ++i) { - // BP for data1(feat1) - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - // the pair part - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -<<<<<<< 8b0f51f43a3aeac04970b675f35321c2608ed301 -======= ->>>>>>> triplet data generation and network update - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= ->>>>>>> add 3d network training param - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; - // 
the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise -======= ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] = 1 - \ - dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; - // loss accumulated accumulated by the triplet part - if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - caffe_cpu_axpby( - dim, - alpha*dist_sq_neg.mutable_cpu_data()[j]\ - /((dist_sq_pos.mutable_cpu_data()[j]+margin)\ - *(dist_sq_pos.mutable_cpu_data()[j]+margin)), - diff_pos.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - caffe_cpu_axpby( - dim, - -alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), - diff_neg.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - for (int i = 1; i < 2; ++i) { - // BP for positive data(feat2) - if (propagate_down[0]) { - const Dtype sign = -1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - // the pair part - caffe_cpu_axpby( - dim, - alpha, - diff_pos.cpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -<<<<<<< 
8b0f51f43a3aeac04970b675f35321c2608ed301 -======= ->>>>>>> triplet data generation and network update - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= ->>>>>>> add 3d network training param - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= ->>>>>>> No sclice layer version which could forward a set of triplets together with 1 pair wise -======= ->>>>>>> triplet data generation and network update ->>>>>>> add 3d network training param - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] = 1 - \ - dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; - // loss accumulated accumulated by the triplet part - if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { - caffe_cpu_axpby( - dim, - alpha*dist_sq_neg.mutable_cpu_data()[j]\ - /((dist_sq_pos.mutable_cpu_data()[j]+margin)\ - *(dist_sq_pos.mutable_cpu_data()[j]+margin)), - diff_pos.cpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - for (int i = 2; i < 2 + num_triplets; ++i) { - // BP for negative data(feat3) - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / - 
static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_cpu_diff(); - dist_sq_.mutable_cpu_data()[j] = dist_sq_pos.mutable_cpu_data()[j]; - dist_sq_.mutable_cpu_data()[j] += margin; - // Loss component calculated from negative part - caffe_sub( - dim, - bottom[0]->cpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->cpu_data() + ((2 + num_triplets)*j + i)*dim, - diff_neg.mutable_cpu_data() + j*dim); // reference-negative - dist_sq_neg.mutable_cpu_data()[j] = caffe_cpu_dot(dim, - diff_neg.cpu_data() + j*dim, diff_neg.cpu_data() + j*dim); - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_cpu_data()[j] = 1 - \ - dist_sq_neg.cpu_data()[j] / dist_sq_.mutable_cpu_data()[j]; - // loss accumulated accumulated by the triplet part - if ((dist_sq_.cpu_data()[j]) > Dtype(0.0)) { -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= -======= - bout + (j*channels)); - // dissimilar pairs ->>>>>>> restore ->>>>>>> triplet data generation and network update - caffe_cpu_axpby( - dim, - alpha/(dist_sq_pos.mutable_cpu_data()[j] + margin), - diff_neg.cpu_data() + (j*dim), - Dtype(0.0), -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 - bout + ((2 + num_triplets)*j + i)*dim); - } else { - caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); -======= -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a - bout + ((2 + num_triplets)*j + i)*dim); - } else { - caffe_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); -======= - bout + (j*channels)); - } else { - caffe_set(channels, Dtype(0), bout + (j*channels)); ->>>>>>> restore ->>>>>>> triplet data generation and network update - } - } - } - } - } -} - -#ifdef CPU_ONLY -STUB_GPU(TripletLossLayer); -#endif - -INSTANTIATE_CLASS(TripletLossLayer); -REGISTER_LAYER_CLASS(TripletLoss); - -} // namespace caffe diff --git a/src/caffe/layers/triplet_loss_layer.cu.orig.orig.orig b/src/caffe/layers/triplet_loss_layer.cu.orig.orig.orig deleted 
file mode 100644 index 2ebebd327f3..00000000000 --- a/src/caffe/layers/triplet_loss_layer.cu.orig.orig.orig +++ /dev/null @@ -1,728 +0,0 @@ -#include -#include - -#include "caffe/layer.hpp" -<<<<<<< 0a8521567403409d70ece475762c203e38274530 -#include "caffe/util/io.hpp" -#include "caffe/util/math_functions.hpp" -#include "caffe/vision_layers.hpp" -======= -<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c -#include "caffe/util/io.hpp" -#include "caffe/util/math_functions.hpp" -#include "caffe/vision_layers.hpp" -======= -#include "caffe/vision_layers.hpp" -#include "caffe/util/io.hpp" -#include "caffe/util/math_functions.hpp" ->>>>>>> GPU version added ->>>>>>> GPU version added - -namespace caffe { - -template -void TripletLossLayer::Forward_gpu( - const vector*>& bottom, - const vector*>& top) { -<<<<<<< 0a8521567403409d70ece475762c203e38274530 -======= -<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c ->>>>>>> GPU version added - Dtype margin = this->layer_param_.triplet_loss_param().margin(); - Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); - int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); - CHECK_EQ(bottom[0]->num()%(2 + num_triplets), 0); - Dtype loss(0.0); - int dim = bottom[0]->count()/bottom[0]->num(); - int num_set = bottom[0]->num()/(2 + num_triplets); - if (losstype == 0) { - for (int i = 0; i < num_set; ++i) { - caffe_gpu_sub( - dim, - bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->gpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive - diff_pos.mutable_gpu_data() + i*dim); // reference-pose_close - caffe_gpu_dot( - dim, - diff_pos.gpu_data() + i*dim, - diff_pos.gpu_data() + i*dim, -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 - dist_sq_pos.mutable_cpu_data() + i); -======= - dist_sq_pos.mutable_gpu_data() + i); ->>>>>>> debug GPU triplet loss codes for loss type 0 -======= -<<<<<<< 
0a8521567403409d70ece475762c203e38274530 -<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 - dist_sq_pos.mutable_cpu_data() + i); -======= - dist_sq_pos.mutable_gpu_data() + i); ->>>>>>> debug GPU triplet loss codes for loss type 0 -======= - dist_sq_pos.mutable_cpu_data() + i); ->>>>>>> GPU version added ->>>>>>> add 3d network training param - // a b is a similar pair for pair wise - // loss accumulated by the pair wise part - loss += dist_sq_pos.gpu_data()[i]; - for (int triplet = 0; triplet < num_triplets; ++triplet) { - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.gpu_data()[i]; - // Loss component calculated from negative part - caffe_gpu_sub( - dim, - bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->gpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, - diff_neg.mutable_gpu_data() + i*dim); // reference-negative -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< e7a31554fff6f6e13d1acc02201044edf0b2e322 -<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 -======= ->>>>>>> debug GPU triplet loss codes for loss type 0 -======= ->>>>>>> add 3d network training param - caffe_gpu_dot( - dim, - diff_neg.gpu_data() + i*dim, - diff_neg.gpu_data() + i*dim, -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= -<<<<<<< 0a8521567403409d70ece475762c203e38274530 ->>>>>>> add 3d network training param -<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 - dist_sq_neg.mutable_cpu_data() + i); -======= - dist_sq_neg.mutable_gpu_data() + i); ->>>>>>> debug GPU triplet loss codes for loss type 0 -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= -======= - dist_sq_neg.mutable_cpu_data() + i); ->>>>>>> GPU version added ->>>>>>> add 3d network training param - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_gpu_data()[i] -= dist_sq_neg.gpu_data()[i]; - // loss accumulated accumulated by the triplet part - loss += 
std::max(margin + dist_sq_.gpu_data()[i], Dtype(0.0)); - } - } - loss = loss / static_cast(num_set) / Dtype(2); - top[0]->mutable_gpu_data()[0] = loss; - } else { - for (int i = 0; i < num_set; ++i) { - caffe_gpu_sub( - dim, - bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->gpu_data() + ((2 + num_triplets)*i + 1)*dim, // positive - diff_pos.mutable_gpu_data() + i*dim); // reference-pose_close - // Loss component calculated from reference and close one - caffe_gpu_dot( - dim, - diff_pos.gpu_data() + i*dim, - diff_pos.gpu_data() + i*dim, -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 - dist_sq_pos.mutable_cpu_data() + i); -======= - dist_sq_pos.mutable_gpu_data() + i); ->>>>>>> debug GPU triplet loss codes for loss type 0 -======= -<<<<<<< 0a8521567403409d70ece475762c203e38274530 -<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 - dist_sq_pos.mutable_cpu_data() + i); -======= - dist_sq_pos.mutable_gpu_data() + i); ->>>>>>> debug GPU triplet loss codes for loss type 0 -======= - dist_sq_pos.mutable_cpu_data() + i); ->>>>>>> GPU version added ->>>>>>> add 3d network training param - // a b is a similar pair for pair wise - // loss accumulated by the pair wise part - loss += dist_sq_pos.gpu_data()[i]; - for (int triplet = 0; triplet < num_triplets; ++triplet) { - dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.mutable_gpu_data()[i]; - dist_sq_.mutable_gpu_data()[i] += margin; - // Loss component calculated from negative part - caffe_gpu_sub( - dim, - bottom[0]->gpu_data() + (2 + num_triplets)*i*dim, // reference - bottom[0]->gpu_data() + ((2 + num_triplets)*i + 2 + triplet)*dim, - diff_neg.mutable_gpu_data() + i*dim); // reference-negative -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< e7a31554fff6f6e13d1acc02201044edf0b2e322 -<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 -======= ->>>>>>> debug GPU triplet loss codes for loss type 0 -======= ->>>>>>> add 3d network training 
param - caffe_gpu_dot( - dim, - diff_neg.gpu_data() + i*dim, - diff_neg.gpu_data() + i*dim, -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= -<<<<<<< 0a8521567403409d70ece475762c203e38274530 ->>>>>>> add 3d network training param -<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 - dist_sq_neg.mutable_cpu_data() + i); -======= - dist_sq_neg.mutable_gpu_data() + i); ->>>>>>> debug GPU triplet loss codes for loss type 0 -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= -======= - dist_sq_neg.mutable_cpu_data() + i); ->>>>>>> GPU version added ->>>>>>> add 3d network training param - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_gpu_data()[i] = 1 - \ - dist_sq_neg.gpu_data()[i] / dist_sq_.mutable_gpu_data()[i]; - // loss accumulated accumulated by the triplet part - loss += std::max(dist_sq_.gpu_data()[i], Dtype(0.0)); - } - } - loss = loss / static_cast(num_set) / Dtype(2); - top[0]->mutable_gpu_data()[0] = loss; - } -<<<<<<< 0a8521567403409d70ece475762c203e38274530 -======= -======= - int count = bottom[0]->count(); - caffe_gpu_sub( - count, - bottom[0]->gpu_data(), // a - bottom[1]->gpu_data(), // b - diff_pos.mutable_gpu_data()); // a_i-b_i - caffe_gpu_sub( - count, - bottom[0]->gpu_data(), // a - bottom[2]->gpu_data(), // c - diff_neg.mutable_gpu_data()); // a_i-c_i - caffe_gpu_powx( - count, - diff_pos.mutable_gpu_data(), // a_i-b_i - Dtype(2), - diff_sq_pos.mutable_gpu_data()); // (a_i-b_i)^2 - caffe_gpu_powx( - count, - diff_neg.mutable_gpu_data(), // a_i-c_i - Dtype(2), - diff_sq_neg.mutable_gpu_data()); // (a_i-c_i)^2 - const int channels = bottom[0]->channels(); - Dtype margin = this->layer_param_.triplet_loss_param().margin(); - Dtype loss(0.0); - // Loss component calculated from ab - for (int i = 0; i < bottom[0]->num(); ++i) { - /*dist_sq_pos.mutable_gpu_data()[i] = caffe_gpu_dot(channels, - diff_pos.gpu_data() + (i*channels), diff_pos.gpu_data() + (i*channels));*/ - // ab is a similar pair - 
dist_sq_.mutable_gpu_data()[i] = dist_sq_pos.gpu_data()[i]; - // Loss component calculated from ac - /*dist_sq_neg.mutable_gpu_data()[i] = caffe_gpu_dot(channels, - diff_neg.gpu_data() + (i*channels), diff_neg.gpu_data() + (i*channels));*/ - // ac is a dissimilar pair - dist_sq_.mutable_gpu_data()[i] -= dist_sq_neg.gpu_data()[i]; - loss += std::max(margin + dist_sq_.gpu_data()[i], Dtype(0.0)); - } - loss = loss / static_cast(bottom[0]->num()) / Dtype(2); - top[0]->mutable_gpu_data()[0] = loss; ->>>>>>> GPU version added ->>>>>>> GPU version added -} - -template -void TripletLossLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, const vector*>& bottom) { - Dtype margin = this->layer_param_.triplet_loss_param().margin(); -<<<<<<< 0a8521567403409d70ece475762c203e38274530 -======= -<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c ->>>>>>> GPU version added - Dtype losstype = this->layer_param_.triplet_loss_param().losstype(); - int num_triplets = this->layer_param_.triplet_loss_param().num_triplets(); - int dim = bottom[0]->count()/bottom[0]->num(); - int num_set = bottom[0]->num()/(2 + num_triplets); - if (losstype == 0) { - // BP for feat1(extracted from reference) - for (int i = 0; i < 1; ++i) { - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->gpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_gpu_diff(); - // the pair part - caffe_gpu_axpby( - dim, - alpha, - diff_pos.gpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - caffe_gpu_sub( - dim, - bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_gpu_data() + j*dim); // reference-negative -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< 
e7a31554fff6f6e13d1acc02201044edf0b2e322 -<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 -======= ->>>>>>> debug GPU triplet loss codes for loss type 0 -======= ->>>>>>> add 3d network training param - caffe_gpu_dot( - dim, - diff_neg.gpu_data() + j*dim, - diff_neg.gpu_data() + j*dim, -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= -<<<<<<< 0a8521567403409d70ece475762c203e38274530 ->>>>>>> add 3d network training param -<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 - dist_sq_neg.mutable_cpu_data() + j); -======= - dist_sq_neg.mutable_gpu_data() + j); ->>>>>>> debug GPU triplet loss codes for loss type 0 -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= -======= - dist_sq_neg.mutable_cpu_data() + j); ->>>>>>> GPU version added ->>>>>>> add 3d network training param - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; - // Loss component calculated from negative part - if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { - // similar pair in triplet - caffe_gpu_axpby( - dim, - alpha, - diff_pos.gpu_data() + (j*dim), - Dtype(1.0), - bout + (2 + num_triplets)*j*dim); - // dissimilar pair in triplet - caffe_gpu_axpby( - dim, - -alpha, - diff_neg.gpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - // BP for feat2(extracted from the closest sample) - for (int i = 1; i < 2; ++i) { - if (propagate_down[0]) { - const Dtype sign = -1; - const Dtype alpha = sign * top[0]->gpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_gpu_diff(); - // the pair part - caffe_gpu_axpby( - dim, - alpha, - diff_pos.gpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - // the num_triplets triplet part - for (int 
triplet = 0; triplet < num_triplets; ++triplet) { - caffe_gpu_sub( - dim, - bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_gpu_data() + j*dim); // reference-negative - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< e7a31554fff6f6e13d1acc02201044edf0b2e322 -<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 -======= ->>>>>>> debug GPU triplet loss codes for loss type 0 -======= ->>>>>>> add 3d network training param - caffe_gpu_dot( - dim, - diff_neg.gpu_data() + j*dim, - diff_neg.gpu_data() + j*dim, -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= -<<<<<<< 0a8521567403409d70ece475762c203e38274530 ->>>>>>> add 3d network training param -<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 - dist_sq_neg.mutable_cpu_data() + j); -======= - dist_sq_neg.mutable_gpu_data() + j); ->>>>>>> debug GPU triplet loss codes for loss type 0 -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= -======= - dist_sq_neg.mutable_cpu_data() + j); ->>>>>>> GPU version added ->>>>>>> add 3d network training param - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; - if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { - // similar pair in triplet - caffe_gpu_axpby( - dim, - alpha, - diff_pos.gpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - // BP for negative feature used in the num_triplets triplet part - for (int i = 2; i < 2 + num_triplets; ++i) { - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->gpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_gpu_diff(); - caffe_gpu_sub( - dim, - 
bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->gpu_data() + ((2 + num_triplets)*j + i)*dim, - diff_neg.mutable_gpu_data() + j*dim); // reference-negative - // Triplet loss accumulation - // a and negative[triplet] is a similar pair for triplet - dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.gpu_data()[j]; -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< e7a31554fff6f6e13d1acc02201044edf0b2e322 -<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 -======= ->>>>>>> debug GPU triplet loss codes for loss type 0 -======= ->>>>>>> add 3d network training param - caffe_gpu_dot( - dim, - diff_neg.gpu_data() + j*dim, - diff_neg.gpu_data() + j*dim, -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= -<<<<<<< 0a8521567403409d70ece475762c203e38274530 ->>>>>>> add 3d network training param -<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 - dist_sq_neg.mutable_cpu_data() + j); -======= - dist_sq_neg.mutable_gpu_data() + j); ->>>>>>> debug GPU triplet loss codes for loss type 0 -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= -======= - dist_sq_neg.mutable_cpu_data() + j); ->>>>>>> GPU version added ->>>>>>> add 3d network training param - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_gpu_data()[j] -= dist_sq_neg.gpu_data()[j]; - if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { - // dissimilar pairs - caffe_gpu_axpby( - dim, - alpha, - diff_neg.gpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - } else { - caffe_gpu_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } else { - for (int i = 0; i < 1; ++i) { - // BP for data1(feat1) - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->gpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_gpu_diff(); - // the pair part - caffe_gpu_axpby( - dim, - alpha, - diff_pos.gpu_data() + (j*dim), - 
Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; - dist_sq_.mutable_gpu_data()[j] += margin; - // Loss component calculated from negative part - caffe_gpu_sub( - dim, - bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_gpu_data() + j*dim); // reference-negative -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< e7a31554fff6f6e13d1acc02201044edf0b2e322 -<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 -======= ->>>>>>> debug GPU triplet loss codes for loss type 0 -======= ->>>>>>> add 3d network training param - caffe_gpu_dot( - dim, - diff_neg.gpu_data() + j*dim, - diff_neg.gpu_data() + j*dim, -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= -<<<<<<< 0a8521567403409d70ece475762c203e38274530 ->>>>>>> add 3d network training param -<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 - dist_sq_neg.mutable_cpu_data() + j); -======= - dist_sq_neg.mutable_gpu_data() + j); ->>>>>>> debug GPU triplet loss codes for loss type 0 -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= -======= - dist_sq_neg.mutable_cpu_data() + j); ->>>>>>> GPU version added ->>>>>>> add 3d network training param - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_gpu_data()[j] = 1 - \ - dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; - // loss accumulated accumulated by the triplet part - if ((dist_sq_.gpu_data()[j]) > Dtype(0.0)) { - caffe_gpu_axpby( - dim, - alpha*dist_sq_neg.mutable_gpu_data()[j]\ - /((dist_sq_pos.mutable_gpu_data()[j]+margin)\ - *(dist_sq_pos.mutable_gpu_data()[j]+margin)), - diff_pos.gpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - caffe_gpu_axpby( - dim, - -alpha/(dist_sq_pos.mutable_gpu_data()[j] + margin), - 
diff_neg.gpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - for (int i = 1; i < 2; ++i) { - // BP for positive data(feat2) - if (propagate_down[0]) { - const Dtype sign = -1; - const Dtype alpha = sign * top[0]->gpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_gpu_diff(); - // the pair part - caffe_gpu_axpby( - dim, - alpha, - diff_pos.gpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - // the num_triplets triplet part - for (int triplet = 0; triplet < num_triplets; ++triplet) { - dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; - dist_sq_.mutable_gpu_data()[j] += margin; - // Loss component calculated from negative part - caffe_gpu_sub( - dim, - bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->gpu_data() + ((2 + num_triplets)*j + 2 + triplet)*dim, - diff_neg.mutable_gpu_data() + j*dim); // reference-negative -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< e7a31554fff6f6e13d1acc02201044edf0b2e322 -<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 -======= ->>>>>>> debug GPU triplet loss codes for loss type 0 -======= ->>>>>>> add 3d network training param - caffe_gpu_dot( - dim, - diff_neg.gpu_data() + j*dim, - diff_neg.gpu_data() + j*dim, -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= -<<<<<<< 0a8521567403409d70ece475762c203e38274530 ->>>>>>> add 3d network training param -<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 - dist_sq_neg.mutable_cpu_data() + j); -======= - dist_sq_neg.mutable_gpu_data() + j); ->>>>>>> debug GPU triplet loss codes for loss type 0 -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= -======= - dist_sq_neg.mutable_cpu_data() + j); ->>>>>>> GPU version added ->>>>>>> add 3d network training param - // a and negative[triplet] is a dissimilar pair for triplet - dist_sq_.mutable_gpu_data()[j] = 1 - \ - dist_sq_neg.gpu_data()[j] / 
dist_sq_.mutable_gpu_data()[j]; - // loss accumulated accumulated by the triplet part - if ((dist_sq_.gpu_data()[j]) > Dtype(0.0)) { - caffe_gpu_axpby( - dim, - alpha*dist_sq_neg.mutable_gpu_data()[j]\ - /((dist_sq_pos.mutable_gpu_data()[j]+margin)\ - *(dist_sq_pos.mutable_gpu_data()[j]+margin)), - diff_pos.gpu_data() + (j*dim), - Dtype(1.0), - bout + ((2 + num_triplets)*j + i)*dim); - } - } - } - } - } - for (int i = 2; i < 2 + num_triplets; ++i) { - // BP for negative data(feat3) - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->gpu_diff()[0] / - static_cast(num_set); - for (int j = 0; j < num_set; ++j) { - Dtype* bout = bottom[0]->mutable_gpu_diff(); - dist_sq_.mutable_gpu_data()[j] = dist_sq_pos.mutable_gpu_data()[j]; - dist_sq_.mutable_gpu_data()[j] += margin; - // Loss component calculated from negative part - caffe_gpu_sub( - dim, - bottom[0]->gpu_data() + (2 + num_triplets)*j*dim, // reference - bottom[0]->gpu_data() + ((2 + num_triplets)*j + i)*dim, - diff_neg.mutable_gpu_data() + j*dim); // reference-negative -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< e7a31554fff6f6e13d1acc02201044edf0b2e322 -<<<<<<< 08db753eda85cf7bd42e39bdd8961688e10ecc29 -======= ->>>>>>> debug GPU triplet loss codes for loss type 0 -======= ->>>>>>> add 3d network training param - caffe_gpu_dot( - dim, - diff_neg.gpu_data() + j*dim, - diff_neg.gpu_data() + j*dim, -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= -<<<<<<< 0a8521567403409d70ece475762c203e38274530 ->>>>>>> add 3d network training param -<<<<<<< 65945ba97bc4807a127b870090422c5caea43722 - dist_sq_neg.mutable_cpu_data() + j); -======= - dist_sq_neg.mutable_gpu_data() + j); ->>>>>>> debug GPU triplet loss codes for loss type 0 -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= -======= - dist_sq_neg.mutable_cpu_data() + j); ->>>>>>> GPU version added ->>>>>>> add 3d network training param - // a and negative[triplet] is a dissimilar pair for triplet - 
dist_sq_.mutable_gpu_data()[j] = 1 - \ - dist_sq_neg.gpu_data()[j] / dist_sq_.mutable_gpu_data()[j]; - // loss accumulated accumulated by the triplet part - if ((dist_sq_.gpu_data()[j]) > Dtype(0.0)) { - caffe_gpu_axpby( - dim, - alpha/(dist_sq_pos.mutable_gpu_data()[j] + margin), - diff_neg.gpu_data() + (j*dim), - Dtype(0.0), - bout + ((2 + num_triplets)*j + i)*dim); - } else { - caffe_gpu_set(dim, Dtype(0), bout + ((2 + num_triplets)*j + i)*dim); -<<<<<<< 0a8521567403409d70ece475762c203e38274530 -======= -======= -// there must be further check to ensure the gradient calc - if (propagate_down[0]) { - const Dtype sign = 1; - const Dtype alpha = sign * top[0]->gpu_diff()[0] / - static_cast(bottom[0]->num()); - int num = bottom[0]->num(); - int channels = bottom[0]->channels(); - for (int j = 0; j < num; ++j) { - Dtype* bout = bottom[0]->mutable_gpu_diff(); - if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { - // similar pairs - caffe_gpu_axpby( - channels, - alpha, - diff_pos.gpu_data() + (j*channels), - Dtype(0.0), - bout + (j*channels)); - // dissimilar pairs - caffe_gpu_axpby( - channels, - -alpha, - diff_neg.gpu_data() + (j*channels), - Dtype(1.0), - bout + (j*channels)); - } else { - caffe_set(channels, Dtype(0), bout + (j*channels)); - } - } - } - for (int i = 1; i < 3; ++i) { -// there must be further check to ensure the gradient calc - if (propagate_down[i]) { - const Dtype sign = (i == 1) ? 
-1 : 1; - const Dtype alpha = sign * top[0]->gpu_diff()[0] / - static_cast(bottom[i]->num()); - int num = bottom[i]->num(); - int channels = bottom[i]->channels(); - for (int j = 0; j < num; ++j) { - Dtype* bout = bottom[i]->mutable_gpu_diff(); - if ((margin + dist_sq_.gpu_data()[j]) > Dtype(0.0)) { - if (i == 1) { - // similar pairs - caffe_gpu_axpby( - channels, - alpha, - diff_pos.gpu_data() + (j*channels), - Dtype(0.0), - bout + (j*channels)); - } - else { - // dissimilar pairs - caffe_gpu_axpby( - channels, - alpha, - diff_neg.gpu_data() + (j*channels), - Dtype(0.0), - bout + (j*channels)); - } - } else { - caffe_set(channels, Dtype(0), bout + (j*channels)); ->>>>>>> GPU version added ->>>>>>> GPU version added - } - } - } - } -<<<<<<< 0a8521567403409d70ece475762c203e38274530 - } -======= -<<<<<<< ef48dd03cce8b66ce612ed57bea741fd034dae0c - } -======= ->>>>>>> GPU version added ->>>>>>> GPU version added -} - -INSTANTIATE_LAYER_GPU_FUNCS(TripletLossLayer); - -} // namespace caffe diff --git a/src/caffe/solver.cpp.orig.orig.orig b/src/caffe/solver.cpp.orig.orig.orig deleted file mode 100644 index 53b16a6217d..00000000000 --- a/src/caffe/solver.cpp.orig.orig.orig +++ /dev/null @@ -1,1692 +0,0 @@ -#include - -#include -#include -#include - -#include "caffe/net.hpp" -#include "caffe/proto/caffe.pb.h" -#include "caffe/solver.hpp" -#include "caffe/util/io.hpp" -#include "caffe/util/math_functions.hpp" -#include "caffe/util/upgrade_proto.hpp" - -namespace caffe { - -template -Solver::Solver(const SolverParameter& param) - : net_() { - Init(param); -} - -template -Solver::Solver(const string& param_file) - : net_() { - SolverParameter param; - ReadProtoFromTextFileOrDie(param_file, ¶m); - Init(param); -} - -template -void Solver::Init(const SolverParameter& param) { - LOG(INFO) << "Initializing solver from parameters: " << std::endl - << param.DebugString(); - param_ = param; - CHECK_GE(param_.average_loss(), 1) << "average_loss should be non-negative."; -<<<<<<< 
06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 - if (param_.random_seed() >= 0) { -======= - if (Caffe::root_solver() && param_.random_seed() >= 0) { ->>>>>>> triplet data generation and network update -======= - if (param_.random_seed() >= 0) { ->>>>>>> add 3d network training param - Caffe::set_random_seed(param_.random_seed()); - } - // Scaffolding code - InitTrainNet(); - InitTestNets(); - LOG(INFO) << "Solver scaffolding done."; - iter_ = 0; - current_step_ = 0; -} - -template -void Solver::InitTrainNet() { - const int num_train_nets = param_.has_net() + param_.has_net_param() + - param_.has_train_net() + param_.has_train_net_param(); - const string& field_names = "net, net_param, train_net, train_net_param"; - CHECK_GE(num_train_nets, 1) << "SolverParameter must specify a train net " - << "using one of these fields: " << field_names; - CHECK_LE(num_train_nets, 1) << "SolverParameter must not contain more than " - << "one of these fields specifying a train_net: " << field_names; - NetParameter net_param; - if (param_.has_train_net_param()) { - LOG(INFO) << "Creating training net specified in train_net_param."; - net_param.CopyFrom(param_.train_net_param()); - } else if (param_.has_train_net()) { - LOG(INFO) << "Creating training net from train_net file: " - << param_.train_net(); - ReadNetParamsFromTextFileOrDie(param_.train_net(), &net_param); - } - if (param_.has_net_param()) { - LOG(INFO) << "Creating training net specified in net_param."; - net_param.CopyFrom(param_.net_param()); - } - if (param_.has_net()) { - LOG(INFO) << "Creating training net from net file: " << param_.net(); - ReadNetParamsFromTextFileOrDie(param_.net(), &net_param); - } - // Set the correct NetState. We start with the solver defaults (lowest - // precedence); then, merge in any NetState specified by the net_param itself; - // finally, merge in any NetState specified by the train_state (highest - // precedence). 
- NetState net_state; - net_state.set_phase(TRAIN); - net_state.MergeFrom(net_param.state()); - net_state.MergeFrom(param_.train_state()); - net_param.mutable_state()->CopyFrom(net_state); - net_.reset(new Net(net_param)); -} - -template -void Solver::InitTestNets() { - const bool has_net_param = param_.has_net_param(); - const bool has_net_file = param_.has_net(); - const int num_generic_nets = has_net_param + has_net_file; - CHECK_LE(num_generic_nets, 1) - << "Both net_param and net_file may not be specified."; - const int num_test_net_params = param_.test_net_param_size(); - const int num_test_net_files = param_.test_net_size(); - const int num_test_nets = num_test_net_params + num_test_net_files; - if (num_generic_nets) { - CHECK_GE(param_.test_iter_size(), num_test_nets) - << "test_iter must be specified for each test network."; - } else { - CHECK_EQ(param_.test_iter_size(), num_test_nets) - << "test_iter must be specified for each test network."; - } - // If we have a generic net (specified by net or net_param, rather than - // test_net or test_net_param), we may have an unlimited number of actual - // test networks -- the actual number is given by the number of remaining - // test_iters after any test nets specified by test_net_param and/or test_net - // are evaluated. 
- const int num_generic_net_instances = param_.test_iter_size() - num_test_nets; - const int num_test_net_instances = num_test_nets + num_generic_net_instances; - if (param_.test_state_size()) { - CHECK_EQ(param_.test_state_size(), num_test_net_instances) - << "test_state must be unspecified or specified once per test net."; - } - if (num_test_net_instances) { - CHECK_GT(param_.test_interval(), 0); - } - int test_net_id = 0; - vector sources(num_test_net_instances); - vector net_params(num_test_net_instances); - for (int i = 0; i < num_test_net_params; ++i, ++test_net_id) { - sources[test_net_id] = "test_net_param"; - net_params[test_net_id].CopyFrom(param_.test_net_param(i)); - } - for (int i = 0; i < num_test_net_files; ++i, ++test_net_id) { - sources[test_net_id] = "test_net file: " + param_.test_net(i); - ReadNetParamsFromTextFileOrDie(param_.test_net(i), - &net_params[test_net_id]); - } - const int remaining_test_nets = param_.test_iter_size() - test_net_id; - if (has_net_param) { - for (int i = 0; i < remaining_test_nets; ++i, ++test_net_id) { - sources[test_net_id] = "net_param"; - net_params[test_net_id].CopyFrom(param_.net_param()); - } - } - if (has_net_file) { - for (int i = 0; i < remaining_test_nets; ++i, ++test_net_id) { - sources[test_net_id] = "net file: " + param_.net(); - ReadNetParamsFromTextFileOrDie(param_.net(), &net_params[test_net_id]); - } - } - test_nets_.resize(num_test_net_instances); - for (int i = 0; i < num_test_net_instances; ++i) { - // Set the correct NetState. We start with the solver defaults (lowest - // precedence); then, merge in any NetState specified by the net_param - // itself; finally, merge in any NetState specified by the test_state - // (highest precedence). 
- NetState net_state; - net_state.set_phase(TEST); - net_state.MergeFrom(net_params[i].state()); - if (param_.test_state_size()) { - net_state.MergeFrom(param_.test_state(i)); - } - net_params[i].mutable_state()->CopyFrom(net_state); - LOG(INFO) - << "Creating test net (#" << i << ") specified by " << sources[i]; - test_nets_[i].reset(new Net(net_params[i])); - test_nets_[i]->set_debug_info(param_.debug_info()); - } -} - -template -void Solver::Step(int iters) { - vector*> bottom_vec; - const int start_iter = iter_; - const int stop_iter = iter_ + iters; - int average_loss = this->param_.average_loss(); - vector losses; - Dtype smoothed_loss = 0; - -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 - while (iter_ < stop_iter) { - // zero-init the params -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 -======= - while (iter_ < stop_iter) { - // zero-init the params ->>>>>>> add 3d network training param - for (int i = 0; i < net_->params().size(); ++i) { - shared_ptr > blob = net_->params()[i]; - switch (Caffe::mode()) { - case Caffe::CPU: - caffe_set(blob->count(), static_cast(0), - blob->mutable_cpu_diff()); - break; - case Caffe::GPU: -#ifndef CPU_ONLY - caffe_gpu_set(blob->count(), static_cast(0), - blob->mutable_gpu_diff()); -#else - NO_GPU; -#endif - break; - } - } - -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= - net_->ClearParamDiffs(); ->>>>>>> triplet data generation and network update -======= ->>>>>>> add 3d network training param - if (param_.test_interval() && iter_ % param_.test_interval() == 0 - && (iter_ > 0 || param_.test_initialization())) { - TestAll(); - } - -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 -======= - for (int i = 0; i < callbacks_.size(); ++i) { - callbacks_[i]->on_start(); -======= - for (; iter_ < stop_iter; ++iter_) { - if (param_.test_interval() && iter_ % param_.test_interval() == 0 - && (iter_ > 0 || 
param_.test_initialization())) { - TestAll(); ->>>>>>> triplet data generation and network update - } ->>>>>>> triplet data generation and network update - const bool display = param_.display() && iter_ % param_.display() == 0; - net_->set_debug_info(display && param_.debug_info()); - Dtype loss = net_->ForwardBackward(bottom_vec); -======= - const bool display = param_.display() && iter_ % param_.display() == 0; - net_->set_debug_info(display && param_.debug_info()); - // accumulate the loss and gradient - Dtype loss = 0; - for (int i = 0; i < param_.iter_size(); ++i) { - loss += net_->ForwardBackward(bottom_vec); - } - loss /= param_.iter_size(); - // average the loss across iterations for smoothed reporting ->>>>>>> add 3d network training param - if (losses.size() < average_loss) { - losses.push_back(loss); - int size = losses.size(); - smoothed_loss = (smoothed_loss * (size - 1) + loss) / size; - } else { - int idx = (iter_ - start_iter) % average_loss; - smoothed_loss += (loss - losses[idx]) / average_loss; - losses[idx] = loss; - } - if (display) { - LOG(INFO) << "Iteration " << iter_ << ", loss = " << smoothed_loss; - const vector*>& result = net_->output_blobs(); - int score_index = 0; - for (int j = 0; j < result.size(); ++j) { - const Dtype* result_vec = result[j]->cpu_data(); - const string& output_name = - net_->blob_names()[net_->output_blob_indices()[j]]; - const Dtype loss_weight = - net_->blob_loss_weights()[net_->output_blob_indices()[j]]; - for (int k = 0; k < result[j]->count(); ++k) { - ostringstream loss_msg_stream; - if (loss_weight) { - loss_msg_stream << " (* " << loss_weight - << " = " << loss_weight * result_vec[k] << " loss)"; - } - LOG(INFO) << " Train net output #" - << score_index++ << ": " << output_name << " = " - << result_vec[k] << loss_msg_stream.str(); - } - } - } -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 - for 
(int i = 0; i < callbacks_.size(); ++i) { - callbacks_[i]->on_gradients_ready(); - } ->>>>>>> triplet data generation and network update -======= ->>>>>>> add 3d network training param - ApplyUpdate(); - - // Increment the internal iter_ counter -- its value should always indicate - // the number of times the weights have been updated. - ++iter_; -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 - - // Save a snapshot if needed. - if (param_.snapshot() && iter_ % param_.snapshot() == 0) { -======= -======= - ComputeUpdateValue(); - net_->Update(); ->>>>>>> triplet data generation and network update - - SolverAction::Enum request = GetRequestedAction(); - - // Save a snapshot if needed. -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 - if ((param_.snapshot() - && iter_ % param_.snapshot() == 0 - && Caffe::root_solver()) || - (request == SolverAction::SNAPSHOT)) { -======= - if (param_.snapshot() && (iter_ + 1) % param_.snapshot() == 0) { ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update -======= - - // Save a snapshot if needed. - if (param_.snapshot() && iter_ % param_.snapshot() == 0) { ->>>>>>> add 3d network training param - Snapshot(); - } - } -} - -template -void Solver::Solve(const char* resume_file) { - LOG(INFO) << "Solving " << net_->name(); - LOG(INFO) << "Learning Rate Policy: " << param_.lr_policy(); - - if (resume_file) { - LOG(INFO) << "Restoring previous solver status from " << resume_file; - Restore(resume_file); - } - - // For a network that is trained by the solver, no bottom or top vecs - // should be given, and we will just provide dummy vecs. 
- Step(param_.max_iter() - iter_); - // If we haven't already, save a snapshot after optimization, unless - // overridden by setting snapshot_after_train := false - if (param_.snapshot_after_train() - && (!param_.snapshot() || iter_ % param_.snapshot() != 0)) { - Snapshot(); - } - // After the optimization is done, run an additional train and test pass to - // display the train and test loss/outputs if appropriate (based on the - // display and test_interval settings, respectively). Unlike in the rest of - // training, for the train net we only run a forward pass as we've already - // updated the parameters "max_iter" times -- this final pass is only done to - // display the loss, which is computed in the forward pass. - if (param_.display() && iter_ % param_.display() == 0) { - Dtype loss; - net_->ForwardPrefilled(&loss); - LOG(INFO) << "Iteration " << iter_ << ", loss = " << loss; - } - if (param_.test_interval() && iter_ % param_.test_interval() == 0) { - TestAll(); - } - LOG(INFO) << "Optimization Done."; -} - - -template -void Solver::TestAll() { - for (int test_net_id = 0; test_net_id < test_nets_.size(); ++test_net_id) { - Test(test_net_id); - } -} - -template -void Solver::Test(const int test_net_id) { - LOG(INFO) << "Iteration " << iter_ - << ", Testing net (#" << test_net_id << ")"; - CHECK_NOTNULL(test_nets_[test_net_id].get())-> - ShareTrainedLayersWith(net_.get()); - vector test_score; - vector test_score_output_id; - vector*> bottom_vec; - const shared_ptr >& test_net = test_nets_[test_net_id]; - Dtype loss = 0; - for (int i = 0; i < param_.test_iter(test_net_id); ++i) { - Dtype iter_loss; - const vector*>& result = - test_net->Forward(bottom_vec, &iter_loss); - if (param_.test_compute_loss()) { - loss += iter_loss; - } - if (i == 0) { - for (int j = 0; j < result.size(); ++j) { - const Dtype* result_vec = result[j]->cpu_data(); - for (int k = 0; k < result[j]->count(); ++k) { - test_score.push_back(result_vec[k]); - test_score_output_id.push_back(j); 
- } - } - } else { - int idx = 0; - for (int j = 0; j < result.size(); ++j) { - const Dtype* result_vec = result[j]->cpu_data(); - for (int k = 0; k < result[j]->count(); ++k) { - test_score[idx++] += result_vec[k]; - } - } - } - } - if (param_.test_compute_loss()) { - loss /= param_.test_iter(test_net_id); - LOG(INFO) << "Test loss: " << loss; - } - for (int i = 0; i < test_score.size(); ++i) { - const int output_blob_index = - test_net->output_blob_indices()[test_score_output_id[i]]; - const string& output_name = test_net->blob_names()[output_blob_index]; - const Dtype loss_weight = test_net->blob_loss_weights()[output_blob_index]; - ostringstream loss_msg_stream; - const Dtype mean_score = test_score[i] / param_.test_iter(test_net_id); - if (loss_weight) { - loss_msg_stream << " (* " << loss_weight - << " = " << loss_weight * mean_score << " loss)"; - } - LOG(INFO) << " Test net output #" << i << ": " << output_name << " = " - << mean_score << loss_msg_stream.str(); - } -} - -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< e29f9656158cb307d3fb4a78c63aa2247c5ad57a -template -void Solver::Snapshot() { - CHECK(Caffe::root_solver()); - string model_filename; - switch (param_.snapshot_format()) { - case caffe::SolverParameter_SnapshotFormat_BINARYPROTO: - model_filename = SnapshotToBinaryProto(); - break; - case caffe::SolverParameter_SnapshotFormat_HDF5: - model_filename = SnapshotToHDF5(); - break; - default: - LOG(FATAL) << "Unsupported snapshot format."; - } - - SnapshotSolverState(model_filename); -} -======= ->>>>>>> macro define in upgrade_proto - -template -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 -======= - -template ->>>>>>> add 3d network training param -void Solver::Snapshot() { - NetParameter net_param; - // For intermediate results, we will also dump the gradient values. 
- net_->ToProto(&net_param, param_.snapshot_diff()); -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= -string Solver::SnapshotFilename(const string extension) { ->>>>>>> triplet data generation and network update -======= ->>>>>>> add 3d network training param - string filename(param_.snapshot_prefix()); - string model_filename, snapshot_filename; - const int kBufferSize = 20; - char iter_str_buffer[kBufferSize]; -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 - snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_); -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 -======= - return filename + iter_str_buffer + extension; -======= - // Add one to iter_ to get the number of iterations that have completed. - snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_ + 1); ->>>>>>> triplet data generation and network update -======= - snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_); ->>>>>>> add 3d network training param - filename += iter_str_buffer; - model_filename = filename + ".caffemodel"; - LOG(INFO) << "Snapshotting to " << model_filename; - WriteProtoToBinaryFile(net_param, model_filename.c_str()); - SolverState state; - SnapshotSolverState(&state); -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f - state.set_iter(iter_ + 1); -======= - state.set_iter(iter_); ->>>>>>> add 3d network training param - state.set_learned_net(model_filename); - state.set_current_step(current_step_); - snapshot_filename = filename + ".solverstate"; - LOG(INFO) << "Snapshotting solver state to " << snapshot_filename; - WriteProtoToBinaryFile(state, snapshot_filename.c_str()); -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 -======= ->>>>>>> triplet data generation and network update -======= -<<<<<<< 5546b4830f2232a8d56abfe594140c1462a420f5 -======= ->>>>>>> add 3d network training param -} - -template -string Solver::SnapshotToBinaryProto() { - string 
model_filename = SnapshotFilename(".caffemodel"); - LOG(INFO) << "Snapshotting to binary proto file " << model_filename; - NetParameter net_param; - net_->ToProto(&net_param, param_.snapshot_diff()); - WriteProtoToBinaryFile(net_param, model_filename); - return model_filename; -} - -template -string Solver::SnapshotToHDF5() { - string model_filename = SnapshotFilename(".caffemodel.h5"); - LOG(INFO) << "Snapshotting to HDF5 file " << model_filename; - net_->ToHDF5(model_filename, param_.snapshot_diff()); - return model_filename; ->>>>>>> triplet data generation and network update -} - -template -void Solver::Restore(const char* state_file) { - SolverState state; - NetParameter net_param; - ReadProtoFromBinaryFile(state_file, &state); - if (state.has_learned_net()) { - ReadNetParamsFromBinaryFileOrDie(state.learned_net().c_str(), &net_param); - net_->CopyTrainedLayersFrom(net_param); - } - iter_ = state.iter(); - current_step_ = state.current_step(); - RestoreSolverState(state); -} - - -// Return the current learning rate. The currently implemented learning rate -// policies are as follows: -// - fixed: always return base_lr. -// - step: return base_lr * gamma ^ (floor(iter / step)) -// - exp: return base_lr * gamma ^ iter -// - inv: return base_lr * (1 + gamma * iter) ^ (- power) -// - multistep: similar to step but it allows non uniform steps defined by -// stepvalue -// - poly: the effective learning rate follows a polynomial decay, to be -// zero by the max_iter. return base_lr (1 - iter/max_iter) ^ (power) -// - sigmoid: the effective learning rate follows a sigmod decay -// return base_lr ( 1/(1 + exp(-gamma * (iter - stepsize)))) -// -// where base_lr, max_iter, gamma, step, stepvalue and power are defined -// in the solver parameter protocol buffer, and iter is the current iteration. 
-template -Dtype SGDSolver::GetLearningRate() { - Dtype rate; - const string& lr_policy = this->param_.lr_policy(); - if (lr_policy == "fixed") { - rate = this->param_.base_lr(); - } else if (lr_policy == "step") { - this->current_step_ = this->iter_ / this->param_.stepsize(); - rate = this->param_.base_lr() * - pow(this->param_.gamma(), this->current_step_); - } else if (lr_policy == "exp") { - rate = this->param_.base_lr() * pow(this->param_.gamma(), this->iter_); - } else if (lr_policy == "inv") { - rate = this->param_.base_lr() * - pow(Dtype(1) + this->param_.gamma() * this->iter_, - - this->param_.power()); - } else if (lr_policy == "multistep") { - if (this->current_step_ < this->param_.stepvalue_size() && - this->iter_ >= this->param_.stepvalue(this->current_step_)) { - this->current_step_++; - LOG(INFO) << "MultiStep Status: Iteration " << - this->iter_ << ", step = " << this->current_step_; - } - rate = this->param_.base_lr() * - pow(this->param_.gamma(), this->current_step_); - } else if (lr_policy == "poly") { - rate = this->param_.base_lr() * pow(Dtype(1.) - - (Dtype(this->iter_) / Dtype(this->param_.max_iter())), - this->param_.power()); - } else if (lr_policy == "sigmoid") { - rate = this->param_.base_lr() * (Dtype(1.) / - (Dtype(1.) 
+ exp(-this->param_.gamma() * (Dtype(this->iter_) - - Dtype(this->param_.stepsize()))))); - } else { - LOG(FATAL) << "Unknown learning rate policy: " << lr_policy; - } - return rate; -} - -template -void SGDSolver::PreSolve() { - // Initialize the history - const vector > >& net_params = this->net_->params(); - history_.clear(); - update_.clear(); - temp_.clear(); - for (int i = 0; i < net_params.size(); ++i) { - const vector& shape = net_params[i]->shape(); - history_.push_back(shared_ptr >(new Blob(shape))); - update_.push_back(shared_ptr >(new Blob(shape))); - temp_.push_back(shared_ptr >(new Blob(shape))); - } -} - -template -void SGDSolver::ClipGradients() { - const Dtype clip_gradients = this->param_.clip_gradients(); - if (clip_gradients < 0) { return; } - const vector > >& net_params = this->net_->params(); - Dtype sumsq_diff = 0; - for (int i = 0; i < net_params.size(); ++i) { - if (this->net_->param_owners()[i] < 0) { - sumsq_diff += net_params[i]->sumsq_diff(); - } - } - const Dtype l2norm_diff = std::sqrt(sumsq_diff); - if (l2norm_diff > clip_gradients) { - Dtype scale_factor = clip_gradients / l2norm_diff; - LOG(INFO) << "Gradient clipping: scaling down gradients (L2 norm " - << l2norm_diff << " > " << clip_gradients << ") " - << "by scale factor " << scale_factor; - for (int i = 0; i < net_params.size(); ++i) { - if (this->net_->param_owners()[i] < 0) { - net_params[i]->scale_diff(scale_factor); - } - } - } -} - -template -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 -void SGDSolver::ApplyUpdate() { -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 -======= - CHECK(Caffe::root_solver()); -======= -void SGDSolver::ComputeUpdateValue() { - const vector > >& net_params = this->net_->params(); - const vector& net_params_lr = this->net_->params_lr(); - const vector& net_params_weight_decay = - this->net_->params_weight_decay(); - // get the learning rate ->>>>>>> triplet data generation and network 
update ->>>>>>> triplet data generation and network update -======= -void SGDSolver::ApplyUpdate() { ->>>>>>> add 3d network training param - Dtype rate = GetLearningRate(); - if (this->param_.display() && this->iter_ % this->param_.display() == 0) { - LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; - } - ClipGradients(); -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 - for (int param_id = 0; param_id < this->net_->params().size(); ++param_id) { -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 - for (int param_id = 0; param_id < this->net_->learnable_params().size(); - ++param_id) { ->>>>>>> triplet data generation and network update -======= - for (int param_id = 0; param_id < this->net_->params().size(); ++param_id) { ->>>>>>> add 3d network training param - Normalize(param_id); - Regularize(param_id); - ComputeUpdateValue(param_id, rate); - } - this->net_->Update(); -} - -template -void SGDSolver::Normalize(int param_id) { - if (this->param_.iter_size() == 1) { return; } - // Scale gradient to counterbalance accumulation. -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 - const vector > >& net_params = this->net_->params(); -======= - const vector*>& net_params = this->net_->learnable_params(); ->>>>>>> triplet data generation and network update -======= - const vector > >& net_params = this->net_->params(); ->>>>>>> add 3d network training param - const Dtype accum_normalization = Dtype(1.) 
/ this->param_.iter_size(); - switch (Caffe::mode()) { - case Caffe::CPU: { - caffe_scal(net_params[param_id]->count(), accum_normalization, - net_params[param_id]->mutable_cpu_diff()); - break; - } - case Caffe::GPU: { -#ifndef CPU_ONLY - caffe_gpu_scal(net_params[param_id]->count(), accum_normalization, - net_params[param_id]->mutable_gpu_diff()); -#else - NO_GPU; -#endif - break; - } - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} - -template -void SGDSolver::Regularize(int param_id) { -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 - const vector > >& net_params = this->net_->params(); -======= - const vector*>& net_params = this->net_->learnable_params(); ->>>>>>> triplet data generation and network update - const vector& net_params_weight_decay = - this->net_->params_weight_decay(); -======= - Dtype momentum = this->param_.momentum(); ->>>>>>> triplet data generation and network update - Dtype weight_decay = this->param_.weight_decay(); - string regularization_type = this->param_.regularization_type(); - switch (Caffe::mode()) { - case Caffe::CPU: - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - // Compute the value to history, and then copy them to the blob's diff. 
- Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else if (regularization_type == "L1") { - caffe_cpu_sign(net_params[param_id]->count(), - net_params[param_id]->cpu_data(), - temp_[param_id]->mutable_cpu_data()); - caffe_axpy(net_params[param_id]->count(), - local_decay, - temp_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } - } - - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->cpu_diff(), momentum, - history_[param_id]->mutable_cpu_data()); - // copy - caffe_copy(net_params[param_id]->count(), - history_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } - break; - case Caffe::GPU: -#ifndef CPU_ONLY - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - // Compute the value to history, and then copy them to the blob's diff. 
- Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else if (regularization_type == "L1") { - caffe_gpu_sign(net_params[param_id]->count(), - net_params[param_id]->gpu_data(), - temp_[param_id]->mutable_gpu_data()); - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - temp_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } - } -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 -======= - const vector > >& net_params = this->net_->params(); - const vector& net_params_weight_decay = - this->net_->params_weight_decay(); - Dtype weight_decay = this->param_.weight_decay(); - string regularization_type = this->param_.regularization_type(); - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - switch (Caffe::mode()) { - case Caffe::CPU: { - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else if (regularization_type == "L1") { - caffe_cpu_sign(net_params[param_id]->count(), - net_params[param_id]->cpu_data(), - temp_[param_id]->mutable_cpu_data()); - caffe_axpy(net_params[param_id]->count(), - local_decay, - temp_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } - } - break; - } - case Caffe::GPU: { -#ifndef CPU_ONLY - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - 
net_params[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else if (regularization_type == "L1") { - caffe_gpu_sign(net_params[param_id]->count(), - net_params[param_id]->gpu_data(), - temp_[param_id]->mutable_gpu_data()); - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - temp_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } ->>>>>>> add 3d network training param - } -#else - NO_GPU; -#endif - break; - } - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} - -template -void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { - const vector > >& net_params = this->net_->params(); -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= - -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 -template -void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { - const vector*>& net_params = this->net_->learnable_params(); ->>>>>>> triplet data generation and network update -======= ->>>>>>> add 3d network training param - const vector& net_params_lr = this->net_->params_lr(); - Dtype momentum = this->param_.momentum(); - Dtype local_rate = rate * net_params_lr[param_id]; - // Compute the update to history, then copy it to the parameter diff. 
- switch (Caffe::mode()) { - case Caffe::CPU: { - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->cpu_diff(), momentum, - history_[param_id]->mutable_cpu_data()); - caffe_copy(net_params[param_id]->count(), - history_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - break; - } - case Caffe::GPU: { -#ifndef CPU_ONLY - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->gpu_diff(), momentum, - history_[param_id]->mutable_gpu_data()); - caffe_copy(net_params[param_id]->count(), - history_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 -======= -======= - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->gpu_diff(), momentum, - history_[param_id]->mutable_gpu_data()); - // copy - caffe_copy(net_params[param_id]->count(), - history_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update -======= ->>>>>>> add 3d network training param -#else - NO_GPU; -#endif - break; -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 - } -======= ->>>>>>> triplet data generation and network update -======= - } ->>>>>>> add 3d network training param - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} - -template -void SGDSolver::SnapshotSolverState(SolverState* state) { - state->clear_history(); - for (int i = 0; i < history_.size(); ++i) { - // Add history - BlobProto* history_blob = state->add_history(); - history_[i]->ToProto(history_blob); - } -} - -template -void SGDSolver::RestoreSolverState(const SolverState& state) { - CHECK_EQ(state.history_size(), history_.size()) - << "Incorrect length of history blobs."; - LOG(INFO) << "SGDSolver: restoring history"; 
- for (int i = 0; i < history_.size(); ++i) { - history_[i]->FromProto(state.history(i)); - } -} - -template -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 -void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { - const vector > >& net_params = this->net_->params(); -======= -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 -void SGDSolver::RestoreSolverStateFromHDF5(const string& state_file) { - hid_t file_hid = H5Fopen(state_file.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT); - CHECK_GE(file_hid, 0) << "Couldn't open solver state file " << state_file; - this->iter_ = hdf5_load_int(file_hid, "iter"); - if (H5LTfind_dataset(file_hid, "learned_net")) { - string learned_net = hdf5_load_string(file_hid, "learned_net"); - this->net_->CopyTrainedLayersFrom(learned_net); - } - this->current_step_ = hdf5_load_int(file_hid, "current_step"); - hid_t history_hid = H5Gopen2(file_hid, "history", H5P_DEFAULT); - CHECK_GE(history_hid, 0) << "Error reading history from " << state_file; - int state_history_size = hdf5_get_num_links(history_hid); - CHECK_EQ(state_history_size, history_.size()) - << "Incorrect length of history blobs."; - for (int i = 0; i < history_.size(); ++i) { - ostringstream oss; - oss << i; - hdf5_load_nd_dataset(history_hid, oss.str().c_str(), 0, - kMaxBlobAxes, history_[i].get()); - } - H5Gclose(history_hid); - H5Fclose(file_hid); -} - -template -void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { - CHECK(Caffe::root_solver()); - const vector*>& net_params = this->net_->learnable_params(); -======= -void NesterovSolver::ComputeUpdateValue() { - const vector > >& net_params = this->net_->params(); ->>>>>>> triplet data generation and network update ->>>>>>> triplet data generation and network update - const vector& net_params_lr = this->net_->params_lr(); - const vector& net_params_weight_decay = - this->net_->params_weight_decay(); - // get the learning rate - Dtype rate = 
this->GetLearningRate(); - if (this->param_.display() && this->iter_ % this->param_.display() == 0) { - LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; - } - SGDSolver::ClipGradients(); - Dtype momentum = this->param_.momentum(); - Dtype weight_decay = this->param_.weight_decay(); - string regularization_type = this->param_.regularization_type(); - switch (Caffe::mode()) { - case Caffe::CPU: - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - // save history momentum for stepping back - caffe_copy(net_params[param_id]->count(), - this->history_[param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); - - Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else if (regularization_type == "L1") { - caffe_cpu_sign(net_params[param_id]->count(), - net_params[param_id]->cpu_data(), - this->temp_[param_id]->mutable_cpu_data()); - caffe_axpy(net_params[param_id]->count(), - local_decay, - this->temp_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } - } - - // update history - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->cpu_diff(), momentum, - this->history_[param_id]->mutable_cpu_data()); - - // compute udpate: step back then over step - caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, - this->history_[param_id]->cpu_data(), -momentum, - this->update_[param_id]->mutable_cpu_data()); - - // copy - caffe_copy(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } - break; - case Caffe::GPU: -#ifndef CPU_ONLY - 
for (int param_id = 0; param_id < net_params.size(); ++param_id) { - // save history momentum for stepping back - caffe_copy(net_params[param_id]->count(), - this->history_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else if (regularization_type == "L1") { - caffe_gpu_sign(net_params[param_id]->count(), - net_params[param_id]->gpu_data(), - this->temp_[param_id]->mutable_gpu_data()); - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - this->temp_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } - } - - // update history - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->gpu_diff(), momentum, - this->history_[param_id]->mutable_gpu_data()); - - // compute udpate: step back then over step - caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, - this->history_[param_id]->gpu_data(), -momentum, - this->update_[param_id]->mutable_gpu_data()); - - // copy - caffe_copy(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } -======= -void NesterovSolver::ComputeUpdateValue(int param_id, Dtype rate) { - const vector > >& net_params = this->net_->params(); - const vector& net_params_lr = this->net_->params_lr(); - Dtype momentum = this->param_.momentum(); - Dtype local_rate = rate * net_params_lr[param_id]; - switch (Caffe::mode()) { - case Caffe::CPU: { - // save history momentum for stepping back - caffe_copy(net_params[param_id]->count(), - 
this->history_[param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); - - // update history - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->cpu_diff(), momentum, - this->history_[param_id]->mutable_cpu_data()); - - // compute update: step back then over step - caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, - this->history_[param_id]->cpu_data(), -momentum, - this->update_[param_id]->mutable_cpu_data()); - - // copy - caffe_copy(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - break; - } - case Caffe::GPU: { -#ifndef CPU_ONLY - // save history momentum for stepping back - caffe_copy(net_params[param_id]->count(), - this->history_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - // update history - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->gpu_diff(), momentum, - this->history_[param_id]->mutable_gpu_data()); - - // compute update: step back then over step - caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) + momentum, - this->history_[param_id]->gpu_data(), -momentum, - this->update_[param_id]->mutable_gpu_data()); - - // copy - caffe_copy(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); ->>>>>>> add 3d network training param -#else - NO_GPU; -#endif - break; -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= - } ->>>>>>> add 3d network training param - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} - -template -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 -void AdaGradSolver::ComputeUpdateValue(int param_id, Dtype rate) { -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 -======= -void AdaGradSolver::ComputeUpdateValue(int param_id, Dtype rate) { ->>>>>>> add 3d network training param - const vector > 
>& net_params = this->net_->params(); - const vector& net_params_lr = this->net_->params_lr(); - Dtype delta = this->param_.delta(); - Dtype local_rate = rate * net_params_lr[param_id]; - switch (Caffe::mode()) { - case Caffe::CPU: { -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= - CHECK(Caffe::root_solver()); - const vector*>& net_params = this->net_->learnable_params(); -======= -void AdaGradSolver::ComputeUpdateValue() { - const vector > >& net_params = this->net_->params(); ->>>>>>> triplet data generation and network update - const vector& net_params_lr = this->net_->params_lr(); - const vector& net_params_weight_decay = - this->net_->params_weight_decay(); - // get the learning rate - Dtype rate = this->GetLearningRate(); - Dtype delta = this->param_.delta(); - if (this->param_.display() && this->iter_ % this->param_.display() == 0) { - LOG(INFO) << "Iteration " << this->iter_ << ", lr = " << rate; - } - SGDSolver::ClipGradients(); - Dtype weight_decay = this->param_.weight_decay(); - string regularization_type = this->param_.regularization_type(); - switch (Caffe::mode()) { - case Caffe::CPU: - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else if (regularization_type == "L1") { - caffe_cpu_sign(net_params[param_id]->count(), - net_params[param_id]->cpu_data(), - this->temp_[param_id]->mutable_cpu_data()); - caffe_axpy(net_params[param_id]->count(), - local_decay, - this->temp_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } - } - - // compute square of gradient in update - 
caffe_powx(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), Dtype(2), - this->update_[param_id]->mutable_cpu_data()); - - // update history - caffe_add(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), - this->history_[param_id]->cpu_data(), - this->history_[param_id]->mutable_cpu_data()); - - // prepare update - caffe_powx(net_params[param_id]->count(), - this->history_[param_id]->cpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_cpu_data()); - - caffe_add_scalar(net_params[param_id]->count(), - delta, this->update_[param_id]->mutable_cpu_data()); - - caffe_div(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), - this->update_[param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); - - // scale and copy - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->cpu_data(), Dtype(0), - net_params[param_id]->mutable_cpu_diff()); - } - break; - case Caffe::GPU: -#ifndef CPU_ONLY - for (int param_id = 0; param_id < net_params.size(); ++param_id) { - Dtype local_rate = rate * net_params_lr[param_id]; - Dtype local_decay = weight_decay * net_params_weight_decay[param_id]; - - if (local_decay) { - if (regularization_type == "L2") { - // add weight decay - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else if (regularization_type == "L1") { - caffe_gpu_sign(net_params[param_id]->count(), - net_params[param_id]->gpu_data(), - this->temp_[param_id]->mutable_gpu_data()); - caffe_gpu_axpy(net_params[param_id]->count(), - local_decay, - this->temp_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - } else { - LOG(FATAL) << "Unknown regularization type: " << regularization_type; - } - } - - // compute square of gradient in update - caffe_gpu_powx(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), Dtype(2), - 
this->update_[param_id]->mutable_gpu_data()); - - // update history - caffe_gpu_add(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), - this->history_[param_id]->gpu_data(), - this->history_[param_id]->mutable_gpu_data()); - - // prepare update - caffe_gpu_powx(net_params[param_id]->count(), - this->history_[param_id]->gpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_add_scalar(net_params[param_id]->count(), - delta, this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_div(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), - this->update_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - // scale and copy - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->gpu_data(), Dtype(0), - net_params[param_id]->mutable_gpu_diff()); - } -#else - NO_GPU; -#endif - break; - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} - -template -void RMSPropSolver::ComputeUpdateValue(int param_id, Dtype rate) { - const vector*>& net_params = this->net_->learnable_params(); - const vector& net_params_lr = this->net_->params_lr(); - - // get the learning rate - Dtype delta = this->param_.delta(); - Dtype rms_decay = this->param_.rms_decay(); - Dtype local_rate = rate * net_params_lr[param_id]; - - switch (Caffe::mode()) { - case Caffe::CPU: ->>>>>>> triplet data generation and network update -======= ->>>>>>> add 3d network training param - // compute square of gradient in update - caffe_powx(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), Dtype(2), - this->update_[param_id]->mutable_cpu_data()); - - // update history -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 -======= ->>>>>>> add 3d network training param - caffe_add(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), - this->history_[param_id]->cpu_data(), - 
this->history_[param_id]->mutable_cpu_data()); - - // prepare update - caffe_powx(net_params[param_id]->count(), - this->history_[param_id]->cpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_cpu_data()); - - caffe_add_scalar(net_params[param_id]->count(), - delta, this->update_[param_id]->mutable_cpu_data()); - - caffe_div(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), - this->update_[param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= - caffe_cpu_axpby(net_params[param_id] -> count(), - Dtype(1-rms_decay), this->update_[param_id]->cpu_data(), - rms_decay, this->history_[param_id]-> mutable_cpu_data()); - - // prepare update - caffe_powx(net_params[param_id]->count(), - this->history_[param_id]->cpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_cpu_data()); - - caffe_add_scalar(net_params[param_id]->count(), - delta, this->update_[param_id]->mutable_cpu_data()); - - caffe_div(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), this->update_[param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); ->>>>>>> triplet data generation and network update -======= ->>>>>>> add 3d network training param - - // scale and copy - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->cpu_data(), Dtype(0), - net_params[param_id]->mutable_cpu_diff()); - break; -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f - case Caffe::GPU: -======= - } - case Caffe::GPU: { ->>>>>>> add 3d network training param -#ifndef CPU_ONLY - // compute square of gradient in update - caffe_gpu_powx(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), Dtype(2), - this->update_[param_id]->mutable_gpu_data()); - - // update history -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -<<<<<<< 683e7dcafe240094f47b3afea6bc9ca3e64c2b46 -======= ->>>>>>> add 3d network training param - 
caffe_gpu_add(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), - this->history_[param_id]->gpu_data(), - this->history_[param_id]->mutable_gpu_data()); - - // prepare update - caffe_gpu_powx(net_params[param_id]->count(), - this->history_[param_id]->gpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_add_scalar(net_params[param_id]->count(), - delta, this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_div(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), - this->update_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - // scale and copy - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->gpu_data(), Dtype(0), - net_params[param_id]->mutable_gpu_diff()); -<<<<<<< 06f83b731104b760e3b1dea0618d6fbddedb817f -======= - caffe_gpu_axpby(net_params[param_id] -> count(), - Dtype(1-rms_decay), this->update_[param_id]->gpu_data(), - rms_decay, this->history_[param_id]-> mutable_gpu_data()); - - // prepare update - caffe_gpu_powx(net_params[param_id]->count(), - this->history_[param_id]->gpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_add_scalar(net_params[param_id]->count(), - delta, this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_div(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), this->update_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_axpby(net_params[param_id]->count(), local_rate, - this->update_[param_id]->gpu_data(), Dtype(0), - net_params[param_id]->mutable_gpu_diff()); -#else - NO_GPU; -#endif - break; - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} - -template -void AdaDeltaSolver::AdaDeltaPreSolve() { - // Add the extra history entries for AdaDelta after those from - // SGDSolver::PreSolve - const vector*>& net_params = this->net_->learnable_params(); - for (int i = 0; i < net_params.size(); 
++i) { - const vector& shape = net_params[i]->shape(); - this->history_.push_back( - shared_ptr >(new Blob(shape))); - } -} - -template -void AdaDeltaSolver::ComputeUpdateValue(int param_id, Dtype rate) { - const vector*>& net_params = this->net_->learnable_params(); - const vector& net_params_lr = this->net_->params_lr(); - Dtype delta = this->param_.delta(); - Dtype momentum = this->param_.momentum(); - Dtype local_rate = rate * net_params_lr[param_id]; - size_t update_history_offset = net_params.size(); - switch (Caffe::mode()) { - case Caffe::CPU: { - // compute square of gradient in update - caffe_powx(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), Dtype(2), - this->update_[param_id]->mutable_cpu_data()); - - // update history of gradients - caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, - this->update_[param_id]->cpu_data(), momentum, - this->history_[param_id]->mutable_cpu_data()); - - // add delta to history to guard against dividing by zero later - caffe_set(net_params[param_id]->count(), delta, - this->temp_[param_id]->mutable_cpu_data()); - - caffe_add(net_params[param_id]->count(), - this->temp_[param_id]->cpu_data(), - this->history_[update_history_offset + param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); - - caffe_add(net_params[param_id]->count(), - this->temp_[param_id]->cpu_data(), - this->history_[param_id]->cpu_data(), - this->temp_[param_id]->mutable_cpu_data()); - - // divide history of updates by history of gradients - caffe_div(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), - this->temp_[param_id]->cpu_data(), - this->update_[param_id]->mutable_cpu_data()); - - // jointly compute the RMS of both for update and gradient history - caffe_powx(net_params[param_id]->count(), - this->update_[param_id]->cpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_cpu_data()); - - // compute the update - caffe_mul(net_params[param_id]->count(), - 
net_params[param_id]->cpu_diff(), - this->update_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - - // compute square of update - caffe_powx(net_params[param_id]->count(), - net_params[param_id]->cpu_diff(), Dtype(2), - this->update_[param_id]->mutable_cpu_data()); - - // update history of updates - caffe_cpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, - this->update_[param_id]->cpu_data(), momentum, - this->history_[update_history_offset + param_id]->mutable_cpu_data()); - - // apply learning rate - caffe_cpu_scale(net_params[param_id]->count(), local_rate, - net_params[param_id]->cpu_diff(), - net_params[param_id]->mutable_cpu_diff()); - break; - } - case Caffe::GPU: { -#ifndef CPU_ONLY - // compute square of gradient in update - caffe_gpu_powx(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), Dtype(2), - this->update_[param_id]->mutable_gpu_data()); - - // update history of gradients - caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, - this->update_[param_id]->gpu_data(), momentum, - this->history_[param_id]->mutable_gpu_data()); - - // add delta to history to guard against dividing by zero later - caffe_gpu_set(net_params[param_id]->count(), delta, - this->temp_[param_id]->mutable_gpu_data()); - - caffe_gpu_add(net_params[param_id]->count(), - this->temp_[param_id]->gpu_data(), - this->history_[update_history_offset + param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - caffe_gpu_add(net_params[param_id]->count(), - this->temp_[param_id]->gpu_data(), - this->history_[param_id]->gpu_data(), - this->temp_[param_id]->mutable_gpu_data()); - - // divide history of updates by history of gradients - caffe_gpu_div(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), - this->temp_[param_id]->gpu_data(), - this->update_[param_id]->mutable_gpu_data()); - - // jointly compute the RMS of both for update and gradient history - 
caffe_gpu_powx(net_params[param_id]->count(), - this->update_[param_id]->gpu_data(), Dtype(0.5), - this->update_[param_id]->mutable_gpu_data()); - - // compute the update and copy to net_diff - caffe_gpu_mul(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), - this->update_[param_id]->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); - - // compute square of update - caffe_gpu_powx(net_params[param_id]->count(), - net_params[param_id]->gpu_diff(), Dtype(2), - this->update_[param_id]->mutable_gpu_data()); - - // update history of updates - caffe_gpu_axpby(net_params[param_id]->count(), Dtype(1) - momentum, - this->update_[param_id]->gpu_data(), momentum, - this->history_[update_history_offset + param_id]->mutable_gpu_data()); - - // apply learning rate - caffe_gpu_scale(net_params[param_id]->count(), local_rate, - net_params[param_id]->gpu_diff(), - net_params[param_id]->mutable_gpu_diff()); -#else - NO_GPU; -#endif - break; - } - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} - -template -void AdamSolver::AdamPreSolve() { - // Add the extra history entries for Adam after those from - // SGDSolver::PreSolve - const vector*>& net_params = this->net_->learnable_params(); - for (int i = 0; i < net_params.size(); ++i) { - const vector& shape = net_params[i]->shape(); - this->history_.push_back( - shared_ptr >(new Blob(shape))); - } -} - -template -void AdamSolver::ComputeUpdateValue(int param_id, Dtype rate) { - const vector*>& net_params = this->net_->learnable_params(); - const vector& net_params_lr = this->net_->params_lr(); - Dtype local_rate = rate * net_params_lr[param_id]; - const Dtype beta1 = this->param_.momentum(); - const Dtype beta2 = this->param_.momentum2(); - - // we create aliases for convenience - size_t update_history_offset = net_params.size(); - Blob* val_m = this->history_[param_id].get(); - Blob* val_v = this->history_[param_id + update_history_offset].get(); - Blob* val_t = 
this->temp_[param_id].get(); - - const int t = this->iter_ + 1; - const Dtype correction = std::sqrt(Dtype(1) - pow(beta2, t)) / - (Dtype(1.) - pow(beta1, t)); - const int N = net_params[param_id]->count(); - const Dtype eps_hat = this->param_.delta(); - - switch (Caffe::mode()) { - case Caffe::CPU: { - // update m <- \beta_1 m_{t-1} + (1-\beta_1)g_t - caffe_cpu_axpby(N, Dtype(1)-beta1, - net_params[param_id]->cpu_diff(), beta1, - val_m->mutable_cpu_data()); - - // update v <- \beta_2 m_{t-1} + (1-\beta_2)g_t^2 - caffe_mul(N, - net_params[param_id]->cpu_diff(), - net_params[param_id]->cpu_diff(), - val_t->mutable_cpu_data()); - caffe_cpu_axpby(N, Dtype(1)-beta2, - val_t->cpu_data(), beta2, - val_v->mutable_cpu_data()); - - // set update - caffe_powx(N, - val_v->cpu_data(), Dtype(0.5), - val_t->mutable_cpu_data()); - caffe_add_scalar(N, eps_hat, val_t->mutable_cpu_data()); - caffe_div(N, - val_m->cpu_data(), - val_t->cpu_data(), - val_t->mutable_cpu_data()); - - caffe_cpu_scale(N, local_rate*correction, - val_t->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); - break; - } - case Caffe::GPU: { -#ifndef CPU_ONLY - // update m <- \beta_1 m_{t-1} + (1-\beta_1)g_t - caffe_gpu_axpby(N, Dtype(1)-beta1, - net_params[param_id]->gpu_diff(), beta1, - val_m->mutable_gpu_data()); - - // update v <- \beta_2 m_{t-1} + (1-\beta_2)g_t^2 - caffe_gpu_mul(N, - net_params[param_id]->gpu_diff(), - net_params[param_id]->gpu_diff(), - val_t->mutable_gpu_data()); - caffe_gpu_axpby(N, Dtype(1)-beta2, - val_t->gpu_data(), beta2, - val_v->mutable_gpu_data()); - - // set update - caffe_gpu_powx(N, - val_v->gpu_data(), Dtype(0.5), - val_t->mutable_gpu_data()); - caffe_gpu_add_scalar(N, eps_hat, - val_t->mutable_gpu_data()); - caffe_gpu_div(N, - val_m->gpu_data(), - val_t->gpu_data(), - val_t->mutable_gpu_data()); - - caffe_gpu_scale(N, local_rate*correction, - val_t->gpu_data(), - net_params[param_id]->mutable_gpu_diff()); ->>>>>>> triplet data generation and network update -======= 
->>>>>>> add 3d network training param -#else - NO_GPU; -#endif - break; - } - default: - LOG(FATAL) << "Unknown caffe mode: " << Caffe::mode(); - } -} - -INSTANTIATE_CLASS(Solver); -INSTANTIATE_CLASS(SGDSolver); -INSTANTIATE_CLASS(NesterovSolver); -INSTANTIATE_CLASS(AdaGradSolver); - -} // namespace caffe diff --git a/src/caffe/test/test_im2col_kernel.cu.orig b/src/caffe/test/test_im2col_kernel.cu.orig deleted file mode 100644 index e26f4af50d1..00000000000 --- a/src/caffe/test/test_im2col_kernel.cu.orig +++ /dev/null @@ -1,210 +0,0 @@ -#include -#include - -#include "gtest/gtest.h" - -#include "caffe/blob.hpp" -#include "caffe/common.hpp" -#include "caffe/filler.hpp" -#include "caffe/util/im2col.hpp" -#include "caffe/vision_layers.hpp" - -#include "caffe/test/test_caffe_main.hpp" - -namespace caffe { - -// Forward declare kernel functions -template -__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, - const int height, const int width, const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - const int height_col, const int width_col, - Dtype* data_col); - -template -__global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im, - const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, - Dtype* data_col); - -extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; - -template -class Im2colKernelTest : public ::testing::Test { - protected: - Im2colKernelTest() - // big so launches > 1024 threads - : blob_bottom_(new Blob(5, 500, 10, 10)), - blob_kernel_shape_(new Blob()), - blob_stride_(new Blob()), - blob_pad_(new Blob()), - blob_top_(new Blob()), - blob_top_cpu_(new Blob()) { - FillerParameter filler_param; - GaussianFiller filler(filler_param); - filler.Fill(this->blob_bottom_); - vector dim_blob_shape(1, 2); - blob_kernel_shape_->Reshape(dim_blob_shape); - blob_stride_->Reshape(dim_blob_shape); - blob_pad_->Reshape(dim_blob_shape); - - 
height_ = blob_bottom_->height(); - width_ = blob_bottom_->width(); - channels_ = blob_bottom_->channels(); - pad_ = 0; - stride_ = 2; - kernel_size_ = 3; - height_col_ = (height_ + 2 * pad_ - kernel_size_) / stride_ + 1; - width_col_ = (width_ + 2 * pad_ - kernel_size_) / stride_ + 1; - - for (int i = 0; i < 2; ++i) { - blob_kernel_shape_->mutable_cpu_data()[i] = kernel_size_; - blob_stride_->mutable_cpu_data()[i] = stride_; - blob_pad_->mutable_cpu_data()[i] = pad_; - } - } - - virtual ~Im2colKernelTest() { - delete blob_bottom_; - delete blob_top_; - delete blob_top_cpu_; - delete blob_kernel_shape_; - delete blob_stride_; - delete blob_pad_; - } - - Blob* const blob_kernel_shape_; - Blob* const blob_stride_; - Blob* const blob_pad_; - Blob* const blob_bottom_; - Blob* const blob_top_; - Blob* const blob_top_cpu_; - int height_; - int width_; - int channels_; - int pad_; - int stride_; - int kernel_size_; - int height_col_; - int width_col_; -}; - -TYPED_TEST_CASE(Im2colKernelTest, TestDtypes); - -<<<<<<< 6e71b00a8a0000eeb969029ee5665674f8fd7802 -TYPED_TEST(Im2colKernelTest, Test2D) { -======= -TYPED_TEST(Im2colKernelTest, TestGPU) { - Caffe::set_mode(Caffe::GPU); - ->>>>>>> New triplet loss layer added(beta1 version-no test source files) - // Reshape the blobs to correct size for im2col output - this->blob_top_->Reshape(this->blob_bottom_->num(), - this->channels_ * this->kernel_size_ * this->kernel_size_, - this->height_col_, - this->width_col_); - - this->blob_top_cpu_->Reshape(this->blob_bottom_->num(), - this->channels_ * this->kernel_size_ * this->kernel_size_, - this->height_col_, - this->width_col_); - - const TypeParam* bottom_data = this->blob_bottom_->gpu_data(); - TypeParam* top_data = this->blob_top_->mutable_gpu_data(); - TypeParam* cpu_data = this->blob_top_cpu_->mutable_cpu_data(); - - // CPU Version - for (int n = 0; n < this->blob_bottom_->num(); ++n) { - im2col_cpu(this->blob_bottom_->cpu_data() + this->blob_bottom_->offset(n), - 
this->channels_, this->height_, this->width_, - this->kernel_size_, this->kernel_size_, this->pad_, this->pad_, - this->stride_, this->stride_, - cpu_data + this->blob_top_cpu_->offset(n)); - } - - // GPU version - int num_kernels = this->channels_ * this->height_col_ * this->width_col_; - int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); - - // Launch with different grid sizes - for (int grid_div = 2; grid_div <= 8; grid_div++) { - for (int n = 0; n < this->blob_bottom_->num(); ++n) { - int grid_dim = default_grid_dim/grid_div; - // NOLINT_NEXT_LINE(whitespace/operators) - im2col_gpu_kernel<<>>( - num_kernels, bottom_data + this->blob_bottom_->offset(n), - this->height_, this->width_, this->kernel_size_, this->kernel_size_, - this->pad_, this->pad_, this->stride_, this->stride_, - this->height_col_, this->width_col_, - top_data + this->blob_top_->offset(n)); - CUDA_POST_KERNEL_CHECK; - } - - // Compare results against CPU version - for (int i = 0; i < this->blob_top_->count(); ++i) { - TypeParam cpuval = cpu_data[i]; - TypeParam gpuval = this->blob_top_->cpu_data()[i]; - EXPECT_EQ(cpuval, gpuval); - if (cpuval != gpuval) { - break; - } - } - } -} - -TYPED_TEST(Im2colKernelTest, TestND) { - // Reshape the blobs to correct size for im2col output - this->blob_top_->Reshape(this->blob_bottom_->num(), - this->channels_ * this->kernel_size_ * this->kernel_size_, - this->height_col_, - this->width_col_); - - this->blob_top_cpu_->ReshapeLike(*this->blob_top_); - - const TypeParam* bottom_data_cpu = this->blob_bottom_->cpu_data(); - TypeParam* top_data_cpu = this->blob_top_cpu_->mutable_cpu_data(); - - // CPU Version - for (int n = 0; n < this->blob_bottom_->num(); ++n) { - im2col_nd_cpu(bottom_data_cpu + this->blob_bottom_->offset(n), 2, - this->blob_bottom_->shape().data() + 1, - this->blob_top_cpu_->shape().data() + 1, - this->blob_kernel_shape_->cpu_data(), - this->blob_pad_->cpu_data(), this->blob_stride_->cpu_data(), - top_data_cpu + 
this->blob_top_cpu_->offset(n)); - } - - // GPU version - int num_kernels = this->channels_ * this->height_col_ * this->width_col_; - int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); - const TypeParam* bottom_data_gpu = this->blob_bottom_->gpu_data(); - - // Launch with different grid sizes - for (int grid_div = 2; grid_div <= 8; grid_div++) { - for (int n = 0; n < this->blob_bottom_->num(); ++n) { - const int grid_dim = default_grid_dim / grid_div; - TypeParam* top_data_gpu = this->blob_top_->mutable_gpu_data(); - // NOLINT_NEXT_LINE(whitespace/operators) - im2col_nd_gpu_kernel<<>>( - num_kernels, bottom_data_gpu + this->blob_bottom_->offset(n), - this->blob_bottom_->gpu_shape() + 1, this->blob_top_->gpu_shape() + 1, - this->blob_kernel_shape_->gpu_data(), this->blob_pad_->gpu_data(), - this->blob_stride_->gpu_data(), - top_data_gpu + this->blob_top_->offset(n)); - CUDA_POST_KERNEL_CHECK; - } - - // Compare results against CPU version - for (int i = 0; i < this->blob_top_->count(); ++i) { - TypeParam cpuval = top_data_cpu[i]; - TypeParam gpuval = this->blob_top_->cpu_data()[i]; - EXPECT_EQ(cpuval, gpuval); - if (cpuval != gpuval) { - break; - } - } - } -} - -} // namespace caffe diff --git a/src/caffe/test/test_net.cpp.orig.orig b/src/caffe/test/test_net.cpp.orig.orig deleted file mode 100644 index 904e6cb44ec..00000000000 --- a/src/caffe/test/test_net.cpp.orig.orig +++ /dev/null @@ -1,2508 +0,0 @@ -#include -#include -#include - -#include "google/protobuf/text_format.h" - -#include "gtest/gtest.h" - -#include "caffe/common.hpp" -#include "caffe/filler.hpp" -#include "caffe/net.hpp" -#include "caffe/util/math_functions.hpp" - -#include "caffe/test/test_caffe_main.hpp" -#include "caffe/test/test_gradient_check_util.hpp" - -namespace caffe { - -template -class NetTest : public MultiDeviceTest { - typedef typename TypeParam::Dtype Dtype; - - protected: - NetTest() : seed_(1701) {} - - virtual void InitNetFromProtoString(const string& proto) { - NetParameter 
param; - CHECK(google::protobuf::TextFormat::ParseFromString(proto, ¶m)); - net_.reset(new Net(param)); - } - - virtual void CopyNetBlobs(const bool copy_diff, - vector > >* blobs_copy) { - CHECK(net_); - const vector > >& net_blobs = net_->blobs(); - blobs_copy->clear(); - blobs_copy->resize(net_blobs.size()); - const bool kReshape = true; - for (int i = 0; i < net_blobs.size(); ++i) { - (*blobs_copy)[i].reset(new Blob()); - (*blobs_copy)[i]->CopyFrom(*net_blobs[i], copy_diff, kReshape); - } - } - - virtual void CopyNetParams(const bool copy_diff, - vector > >* params_copy) { - CHECK(net_); - const vector > >& net_params = net_->params(); - params_copy->clear(); - params_copy->resize(net_params.size()); - const bool kReshape = true; - for (int i = 0; i < net_params.size(); ++i) { - (*params_copy)[i].reset(new Blob()); - (*params_copy)[i]->CopyFrom(*net_params[i], copy_diff, kReshape); - } - } - - virtual void InitTinyNet(const bool force_backward = false, - const bool accuracy_layer = false) { - string proto = - "name: 'TinyTestNetwork' " - "layer { " - " name: 'data' " - " type: 'DummyData' " - " dummy_data_param { " - " shape { " - " dim: 5 " - " dim: 2 " - " dim: 3 " - " dim: 4 " - " } " - " data_filler { " - " type: 'gaussian' " - " std: 0.01 " - " } " - " shape { " - " dim: 5 " - " } " - " data_filler { " - " type: 'constant' " - " value: 0 " - " } " - " } " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerproduct' " - " type: 'InnerProduct' " - " inner_product_param { " - " num_output: 1000 " - " weight_filler { " - " type: 'gaussian' " - " std: 0.01 " - " } " - " bias_filler { " - " type: 'constant' " - " value: 0 " - " } " - " } " - " param { " - " lr_mult: 1 " - " decay_mult: 1 " - " } " - " param { " - " lr_mult: 2 " - " decay_mult: 0 " - " } " - " bottom: 'data' " - " top: 'innerproduct' " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerproduct' " - " bottom: 'label' " - " top: 'top_loss' " 
- "} "; - if (accuracy_layer) { - proto += - "layer { " - " name: 'loss' " - " type: 'Accuracy' " - " bottom: 'innerproduct' " - " bottom: 'label' " - " top: 'accuracy' " - "} "; - } - if (force_backward) { - proto += "force_backward: true "; - } - InitNetFromProtoString(proto); - } - - virtual void InitTinyNetEuclidean(const bool force_backward = false) { - string proto = - "name: 'TinyTestEuclidLossNetwork' " - "layer { " - " name: 'data' " - " type: 'DummyData' " - " dummy_data_param { " - " num: 5 " - " channels: 2 " - " height: 3 " - " width: 4 " - " num: 5 " - " channels: 1 " - " height: 1 " - " width: 1 " - " data_filler { " - " type: 'gaussian' " - " std: 0.01 " - " } " - " } " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerproduct' " - " type: 'InnerProduct' " - " inner_product_param { " - " num_output: 1 " - " weight_filler { " - " type: 'gaussian' " - " std: 0.01 " - " } " - " bias_filler { " - " type: 'constant' " - " value: 0 " - " } " - " } " - " param { " - " lr_mult: 1 " - " decay_mult: 1 " - " } " - " param { " - " lr_mult: 2 " - " decay_mult: 0 " - " } " - " bottom: 'data' " - " top: 'innerproduct' " - "} " - "layer { " - " name: 'loss' " - " type: 'EuclideanLoss' " - " bottom: 'innerproduct' " - " bottom: 'label' " - "} "; - if (force_backward) { - proto += "force_backward: true "; - } - InitNetFromProtoString(proto); - } - - virtual void InitTrickyNet(Dtype* loss_weight = NULL) { - ostringstream loss_weight_stream; - if (loss_weight) { - loss_weight_stream << " loss_weight: " << *loss_weight << " "; - } - const string& proto = - "name: 'TrickyTestNetwork' " - "layer { " - " name: 'data' " - " type: 'DummyData' " - " dummy_data_param { " - " num: 5 " - " channels: 2 " - " height: 3 " - " width: 4 " - " num: 5 " - " channels: 1 " - " height: 1 " - " width: 1 " - " data_filler { " - " type: 'gaussian' " - " std: 0.01 " - " } " - " } " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerproduct' " - " 
type: 'InnerProduct' " - " inner_product_param { " - " num_output: 1000 " - " weight_filler { " - " type: 'gaussian' " - " std: 0.01 " - " } " - " bias_filler { " - " type: 'constant' " - " value: 0 " - " } " - " } " - " param { " - " lr_mult: 1 " - " decay_mult: 1 " - " } " - " param { " - " lr_mult: 2 " - " decay_mult: 0 " - " } " - " bottom: 'data' " - " top: 'transformed_data' " - "} " - "layer { " - " name: 'innerproduct' " - " type: 'InnerProduct' " - " inner_product_param { " - " num_output: 1 " - " weight_filler { " - " type: 'gaussian' " - " std: 0.01 " - " } " - " bias_filler { " - " type: 'constant' " - " value: 0 " - " } " - " } " - " param { " - " lr_mult: 1 " - " decay_mult: 1 " - " } " - " param { " - " lr_mult: 2 " - " decay_mult: 0 " - " } " - " bottom: 'label' " - " top: 'transformed_label' " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " + - loss_weight_stream.str() + - " bottom: 'transformed_data' " - " bottom: 'transformed_label' " - "} "; - InitNetFromProtoString(proto); - } - - // loss_weight is the loss weight for the 'EuclideanLoss' layer output. - // midnet_loss_weight is the loss weight for the first 'InnerProduct' layer - // output. Should both default to 0.0 if unspecified (i.e., if NULL is - // passed to this function). - virtual void InitUnsharedWeightsNet(const Dtype* loss_weight = NULL, - const Dtype* midnet_loss_weight = NULL, - const bool force_backward = false, const bool bias_term = false, - const Dtype blobs_lr_w1 = 1, const Dtype blobs_lr_b1 = 2, - const Dtype blobs_lr_w2 = 1, const Dtype blobs_lr_b2 = 2) { - string bias_str = bias_term ? 
"true ":"false "; - ostringstream proto; - proto << "name: 'UnsharedWeightsNetwork' "; - if (force_backward) { - proto << "force_backward: true "; - } - proto << - "layer { " - " name: 'data' " - " type: 'DummyData' " - " dummy_data_param { " - " num: 5 " - " channels: 2 " - " height: 3 " - " width: 4 " - " data_filler { " - " type: 'gaussian' " - " std: 0.01 " - " } " - " } " - " top: 'data' " - "} " - "layer { " - " name: 'innerproduct1' " - " type: 'InnerProduct' " - " inner_product_param { " - " num_output: 10 " - " bias_term: " << bias_str << - " weight_filler { " - " type: 'gaussian' " - " std: 10 " - " } " - " } " - " param { " - " name: 'unsharedweights1' " - " lr_mult: " << blobs_lr_w1 << - " } "; - if (bias_term) { - proto << " param { lr_mult: " << blobs_lr_b1 << " } "; - } - proto << - " bottom: 'data' " - " top: 'innerproduct1' "; - if (midnet_loss_weight) { - proto << " loss_weight: " << *midnet_loss_weight << " "; - } - proto << - "} " - "layer { " - " name: 'innerproduct2' " - " type: 'InnerProduct' " - " inner_product_param { " - " num_output: 10 " - " bias_term: " << bias_str << - " weight_filler { " - " type: 'gaussian' " - " std: 10 " - " } " - " } " - " param { " - " name: 'unsharedweights2' " - " lr_mult: " << blobs_lr_w2 << - " } "; - if (bias_term) { - proto << " param { lr_mult: " << blobs_lr_b2 << " } "; - } - proto << - " bottom: 'data' " - " top: 'innerproduct2' " - "} " - "layer { " - " name: 'loss' " - " type: 'EuclideanLoss' "; - if (loss_weight) { - proto << " loss_weight: " << *loss_weight << " "; - } - proto << - " bottom: 'innerproduct1' " - " bottom: 'innerproduct2' " - "} "; - InitNetFromProtoString(proto.str()); - } - - virtual void InitSharedWeightsNet() { - const string& proto = - "name: 'SharedWeightsNetwork' " - "layer { " - " name: 'data' " - " type: 'DummyData' " - " dummy_data_param { " - " num: 5 " - " channels: 2 " - " height: 3 " - " width: 4 " - " data_filler { " - " type: 'gaussian' " - " std: 0.01 " - " } " - " } " 
- " top: 'data' " - "} " - "layer { " - " name: 'innerproduct1' " - " type: 'InnerProduct' " - " inner_product_param { " - " num_output: 10 " - " bias_term: false " - " weight_filler { " - " type: 'gaussian' " - " std: 10 " - " } " - " } " - " param { name: 'sharedweights' } " - " bottom: 'data' " - " top: 'innerproduct1' " - "} " - "layer { " - " name: 'innerproduct2' " - " type: 'InnerProduct' " - " inner_product_param { " - " num_output: 10 " - " bias_term: false " - " weight_filler { " - " type: 'gaussian' " - " std: 10 " - " } " - " } " - " param { name: 'sharedweights' } " - " bottom: 'data' " - " top: 'innerproduct2' " - "} " - "layer { " - " name: 'loss' " - " type: 'EuclideanLoss' " - " bottom: 'innerproduct1' " - " bottom: 'innerproduct2' " - "} "; - InitNetFromProtoString(proto); - } - - virtual void InitDiffDataUnsharedWeightsNet() { - const string& proto = - "name: 'DiffDataUnsharedWeightsNetwork' " - "layer { " - " name: 'data' " - " type: 'DummyData' " - " dummy_data_param { " - " num: 10 " - " channels: 10 " - " height: 1 " - " width: 1 " - " num: 10 " - " channels: 10 " - " height: 1 " - " width: 1 " - " data_filler { " - " type: 'gaussian' " - " std: 10 " - " } " - " } " - " top: 'data1' " - " top: 'data2' " - "} " - "layer { " - " name: 'innerproduct1' " - " type: 'InnerProduct' " - " inner_product_param { " - " num_output: 10 " - " bias_term: false " - " weight_filler { " - " type: 'constant' " - " value: 0.5 " - " } " - " } " - " param { name: 'unsharedweights1' } " - " bottom: 'data1' " - " top: 'innerproduct1' " - "} " - "layer { " - " name: 'innerproduct2' " - " type: 'InnerProduct' " - " inner_product_param { " - " num_output: 10 " - " bias_term: false " - " weight_filler { " - " type: 'constant' " - " value: 0.5 " - " } " - " } " - " param { name: 'unsharedweights2' } " - " bottom: 'innerproduct1' " - " top: 'innerproduct2' " - "} " - "layer { " - " name: 'loss' " - " type: 'EuclideanLoss' " - " bottom: 'data2' " - " bottom: 
'innerproduct2' " - "} "; - InitNetFromProtoString(proto); - } - - virtual void InitDiffDataSharedWeightsNet() { - const string& proto = - "name: 'DiffDataSharedWeightsNetwork' " - "layer { " - " name: 'data' " - " type: 'DummyData' " - " dummy_data_param { " - " num: 10 " - " channels: 10 " - " height: 1 " - " width: 1 " - " num: 10 " - " channels: 10 " - " height: 1 " - " width: 1 " - " data_filler { " - " type: 'gaussian' " - " std: 10 " - " } " - " } " - " top: 'data1' " - " top: 'data2' " - "} " - "layer { " - " name: 'innerproduct1' " - " type: 'InnerProduct' " - " inner_product_param { " - " num_output: 10 " - " bias_term: false " - " weight_filler { " - " type: 'constant' " - " value: 0.5 " - " } " - " } " - " param { name: 'sharedweights' } " - " bottom: 'data1' " - " top: 'innerproduct1' " - "} " - "layer { " - " name: 'innerproduct2' " - " type: 'InnerProduct' " - " inner_product_param { " - " num_output: 10 " - " bias_term: false " - " weight_filler { " - " type: 'constant' " - " value: 0.5 " - " } " - " } " - " param { name: 'sharedweights' } " - " bottom: 'innerproduct1' " - " top: 'innerproduct2' " - "} " - "layer { " - " name: 'loss' " - " type: 'EuclideanLoss' " - " bottom: 'data2' " - " bottom: 'innerproduct2' " - "} "; - InitNetFromProtoString(proto); - } - - virtual void InitReshapableNet() { - const string& proto = - "name: 'ReshapableNetwork' " - "input: 'data' " - "input_dim: 1 " - "input_dim: 3 " - "input_dim: 100 " - "input_dim: 100 " - "layer { " - " name: 'conv1' " - " type: 'Convolution' " - " bottom: 'data' " - " top: 'conv1' " - " convolution_param { " - " num_output: 5 " - " kernel_size: 3 " - " stride: 2 " - " weight_filler { " - " type: 'gaussian' " - " std: 0.01 " - " } " - " bias_filler { " - " type: 'constant' " - " value: 0.2 " - " } " - " } " - "} " - "layer { " - " name: 'relu1' " - " type: 'ReLU' " - " bottom: 'conv1' " - " top: 'conv1' " - "} " - "layer { " - " name: 'pool1' " - " type: 'Pooling' " - " bottom: 'conv1' " - " 
top: 'pool1' " - " pooling_param { " - " pool: MAX " - " kernel_size: 2 " - " stride: 2 " - " } " - "} " - "layer { " - " name: 'norm1' " - " type: 'LRN' " - " bottom: 'pool1' " - " top: 'norm1' " - " lrn_param { " - " local_size: 3 " - " } " - "} " - "layer { " - " name: 'softmax' " - " type: 'Softmax' " - " bottom: 'norm1' " - " top: 'softmax' " - "} "; - InitNetFromProtoString(proto); - } - -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< b30868495fbae44b9556c621a319178d919bf562 -======= -<<<<<<< HEAD -<<<<<<< HEAD ->>>>>>> add initiate class name of triplet loss layer -======= ->>>>>>> 011aef0... restore -======= ->>>>>>> 80a07dd... macro define in upgrade_proto -<<<<<<< b30868495fbae44b9556c621a319178d919bf562 -======= -======= -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a -<<<<<<< 6c5f31d1aa704b9eb9cfe7469b0e3661f0fcdb21 -======= ->>>>>>> restore ->>>>>>> 0a85215... triplet data generation and network update -======= ->>>>>>> 03cac8c... fixed two bugs with prototext format ->>>>>>> add initiate class name of triplet loss layer - virtual void InitSkipPropNet(bool test_skip_true) { - string proto = - "name: 'SkipPropTestNetwork' " - "layer { " - " name: 'data' " - " type: 'DummyData' " - " dummy_data_param { " - " shape { " - " dim: 5 " - " dim: 2 " - " dim: 3 " - " dim: 4 " - " } " - " data_filler { " - " type: 'gaussian' " - " std: 0.01 " - " } " - " shape { " - " dim: 5 " - " } " - " data_filler { " - " type: 'constant' " - " value: 0 " - " } " - " } " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'silence' " - " bottom: 'label' " - " type: 'Silence' " - "} " - "layer { " - " name: 'innerproduct' " - " type: 'InnerProduct' " - " inner_product_param { " - " num_output: 1 " - " weight_filler { " - " type: 'gaussian' " - " std: 0.01 " - " } " - " bias_filler { " - " type: 'constant' " - " value: 0 " - " } " - " } " - " param { " - " lr_mult: 1 " - " decay_mult: 1 " - " } " - " param { " - " lr_mult: 2 " - " decay_mult: 0 
" - " } " - " bottom: 'data' " - " top: 'innerproduct' " - "} " - "layer { " - " name: 'ip_fake_labels' " - " type: 'InnerProduct' " - " inner_product_param { " - " num_output: 1 " - " weight_filler { " - " type: 'gaussian' " - " std: 0.01 " - " } " - " bias_filler { " - " type: 'constant' " - " value: 0 " - " } " - " } " - " bottom: 'data' " - " top: 'fake_labels' " - "} " - "layer { " - " name: 'argmax' " - " bottom: 'fake_labels' " - " top: 'label_argmax' " - " type: 'ArgMax' " - "} " - "layer { " - " name: 'loss' " - " bottom: 'innerproduct' " - " bottom: 'label_argmax' "; - if (test_skip_true) -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< b30868495fbae44b9556c621a319178d919bf562 -======= -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< HEAD ->>>>>>> add initiate class name of triplet loss layer -======= ->>>>>>> 00341b2... triplet data generation and network update -======= ->>>>>>> 1882ac9... add initiate class name of triplet loss layer -======= ->>>>>>> 08d5d6d... macro define in upgrade_proto -<<<<<<< b30868495fbae44b9556c621a319178d919bf562 -======= -======= ->>>>>>> 1f7ef32... add RGB data training as an option in triplet training -======= -======= ->>>>>>> 8f22aea... add initiate class name of triplet loss layer -======= -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a ->>>>>>> 0a85215... triplet data generation and network update ->>>>>>> 0dbadac... triplet data generation and network update ->>>>>>> add initiate class name of triplet loss layer - proto += " propagate_down: true " - " propagate_down: false "; - else - proto += " propagate_down: true " - " propagate_down: true "; -======= - proto += " propagate_down: [true, false] "; - else - proto += " propagate_down: [true, true] "; ->>>>>>> 011aef0... restore -<<<<<<< HEAD -<<<<<<< HEAD -======= ->>>>>>> 1882ac9... add initiate class name of triplet loss layer -======= -======= ->>>>>>> 03cac8c... 
fixed two bugs with prototext format - proto += " propagate_down: true " - " propagate_down: false "; - else - proto += " propagate_down: true " - " propagate_down: true "; -<<<<<<< HEAD ->>>>>>> 98fb438... fixed two bugs with prototext format -<<<<<<< HEAD -<<<<<<< HEAD -======= ->>>>>>> 00341b2... triplet data generation and network update -======= ->>>>>>> 1882ac9... add initiate class name of triplet loss layer -======= -======= - proto += " propagate_down: [true, false] "; - else - proto += " propagate_down: [true, true] "; ->>>>>>> 80a07dd... macro define in upgrade_proto ->>>>>>> 08d5d6d... macro define in upgrade_proto -<<<<<<< b30868495fbae44b9556c621a319178d919bf562 -======= -======= -======= - proto += " propagate_down: true " - " propagate_down: false "; - else - proto += " propagate_down: true " - " propagate_down: true "; ->>>>>>> b266250... fixed two bugs with prototext format -<<<<<<< HEAD ->>>>>>> 1f7ef32... add RGB data training as an option in triplet training -======= -======= ->>>>>>> restore ->>>>>>> 0a85215... triplet data generation and network update -<<<<<<< HEAD ->>>>>>> 0dbadac... triplet data generation and network update -======= -======= ->>>>>>> 03cac8c... fixed two bugs with prototext format ->>>>>>> 8f22aea... add initiate class name of triplet loss layer ->>>>>>> add initiate class name of triplet loss layer - proto += - " top: 'cross_entropy_loss' " - " type: 'SigmoidCrossEntropyLoss' " - " loss_weight: 0.1 " - "} "; - InitNetFromProtoString(proto); - } - -<<<<<<< HEAD -<<<<<<< HEAD -<<<<<<< b30868495fbae44b9556c621a319178d919bf562 -======= -<<<<<<< HEAD -<<<<<<< HEAD ->>>>>>> add initiate class name of triplet loss layer -======= ->>>>>>> 083f61b... New triplet loss layer added(beta1 version-no test source files) -======= ->>>>>>> 011aef0... restore -======= ->>>>>>> 4d8130b... New triplet loss layer added(beta1 version-no test source files) -======= ->>>>>>> 80a07dd... 
macro define in upgrade_proto -<<<<<<< b30868495fbae44b9556c621a319178d919bf562 -======= -======= -<<<<<<< 5308d9998ae0b1f97b7b99b33fac968421447f3a -======= ->>>>>>> triplet data generation and network update -======= ->>>>>>> restore ->>>>>>> 0a85215... triplet data generation and network update -======= ->>>>>>> 03cac8c... fixed two bugs with prototext format ->>>>>>> add initiate class name of triplet loss layer - int seed_; - shared_ptr > net_; -}; - -TYPED_TEST_CASE(NetTest, TestDtypesAndDevices); - -TYPED_TEST(NetTest, TestHasBlob) { - this->InitTinyNet(); - EXPECT_TRUE(this->net_->has_blob("data")); - EXPECT_TRUE(this->net_->has_blob("label")); - EXPECT_TRUE(this->net_->has_blob("innerproduct")); - EXPECT_FALSE(this->net_->has_blob("loss")); - EXPECT_TRUE(this->net_->has_blob("top_loss")); -} - -TYPED_TEST(NetTest, TestGetBlob) { - this->InitTinyNet(); - EXPECT_EQ(this->net_->blob_by_name("data"), this->net_->blobs()[0]); - EXPECT_EQ(this->net_->blob_by_name("label"), this->net_->blobs()[1]); - EXPECT_EQ(this->net_->blob_by_name("innerproduct"), this->net_->blobs()[2]); - EXPECT_FALSE(this->net_->blob_by_name("loss")); - EXPECT_EQ(this->net_->blob_by_name("top_loss"), this->net_->blobs()[3]); -} - -TYPED_TEST(NetTest, TestHasLayer) { - this->InitTinyNet(); - EXPECT_TRUE(this->net_->has_layer("data")); - EXPECT_TRUE(this->net_->has_layer("innerproduct")); - EXPECT_TRUE(this->net_->has_layer("loss")); - EXPECT_FALSE(this->net_->has_layer("label")); -} - -TYPED_TEST(NetTest, TestGetLayerByName) { - this->InitTinyNet(); - EXPECT_EQ(this->net_->layer_by_name("data"), this->net_->layers()[0]); - EXPECT_EQ(this->net_->layer_by_name("innerproduct"), this->net_->layers()[1]); - EXPECT_EQ(this->net_->layer_by_name("loss"), this->net_->layers()[2]); - EXPECT_FALSE(this->net_->layer_by_name("label")); -} - -TYPED_TEST(NetTest, TestBottomNeedBackward) { - this->InitTinyNet(); - const vector >& bottom_need_backward = - this->net_->bottom_need_backward(); - EXPECT_EQ(3, 
bottom_need_backward.size()); - EXPECT_EQ(0, bottom_need_backward[0].size()); - EXPECT_EQ(1, bottom_need_backward[1].size()); - EXPECT_EQ(false, bottom_need_backward[1][0]); - EXPECT_EQ(2, bottom_need_backward[2].size()); - EXPECT_EQ(true, bottom_need_backward[2][0]); - EXPECT_EQ(false, bottom_need_backward[2][1]); -} - -TYPED_TEST(NetTest, TestBottomNeedBackwardForce) { - const bool force_backward = true; - this->InitTinyNet(force_backward); - const vector >& bottom_need_backward = - this->net_->bottom_need_backward(); - EXPECT_EQ(3, bottom_need_backward.size()); - EXPECT_EQ(0, bottom_need_backward[0].size()); - EXPECT_EQ(1, bottom_need_backward[1].size()); - EXPECT_EQ(true, bottom_need_backward[1][0]); - EXPECT_EQ(2, bottom_need_backward[2].size()); - EXPECT_EQ(true, bottom_need_backward[2][0]); - EXPECT_EQ(false, bottom_need_backward[2][1]); -} - -TYPED_TEST(NetTest, TestBottomNeedBackwardEuclideanForce) { - const bool force_backward = true; - this->InitTinyNetEuclidean(force_backward); - const vector >& bottom_need_backward = - this->net_->bottom_need_backward(); - EXPECT_EQ(3, bottom_need_backward.size()); - EXPECT_EQ(0, bottom_need_backward[0].size()); - EXPECT_EQ(1, bottom_need_backward[1].size()); - EXPECT_EQ(true, bottom_need_backward[1][0]); - EXPECT_EQ(2, bottom_need_backward[2].size()); - EXPECT_EQ(true, bottom_need_backward[2][0]); - EXPECT_EQ(true, bottom_need_backward[2][1]); -} - -TYPED_TEST(NetTest, TestBottomNeedBackwardTricky) { - this->InitTrickyNet(); - const vector >& bottom_need_backward = - this->net_->bottom_need_backward(); - EXPECT_EQ(4, bottom_need_backward.size()); - EXPECT_EQ(0, bottom_need_backward[0].size()); - EXPECT_EQ(1, bottom_need_backward[1].size()); - EXPECT_EQ(false, bottom_need_backward[1][0]); - EXPECT_EQ(1, bottom_need_backward[2].size()); - EXPECT_EQ(false, bottom_need_backward[2][0]); - EXPECT_EQ(2, bottom_need_backward[3].size()); - EXPECT_EQ(true, bottom_need_backward[3][0]); - // The label input to the 
SoftmaxLossLayer should say it "needs backward" - // since it has weights under it, even though we expect this to cause a crash - // at training/test time. - EXPECT_EQ(true, bottom_need_backward[3][1]); -} - -TYPED_TEST(NetTest, TestLossWeight) { - typedef typename TypeParam::Dtype Dtype; - // First, compute the loss and gradients with no loss_weight specified. - // In this case, the loss weight for the 'EuclideanLoss' layer should default - // to 1. - vector*> bottom; - Caffe::set_random_seed(this->seed_); - const bool kForceBackward = true; - this->InitUnsharedWeightsNet(NULL, NULL, kForceBackward); - const Dtype loss = this->net_->ForwardBackward(bottom); - const bool kCopyDiff = true; - vector > > blob_grads; - this->CopyNetBlobs(kCopyDiff, &blob_grads); - vector > > param_grads; - this->CopyNetParams(kCopyDiff, ¶m_grads); - // Check that the loss is non-trivial, otherwise the test doesn't prove much. - const Dtype kMinLossAbsValue = 1e-2; - ASSERT_GE(fabs(loss), kMinLossAbsValue); - const Dtype kErrorMargin = 1e-4; - const int kNumLossWeights = 6; - Dtype kLossWeights[kNumLossWeights] = {2, 0, 1, -1, -2.5, 3.7}; - for (int i = 0; i < kNumLossWeights; ++i) { - Caffe::set_random_seed(this->seed_); - this->InitUnsharedWeightsNet(&kLossWeights[i], NULL, kForceBackward); - const Dtype weighted_loss = this->net_->ForwardBackward(bottom); - const Dtype error_margin = kErrorMargin * fabs(kLossWeights[i]); - EXPECT_NEAR(loss * kLossWeights[i], weighted_loss, error_margin) - << "loss weight = " << kLossWeights[i]; - const vector > >& weighted_blobs = - this->net_->blobs(); - ASSERT_EQ(blob_grads.size(), weighted_blobs.size()); - for (int j = 0; j < blob_grads.size(); ++j) { - ASSERT_EQ(blob_grads[j]->count(), weighted_blobs[j]->count()); - for (int k = 0; k < blob_grads[j]->count(); ++k) { - EXPECT_NEAR(blob_grads[j]->cpu_diff()[k] * kLossWeights[i], - weighted_blobs[j]->cpu_diff()[k], error_margin); - } - } - const vector > >& weighted_params = - this->net_->params(); 
- ASSERT_EQ(param_grads.size(), weighted_params.size()); - for (int j = 0; j < param_grads.size(); ++j) { - ASSERT_EQ(param_grads[j]->count(), weighted_params[j]->count()); - for (int k = 0; k < param_grads[j]->count(); ++k) { - EXPECT_NEAR(param_grads[j]->cpu_diff()[k] * kLossWeights[i], - weighted_params[j]->cpu_diff()[k], error_margin); - } - } - } -} - -TYPED_TEST(NetTest, TestLossWeightMidNet) { - typedef typename TypeParam::Dtype Dtype; - vector*> bottom; - Caffe::set_random_seed(this->seed_); - const bool kForceBackward = true; - Dtype loss_weight = 0; - Dtype midnet_loss_weight = 1; - this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, - kForceBackward); - const Dtype loss = this->net_->ForwardBackward(bottom); - const bool kCopyDiff = true; - const bool kReshape = true; - Blob data_grad; - data_grad.CopyFrom(*this->net_->blob_by_name("data"), kCopyDiff, kReshape); - // Check that the loss is non-trivial, otherwise the test doesn't prove much. - const Dtype kMinLossAbsValue = 1e-2; - ASSERT_GE(fabs(loss), kMinLossAbsValue); - const Dtype kErrorMargin = 1e-4; - const int kNumLossWeights = 6; - Dtype kLossWeights[kNumLossWeights] = {2, 0, 1, -1, -2.5, 3.7}; - for (int i = 0; i < kNumLossWeights; ++i) { - Caffe::set_random_seed(this->seed_); - this->InitUnsharedWeightsNet(&loss_weight, &kLossWeights[i], - kForceBackward); - const Dtype weighted_loss = this->net_->ForwardBackward(bottom); - const Dtype error_margin = kErrorMargin * fabs(kLossWeights[i]); - EXPECT_NEAR(loss * kLossWeights[i], weighted_loss, error_margin) - << "loss weight = " << kLossWeights[i]; - const shared_ptr >& weighted_blob = - this->net_->blob_by_name("data"); - ASSERT_EQ(data_grad.count(), weighted_blob->count()); - for (int j = 0; j < data_grad.count(); ++j) { - EXPECT_NEAR(data_grad.cpu_diff()[j] * kLossWeights[i], - weighted_blob->cpu_diff()[j], error_margin); - } - } -} - -TYPED_TEST(NetTest, TestComboLossWeight) { - typedef typename TypeParam::Dtype Dtype; - vector*> 
bottom; - Dtype loss_weight; - Dtype midnet_loss_weight; - const bool kForceBackward = true; - const Dtype kErrorMargin = 1e-4; - - // Get the loss and gradients with 'EuclideanLoss' weight 1, - // 'InnerProduct' weight 1. - loss_weight = 1; - midnet_loss_weight = 1; - Caffe::set_random_seed(this->seed_); - this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, - kForceBackward); - const Dtype loss = this->net_->ForwardBackward(bottom); - const bool kCopyDiff = true; - vector > > blob_grads; - this->CopyNetBlobs(kCopyDiff, &blob_grads); - vector > > param_grads; - this->CopyNetParams(kCopyDiff, ¶m_grads); - - loss_weight = 2; - midnet_loss_weight = 1; - Caffe::set_random_seed(this->seed_); - this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, - kForceBackward); - const Dtype loss_main_2 = this->net_->ForwardBackward(bottom); - vector > > blob_grads_loss_2; - this->CopyNetBlobs(kCopyDiff, &blob_grads_loss_2); - vector > > param_grads_loss_2; - this->CopyNetParams(kCopyDiff, ¶m_grads_loss_2); - - loss_weight = 3; - midnet_loss_weight = 1; - Caffe::set_random_seed(this->seed_); - this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, - kForceBackward); - const Dtype loss_main_3 = this->net_->ForwardBackward(bottom); - const vector > >& blob_grads_loss_3 = - this->net_->blobs(); - ASSERT_EQ(blob_grads.size(), blob_grads_loss_3.size()); - ASSERT_EQ(blob_grads_loss_2.size(), blob_grads_loss_3.size()); - for (int j = 0; j < blob_grads.size(); ++j) { - const string& blob_name = this->net_->blob_names()[j]; - bool grad_should_change = true; - if (blob_name == "innerproduct1_innerproduct1_0_split_0") { - grad_should_change = false; - } - ASSERT_EQ(blob_grads[j]->count(), blob_grads_loss_3[j]->count()); - ASSERT_EQ(blob_grads_loss_2[j]->count(), blob_grads_loss_3[j]->count()); - for (int k = 0; k < blob_grads[j]->count(); ++k) { - const Dtype grad_diff_2 = blob_grads_loss_2[j]->cpu_diff()[k] - - blob_grads[j]->cpu_diff()[k]; - const Dtype 
grad_diff_3 = blob_grads_loss_3[j]->cpu_diff()[k] - - blob_grads[j]->cpu_diff()[k]; - if (grad_should_change) { - // Test non-triviality. - const Dtype kMinGradDiffAbsValue = 1e-4; - EXPECT_GT(fabs(grad_diff_2), kMinGradDiffAbsValue) << blob_name; - EXPECT_NEAR(2 * grad_diff_2, grad_diff_3, kErrorMargin) << blob_name; - } else { - EXPECT_EQ(0, grad_diff_2) << blob_name; - EXPECT_EQ(0, grad_diff_3) << blob_name; - } - } - } - - loss_weight = 1; - midnet_loss_weight = 2; - Caffe::set_random_seed(this->seed_); - this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, - kForceBackward); - const Dtype loss_midnet_2 = this->net_->ForwardBackward(bottom); - this->CopyNetBlobs(kCopyDiff, &blob_grads_loss_2); - this->CopyNetParams(kCopyDiff, ¶m_grads_loss_2); - - loss_weight = 1; - midnet_loss_weight = 3; - Caffe::set_random_seed(this->seed_); - this->InitUnsharedWeightsNet(&loss_weight, &midnet_loss_weight, - kForceBackward); - const Dtype loss_midnet_3 = this->net_->ForwardBackward(bottom); - const vector > >& blob_grads_midnet_loss_3 = - this->net_->blobs(); - ASSERT_EQ(blob_grads.size(), blob_grads_midnet_loss_3.size()); - ASSERT_EQ(blob_grads_loss_2.size(), blob_grads_midnet_loss_3.size()); - const vector& blob_names = this->net_->blob_names(); - for (int j = 0; j < blob_grads.size(); ++j) { - const string& blob_name = blob_names[j]; - bool grad_should_change = false; - if (blob_name == "innerproduct1" || - blob_name == "innerproduct1_innerproduct1_0_split_0" || - blob_name == "data_data_0_split_0" || blob_name == "data") { - grad_should_change = true; - } - ASSERT_EQ(blob_grads[j]->count(), blob_grads_midnet_loss_3[j]->count()); - ASSERT_EQ(blob_grads[j]->count(), blob_grads_loss_2[j]->count()); - for (int k = 0; k < blob_grads[j]->count(); ++k) { - const Dtype grad_diff_2 = blob_grads_loss_2[j]->cpu_diff()[k] - - blob_grads[j]->cpu_diff()[k]; - const Dtype grad_diff_3 = blob_grads_midnet_loss_3[j]->cpu_diff()[k] - - blob_grads[j]->cpu_diff()[k]; - if 
(grad_should_change) { - // Test non-triviality. - const Dtype kMinGradDiffAbsValue = 1e-4; - EXPECT_GT(fabs(grad_diff_2), kMinGradDiffAbsValue) << blob_name; - EXPECT_NEAR(2 * grad_diff_2, grad_diff_3, kErrorMargin) << blob_name; - } else { - EXPECT_EQ(0, grad_diff_2) << blob_name; - EXPECT_EQ(0, grad_diff_3) << blob_name; - } - } - } - - const Dtype kMinLossDiffAbsValue = 1e-4; - - Dtype loss_diff_2 = loss_main_2 - loss; - // Test non-triviality. - EXPECT_GT(fabs(loss_diff_2), kMinLossDiffAbsValue); - Dtype loss_diff_3 = loss_main_3 - loss; - EXPECT_NEAR(2 * loss_diff_2, loss_diff_3, kErrorMargin); - - loss_diff_2 = loss_midnet_2 - loss; - // Test non-triviality. - EXPECT_GT(fabs(loss_diff_2), kMinLossDiffAbsValue); - loss_diff_3 = loss_midnet_3 - loss; - EXPECT_NEAR(2 * loss_diff_2, loss_diff_3, kErrorMargin); -} - -TYPED_TEST(NetTest, TestBackwardWithAccuracyLayer) { - typedef typename TypeParam::Dtype Dtype; - const bool kForceBackward = false; - const bool kAccuracyLayer = true; - this->InitTinyNet(kForceBackward, kAccuracyLayer); - EXPECT_TRUE(this->net_->has_blob("accuracy")); - vector*> bottom; - // Test that we can do Backward even though we have an 'Accuracy' layer. 
- this->net_->ForwardBackward(bottom); -} - -TYPED_TEST(NetTest, TestUnsharedWeightsDataNet) { - typedef typename TypeParam::Dtype Dtype; - this->InitUnsharedWeightsNet(); - vector*> bottom; - Dtype loss; - this->net_->Forward(bottom, &loss); - EXPECT_GT(loss, 0); -} - -TYPED_TEST(NetTest, TestSharedWeightsDataNet) { - typedef typename TypeParam::Dtype Dtype; - this->InitSharedWeightsNet(); - vector*> bottom; - Dtype loss; - this->net_->Forward(bottom, &loss); - EXPECT_FLOAT_EQ(loss, 0); -} - -TYPED_TEST(NetTest, TestUnsharedWeightsDiffNet) { - typedef typename TypeParam::Dtype Dtype; - this->InitUnsharedWeightsNet(); - vector*> bottom; - Net* net = this->net_.get(); - net->Forward(bottom); - net->Backward(); - Layer* ip1_layer = net->layer_by_name("innerproduct1").get(); - Layer* ip2_layer = net->layer_by_name("innerproduct2").get(); - const int count = ip1_layer->blobs()[0]->count(); - const Dtype* grad1 = ip1_layer->blobs()[0]->cpu_diff(); - const Dtype* grad2 = ip2_layer->blobs()[0]->cpu_diff(); - for (int i = 0; i < count; ++i) { - EXPECT_GT(fabs(grad1[i]), 0); - EXPECT_FLOAT_EQ(-1 * grad1[i], grad2[i]); - } -} - -TYPED_TEST(NetTest, TestSharedWeightsDiffNet) { - typedef typename TypeParam::Dtype Dtype; - this->InitSharedWeightsNet(); - vector*> bottom; - Net* net = this->net_.get(); - Dtype loss; - net->Forward(bottom, &loss); - net->Backward(); - EXPECT_FLOAT_EQ(loss, 0); - Layer* ip1_layer = net->layer_by_name("innerproduct1").get(); - Layer* ip2_layer = net->layer_by_name("innerproduct2").get(); - const int count = ip1_layer->blobs()[0]->count(); - const Dtype* grad1 = ip1_layer->blobs()[0]->cpu_diff(); - const Dtype* grad2 = ip2_layer->blobs()[0]->cpu_diff(); - for (int i = 0; i < count; ++i) { - EXPECT_FLOAT_EQ(0, grad1[i]); - EXPECT_FLOAT_EQ(0, grad2[i]); - } -} - -TYPED_TEST(NetTest, TestSharedWeightsUpdate) { - typedef typename TypeParam::Dtype Dtype; - Caffe::set_random_seed(this->seed_); - this->InitDiffDataSharedWeightsNet(); - vector*> bottom; - 
EXPECT_EQ(this->net_->layer_names()[1], "innerproduct1"); - EXPECT_EQ(this->net_->layer_names()[2], "innerproduct2"); - Blob* ip1_weights = this->net_->layers()[1]->blobs()[0].get(); - Blob* ip2_weights = this->net_->layers()[2]->blobs()[0].get(); - // Check that data blobs of shared weights share the same location in memory. - EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); - // Check that diff blobs of shared weights are at different locations in - // memory. (The diffs should be accumulated at update time.) - EXPECT_NE(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); - this->net_->Forward(bottom); - this->net_->Backward(); - // Compute the expected update as the data minus the two diffs. - Blob shared_params; - const bool reshape = true; - const bool copy_diff = false; - shared_params.CopyFrom(*ip1_weights, copy_diff, reshape); - shared_params.CopyFrom(*ip1_weights, !copy_diff, reshape); - const int count = ip1_weights->count(); - // Make sure the diffs are non-trivial. - for (int i = 0; i < count; ++i) { - EXPECT_NE(0, ip1_weights->cpu_diff()[i]); - EXPECT_NE(0, ip2_weights->cpu_diff()[i]); - EXPECT_NE(ip1_weights->cpu_diff()[i], ip2_weights->cpu_diff()[i]); - } - caffe_axpy(count, Dtype(1), ip2_weights->cpu_diff(), - shared_params.mutable_cpu_diff()); - caffe_axpy(count, Dtype(-1), shared_params.cpu_diff(), - shared_params.mutable_cpu_data()); - const Dtype* expected_updated_params = shared_params.cpu_data(); - this->net_->Update(); - const Dtype* actual_updated_params = ip1_weights->cpu_data(); - for (int i = 0; i < count; ++i) { - EXPECT_EQ(expected_updated_params[i], actual_updated_params[i]); - } - // Check that data blobs of shared weights STILL point to the same memory - // location (because ... who knows). 
- EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); - - Caffe::set_random_seed(this->seed_); - this->InitDiffDataUnsharedWeightsNet(); - EXPECT_EQ(this->net_->layer_names()[1], "innerproduct1"); - EXPECT_EQ(this->net_->layer_names()[2], "innerproduct2"); - ip1_weights = this->net_->layers()[1]->blobs()[0].get(); - ip2_weights = this->net_->layers()[2]->blobs()[0].get(); - // Check that data and diff blobs of unshared weights are at different - // locations in memory. - EXPECT_NE(ip1_weights->cpu_data(), ip2_weights->cpu_data()); - EXPECT_NE(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); - this->net_->Forward(bottom); - this->net_->Backward(); - // Compute the expected update. - Blob unshared_params1; - unshared_params1.CopyFrom(*ip1_weights, copy_diff, reshape); - unshared_params1.CopyFrom(*ip1_weights, !copy_diff, reshape); - Blob unshared_params2; - unshared_params2.CopyFrom(*ip2_weights, copy_diff, reshape); - unshared_params2.CopyFrom(*ip2_weights, !copy_diff, reshape); - // Make sure the diffs are non-trivial and sum to the diff in the shared net. 
- for (int i = 0; i < count; ++i) { - EXPECT_NE(0, ip1_weights->cpu_diff()[i]); - EXPECT_NE(0, ip2_weights->cpu_diff()[i]); - EXPECT_NE(ip1_weights->cpu_diff()[i], ip2_weights->cpu_diff()[i]); - EXPECT_EQ(ip1_weights->cpu_diff()[i] + ip2_weights->cpu_diff()[i], - shared_params.cpu_diff()[i]); - } - caffe_axpy(count, Dtype(-1), ip1_weights->cpu_diff(), - unshared_params1.mutable_cpu_data()); - caffe_axpy(count, Dtype(-1), ip2_weights->cpu_diff(), - unshared_params2.mutable_cpu_data()); - const Dtype* expected_updated_params1 = unshared_params1.cpu_data(); - const Dtype* expected_updated_params2 = unshared_params2.cpu_data(); - this->net_->Update(); - const Dtype* actual_updated_params1 = ip1_weights->cpu_data(); - const Dtype* actual_updated_params2 = ip2_weights->cpu_data(); - for (int i = 0; i < count; ++i) { - EXPECT_EQ(expected_updated_params1[i], actual_updated_params1[i]); - EXPECT_EQ(expected_updated_params2[i], actual_updated_params2[i]); - EXPECT_NE(actual_updated_params1[i], actual_updated_params2[i]); - EXPECT_NE(expected_updated_params, expected_updated_params1); - } -} - -TYPED_TEST(NetTest, TestSharedWeightsResume) { - typedef typename TypeParam::Dtype Dtype; - - // Create a net with weight sharing; Update it once. - Caffe::set_random_seed(this->seed_); - this->InitDiffDataSharedWeightsNet(); - vector*> bottom; - EXPECT_EQ(this->net_->layer_names()[1], "innerproduct1"); - EXPECT_EQ(this->net_->layer_names()[2], "innerproduct2"); - Blob* ip1_weights = this->net_->layers()[1]->blobs()[0].get(); - Blob* ip2_weights = this->net_->layers()[2]->blobs()[0].get(); - // Check that data and diff blobs of shared weights share the same memory - // locations. 
- EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); - EXPECT_EQ(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); - this->net_->ForwardBackward(bottom); - this->net_->Update(); - Blob shared_params; - const bool kReshape = true; - const bool kCopyDiff = false; - shared_params.CopyFrom(*ip1_weights, kCopyDiff, kReshape); - const int count = ip1_weights->count(); - - // Write the net to a NetParameter, as in Solver::Snapshot. - NetParameter net_param; - this->net_->ToProto(&net_param); - - // Reinitialize the net and copy parameters from net_param, as in - // Solver::Restore. - Caffe::set_random_seed(this->seed_); - this->InitDiffDataSharedWeightsNet(); - this->net_->CopyTrainedLayersFrom(net_param); - ip1_weights = this->net_->layers()[1]->blobs()[0].get(); - ip2_weights = this->net_->layers()[2]->blobs()[0].get(); - ASSERT_FALSE(NULL == ip1_weights); - ASSERT_FALSE(NULL == ip2_weights); - EXPECT_NE(ip1_weights, ip2_weights); - // Check that data and diff blobs of shared weights share the same memory - // locations. - EXPECT_EQ(ip1_weights->cpu_data(), ip2_weights->cpu_data()); - EXPECT_EQ(ip1_weights->cpu_diff(), ip2_weights->cpu_diff()); - for (int i = 0; i < count; ++i) { - EXPECT_FLOAT_EQ(shared_params.cpu_data()[i], ip1_weights->cpu_data()[i]); - } -} - -TYPED_TEST(NetTest, TestParamPropagateDown) { - typedef typename TypeParam::Dtype Dtype; - vector*> bottom; - const bool kBiasTerm = true, kForceBackward = false; - const Dtype* kLossWeight1 = NULL; - const Dtype* kLossWeight2 = NULL; - - // Run the net with all params learned; check that gradients are non-zero. 
- Caffe::set_random_seed(this->seed_); - Dtype blobs_lr_w1 = 1, blobs_lr_w2 = 1, blobs_lr_b1 = 2, blobs_lr_b2 = 2; - this->InitUnsharedWeightsNet(kLossWeight1, kLossWeight2, kForceBackward, - kBiasTerm, blobs_lr_w1, blobs_lr_w2, blobs_lr_b1, blobs_lr_b2); - this->net_->Forward(bottom); - this->net_->Backward(); - const vector > >& params = this->net_->params(); - const int num_params = params.size(); - ASSERT_EQ(4, num_params); - const Dtype kNonZeroTestMin = 1e-3; - vector param_asums(params.size()); - for (int i = 0; i < num_params; ++i) { - const Dtype param_asum = - caffe_cpu_asum(params[i]->count(), params[i]->cpu_diff()); - param_asums[i] = param_asum; - EXPECT_GT(param_asum, kNonZeroTestMin); - } - - // Change the learning rates to different non-zero values; should see same - // gradients. - Caffe::set_random_seed(this->seed_); - blobs_lr_w1 *= 2, blobs_lr_w2 *= 2, blobs_lr_b1 *= 2, blobs_lr_b2 *= 2; - this->InitUnsharedWeightsNet(kLossWeight1, kLossWeight2, kForceBackward, - kBiasTerm, blobs_lr_w1, blobs_lr_w2, blobs_lr_b1, blobs_lr_b2); - this->net_->Forward(bottom); - this->net_->Backward(); - const vector > >& params2 = this->net_->params(); - ASSERT_EQ(num_params, params2.size()); - for (int i = 0; i < num_params; ++i) { - const Dtype param_asum = - caffe_cpu_asum(params2[i]->count(), params2[i]->cpu_diff()); - EXPECT_FLOAT_EQ(param_asum, param_asums[i]); - } - - // Change a subset of the learning rates to zero; check that we see zero - // gradients for those. 
- Caffe::set_random_seed(this->seed_); - blobs_lr_w1 = 1, blobs_lr_w2 = 0, blobs_lr_b1 = 0, blobs_lr_b2 = 1; - this->InitUnsharedWeightsNet(kLossWeight1, kLossWeight2, kForceBackward, - kBiasTerm, blobs_lr_w1, blobs_lr_w2, blobs_lr_b1, blobs_lr_b2); - this->net_->Forward(bottom); - this->net_->Backward(); - const vector > >& params3 = this->net_->params(); - ASSERT_EQ(num_params, params3.size()); - for (int i = 0; i < num_params; ++i) { - const Dtype param_asum = - caffe_cpu_asum(params3[i]->count(), params3[i]->cpu_diff()); - if (i == 1 || i == 2) { - EXPECT_FLOAT_EQ(0, param_asum); - } else { - EXPECT_FLOAT_EQ(param_asum, param_asums[i]); - } - } - - // Change the opposite subset of the learning rates to zero. - Caffe::set_random_seed(this->seed_); - blobs_lr_w1 = 0, blobs_lr_w2 = 1, blobs_lr_b1 = 1, blobs_lr_b2 = 0; - this->InitUnsharedWeightsNet(kLossWeight1, kLossWeight2, kForceBackward, - kBiasTerm, blobs_lr_w1, blobs_lr_w2, blobs_lr_b1, blobs_lr_b2); - this->net_->Forward(bottom); - this->net_->Backward(); - const vector > >& params4 = this->net_->params(); - ASSERT_EQ(num_params, params4.size()); - for (int i = 0; i < num_params; ++i) { - const Dtype param_asum = - caffe_cpu_asum(params4[i]->count(), params4[i]->cpu_diff()); - if (i == 0 || i == 3) { - EXPECT_FLOAT_EQ(0, param_asum); - } else { - EXPECT_FLOAT_EQ(param_asum, param_asums[i]); - } - } -} - -TYPED_TEST(NetTest, TestFromTo) { - typedef typename TypeParam::Dtype Dtype; - this->InitTinyNet(); - - // Run Forward and Backward, recording the data diff and loss. - Blob data; - data.ReshapeLike(*this->net_->blob_by_name("data")); - this->net_->ForwardPrefilled(); - this->net_->Backward(); - data.CopyFrom(*this->net_->blob_by_name("data"), true, true); - const Dtype *loss_ptr = this->net_->output_blobs()[0]->cpu_data(); - Dtype loss = *loss_ptr; - - // Check that combining partial Forwards gives the same loss. 
- for (int i = 1; i < this->net_->layers().size(); ++i) { - // Note that we skip layer zero to keep the same data. - this->net_->ForwardFromTo(1, 1); - if (i < this->net_->layers().size() - 1) { - this->net_->ForwardFrom(i + 1); - } - EXPECT_EQ(loss, *loss_ptr); - } - - // Check that combining partial Backwards gives the same data diff. - for (int i = 1; i < this->net_->layers().size(); ++i) { - this->net_->BackwardTo(i); - this->net_->BackwardFrom(i - 1); - for (int j = 0; j < data.count(); ++j) { - EXPECT_EQ(data.cpu_diff()[j], - this->net_->blob_by_name("data")->cpu_diff()[j]); - } - } -} - -class FilterNetTest : public ::testing::Test { - protected: - void RunFilterNetTest( - const string& input_param_string, const string& filtered_param_string) { - NetParameter input_param; - CHECK(google::protobuf::TextFormat::ParseFromString( - input_param_string, &input_param)); - NetParameter expected_filtered_param; - CHECK(google::protobuf::TextFormat::ParseFromString( - filtered_param_string, &expected_filtered_param)); - NetParameter actual_filtered_param; - Net::FilterNet(input_param, &actual_filtered_param); - EXPECT_EQ(expected_filtered_param.DebugString(), - actual_filtered_param.DebugString()); - // Also test idempotence. 
- NetParameter double_filtered_param; - Net::FilterNet(actual_filtered_param, &double_filtered_param); - EXPECT_EQ(actual_filtered_param.DebugString(), - double_filtered_param.DebugString()); - } -}; - -TEST_F(FilterNetTest, TestNoFilter) { - const string& input_proto = - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - "} "; - this->RunFilterNetTest(input_proto, input_proto); -} - -TEST_F(FilterNetTest, TestFilterLeNetTrainTest) { - const string& input_proto = - "name: 'LeNet' " - "layer { " - " name: 'mnist' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - " data_param { " - " source: 'mnist-train-leveldb' " - " batch_size: 64 " - " } " - " transform_param { " - " scale: 0.00390625 " - " } " - " include: { phase: TRAIN } " - "} " - "layer { " - " name: 'mnist' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - " data_param { " - " source: 'mnist-test-leveldb' " - " batch_size: 100 " - " } " - " transform_param { " - " scale: 0.00390625 " - " } " - " include: { phase: TEST } " - "} " - "layer { " - " name: 'conv1' " - " type: 'Convolution' " - " bottom: 'data' " - " top: 'conv1' " - " param { " - " lr_mult: 1 " - " } " - " param { " - " lr_mult: 2 " - " } " - " convolution_param { " - " num_output: 20 " - " kernel_size: 5 " - " stride: 1 " - " weight_filler { " - " type: 'xavier' " - " } " - " bias_filler { " - " type: 'constant' " - " } " - " } " - "} " - "layer { " - " name: 'ip1' " - " type: 'InnerProduct' " - " bottom: 'conv1' " - " top: 'ip1' " - " param { " - " lr_mult: 1 " - " } " - " param { " - " lr_mult: 2 " - " } " - " inner_product_param { " - " num_output: 10 " - " weight_filler { " - " type: 'xavier' " - " } " - " bias_filler { " - " type: 
'constant' " - " } " - " } " - "} " - "layer { " - " name: 'accuracy' " - " type: 'Accuracy' " - " bottom: 'ip1' " - " bottom: 'label' " - " top: 'accuracy' " - " include: { phase: TEST } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'ip2' " - " bottom: 'label' " - " top: 'loss' " - "} "; - const string input_proto_train = "state: { phase: TRAIN } " + input_proto; - const string input_proto_test = "state: { phase: TEST } " + input_proto; - const string output_proto_train = - "name: 'LeNet' " - "layer { " - " name: 'mnist' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - " data_param { " - " source: 'mnist-train-leveldb' " - " batch_size: 64 " - " } " - " transform_param { " - " scale: 0.00390625 " - " } " - " include: { phase: TRAIN } " - "} " - "layer { " - " name: 'conv1' " - " type: 'Convolution' " - " bottom: 'data' " - " top: 'conv1' " - " param { " - " lr_mult: 1 " - " } " - " param { " - " lr_mult: 2 " - " } " - " convolution_param { " - " num_output: 20 " - " kernel_size: 5 " - " stride: 1 " - " weight_filler { " - " type: 'xavier' " - " } " - " bias_filler { " - " type: 'constant' " - " } " - " } " - "} " - "layer { " - " name: 'ip1' " - " type: 'InnerProduct' " - " bottom: 'conv1' " - " top: 'ip1' " - " param { " - " lr_mult: 1 " - " } " - " param { " - " lr_mult: 2 " - " } " - " inner_product_param { " - " num_output: 10 " - " weight_filler { " - " type: 'xavier' " - " } " - " bias_filler { " - " type: 'constant' " - " } " - " } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'ip2' " - " bottom: 'label' " - " top: 'loss' " - "} "; - const string& output_proto_test = - "name: 'LeNet' " - "layer { " - " name: 'mnist' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - " data_param { " - " source: 'mnist-test-leveldb' " - " batch_size: 100 " - " } " - " transform_param { " - " scale: 0.00390625 " - " } " - " include: { phase: TEST } " - "} " - "layer { " - " name: 
'conv1' " - " type: 'Convolution' " - " bottom: 'data' " - " top: 'conv1' " - " param { " - " lr_mult: 1 " - " } " - " param { " - " lr_mult: 2 " - " } " - " convolution_param { " - " num_output: 20 " - " kernel_size: 5 " - " stride: 1 " - " weight_filler { " - " type: 'xavier' " - " } " - " bias_filler { " - " type: 'constant' " - " } " - " } " - "} " - "layer { " - " name: 'ip1' " - " type: 'InnerProduct' " - " bottom: 'conv1' " - " top: 'ip1' " - " param { " - " lr_mult: 1 " - " } " - " param { " - " lr_mult: 2 " - " } " - " inner_product_param { " - " num_output: 10 " - " weight_filler { " - " type: 'xavier' " - " } " - " bias_filler { " - " type: 'constant' " - " } " - " } " - "} " - "layer { " - " name: 'accuracy' " - " type: 'Accuracy' " - " bottom: 'ip1' " - " bottom: 'label' " - " top: 'accuracy' " - " include: { phase: TEST } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'ip2' " - " bottom: 'label' " - " top: 'loss' " - "} "; - const string output_proto_train_explicit = - output_proto_train + " state: { phase: TRAIN } "; - const string output_proto_test_explicit = - output_proto_test + " state: { phase: TEST } "; - this->RunFilterNetTest(input_proto_train, output_proto_train_explicit); - this->RunFilterNetTest(input_proto_test, output_proto_test_explicit); -} - -TEST_F(FilterNetTest, TestFilterOutByStage) { - const string& input_proto = - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - " include: { stage: 'mystage' } " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - "} "; - const string& output_proto = - "name: 'TestNetwork' " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - "} " - "layer { " - " name: 'loss' " - 
" type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - "} "; - this->RunFilterNetTest(input_proto, output_proto); -} - -TEST_F(FilterNetTest, TestFilterOutByStage2) { - const string& input_proto = - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " include: { stage: 'mystage' } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - "} "; - const string& output_proto = - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - "} "; - this->RunFilterNetTest(input_proto, output_proto); -} - -TEST_F(FilterNetTest, TestFilterInByStage) { - const string& input_proto = - "state: { stage: 'mystage' } " - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " include: { stage: 'mystage' } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - "} "; - this->RunFilterNetTest(input_proto, input_proto); -} - -TEST_F(FilterNetTest, TestFilterInByStage2) { - const string& input_proto = - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " exclude: { stage: 'mystage' } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - "} "; - this->RunFilterNetTest(input_proto, 
input_proto); -} - -TEST_F(FilterNetTest, TestFilterOutByMultipleStage) { - const string& input_proto = - "state: { stage: 'mystage' } " - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " include: { stage: 'mystage' stage: 'myotherstage' } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - " include: { stage: 'mystage' } " - "} "; - const string& output_proto = - "state: { stage: 'mystage' } " - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - " include: { stage: 'mystage' } " - "} "; - this->RunFilterNetTest(input_proto, output_proto); -} - -TEST_F(FilterNetTest, TestFilterInByMultipleStage) { - const string& input_proto = - "state: { stage: 'mystage' } " - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " include: { stage: 'myotherstage' } " - " include: { stage: 'mystage' } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - " include: { stage: 'mystage' } " - "} "; - this->RunFilterNetTest(input_proto, input_proto); -} - -TEST_F(FilterNetTest, TestFilterInByMultipleStage2) { - const string& input_proto = - "state: { stage: 'mystage' stage: 'myotherstage' } " - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " 
include: { stage: 'mystage' stage: 'myotherstage' } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - " include: { stage: 'mystage' } " - "} "; - this->RunFilterNetTest(input_proto, input_proto); -} - -TEST_F(FilterNetTest, TestFilterInByNotStage) { - const string& input_proto = - "state: { stage: 'mystage' } " - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " include: { not_stage: 'myotherstage' } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - " include: { not_stage: 'myotherstage' } " - "} "; - this->RunFilterNetTest(input_proto, input_proto); -} - -TEST_F(FilterNetTest, TestFilterOutByNotStage) { - const string& input_proto = - "state: { stage: 'mystage' } " - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " include: { not_stage: 'mystage' } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - " include: { not_stage: 'mystage' } " - "} "; - const string& output_proto = - "state: { stage: 'mystage' } " - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} "; - this->RunFilterNetTest(input_proto, output_proto); -} - -TEST_F(FilterNetTest, TestFilterOutByMinLevel) { - const string& input_proto = - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " include: { 
min_level: 3 } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - "} "; - const string& output_proto = - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - "} "; - this->RunFilterNetTest(input_proto, output_proto); -} - -TEST_F(FilterNetTest, TestFilterOutByMaxLevel) { - const string& input_proto = - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " include: { max_level: -3 } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - "} "; - const string& output_proto = - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - "} "; - this->RunFilterNetTest(input_proto, output_proto); -} - -TEST_F(FilterNetTest, TestFilterInByMinLevel) { - const string& input_proto = - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " include: { min_level: 0 } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - "} "; - this->RunFilterNetTest(input_proto, input_proto); -} - -TEST_F(FilterNetTest, TestFilterInByMinLevel2) { - const string& input_proto = - "state: { level: 7 } " - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' 
" - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " include: { min_level: 3 } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - "} "; - this->RunFilterNetTest(input_proto, input_proto); -} - -TEST_F(FilterNetTest, TestFilterInByMaxLevel) { - const string& input_proto = - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " include: { max_level: 0 } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - "} "; - this->RunFilterNetTest(input_proto, input_proto); -} - -TEST_F(FilterNetTest, TestFilterInByMaxLevel2) { - const string& input_proto = - "state: { level: -7 } " - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " include: { max_level: -3 } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - "} "; - this->RunFilterNetTest(input_proto, input_proto); -} - -TEST_F(FilterNetTest, TestFilterInOutByIncludeMultiRule) { - const string& input_proto = - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " include: { min_level: 2 phase: TRAIN } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - " include: { min_level: 2 phase: TEST } " - "} "; - const string& input_proto_train = - "state: { level: 4 phase: 
TRAIN } " + input_proto; - const string& input_proto_test = - "state: { level: 4 phase: TEST } " + input_proto; - const string& output_proto_train = - "state: { level: 4 phase: TRAIN } " - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " include: { min_level: 2 phase: TRAIN } " - "} "; - const string& output_proto_test = - "state: { level: 4 phase: TEST } " - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - " include: { min_level: 2 phase: TEST } " - "} "; - this->RunFilterNetTest(input_proto_train, output_proto_train); - this->RunFilterNetTest(input_proto_test, output_proto_test); -} - -TEST_F(FilterNetTest, TestFilterInByIncludeMultiRule) { - const string& input_proto = - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " include: { min_level: 2 phase: TRAIN } " - " include: { phase: TEST } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - " include: { min_level: 2 phase: TEST } " - " include: { phase: TRAIN } " - "} "; - const string& input_proto_train = - "state: { level: 2 phase: TRAIN } " + input_proto; - const string& input_proto_test = - "state: { level: 2 phase: TEST } " + input_proto; - this->RunFilterNetTest(input_proto_train, input_proto_train); - this->RunFilterNetTest(input_proto_test, input_proto_test); -} - -TEST_F(FilterNetTest, TestFilterInOutByExcludeMultiRule) { - const string& input_proto = - "name: 'TestNetwork' " - "layer { " - " name: 'data' 
" - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " exclude: { min_level: 2 phase: TRAIN } " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - " exclude: { min_level: 2 phase: TEST } " - "} "; - const string& input_proto_train = - "state: { level: 4 phase: TRAIN } " + input_proto; - const string& input_proto_test = - "state: { level: 4 phase: TEST } " + input_proto; - const string& output_proto_train = - "state: { level: 4 phase: TRAIN } " - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'loss' " - " type: 'SoftmaxWithLoss' " - " bottom: 'innerprod' " - " bottom: 'label' " - " exclude: { min_level: 2 phase: TEST } " - "} "; - const string& output_proto_test = - "state: { level: 4 phase: TEST } " - "name: 'TestNetwork' " - "layer { " - " name: 'data' " - " type: 'Data' " - " top: 'data' " - " top: 'label' " - "} " - "layer { " - " name: 'innerprod' " - " type: 'InnerProduct' " - " bottom: 'data' " - " top: 'innerprod' " - " exclude: { min_level: 2 phase: TRAIN } " - "} "; - this->RunFilterNetTest(input_proto_train, output_proto_train); - this->RunFilterNetTest(input_proto_test, output_proto_test); -} - -TYPED_TEST(NetTest, TestReshape) { - typedef typename TypeParam::Dtype Dtype; - // We set up bottom blobs of two different sizes, switch between - // them, and check that forward and backward both run and the results - // are the same. 
- Caffe::set_random_seed(this->seed_); - Caffe::set_mode(Caffe::CPU); - FillerParameter filler_param; - filler_param.set_std(1); - GaussianFiller filler(filler_param); - Blob blob1(4, 3, 9, 11); - Blob blob2(2, 3, 12, 10); - filler.Fill(&blob1); - filler.Fill(&blob2); - - this->InitReshapableNet(); - Blob* input_blob = this->net_->input_blobs()[0]; - Blob* output_blob = this->net_->output_blobs()[0]; - input_blob->Reshape(blob1.num(), blob1.channels(), blob1.height(), - blob1.width()); - caffe_copy(blob1.count(), blob1.cpu_data(), input_blob->mutable_cpu_data()); - this->net_->ForwardPrefilled(); - // call backward just to make sure it runs - this->net_->Backward(); - Blob output1(output_blob->num(), output_blob->channels(), - output_blob->height(), output_blob->width()); - caffe_copy(output1.count(), output_blob->cpu_data(), - output1.mutable_cpu_data()); - - input_blob->Reshape(blob2.num(), blob2.channels(), blob2.height(), - blob2.width()); - caffe_copy(blob2.count(), blob2.cpu_data(), input_blob->mutable_cpu_data()); - this->net_->ForwardPrefilled(); - this->net_->Backward(); - Blob output2(output_blob->num(), output_blob->channels(), - output_blob->height(), output_blob->width()); - caffe_copy(output2.count(), output_blob->cpu_data(), - output2.mutable_cpu_data()); - - input_blob->Reshape(blob1.num(), blob1.channels(), blob1.height(), - blob1.width()); - caffe_copy(blob1.count(), blob1.cpu_data(), input_blob->mutable_cpu_data()); - this->net_->ForwardPrefilled(); - this->net_->Backward(); - for (int i = 0; i < output1.count(); ++i) { - CHECK_EQ(*(output1.cpu_data() + i), *(output_blob->cpu_data() + i)); - } - - input_blob->Reshape(blob2.num(), blob2.channels(), blob2.height(), - blob2.width()); - caffe_copy(blob2.count(), blob2.cpu_data(), input_blob->mutable_cpu_data()); - this->net_->ForwardPrefilled(); - this->net_->Backward(); - for (int i = 0; i < output2.count(); ++i) { - CHECK_EQ(*(output2.cpu_data() + i), *(output_blob->cpu_data() + i)); - } -} - 
-TYPED_TEST(NetTest, TestSkipPropagateDown) { - // check bottom_need_backward if propagate_down is true - this->InitSkipPropNet(false); - vector vec_layer_need_backward = this->net_->layer_need_backward(); - for (int layer_id = 0; layer_id < this->net_->layers().size(); ++layer_id) { - string layer_name = this->net_->layer_names()[layer_id]; - if (layer_name == "loss") { - // access to bottom_need_backward coresponding to label's blob - bool need_back = this->net_->bottom_need_backward()[layer_id][1]; - // if propagate_down is true, the loss layer will try to - // backpropagate on labels - EXPECT_TRUE(need_back) << "bottom_need_backward should be True"; - } - // layer_need_backward should be True except for data and silence layers - if (layer_name.find("data") != std::string::npos || - layer_name == "silence") { - EXPECT_FALSE(vec_layer_need_backward[layer_id]) - << "layer_need_backward for " << layer_name << " should be False"; - } else { - EXPECT_TRUE(vec_layer_need_backward[layer_id]) - << "layer_need_backward for " << layer_name << " should be True"; - } - } - // check bottom_need_backward if propagat_down is false - this->InitSkipPropNet(true); - vec_layer_need_backward.clear(); - vec_layer_need_backward = this->net_->layer_need_backward(); - for (int layer_id = 0; layer_id < this->net_->layers().size(); ++layer_id) { - string layer_name = this->net_->layer_names()[layer_id]; - if (layer_name == "loss") { - // access to bottom_need_backward coresponding to label's blob - bool need_back = this->net_->bottom_need_backward()[layer_id][1]; - // if propagate_down is false, the loss layer will not try to - // backpropagate on labels - EXPECT_FALSE(need_back) << "bottom_need_backward should be False"; - } - // layer_need_backward should be False except for innerproduct and - // loss layers - if (layer_name == "innerproduct" || layer_name == "loss") { - EXPECT_TRUE(vec_layer_need_backward[layer_id]) - << "layer_need_backward for " << layer_name << " should be True"; - 
} else { - EXPECT_FALSE(vec_layer_need_backward[layer_id]) - << "layer_need_backward for " << layer_name << " should be False"; - } - } -} - -} // namespace caffe From 48fdb085281f37f0a442d392891f409c2e595335 Mon Sep 17 00:00:00 2001 From: Wangyida Date: Tue, 22 Sep 2015 09:41:17 +0800 Subject: [PATCH 82/82] restore --- Makefile | 5 +- examples/triplet/mnist_siamese.ipynb | 154 -------- include/caffe/blob.hpp | 2 - include/caffe/solver.hpp | 14 +- include/caffe/util/im2col.hpp | 24 -- include/caffe/vision_layers.hpp | 121 ++---- src/caffe/blob.cpp | 11 - src/caffe/layers/base_conv_layer.cpp | 235 ++++------- src/caffe/layers/conv_layer.cpp | 31 +- src/caffe/layers/conv_layer.cu | 16 +- src/caffe/layers/cudnn_conv_layer.cpp | 46 +-- src/caffe/layers/cudnn_conv_layer.cu | 18 +- src/caffe/layers/deconv_layer.cpp | 31 +- src/caffe/layers/deconv_layer.cu | 16 +- src/caffe/layers/im2col_layer.cpp | 171 +++----- src/caffe/layers/im2col_layer.cu | 41 +- src/caffe/test/test_convolution_layer.cpp | 409 ++++---------------- src/caffe/test/test_deconvolution_layer.cpp | 159 +------- src/caffe/test/test_im2col_layer.cpp | 30 +- src/caffe/test/test_triplet_loss_layer | 125 ++++++ src/caffe/util/im2col.cpp | 116 ------ src/caffe/util/im2col.cu | 306 +-------------- src/caffe/util/upgrade_proto.cpp | 6 +- 23 files changed, 444 insertions(+), 1643 deletions(-) delete mode 100644 examples/triplet/mnist_siamese.ipynb create mode 100644 src/caffe/test/test_triplet_loss_layer diff --git a/Makefile b/Makefile index 2d59ee855ff..287fa4ea52f 100644 --- a/Makefile +++ b/Makefile @@ -329,9 +329,8 @@ else # OS X packages atlas as the vecLib framework LIBRARIES += cblas # 10.10 has accelerate while 10.9 has veclib - XCODE_CLT_VER := $(shell pkgutil --pkg-info=com.apple.pkg.CLTools_Executables | grep 'version' | sed 's/[^0-9]*\([0-9]\).*/\1/') - XCODE_CLT_GEQ_6 := $(shell [ $(XCODE_CLT_VER) -gt 5 ] && echo 1) - ifeq ($(XCODE_CLT_GEQ_6), 1) + XCODE_CLT_VER := $(shell pkgutil 
--pkg-info=com.apple.pkg.CLTools_Executables | grep -o 'version: 6') + ifneq (,$(findstring version: 6,$(XCODE_CLT_VER))) BLAS_INCLUDE ?= /System/Library/Frameworks/Accelerate.framework/Versions/Current/Frameworks/vecLib.framework/Headers/ LDFLAGS += -framework Accelerate else diff --git a/examples/triplet/mnist_siamese.ipynb b/examples/triplet/mnist_siamese.ipynb deleted file mode 100644 index 8e076663ca6..00000000000 --- a/examples/triplet/mnist_siamese.ipynb +++ /dev/null @@ -1,154 +0,0 @@ -{ - "metadata": { - "description": "Extracting features and plotting the Siamese network embedding.", - "example_name": "Siamese network embedding", - "include_in_docs": true, - "priority": 6, - "signature": "sha256:845bb18929f96543ba2611eb5eca744fd98939cbef876df6bc319c29f616fc64" - }, - "nbformat": 3, - "nbformat_minor": 0, - "worksheets": [ - { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Setup\n", - "\n", - "Import Caffe and the usual modules." - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "import numpy as np\n", - "import matplotlib.pyplot as plt\n", - "%matplotlib inline\n", - "\n", - "# Make sure that caffe is on the python path:\n", - "caffe_root = '../../' # this file is expected to be in {caffe_root}/examples/siamese\n", - "import sys\n", - "sys.path.insert(0, caffe_root + 'python')\n", - "\n", - "import caffe" - ], - "language": "python", - "metadata": {}, - "outputs": [], - "prompt_number": 1 - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Load the trained net\n", - "\n", - "Load the model definition and weights and set to CPU mode TEST phase computation with input scaling." 
- ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "MODEL_FILE = 'mnist_siamese.prototxt'\n", - "# decrease if you want to preview during training\n", - "PRETRAINED_FILE = 'mnist_siamese_iter_50000.caffemodel' \n", - "caffe.set_mode_cpu()\n", - "net = caffe.Net(MODEL_FILE, PRETRAINED_FILE, caffe.TEST)" - ], - "language": "python", - "metadata": {}, - "outputs": [], - "prompt_number": 2 - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Load some MNIST test data" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "TEST_DATA_FILE = '../../data/mnist/t10k-images-idx3-ubyte'\n", - "TEST_LABEL_FILE = '../../data/mnist/t10k-labels-idx1-ubyte'\n", - "n = 10000\n", - "\n", - "with open(TEST_DATA_FILE, 'rb') as f:\n", - " f.read(16) # skip the header\n", - " raw_data = np.fromstring(f.read(n * 28*28), dtype=np.uint8)\n", - "\n", - "with open(TEST_LABEL_FILE, 'rb') as f:\n", - " f.read(8) # skip the header\n", - " labels = np.fromstring(f.read(n), dtype=np.uint8)" - ], - "language": "python", - "metadata": {}, - "outputs": [], - "prompt_number": 3 - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Generate the Siamese features" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "# reshape and preprocess\n", - "caffe_in = raw_data.reshape(n, 1, 28, 28) * 0.00390625 # manually scale data instead of using `caffe.io.Transformer`\n", - "out = net.forward_all(data=caffe_in)" - ], - "language": "python", - "metadata": {}, - "outputs": [], - "prompt_number": 4 - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Visualize the learned Siamese embedding" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "feat = out['feat']\n", - "f = plt.figure(figsize=(16,9))\n", - "c = ['#ff0000', '#ffff00', '#00ff00', '#00ffff', '#0000ff', \n", - " '#ff00ff', '#990000', '#999900', '#009900', '#009999']\n", - "for i in range(10):\n", - " 
plt.plot(feat[labels==i,0].flatten(), feat[labels==i,1].flatten(), '.', c=c[i])\n", - "plt.legend(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'])\n", - "plt.grid()\n", - "plt.show()" - ], - "language": "python", - "metadata": {}, - "outputs": [ - { - "metadata": {}, - "output_type": "display_data", - "png": "iVBORw0KGgoAAAANSUhEUgAAA54AAAIXCAYAAAD0R4FDAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvXtwXOWZr/usvurWUktqGdmxaawEHEMuthGXITiIyMaJ\nwbEMFmCTDMkkoyqSyTnZMwdqpmYyzEyS2ruKue2ZqSTHO/vYGQbhCxdjwI637ViWMEEEMJhgB4MB\ngSRLsizJkiypuyX1+WP1Wlp971YvSd3y+1S5rF69Lt/6+lOrf/2+v/dVgsEggiAIgiAIgiAIgjBT\nWOZ6AIIgCIIgCIIgCML8RoSnIAiCIAiCIAiCMKOI8BQEQRAEQRAEQRBmFBGegiAIgiAIgiAIwowi\nwlMQBEEQBEEQBEGYUUR4CoIgCIIgCIIgCDNKRsJTUZQ8RVFaFUV5U1GUU4qi/HezBiYIgiAIgiAI\ngiDMD5RM+3gqilIQDAZHFEWxAS8B/08wGHzJlNEJgiAIgiAIgiAIOU/GqbbBYHAk9KMDsAJ9mZ5T\nEARBEARBEARBmD9kLDwVRbEoivIm0A0cDQaDpzIfliAIgiAIgiAIgjBfMCPiORkMBlcAi4EvK4pS\nk/GoBEEQBEEQBEEQhHmDzawTBYPBi4qivAhUA03adkVRMjORCoIgCIIgCIIgCFlNMBhUEj2fkfBU\nFMUDjAeDwQFFUfKBtcDfxxhEJpcRhDC+9a1vsWPHjrkehjCPkDUlmImsJ8FsZE0JZiNrSjAbRUmo\nOYHMI54LgV8pimJBTdt9PBgMHsnwnIIgCIIgCIIgCMI8IiPhGQwG3wZWmTQWQUiJq666aq6HIMwz\nZE0JZiLrSTAbWVOC2ciaEuaCjIsLCcJsU1NTM9dDEOYZsqYEM5H1JJiNrCnBbGRNCXOBCE9BEARB\nEARBEARhRjGtqq0gCIIgCIIgCIIQTSrFd3KF6RaOVWa64qyiKEGpaisIgiAIgiAIwuWKoijzotNH\nvPsIbU+oriXVVhAEQRAEQRAEQZhRRHgKOUdTU9NcD0GYZ8iaEsxE1pNgNrKmBLORNSXMBSI8BUEQ\nBEEQBEEQhBlFPJ6CIAiCIAiCIAgziHg8JeIpCIIgCIIgCIJwWdPX18emTZsoKiriqquu4sknnzT9\nGiI8hZxDfAmC2ciaEsxE1pNgNrKmBLORNSVE8v3vf5+8vDx6enp44okneOihhzh16pSp1xDhKQiC\nIAiCIAiCcJly6dIlnnnmGX784x9TUFDAl770JTZu3Mjjjz9u6nXE4ykIgiAIgiAIgjCDJPV4NjTA\nmTNQUACNjeB2p3eBDI4/ceIEt956K5cuXdK3/fM//zNNTU3s27cvpfsQj6cgCIIgCIIgCEK2c+YM\nHDsGBw6oInIWjx8eHqa4uDhsm8vlYmhoKP1xJECEp5BziC9BMBtZU4KZyHoSzEbWlGA2sqaykIIC\n9f/qati2bVaPLyoqYnBwMGzbxYsXcblc6Y8jASI8BUEQBEEQBEEQ5pLGRqivh0OH0k+zzfD4a665\nhvHxcd5//31921tvvcXnPve59MeRAPF4CoIgCIIgCIIgzCDZ3sdzy5YtKIrCL3/5S9544w3uuusu\nfvvb37J8+fKw/cTjKQiCIAiCIAiCIEyLn/3sZ4yOjrJgwQK+
8Y1v8Itf/CJKdGaKCE8h5xBfgmA2\nsqYEM5H1JJiNrCnBbGRNCZGUlpby7LPPMjw8zEcffcT9999v+jVEeAqCIAiCIAiCIAgzing8BUEQ\nBEEQBEEQZpBs93iming8BUEQBEEQBEEQhKxFhKeQc4gvQTAbWVOCmch6EsxG1pRgNrKmhLlAhKcg\nCIIgCIIgCIIwo4jHUxAEQRAEQRAEYQYRj6dEPAVBEARBEARBEIQZRoSnkHOIL0EwG1lTgpnIehLM\nRtaUYDaypoS5QISnIAiCIAiCIAiCMKOIx1MQBEEQBEEQBGEGyWaP53/8x3+wY8cOfv/737Nlyxa2\nb98ed99MPJ62zIcqCIIgCIIgCIIg5CKf+tSn+NGPfsTBgwcZHR2dsetIqq2Qc4gvQTAbWVOCmch6\nEsxG1pRgNrKmBCObNm1i48aNlJeXz+h1RHgKgiAIgiAIgiDMKQ1ADbAeGJiD45nxVGDxeAqCIAiC\nIAiCIMwgyT2eNcCx0M/1wO40r5Dp8fCjH/2I9vZ28XgKgiAIgiBoNDc3MDBwBputgNraRpxO91wP\nSRAEIQMKQv9XA9vm4PiZj3hKqq2Qc4gvQTAbWVOCmch6mh0GBs7Q1XWM9vYDtLQ0zPVwZhRZU4LZ\nyJrKRhpRI5WHgOl8kZbp8WrUciaRiKcgCIIgCDmHzaZ+u+/xVLN69fS+3RcEQcge3EwnPdaM4ycm\nJggEAoyPjzMxMYHP58Nms2G1WjMYTzTi8RQEQRAEIefw+QZoaWlg9eptkmYrCELWk819PP/u7/6O\nf/iHf4ja9rd/+7dR+2bi8RThKQiCIAiCIAiCMINks/BMh0yEp3g8hZxDfAmC2ciaEswkcj01Nzew\nb18N+/evx+ebXon7TMmGMQjTR96jBLORNSXMBSI8BUEQBGEGyYYiONkwBkEQBOHyRlJtBUEQBGEG\n2b9/Pe3tB/B4qrnzzkNz4kfMhjEIgiBczkiqrQhPQRAEQZhRsqEITjaMQRAE4XJGhKek2go5iPgS\nBLORNSWYSeR6cjrdrFmze04FnxljEJ/o3CHvUYLZyJoS5gLp4ykIgiAIOUZzcwMDA2ew2QqorW2c\nFVGr+UQBWloaWLMmk35zU8zFvQiCIAizj6TaCoIgCEKOsW9fjS4Cq6rqTROBiUjHJ5qOmJyLexEE\nQZhtJNVWUm0FQRAEIeew2QoA8HiqWb16W9hzM5USW1vbSFVVfUrFidKpopvoXgRBEIT5gwhPIecQ\nX4JgNrKmBDOZjfWUSATOVOuUdHyi6YjJdATt5Yq8RwlmI2tKmAtEeAqCIAhCDmCMZAJxRWA2RBDz\n8ytwOj0pCclsKL4kCIJwOeP3+/nOd77DVVddRXFxMStXruTXv/616dcRj6cgCIJw2ZMLBW5S9UJm\nQ+sU8W0KgiCEk80ez5GRER577DG+/e1vc+WVV/Liiy+yZcsW3n77bbxeb9i+mXg8paqtIAiCcNkz\nUxVbzSTVSKYWQYyFWQI72XmyIeqqkQtfKgiCIMwlBQUFPProo/rjO++8k6VLl/LGG29ECc9MkFRb\nIecQX4JgNrKmBDOF0kytJzO8kGb5P5OdJ5t8mzPleZ1N5D1KMBtZU9lHAw3UUMN61jNA+oXhMj3e\nSHd3N2fOnOG6667L6DyRSMRTEARBuOyprW2c8/TUZLS2PsLISA9HjmxNO3KnRf36+98BUhPYiSKF\nyYR6oqhrJuza9VlGRrqwWOzcffdruFzJv4nPpuirIAhCPM5whmOomTcNNLCb9N5DMz1eIxAI8MAD\nD/Ctb32La665ZlrniId4PAVBEAQhy4gl+tLxTUYef/BgnX5sYeFiNm9+O6lwTXS9ufKRbt/uJhC4\nCKj38cADnyQ9Jhs8r4IgCMk8nutZzwEOUE01hziEm/TerzI9HmBycpKtW7cyPDzMc889h9VqTfk+\nxOMpCIIgCFlMvKhiLM9p
rMhdqscbj001/TVRpHCmIprJsFjsAFitBXz96y/F3U+bl6GhsxQWenE4\nimdriIIgCNOikUYaaGAb26YlGjM9PhgM8p3vfIfz58+zf//+mKIzU8TjKeQc4ksQzEbWlADh7Up8\nvun7Y9JZT/H8h4ODZ0M/WRkd7cHnG4jpm4x3fKRonI7nMpt8mhp33/0ahYWLuffeUwnTbLV5uXSp\nnZ6e4znt7wR5jxLMR9ZU9uHGzW52T0s0mnH8Qw89xB/+8Af27duH0+mc1jmSIcJTEARBuGxIJC7n\nogiNJjDt9mJuuukxffvkpD/00wTnzh2jpaUhZr/LeFHJSNE4nV6Z2dhf0+Xy8sADnyT1dmrzYreX\nAOLvFARBSERbWxvbtm3jrbfeorKyEpfLhcvl4sknnzT1OuLxFARBEC4bEvkW9+9fT3v7gbTSUdNF\nSwEdHDxLMBhgdLQXmIgaz44dZfj9/YAqnrZu/SjmeDL1L2baaiRbW5Vo83LTTY/R2vqw+DsFQZhz\nsrmPZzpk4vEU4SkIgiBcNiQSl7NRhMYofI1YLA48nhtwOIqprW3k0KF6OjsPY7eXsGTJ1xgZOYfN\nVsDg4HuMjp5PqaprKqIwlhBvbm6gre15JiZ8eDzXs3btnrjzkU7BI0EQhMsZEZ6SaivkIOJLEMxG\n1tTlQyLfolmppYnW01QK6FSxG4fDTVnZyjAv4tq1e6iqqmfr1o84d65JTwEeHPyIQOAiPl8vu3Zd\nE9eP2tzcwNmzu5OmDsdK1R0YOMPoaBd+fz+dnYdpaWngf/0vB9u2KWzbZuHcuZcSHp8uZnlr5zPy\nHiWYjawpYS4Q4SkIgiBcNqQjLmdCEGnCd/Pmk3i9G/F669iy5UPy8soAVcBZrfns3r2c9vbDHDpU\nz8TEmOEM4/pPk5P+uKJyYOCM3nYELHz88UF+9asKhobawu7twoW3cDrLcTiifaMAZWUrWL16G8Fg\nILQlyPPP36afIxAYJD+/krVrn5q2YJ8Lb60gCIIw+0iqrSAIgpDVzJWPcDbTSI8efZCPP96Px7OC\nCxdOMjbWoz+nKHaD8FOxWBxMTvqx20vYvPktPeVWm6uenleYnPShKDYsljwmJoaBqd6XsVJ+tXv0\n+QZoavo2EKSmZgdOp5tt2yyA+rd8w4YWFi681bT5mQ1vrSAIwlwjqbbSx1MQBEHIcmL1tJwNUk0j\nzUQYa8f29Z3E7++no+MwimIP20cVnQqa8AO1yTdAIHCRZ56ppqLiBmprG8PmSj12XK+Qa7UWYLE4\n2L7dzfj4SNg1HI5SrNZ89u2riXkfGzY08/zzt7FhwzEWLrw1rflJRm1t44x7awVBEIS5RyKeQs7R\n1NRETU3NXA9DmEfImspuzI6IpSoUUy02ZIz8uVxL+eCDYlauXER+vprammpxH1BFnN1eyLlz0QWI\nNCyWPK644ibOnTuGzVbE+LgazayqqsfvH6a9/YC+r93uprj40/T3/55Nm15l374vG1JwVRTFxt13\nv87x4/+XPpaioqUUFV2ZcOyx5idbq9zmOvIeJZiNrKnZRyKe4vEUBEEQspxEBYESEc+jmaqnMFU/\nqDHyV1CwiL6+t2hvP8DHHx9IubhPWdkKvN46Skuvpb//dNR+ZWVf4FOfWktBwSKuuqqOyclx8vMr\nqai4PnSeIsbG+lm9+udhxwUCA1y48DqTkz5eeGGNIdJpwWp1AWpU9MUX12G1qpFWRbExPNyWdOxO\npxuHw83Bg3X6HItfUxAEQYiHRDwFQRCEeYkxmpifX8m9957G6XSbHkE1Rv6OHNmqn9vhcNPZeRib\nrYgrrriZNWvUtiS7dn2WkZEuLBY7d911hBMnfqJHDR9/fCGjo11h53c4SrHZ8pmcHGdyMkAgMGzw\nfNqwWCx6Oq3XW0db296oMRojo/HwejfS1fUyPt/5sGtv2fJBVERzaOgsExMBxsbC+5BqEd
d05lai\npIIgXA5IxFOEpyAIgjBP0QSmhrF4jlmeQqNoys+v4OLFs1y48DplZV/Ebndx4cIJfL4LAOTlVeJw\nuBgcfE8/vrBwMUuWfE0/R0/Pb/H7p19B1+vdSFvb88Ckvq2gYBFu97V0dh4O29fpLMfvv0gwOE5Z\n2Qo2bDiqC2dQ27zcc8+bnDjxU318vb2vhxU+0tAEKpD23EovUEEQLgdEeIrwFHIQ8SUIZiNran7i\n8w2we/dyRke7oiJwZkXZjKLJ6fTg8/Xy7ruwbJn6fH5+ZVgEU1FsBINTLVEWLryNiYlxenqOT/Mu\nw1Er4E6iRSGt1gIqK2/hy1/+Jbt3f5aJiTHsdheVlas5f/41XUTabIWhCrg2PJ5V2GyF9Pf/ntHR\n84yPXzKMObzIkXbNpUvvYWTk3LTmczaq2uZ6VFXeowSzkTU1+2S78PzGN77BkSNHuHTpEh6Ph+98\n5zv89V//ddR+4vEUBEEQhAicTjf33ns6pj/ULC+i0d/p8awIe87hKGXhwhq9Sm1Z2QocjpKwfc6d\nO0ZPz8sJrpDen2k1BVcVnYpiZ2JihI6Ow7S2Poz2OSEQGAJgcnKqRcv4+BiBwEV8vgt0dBzmllv+\nldHR8wQCF8OEcqTotNuLuf/+9xgZOTft+dQ8vKWl14b5Rc1EvKeCIAiJ+au/+is+/PBDBgcHOXDg\nAP/+7//Or3/9a1OvIcJTyDnkGzrBbGRNzV/iFQjKtBWIVrgoGAxQVOTFanUCasXZZcvUyOaGDU2M\njJzT/ZiXLn1CWdnnKChYRH5+ZehMViLF3BQKxpRZu70ktC05ijLVLc1ud3HTTY8xOTmmb+vtfQOP\nRy1MVFa2ImJ+guzbdysWiyqYrdYCnE4PAOXlK1myZD1LlqynqMhLWdnnaWl5SN83cj7jFXgyor1G\nQ0PJCxpNF7Nav8wV8h4lmI2sKSGS6667jry8PP2xzWZjwYIFpl5DhKcgCIJw2THdSrkaWgSto+Mw\ngcAluruP09FxGLu9EFArxf6f/1NHX99J/Rif7wLnzh2jouIGios/Hdo6keAqU4JUUewsWfI1Uv2z\nHQyO64I3EBjihRduD3ve41nF2rV7qKqqZ8OGo9x99+toolZRrIyP+5icDKAoDurqfsv9979HUdFS\nrNYC+vpO4vNdxO8fpLv7OO3tB7DbizKOLM+kOMz09RYEQZhpGoAaYD0wnZyPTI8H+N73vkdhYSHX\nXXcdf/M3f8OqVaumeabYiMdTyDnElyCYjawpIV2MvkSn001Hx2G9ku3Ro4f50peqsVqddHdHezeX\nLFnPhQsnGRlpT+laDoebxYu/yocfPm2oZps6dnsJZWWf08eiKHYqKm7k/PlXCAaDKIqVu+9+DYej\nhH37bqWg4FOcP9+qH+90VlBRUY3fPxjTi5rIm5mOfzPdok/NzQ20tT3PxIQPj+d61q7dM29FpbxH\nCWYja2r2SebxrAG0Ds71QLpl1jI9XiMYDHLs2DE2b97M/v37ufHGG8Oez8TjaUv0pCAIgiBc7kRW\nrh0aasNqteP1bqSmZgcwVckV4N1367jzzr0cObI15vk6OtQWK6mgKDbKy79IZ+dvpiU6QSEYnKSn\n51VA9Z0WF18TJiCDwUmefnolDz54gQce+IT9+9eHHe/znae9/QB5eZVEUlCwKGHRptraRp55ZhVW\nq5MjR7YmLOyjpdymysDAGb1wU2fnYVpaGnA43DldREgQhMuXgtD/1cB0cj4yPV5DURRqamqor6/n\nySefjBKemSART0EQBEEwoImnwcGzuFxe+vtP4ff3A1rVWFUAxmr9YRReq1f/nGefvZmxsa6oa4Rj\nBSawWBx6P04zyMtbwPj4GOPjg/p1Fiy4md7e15ic9EWPwppPefkqbLZ8rFYH3d2t+P1qKxiHo5R7\n7jnBM8/coPf5dDjcbNnyYZi4i9UaxbitqGgpRUVXRg
nDVKrORu5jbP2itYM5eLBOWrMIgpCVJIt4\nDqCmy24DpvOVWabHR/Ld736XyspKfvKTn4Rtl6q2giAIgmASmi9xZKSd7u7juugEdNEZz4do9DS2\ntj7Mffed1gvzxGPDhiZcrqVYrXkJ90uXsbEeg+i0UF6+gp6e4zFFJ8DExCg9Pcfp7DxMd/crKIr6\n+cFicVBScg0tLQ9RXv5FQBOib6ZUtMm4raBgUUzPZype0Mh9amsb8Xrr8Ho3smHDUZxOd84XERLm\nK2a474T5jhs1PXa6ojGT48+fP8/OnTu5dOkSExMTHDx4kD179rBx48ZpjiY2IjyFnKOpqWmuhyDM\nM2RNXd5EVl7VxIvdXhy1b1nZCrzeurh+RZutgHffnRI+TqebioobEl7/nXf+jcLCKwkEBhPulxmT\n9PefSnlvv78Pn68Xi8VJeflKzp9v1YsIuVxLKS29lpaWh6Iq1cYq4mPc5nCocxopDCMFo/aaPPHE\nEvbuvZX9+9dHVc51Ot2sW/cs69btjXmt+ZRmK+9Ruc4ZVPfdAVQROvfImhKMKIrCL37xCxYvXkx5\neTk/+tGPePzxx7nhhsR/v9JFPJ6CIAhCzpFKamaqaJE0QI+ktbQ0cNNNj7F793ImJkax211UVq7m\nK195Iupakem1p08/SGmpl4MH6/Rte/fezOhoFzZbEePjw/qximJncPBjhobOTnv8qRLej1NDTfMF\nsNkKGR+/hN3uMvT69DE01AaA0+lhdPQ8Pt8AQ0MfAvDMM6soLAxPnXU43Pq9a55YTVhqcxtZQChy\nu/E1uXRJLcLk9dZRVLQUiyW+XzRdn6ggzA5mue8EYWbweDyz8mVERh5PRVGWAP8JLECt+74tGAz+\nW8Q+4vEUBEEQTCWWl3C6JKq8unfvrXohHqfTQ0XFDdTWNtLa+oguNgOBQb1irOZh7Os7qafoVlXV\nY7Xm8/HHBwgEBuOmuloszrjPzSRWax6LF3+VW275V1pbH+ammx4LE8oVFdczOPghPt+AIXUXbLYi\nJiZ8evqx11vHunXPhr02TmeF7gnNy6vkvvtOh81dvC8NtNfEbi8hELiovzbi4RRyE7Pdd0Iukszj\nmSvMZVXbAPDfgsHgm4qiFAGvK4pyKBgMns7wvIIgCIIQFzO9fPn5FTidnpgCSEsNtdmK8Pl6dX/h\nyEiPLoDy8yv1sVgsTn07qGJ1eLiTgYFTYV7RWMy86JyKbhqpqLiJmprtYdHCe+89zeOPVzI+Psy5\nc8dwOsvDRCcQFrkF9MJIxtfG4XDT2XkYgLGxrqi5i1eJ1hh1bm19GKs1n927lzM21guoKc/i4RRy\nB819l000AM8DPuB6YA8iioWZJiOPZzAY7AoGg2+Gfh4GTgOLzBiYIMRDfAmC2ciayj3M9PINDbXh\n8/XS0XE4qrCNdp0rrrgZmBK6mrjS2qIoioOLF9/j4sU/8O67YLUWkpe3AL9/iJ6e44aquOr3vQ6H\nO2nRIQ2bzWV4pKAKyPQxFt8x0tV1TL9vzVt55MhWrNb8qasqqV/T+NqsXbtHb8MSOXfaY2PRoJ07\nr9HbuaxZsxuXy8uaNbsZGmpjdLRLTxd2ua6aVx7OZMh7lGA2TU2vAl1AP3CYbPGeCvMb0zyeiqJc\nBawEWhPvKQiCIAiZYaaXL1H0VLuOzzcQ5kHMz69AUWyMjw/rkb9AQI34KYqViYlLTExcirpWMDiO\nzVaA232d7pNMRFnZ9fT1nTCegVhRy1Tw+S7E3K4oNnp732T7dneowJGaQuVweEJjWMG6dXtpbX2Y\njo7f4PNdwGrNZ9Gi22lvP6Sn2p4//zt8voGo1+a++06HzV2kn9Mo4rWeoTt3XkNFRbUeATV6YMvK\nvkBNzfZpzYEgCBpOw88rEe+pMBuY0sczlGbbBPwkGAzujXgu+OCDD3LVVVcB4Ha7WbFiBTU1NcDU\nt3jyWB7LY3ksj+
XxXDz+oz9aQUtLA5OTf4zDURT1vMXSyMDAGVpb3yE/v5JVq5YQCAzS3Kz6Opct\nA4B331X//+IXFzA21kNX1zWMjHRSVTWsP2+x5HH11T4gqO8febz2+IMPigkEBuM+b9bjlSsXMTLS\nzbvvToQ9399/EzZbHn/2Z2rV2KamJkZGuujvf5ivf/0lXn/9Q158cS1XXz2un2/Rotv4i79omtb8\n/+53Z+ntfYNlyyzAZNj59u69lZYWdb7vuGMj69btzZr1I4/ny+O7gHZqahYBjTQ1vZll44v1+B+p\nqRkGCmhq+h4Q/f4V//ELwP+gpqYC2J4j95vbj2+//fZ54/E8evQob775JgMDamXzjz76iF/96ldJ\nPZ4ZC09FUezAC8CBYDD4rzGel+JCgiAIQk7S3NzA2bO7CQQuhm3Pz69kdLQLh6OU0tJr9eJCpaWf\nZ3z8UiiaacHjqaav74Tuf0wVq1Ut6mP0i84csb2fDkcpixevY2TkXNxCQGqU9GJofzdbtnw47RRY\nn2+AnTuv0YsRORylbNnyAU6nO2EBqHQwsxqyMF9oQG13chI17RSgnuzzZMaiBrVNC+TOmC9fpLhQ\nhh5PRe0u/b+BU7FEpyDMBNq3SIJgFrKmhHgMDJzRhZXmz/R4qqmre4Wqqnq2bPmAr371BbzeOgoL\nr2R0tIfXXvsALSW2t7cVu10VN1N9QRP+XQZgYmKYrq7mNEdr0ceYHrFTd/3+fj74YLfuv2xq+nbU\nPgsWqD3eHA4399zzZlwhF9krNRZq2q2W/qdQXFzFkSNb8fkGTPP0Gv2kkX7ebEbeo2YSrcemJjpz\nqeXJ9Nu0TG9NNaCK3fWolXoFIT0yEp7Al4BvALcrinIi9O+rJoxLEARBEOYEo0gaHHwPUEXnXXcd\n1cWPVvTG6XTjdLpZt+5ZJif9jI11R51vwYJqqqrq2bz5JFVV9TgcpWHPxxeL6XwzbmHLlg/Iz1+Q\nxjGxiBTFxsfh42lubiAQGAWsTE5O8Mwz1XrPz0hSFXyFhV79Wr29r+v7a77RTCOUZlZDFuYLmnhb\nAdQBh8id6q6NqJHO2RqzJtIPIMWIhOlgiscz4QUk1VYQBEGYYcxMoQzvQ1luKMqjsHDhl7njjr0x\nz//LXzqjUmq1VNzh4TYKC704HMV0d78clbprJP1+nqrodLm8YX1H06WsbAWjo12MjnZFPWe3u9i8\n+W1OnPgpbW3P4/P1MTk5QWS0tLBwMQ888EnU8ammyk7171T9rZmm1kYSWSRKEOa+x6aW6luAKiTN\nHoOZ51+PKjqryS2Bnh1Iqm3mEU9BEARBmBOMkcm+vlOmpVAao2Iez0rDM0HOnTsW9/zGViWKYiMv\nbwElJdfQ3X2cS5fa6ek5Tnv7AaxWZ8zjVSw4naWk2jJFUWxcdVUdR48+yP7967HZ8pMfFPGn32Yr\nZMmS9ZSXf0Hvk2l8DiAQGKK19WEGBs4wOtoVEtjhotNqLeDrX38p5hXVXqkVOByJP6hqKbVadDhS\ndKaSspsIsyKnwnxC67E5V2vCGEW8mvTTWJOlv5oZpZztCKswF7z33nvk5eXxzW9+0/Rzi/AUcg7x\nughmI2u1e9m8AAAgAElEQVQqNzGmbw4Oqu02pptCaRQ0q1f/XBc9a9bsQVEc+n6lpZ9n9eptMQVQ\nRUU1AO+/n0cwOM7YWA+9vW8AasQQ1JYhbvdyLJZ44nMyFHFM1jJFFbb33/8+Y2MX9Hm4cOFE2Hhj\noSjhf/rHxy/R0XGE999v1Ptkqvs59HtSW530Y7Xaw46120vYsKGFwsLF3HvvKVwuL7FQe6Wep7Mz\nvFdq5DxqwtCYymwkVz2amZJb71HiA0wP7QurIqCX9AWiUViuInruY/tAp7em5lqkC7PB97//fW68\n8UbUUj7mYlofT0EQBEGYTYyRybVrn6K19eGUUygjU3M1QQPQ2vpwWB/KpUvv5sMP
n8HhKOarX30e\np9Mdtn9LSwMOh5tAYJS8vEqCwT79WK3HpcXixOFw4PerIlEtCK+hkJ6fE0AVti+//H9H9MGM3avT\niCoup65ptRYwMTESYz8/Docbp9ODz9dLR8dhFMXOpz61FovFjsVip6ZmB06nmwce+CRhunM8b2Xk\nPCbrzTpdj6ZUs51NNCEEqoCajUqrM52uaiaRY20MbesHDqMKxHxUAXkW8ALFxL8vTVh6gAuA1h94\nOXDacP65SiUWcomdO3dSWlrKtddey/vvv2/6+SXiKeQcWk8kQTCLy3VNZZq2ONcYK52eOPFTRkZ6\n9Cqoye4tMnKWSNCMjJwjGPTj8/XS2vowEC2ABgbO0NNznLGxLq65ZjLqej5fLxbLlNjUBGnoERZL\n4ihlOMY/3Qr5+RVYLE7Gx0dTOtpmK+aee97EYsnH4SiLm57rcJRSU7ODioobwsbd3/8OX/vai+Tn\nL+DgwTp9jo1zunPnNWFzH68qbbpCcrrVbXM9Uppb71HTr7Q6fXKp6M3zTI3128AjQE/oOa24UVto\nn3bgOInvS0t/tQCDhu1doWNiRymj15REqueahgaoqYH162FgGi9BpscPDg7y6KOP8i//8i8z5kUV\n4SkIwmVBQ3MzNfv2sX7/fgZ86RRumb9k84fxVNtvaOmYkfeiPf7v7eXcuPP/jXrdjYLHas3H7x/E\nas1DUay6eI21ryaOIgWQcR+HowQARbGiJRaVl69k06ZXyM+vDJ3VmMJkxWo1ir/o9Can04PDUUpe\n3gKuuOKPQtvK6e5+mfffbwwVI0qWnhu6mtVJUdGVLFhwI35/X8woqcNRyj33nMDpdFNb2xj2XHn5\nCiC+eFcjr+fD1lWkt1J7fScnA3i9dSkLyel6NKWa7WwyFz7AuRC706EBVRBq+JkSzYcBO+qcafej\nfVlVAjwWca7PAg6gAlW49kc8n+5c5JJ4n5+cOQPHjsGBA6qInO3jf/SjH/Hd736XRYsWzUiaLYjw\nFHKQ3PK6CNnCmYEBjnV1caC9nYaWlrDnLtc1lc0fxtMVxUbRMzY25UXss32ak75S/XXXBE8wGMDr\n3ciddx5iaKiNnp7jTEyMcf58a9Q1a2sbcbmWYrU6dVEaKYCMQrSi4j8oLFxMeXk1oHomh4ba2Lfv\n1lC7kMjU2gm9yq0qQKMLC/l8vfj9/YyN9dDd/dvQtgHGxnrCfJnxcDrLDec6T1PTt8KKIRlRFDsV\nFdfrAtrpdLNw4W2A6nHNy/Owb18N/f3vAFPrR5uDBQtu1rdbrfkxv0DQXt/OzsNYrfYZT301qw/o\nXJFb71Fz4QPMlaI3ZyIe24ktmrX7WRV6fBG4nfCIZBcQQH2POUb4e8oiEs9FA01NK0jFCyrMHgWh\nl6C6GrZN4yXI5Pg333yTI0eO8MMf/hBAIp6CIAiZUGBTI0/VHg/bVq+e49FkB9n8YTxSFCeLgKq+\nvQrGx4fp7DyMzVZIVVU9i69QP7hpr7smeDo6DmO1OsKilXZ7cdg1NZxON+Pjo3R3q1Vpn3zy01Hj\nMArRgoJKHnjgE/LyykL3UoTf38elS+309rYS6ee0Wl36ddUiRPGFpM1WBGipvKlFODdsaGFiIhCx\nVdFf/8g+osFggI6O8CJAd9yxl6qqejyelXz00XN0dR3D5+ulsHCxvn60OVi7do++roaG2vQvEHbv\nXq7P2Wx/6SHVbLMZM1I8s7nojfH+jN7uzwE7mBKZF4Ey1C+mqlAjnGWhfatRxaQxImk81xeAL4V+\nXgGsQU3bjTWnDaHrvhU617LQPrki3ucvjY1QXw+HDoF7Gi9BJscfO3aMjz76iCuvvJKFCxfyT//0\nTzz99NNUV1enP5AESB9PQRBynobmZs4MDFBgs9FYW4vbGV0xdMDno6GlhW2rV8d8XsguIvstGntr\nVlXVxyxCE6tXZOTrHmsf7Vo33fRYVIEirShN
d/fxqMhiVVU9Doc7btEa7bw+X79emEf1dlpRRaON\ngoIrmJjw4/cPUFl5C11dL0f4P9V9S0quxe2+hkBAFdbxsNmKGB8fDtvmci1laOjDsG2LFq1h7do9\nUXOrEa9/ZuS+DkcZ99zzBi6XV5+roaGzFBZ6GR5uY3x8GL9/6oOv9tql0k9TCgLlItMp8lPDVDGi\nemanGNFsUsPU/S1AFZG/B5agFg2qQPV0vkT4l0mLgHdQo56LgNcAX+iYk6F9bkEVmk+EHmtFhOoM\n16xELTKkvRbG8WjMx3nPPrK5j+fo6ChDQ0OAGu38x3/8Rz766CN+8YtfUF5eHrZvJn08RXgKgpDz\n1Ozbx7Eu1TdTX1XF7jVr5nhEgtnEEoyRpCJmUtnHSKTQsttdBAJD+jgOHqxLKIibmxvo6zvF4OBZ\nCgs/xYULrwNgsThYuPDLnDt3nMnJ5EWBvN6NrFu3F59vgJ07r8bn68VudxMIDGH8sLpkyXo++WR/\nxNFWYkVHi4qupKhoKRaLnXPnjhEMBrBa81m06Ha+8pUnosS3zVbA5GQgSvhaLE6++c2usLmIRaLX\nLhapfNkQOb54AtW4T35+BUNDbSJoZ4Qa0heR61Ejb9XkVrQtmcjWnn8FVTBqeFCLAPlDj50Rz2tY\nUFusjBCdBVEJ3IEqWGNdX5tTDeNrEfncCuBojPELZpPNwjOSv//7v+fs2bP853/+Z9RzmQhPSbUV\nco7c8roIs0GmabSyprKfVNKCW1sfCatsG4t0Uy61lNCyshV4vXVs3vx23KJCWsqocT0Zq90ODJwK\nbbUyOemno+NwSqLTYrHT3/8O27e72bnzaiorvxxKK76EUVDa7S5uvfVnRBcnMorOqeeGhz/WfZYO\nRwlWawF2exHd3b/l8OF6fQ6Nflu7vYiqqvowz+jkpI9du5brVXvt9pLQ/2rqcnn5St1Pm2zejSnV\n2vmSpeOm4gc27vPxxweytqhWPHLnPWo6PsFcTfE0FuOpRE2LXctUaqtWvdYoKq2ovTr9hm2xfz/V\nlPpBYqfedwFPEr8YUGNoTBD9WjQCG2lqugnYiIhOIRaPPvpoTNGZKSI8BUHIeRpra6mvquLQnXdK\nGu08JRXBmG5Boli+0chtmuAtL/8CPl8/LS0PhUVLkwliTZg6nR6Dl3IixjYj4cWFNm16jdHR8wQC\nF/H5emlrexaf73xESi4EAkO0tj4c8oFGoyhqlDUWPl8vExOjjI2dx+/vD/N4GsV1Tc121qzZzd13\nv47FMvW7NjbWpYvSzZvfCv1/kqqqeu666zesW7c3JbEfKXIjizrFIhW/qHGf8vIvJt1fmC7TEZHZ\n7M+MJJZf04YqLrU+nNp7T6woZnSrpaltVuBOwr2bidB+/2NVvHUDrtDYPkT1jxqf2wv8D9TU30Re\n0Jo4zwnC9JBUW0EQBGFekEo6LkylXfb1ncTvV1sQaKmc8dI7jdudzgoqKqqTpmka02xdLi/nz7cC\n4HCoVWLHxnrp7j6u719evpKioisZH79ER8dhLBY7mza9Rnn5F/jVryrw+XrDzq+l/Uam/2qpuEYs\nlnzuu+80DkcJO3deg893Puz5SG+oxeLA47kBh6OY1at/zvPP305BwSIcjmL9vn2+AXbtWs7YWJd+\n7dbWRzLyZUa+hslSmSH9FGsgrXRr4XLls6iRxTHUdNQy4HWmem6WoqbJjjDVP9OKWn12D6oAj+/H\nVlkGdAJDMZ6zMyUujdiIjoIuCe3rA64PXf8qpgTnYuCTGOeqIX5qdKLnhOmQS6m2iRCPpyAIOUMq\nhYCE7CJXiryk6t+M9G0aRdMHHzyF399PeflKyso+r3sBNW+jUaAl8h1GXic/v5LR0S69P6bL5dVF\nliY4a2p2hBU7Mt7H0FAbu3YtY3LSp+9/yy3/kxdeuJ28vAUMDbWxadMruFxehobaeO65W1AUK4HA\nCH5/H1dc
cQvFxZ9maKiN/v538Pl6sVjs3Hnnb3jnnX9jbKxf927a7SWUlHw2VIFXjcwGgxNRIj3W\nnCfzZSZbS5HnS/XLhFjkyroVspEG4P8jtcrRsTzUTtRIZjD0L955FFRBG91LN1pg5qOmMmuCNNYx\nGvWoKbS9ofFVAx2AF7U4keYJTeSvzVXvbfYiwlNSbYUcJHe8LkIsEvXTnCuycU01NDdTs28f6/fv\nZ8AXK2Vr9kg3hXWuSNW/Genb1CvgDpzRxVVR0ZVhrUD6+k7i9W7Ue1Rq/UJjpX9q68mY3llX9wpV\nVfVs2fIBLpcXmErTjUxFjXUfJ078FIejBEWx43AU43CUcPTog/h8A5w/38rYWBetrQ8D4HJ5+cY3\nOnC5qvD7LwBBuruP09b2ot4GxWJxct9977Fw4a16CxSvdyNebx1bt36kt4IBi95DVLuXyFYzxrEm\nS3uNtZaM6c1A3P6o6QrHXFm3qZCN71HzD2Nq6SkSi04tHd5D7PRZH1M9NhOdJxjneIiOavpRxWYX\niUXnClRP52uokc5qoBVoB46jeULVNZUoNTpXvbdCNiPCUxCEWUX6aaZGNgn02e65ONNoYmbDhqOs\nW/dsTNFUU7NDfwwwNtaD1eoItSCZ6heaSNDU1jbici3l0qVPePrplfh8/WHPp1PoaGDgDGNjPQSD\nAc6dO8b77z9JV9exuIJQTfM9GXYOi2XKOzo56dOFqjaWdev2sm7ds7S2PkIgMIii2NE+FCuKjSVL\n1idNYfb7B8nLq2Tt2qcSel6N400kEDPpvznf1q0w0xiLBZ1Nsm9F6N9FIvvypk9/jG2xgkbJoq/F\nqKL5C6i+zYeAt5nqBVoS+t9YbCiRvzaXvLdCriCptoIgzCrSTzM11u/fz4H2dqo9njkvmpRuC5Jc\nITIVE6a8f62tj9DXd4re3t8xOekPS/VMJ/0zMq03WXpuvPRQ7ZqRlJWtwO/vZ3x8lMnJABUV11NQ\nsIiPPnqOQGCqoEh5+UruuONZdu1azuTkKIpix+NZhdNZFtVeJF5bFK2lS7wxx/LMRhJrLaXrzU01\ndXa+rlshVRK1O9GeO8tU+mkA1ZPpCe0T7pMOJ57/ci7ZiFo0qIZwb+Y21Pt9DHg49Fh+H+YCSbUV\n4SkIgpCViECfeeL5EZubGzh7drcu3AoLF7N589u6eElH0BgFY3n5Su666zcpC9W8vEruu++07vts\navo23d0vMzbWE+YLjRSKTqdHLy5kt5ewaNHt1NRsp7X1Ec6e3UUgMBh2TaezQi825HRWAMGo4kQA\nXm8d69Y9m3DMkH6/zul4c5MJeCPi9cxmkvXCzIQawgWY23CtQdS0UyN1qIKyM8ZzELuoT7bgQo1u\n/hR4CjWKWgTcjFpoSNZ8NiDCU1JthRxEvC5CLDLxRJq5pszyZrqdTnavWSOic4YwpqKWla0IS8Uc\nGDiji06HozRMdELy9E9tPWmpp07nApYsWZ9UdAIR6b1d7Nz5Gd37uG7ds9x337u4XEux2QqYmPBH\nHVNevhKPZ4Vh7G/p6cTqfamiU2vjYrMVhQlRn++87gEFtXKudt6amu0Jx2zs19na+khUq5p4pOvN\nTTd1dj54Pefv3z1jeqtZr43m1Xwn9FhLLTVe69XQcy7DPsWoFWtfi3FOK9kpOrXWK0Oo0cwzTKXu\nDhPe3iWc+bumhGxGhKcgCPOCbPFEZss4hMQYCwmNjHSGPacJHK0C7XQjZAMDZ+jpOY7P14PdXpjS\neWprG0PeShWf7wLt7QfYufMaXYAWFl5Jd/dxfXswGGDJkvV4vXXcdddvWLNmj17IaP/+dWzf7uZX\nv6pACX0P7XCUcvfdr+N0ehgfH2ZyMvwLEoejlPvuezfUi/NtvQBSvPFrntmyss/j8w1w5MhW+vtP\nZdxTNd510i00NJ+8nqnMU26hfWli9B1miiYwe1GL62jFcbRrFTGVJrsaNR
p6LfBc6LhYXximUt12\nJlmAGnE14gZuC/2szZ92j8UR2wUhO5BUW0EQ5gXZ4onMlnEIiYn0TBYVLaWo6EpstgJWr/45ra0P\nZ+wNnG4rkBdfXEtHx2G9P2dkCxe/f5j29gMptXbZvt2tR28VxU5eXjl1da+EtXMxoig27r//fb3y\nbjoYU2Gt1nwmJkax24vZvPlk0vNNN402FeaT13Mm52luGECNyJnpO4zXBkS7Vj9qJND4fA1TabnZ\nSDmq8OwOPbYCbwBXEj5/2j2KnzMbyfZU25qaGlpbW7GFikAuXryY06dPR+0nHk9BEC57ssUTmS3j\nEBLj8w2wa9dyxsa68HiqsVic9PSovi6zPtD7fAM888wqCgoWYbcXR/kLY3kPm5sb6O8/RW/vG5SU\nXMvISAelpcs4d+6YLmBBLYKk9d50Oj1YLFYmJvxUVFzPmjV7aG19hIGBM3R3v0wwGECtkhkMuz/j\nHIDqB928+a24IjFyvNo1tMdHjmzVhbaiWDl/vjXl+cykX+flhMyTRiJvaDIxG/l8A1O+yGxm6ndY\nZSmq8JwJf6wwE2S78Lz99tv55je/yZ/8yZ8k3E88nsJlhfgShFhk4onMZE1FejrFm5kbOJ1u7rvv\ntJ666XCoqWlmpGNq68npdIelxUamnMbyHqpi8TgTE6P09b3O2FgXfX2/Jz+/kuLiz3DwYB1Hjmxl\n9eptrF2rptS63csYHe3G7++no0Nt8aKdOxgMYLXmsXDhl6PuT5sDr3cjRUVeyso+R0vLQ3FTOCPH\nG/k4PBW2LK35zKRfpxGzUlGzqY8uTK0ps+YpOzH20Uz22kV6Q43HQuI2IMY2IQ2hn7NddFoJF502\n1F6eifyxiedTPksJsZhpYRyZMC4IgnDZ8Y9vvcXfDQ5SYLPRWFsbJRobmps5MzAQ8/nn29roGh0F\n4NtNTTy7bt2sjj1byYVKolpRG1A/0E8nHVO7z8HBs7hcXuz2Ymy27+nPJ/IXDg6qvQLt9mJuuumx\niN6bFmASq7UQn09tFt/RcUSvPtvS0oDD4ebcuRbGxqYq0JaXr2T16m0cObJVv64xShp5f1r/TmMK\nZ0tLQ8wIZeS97Nnz+bDxZzKfxmMj5zadNaSJ4UT3kQqaVxugoaWF3WvWTOs8ZhNrnsxhJqvLpoom\nJrXxJLrPSG9oXZJjY7VPaQSeR+3Fma0UovbfXAK0GrYXMSUmS4nt40xnPoWsINNfQxN+jf/qr/6K\nv/zLv2TZsmX89Kc/5bbbbkt+UBpIqq0gCFlJPLGXSATGe17bdnZwEK/LRbHdHnZszb59+ofM+qoq\ndq9ZE3aewUCA492qt6YyP5/T996rH1u2Ywf9frW66JVFRfgnJvBNTHC9x8OetWsv28inUcg4nRVU\nVFRnrQBNF6Mg+tfea/loLIgDP9/lf1PAaFhqqeYvtFrzw3plOp1unnvuVrq7p9J7R0Z6ovpn5uUt\nYGysB4+nGofDTWfnYTyeakpLr43q1VlQsIj6+nf09iuJhF+kqDOmyRqjacb9Ir2vkeM3QxAZr+f3\nD6ad/qylojqdHkpKluFwRKc4p8L892pHfkI1Crd65kakxPNmQvR4tW1aumyyY3cQ3XfTgxo1zObP\nqG7gj4C3UNu8aCxArcBbCpxAFdORaHPiAZYxJbZz/z04V0maaltDZr+GGR7/6quvct111+FwOHjy\nySf5sz/7M958802qqqrC9pNUW0EQ5h3xqsMat6965pmodLhYx2nb2kdGON7dHXXOgpCRvtrjYdvq\n1VHnOTs41W6ia3Q07NjrPWqz8UKrlUG/n67RUfr9fg53dl7WVW216JjNVoTPd35GWllkklaZybHG\nFNOPfXbeYxnv8Hn+i29ERTa1CNXQUFtUWq3dHp7eq82Z3V6ib9+06VU9tVJLrb3zzkMMDbWFic7y\n8pW66DReN57oPHt2d4I02aljjPfa2v
pw2Dkjx28GxutpEeF0zq/dR0nJMnp6Yqc4p0JjbS31VVXz\nVHRCdKpq8uqyxt+Zo0cfnIHquo2on5YjhWOs8WrpsjeHfn4VVWjFEp27iRadCmrV27kUnZH3GOvz\n+gDqPY8ZtpWg3m898AGxRSdMzacVtS/pAeBb0x+uMPNkWuQ5w+NvvPFGCgsLsdvt/PEf/zFf+tKX\n2L9//zQGEh8RnkLOIb6E+UmkpyqWGIRwkbiooCBKZGrPF9ls9Pt8YefSKHU4ws75PZst6kOm8Tqv\n1NVRmZ8fczx71q7F43RyaWKCgVDkE2BFWVnYfmbMyVwxnXFoAmDBgpuBmWllkUl/xkyONaacXll5\nAwCryor5G+8wd955iN/+9s2Ex2jzECn2tMebN79FX1U9P7/zEPe5vFSHxJ5RTE61fHGn3CPUeO/G\nPqVaBDOWUE2UKjwTfkPj9TZteiXt82v3kalnd7a92sm+CDH/717kJ9REok8l7AuXj/fPQG/UR1Cj\neFuZSiON15NTows1VfYCcDLG2OOl0mZDlDPydS6JeKwJ0ULDz6XA14AHUft0JkIT537DtilxK5+l\nspDkv4Yze/wsIMJTEISsIDJSGS/iYNxebFf7HRrFYGNtLR6nk+HxcQ53dOjnyrdaAbApCk0bNoSd\ns8jhiPqQabyO1+Xi9L33xhyP2+nkhooKAFaWl7OksJBypxNPSKiaNSfLd+9OKPpmUqROpzepJgCM\nUTqz02wz6c+YybFGwbX7jjupr6riyIZN1K2Ln9IZS6RFij3tscvl5ddrdnPY6Y5bNkQ735YtH/K1\nr704rb6Wxj6l8YSPcdw/aD0Ztsa08ba2PmJa9Cs/vwKnswKHw43DURI3apuMXCvCk8kXIdMj8hOq\nseBObIy/Mx7PCv1n875QioxqGrdF9uTU0HreFgAvhY5bCJQBawmPFEZ+5E2YETgHXIp4HDRsv4B6\n/xtQ5ydRUaFIrg/9vxLYnvkwhZkj+a/hjB1/8eJFDh48yNjYGOPj4zzxxBO0tLTw1a9+dZqDiY14\nPAVByAqm46mK17ok1rlu3buX4z09wJSPM1WS+UoHfD5WPf00iwoKODUwoHs+66uqcDsc+rEV+fm0\nDQ3FPU+8OdGIHHeYD9Xvj7q/ZONOlWz1u2XSn9Hs3o6ZFlOKPH5TSHTGcqxlSqx7T6U/ZCwvdKrH\npsr861OZGpm1SZmdwkDGdQOxi1VlRiyfZiLvJkAbcCuq6Pwp6qduY4RT80LagMnQv2zAguq97Elx\n/xLgI8K9uA7gBuJ7N7V1YUctRrQ9xj7CbJLN7VR6e3tZv349f/jDH7BarSxfvpwf//jH1NbWRu2b\nicdTqtoKgjCrxBNDjbW1afW/NJ4nkljnKnY4gOhU2VTGF6/CpXHfRQUFuvAzXqfu4EH9WKfFgm9S\n/eCTSgXcxtpalu/eTdfoaMxxG8dVmZcXdX9mVeZM97WZLTKp8JnKsemIyUyrqUYe/98cbm4bOMNy\nWwH5tY2Q5MN9OmONde+pRIDjpb9nEj2OxMxz5RLTraqsMjvVSyPXjXlfChgFUh3hAqmR+D05teM+\njyrMzhAuOhXgfOjncZPGahZfBl5JY/8bUe9fS5EuBa5B9W5C7NfduC48qCnMUlxIiI3H4+HVV1+d\n8etIqq2Qc4gvIbeJl7aZyFMVK4001nm0/bYeORIlkhIVCzGuqcjzNjQ3c7KvD4Ayh4NjnZ2U7djB\n2hde4FR/f1QBohVlZdR5vfp1jB/W8w0iOZXvPB9pbeXTxcVU5ufzVIwKuWE+1E2b4vpUPU4nncPD\n007DNb422eI7nQ1STX80tkEpL1/J5OQfJ9w3VlpqpOAaHThDadcxulJMvcw0VTOV1NR4v0NmprXm\nWoqsWSQqBgXJ/u5lWpFkrtEE0mFU8Wmcg8jcQWNvylPELpCkESQ7vJyxOA7Ee/+0od5fsWGbdm9a\niv
QHqOnEMPW6R/bt1I4pQk1VDk/Nlc9SwlwgEU9BEGaMWNHDWFGTRC1QItuZLN+1i9P33ZewEi3A\noscfZ1VFhd465ZHWVnpGRth65EjCtNMwsXbpEofb2/XUWUVR6BlTPUOHOzv1gkMepxOvywWKwt51\n69SfQxijhfWHDnG4s5MVZWXsqKlJOn/GHqE/fPnlqAhpZCQyMhJrt1rZ6PXSOzqqR2Mz7Uk4nSiq\nWSm/s02q0beBgTP4/WoD+qKiK3E4ihLuGysyGhnxSjfyl2mkMJUIsHGNpXusmeMQIkkUFcwFEgln\nYxpxBfAcU1HNyhjHLUctOFQNvBbjWjayI/oZWWW3HNXHaWyPshZVjK9AbQcDU0Icol/3yMi39nx/\n6Dy5+sWEMJ8Qj6cgCDNGLE9YLF9mrP2M2yrz83UBBqrQW+HxUGizsaOmRj9PpCdSo76qip6RkZj+\ntEi08XVeuqSLXVAFrtvh4HCn2kttRVkZe9et4/bnn+fC2BiD4+Nxz20UgpFjbmhu5vm2NrX3Z0UF\newxRX2OP0I1eL3sTpOYm8nsO+/2meTSn4/eM5w1Mh0w9lNMhVR9oOv68VPdN14Nqhmc1kzmei9fH\nbF/t5RRhnXuMgvLnwMPEFs41TImpCqZSZzWBZjzus8A51IJCdwH7yA6RCWqCoZUpwWlFFYKtwBdQ\nxxo5BwOk94WC5octQm0zsyd0XLrnEWaKbPZ4poP08RQEISuJFZWMlVIba7+odiYhD2ORzUavz8fh\njg4cVmtUOq22n1bxVotcvtPfrx+vtVmJhTY+7fhyp5NyhwO3w8Evb7uNOq+XjV4vRzds4KcnTtDn\n8ySAV+QAACAASURBVOmiM7JNi4YWJTSOWUtZfeqDD6Z6f3Z0cPXOnXoaq9YjNJUIaay+o9p8mtmT\ncDrniucNTIfZr/qZPP1RI5300FT3TfXaxv0dDjcHD9ZNu7rsXLWnmS7Ga+7ceU3a9z0XY85+ItM1\nZwpjBduHiV+K0xgN/WLoZ2NU0HhcFzCI2j7kWbJHdK5FjWYaP48XAi6migVF3gukX6K0EdXLOYwa\n4dTWdKalUgXBPCTVVsg5mpqaqEkhTVGYe2IVpdEic2eHhvAWFlLscFDicFDhdOIOFQAyHptvtfLg\n0aN8rqyMm+12vU1KpIjRzvu58nJustn4n7fcwsOtrWGRSwvox3/xqadY6nJRYLPxPZuNu+64I+bY\nO4eHOd7Tw+HOTh5ubQ1Ldz0zMMDFwFTK1BNf+UpMMZYsLVijMCSqNX/pnrVrUy7qY7zGU2vX8nBr\nK9tWr+aR1ta4RZimQ7x0y0SYUZwom4vORKaHJnqPmslU0kwLHM1Vexoj6UQhtWvabEX4fOd1AZnq\nfWfzmorErL97yed3dgoVpe5LNaaTamPKR43o+VBbhWiRPWNrlQDR6azTwYIqwl+I87xWNTeSItQ2\nKM2oVXcJjVvrqTmIKg4row+dNm7UKrdaFeDEa1o+SwlzgaTaCjmHvFnmNsa0Sw2P00lvKAIZmYoZ\nmaa5bfXqmCImXjqnMTX0/YsXGQgJxXKnkwuha942NETTX/wFn921i66REcYmJii22xkPBrEoChd8\nPpwWC5PBIEHgS5WV7L3jDrYeORKW2msFbq2sjPIyxkov1sa1oqyMRYWFOCyWMFGdbnQyXmsZM9Jc\ns4FEqaTZljI5V+9RqabxxpuveHOcyvymkuqbynnSaaeiXXNsrJ/OzsNptyIxu6XOTGLWmko+v8na\nl5hFvPTPVFrD1DAljkEttrObqdYqt4Yem9U6xYNanCcSK3AWeDBiPEa0scGUZ9MFDMXZJ5J0W+Wk\nnlYrn6VmH0m1FeEpCMIsowmuErudi4GA6p10OuMKLm3/IpuNm6+4gkUFBTF7YUbut2fNGh5pbeVU\nXx9nBwd5ZdMmvtvczOGODlaWl9M9MkLn6CjFdjsnN2/G63Lh3r49
LIKpoQBWRWHc8F5W5/VS7HDw\nn++9p2+zMPVRR+vhqfs3PR72rF3LzXv30jUygs1i4aYFC8KipPHEoxnznaqYzcVCQJdr78dIjELq\nB0533I+r6c5XuvvHE5jJztPc3MAHHzyF399PeflK7rrrN7Pmb71cSP7lhFl+wOn2Fq1hSsTFE2Sa\nOAa1sutypnpZPkJ0L89MsKPeQ+T5lNC1J1E9mu8DHRH7lKJWn53ybDawijMsoIATNOLHnVTg15B8\nPoRcQYSneDwFQZhlNI/gW5s3617BPWvWxPUNNtbW4nE69Wjgk++/H7MdS0V+PjZF0fdraGnhzMAA\nx3t66Bob4+HWVv06v7nrLpYWq6XqBwMBlu3aRdmOHYyMx/YEBSFMdAI0nzvHzrNnw7ZpolNLqT0z\nMDDl3+zspKGlha6RES4GAlwI+VS3Hjmi+01j+V8zbV+SriczXrubbCaXUiZnEqMv1Oigi3Qvpjtf\nQ0PqOrfbS7jppseS7h/PO5nsupHVgdPxt6bjh72cSe4xTtS+JB3PZ6IVqBHr3Mkq3NagptCuBzai\nOsaOh67zbcJ7edpRo4vTQUvbDRAtOhcBt6D6NvtR7zNWRHQQ+AxqJBbAzRmu5BitHMBPA4tJHlU2\no1XObPl2BSE5IjyFnEN6T+U2mrjyuly6yErUw9PtdHJDRYX+OBASgJq404TZ821tujjUivyE9dC0\nWqk7eJDhUJXYtiE11ckK+E6fpt/vJxAM4rRYuL68POl99Pn9+CfDU7lcdjvrlyzh2tJS6g4e1Asa\ngVogaNvq1dgt6tuuBfBPTnKgvZ2rd+5kyX/9F7c+91yUwMxUCCaa21iYUQhotsm23o+pvEfN9EfB\nRB9X052vwkIvAIHARVpbH066fzyBmey6xuNqanYkvc7lhFl/96JFerKVmIqAjEUqginWubU+lbEE\nmbHf5+9Q/ZLGL+N+DbwV+tkOrCLc95kMLVCzErgt9LM1Yp+VwDuE99gsI7yQkXbNCVRxugxtbgtC\n46jGwza8wFYSvwMkmg8jiV7H2K+hfJYS5gIRnoIgZDUNzc0MBgI4QoJtZXk5VxYW4rRY2HrkCPva\n2jjW1aW3HSl1ODhxzz24nU4q8vPxhITt2YsXdQG36umnGQztPxFxva8tWcKCUH/OVLEp6geWodA4\n24aGONbVRa/Px6KCAr0Krtvp5LW772ZxYSGrFy7Uj+31+WgfGeF4d/eUEH3iCW7du1cXr5p4ziT6\nmQpmVsCdLcyKeM1mXGC6H+dTJdHH1XTny+FQP2SnGiGNJzCTXTfbvkC4PEi2EqcbcYu3Ao2/ZZpA\nM547UQVWbSxFqG1VDgDG96gxpn5zi1FblfQBi1EjlLEwCkstq+VK1KimhymB+TnUCrS/CY2tMfR4\nI2qqr/aXpIQp0arhQ5vbRhqpp55DLMOtR2qvJv67TqoVaRO9jmZETYXLhZ07d7J8+XKKior4zGc+\nw0svvWTq+cXjKQhCVhHpMVy+e7few3NRQQHv1NdTd/CgXjDHrih6FNSCWjRoPBjkeo+H0YkJvaJt\nZV4eXWNjVHs8OC0Wvc+l2+HQxd/K8nJ+c9ddACzftYuusTF9XEU2G8MxUnEVpj6uACxwOvmCx8Ph\njg48TifLSkoodjioyM8P86YCNLS0cKi9nQG/Xz++0Grl0kS4HF5cWMjbmzeH3XesQkHZVmQn16hh\n9txUs1XCxQw0D6XVms/QUFvY+pI1l+skW4lm94CsYeq3rA5VfCY7dwPwPDCKKjSXh85RDTyFWuG2\nK3SuAKqYLEZNg60GrkUtAvQuagQyiCrGalArzx4L7T/I1DxobU5AFa5vJxijNodam5cS4AHgCKro\njDW3xp6bw6FtmbzrJHodpY9ntpDtHs9Dhw7xp3/6p+zevZsbb7yRc+fOEQwGWbRoUdh+4vEUBGHe\nEJla6jOIsPFQaqvWp7LYbufG
BQv05yeB8z6f7qk09rOsWbRI7ek5MsKr59Um5FbgKwsXsqykhDyr\nFYfFwuefeoq7Dhzgc+XlrF+yhCWh6Gq8N8vIt94en49jnZ2sX7IEi6JwvKeHA+3tvBCKzGr3paXA\nVhvSiAHyQqmuJaE+otUeD29v3ozb6UyaBit9CTNjNuMCqSbQZQNapHJoqC1qfcmay3WSrcRMekAm\n83BuT/HcZ1CF5UXU1iWnUYXhIdT+l6dD97AqtP8EqujUPJRtqD7QXuBroe2ngBeBvaFjTxI+D8Zx\nJhKdMDWHH4TG4w6du4v4c6sdc7PhOpm86yR6HaWPp5Aajz76KI8++ig33ngjAAsXLowSnZkiwlPI\nOcSXML+JFFfXG4RZz9gYDS0teF1qwYjBQID3Ll5kQV4eoApRjRVlZbxSV6enjZ4bGaHX56NzZESP\nkE4A+z7+mOMtLYxNTNB6/jztly6pfTs7Oii026lyufBNTjIYp/BQLALBIEc6OsI8oFq01ON00jk8\nrKfL7omIWg4FAtR5vWHFl7SUV2Ma7COtrVFpt5pHbo/zh/zDpftZv38/Dx49OuPpufMFs8RgKu9R\nufhRMJZ383Iq7NTc3MC+fTXs378eny+zZOx0zzVzf/fMWomxRKYx/XMVU4WBNpLeb1mkP7MHNbr5\nCLAQqEIVlu8a9lnJlGDUisAVAz8DPkEViDB1/17C5yGdd4N4c5hobrXn9qRxnemMIT7yWSr7aG5o\nYF9NDfvXr8c3kP57TCbHT0xM8Prrr9PT08PVV1/NkiVL+MEPfsCYIfPLDER4CoKQVUR6DPesWUNl\nyHOpidG24WF9/56xMW654grqq6o4uXkzdV6v7qnUChjdvHcvL4VSVItCwhbUiGdktVoNh8XCsc5O\nPVU35j5K/IwS3+QkYwax6p+cxGGxMD45qUdBtchnucFL6Z+cxG618tMTJ+gZGYlb9dYYGb5m507W\n79/P9at3UFVVj8+9mpbuXg60t3Pg449zrkrtXHGyuYFv7KvhZROERTLMFDGxMd+xGsuDORu+zJmf\nq9QwRnd37rw6o/HMv0hxLI+hMWqopbQeRjUopLNWGlHFqpbdokUH/ws1qtgPdDK1ziuZ8mLClMgc\nBD6L2ucz2e/FbH01lItfQQkzxcCZM3QdO0b7gQO0NKT/vpDJ8d3d3QQCAZ5++mleeukl3nzzTU6c\nOMFPfvKTtMeRCBGeQs4hDY/nB/HahERWYXU7nZy+994wMeotLNT3L3U42F5Tg9vh4Oa9ezl27hyX\nDL04G5qbef/iRb30Q5HdrotTlxYhXbYsanzjk5P0jI3pwjRSYlqBP6qsxB5HfHqcTr2Crba/f3KS\ngdDYtCq3AK/ffTfO0L42ReHQJ5+w++xZXTBeHRKWxnnSIsNFNhvnfT4OtLfzg9aTrFmzmyK7GgGu\n9nj4YqhC70xUqc201Uu2YZYYSOU9amDgDI91LeGH7ctZvftnMzB/5pcvilUcaDZamWSLSNOiuzZb\nET5fb0bjSTdSnP1/92IlqmtRw2tRi/xopOtxc6OmxL5LeHQwuueyyirChZyxAu0YpFTUZ/6T/Wvq\n8sNWEHpfqK5m9bb0M0gyOT4/9AX/D37wA6644grKy8v58z//c/bv35/2OBIhwlMQhDkhnTYhRjHa\n0NzMqVAKiRX4QkhYRfbM/OLTT1Ozbx9PffCBLjqtwCt1dTy7bh17162jwJCaG8lkxOMg6OLQqihM\nAMfOnWPt4sUsLiykZcMGFhcWcrfXi8fp5ILPx1Ao4mlTFL0qr8ZVLpcurr0uF13f/CaeUGGkgUCA\niwbx3BsSlvcePqxvq8jPp8Lp1M9rFJbGqPGetWtnrEptLvb8TMRspo3abAV0s4D3WMbvRj0zMH/Z\nVckyk6il8XWxWvPnLPqpRXevuOJmfTzTXSfzr4JvrNRULZrXxlS7kRJgxzSvERkd1ASlhfB+nY
4Y\nY6s0XB/Uoj69qAJ0OdHiM52MAemTKZhDbWMjVfX13HnoEE53+u8LmRxfWlrK4sWL075mukhVWyHn\naGpqkm/q5gHr9+/nQHs71R5PWqKoZt8+vbKrxtL/n723D2/ivNNGb1lf/rZsy8QhBgU3hKYfCU7c\n0ha81tZOKSbUboKSJu1F0rO1djdtt/tuN+w53bNnu233fa/T9Lq63Z7Tbjh9NyRN/YKTNIEU3BQT\n/FGSOk1DIF+NuyTQGjDGIGHjD9mY3/lj5hk9Gs1IM9JIlsxzc+nCmo9nnueZkTT33L/f/SstRXhu\nTimpAsS7zTptNmxZuRI9IyOYW1iAx+3G9SUl+N2FC8A772iqnjxYmZaHhoYQmp1F7+nTcX0PDgyg\n+/jxGNIIALVFRTg/O6vkljptNtx7ww0xLrcetxsrfvYzjExNAZBupdTk111QgNkvfSluHpjrrVli\nuXv3+zE9PYqCAifuvPMVlJX5lHVqd+FjQ1+Ncy9N9RzmKph7a1PTjrTIgJHvqEgkjKbuH+G3M15l\n/rTmOHXklpPl3r1+jI5KLqH19QG0thp37+TPy/PPd6TcjlWw6joxg/z+3VO7vvoSb24YJyGFzf4a\nwJcghfE2IDbMFpA+B29ByvV8HsB3IIXn9nLbqB1l/TDucW1m29xBfl9T+Ylcd7X953/+Z/T09GDf\nvn1wOBz4zGc+g09+8pP4l3/5l5jt0nG1dSRaKSAgIGA1GKFx2u1o9/mw0+83RViKOdfXi/Pzkro4\nOxtn/sN/JfJlWGZkl9zzkQguGAxvdNhs6ONyRsORCIKDg9jR1KSosMPhMI5duBBHOovtdvymowM3\n7NqlLFtWVIQ9J04o2960ezfevuce+EpKFOKpJp0A8PJnPxs3D+mQvunpUczPXwQA7N27AZ///J+U\ndUzNBKSyL1+YHlZu+AcHg2ht7UZXS0vMPKihJq+5TkxZ2Gi2jvWru78eM38spBSIznHqkNShXDkH\n6ajJ/HnJBTMjq66TIIIYxjCKUYwudMGTAw8IMoMuZOYhiA+SURAgmfSojxGEFHJ+DBLRBCTS2Q3p\nwcxNkHJEtaICzEQM5FZ0gYBAqvinf/onjI+P48Ybb0RhYSHuuece/OM//qOlxxCKp4CAQFbBK3Va\ntSjVUN84f+3FF9Hzxz/iA5WVqHS7cW52VjEAsgMokOt68ornNYWFIADnZmeVZWpFlEeVy4Wpy5cR\n4Vxp230+PLtxY1x/tg8NYec77yhqJoPDZsNlIqnkS02NUlP05qoqjE5PY0zlFBeor8dLZ88qxPOD\nHg9WV1Tg6zffjNv378dQR4cSVgwgjvxqzZWaZKjX79lVh0hkHHZ7Me6++60YxVOtZr548LMYGemB\n19toODzQ7LnOZaRD4NjtbzGkW3C9mdu/v830HCdDrpwDq1TCxVAbMwU//OiXlbIAAujOE6Usf+BH\nVIkE4mtcakUFsE+rE0AJpLDgZNdZutEFRr8hjG4nkKvIdcXTKITiKSAgkJPQullPVotSDbXyNjY9\njXORCPpHRxGor1dKqNhtNiwQYYEIdSUluMjlWJ7VsAP3ut04F4koyimP8NxcnOI4cOYM2vbvx8Tc\nHA6PjSn9+cXJk3GkE4i65U7Mz6P39GksKyyEr7QUZQ4H3lIprWwu7ujpUYjnDRUVeGbjRgDAzF/8\nRcz2/Lzy749duKCEGwcHB+NIhnou/+edr2Dv3g04cM0j+Omhoyh2vKmcJ7Wa2dLSZfqG3+y5zmWo\n584MgWM2P4B066hHL1KZ42TIlXNglUqYTVU60yiWlbJGNGLHoiplmSY0VrQf28bAwHYDYelMiVwL\n4HpIdUP57VjOKA/+0xow2FetdszA6DeE0e0EBHIXwlxIIO8gak/lD7TMZ9TlUpJBfeN8fGICgFSz\n8+F165T2/vzaa5XtXt+6NaaGpvqLrqG6Gr+9804E6utxdO
tWOP7wB2WdQ8elNjQ3h56REYV0Mlfa\nuYUFze3VGJudxclLl3B4bEwhpaUOB9pWrFDmotzlUsZQ4XLpOsby83pTdzfeunAB/aOjCumskOdG\nDfXclZX58PnP/wknpi/HnSe1u3Aq7qVmz3UuwwyBU39HGQ3Ey4RD7FI6B0sNXehCAAEcwIGkYbaZ\n/d1L7ICcfjkbKxyWY9sw5nTMDI8OAXgGxkjkYoTNGj2mke2MGx2JeymBxYAgngICAhmD1s26mtAk\nA3Nv9cikzFcmuRdOzM/joaEhpb0nb78dq0pL4S4owH0HD+JDVVVKG7x6aYNEwD6xZw/6T5/Gjbt3\n4zJHUi8TaeZXqnHswgXUPP44ktHOKpfaYVEKCQaAS5cvo/fUKVyUCWNXSwtWlZXBbbfjuZMnFTJ4\n689/HkNCi7lapKMzMwqhLLFLLV+U50YN9dwx6JEqM+VStLY1e65zGekQODOl6K1GqueAkY3v7m/D\n7ZFw3vh15krNTyPwwINudOdAbmdiQpN+ORsryFxsG8ZyfVOpkbkYn1ajxzSynfVllAQErITI8RQQ\nEMgYtPIQjUIrfDRQX49LsvKoZarD57PVFhVhdGYG5U4nJubnUepw4JLKgCgTYLmdDO0+H4bOnsWo\nHO5rB1DucsU48BYVFOCjy5bh+MQEJubnMcGF/jZUV6PY4VDyWGvcbtzi9eLY+fMYm51Fo9eLp26/\nXXLbjUTQe+oUGr1eXJybw9jMDJwFBXjlzjvhKyvTdaHVO09m8gNzJZdQwBrwLrSv1Afw/7V254Vf\nZzruuVcvEucopp97bIXDcmwbkcj9GBzsQVPTLXC7n0yj3aUG5iCszmcVyAWIHE+heAoICGQQ6She\nLJyUEbRGrxdFdjsm5uZQW1iIp26/Pa5dXrn7TUcHAvX1OLZ1KwL19fjYsmUx25Y6MpPi/ufLl6NI\nVh6dNhtGp6bwoepqtK1YAXdBARaAGNIJQKoJOjqKkenpGNIJSPMwJIf3ljgcOCeTy49fc42i8G7Y\nswcDZ87gt2NjWFZYiBvKy/HuxAQuzs9jPBLBhr17AeirdnrnyUx4KdvW63bj9NSUIZVUIHfBFKWQ\ntxFPNO3IG7/OXHC9XSykrvYmVgbN1xxVh3smat9oaGhsG273SbS2noPb3Quh7PFYzPgKAYHkEIqn\nQN5B1J66OsDUuYbqalyIRFBXUoK3QiGFtBXZ7bjV60W506kY4oQjEdz6859jeXExyp1O1BQVKbUy\nf9zUhBt37cKc/H10+3XXoffUKcnZ1kAdTyP4QEUFGpctw7PvvRdX3qXa7cbFubkYNRSQyKmWOREg\nhelqGR1Vu914v8cTMx88vG43xmXSZwdw/N57lTBbI1BK3hQUoNTpxKMGSt4w1fT01JSizl6tyudS\n+I5i7rE3N+3AV9weU1rVwEDQwnqk5pCK620+lDUxck3ljtrrR+ZrYAplL10she+pfINQPIWrrYCA\nQI6Cd1XteP55JYyTgZUnAYA1u3fjnXvuwfahIVyYncV7k5MApLDUczIB+8jPfx5D4EocDlQ4nQir\nFMZUUVtUhMMdHVj+xBNKrVAe5zn1rwBS3qmroADuggLM64QAT12+rJtvysYOQAknBqTQ3OrCQvSe\nOgWnzaaE2ZoB7+AaqK/H9qGhpKVEmGratn8/gFiV1EgpksUkKwLx4N1jzdIXa+uRmkMqrrfDGFbK\nmgQRzNuyJrmj9maqBibvbPtjAA/B+tqgySBKmggIpAMRaiuQdxBP6K4O8OGfLIyz2u1WnpbZuW3H\nZmcRHBzEcydPKqVRKpxO3CLXvSyVQ1SZ2thQXY1ylytKOtNUO20A3r77bmwfGtIknWowMrlw5YpS\n8oX1k2FtVRWucE8UPXLZGK/bjQV5+YcrK9Hu8+HY1q1o9/nQ4fPhhTvuwJOtrQjU12Ns27aY2p9G\noQ6x1XIn1oNWOK+R/d
M3MEkPVprSsO+ofDK6sRKJCJBxz03rEEQQfvjRhjaENY6aO2VN9GHkd898\nSGymYCbc08y2vHHOhwD8CsBqACfT6axJLB3zHnEvJbAYEMRTQEAg58HIjMNmA6NpFTIRA4AyhwMP\nr1uHCEf67HJZlA6fT8nvLLHbsaywEM986lM4KauiPHgyW66TA1pss6FtxYqYZZUuF+7o6cFT775r\neEwFQIwj7u3XXYc3AgF0+Hxo9/lwaMsWlHB9KHO54HW7cZkIYTm8tnj6GIILP8C3XvkNwpGIMv7t\nQ0MYm57GfQcPKnmWZhxq1eTRTK6nVr6okf0XW62xjPgGg4DfD7S1ITz+VrTNbwWzy7YWEYkI0GLc\ntjNFswc9CGoc1UxZk1xGJsrxpAYzbrJmtuXV0QIAFwGMA9iA7D3SyGa5lcV4TCMgkFmIHE+BvIPI\nS8gv8GGWfM6lXshlonb+8/e/V8ia02ZDicOhqJZetxsEKaSVd7AN1NdjR1MTan/6U0TksikdPh9e\nOXcOI9PTUmOqHM+bystxXWkpek+fjutHAYCm2lq8eu4cJg3W8DSClSUlmLtyBZGFBdxWU4PlxcXY\ne/IkwnNzuLmqCmUOh1JDFAAKMYfr8Qd4cBG/wcch6a5A24oVmJqfj3OY5V1na9xuNNbUxJyDROGw\n6bgTG90/ldw8K5G+c6cMvx99/f3wA9j/r7UY8Y7Ce74Rm799AO4Zj7k0tiWIxcjMa0MbetCDRjTm\nLbm8un739MJZeWfb1QDGMTBgRzjcCIdjGC0tIUhfL5n8kFnh0GsUfqSW/2oMV9c1lRvI5RzP0tJS\n2Lg65jMzM3jwwQfx7//+73HbihxPAQGBnMVzJ09idGYGAFDtcuG8rNYFBwcV4xkjOYDD4XCMQjhP\nhGmZXJY6HIqZTl1JCd5fUYHe06cVh9X7Dh7EHFers+/MmYR9/v3EBMpdLk3jnytAXL6pHZJ6yXI3\nU8HU5ctKHmjvqVOocbsVZfOtCxdQLtcDXVtVhT9NTeF8BPg9PogyzICRTgB47fx53CLXMOUVRqY6\nsrBjFvbKzgGf18kvB6IqZqowsn8quXlWoqWlyxriWywrIo2NaPnSUxg89hCa9u2QSGe+WMNaAD3q\n0IXs3bazPjixB+3oxE78W16SzqsPTBcHpLPIvhc83N+vANiAcPg6jI5KNYkHB4HW1kx/yPg+ZBrZ\nVFcFrnZcunRJ+Xtqagq1tbW4++67LT+OUDwFBAQsg5pAbh8ailEplxUWKrUn+RxAIzUgmcstj2q3\nGxtqa/Hbc+dwenoa5U4njm3digqXC7c+/TTOz87GucuqcXNVFd4OhXSdZY2gyuXCBQ13WQZXQUEM\n8dXcxmZTHHdtALR6U1dSgte3bsV9Bw+iZ2QEN7ou4rrq1Th0RlJCCwsK8Pt77kGFy6UojMwYyGm3\no8ThwNT8PHpPn445B+/fvRv/dfEiFgB8yOPBYHt7QmXTyIOCqxbhsBRuu2MH4JFJTjZFEg0shnGT\nH/FaTbZtWbT6IKCP3DH4Mq6LRyMVGrB580q43TsTbp8vGAgGER5+C47i42jp+g3cHt9id0nAAuSy\n4snjsccew7e//W3813/9l+Z6oXgKCAjkBNSq2dj0tEI6PS4XXv7sZ/HQ0FBcyKVWDqCa3DCXW75c\nx/lIBIdHRxXToIn5edy4ezeG77kHK0tL8Z78BM9hs8WVMWGoKynBssJCzbBaI3DYbLipshKHz55F\nid2OKY3wW5fNBp6Waimpc9x7rZ6udk3hmyVP4sWDP0OV629Q43ZjZfVN+ElzMx789a/x2vnzeLG9\nXXGw1VIyA/X12On3x4W9jnLn6cLcXFIiqT7PHpdLEFEGjwfoVlGcbIokGlgMl1ktrUZPx7IKauJU\nLBMnoRcZw2/Dz6FM/lwfHPwi2lqfWaSeGNfFLYtUyDGEh4cx2n8YADAYfAit6u8UgSWJ
dB/+WPXw\n6LHHHsO2bdtS2jcZhLmQQN6hr69vsbsgoAM1gWTvK10uvHbXXfCVlcUZzwCxZjbbh4bg37sXMEr3\nVgAAIABJREFUT737ruKEeuOuXbjv4EHsaGrCLz79aRTZJRsgO4DxSEQJSQWAuStXsGHv3phjr7/m\nGmV9md0ec2xXQQEK/vCHpGMrQNRZlsdlIrw6Pg4boEk6AeCSarndFv9AkPWKN00qtNlwXXExql0u\nFNFFjI0dxv8YqcYz7x3HuUgEvadP46GhIezbtAmnvvCFmLIpzEzozVAIQPScaJn/zMr9KwDQs2lT\n0rlIx/X2akCufUcthnGTlldppgMH1QZR6j4EB4Lw7/WjbX8bwnnmMpyNa2rCIYX6v+cFnmhaTFXG\nuOHQ4hoqZc78xyGH7HsbG9G0IzOf2Vz7nhJI3+TOCpO8kydPYmBgAPfff39K+yeDIJ4CAgKWQe2G\nyt6/e++9CWtJ8mSIkZiQTCbVOYketxu3yiVCGJ1rqK6GUyZzxXY7fv2ZzyjHri4sxJHxcYk4ulyw\nq4hnKBLBb8+di+vT8uJiLCssVN5fAXRrfi5cuaKpUlbJeZnqZc6C+K/eBUjq69GtW9G2YgWK7Hbc\n4vVi+vJlnJ+bw7H55XgCX8AFx/swTRI5rXS5dF1i2TyORyKoKymJIfVqZ9u1ck7oFQDfOXJEsz0g\nSmbnr1xBh8+XkuutQPZRVFQDt9ub1ZtzLepgpnBGKlATbHUfhsPD6B/tR89ID4KLULJnMZCslAyP\n11puwyv1wMDmtfiRe2fGerR0nFqfQ9Sj+QFLW27p6kJ9IIDNBw7A7Vk6Sq5AYqT7kNCKh4w//elP\n0dTUBJ8vM+HdIsdTQEBgUcBCaY9PTsJXUoKTly7BV1aGd8JhjEciWFtVhevLynBJzklkxPHVu+7C\nXw8OomdkBCV2O0qcTrz82c8CADbs3YsN11yDM9PTStjn9V1dSm1PPahDcddWVeHQli0AgJt278bo\n7KyyzobYUigFAApU+zttNthtNly+cgV8hul1xcWYnJuLyTtlDrylDgc+ds01eFIm4HzeKwDcVl2J\n/7P0GXzl3Cacmp6Bw2bD7+68U7dOJ8uJ5XM59XJptbbVgt7+6breZgq5k7O2uNi716+E2tbXBxbV\nxCmTSOaM3La/DT0jPWj0NuLA5gPwXAXXgx9+9MsBzgEE0J0gwDmMMIIIYgd2ZNCEyY/sZ95mKru4\nCkBI/rsDwGKFJgvkC5LleKbr7m6FO/yNN96Ib3zjG3jggQd0t0knx1MQTwEBgZQQDALDw5KJZ1dX\n1Ecl6X4y4Tx24YKiaqrBTHQ8bjfCkQhqHn9cIXZs3epduxQnW54EqcnRk6ramg3V1Tg1NYUxjkzW\nFBbiHPfeV1qK60tLcXxyEtcVFWFofFxZV2a3JyyjoudsawdQ6nQqJNhhs+FTdXX40YYNuGX347h4\nRVIx7/TV4emNbQoZbKiuxsrSUuz0++Fxu7Fhzx4lx1XPiAnQJoN6BJPflpkRaeVrGiWouQKjhGup\nE1TLSsXkOcKRMIKDQexo2nFVkE4gF0vJpFtQJxUS6UdmyO7tAHoBNAB4wWBfBK5m5Lq50IsvvohP\nfepTOHv2LEpKSnS3E+ZCAlcVRO2p3MDwMNAv/5YHg/F+Knrgy6sAUk7jxfl5lDudmJifR6nDgfd7\nPPjaiy/iVyMjiCwsKMVCWBitx+3GR2pqFBLEh3eysE+v243TnD04Q3VhIZ751Kfw0WeewdjsLNZW\nVeHs0aPAihUAJHLI18EcmZqK2Z+RTlZChY2hwGZDaG5Ot5zKAqCQzgqnE0e3blXCj9/nGMOrc9fB\nh/cwef4U/Hsv4w8XL6La7Ua1262QTgAol3NA2bjVynG5y6UQRjUpZQZNjIxqudMmKqui3j/XYTTs\nyGrzHfYdlcj9NxnZtZIMLyUDlnS0K4/bg+48VXuN
/e7Fz04XurKgYppBugV1UrGoylR28ZNYVLvq\nNCHupQTUePzxx3HXXXclJJ3pQuR4CggIpASuXCHMeB9EOLXQXVCAgc98BoH6ehzbuhVetxuXLl9G\n76lT6PnjHzE6M4PQ3BzmiVBot+Otu+/Gvx45ouQZ+kpL4S4owH0HDyo5i10tLVhVWoq5hQUcHhuL\nO37vqVN4aGgI79xzDwL19Ti0ZQvGOCJ8aX4eFxOURgGAQrsdf37ddQAkc6L3V1QoNUWdNptiFGTX\n2f/Ply+PyXn9B+9ruA2v4NtVAzi2UI/+0VGcnpnBedlAqObxx1H4k5/gY888g3kitHP5lYwojkxN\n4fDYWEKDH4/bDY/LhY7nn0fb/v1468KFOFOgRPmaamMilvOpzhnNFbS0dKG+PpBU5cuU+U4i06Vk\nJhBWmEQwLK4Bi7VgtKMH0i1/MqgzCpdShmE84mfHAw+60Z0C6czUTBk3DtJGKiQyU9nF6Y5FQCC3\n8B//8R947LHHMnoMoXgK5B3EE7rcQFdXfLlCPfDKz83V1eg/cwYAELlyBd85ckRR1XgV0+NyKSVO\nGqqr8cIdd8Qpcl63WyGXK372M9htNjgLCvC+8vJoKRVIamOFy4Xw3BwavV4U2e3oeP55hWTZ1qwB\n5LARp82GIocD8/PzUt1LjTqgQx0dWFlaiuDgIPpPn44JxeXLpGgF5DZ6vXhUdQ0/X/IVzLtfxRNF\ndyBy6ULcPpeJcJkIQ7IJUm1RkbKOjYEpx8kMfvj5q5XNk7xuNwZOn0bVzp24uboa7T5fjMpqpC2m\njuZSjU9GuJLBakWQfUclIvHJyO5iONFaiUxl1ZmlHWp9bAyZLemSKah/97QVcSuVvUwXv0n1CklF\nMV3kekY5CnEvJbAYEIqngMASwGIoT6xcoZHcTl758bhcCnFS35DzrrhP3n472n0+dPh8CukEYm/m\nXbJDrdNmw/Tly7g4P4/xSASvnT8PQFIjFyDVxQzPzaG2qAgHNm/GycnJGCXKKxMwO4Cbq6owkcSM\naMsvf4mburvROzKCC6r5ZuVQtNTO5cXFmrmRxydncCxSiV+dGoVLdry9uaoK7T4fHBqlV0ZnZhQF\njc3Z0a1bYxyF9a6J4xMTAKSQ3ec3b0agvh5rKipwdnYWobk59J85A5fdbogwahGrfCytkilFUO3y\nzCOZGmtUreWRS+VCzCqTRmFWu1JTsePy+woAD1vYr+xBUiLD4ac0FHErlb3MFb+RnHZ3og39CJt2\nhBUqo4BAPkMQT4G8g6g9FY9cv9nnCcpOvx9v33235g05H8rpcbvx7MaNeGbjxpht+Jv5VXK46jyR\nkltpB/DyZz+LQH09PlJTE1PmpMBmiyn/wfJAJ994A4CkUL4qk1YboKl2AsDpqSklDJiZHhXb7VhW\nWKiEDm+49lrpmNx+H6mp0SxpwvpT6nCg4Gwlqv/kw7KfbsHOdRtjapDyGJueRjgSwfahIYxNT+Ov\nVbmXz508qVwTD/T1KUT0kkyqJ+bn0fqLX+DS3ByKHNHgl4bqasMlUbSIlSitEv2O0qqZypCM7KZC\nhnOpXEimaItZ2qGmYj4AXxgI4i/2+vGz/W2I5Ek9z+jvnkTpHQ7JTTVWEbeSlGWu+M0whtGPee6h\nREIvEoEMQdxLCSwGBPEUEEiAYBDw+4G2NiCcw/cnuX6zryYoiW7Ik4HflxntMNgAvHrXXfh/3nwT\nY9PTeIc7aYUFBXixvT2mP2sqKnB4bCyGYJLqfy3wdJQplNMLCxibncV3jhzBsfPnldqh7Eu21OHA\nDz7xCc2HBF0tLUp+62jFGZw/a0fvXjeCQeDZjRtjQmsZ+kdHERwc1H3owCuxg2fO4Kl330X/6KhS\ni5Svj1rqdGqqy8mgdR4TqXwCEjIVoVAsh+c2ehuxY5HDczNds9Mo1FSsHMCy8DDWjPbDa0H+rFUY\nGAhi714/9uuQ
4e/he3I9zjcRBtDSshb19R0ZdCnOnLJYLD+WkB5KfBjAo5YfwzyWdvavgECuQJRT\nERBIAL8/6twaCBh3bs02crWOYqYRjkRwU3c3RmdmUOly4chdd8FXVhZTUqW2qAgFNhtebG+PMfQB\nouVB1lZV4Y1QKKYWp91mw4LGdxdzs61wOrG+tha/PnNGqcvptNlw7w034Gd/+INmfqdDru8ZuXIF\ndgAbamvx7MaN2D40hKfefRehuTmUhasx+c074P3LIaxpCqO80IF3Ll7Eu5OTMW1VOJ04cd99uO/g\nQc0SJ5WPPqqQTB7q+qj5UhplKUGvHmq6SKVciFamXabyM5P3JYhhDKMYxehCV0ZcWMMAfrS/Dd4c\nKy+TrPRPbD3OOnTjdeRruKlUL/SL2AGCBzuRG+PwI/v1RQWuNuR6ORWjEHU8BQQyhLY2oKdHcm49\ncMB4rUqB5EhmQmPUpMZMvcpE+wZ6e9F76hQKIJHOPRs34rO/+hUiV2ILpNx+3XXwuN3K8Woeewzj\nkQhsAG71evHuxIRufVItOGSCy74l3QUFKL1Qg4WaEMLzc8o2PCkuANB07bV49lOfkuZK46HD7b/4\nBXpPn0a5w4GJy5fj6oHqPawIDgzg5ZO/hX1hAh77AubLb0Wps9BSo6BUa8DmMsyYKuVSPVQ/4m+3\ntZZlpy88uQqgO0NHNlNkPVskPFmt1dyrx7nUkG590Vhk4yGKQP5BEE9BPAXyENmsPRUOG3duFTCH\nZKrPtT/9qVLvs8PnwzMbNxpum5GqIrsdJycnY8gATxBqiopwcnISM2+9he4vfxk37NqlEDxXQQH+\n7Npr4SoowMFTpxC5cgVlTide5+pvAsDJyUls2LsX1xUXK66zDOu83hjHW0AijXq1PrXQ6PXivclJ\nnJdDMvn6oYnUMjYHD69bh4eGhgyr4fx5KcUELqE86bHMIt1IArWj51eHji26ky4/b82Tk+j7+td1\nt82lCAWt221rb8GNIYggnsJTCCGEBjTgBbyQlZv1ZMTSj+yQ8GRk+Bd9v8Dj/sdzqB7nUgMrtmNN\nTc5sPURJB6KOZ/YhiKfI8RQQSAgzzq0C2tDLZ0uWl8rX+zT7Nc1yD9XutUCsEVPPH/+I/tFRvHzu\nHB4aGoKdc5Cdu3IFvadOocTpRGNNDQBgcn4eDw0NxRxr4/79mJybw6sywWyorka1ywUAGBofh1Nu\n0wbA43Jpkk69L2Lmgvu7O+9Esd2OMrtdIZ0Omw0Pr1uXdA58ZWWm8mkVoyNM4gqkcVS5XDg9NZVW\nTiJ/HTgrpDbM1oBlUNe4zAVzLf56/vsPfzjhtunkOFsNrVxMftn2LDlmD2MYIUiGOSuxMmvkKpn7\nrtUmSXqZhMnMpEpRmmI9TgFjsDanNZrH2ogdFrsCCwjkM9Imnjab7T9tNttZm832uhUdEhBIBvGE\nLreQzChFjxQkM6G5zesFIOUkVrhcMccwas6iRW75ZbdUV0t/r1+PHU1NWCu/Z/C4XNjR1KSYGGmR\n5NHpaVycn8c8EWwAqt1uNMh997rduLm6Gu6CArx21134+LJlAKIlVz7k8WB5cTE6fL64osoN1dV4\nMxCAx+2Gr6wMH6mpwSRHxi8TxZFgK9DV0oI7fXXwuRYwDanMzNTlyzh89ix6RkbwRZNOiOxcMXOj\nnpERlP7lIAKB1MPX1TUu2Tm9wXEeW2d/uChOpfz1fIccAp0LSGaZonW7zS/LFqnnb9R3YmfGjhN/\nXMjHjRJLfs5+DGtNklItM5Pq755UusSPNrQhLExzsoYudCGAQE6HRYt7KYHFgBWK56MAPm1BOwIC\nAnmIZDemespmMtXnydtvR6C+Hoe2bIlRLmsefxyPvvOO8n71rl26BFSL3Kprha4qLYW7oAAffuop\n/F5lXfyJa66R8jiLilDjdsMjK5k8nLKrbQEkZbb39GmUOp0I1NejwGbD78bHEb
lyBf/8yitKO2u9\nXrT7fBhsb8epL3wB5yMRxSm3wunUdJdl88jqelrlYKwm8R63G09vbMPKZR9SjsOXW0mmPqvbY9cH\ny3ttqK7Goy1NSiRBKg6v6hqXXS0tWO8+iS9f/jbCp/cuilNpLqmYPNKtp5kJx2yteqOJbtQz6Teq\nVnyDkEg3m7NGAJcsOA4bw5vye+urY2pDKl3Sjx70IGhpRVUjiD1zi0WCkzkGZwIeeIRCLZB3GBkZ\nwZYtW1BdXY1rr70WX/3qV7GwoGWVmDrSJp5ENAjI8TECAlmAqD2VW0h2Y5pqeQ3+Rp4dowCS0sfy\nMO0AxuWSIFpKnBYZUNcKXVlaisODgxiZmsJFzgV2bVUVfvbJTwKQ8jjPRSLoPX06jly/cuedqCsp\nwTK55Em504lCux1j09MxJU2Ia6f/zBm47Pa42peVLhc2rViBUCSC+w4ejCFibB7/63Ofs7RcCV/v\nc83u3cox+fPWyKnPO5M8JecfRGzZ/d/htseuX1laGtNvvQcXiQipOizR43bjGzVHUIwZVV3DxUEu\nfUelGyqayuc3GVHUqjea6EY9XfKcCEzd3S73+SkAF+V1dgDjFh2XjWEcQB3iFdRkc5bqNbW4IZ+x\nZy4bJFiL3KpD8wUk5NL3lEBu4G/+5m/g9Xpx5swZvPbaa+jv78ePfvQjS48hcjwFBATSQrIbUyuU\nIHaMSlUbBVxOJq/E6ZEWreWM9LHw17VVVejw+XBoy5Y4YqhFrn1lZfjT5z+P95VLJjwT8/PoPXUK\n/aOjCkEusdsxdfmyoo6q22Hje/fee3FmelohYjdxRNBszqZ6rHpzwufSjs3OKuSPHW/70BBmLl9G\nbWEhnt24Melx2Vz58B7umn0Yf+3YhdrCQmXcjLiy/rwZCmnOidkQT7UKKiAh3XqaqXx+k+ZNquqN\napEutmwFgKPysrXInErI+syeolcCqJH/rgDwcJrt8w8AtAqhZIpcL27IZ+xjj2yQYC1yqw7NF+Ah\n6pcKRPHmm2/innvugcvlwjXXXINPf/rTePPNN5PvaALqtKKM4IEHHsD1118PAPB4PFi7dq0SW86e\nuIj34r2Z9wy50p9cfX/HD36AkUuXsLyhAV0tLXjtpZcycjzmdmpm/+DAAF4eHITbbsfz/+2/weN2\nJ9ze43Jh2cmTOH/xIrBmDRq9Xhz/7W+l2pcf/CB+8IlPKNsPT0xIDqPvvIOOt99WHEZfHhzE0QsX\ngDVrEBwcxIMOBxbeeQc1N98Me0EB6J13UHD+PB79u7+L6U9XSwuCg4O4+Prr8H/ve3Hz2VVQgLdC\nIeCdd/C+8nKsamxE76lTKHv3XUxdvoypG29E76lTWB8Oo9lux7P33x833u7WVvT19WHmrbeAqioA\nwOjRo+g4d07pv5n5HQ6H0S9bxwZdLoxNT8e8Z8dbdeYMQnJu6w1nzmCb/F3N2nv58GEclc2V7t+x\nA9+87TZ0FRRgOBzGzFtv4Z9uvVXJaezr68ODDgcmC0/hrtkfYGJ0BYqvvw9v33M7goOD2HblCl57\n6aW4/tXdeisObN4cc30WOxzAO+/gxooK7Lj//qTjdbs9cDgexEsvvZYznz+t998DcMnvRzGAB/v6\nUJqF43dnuP0uvx/DAGb6+vBPAIrl9Tf29WGbtEPs9i1dCA4Gse3KNrz20msY9vsl/8++PnQA6JPb\n62ff9/L+JX19+IKB+VP35w4D4ymWj38DgA/6/dgJoKmvD6MALvr9eEg+Xqrz1QWgo68Pfw/Ak+D4\nNwLYobHe7/encf67TffXmvcPApiG3/8sAA8e7HsQ05jGs/5n4YEnI8efwQzgl8jttr5t6EMfWlq6\nMDgYxJUr23L++yGb76VlL8PvPyr/3QHgmznTv6X6PhGCA0EMh4dR7ChGV0uX4XrMVu2/ceNGdHV1\nobm5GRcuXEBPTw++853vaG7b19eH1157DW
E5RenEiROGjmFJORWbzXY9gOeIKM7KT5RTERBYPGSq\nUL0VMNs3fvu6khK8vnUr7vjlL3H47Nm4NvTqJGot59tl0OsPv+2q0lKsLC1FscOBifl5pR9Omw2f\nuOYaVLrdODczg8NjYwCkMNp37703qXIUjkRw0+7dGJ2djet/OrUi7zt4UHNOwpEIHujrgw3Ao35/\nXJusHa97Cmsq9qPc5cTE/Jdw+Oy47lwZqZOYrJalVsmR4MAAnjt5EpGFBdxWU4MnczCnMhn8yE55\njiCsqz/Jt1UD4KSqXT+iY6oF8BsADwEoUm27XadPiUq6MNgB/DmAGQCH5WXq+WP9PIaocml0jrWK\naWSzrIy1xTz0YOVVkZsIy7mkouyMUSxG8aSrF8nKqfj3+tE/KpfhqQ+gu9XcL0S6+1+4cAGtra14\n/fXXsbCwgAceeAD/+Z//GbedKKcicFXByFMjAQks7NE75cXp/7sJbW1SbdJcgFnTEn7717duhcft\n1nWbVYf/srDO+StX0OHzxRAdpqwlcq5lOD45CUAKy11WVKSEgh6fmFC2mSdC/+gonHY7jly4oCzf\nu3Ejtg8NJTXS8bjdePueezTDl82En6rnQC8k2uN249mNG/GMThgt229NxX4cHutFz0gPjk/8Xneu\n3r97N67pegb3ntqM0Tl7XHt6/dOaB3WI53A4jNGZGYTm5tB76tSilU5JhkTfUVaX59CDlaGbfFv/\nS6PdYm7bUUiksxsS6eS3fQJh5f1qXFEC+7qgXdKllmt3AUAvgOPyey+A04gNEFSHy5bDeIislruv\nVr9SgRFTnWTFPKz53ctktmxuQJj6GId0TVl1lQtYAXUaQjb3JyJs3LgRgUAA09PTGB8fx4ULF/AP\n//APpvuRCGkTT5vN9r8AvAjgRpvN9iebzfbF9LslICBgBRTSsH8zDve60dMDBDN0vxEMShFxK74x\ngA0/T+5S2tXSojjKqo109LZP5FDLq2I3dXejd2QEgQMHEI5EFAOd3tOnQUAMmelqaUHz8uVoW7Ei\nzrlWnRfpKykBAFycn8fL584BkBTO64qL4SqIfp2W2O0IRSKwc08E733hhRgjnwcS3Ejq5dWZIese\neSw3dXejaudOBA4cUNRDM06yrC/lLkbMG/Gbji/pkkZWXmY8EsGGvXtNjzERijl33YbqastcVrOJ\nbN3mpUtwg5CUzDYATm45s98qhUTwwogliV5I1KYKUi4j34c57pZjHAW4Sd5fi3RtB/A+1bFdkAio\nTT72YWgTYHaUCfnYibLX+HGqt7GqsuPiOsvyyNxjD2scaxOdDYHMwNr6pQLpoaulC4H6AA5sPmA6\nTDbd/cfHx/G73/0OX/nKV+B0OlFVVYUHHngA+/fvN92PRLAk1DbhAUSorYDAoqOtDejpARobU6+d\nmAx+P9DfD+Dv9gJrjIXQZiIUWB06G6ivR+/IiFLOo8PnwzMbNxrqi3rZpbk5qQ6lw4FLly/HtbG8\nuBiRhQWcl8mcDZLpUbHdjrfuvhsNTz+dtB+AfkitVvip2bnwuFzoPn5ccfA1Ou/hSBjBwSB2NO1I\n+INW89hjGI9ElDH7ysqStm0UycKC9TAwEEQ4PAyHoxgtLV15bT5kNFgy1dBNrXDVlQDOQCKdHkjl\nRdjVz0JZ2fFOIxoKC0gOrsxMx4tXcR63xhyvBhINqgHwKwARALchNqQWkEinG8Ckqr+VAKoBnIMU\njgsALM7ADomo8v1Uw4/Mhz63oQ096EEjGhe5rmPmAnr98KNfnskAAuhOaSb9yE4guoDA4iBZqO1i\ngohQV1eHr33ta/j617+OyclJfPGLX0RJSQmeeOKJmG1FqK2AgEBCdHUBgUB6pDOZSnb8EwPA3+2F\nfaW2S6kWMlEjkFfF1lZVYUdTE26TzXEaqqvxKGeskKwv6mVMYf3YNdco+5VxIbpvBgL4aE2Nso4g\nfcm+1N
EBX1mZoX4A+iG1TMXseP75mPOgd2605mI4HFZIZ6XLZXjePW4Pulu7kz5FZeVlrCadUh8S\nhwXrYSmVUzAaLJmqjqEOV22E5CzLlE47oqSzElHdjB3vJNfWhxDr4Po7vA/FGIND9qAuhUQYewDs\nhxSmG0JsSG0DgHa5DTXp9AA4IrdxERLhnOL6toEbA+snr6ndD4lgA8Ycc1PV46xwlrWmFmXm1C1r\nHGuzFYguICCghs1mw89//nM899xz8Hq9WL16NdxuN77//e9behxBPAXyDiLH0zw8HqC7Oz2lM1l+\noa8xDKwZxUJRBHUlJYbq/tUUFcWFt6aLrpYWdPh8aOdKojzZ2opAfT1euOMOzT7d8YMfYGJ+HrVF\nRXjq9tt1Q3m3f9WNse+0Ajta0bbchw6fD5tXrIBXrgnK9mHlQwDgCoDvHDkCAEn7wZCIkGudB71z\nozUXfM3QI3fdZbk5DysvYzXpTAfZLqeQye+oTN+aHx8YAPbuRdn+/VgRicAN4B3umA3y35WQSJ+6\nFuUE9/4GROtjtgGoQAXKsQyXIT0QvyRv9yFIxI+hDMAnIKmg1QB2Ikp8AWAZAB8kFbQBwLS8vBjA\ny5C0sncBPItoWDNfp5PPV2UE+3qNsfghke4Ncv/fQmoZklbkHQ4Ovqz78MSaMNf0YE3ZFpFvmE2I\neykBNdatW4fBwUGEQiGcO3cOu3btQg33MN0KZKWcioCAQP4jmTpZXhhdb7TY/MnJSZyLRNB7+jSC\ng4OWhNp63O64EFaWT8iDD2f90+Qk3pBdaR8aGlK2Ve83PCyHE8ONVbcUYWVjGMcuXIgxu+lubcXb\n99wT40zL5otXLFkY7fahIQyHwzg+MQFfWRnKnU78uKkJDw0NaYbUJlJmSx0OhCIRhCMR6Vgac8FK\nw+iF65pxzs0XsHIKiVx28wVdsDZYUh266wuHMTI6ikkAhYODOCxf/3UAPgBJiWTOtT5VO92IEs9K\nAI8C6EA0ePJWSOqkGhcADEIiquchKZuD8ra98n6MpJZCIpf3c+0yPA/gZkQDNIMAxgDcB+B38t88\nGJllobor5DGVy+Ngob4j8v8sjzUR6TcSCp2Kt6zdLn0OtR6esBxSqe1gimGuZnoZv46R6/TAFFkB\nAYGlCpHjKSAgYAjJ8gvN5h8CyUtqWA1Gqo5PTmIiEsGEnKdZW1SE0ZmZpP3gc2Xd/8deHB6P5k/y\n+wYHBvBWKITjExP4jRxmy6DOGx2bnjZczgXQnudwJILVu3ZhXA6zTSdfVi/vNhiUiHfnNT18AAAg\nAElEQVRxsRS6nYk8YYHsw4/YrLpL3GfSs3kzet1updDCTZDCYQGJUD4D7ZxQJ4A/QCJxrFhDKaLk\nkUcxJCXxXwE8BmBOXs7yM9cCKEFsvqcbUiQBr4Kytj4CiRz75HZZn1i+NSAppXcPBLEsPIw5RzEe\na+nCpOqBRK081gpIYbyNkNTSh5CY9PuRPEvRyDZqJCpRlJkc0kS9TLRucRBEEMMYRjGK0YUu4Wor\nkJPI5RxPMxA5ngICArow42Cqub/sVnvfZ93Y0ajvQpqKS6le2ZNU+5oMLCR1ZGpKIZ2VLhd+09GR\nsLSHUo7lr/aj/d4IDhyIKrxrq6riSrQMh8M4fPYsRmdm8NDQUExbasWSvTdSzgXQnmeP242PyOEw\n6ebL6inbTO3NpDPyUkC++XKqQ3f5z+STbjcCkJROnnQCURKnzgmtBHAXJEWyDcCPIRFFLdIJSGZC\nHwOwG1HSCURNgV5HfGhWBPGks0DuYz8khfIw16dSxN7stAKoCw9jzWg/PjzSg8/JoavMnKgBkqIb\nAHAU0eBPH5JnSBoJhU4lXNrt9qC1tRtDQ9vjcj2tCXM108vcy8XMHedgAQGBRBDEUyDvIPISzMFM\n7UfN/TNIONQkKt2+JgMjVRUyybMDcNvtuOMHP8CluTnd/Vi/ekdH4Prf
BuHxRG/QD23ZgmdUNTr/\nINf1rHA68fC6dTFtsf0+UFmJjuefxzwRfKWluMnjicsxNQOj5WkSkfvgwIBmrisgKZ2ApPbuyI17\nzZzEMID+vr6crJSoJsUsJ7MWkprnAbDd7cZYayvuk889MwziSacTwDhiS600QHK//QCkkFeWC7kG\nElHUw4Lc9kSC9YMAEj5Ch6SAHtFYboNEehmRXQvgZwA+Luf9vudtxM+bdsDBbbMSUZJphGzyMJKl\nmEomI/vd0zLKykztykS9zJ1cTJbf+ibeBKBtbpQLObC5CHEvJbAYEMRTQGCJI13n2GwSjuOTkm+l\nFmEzAz1yxUjf0a1b4XW7pZvemRm8EQolJLtac5iINEcWpFvYi/PzcYon2+/k5KREZk+dwtT8PIbO\nndNUSOPGJivQbW1AmLuH8rjdWFlaisNjYwnHwvdz9a5dMXOkp9QGg8DEBFBbCzz1lAiz5aG+1qys\nn2n1LbLaEXcYkjI4CimEVGsbIKpvARLN8CBaQ9MFYBWAU4iWUuHDW62IW7iCqMKqhwpVP/n+AlF3\n3EOQjIZ+2NKFN+oD+H83H8B5t0dx6m2U2/IjtXNgxDc2HW/Z7BllJepl7tR+ZErnOMZRhzpN1Tdf\n1VBBmAWWIgTxFMg7+BOUoRCIhzqc1fT+cimWD/z3AXQMZC4MFgB8JSUAtAmbGTz32yi5+uLB2HIk\n3a2t8JWVKaGpFU4nsGZNQmJuZA55ctpQXa38rdcmv/1arzfp9gyJFGi+zaKnmzQJKm9ENB6JxJDU\nRGG2hw8Do6PAQw9BgINape8CEPD7U9aCjJZLSQVqUqx+H0S0vEgVgAH5/2lI5kLV8rbjsvMt9u/H\n85EILkAy7lFXtk01k6k0hX0uIlpKhUcIQCEkYjwA4IOQwnp73R78sLVbye30QCKmByApvJk6B6mC\n/e61tHShvj6AzZsP5L1RFo9USRZfxuV1vK6p+lpT6iX9vppFpgmzuJcSWAwIcyEBgTxHtkxf9Exn\nrIQRsyEjrqtV/7wfoetGgPe8aD+5Gc926ZshPbxuna6DrBnwpj8Akhotmd2egTc4Utdl5dvs2OiW\nHXilBwfd3bHbhCIR9J46FTPXauMiNtdvHnVg/HgRSq+fxMdudeDJjUvD7dYKWG2Qxcx4mKGPlR9n\nFl7LzHHY+yJIZIs3CHIglkjy5jzYu1d6CgEA9fWAxd8FyxDvQJsMMf0zCVYahrn0ZvIcLDYGBoII\nh4fhcBSjpaUrZ8irH37FmTeAgGGH3DDCCCKIHdihG2psZJts9NUsMmMaJbCYEOZCgngK5CH6+vrE\nkzoOfj80CYbVyIYDrRFnXCME+Pb2CHqvGcTaN5twaJ87KRnPp2sqHJYeNuzYkfghQyKCCsTPtRah\n5+caBCXRrsPniyvTcrVC65pN53pSk0OjYKGzxyGZ9MwDuA3AckikMlHpDj/iS5PwKIAU7qpg/35g\nZATweoHNmwELvwtskGp4HtZZH9eXNFEHycCInxc1ITdT9sQKaBUyseo7au9eP0ZHpbNdXx9Aa+vi\nO9IC2SFZVjnfZosQahFmK9178+l3b6lAEE8RaisgkPfIVg5muiG7RmDEGddIzuqTj7kRCLcaIp35\nBo9HeriQbFwsRFqLdALGjJ3YXAOIcXex+mcz027GmUQqbs4J20Nq2XMsRHcEkloYglQDczeiYaMP\naOzHh9d+GJIDrRpxRK+lRVI6LSadgHRt6ZFOzb4YAH+jUw5JUQWksU4AWA2JYDKwc7BYIbeZDLfO\nXo6oOWTGmTcWRkJXjYTRZqOvgLZplJnwW5EjKpCLEIqngECew6gClktIJzw4lXqhAsmhpWiHIxHc\n1N2N0ZkZlDmdmJyfx9qqKhzassXSuc9GGPdSBVPH3oTkNFuOqENsFaTcR+bWympv8vAjqnZ2ILY0\nihFUAbhgttMyqgGcT3HfVOCEVOrl
T5CU4SkAk/K6Onk5j8UKuc3kcRPVAzUKI6pbLtbVNKJUZiuM\nNlWYUVtzfSxXI3Jd8Xz77bfx5S9/Ga+++ipqamrw8MMPo6OjI247EWorICCQV8hUeLCR/M+rCWbm\nQ4/QW50Lq4VMhnFrhS0m3SdPrqMgJGXuovy+DsCvAfwtJOVwHFH10APgPcSPX01yApCUUjuihFUP\ntZAUSLP5mJmGOj+VwQ7JuIjNlwtSWHIxgLcQzfFk14wTQAmAnchunmeq4dbZghFCs5ikR4/0Gsn1\nzPW8SjP5qrk+lqsRuUw8L1++jA984AN48MEH8bWvfQ19fX3YsmULjhw5gtWrV8dsK4inwFWFxcxL\nyJcb0lSQ7tjM7J8s/zBVpKKcBYPAyy/3YflyvyXmTGbmIdG26ZyP4MAAnjt5EudmZhTykMtKohbp\nteqz5kdU0QsAhm5/01Vgs/Ud5Ud0bJUA3kUsUWGkUm2ew4PPZ/wVogpkCRKXErHJLyvzLdOBHUAZ\nJDJ5AMBnEBs+q0YFgF8AuBcSWefnxg/9a4aRmuP4B/hwO8rhQBekEi1mH3CYQS7l4xkhNItJetIh\nvVYbES0G2DXqhBMlKMFO7NQcSy5dU1cLcpl4vvHGG/j4xz+OyclJZdnGjRuxbt06fOtb34rZVuR4\nCghkCVp5cEsF/Nhu/f6gZikOo/snm5tk+YepIpWapcPDwNGj2uVJUsFzJ08q83DL008nzF1MNGda\n64zmQg6HwxjlSGely5VSDddsQStP0qrPWip1NdOtfZstsLExYqn+KHVBIk7vAvhXaNem5PMZRyGZ\nEs0jef1KQmZIZ8I7FhXs3N8FkPo8BuD/AnB9kvYvAvh3AJsAfAxSyPB1ADYA+I28TTmAh1X7sxy7\nERThMBwxNVHVeZmsJusKud1M1GZdDBjJccxkHuTAQBB79/qxf38bIpH4GU2nfIpWXmW+gV2jveiF\nCy7dsXwP3xM5oDmGdP0OrPZLuHLlCt5444202+HhSL6JgEBuIZNP6JLlHubLDalR8ON1/lV0bO4n\nm5RQ2GDQWCismblhBjnpgil7kYUF3Ob14ifNzYbCQXk1zVnRAsBvmTlTZCEaoDg1P68oZ8HBwTjl\nLNGcaa1jZEyvPfW+AOBxuXDkrrvyTp3XmxuzSmgXzIctdrW0pJVHbPV3lDpcmKlrTki1J3dCe2yM\nVAJRYgQAtwJYKbdXA4l0vmlpj1PH5weCWBYexpyjGP+zpQszCfIQ+VBgngTbECXlgERK/wySey1T\ndB2QSOXHIBFuQMptPc3tNyGvfxvR+WWkphxOTCD6QOM+eT3/gIOf8xH5fxYebRaZ+N1LJQwdiJKz\ndLdJFeHwsOLMOzgYjHPm7UJXTqmWeqG/mcqDNUq8L/kvKcpwEEGRA5oDMPobn4n916xZg2XLluHh\nhx/G3/7t3+LQoUMYGBjAJz/5SVN9SAaheAoIcBgelnIP9dQvI86uwSBMqYVmt7cS/HhLdkXHVu6U\nxmaUjAWDwMT3W1B7qh5Pbcic660aTNkLzc2h9/RpPDQ0ZMhhlFfTSv9y0FL19baaGgBAQ3U1Gqqr\nAeiT8a6WFqwqK4Pbbsd9Bw/GPKFUX2vBIHDsdxIZa6hMTO67WlrQ7vOhw+fDe/feC19ZWfoDyzL0\nPmtmldBUXGKtdqpNB0FIxJJ3pmWEphdSaKne2Jji1gbgD/Iy5urK2ntc/nscUrhtJcypjkk7zzpg\n8LttWXgYa0b78eGRHnxhsNPwodgcVAM4B0m1vQ4SOW+CZKr0UW77ywAeAsBrAuxxTTm3bBSxzrJM\nyTuGDyGAqPkPU5d5MyBGfp1cu2oFdTFh1j03V1xSkznz5ppqqedEa8ah1gyMqs3pKMMCmUG64kY6\n+zudTjz77LPYt28frr32Wnz/+9/H3Xffjbq6OtP9SAgiyuhLOoSAgHU4dOhQxtretIkIIGpsJAqF\n
UmujuVlqAyAKBKzf3gqsWUNUUUHkdMaOt7NT6k9rK1FHh/E5WIwxEBFt2reP8MgjhEceobVPPkmh\n2VlT+zU+/TSFZmctvaZCs7MUOHCAQrOzMX/roXnPHmUMgQMH9LdrJkLRLKHzALXfa2yci4nO/n5q\n3rOHNu3bZ/i8GIH63OUirLyemin2R7WDiDbJfzcSEfuIdsrbbuKW8fs6ub9dlPzHu8DANklfzdHv\nBQSM7fOVfZvokUdA//vTjVQ0GzJ8LJc8xnKd9W3yvNSq5q6Vm5/b5PVHNbZLBSEiChDROq4fqX49\nGr2mtK4DPWhdR4nQTM0E+V8g5ZGkhk7qpFqqpUqqpE2zzbTvQAfNzsb3upM6qZmaaRNtolDKZ858\n3xIdcxNtIhCokRpj1ustzxaeO/QcBSiwKMe+WpGMExm5Z8jk/mp8/OMfpx07dsQt1xuHvDwxL0y2\nQbovQTwFzIARn02b9ElPJolnKCQRp1RJJ5F58mp2eyNzlAwVFdEbwsLCaDupEkgrCHsqCM3OUscv\nf0ntv/xl0i9angidmJiI+XLe/G//lhGSpHX8uieeoPXPPKMcyyiRWqw5ThXJCHWqxNTqH9ZMwMrv\nKEYOQEQfJokgMELDXwbN3HYBjX3NEMsGIjpBREXcstVEVJVgH82XfM2ikQghY/sUzYao80DAFOks\n0VhWqnrvlOfjBDd3nUS0jOIJK1uvnmMz6O/vpD17mmnfvk30GXks6ZBYo9dUMxknuUbG2N/ZSXua\nm2nfpk30mVBr1oiSmszxpDcR8V0McpzsmCEKaRI8veXZQibvpQS0keuc6NixYzQzM0NTU1P08MMP\nU319Pc3NzcVtlw7xFK62AjmFTJXZyCbM1tU0u70Vc1RTA4yPS7mdb70F+GRLx1TdZtOtJZpOXU+j\nSORUmo06kvwxGAL19djR1GQonzDf6rUmK5EiancaQxjAFyGZ+eyEflitVu3HMICbIIWLNgA4Bcl8\npxGSMc+QTltVANZBqs/JtukA8AqiuYqGO5+huiCrAcwCmIY0N6yWaDGkkik3IZpfyWMVovmtE4iW\nm2EwUzszUY7k3r1+JQ+xrj6Ana3dWSmPYnUN0L1+P0blH5y6QAd2djtjciczlaeodqa9hEvoQQ8A\noAENeAEv5IybrihbImAUuexqCwDbt2/HT37yE8zPz+PP/uzP8MMf/hD19fVx26XjaivMhQRyCsVy\nUoxVRi9WwwhBSmaco9WGGfJoxRy98gqwYQPw619HSScg9UeL3AwEgwgPD8NRXIyWri645ZV682GW\nSLJcU7ZvJh44mDXyydTxK5xOXJyfV47F8gmTwSpDJjNIp6RJMoOepWbUlQqMmLt4IOUnJgMzUSqC\nRBKZcdD75PXPQCohwnggb4YzD+Ao19YFSOSlVn7vALBvIIgr4WHAUQy0dAEq058KROtjxnQ+Q9fs\nJIA1iCeX0/LrEwC8kHJXSwFcgjRWN7dPLbffhwHUQyL3Rkuj6Bk2dSE2D7GlaQfazA8xJfOfVMy0\nEsEh/+B4GxvRsuNRtKlaZXmKUn9TM6jRIq9a+YcP4AHYYMOjeDSG3PH7/xg/xkN4KGvGQgMDQXwp\nPIENjlp8qeUpeFSfi1SJeaL9MkX2BQS++93v4rvf/W5mD5JMEk33hRyXlQVyC0ZCXQ3nuqQZkqre\nv7MzNkQ11VxGo+Gsev1PNkdWhOKqsae5mR4B6BGADnCd5seyalX0uOvXm5unbISRbnvhBfLu3Emt\nv/hFXJjmc88/n/HwTRYiqg7zzWUYzT9NBfkQMpsqMhEWaRR8m16N9jtJCqG1ycvXUzTPkX9VkhSW\nWsOW7WkmPALpdSCQ2RsHAy87Nwb1y8uNq4Niw2v5vMYT8vp2Sh62rEanPEcgKTR5vWqf2dkQHTgQ\n0MxD5NtoJv18TL4fzYsUFjkbCtGBQIBmdb6YrchT1ApVNROGyu
9fS7VZDV3ds6eZHnkE9MgjoP9x\nYFVcrmeyMFy9/NBE+1kVTixCbbOPpcKJ9MYBA6G2QvEUyClYqeqkq6Kp9x8bAy7Kj/QrK1NXG5Mp\nlkwtPHYMCIXi+59sjsyM26iixT/1buI6zY/F7Y4et7Y28RjV0FNarcTJyUmMRyLoPXUqzma81OVC\nd4YLafPKZr6ElWZSlTSq9OYjvnf0KL45MZH0c5WsxqhRxYvfjjmoNsrb96rafwLADLfvYQBc0IOC\nmyGpmI2Q1E/ICh68jYCGk2i2saCxzAWgFZLyykJonQBehhRiex+AH0Nys2WKoJaabKT26zCk8iuA\npHTOqfZxuz1xZT602mCKabTMSvRsFmMPACcaAfx9wpYyB7fHg9YEPyJWlC7RUjfNlGMpVs4YMIpR\n3IpbsRIrs6II8sr2k03uOPVXzzmWqZYv4SXMyVfPA3gAz+LZmDFpOc7mixutUGYFNJGMmab7whJh\n9wL5h3RVNPX+7H1lJdGJE6n3S0ux5FVKXi1Mpf9mxm1U0dJ76s2PhT/uiRPpmzRZjXxwQ801LGVV\nMpMw+rlKZu7STMYUUX67Dq5NdfudFP8jXUX6TrBs33YiqpwNSUqnCdMfvZfX5PZOInJTcqfdFfJc\nuIkI/Z2SSrtvU0yfjehDRkx31I6wqZgR8W1sI6Z+vkQhqiAiUIjuT8vgKF9gVN3UUgc7qZPW03py\nkUtRXtfTeksUQSPglW0t9VdvbGqzJBCogzqU9Wy/bbQtbsyLbUpkFEaU2cVwIV5MLBVOpDcOCFdb\ngaWIRKGk/Lp0yY+aIOqFuFoR2sqHrNbWSv83NBC1t5tv04wzr5VkzApH4EziaiNRmQi5FjAGs58r\nPYdfo+Uu1NutIaIKkgge/4ysmWJ/oGsonnR6KEoO11M0DJQd40OUfqmVExRbYiTRS8uxFiSF2fLr\nKik23JUPDXbIocGJ5tFMGRKi9F1v1W00c30P0C6d3prtZXaRaRKhRWT4ZXVURyHSJoCZ6iPf3gk6\noUsI1cdlfSyjMgKBGqhBcz9+fF7y5hVBMxKGfbWR06XCiQTxFFhSSHbDfMsth3TzB9X5k9m4+bai\nhuViqYWMjG37q1nNedKbv6VGapZirsti1Va1ApmqAZotJMoZ1qy3qaOQbiOJALZSYpqhJkGSXia9\n3BRV09RKo149z+UUS+K88rIquS/JSKNe7iXkNowS1zqK5p8yglxBUk1Ovn9OksgsI8d2IknpfARU\n8XQjHZ0NJSWJzVx7i/FxiT48mKcQ3U/q3krfUc20uL1MjEyVMmHEw0veOCJjRmXMRB+NtqfejvUx\nEVnlx1dKpaYJWjLCls7vnhEyaESZtYqc5guWCicSxFNgSSHZDfNHP3pIN5RUHWaq15aVxKmuTmq/\noiL1ENxU1EIrSaHePJldnq/IBvHMNplKJ9Q8o301INpk0tQoG0h0PTVTPHXQU0i1ttWCekrVBJN/\nz0hhMRHVkvYPdztFiZC6HiYS7Gflq5QkMrmN/oZq6AVqpm3UQRHlkmH9YyZIJI9dMUOaDdHyAwEK\nceY+iS49I+pyJvXGZAqqdE0Z1cAlZFspStVoKFk/tVRNBrNhp6yPXvLSelqf9twYHXOqc8PG10rJ\na6iqCRr/3k1uqqRKaqVWZX/+e8rstVJLtUrbfIiwWVhFTvMFS4UTCeIpsGTQ2SnlULJQU60b5lBI\nclBdvz654yu7+fZ6Y7dXh7amQz7NOrjyY02H/FpJCvVIitnlVoLNT12d9rnOZWidW6vJVDJyyH8W\nzBLJjBK/ZkrKpqwKAefHrafqZxta1EEvDNwozeCJYDtJRMxNRI90Er3STHRwE1FFSFILB0lSEk+Q\nKjRVfn2AYnMW1Y635RR1ia3Q2N/Kl5eIKuklAlUoN9OM/LUSkY+IlpFEPpkqzBNmtVLczLXNu/yy\n9tqTzLPW/gzZCYI1F+Cbba
Uo1dxDvX4mUjrT7WO6eaCsb63USh3Uodsvre06qZNqqTaOCKr30cvr\n1COJalLN5o1XS/XGq3UOEpHRSqpUtm+ndtPzZwb5ktNqBJDKDi+Jl974SBBPgXwCT5raE3yXGSVX\n7OZbTQ4ZcbJCtUuVhKWrGlpJCvUUV7PL04GarPHzk+ghQS6G/WqdW6vNjcyQQ7NEku/rthdesEb9\nZHfmTH5LwKasysflx13zjQM5odKboQ5Gt2VlPUCkaA8hInqjObpiVyCeMG2i+B/t5Rp94NcvI+lU\nGlU97RQlgmZuHopj3u9SSAc3pDhll82VVhkZfrz8pdess60WEj0I4NtZRbmRiWm1UpQpBVWvn4mU\nTjN91iJ56c5NqiG26mVa+ydrW289I2jLaJmyfjktV9RSEGgtrY0ZbyJyn6gfrM0SKtEkzwJXJwTx\nFMg7GCFNhw4dMk2u1NuHQlETn3RVu1RJGOtTaSlRa6t1JkK5bvKjBzVZY/NTXp74IYEVYb9Wh9pq\nXZ9Wmht19vdT5aOPEh55hNY++WTSNs2SXr6vlqmfzRT9ZaijrNyR8+Nu/cxsxlV6hmznDC8naVqd\nJOVfKoRHZkpHGiXFk6mVRBJ5XE8SkXRSlOydoHj1jpFHkJRf2UzxP/YOjWXLKaqOnlCts9EsNdM2\naqMILdPYl4XMNtA8tdP9tI1m455b8GosPzY9gqhF5M0Er4ZIIpW86ZJWO+qanlYglWvKaqUoEwoq\nc6WtpVo6EWOFZX2NUL7f6c5Nsr5pETrmUMuW8USQJ/VaYbXJ1vNQq5HbaBtVUzUto2Uxc3zo0KGE\n5D7RGEMUihnHKoqvYZoqlpKZ0NUGQTwF8g4x4YE6StahQ4dMkyut7a3Mq0wFoZAUAsyTJr79bdsy\nq+Rl+lhac5Vo/rQeDgQCUt5soocEVoT9Wk0UMkn+OzuJKr4VJYMdv/xl8v6kQXotU2rNpafpwkzY\nMD/ubD6QySTx1Arp1AqZDRApTGtjKJ4INXPbekgKzT2qsS7AvS8hiey1EsWUKymYDSnmP4ykrqX4\n0xxLTs8RaB+10/0UIkmpdXLr2yiWJPJ9Ys8tQnK/2XIWJJOuqpzoxpfvRy23H9+O3qXe2d9JzXua\nadO+TTG5p0aQCwZomVBQK+Qwai0yawVxZn0G6TvHpoJkfdMidPyy5bSc2qldU11cSSuphmpilER+\nfTu1Jzw2I6aM1Oo9MDh06FBScmnE+MjqEjZLyUzoaoMgngJ5DSuULKthdZ/UxkR8+2pSajX4Yzkc\n1h9La64SzV8iYpDquqWI5mYifEUig5XfzXxNUsuUWjNsIAHy3XgoHXRSbF6lYk4kv2fr1IRHTYQ6\nKRqey5ckYURKnSd5gmKdaJ1EMeVKquRyJSBJqewg7dMcDc+9rGzfQRFlPVMwtUirHpnTCjNOF4lu\nfNXhyTz5ZNC71Gu5Oes4kCM/aiaQSQW1kipTbncNraEKqiA3uWkdrYvLjWyn9pg8zGwoalqETs/Y\nqJM6FZWygRpiSFwN1VAd1ZGHPIbIs5aCbIRcbqNtVERFZCc7VVN1nPqsFbLMXw9WPpRYSmZCVxsE\n8RTIa2TDwMYsrO6TXu5pY6MUfssfy+pcRj7Ul/WhslK77TVrJHLs9Rp37tWaq1w8p/mGTZuIUDRL\nldsP0Imz+VdqJF1YnSubVZhwoNEsu0LRH9dKbjkLAV1HEhFSf0TVRIjPz1TnSbL9a7hlAYoNtwWR\nUq6k8ulGWj4bilmnNu5hY1Arsw00HzMNfD/V++qRueX0JoGICmiKmmnO8G2qun3+fSttTXCjHp/f\napRCVspzhqcbqd2k4rkUwQhGJVXS5+hzKZNBXjXlCZtWW2qVdRWtSmj0YwTJzIDYNowQrqN1MQ82\n1GqmVgkVfr3WMdl7PszWTFixOiS5juoSrs+EOp2JtgSyC0E8BfIaekqWkZAjK0iaVhtWq2t6
4aXq\nv4ni1dB0CShzB/Z4KEZ15cHmgFdE6+o0m9Ns34rwZiuRKHx7sZAwdFSDfSz2HC42rMyVzRQObT6k\nTTCbyTBb0dpUq4SI2aY7KT5nU4tIsWN5SSKMcbU5Z0PkPBCgdbMh8nDLeUKs7lOd/HeZfNxEl7DR\n8Syj/QSKxGxrhN+r2+ffd1Ak4Y0vTz55FTnZMVtnQ4QDAVo7a/6WOhdCba0AT5j4GpbphFeyXMMC\nKogjbImMeyqpMkZdNHJsLZKpV1qE35Y/Dtte7T7LHnSoS6gwoszWa4Uoq4lhIzXSalpNFVRBXvIq\nCibfp+cOPaf0lQ9JLqIi3XxbPoTX7DwZhcjxzF8I4imwqMiE22hnJ9EttxxK2GZnp0Si1CGd6v4k\n6x/LKwSIOkzGcBkdu1ESwZeZKSmJH1uq4Mms1hj59QBRcXHqtUpzAXqhvmkV0k6z5mXC0NFmMi+r\n5BhSmZ9s1zxNB1p9PXTLIe3zZiLPVbPsCulHKxttupmiXfNQVE1UEyl2LK380ZGHiMoAACAASURB\nVOUk5VOq19nl9tnx1X0yYrpjwvyYiIgq6Sg3ngUKkf7HJlbVjG3fbAqy+lzoHTPRPmawqA/HEhAB\nsyRBTTD1XFXNtHuCTlAd1dFROhpD2LQUa15lPUEnEuaA8n1gxkBaiqLazIft5yKXsryQChUSyfrJ\nk1E3uWPIHq+Qsu218j0d5IgZRwM1KLmjPDllCmYM8T4UDW8OUYjaqI2W0/I40snWd1AHraSVhuqf\npvMgQeR45i8E8RRYVGQiR9NIm+rcRUaU1PtqtcUTRp68Jirtkmo/9aBZA5Jrb9kySilcVasupjqc\nVw2myN58M9Hy5eZIZy6WOclEqG+6OYcJQ0ctMuRZTKQyP2b3yU4NRW1o9tWMraoOkm1qNBRVDS3V\nlDncaoXpMpWSKZ5a7rFriaiaoj/8Xnk/deivVq4pPwaiWALnovhanGq00pxCOvWOw8Aru2rzonRI\nYaJj5iPU5yURETBLEtT5e1omPKm0yyNRqCaf09hMUn3NNmrTrMXJ96GGapS/Wf9ZG1VURUwJ3Ebb\nNEN/WY4mPx6e9IKiYb8ucpGNbMpyJzljyGAxFcfsx8ZaSqVUTuVKrquTnAQCFVOxEsrMO9GCQD7y\npfXggEj74YNenqaRBwp8qLEo1ZJfEMRTwDDSJQla+6dyk5+sH0ba1KvRqS5fokW6tAje2rXax0rk\nCsv306xjrBZp5dv73OeIamrMl2BRq5eMUCdSXNMJ68zEg4d0kYkw1XRzDhOGjqZ7N5wDSGV+zO7T\nTNEfHe1LLQE1TZO1avY1C+etmZKNWRtaXUvUlpbi2UHxZJU3JFJvz9pMpBJqGRsZGZ/WeEJEVEpn\nqZyOkZdephMUJqJYIyKTzxKTIh8/qnqXfjPFzn0isxezRjBqUqi3v5F2UwnJTJQLqQbfB94p1kc+\nWk/rY9oopuK4ZexfBVVoqrAhCilht2pnWPU/plh2UqcSUsz+HZX9qBnR5P8xJZUnjDypraZq5W+9\nkijJSrlokVE98q+neKvzY3mCLFTP/IEgngKGkS5J0NrfTBgpI2Zqsx01QiGi5uZDScNXtcpvhEJE\nbne0/ba2+NItzEm2sVFS99T95/vKiClAVF0d22+WP7l+fTRE1ujcataA5OaSn+tVq4yTWtYuU3L1\nyLtVSmU+GQmlE8aWDzmHhpEB6TCV+TG7T3K1qZl0aUyCVUawbXaWag4coNbZWeXY/PVk1ZSq27FS\nYePb2qY6DlM8+Vc7xU8bI16tqm35nE+94yZqJ9XxVdARpd06OkxEiV1zGbKlnps9jjWhtrFHbSbt\nS199bRlREFNVpfT2N2uIo2cmxKBFOJMRW74PvFKqVjS1SCMjdw5y0FE6SttoG3nJG6fgrabV5CAH\nVVN1TK6o+l8zNccpxOyfi1xxKij710ZtRBRLolkb7zv0
vhgiqVcShe9XG7XFnRczDx8SKd78MYWz\nbX5CEE8Bw0iXJKSzP0+kEtVrZND7AeYJkxZpJIolgcuWRUknH1ZbV6d/bC3VUC/8Vb0tv05PLd22\nTSKrtbXaYa18rmdDQzxRT0Qa+bqYeg8E9PJj9eY5lfzVXAzBXSrGHWmjmdIiYYuF5GpTApqWJoNr\npvgp468nrfWpQN1OojEnIzVsfR1JqmUrSWQypHEcXvF8pJPot81Ec5uItoa0yeoJig1p1TJCYghR\nfG4pwzaSnHWThdrqwUsvE4iomN5QFE8jqmQzZecjYPY41nxHxR7VgojwjCKZoqnl/qqnjqkJG58L\naaYP6vzKNmqjEIWojuoIBCqjMmqjthjn2lW0Ks4MiLVrJ7uynFcs+fxQfj92HPZPrX6q/7Gc0/W0\nngqpkNbROmqlVmqndnru0HMxhFhLzeykTnKQI649fk7MPHwwqngLZ9v8hCCeAoaRbghiOvvzpJWR\nIrPhqUTGVFsWXquX66lXTkTdV74EidNJtG5dPFlk27rdRHa7pIqy9bxxEa+WJqvdyfe1vT2e8GvN\ngVZupxFizeZCTRQzoY4L5AgskNE6+zupeU8zbdq3iUI5UzIiwa10mnfZyaYs1SlNR+FspsSkhl+v\n3k59HPa+gYhe53aMBKLTpj5eiIgc3LI6jfEw6E1/sjEkwwkKUx0dVkinUWjNs7UqqNTaJpkYZzcn\nNHZ0uUIw9ZAsz1Pt/qquj8mDN99hobJGQnTVfVDnZTrIQV7y0m10WwyBZCGsaiXRTnZqpVbNsFpG\nQhuoQXH8VZNBfj8b2RQFlyew7F8Jlegei82nlprJclfVbrwVVJFQpUwFIQrRKlpF62k91VGd4fMi\nkJsQxFPAFLKlRKmJUGur5Kj6/7P39tFtnfed55cEQIgvIgG+GaYp03QiK87YLhmxcRLGBVpT9ZB2\nQ9QTbhRvDtOzO+DO+GS3ezqxN+2cnHZ3JzOd05w5090507VmWuXNTCNbtWVFVhwqAWlVSezaieg0\nTc02Cd3IDi1LASVLFqm33/7x4Ln3dx889w24AEHpfnFwSAD3Pm/3Eryf+3vjffqBE9VNtrvbHrCm\npwUoSoshj8eMREQ7dq61vMRJX5/YJxol2rlTP1a5bXu7+bksRcItrxxg5batraIPdR5yrHKOY2MC\nQCWoc1dhO8ur05rK9pNJfVKmwUFz7F5iX8uN0w21QarkSrR4dZ7+XFrUKnwMNDV37d9ZcFsyr0u6\ng4g6SCTmWSZ/Fk5VXmG4XbOd2o/ltabhHJmxk8PFt3NkgmezzXwqnYNfGMxRjlL0DCVpkcYc6n3q\n1pmPfZD+xndcoVWitQJ10BQdq/Hldb2jplVO7pa6six2rqJEVgsaB6cttMUWdnKUM8Cui7polEap\nj/oMCyCPlYxTvATu+qiPClSgVmot+ayXeg04VD/roi4jk67MbCuTC6ngK/tZpMWShETy92ZqJgnJ\ncj0lXPJ9pFsuXx8JprzWqpxrO7VrM+C6ycmKHBTQhtoYheAZypdqZYnSgZBal9IJTqTLkQQcDnES\nZnXzUN1IJyfFe6OjJoyqlkL+Pi83wvuQ2wwNEW3fLl5HoybEShjkpUhUy6vbU42b5fGl2ax1TVVX\nYTXZkpNVV2e55seCz7urSw+XbueRrg+nRE1uCuKGSehqWyrfJU3SRASi8U+NEx4DjewfKbF4Vlom\npT6tqaUq53zqIPMfZz85g5cbdLnhxTQJwE2TSBTk2bKnaTjNxj2peW+i+F6l5UqsylGaxXB6+Xcl\nLm7zYp+FHHX7OI94wqMOepFQdJss7+K4PBt4kN9R1aiTWI02ndwtdfGdTmVUuHTwJsFMxmCqtTJ5\nEh75kG6ujdRIR+loSYxmL/XSNE1r3WFvpBstLqx8DLo+4xSnJmqidmov2UeCqlyTIRqiPuoz4JBb\nY2Xm4DSlCXlrO5PF
v2AO/Ha1Vvm4kpT0lH3WLrGT7E/OLcxmu3kVgmcoX1KtadWyfKpJbrjbqkyW\no7OCSt1/f74EODmkcndYbjXk0NTQIPrnYOlmKZQxoXwO0aj5+cSEFWwleO3eTdTUZLWmTk+XwmUk\nYl0Xaf3UwTefu87lVo13la693JLpVRwUda7GKlw6jcWLi29PjzO4Ou1b7g2TEDxL5VbSpAQii9fT\nhQ8VaOrQlPaCvtLSM+kD6U1hTS3nfOomAUQNB9KUPjROy2sFW/BKk/lPtpv8u4Dy/acc3ucgqiYd\nktJhlO69YG1taRqnQ8U+/sGjFXicUNyn9cCvOZ5HulI1PcQvbv6ygqQn/lciRzn6lfyvBAZ1Xlwl\n/VqUy3W/LBdYdfGdWcqWuIryWEVuIdVlgOXj5/ORYMXhaIRGLEAnrZLSkikfavkS+eD9N1ADdVAH\nbaEtFkiVbekAVffopE5KUYp2027LPvL3IRoy1qOf+i3geQfdYXymAr9aa5UDorpuXs8RuYbcqrtI\ni2E2202uEDxDeZIEA+m26ZZZtlKpSW54WRPed0+PsN7dcIMAJlk+RAXCoSErpHIrI39K+JKApz77\n+pwthdzKJ8eeSJifxWJWILzrLrJk2AWIBgZKrbT8GY8TLS7qkwBxF2UJpXfcYXUB1kFzLCZeT06W\ntuX35oLsx67+p7Qg83hXL2DIYdWttqjTvqHrbnByK2lSApEerqcrLT0zfsjemrrZtUxETQyse+am\ntBf93LW1lfQA6SY7m5v6fpq1H7PpS3fYvaCVDmy8w8540VX1m1TwGMNZoAJN0icpS+s05nIepal0\nrmas6yWapE/W1BrjDRQFHHiJk/OSMTRNzueWCozlZiH1A6y8z920m3qox6ih6VbeQ3Ufla9VeBqm\nYct8JHgu0iIN0iDdTXcbkKmrwzlKo0ZioBEasWSb3UpbCSRcX50y2Eq42027tS68bg872OU1O3ny\nI7kOHdRB3dRNy7RsWWvuwtxCLcYa8DWV6+YkHmcrEzvZxdCGVs/NqRA8Q3mSCga1uJC3y0Db3+8M\nh3x8w8MmTKkgpSsdooIuf27daoISB/GJiVKrKAcoO5fZjg4TlDlk8kRCdk/pdjw9LQBOQjd3r5XP\naNRaz1ONd9WNWXfM/Wp6WvTR12e9MaC7aeHlfOLg7DdRVTVqc4Yimv72v6KeL/wBjR38iPbivByI\nrLT0TGGtQFNzemvqtSAJ1m37RwhrBeIX/RLKeC3KXtIDpJq11mtCH/m+tG52s77k064vvwCZZm3K\nOaYWcoQDacKhcZp0PMaV2U/dzqPqW2z9yRsomhfwbiBX6sJaesRM9+K/ozH6qOF+qoMR2ZbqFuvF\nmsnnprNU2s2Rw5WsoamDYNmmCmO91EtZytIyLVOWsrSNtlEXddEYjRlWOL59IzVa3FwlfPI6nFto\ni+Xz7bS95Jg0UAMdpaOONTtBoAQlSkq/RClqicm0e/BtZNKhDuqwwCa3uLZRm8XS2kiNlmRFco7d\n1G1Zg17qpQmaoCxlPQGi7hxRz+0CFaiHelzPYT83Wpz2DxMZBasQPEN5kgoGlV7I+3Wt5ODDwYW7\nmwIi4c7YGNFXv5ovGZ/anlPpkELB6iKrjkNtSyYS4k/pNlsoEDU22kMkB92hIevvW7aY20nQ5i6s\n3OVUQqZTP+rYec1SmUjJ7pjbaccOAdHd3VYXXdXqLJ929VPrHQyr4mpbq4KAVZKbW+umqF/q9RgE\nfKzKPZ8kEI0VoZODT5pKAXCZ3DPCqnDnRXz/ePHnMJklV/hy8XIrU5r97frVwV2SnXPZDXSl3kjI\n1Gmapqkj3+FoAaosTi5N6hET7sXfJh7PqloNdTDsBKc62SX90W3PIcWp/iQvEaJmgVVBTq4Rt0Dq\n4jl1D2nhbKZmCyzJ9VHrfcpHH/UZc0lSsiRuUyYDUh8TNKGN8XR6SIuwtt186fbqGs
mkQj3Uo3VP\n5sfJDuacIM8LjOrPWO83Wtz2D116g1MInqE8iYNBEIla/LpW6oCoq0s802lhdeSWwnQ6b2yfywnY\nkVCmZlq1m48EwK1bS8fB4xjHxkSpFCfLJM9qC5ggOjJC9O53C3huahIutHKtp6etUN3dLdyFJeTy\nDLfqUwXdri4zjlXOq61NzHvbNvH52Jg1aY9TLU8utb6pepz4c3jYe7v1pqqAZ5rKu+rfQPG/l7ED\nG+fWWmkSIkNp8nYMvG7nUZWeTzrwkaA2RNaEQDrJbTuoFO68iEOhDm7TZC6Xrg6nl/Q5qnV1nIjS\nRYvv8DXoSl2J0pQ2IMEN4JZpuYw4Of0RUwHALulMyVjJhC83gLCOwhk4OKTw39X9dGNQXWr5Y4AG\ntO9voS22FsYRGimJ53QCOG5BnKAJCxyrbrcS8Pg+7dRO0zTtyeIpH13UZbS1lbZaYlJBKAHPOMUt\na5egBO2m3bYArbrX2sGcX3dqNZOvDlzlMZdj8+viXa5reChnheB5naoSeKzUBZPI2ZrG3Vh1yYMk\nmHHL5+CgADcJXdLaqI4XEJDqZT6yn927hWWRu6uqMaLSisctjq2t5u+qW/CuXWLMo6PWz+Jxsw8e\n98nHp1p8nZ6plFhDvk82ax27zuXW6diq544uIy+RtSxNY6NYQ79Ji+pVgUFPADUx7RTYGBXxv5fJ\nj2+cW2ulSYgMeT0GVTxWfuRkePVjhZPb6qDRi9z6cgPTAhENkrCG2rn7SqXJvGCYvMZdqcuV34tk\n/xfV+iOuWqOcsszq+raDU/tRuLevQkgHdVAjNRourHZjkBZS3cMteY+M2ZSPPurTutHaPSZowoCv\nO+nOEjjWWVjjFLdAZoYyWiufXZIk1TUYhBLA1Y1TxEJP0gANUC/1auuDykeEIkZMqLruXiyYO2iH\nJa6UyD0+d4qmLHC6SIu+zjE/51oo/wrB8zpVJfDoN75TB7mFggleKvx6HZtdCQ91X9XyFo1a3ULt\n5qMrxdLTY45XxprGYsKimUoJi2U2K+JKeQIcaTWV26sJhXTjdsvIq85Hvnay0Kpt8EQ9/Kkrp6Jb\nD+mq3N9fCpU6V9tqluCpVY1ZogChp4r+euWO0WuN1f3dRJecaKHKqjQJkSGHY2CB9zfX6sK3Mk3m\nP896NpJ7ObXTVHpBoJtTnTB/XcvvRfJGXlTbZUStVkZeDkZbaIt2DPI9p0y2TnCluuumKKUtkWIH\ngKq1NUYxo80RGjFKnzg9uHuu20Pn+ttIjTRKozRBE7SNtlGCEhSnuJHwCCQAWMZMqvGlTo9+6rdd\nd50FU4pbUmUbOkhV3+MAnaUshaofheB5naqS5EB+4/HsQFJ9X016Yzc2nUVUhbFbbskbbqNjY9ZY\nRvlsahJWOZ5hlccrqu6zlpIun12g9v/zAOFThwjNayVgytu99VZzv8ZGAae5nD45UiRC9OCDYtzS\ngtvWJqy0HNp57Ke0Yk5Oip/Ly6VQrx4zNVEPh+CODr1lUgVJt5I68pg4lXwJUkFY4p3EXSMDg54q\nynGMDmYzt3WUSaOOJ8gXAQVtga1F/GhgNxg08utqKw9ZNxE9liN6KU10kR+/HST8ZruJyqjXbtuf\n032FSsNeJVC2kzNYVvH+zDUlr+dUPSRN8Rvnqe7j1aJaoILFKigtnnaSVs8EJShWfIBEmREeCykB\nUwU3O/AqcWH18YhQRBu32U3dBlRL2L2b7naF50ZqpJ2009aKa4nVzcOSEMnro4EajLE1U7MFKFUr\nppObrXQJb6EWow27mwb8PQ7Fk0b1YH/nUajqKATP61S1TOYiLYPt7VagUeGXX/D299uPTXdhXChY\nYzwTibzFCiohTn3ybLRqoh4VVmXG2JERotH95gUpZuZKwJRbILn7bSoloFOXBddprKmUdT343HTW\nSb5GsZjVTVin6Wmxfr299u6w8ngNDYmSLzy+VN
ZWVa3adsmbpIK0UlY70zK/qNsMSXMcx5gmW2h0\nW0d5bh2S+3s0Q1UT4qqlat5g8AueaTIP2Xf4ix4SBNfO3uvXNlF2f3Yo4GUbJ1Xq7hvKKq/nlO5C\nv9YX417jPMsBVBVCOPQN0mDJPHn2U1kGhGd3lRlxJQQ5xYKqjxjFaJEWPVsivTyaqIkiFHF0//UT\n58kfUYoSj4m9LX9bSVkVdXu1LzU77gRZ45tUK6bOgimPSZrS1Ed9JZZQN8kbCLwuqe7c8xLfHIJq\nsArBM1SJOAzwZDPlXsyrsZh2yWu8goPcTrW4qVZPCXdDQ86gJy2R6ns33mju19oqxj0wIPqMf1pc\nkOIP9hsWTwmYXV1m7c7hYaLOTvG7jIHUuaByi6f8XE0cxK1Pcq5NTcIyK9fAa6kU9XjzBEHcasuP\nPb9ZwefQ1GQdqx9rY5BWys2QGddOtXQTJiJH30XtOjLT1keLrtmZIaJ1tww2vMtNYCVWFeQNhkou\nXnhdziEqWjpBRG1k/idtKv5sIcPiWYlF0ot767M5onya6K/HiVY34d/d9SrdhX6tM3h6jfO0SwIk\nS5o4/U3JvzkJjLrstmofur4SlLB8Zgd1W2mr4b56F91l1KEkElmHvbjx3k63G5ZT7iLcRm10E92k\nrdnpFGPpNOZO6ix5f5EWiciE92ma1pZsAYFaqVW7bk41W3OUM/aXllCdBdPufPT6PerkSu43vrnW\nfxvXukLwvMYUdMZZDjBBJBLS1XCU4+Yur06ySy40OmpNMCQ/y2ZNEFSf0WhpLCWHQPW9hobi781r\nhNycxc1WPrnltbdXuNbyGEhdtlf5HB01gXx5WV96ZMcOMwsuz5Y7NWU9dk6lUqRLcTxutcjyOXML\nsHrs5RySSatLMre+ejkXg7JS1hzcAlY5AF7RnP36LqbJ+MZezxLNDfqP79wMVuJqyrx4eYy66W99\nwWCazH+YWSLz+I2RSYeLJCydy/r9/H59ezlFLlXSQagNU7nlKao9Bp34uHbTbouVz62WIweGOMVp\nmZapn/oNWEtT2gJJEgxjFKOdtNN3vOdO2kljNEZt1Ebt1G6bEMfp0U/9xto8SA9aPnMaj5OFM0KR\nkqyzcYqXWDKTlDSATgKeXRxnX/Eh2weZGWydYjb5OqiWULvjbgekTtZrJzD1G98cZrcNViF4XmMK\nwoKkSzwjy4b4gQopbkFRy5DoMs+qQGpnfbUDWgGfeQMsl5f1CXTk0y7Jj/rkrrR2z85OPeRGIgJI\nl5fF2FU3XgNoYU1gpFqfcjnrfrIdCW7crXlx0Yz7VI8Rt3Cq41ePPYdCp/jaZNK+jqfduejXSml3\n3lU7vpOoSuVUiioHwGsxZ0Oq+StNIXD4lLx4aaOXxNLl856Xztb66EKH1UrKIy2pL1Y7608AtVOD\nLpWbW8hR+kCaxg+NVy2zbrnW8Uq+o+o1g6ddDc8kJS11Op0sWxxOucVTwouEJLs4TJ1lz+sjTnEL\n3PJEPeqjkRrpFrrFiH90cnH1+miiJtt27GC1mZrNmNK8eE+1qk7SpGUtpTtyP/Vb4lHVGwJeIY5b\nXPnfAt/fzXodlHWyXv82NqtC8LzGFIQFSU08wy1fEorsLJde2tZZ0uzGzS+u1f14vUtptRwelsCU\nN7aVgCQBk1tDYzEzGQ+3/KnPeNw6ltZWe0up01PWueT7NjSYbXO4jcfFdmNjRNu3C1jkgAoQ3XST\nsEr39YljwqGXx4WqwKZLtgSIJEb82KtQaBdfq8tQXI2YSzvYqnZ8J1F1wbMcN+FazNmQCjjXcppR\nN1Ipk2TkxcsYXSQQ0W35vOfdp0nkDBrz16Utl1YKY2kSh7+jQHSsmsGZsqMKbnAE0IS1vQNpwmMg\nPAaamqvOXZdyL56r+R1ViYKKkZPQIWFqjMYoS9mSNmV/YzRm1NFU64xKoORutNK9VloH26iNeqlX\na62MUMSSTM
jJ4ihrcd5MN9PddHdJ6RW7h86t1u8jRjHP/ekerfnWklIuHdRhWctGatTGmyYpabFE\npihFHdRBvdTrOWZT/Vtwqs3Kz5GNtE7KucqbIyGwWhWC5zWmasS5cSul1apoXvT6sYCqF8xObrY6\n66uM7ezvFz85xE1OijZ5TOfNN5tWuslJqyvs6GhpzCJA1NxcCqLqvioE8ufWraVxlg0NZkZbbnHs\n7RXuqlu2mLGSvAao3bOx0d6FmMOZ2t/UlNU9VkKo6o7r5dhJ2QFpENZML/1v5vjOcrWhc/brqltN\nFSlq4fdzdGB/mg4dGqe1MixREsYW0+RMKm6fu6icpauwS9/tuYFpze47BNCR1ya8wvj4oXHCY6CR\n/SNVs3jWw8VzkAoqmYuEDrckQ7y/ARowwG+apmmURqmXektAkceaLtOyxY13kiZLMtruol1G7GiE\nInSUjlIzNTtCHG/TS6mVXbSrpOSJ7M8NNu0+S1DCyFIr20lS0tbau4t2WWC9gzos2WVV4JTQnqSk\nBS5VeFePm90xd/pb8JLddiPkNtfrXSF4hnKVvMBV3VV55lmvbn86yFT3zeXE5zJpjcy0qovt5E8O\nI3KsQ0PW7WWtTZ45trvbhMTOTgGt6bR1XFu3ijHoINzumc2WWhYnJ52TC3mBWgmd/LVdPOrOnVYw\nlzGYHOCcss7anQvqtkFY33TngQqi1yNghnJRmohAdOD30vTYY6DHHgPNlWGJKjZDh9xIxQPJ6CCm\n2ol+gmwvTc5gWrP7DkpH5bi5eh1rmrzBfWGtQFNzU1WDTqL6uHgmKt9SqR4nGVfZTu2eLF1uoCph\nJE7xklhK/rnqjilBUX1Id1g+bxnbKOMWVbfdSZo0YkaXaZlylNOWPOHxjzImsp3ataDHYbid2mmU\nRn0nE2qmZlqkRUsdS/mIUpSWaZluoBuM9/qoT5tAiEPsIi1SlrJGsiR+XkhraDM10wRNWBJF8WzB\nPMZ0iIZKXGjtjjn/W9gs2WX5uSLPn1CmQvAM5VncXXVkxBpzqVoj7axWOriQYDQ0pLc+qjAr4xgl\nnG3daq1zSUR08GC+JK6Uw58OIKUFlYMjt3BKd9JUyjpGaaWMxcz95fqoVtOJCefkQl6fst22Nqul\n1OnJYzCle+wNN5juvF7Knehg0E9iKCc5ldepegxjUU61Ju+/P1+7BEZBB6ZdyypS1KF/O06PPQba\nv3+kLIunhLFMgWjdiVQ8kEyaSiFGfc+PW2TQoOfWXr16UlfTzbVe5+xHQbvaluvyqx4nr2VQpCSo\ncusaV4EKNEiDFqthP/VbXGylCy6HUB4Tyl1sG6mRuqhLmwhI1oAsUMFw2+2kThqlUQsAuSUPmqRJ\nC/DJtviji7pogiboZrpZC7FOjwZqoDjFLVlpB2jAso1M5sMhM0tZ57HnRdvSZTRHOQtETtCEAd9S\nTomJ+qhPC5perPzViN+shgpUoEma1LqBhwrBM5RP2ZXU6OwU4MFdOHWw4AQX2ayAGLWOZTQqXEol\nHOksnmpf/B/w9LR1295eot27rRldOzsFhMnX0u1UQm4kYoW7m282614++KAA7rEx03o4Pa1P4HPz\nzdbsu8mktV272Evd0642qe7Z11cKS2pSJd3xUuFPB4O8nWw2mHNLPVeCKOvjRbpakxK229rytQPh\nNJWSy/WqHFHu0wuU/twBGj9QekNAUtTamwWam5sqCzpZM74uE+wscDqIHj3k7QAAIABJREFUUd+r\n13g8ovrypOby4+bq995Nvc7Zj4I+p8p1+VWPk992dKCqWrs4hEQoQsu0bHlPlvUoUIHaqM2oNxml\nKKUpTYu0WBL72ERNNE7jtkmLnFx9dXU6pWVStsNBbIImaJImSwC0h3psrY8gZzdaPm8islg9pbWT\niCwW06N0VDt2Dp58rmqCJ102WTWBkwRVuQ47aIcxhjvoDuM4qVZ+bjHldVT9
nI+bxUp6PSkEz1Bl\nS2c11JX/4HKCC25RtXtyWJTupmqGXFU6CypPVARYLZuAsIoS6SFXzaLL40mlFVdXN7S93bqfdFWW\n1uLhYQGuXuAzEjH3c4sLvfNO/dpwF9xEQr+Nenx0LrUcgCfss6P7lt1NjmpCn67WpHr+1CSZz7Vg\nfglKaaL075XeEKgHqZYdCaJjh8Zpcq1Aa4x+VgtWsAmN2v7lx801TaX3bsKLUH+yc3N0q5+pHiev\n7pK6ups6i+IgDRpwFqUoLdKixT1WhUK1lIgENOn6CrK6uU7SpKOrswS1buo2Mrn2UE8JFMYoRgM0\nYFhH5RyGadhYwz7qc6yLyduyi/lUH73UWwK6EmrHabwkvvVBerAkVvNd9C7L6wQlLC65shyNnAfv\nSwJvK7VSL/XSIi1a1pMfjz7qsz3/dJZYWW7GqzaLlfR6UgieoRzllPBFjf30Gy8o2+AZUe3KfKiA\nq3uqQCLHLuM3pWtuR4cVJoaHhUVQvr7rLnP80uKpWg7V9zmQ2MVwdnaaUBmJiO102WNVS2ZLS2lb\nN91kurcuLpbG4N5xh4BAmWxJJ7l9ImG6yMr4Wul+qx5PXYwlT3DELZ5uyYL8fK4r7eKkcmtc6mpN\nStgeGtKXpqmKrgXzi07l0NY40finijcE9u33Vwe0ynSnWnZKXEHTVEo/RTl8VKvhbwqpoOI5CRCV\n3rtxvwgNV9xOfO14rKTfi3m7Y6C6cKqAYRe3mRWVbUsghceT2sV28mytchsJk17qQKqWPd2Dx2hO\n0IS2NIx8SCBspEZby6Yue+xtdFvJ9iKD9pjxuo3aXMfKH1nKGvsnKFFiUZYPtV+ZpImveYpSlpsV\nMlFTC7U4xvzKY65aTP3oWkvUdS0oBM9QjvJiaao04Qvvo7dXD209Pc5JeWS9Tql8Pm/ZXiYq4hbN\nyUmigQERI8nb2rLFBGHuOsz7UPeRGWbHxqwQK2HXzhoZi5mlUCQkqZlqJyas69LWZh3X4CAZWXud\nYFOFMbdyKV6ti9xia9eWHCMHQbdzS3XD9nOOBWkhlet08GC+soY2mcqFd0elyZ22VBWICh9fo6lD\nc/6gs9z+fEi17JS4gjpYrt+fz9NjOaKX0kQXbTinysPfFFJBJU3e1kR378b9ItRr6/Upe1fbyoHa\nLlYyKBDgbqEJSpS067WMBgcsCbbLtEx91Ee7aJelvAqfx27aTXGKW9xQdVDNb4RIiynfp5VaDRhT\nY0kbqIESlKCx4oNDle7RT/1a0OSPCEVojMYsfycyoQ2PNZT9eQHQERoxMgAn82Z2WhUE+WOYhi3J\nh1RrKwfRbbSNGqiBOqjDsdyIPOYyYZGbpd2pjRA660cheIZyVC1qBaoZVgcG9Flao1EBoNJNVk3c\nE4uZLrf33583XEnV7Vpbze2cYFYHwm1tzlZZoNRtV93faV9dW6OjYrzcNZaXs+HuuzrAk+JuzNKV\nWEq1DutA0k5eMt3yMcpasBLQ29v1SYn8nnuVWEid2pL7u8VPVQXUypRTkiSvqop7cz+Jb/12Io9l\n3CpTNV2WlWv53EKORp8apdQXU7R8tjg5B8v1wXye/jZNjpxT9vADNNw5NVUL11UVMio5pO4XoZvb\nx93+OypNlQB1jnI0SqOUohQt03JFF/N2+3JQSVLSk8VRF3/pBsV8X/67as2TtSpV8e04FHLLX5ay\nRrvc6sgf0vq5TMvaDLQRilCi+ODv30V3GRDHXXNl1lme0Ib/fUqwlmP+Z/TPjBjXrbSVQKA76U4D\nHo155k0An6Zp6qEeSlHKaOcOuoPaqM2SXVhdyxEaMSC9gzrobrrb8rkb4OvcrYN2mw3d8GunEDxD\nOapa5Sv4RbrqzukGg5OTArDuvlufYEdNgmP3bG52r4M5MWGN19QBMf/8rrtM6NHFecZiRNu2lcKw\n7iktofK1Gv8qY0TtAC+Vsh43vladnfbW
x74+/y6lbqVP5Bj5vDlI68BGdcN2Go9aq3RyMjgrvFfo\n0u1TVRh1IAJdkiS/qspNp1Fyvv5V51QpQFXgsszhfXptrXQYaSICUe4TOUr/cZqSe5P+M666cE7Z\nwy+OrUzO8NxU0BeCumRNKmRU1wv9WvVxrwyoaxEnJwGNw5TXWo9c8nxRrWNu+6oJdiZoQruPLhFP\nkpKOCYl0YCmz5eYoZ8l2207tJdbCFmqhJCWpl3ot4M8trmlKl8xL5xrLQVW1KHJglWNoozbDKqlr\nL0tZC3T3UZ+xRsM0TDfTzTRKoxYrKV+PIRpyBXw1gZO0yAYJimEsaO0UgmcoV1Vy8Wy3r5P1TS03\nwoFJJhLigMVrXnZ0WEHHCRL5Mx4XsZLc4ifb0sVY2j1lzVEny6bdGPizrc1aN3RkRGTilfvyhEo6\nwNNBkw6ypQVX164fOYEaL7fC4VBak2UJHlnOxo87rq5/Wau0EpUDXbp9qmI1lEqTLRHokiT5VVVu\nOvktIKm+rqE4vPfMzZUOoziX9B+mDeD0mnHV0DQR9RDRGAXLOgEa7pyaCjp+qprlUjazKrfGVAbU\ntYiTU2FKV0rFCQ6cst6q2VgHabBkPXm5FAlDuv4KVLBYOhuowYDBQRrUxocWqGCJJ+XWVBXmeqnX\nYiUdpmHbcjRqjKm6JnbZanlyI905pQNMXvJElnqR5wNfD2n1tLMkt1EbpSltZPX1k2DKzkIdBCgG\ndY6HllN3heAZylWVXDzb7cutXTIhjYTUsTErnMm4Re7CKsGVgyJAdPSoaOs3fzNvAGlbG9GuXaIN\nHhspP+eJfiQ8yJqXuZyZPdfrs6fHCsPlPmVdTHnxr8v4K7Pocuux3E6FSDUL7siINe6Vj1l3nNXE\nQxxInECNnwMSNmUG36kp5/I4XgFQPW6VSgddbq62un2q6qruQAS6JEl1IbfrX+mK20HCFXcDPR/H\n/2MR3v94P33kzbXSYRTnMn5AxHUOPzlMk9+Y9Ayd+Xy+emAdoOHOqamg46f8lEvxKruSN26f1ZO8\nXmRXq0RPLePknGp+6uBAVzNSVxfSLjkR70Odp6wnyhMVEZnJiiIUMepmEjkfJ9l/kpKWtnRw2Eu9\nNEET2lqk3HrL4yZlXCfXNE1b2o1SlCZowhXcLLGcebNfuT67aTf1UI+tJbSbug3wkm0N0ZAFvu3O\nY96WUwbboG+GBHWOh5ZTd4XgGcpVlVw82+2rS0ijA5TOTtMKpsueq0JLf79oK5nMW96XcMsBZedO\n676yFidPzuPkshuJlJY+0Vk6t261utfq3HXtnhzK1f10WXQl+HAglxAnwYjDrNyupcVshx8rDrXq\nWnM4dbKOqTG8dnDGgdgui66dBb1aLuFc5VzUyXGtTVNgMXdm47T5vAJzRJQioiQR9ZFwvR0nyn2z\n6Nb62UNUaF4zQczrHKuQjLQwtkZTuTkqNK/RlRTRJws28LVWoMHHB2n0qVFfAJPP5zd7SGHg8lMu\nxaucrKibxcLq9SK7nmvDepXdXNU4UyldPKEav0lkBQuv62kHqMu0TP3Ub4zDzkrHrV+qO6uUTACk\n1vPk/cnYSgl63FU1RSlLXCcXX5sYxSzrxqF6N+22WOm4C246X+rCy/uXVkv5Hk9eJMcst/Gy7l6P\nTb0mDQqz6LorBM9Qrqrkot5uX/n+9LQ+IYwOLHt7S2MPuWtpczPRrbfau7K2t4u+BgZEuxzOeNZZ\nHhtp57KrezY2Wi2IgJkJ160+KX9yy6N0Q+Zw2d5uhWM5RlnjNBo1LcpuNwuWlwWsLy/rjxXvl89h\naKi03XKhUAfEdqqq62o1lSbzG28zjTsoSTBMkva/QPrfspjU3Jx/EEuT4/qWZdmSUGjXLoPd9P4y\nAaZGNw9yCzlKfSlFyb1JGjs4VtfWvaDlZEWthoWVKyi3u6AusjeDG6DdXDnsyBIqRMxiuADCAVD7\noXYa
W9NnSpXz5zDkJAlnOrdfLg54TdSktQS6Wb84vKnQorbDrbsyVlQnbmXdTbspRSkjHpUn+OG1\nQb1Y6Xj/smaodDWWyZB02YW9nMf1CpRetdnHXwuF4HkdqxqJT3Rt2vWjJoTRlcxQwU9CIXfL3bVL\nJMRZXnbPOCthUP4uE+nwGpgc+NRapX7cbrkLL3ct5gApLbtyTCMjJlwNDQkwT6XMz3nNTSk5RhV6\nm5rKi9fkUq2V2awJvepx1UGhn/PB73g2OnOsL13vlq00lX7zbyVjTcY/W3Rr/cx+KrxrrXSN3Cya\n6voq25dl2SqQsM7aHTc2p/HPVRdgvMgJrvn8a2LdCzKrboXusE5WVDcLayV95xZy1HGgg3AIhLX6\ncLvbrG6AOcpZ4gg5bBkxhgfM8xtz+vnp5u8E405uv1x2pVzsXGT7qd82FlRN8qOzpMo42K20tcRa\nyeWUtZdDrt/yOGqmXrk2vA9etiaEsFBcIXhex+Kg4FSGo9w2JXzYWan4+3YJYXSxjWrWWt6macXM\na2GQu8LyupyFgtVS2tVlXYvpafdMtPLz4WFrQqQtW0wglv0nkyJZkEy6s7hoQje3BqsgzRMxqQDH\n3X6bm/Xr41fcWukGml6T61RitayFS62dKnJj24xusQ4yMr7+x0NUGFtzBwwJhkNEtI2IukiAyaTY\nr/CRolvrhzTQSaS3aHK4WSaiQTJcd9XsuY6WLe7+y5P85IrtpEhf+kXOqY2oMF6gqUP+XEQt51MA\noOYE13L+eAw09MRQ9eE4TaXHq9ymNtAdtpK++b7JuWRNLr5131EcrCqpv1lLOSUK0pU56aZuAfiP\ngbAfNLSmz5Sqy4qqxobabe/FSqeurwqSdkl7dHNWt+fxjhyI3ayVunjQLuqiu+luT+Vx8vl8ydjs\nrLN8vexci0OFCsHzOpZdGQ5dVlIvUJrLmZY9HrtpZ6Xi8Za7d9v3weFzZEQAmewnEhFWQGnZW14W\nVsydO/M0OSmArq9PWEWzWSuQybnK+cnkRRxOJZDrLJa6pyxxYrd9U5MA3HTaWiNUTbCki6lU3VtV\ngJP1TQETouWaB2HddgNNr8l1amG1rIY1//7787Wt01lSJ7Ly2pxBjSe9X3GNdQOMaSLqJgF2HApT\nJECrQFZwVNdXZzFOs3bUDLiKpdLRssX34/NQ21dVIJGRVreNB5C0QIJbXzopfTjBdWGtQNlvZH0l\nP6pIQWbVrUbCIY8up5X0LfdN7k/S8tpyTRIZ6cCTw8skTXqGgY10y1Utk3aJeaSWaZn61vpo19wu\nmlyzd6F1sgB2U3eJFVIHZE7r4uZmaUna4wGA5fZ8bNM0bWw7TMMW2JVtcYsqh9Q+6qMsZT1bconE\nOaUeDx5vyy2uEjaDLnUS6tpSCJ7XsXidRGkpdMtK6rWkBbfM2dVjtLPs2dV0lFDD+1EhkqgUOvjr\nrVutcKa2199vjTXVZVyVTw56gABgOUfdGNXEQ9yCqovllJAcjQpwVt2UJdxKIFVhV0Kwn2PoJC+g\n6XTcnN7zIj8wGcR8a9Gmc4dkgZEganP6FV/zSwwYxz9nZnwtNK+5A0ba3NeAQj+gp7MYq3DDXy9r\nti+ZXLHPbtZvJ5nwO6a0r5MdYDnNxU87TlL68J2YpwoJmQwFaOGvSsIhjy6n5SaOkvvycfuxngYF\nfQu5HH0unaRPjYM+VNBbAe3k1y21XDnVyrSzHPppy06yj67iQ8YmSpfZDuowSoNwleOuLMfVR33U\nRV2UprSREEgFYF35EA6KquWSx6vqLKrywbPe+k2Ao27P++HjaaZmGqVRRytyqFAheF7n4hfTjY2i\n3Ih6Ye+3pIUfeFXjPLnLLb/o1SUh4jGN3Bqo9sVfSxfYSERYQ/m4ZfkRnuSmv9+Ev2TSatFdXrbC\nI3evVcu/qJ8DJuzzsfM15GCbNXMplMxxYMBqsQUEYPNYULdj6AXq/A
BjNSyOfsCvGlbVmseXKjBi\nV5uzGmstxdf8ZQmMbUSF8TWaOjRHhTfXvAGGCoW62Ek3+FJBSYUbJ9jRQVYzmf+FeologIja2XsD\nZFpp7eZn16dfkCy2Y2T39WLVrtSqmCbPcFxNRt0I+bnwDsrV14/1tBy40elAOk2PAfQYQIemsu47\n8PFq1iiocXHp2iw3QYuf8emgTs5X1qmULq5cXhMO2Y2LAxt3fx6mYduER9zyKQG5kRpL5qrW2eQP\nNS7Wz/qq2/NzQ433tINoVZsh0VWo6igEz+tcdllbufVwdFRY33RQyuW1pIadu2gsZoUl/hmHsMlJ\n0c/u3QK2GhoEaHV3i/cEHOaNUizcmru4aGZx5ePm7XOo0Vk8uSVRth2JmCDc2ioAVk1Y1Nlp/t7R\noc/iyteQWzClRVRCBp8THyOPU/Va7kRda/XGQDlQE3R7RP7ArxqxoAcP5stus6x5K1BjV5uzmpZY\nvuary2R1LZVusl7E56LGTkqqGSOirEObHBQnfE6EW1nl9VeEvddHVhBLUkmcqC95sPhp3SL9WLUr\ntSr6ANc0lb8U9Si7JC66i+CgXH2dLLcq2JdbkkE9pw6Nj9NjAO0fGaE1n19cOjipRqmIctq0O17l\ntCX34eAnrXgt1FIClxxUB2nQm8u24mLLkxDp3J91MZU6SAYJ92PVQrqbdlOMYsY2uhqfXqX7nuLn\nBo/3lMDrBNFSXm4ShHB6baom4AngnwP4ewD/AOD/0Hxek8mGKhWPn5SWR7vkMJVc3NqBgLywjUbJ\nyACrfjYyYnV/dRqbaVXMWyyAHBZ1cotD5TUmZabZZFJAX1+fgHJ1LBMT1qy1sg6nhE439fUR4RML\n1PjoAUrvP0TT/2rNYh2Wc3JbJy/ycmPAz3EPuj2i6sCkH1WSXKhWcOhpbXyYr0rW3K3EiBellf3V\n13aKsu36HLbTzY+XcZGGn67i6xYSACznllReczDjbVdYm1V3PtlZtasiH+B6rSdldroIroarb2n/\n1j+Bci1+6jm1VijQ3NSUb+hUxcuQ2NWM5NtxUHCDh3Lmane8ymlLdxPibrqb4hSnRVos2Z7DrddY\nSdmHjIF0S/LES8d0UqexdqpF0y7mla9PH/VVBG1e/u+p8yvHfVenaljY60HXO1BXHTwBRAD8I4Bb\nAMQAHAdwu7JNjaYbSienOoryolYHparKseo4WRv5Ra9T4hqd+6pfCLODGt3aqMDLE+2o4KnOT+c2\na6fRUSL8nmkB6fmDOQtgy3jS5WUzhnZsrLTWqU7qsXK7MeAXZCttb8cOcc51d3uD9JqpTJ/Darrp\n+gbyNOlBz8vcCmRaD9vI2Q3VTk6xmbwtOZ5+EtZHCZ7NpM8yK/fpoNL5yXjN4WIfO0iUc2kgoqNs\nblNkAuUYGVl3DaVZ29z6202B+KHaWbX9qBoXNZUaV+tdG130vd7B3isA6LarBjx4OV7l/h24jZeD\narnnjRsg8xhJPhavgFeOO3Ct5eUmwUb/XVZL1ypQe1UtwPODAL7BXn8GwGeUbWoy2VD+5QSlqoK2\njnkdm3Q1veMO6ziDiEnUvc8hU8ZSTk9by5lw91jd9p7X5VPCAtL1+f3UceOacROAW1jVONaentK4\n2HITRvEEUEHEEXo9Jqplt26UJj20uWijrbUWVZoQp0DeXW51MMspxqlkCR+PfG5h2+na5vskbfok\nssKpen7xNtR14GsnYbbNYXsnVSlwkl/UDC4MBp5JNcjsrG5tldNXOcBRroUxCOUWcjR6IE2pQ+O0\nvEE1YN1kV4/Si6trNeDBy/Eq9+Lez3irdd5Ii+hW2lrW2vnJWstVb5a4jfy7rKauVaD2qlqA50cB\n/Df2+hMA/l9lm5pMNlT1ZFdKxYt0F+V+QFC3v3QP8dqOHYjp3i8UrPGa3d2lGWUjEWuNUO72K8HQ\nixV28uNrlD00R723rFksqSqs8w
RJvB87uNTBvpPF2glUq5HcRlquW1ocQL3GGU/y+Xz9mya8yM58\n5WZ55Ovs1eU2zbbpodJjxT8fJGs9TQl2EhKdQHmw+FpmqG0iors1/UnJ7aSbrZd1ILKunfxdzX7r\n8bzM/0q+PGB1kcUV8MCokRhn8PFgIDTIuppubZXT12axJsiL/OSBZGDrWVGtYQepAODH1XWj4KFa\n1shaqBzXVa5y5647rtU6p65n1cM5tpGqBXj+ixA8rz05gRsvpVKu/ICgTvLL0ms7bjGeaj1MCUZq\niRTVBVeulQRTvr1d+Red1ERDKmzL19y9WP4us/W6lTThayLrl8oxlZOxuBItLwtLp1N913Ktj+WC\ncj6fv7Z9DnmtTTvAky6lu4koVnyvtXQfucYvSsCzswr2F99rJwGK/L9HlES22UVyB2WeCKi/uJ98\nLV3bORAuFrfT3dTwe4zV7dM2c1WUf3++KjcxLK6ALDHO6FOjtoDjx7IYZF1Nt7bK6WuzWBOMi/xD\nCGw97//P91e9VijR5ljjal3clxPHWkvxGpt+3Wx1xzUEz1BBywt4RlGZXgewjb3eBuCEutHv/M7v\n4JZbbgEAJBIJDA0NIZPJAADm5+cBIHxdR69ffBFYXBSvs9l5XLgAABmMjAD/8l/OY37euv3nPw+c\nO5dBSwvw8MPzaGsTn8/MAC++OI94HHjuuQwSCbE9b296WrQ3O5vBK68AwDze9S5gzx7n8c7MwNj+\n3e+2bq+2DwBtbRns2QMcP262Nzsr5vfpTwOJRAZLS8DCgvi8vz+D97wHOHJEtL+6msGpU6X9vfji\nPAoF0d/amv7zxUXx+cyMWB91PoODQKGQwdCQWN/jx4F9+6zz3bcvg9VVc7wf/nAG27cDp07N48gR\n4PbbM/jxj835qfu3tIjXt902j6YmYGHBPL6f/rR+fQDgwgXxemQkg+ZmYGio9Hjqjo/b65//PINM\nxlzvmZkM9u1j2xfHO3/bPDANZOCtfS/r7fj64Xng+Mb8/dn9vQBAZjYDLAHzP5oHUkBmWwaYBeaP\nzwOfBzLnMkBLcfz/n/K6Dci8lgFOAfNH5oEskJkv9l88vpm24ueH54EOIHOp+Pn5eeAIkLktA4wA\n81fmcf+3gP9wJYMLAA5F59HaUDw+I8D89DwwX5zfADB/Yh44C2S+X2wPxf4uZ4CTwPz/Ng/8EZB5\nVJlfKgNkgfn/eR74v1n7fzgPfJydD9+ZB4aAzD9lgEKx/XeAzM9t1vv4PPAwkEl4PD7q9nK9RjLA\nHof9n8sAM8X1CPB8Oj5/HA/jYSQyCczeO4vsf8ni07d8Gv/18n8FANy2chumb5mG1Pz8PF489iIW\nexYBANn/ksUf7fwj2/Yfjj6Md95+B09/8mkk4omKxsvHl4gnfH+uHd/8w3gH7+DpzNNIIIEH/vQB\nnDh3An3DfZi9dxbHv3u8ovUN6nVLpgUA8K7ou5B6O4Wvf/LrFa/nucFzWFhYAADMNM1g39i+qoz/\nYTyMv8/8PeKI4775+/BZfBYPZB6oyno9MP8ATuAE+jJ9mMUsjs97P377YD//2cwslrCEC/MXfI3/\nxfkXsYhFIAPMYAYPzz+MF/EiFjPFv5/5LP4I9n8/1X7Nx/cIHsHD8w973n8Ws8jOZ/FpfBqJjPh7\nk9tsxPHbLK8/j8/jXOYcWtCCh+cfRhva6mp8G/36+PHjWF1dBQAsLy/Dk9zI1OkJIArgJxDJhZoQ\nJheqa3m1BqkWsELBTHDjx1XT7n03i5xM0OPVPVS1wpYbc8fnPT0t5ptKCQtdoSD6UZP76NxgeXkU\nLy7K09PCdVa1XHodr594TjcLp9N+QVs/ZR3V9naNy22Z1sea1+MMUI7rm6bSb+ApzWd2mWSl9bGD\nrJZAnUup6gbbyNrrptJxgIjiZO/Wyi2iTez3rWwfp/mp54Ic3xBZraHlWBj9unRXySpeqWe5U3bW
\nIK2YGyl1jXILOer4i47AXFmDVDUscrU8jrVyafbaj1+rY5AxoPVkAa6nsRBtHtf3SnQ9zDFIodqu\ntqIPjAN4FSK77e9rPq/JZEO5y2/SGa+lMry6sjpJt61dn/l83ti+u1sAYn8/0Q03CNDzC3C6eftd\nK7eSME4uyuUCHS+X4we01ONb7g2JSsVrlNrN26/rbLk3HerB5chxfSXE6WIinTLJyiyucj8OdFwc\nqKRbboqs9TBBlrInV9X/BmoiomkSQNpAJmguklnqhO/jND+nscr9hsi5Tqid0uS8LnZyIcWS88ll\ne2MYCwvUfeAAjR86FFjJlWqUDAkyCZFXpUm5v8JiRKN7orR8Vr3zUXtVc10OPnew6qVfpGoFOF77\n8XvxH2QM6DRNUzd10xiNbQjsceguNy7UTpX+36s3EK6Groc5BqmagKdrByF41o0qAQenfe0son4g\nwKmkitpnPp8vyXprF4NZrvyulZ/xV9qXW79+VckNiUrkZd7ViDHVqR7A03F9JWwtU6nFTbXC8ddp\nsn4jDyv75sia9Ee3j58nLz2ia2eSSpMXDZKZ/dYu5tNOfK7lmA3LTSiVJkdgLTmfXLY3jMMHzBJL\nU3NzPgZUW7klBqoGgJXcXylaAIO2eFYy9iCTM6kK4jvKq+WwVglSvPbj9+I/yPFvtMWrmv1Xek5d\nD4l0roc5BqkQPENZVAk4uO0rLW/cVdar7KxaXsar1iJ1c2v1YkHL5axutuXK63pvdDmOoC2ZXq2U\nfo6vtGwHmV3XrxznVY30v5UqRwIo+TdykpivIlktoSBhjZTutPIz1erZSPpv+1b2+6Cmb5CwSr6b\nvW4hyv1POUr/XprGPzVOhY9r1q7Ex5L0gJlm7Xq9PlOh3av8AqvL9nIYY4dEiaWR/fsDs3hWQxL6\n2v68jca+PlYCaNUAsJL7K2sFSn0pZet+Wi5AljN22Vf3F7rr2q2gjvvyAAAgAElEQVR5oyHKTm5A\nvJEX/xtt8apV/0ElUaqnZEyhaq8QPEP5VrnXz2pmVrUdv+U8/MLL7t2irElvrwmLuja8WND8lBfR\n9VGPDGKnHTtEjGVTE9HiYjBtBmml1Fm21ay8VRWDnPudXIMtaYPnaloKRjdWV8ulGguqPmVWWB7/\nKZ8xm33k+2omWv7cWfpe+vfYhf4fTJWuGR+nXQwrkb+SMZXKL7B63L6wtkZTc3PeoXODvmwKawUD\nsvAYqPsL3RbAq1U8opMbMQfI1FzK80Wwl7GrF9e8r/6v9FdtzpVakmsNUV7HW69ATLTxFq9a9R/U\nMajnYxmq+grBM5RvlQsNMsZxaEgfI+k3RtRpe517iG573Xt2JVT4dZuf8iJe+3XSRoIqtxT393vb\nx6l+aipFFI1az4UgxI+Jl9hQv7J1OUqT8W224BRPaxngJT0YBakdJCyS3STKn6TJamGcIhPEtpIV\nDGMkypvE2fYgUfYEJJIB9RHRDcU202zbDjKhspUsMZ8EEvGcaSqFVYfn+P9avND/zAgVmgti7BwW\n1VqadoBpB3dpqv7xUFQz1+1a+aJrxK2eqoXQa1ypDkyCctM1XHH3g7Dm7SI4t5Cj0adHKfWllGPM\nqHpxXQvQzufzWmus03qpgFxriPJqPd5oq6IfXUsWPf49FdQx2EzHMlTwCsEzlG+V63apuk2q7bjF\niPqJj9Rd1Om2172n9qW7bnNyAfUyLz9rmMtZ4a/G145GzdKWFu9uxXZu1Xwty3G5dpJbVt5K4d0W\nFMaJcp9YoPQfHqCx/Ydo8uNrNjGYfICkB6NKxWFTwqSEPf6tK/uVILZc/KnW0lSfSSJKlL6f+0SO\n0v+m6ArbXCiFTd1ThVqQ6aIrf24RPwvNBZrKTYm2ZQxqmu2XJfsYVjdxd2M1vrWKqhl4bmAaZwmX\nY18fc3S7dZIOTPSwkqPcQorSB5I0fshbH4W1AqXmUoQ194tg
CW/JvcmyQKkaCZxU5fN5LeA6wZ0K\nyLVOCuUVyDfaquhH1bLobQTQ8u+poI7BZjqWoYJXCJ6hfCuoeEO1Hb/tBrG9nxjCcpMIeenXq5tx\nMul9vkFZSZeXhaXTTywrd6vu7S0Fbrc420qlW/OqGX4KROk/9pnwxa8bpk7c4icz03Lgk7DZQtY4\nzRgJC+E02397cRs7F9lGTbvsaXGFzU25f+tr2qBeInpQmUNv8WcrCVDtJDPBUVDwnmb93czW5Fq5\nJtroAHEqdbv1E9OpAxM9rKQpfcBfIqHcQo5GD4xS6lCKltecv+A4vEnX4dGnRm0BTXdxXQuo0wGu\nE9ypgFzN5Edex7vZVS2LXuiiGupaUAieoTa1JFz191cvsUwtrtvsoIjX+ezo8Ad/G+hhZ7hV6yzF\nulqntZCvGwg+Y/7Giwlf2v79fhr7iI3FM2ilyfwWVWtnthDRUTLjMKUraqvDPk5POyAtPsc/pbjC\nSlj1Yvm0eyaLY9eNc5Lc4d3rMeS1RLk1N7yuC1TluprqwEQPK+M0fgjFPoY99SETD+ExUPYbzu4X\ncvxDTwxR9htZGn1q1Deg1RrqpJzgTgXka6Wm60aqWha90EU11LWgEDxDbapEN6pU100JOUG6sflZ\nn3LX0g6K+PyyWX/tO4FWtY95oVBe/VA3VTJuXzcQ0lQCIE7nVGFtjXr+YI7QvFZ90JdAJYFshEyw\nvItE7KV6g0JC2phmnwDA0+IKK9/XWTXVZ5fN+xI6mRvsFfb5+i4P65Rm7dkdjxyJOFW1/6BdoDVy\n+47aiDqY1ZQKP8HPr0CFtUmamst6bo+7zU5+Y1I7rtxCjlJfSlHHX3RQ7xd7jbhOP4DmlNE2yHUo\n9/+epQ7k2vI1Z4GsZ/lxn90IF9V6KCMW6tpSCJ6hNtQy5iY30JBw1d5uhZxKvyx5v06JatTxlbuW\ndlCkwqOf9p1AqxbHvFJLsVvG4VSqijdKNG6cJeeUYlHzHUrnN5Oq3J4nCOov7jtNRD0koHPUoU1u\nJSyQqItZ9W94h2cXETVr3lsujjdtvn+RbfNCn4f18uKKy9o3ni3kvIYBye07aqOsYxWDkMe7Q3x+\nasbbcsfhd5+xg2OGFdMuHlJ1sfWbHEltU81oG+RxLvf/XujCuXGq97UPwTNU0ArBM9RG5p5wlRsg\nSbhZXg7WHVYFHC8ZbFMp08U0qLV0S8hUrur5mEs5ZRyu+o0SLzGYaTK/xabKAG1lf1cQ5durQKV+\n5tRmjgTsRYioSbOf16edRdOre61drU+QAGIex9lGtFps9++aiVYlmNrNL03Copu1WUsp2b583knW\nMi/VOL883nDYKJdHJxCyAzvL+2PmnbrcZwdtQdAp463bOMoZu3UiRJQmKnykQFOH9PGQ3V/opt4v\n9lJ0T9Roc/hJby68qhxjLFl/o0/bx4zaTiUAi+lmceGsZXKdEst3FfrOUY6SlCQQaJiG63rtQ4UK\nSiF4hqqH3BO26u8nw6LpJ76xUnEwc4JaFYSy2equZbUSO9Wj7DIOB+XCW7G7caXJbdT90+QMPHL7\nISoFKhWgkpo2e0hYSNupgm9r0ma1tTy3FPuy+zzio68kWeB4rYFoldeS1a2Z2zpyFUjEi06QGTda\nrYzDUh7Ht1FJV5wgyQ7sLLGSf9hr/IGm99vHQaoZb9X+dONwgyxP9TUXcpT+v1gGZuUYFNYKNPj4\nILX/ebvF0tn35b6yj4VjjKWmPz+WzyAsppsly2gtrYMllu8q9M3bnKTJQNoMFareFYLnJtBmjsGs\nVOXWY6zUPcQrmFUrlnHTKsCT1e4YBAXNft2NS84pL1ZRJ+uWun8ReH7aTXS/LlGWU38FMl1Wo0S0\nWOy7Eoum3dMtdjPoPtX++LHSQaJXcLQ7Nl6OayUqji9/Wz64Pvy6bTtIgpAuY6sd2FliJQ9OGH+g\nfPvpb09rodEOynTvu0GW
E+DpyqGk/lOKCm+WQi1PHITHQJ17Ox2tkeVaHXVjSu5N+or/5Gt88LmD\nnvv2q3qoTVlLy6x6rlej71pbmss5hqGrbaigFYLnJlA9x2BWW+W6hNbyy3IzWA+rpRLO3ICTNeiE\nTka7yj/pss6pNOmBSaci8By+gSgPokMgyjuV98iRcElNkojt5HU7p5S+y4VI3fZbfe7j9PRi/eSu\nu3eQOyR6BUe+Pj1UuxIqxfHlD+aDazNN3s8zL83ZAJ4d2OliJdXty3HhVVWO+7EO7nQxm3x8qS8K\nC27HX3TQxLMTNPq0CaKpL6U8W4LtxiLnqcaPJvcmjeRFXtvla1zN/3v1EItYS8useq5Xo+9aW5rL\nOYYheIYKWl7As0FsVz01NDRQtfvYzJqYAA4fBkZGgLk5IJHY6BHVTqurwMwMsGePOe+ZGWBpCWhp\nAWZn62M96nFMQclpbpkMsLAgfp+aAvadq/3JWjKGfd72051blnaRwQJEw1OYwj54bJhrAsBhACMA\n5gB4WI5XOoG7CuL3q11A4+niB1PF/ZcAtAA4C+CYTSMdAKIATtt8Xom6fLTbDOCC5v1WAE0ACprP\nOgH80qa9LIA8gHMAGgH8VnEsLQBm4Wl9Dclj01ZsDxBrXMZh3nCVcZ7pNPP8DJZWl/Cjwo9wav0U\nRrpHMHf/HBJx5wZX11cxc3QGe+7ZY9lWttcSbcEluoQjrx+xtDmDGSxhCa888woKK+JkmLp1Comm\nhLHf7L2zRpt2/fD+Dr52EL9c/yVaIi24ePUi1q6s4SquGtsMdw3j9fOv4+TaSWMsj77wKJ786ZMo\nXCxgqHMIT9/3NB554RGjn4lnJ3D4xGGjjalbp7BvzDxR5Odu65V5JoOFFfGd0hPvAYFwav0U2qJt\naIm24MXffhEDWwd8t+tX/Ljw9XXSBCZwGIcxghHMYQ6Jck+yUBum8BiGqgc1NDSAiBoctwnBc2Pl\ndoF8valc0Kim6nFMQclpbiU3RVD7kzWQGzMzMIGuCDCB/JNeLba9B95gYAa4+gTQuApcvhOI3gDg\nCARQvBfAAQBnitumAKwo+0cBXGavGwA4fbW2A0gC6AbwsofxyT6uuLQLCDD8VQAv2HweA3CpuN1V\nm224hgF8G2KsV4rv8flxaHwPxNqsARiCgFkVTOWxKcBc4wqgbUPl9zyzEQej/tZ+/PCjP6wIdnh7\nkwOTaIo0Yc89e/DoC49iaXUJr0RfQeHeAvAtACeAtmgbPnDDB3Dh0gUcOynuqkjI8wJLN375Rqxc\nUP8ohKINUdzUehP6W/rRHG1GW6wNezN78egLj2LfT/bhzCXxh5UdyOKp+56y7Lu6vorb992OlQsr\nWgjkQPzoC4/i4GsHsX5lHTt7duKJsSeMbbc9vg0nzp9ABBFcKZ7ETY1NuHj1omWuunaDgk7AelzU\nPu20ilXMYAZ7sCcElk2q8BiGqgd5Ac9orQYTSq9E4toCmUrV0iJ+jowIvtFpfn4emUymrsa0WeU0\nt9lZlTNrf7KWjsGjOGxy6+EMgH3ALGYt/6TLOqcS8GdBWxLQCQDRdwHYC+B9AOIADsKEziSA7wF4\nP4CTEBBHsEKnnbWR63xxv9d8jPGy+yYABEy+aPNZFAI6ATEXaUHdCuBtzfYpCOhMQIDqFQjo7IGY\nfweACIAMxPH8BcQxBUzwLR5XQ/LY6KBNcyMiaAX6HeXzPLODuJao+GMPysLG2/tC5gtGe0urSwb4\n4CjQ2dyJsw1nce7yORx5/QhSW1LGfnvu2VOyz7bHtyHSEEGsMYaXHnzJsBKuX1m39M8BrzXailRz\nygK0ibiwrEroTDYlsTezt2QeiXgCP/4ffoyZozNojjQj+1wWLdEW9DT34LW3X7Os49LqkgG/R14/\ngtSXU2iJtmBn907c1HwTTpw/YYxppHsEiaYEjrxxxDJXqUdfeBQn3zmJh771kCfLpO6c0h
1rflzU\nPu2UQKI8r49QdaNyjmGtr6VChQLEv/lQoepGs7PC8lZPbsf1OKag5DQ3eVNkQ+Y8MwNkMkg8NIF9\ne1b9j2EJwAKEi+JPiu+NQAAIzH/SNbkzPAMBTT9i49gL4FEIt9NjMN1SkwB+AGAAwKsQlr5mlAKh\nA3Q+jxk8gwyevTKB9bdXncfW5XkWpeJWUf6fhI/1fRAutJMAfojSW52tAO5gr18CsAXAcQDbi++d\ngbCayeO5pvTJjqtFM8W+zynv83NjRrOf2szzM8g8k8HEsxNYXXdZzzqQhLjDJw5j5qg5wdl7ZzF1\n61Rgbp127UnwaY22one9F9vPbsdlEidFsimJ7/3290r247DUiEacuXQGp9ZPYcfXdhhrvrN7JwCg\nPdaOba3bMNQ1ZPR55tIZHD993GhDAtdP3hZ//A1owK1tt+Khbz2kPYaJeAL7xvbhmye+aazdV//x\nq8bvt++7Havrq8Y4AWHBXb+6jsLFAo68cQSvnRN3eIY6h5AdyGLu/jk8sesJ2zW3O06A93NO10bQ\nxzlUqFChglToahuqItVr/ONGjKte12JTqlL/Zh4X9ySAR1Cxq6Krpczu8wxQDCcF+iEALKG83wRh\nERwG8ITSdg+AU96H+QwyWCk2fCumMOZ0F1x139VJ59LLXWgjxedFzb6TAJ4u/i6tkmcg5neO9Z0C\n8GNY582PYQKmy+zNAJ6CWK8tEJbXAZQqA3N9uauuz5jJclwXN1J+YwdlLGYLWjCL2YpvxqyuryL1\n5RTWrwoLZe+WXpxcO4lkUxL39d+HX7zzC8f4zu1/uR2n1s0TXq453yb7XNa0qgJoibTgu9nv4t/9\n4N/h+KnjOHnhJNaurCHWGMO5y9Y7D3ZxpjPPz2Dvq3sNSFY1desUmiPNOPTaIUQaI7g9eTsWfiHG\noIsddZN6nKSLcku0BWcvncWxN63uyDpJ996OWAcWP7poiSENFSpUqFordLUNVXUtLZl8MDNTP27D\nGzGuel2LcrThEO3Vv9luoLOwulgGcSykpQwode10+lwaSVTQke8nAdwG4TZ6RNP2SwA+DOAd2Cfm\nkeoComdbgEtAN0Zwj9YUyOTFtTai2Y7HbV4BZj4+g6XeJbRcbMHsn88icSEhrJl/yrbj7sRnlTZW\nYM5bAnwMwmIpvSPl8cxCgPDZ4vN3YcItl1zfNgiL8irE2qvnhovKcV3cSM3eO+srdnAJS0airRnM\n+HbXs3P3XL8owPPq1avIDmSxN7PXAowzR2e0APjSgy9hx9d2YP3qumXNpVUSsFoyCYR4JI6Opg7s\nG9uHxN6E4V4r4TfaEMVluoxGNOKZ5WfQ1NCEt68Iv+/eL/Ui3ZfGhcsXLNB5V+ddWHlnBSfXTqIt\n2obCWgFvXHkDpy8K3/Erp6+gd0svRnpG8PhvPI5HX3gUR39xFLd+9daS+M+SNcMMzt57FqmjKTx5\nz5OGG69cm+ZIMwCgI9aBP7n7T2zXfqB1ACfOn8CZS2fwyAuP1P1NkVChQoUKXW1DVaSNiH+cn593\n3WYjxnUtxYJKiD58WLBdzeXVv9luoBI2PQKzl3PKApC641v8/MdtwFRBJA4DIEBnClbonIGAphSE\na21n8X0OSVL3QcRGcuiM2YzxNHBvxyxu7ZzC/ZhDPAhXYg9wunTDEhZ2LODwnYcx84nicTgPYWmW\n4uNXEw51AXgDwhr5dxAAfwRinglYj2eLsq/dvdVZiGRF52ACPeD73CjHddHT+RSAnp+ZwTOZDJ6d\nmMB68YSTgOZlrDPPz+CVZ14BngWG1oewx+1GhUY6d0/pFgsApy6eQiwS08Yf6vb93A8+h46mDsQa\nYmiNtRrtvOdr70FibwI9X+zBDVtuAABQ0RRfuFjAh5/5MAAg1mj944g1xvDygy8j2hDFVVzF+tV1\nAzoBGBl5v3/q+wBE7Oium3Zh4bcW8OrHXkVPvEfEp7
5xBD85K4C3LdqG0xdP4+TaSbTGWi3xn4WL\nBRx5/Qhmjs7YuswuYQnH4sew0rSCkedGMPHsBGKRmLE2dyXvAgADKAH9OdXe1G7s0xxp3lQu4aE2\nXrX6ngoViisEz1AVqZ7iH4thgZiYAP7sz2o/rmqsBZ/Tag2vJTYcohMJzCT2IZNNOM+9lgPVAaTy\neb4H+OA54MkjjIN1oLMEEdu5AgFnKiTdBgFhq8VtzsCqXcWx3Fd8zeArfiqBsXP7vEGnCm3N7ruU\nKAa0bCkCxekR7PlK8Ti0ohSipSKa947BNibXolkAvcXfh2FaRFUlIDLvOrVlJxmXOwEkLngHuVpr\ndWkJKwsLOHH4MI6WcYdoaXVJlDo5Adxy9Jay3GxVmJx5fgYXrlxAU0OT5X1AQPzg1kHEI3E89K2H\nDEhUEw2dXDuJS3QJC79YMIB05Z0VI/bzbwt/CwCINIgTqSXSgr/+yF8DAF568CXEG+MARFbZ93W+\nD5954TPoaOqwnUNXvAvRBuEAdgVX8N03v4tbZm9B6sspIyvtUOcQvpcV8akS+BrRiJMXTmJ1fdWw\nwgJAW6wNf3L3n1jAevtfbjegsKV496RttQ2nVk7h8InDaI22Gjc4Ord0lqyLTvymyGtvv2YbMxoq\nVKhQ9aIwxjPUNaNrsezJRs2pHsr8eJp7PQyUqaT8y6PQx32qcYYfgACuyxDAdr64XQrCUsjjJ+8A\ncBSlcaKq+iGAVZdJlisGkTm2ubitHeQPF+dxDNa4zwZgdcsqZj4xgz3P7UHijoRwG5bZbFMAfhPA\n4zBLpXQW57gOsQZvFJ/txbn9Ozi7wrqVGOHuum0QcOrn9MhAHx9aZ3p2YgInDh9G98gI7p+bQ9zn\n30AQtSTVsiBOZVtmnp8pKW8Si8QsbsG8ruZQ5xDyv5XHoy88asRfNjc2Y3zbOI6uHMVtidvws7d/\nhu9MfscS3yjH9Ma5N4xMtxPbJnDk9SMGSBprsG0CL731Ek6unQQg3FuJCGcvn7Vs1xXvwvt73o/Z\ne2fxwDceMGIwARGHyfuS73135bs48c4JNKLRqDea2pLC9z72PTwSfwTHnj2GN068gY7uDizev4iB\n+IB2Tb1IdyzLqekZKlSoUOXKS4xnaPEMdc1ow610VdBGzWlDM9oW5WnuNRqoV8tzidVbzaAqLWmX\nIBLixAE8BJHBVrq08uviFQhw4vp7CBhahel2OgTTXRcQcaM/hD7hj6pLEMmLfg576ERxLj+E+K/B\n/3NQ0Sr43/Yh8U/F2M73K3P4GkzoBARM//PiPN4LM/PsWQjodHOFdXOXlevO3XX9yM2tuk507+ws\nbp2aKgs6Z56fwdmLZ5HaksKTu54sG0pU115uAVVrherKm6jW5J7mHnTFu9C7pRdP3/e04cYq4y9/\n/aZfx+n103hr/S0ce/MYzl48i1958lfQ80VR/gQwS5W8euZVAMI19uLVi/i1G3/NMvYHtj2A85fO\n45frph/4aGoUTZGmknmeXj+NwycO47a/vA2vrr5qvD/cNYw99+wxrKD8PQnDV5lv+craCn59/6/j\n5DMncf7qeWAAOHP/GTwSFy61M8/PIPtcFucuqumYnaVzCXfKnBsqVKhQG6EQPENtOtnFJdST229Q\nuhbn5FW1nLtbrIvXmNcSDlYB5iBMIHodpnspF0FkuZX7vU/5/HJx/9sB/BkEvOVhlhkBgGcgYKsd\nztK5vOqULG4rkwJdcdhuD4R1N1V8bwQi+yxXG4TFcw9EnVFpXIoCsMulwtxfHQEZqBwc3dyqXVSr\n2Kl4IoGxfft8QycgoOTYyWNYWVvBB576gOe4QKdSHyrMPvrCo5ZtJZQmm5L4wb/4QQnsvudr78Hj\n//A4Tq+L+EkZ3yj3645348z6GfyoIGoTjXSPYO3ymuGC+6EDHwIAfOUfvoKFlQWcWj+FKKJGDdFX\nTr2CRJPZ5wsnX8
DCyoIBta2RVly8ehHfeuBb2rk3ohFvrb+FU+un0NTQhIltE/j2A99GIp7A7L2z\nmByYRHYga7zXHms32o01xIw2fn7+51hYWcCZN84AJ4Gt2Io/KZ74drDodk7pYnvLSYy12coHhSpf\nYYxnqI1QCJ6hrhnVg5UuaF2Lc/Kqepp72ZbnWQCDMC2bvP6mtHB2wATEhuL7FyHgKV58fwJmXKPU\nCoB3AXgOouYl/zZPQ2TCLcBZV2CfnEeqCSLm1KF2qDH2H8BMBvRjmPAmYy3vhEgkJGNZ+wAssjYu\nw5qQCDCB80l4r79ZITj6TUBUkfwAdYDiNSlX1lYMyJHgse0r2/DhAx8uTYyjAaOZ52dw45dvxF/8\n/V8YMPvIC49Ytn3fX70PZy+dRXOkGdGGKIb3D2PX13dZ2l55ZwVXinc1Yo0xS2zo1K1T2NGxA8dO\nHsOp9VPob+3H3P1zlvM30hDBjV++EReumCfrFXaX5OS6KLMCCJfa9ybfC0CAIQCcv3IeR14/gt9/\n8feNGFUubrm8SBfx49UfI/tcFhPPTgAAnr7vaTx131MG/M3eO4vueDfOXzmPS3TJaMNSsuUC8PbX\n3sbvrv+u5bi4waIXQCwnMVZoJQ0VKlQ1FYJnqE2nTCaz0UMI5UMblSDJj9zOKd/WVwkTD0HAlbRs\nxjXbnoGAxH4A0hNwBCKm8hgEoLVCuON2KvtegbAWnoIZFwoIq+QxuGekbURpjU6md1reAW0hEbN5\nyaWt4zDrac5AWGSPQABgd/F5A8S8pC7AClvDELGmGZggJt1mJUR7sWJK+M2i5kAH+PyOUt2xayBp\nmWxqLE0AJMHjxDsncOzNYyUAwsGoOdJsAOfKBRMak01J7Llnj2XbvpY+HHvzGC5cuYC31t8S2V/f\nOILbv3a7AU4y2VAEEbz02y8ZcYrS9bQ5JrJfNaIRFy5fwL8++q/REhF9vLfjvbh49SJWLqxY5krK\nCX71qoDHM5fO4Kdv/xTRhijOXzlv2eb7p76P4e5hy3vSeik11DmEvpY+R0hLxBP41Z5fLXm/OdKM\nni095htrQMNRQdCz985isG0Q8UaRgEmujTynJHA++dMnXQHRT4Zjqc1WPihU+QqvpUJthELwDBUq\nVFW14aVZApCr9VW1WnGY4Flaf0Oz7wgElL0LZu3KOZhWUAlaCQB3F98zKjAXL6ob1oBDZ0yw9epC\nKw04sr1GGBaky42XEb0YRcPZhtI22TUzUBzrv4EJeEsQFtkCBHwegYDjIxButnblYG6GcL3lICYN\nc8MAJuHdirkBQFeWNiCeVLrZXrx60bAcqjGajcXLg6HOIQuA8My0B187aAFOAEg0JQw3Wm5xk+Cm\nAtzK2gre/dV3Y+LZCXzrgW+hv7UfP/n4T3BX111GMiIJWPNvzAMQVsPT66fxV8t/ZSQB2p7YjsK6\ns4k/2hDFFTLH+sb5N6zWx6Le1/0+dMbFXZ7hrmFMDkzilY++gp64eeL/ePXHeOHkC8Y2KqRJQLx0\n9RJ6twh3hc6mTsQaYnh/7/vxN7/9N8b7w93D2HuPSM+ciCdwc9vNOHayFPoB88ZA4aKYa9CAWI6V\nNFSoUKG8KgTPUJtOYVzC5tJmSPpU8TnFIWc7gB8V3x8B8D2Ybp+/YPu0wwQpCVs8GY7OXbQHAlJH\nUYTFIhTSL4G9OQFugD4GU01SJNUB4OViX6cB/BK4HL2M6NUomi4X3Q2TMGEzBtFPb/F9QFhdea1M\nXmtzqPiU67EXwhUYEBl6e9lnX0ApiMl1+DaAp+Hd/XUDEwT5Op8qdQv2INUtk1u1fqPvNwx30dX1\nVcM9VLqV3rL1FguAJOIJ3Nx6M469ecyAH0AA5cS2Cfzs4z8zkupIi9ujLzyKl0+9jFhDDHcm78S2\n1m2W8Z2+KBL33HfoPvzwoz/EwNaBkgy4ACyQ2BJpMeCwPdaOP/3QnxrWTztdpssG
JDei0QLMHTFR\nbiWCCL6z8h384NQPEG+M42dnf4bzl8+jo6kD8YjpsrB+dd0Yz9LqEm6ZvQVb/vsWfOCpD2Db49vw\ntX/8GhZWFnDkjSP44A0fxNStU7g9ebtRJmb7X27H7cnbRUzo/d92jc2U55T8TAJxJYCoc9ctx0oa\nanMqvJYKtREKwTNUqFBV1XWRIEle77ZBWPZOQbjOzkG4n15tJGsAACAASURBVMp4QbldEsI6+gKA\nWyGyxQJWSNLFGX6z2PYCGFyeA+4olpQ5aTO+LRAlWwABmlH2WSuAu4p9PQogC0QuC/MmgYQl96cQ\nFllAuNy+DNEXNzANsbHPinYwUGwfMC25CQBPQMDWUQgXYg5eKoiVG29ZA6ALRDWIJ1Xj9pzqP3L3\n0JHuEezNlBZK5fAzsW0C2YEsXnvoNRwaP2QBFu4WKmtzHjt5DG9eeFM7zpW1Fdy+T7je8gy4XM2R\nZqSaU/jIwEcMt9qzl84i93zOErfqpq54l+W17OsKrmD10ipW1lawfnUdq5dWceT1I+j5Uk+JGy8g\ngDXSEMGZS2ewfnUdL7z1Ak6cP2FYYiOI4Pyl89hzzx68du41Yz8JoAAMl2IJgX92z59Z6p3yGE55\n7L79wLeNrL/lKoznDBUqVK0V1vEMFSpUIJqZEW61LS0CNq8pyJR1IdV6nFKyrqR0LZX1OdXttgNY\nhrB2bocAT6l+CAB1WrdOWGEvfhW491PA4/9eLHgPBJhyNUMk+umAWftye3G7SHGsncV53Q7hIsvV\nCGFl/SVEzOhWiHqfjTBddbdAWHPVsWdgrYcpS8z8BAJKXyv+bId+XUMFIqd6nbrP3OpIrq6v4n37\n34e+lj60N7Xb1ojkNT25Yg0xI9mOTk0NTfhg6oMGmAFAAxoQa4zhgW0P4PT6abzyy1csFtcGNKCj\nqQOrF70F9EYQwVVcLYkB9aNIQwTff/D7uPfr9+LUuvmHF22I4jJdNn4C+lqfANC7pRevfuxVZJ/L\nGms1desUTr5z0ngdb4yjJdqCnd078cSuJyx1USup0xlEHddQoUKFkvJSxzMEz1ChQgWiTEbEcgLC\nwrlv34YOJ1hlYAUou7lJAJUxmaoSEMmEABGPuV78fQiiJIrbdd8uCLCVygJ4qvj7DIBXIJL8HAHw\nH4q/fwfA5wB8BSKZTxTC4roKAdJyPFPF/dwy4U5AWDG/DeGaG4EA18sAdkJYMxNs28MwQTwLcx1V\nOa1rHer5mRmsLi0h2tKCe2dnyyppUitpQbJ4p2i1I4aZ/6UNe+7d6ws8bvzyjYYFMDuQxVP3PVWy\njQSboc4hvP7O63hr7S0Mdw2jPdaOhZUFNKDBFvzijXG0xdpwev205X0Oc3aKNkSRfyCPe79+Ly6S\nl2K25Wvq1im8/NbL+OnbPy2ZT7IpicLFAhrRiGQ8iVhjTGs1HWwbxOrFVRQuFtAWbcMHej8ANABH\nXj+Ctmgbzl0+Z+lv35j4Q+Fgz9/3KrcbDKH0msEMlrCEFrRgFrNIhHfMQoUC4A08Q1fbUJtOYVxC\nfWozxHLayfWc8hov6OY2KZPqtAB4EQLEbv4B0PoA8JBN2l+euOi/w6yJuRXAf2bbLUFYUNcB/D8A\nDkHUCh0ofiYrTFwG8BaEy6yETjmvncXXt8NMVMRzwQxBWD9PQsRn9kFA51swrb3cY091d5Xr2K78\n9BKHuUFlR+y0urSElYUFnDh8GEeVrFn19h2ljdsrZv1KPHME+74Q04KHU8mO9Svrxu928CjdQvO/\nlcfSx5YsLqLd8W7LfhElI9b61XX8cv2Xlvca0egKnYCI5dz17C7c2XknGlzrBZWvtmgbCmsF/NO5\nfwJQug7SIiuTIZ1eMyFajqs10orCesHY9tzlczjyhqg5mh3I4gM3FH3kXxWJnpojzcYxiUXEF0q5\nCYbCeM7ytIQlLGABh3EYM3WducxZ9fY9Fer6
UAieoUKFCkTXdCxnUPGCL0G41P4dRFzlUwAungCO\nfRo4/Cngd/730n144qJHYMLh28XXEsh4QiP1GtQu9K0NAuTU2MvvAPgYTPiMQ9TzXAbwTHE8CxCu\nttxjskPpWwVxuY6vABh8Cbj9Y0DqeeDJM+7rapeldoOANFq809I9MoJ7NtudFsDTnSK1DieH0J3d\n4kRsjbTi/OXzFjCVwPrQtx4yrGmJeAKJpgSyz2Xx0LcewlC3yDiVaEpgYtsEIg1W8GyLtpWAXKOP\nS5a1K2t4+fTLIBCaI83oineVwK1Ue6wdu2/d7bntCCKINkQNSOSZcrk6Yh1GPdBGNOL9ve8HYJ3b\n+SvnsXpp1RiH1Mn1k4hFYnhi7AlkB7IYvWEU+d/K45snvmkck6bGJl8ZaL3U/gzlrpbiF+oIRrCn\n1pnLQoXa5ApdbUOFClXfulaDR2Xc6LGzwOXiBWf2ItDTZI2DfBUiHlO6q94J4ASEtfAVAJ+E6b7a\nBFELlLu7AgLI/kcA34ewiHJ3WuniqsaxOrnFcm0BsAbhcvt9CKB2mq/Rfsafb7bqtivnl4E3N+iA\ntb66iqMzM7hnz566crP1HPe3uir+tvbssfxN8f0v0SUcef0IRrpHEI/EcexNEZ+Yak7he9nvYeSv\nRozYxsG2QVy4cgHr/z97bx8U13nne377HZoGGmhkhJBakkvWSyIZJBzJsRS1IyleEyd0XshcM3cs\nu2rdU8luJffurrh3tu7O3Jqb3Joqp27NTO2uK9pkxEzingQpkWLZZhRhCSThGFu2XhxJMQ6KiRBC\nvIgWIKBpoPePp5/T55w+p885/QIN+n1UlOjz8pznvAD97d/Ldy6MKKJCraVaami9tx5XR64KdaKd\ndzsxFmENeRxmB3Y9sktS4+k0OzE5P8nWmxwIR+MRVy2SpeceWHUALftbJDWWRpBfG47NZMN2z3a8\nN/SeIDJXOVcJ12ckPIIiWxHGImMoc5QhGo1ia9lW3Bi9gcHpQaHusqmrSXI/1//reiE66vf6UZ5f\nLlkv3158/9NNzSUYIYQQQACHcZjSbAlCBNV4EgSx9FmuxaM+SIVdzSxwxqos+KrAPEB7AVxBPLLX\nAGACTJC5Yt/z5fLLxIXfNcQbENWA1Wq6AaxEvLGQH0ygtsZe84ZCcmrBBCdvkpRM+InPtwHARB3Q\n6gdcO4Bd24CjtuRRT7X6WTVB+pCSrriQi0O7xY7Dew6j8e1GtPa1Cts1rG/AxMwEWvtaYYYZZlNi\nGqxcPF0bvYbh8DBcVheK7EWYmJkQur+uyFuBwel4W+byvHJE5iJCNJCLx+rSajgsDnQNdUGJUnsp\n7s3cU1wnF6Al9hLcfP4m3A43Vr+2Gn0P+gxdKwB43P04yp3luDBwAdPz03BZXfjCyi/g/sz9BDFq\nN9sxM89qTqsKqnDhqxdwqOuQpOmQ+Jq7HW7J/eDpySPhEaEWdGp2SthX3pTI4/DgifInBAFKzYQI\ngsgmVONJLEuoLkGFQICJtDqVWsGlygIUjy7YMyVOC+X1ntVgQu+MlYmmnthy/qu7CMAFxL0++a2V\n+1zuki2Xw1NVh8FqM/2Ii04g3ugIAKKIRz3rwbrt1iPuucnnfBqsxlN8XLXU1wR/ziDgeQqY2AG0\n2aBZKqVWP7tQtikGUnoX/HeUaG5CGqBK3Z9WuqXYQ7LZ1yzUAAb3BVGRXyEZO7gvCKvJinnMSwTd\nttJtEo9JnrI7HB6G3WzHxOwE+if7BdFZbCvGFyu/KJnH0PQQ8iysoNlldQnj90/249bELdVLoSQ6\nnWYnVuStSPD5HJ0ZxSP/8gieb3seY+Ex1TGTcSV0BW39bZienwbAajQvDl1EvjU/YdsnVzwJgF0/\n7lfasr8FRfYiYbn4mgPx+5H3hzwMh4cxEh4RrmFbfxt6xnqEfQ/vOSxs77K6MBwelliliG10SHQS\n9F6KWAxI
eBLEciHWLAStrUyELheUikeXqsgW1ym6AKwD6xDLRV8AAH//yxNFxsBqOfl75howESj3\nueT1mWoCTCz8roHVl4q347Wj1QCaY+uOAzgBlvJ7AnHPzbOi/eXCT3yOm8FSgX1gtaCSebuBJz4T\nn5PaZwpagq8JrNlRo8p6AyQVZWo1ppkYO11Ecwv+PLm40PJuVBMnbocbN751Q7KO120CgNPixIFV\nB+D3+tHxlQ6Jx6RYzH5h5RcSjllkL0LrrVbJspqyGvgqfXCYHZianRKWD04PYnpuWtdlMcMME0yY\nnJ/E4PSgYofbmegMfnHzF4IIlrPKuUry2mlOLJiWNzAanB5Ed6g7YS4wAWsK1sBhZv6cB88ehPMn\nTvym7zewmWz4yd6foKmrSfKc8PuxpWQLgNg1rPiC8P27X3tXck/49rwpkfgDCLVmQlT7SRDEQkGp\ntgSxXKirY6KztnaZdvgRsVTTb3laqAssQjkFgGfjNYAJKJ6OagXrQMtTSL8X2/dxJNZwqiGuq3wV\nTMCqWb1oWcHohZ8jx4N4aq88FVfPMX1IXsOptd4ASdNU00zpzWp9nYG58XRLj8ODje6NKLIp+3Dq\nrRXtHe/F7td348JXL8Bb6FUcw2axocBagGJ7MXrGevDe4HuCj6fVZMUOzw5J6mx5Xjne/9r72HVi\nl6L9iBVWzEK7u61RtKxaHGYHyvLK0D/Zn3ScbaXbUGgtTPDs5GOE59knTfLzqHRWYkPRBsXnRGx9\nAkDTBsWIVQrVfhIEkQn0pNpaF2oyBEFkmWBQsVnIsmSpercEATwGZj/SBqAitpxH/Bpjr0sAtAP4\nPuKirFe0XwDKDYHkt51HwgAmOpO9n3RrrBeT7LhBsEjnQOy83LE5K0U19RxTy8pGr9WNDsSRuYQ0\n1SDSEuZJx04XA3ML7gsicD6A/gf9Qg1i4HxAEBvcn/S3X7qK33lGE9bL8RZ6cevP46mvYsE6FhkT\njtGwvgG9470JdY+z0Vl8OPwhAMBismAuOoeh6SEc6joksWyR7JNh0WmGOSFdWIlCWyEGJhOFsJx1\nheswM5cYXXVZXZLorfw8qsuqcXXkKgDW4faVna8I65q6mjA4OYjGtxvRebcTDyIPcOKPJ3Dx6xex\nrSyxoxePbuohlWdTdxMrgiAIEZRqSyw5qC5BBbebRf6Wu+gEMu7dsmDPlBtMICH2/7uQpqnytNWb\nYN1hxTWNSgJLK/1TTZSla0GS7LhuADcQPy+tFGAttGo4y2NfGXjsk9bAqdWYKqD0PKVaXxc4dw6+\n119H3VtvIRRW6eRqYG5ckHDrDrnY4P6k0Tujiuu1ONl7Ukjl/eT+J5IxuMApc5RJ9olEI6gqqMLT\nK5+WbM8tW7LNPOZ1bTccHtbc1mayodnXjOC+IErtpZJ1E7MTmEPceoVbrQBAHvIwMz+DVQUstXcs\nMoZDXYeE9e+df0+4rmORMcxhDpFoBDtP7ExIlRW/Pnj2oGYabSrPplbKNpFZAgjABx/qUIdQhnyj\n6L0UsRhQxJMgiKUHF9nZJADgJFj95Q7oT2/VQh6dEp9GsgigUlSLC0sPgH4wISmOQKpFwsSRUB49\n1WITmCCeRbzxUQ2Uo4zy80jnVmlFRZUiwRyDVjy6okRaUeZ0xlagOxRCxwCLsgXOn0fL/v2Gx1CC\nRz7lqZjcn/Q/fViNE8+uxU/2HTEklMVRytryWhTYCnB4z2E0dTVhLDKGivwKeF1ejAyNCNtZYMFE\nZAKRaAT13no0+5rhdrhRWVAJj8ODueicYCHisrowMTuRcFwxWimz2cICCy5+/aLQxddsSvxs3wIL\niu3FsJqsmJqbwswsi4xOYxptt9sSGjhxHBaHsPzKyBVEohGYYEKXvwvf7fyukCq74ecbJNer3FGO\nofAQAPXIdSrPZlYj+EQC3ehGR+yXdgABtCyUbxRBZBiKeBJLDp/Pt9hTIB
aTdKN1Cig+U91g6aKj\niIuaTGAgOqW5H48GbgSrFZVHINWOlUp66gBYg6AoIAR9/gRpUx899ybT9y/ZuWSj4ZaOJkOZ/B3l\ntLLPh2s9Hhzes8fw/mqNY9QazewLBrG+oQHfevMsfll33HAK5Y5yFqWsKavBa198DS37W9DU1YSW\nnhZ03u3EwNQArt5j6aRWkxXFtmLMYQ6hmZDg28mP2Tvei+HwMEZnRuEwO7C6YDXyLdJusfJmP2ZI\nbV1K7aUosZdItrEqfOZugSWhSVDCNiYLDqw6gJ3lOxPWmWBCz/M92Fa2TdLFVz7mPOZxb+YeBsPx\nJknm2FuxWk8t3vW/KzQT8p/yC/dt085N8Dg8cDvcOPPcGeRZ8nD5G5exrWxbQidbLjprPbV4vOxx\n4ftMCkS9UVJqXJQZhG7VqMXhdGsKYtB7KWIxoOZCBEEsLXzIWDOZpIib5Ij9LnMRo41vUmkkVI54\nkyBA2TfUB+17o2cbI8jORVJ79qMI3K+3Zbbh1gL7hobCYQTOn8fhPXvgdjgM778QjWPE1/zVPa/i\nUNch5Fvy0Tvey2o9Z8aERjsl9hI8VvQYuoZZM6GK/ApJA6EyRxk+V/45BPcFE7xDxdE7Tt3qOpzu\nOy00K+LbiCOjJpgQRfx9yFv/01v4Hx/9D7TdbjN8rmL/Uo4JJkEEBs4FcOzmMYzOjMICiyS11hz7\nx2s7rbDCbDLj7efexj9e+0dJ9HnlT1cK16XeW49QOKR6H3kjodHwKNput6G6tBprC9fiiO8Iuz86\nmwxlA2pclBlCCCGAAA7jMNw5+4eIeNghH09iWUJ1CQ85GWwmw1F8poJg9h9+5LboBIx7WaYSdb0I\n5v95AOyaKPmGiu9NPvT5eRpBKVoqOxdJ7dlfujJaCwxA17XO5O8ot8OBlv37UxKdANAzznwei23F\nkmY1mSJwLoCWnhbhmh/qOoSW/S3oHe8VlnGvyRJ7CS594xJK81jtI4/wrchjBrEuqwsj4RG09rVi\n+6+2Y2xmDHazXdiWR+84BZYCXB65LLx22Vxoe65NYicCQCI6ASbEju4/mlBrqoXL6sJoeBSv7nkV\nDesbsKN0hzD+9y99HwB7/njEUZ5qO495mEzx92SzmMVMdAY/vPrDhOizOGX5VN8pXOy8CIBFkuWR\nSx69Prr/KBrWN+DsV87i+DPHBcsbpcj2QkEpuZnBDTda0JJR0UnvpYjFgGo8CYJYWqTZXVQ3bjDv\nyoXEaP1gKvWGKdYowgvgtui1UtRUfG/8UK4jTef+6ahNlbzR3XcEqMvwQ5Ks5pRf2ykAp5CR55N3\nmbU6ndgXDMJhUEB7C7zoe9CH+5H7gijUQq1jqdLy7lA37kfuA2DpqqPTowiFQ4LYLLIV4dSXT+H7\nl74vRN3k9aUf/9nH2P7L7bgXvgcAqC6tRoGtQOiAazfbcX30OiwmC2wmG56qeApXR67i3sw9PJh8\nIMx7IjKBr576KsJzYdybvqd6fo8WPYrvvfM9zEf1NRUqsBQgYolgYmYCbbfbsPZf16Iiv0LoUFtT\nVoPLw5fhPuLG5OwkAPb89T3ow8DUgBBxLbIVYWvp1oTOvkopvjvKdwgR2em5afAGuLcf3E7YlpNq\nHXE2UaslJgji4YRSbQmCIFIlVRGnhg/G0lCNbp/qPqmQjZRUHWMa8S/MOD6kdW2VhN3rPh8GYp61\n6xsasN9gUy3u21nrqcWWki1C+msyCwy19Eil5Xx8cUOfhvUNEruWdYXrsKZgTdLjisf2e/0Iz4XR\n2teq2EyoqqAKW0u2orWvFcW2YkH4AqxT7Ew00cpEjlLabqrUe+vR3t8umcfeir2YnpuW+JMC7Nyi\niOKdu+9gaHoIDrMDDosD4bkwqsuqUeooRXBfEACw+RebMTA9AJvJJqQSA5SyShBEbkKptgRBENlE\nR6MZQxhNQ00lbTULqcoJBACMgfmUHk
PmItM60lwXNbUwzWurZFHBu8x6amuxJwXPWnETGHH6azIL\nDLX0SKXlfHzfSp9kndiupdJZqXlc8dhHfEeEcXetYCmzVhNL0HJanNj9yG6hQ+6+yn1Cs6CtJVuF\ncZJRYi9JSNtNlc+6P4tmXzNsZptkecdAh+BPajOxdTazDXce3MHM3Aze/9r7aFjfAIfFgbHIGMLz\nYXQNdQnXyO1w48af3UDD+gZs92yXzJ1SVgmCWKqQ8CSWHFSXQGSalJ+pTIs4o7WaRrfX2idTHWe7\nwbrsDgA4pLGtEZRqU7PQ5ThlYte2/W/aUxLbSsKOd5n98unThtNsAakQ11tvp9axNLgvCJfVhe5Q\nNzb8fAN6x3vj9YUHjkr2EY+h5Bkq9yWUH1M+7gdf/wBVBVW4/q3ruDN5R+iQe37gvNCsZ33RetSu\nYEa5ltg/JbaVbcOP9/5YELNizAbfFl0PXcf6f12Pje6NcJildbjcn3R7GROOkfkIuoa7JLWwvIZV\n3NmWXyN+DW4/uA18zMR3+1fa0/5QhTrNEgC9lyIWB0q1JZYc7e3t1Aac0I8OL8eUn6lUusPmMj4k\npIp+5Qeb8Mf5AeTBhjf/8iIeWeHVHmchO7/6sDCpwwZI9XnKdppwJsZ3H3ELKaVVBVW49ee3Ujqu\nDz50nOsAQkCFtQI39t2QzClZbas4fdhtd6Otvw21nlqc/vJpAMBjP39MkkZbZCvCWGQMFpMFc1HW\nZdbj8OD+zH1JCmuRtQhOm1PSZVeMFVZB5Crh9/rxzt13MDg9CIDVqj6YfYCbYzcl3W1L7CW4+fxN\nNHU14cQfT2A4PIzPrfgc7jy4g9UFq1FkL5KkJO/+9W50nusENmYmzZY6zRIAvZciMo+eVFsSngRB\nLG98PublCLAOpwZr5B4qFATjZ/+bG9ceYULjC3er0PF/aQuNjAvyZLW0C2xv8rBT/s/lGA4Pw2lx\n4vq3rsNb6FVtRpSM1edWo6+nj3nDIlEA8drWn/57YOyzHqza+oQwtljIAol2IVyY1pTVYI1rDfIt\n+Wi52YL5mAGt0+LE5BxrAmSCCSX2EhTYCrDGtQbXRq8hNJMYBXRanHhixRPouNORYM8CANtKt6Hj\nK+z3zOrXVmNqdgpuhxszczMYnx0XtjPBhO2l27HCuQJjkTFJoyFx3an4eoiFtpZvph4yPR5BEARA\nwpMgCAKoqwNaWzPr5bhcURCMtf+tHB88MoxHB53oDFzXF/HMND6oRzWXWNQ53S61mSQVwdg73ovd\nr+/Gha9egLeQPQvyCJrb7lYdlx/zyr0rgsDjEUDxdm/V1aGvtRX/8DcuXK+cEMbWE52TR1jF8xMj\nblwkbo6khAUWFNmL8OQjT6JrsAsj4RGYYRbEbL23HivyV6A71I3Ou53CWEoilSP2MK0pq0GZo0wS\nvXU73AicC+D6vevoGevBu197V7jm6bCoDbgIgli2UHMhYllCdQnLiIWozwsGNb0cl/UzZeQaK9RQ\nvvmXF/GFu1UZFZ2Ga8yS1dKm4kmaZZI9T6Hubgx0dKCvtRXnA5noSJU6Ss2MtPAWenHrz29JBJC8\ndjTZuCd7T6JjoEMiOi9941KCAOK1rVXbWXMh7qEpfl7UniN5gymlhkNOixMWE6sBLbAWCEKx2FYM\nv9ePYluxZPs5zGF0ZhRX710V6k0dlnhN5/DUMF7vfR0dAx3CWE6LE+e+ck6o4xRTXVqNd/3vot5b\nD7/XjzPPnUmokwXYPeoc7MTAlQEc6ooXTBv5GZJvu9jenkRusKz/7hE5C/l4EgSxeIh9GbdfBNb8\nH0lrMVPC7V6c9FodtaULgg7vy2SprI+s8OpLrzUypZgwAViapGYUa6G8WxeAdLvUZhK9zYa0kHs1\nJhs3PBcWvq90VuJawzVFAeRwu9Hyv7rxYLQfNpMNE7PMQ1P8vIifo+2/2o6p2SmE58LY4dmByoJK\nwT
rm1T2v4onjT2BomqWxVpdWo8BagM5BluZaYC3Ag9kHggj2Fnpx4M0Dgo+mmP4H/dj4i42oLqvG\nnQd3hOWdg50SP06H2SGkIu+r3IfWvlbJOKMzozh49mBCVLhlf4skEm2zsI64RbYi9E/0o+6tOgT3\nBQ39DOnZNpXoN0EQhFEo1ZYgiMVDXJ/neA7ofJMtXw61mLlSW6qnBtKHBW3Q8zDXmIVDIZwPBLDn\n8GHNNFuxGPhfTpZj7kYvrE4nfvkfy9Ezpe3HqTXmq3texaGuQxlPueSpnPmW/ATfUC7oaspqcOa5\nM0mPK0+RlT8v4ufIYXFI6iXlvqKH9xzGS+0vIYoomn3N2Hp0K/om+1BkK8L5r57H9y99XzLfV/e8\nisd+/pguT1CA2arcenBLaLzk9/px/JnjwvXgnpwAS6t1WpyC8F3nWoc1rrjPqf+UXzjvem897BY7\n+if6he0b1jdgYmZC8WdISUDq+XmjhkMEQaQL1XgSBJHbiOvzGpdZLebq1UBfH1BUBFy9CnhFaarJ\nmuVkGn6N8wH0qhwz0w16NM7vYasxSzWaJBYDT/WW44X/ziJ2/8/feXC1ZBiAMZEQOBdAS0+LII6y\nLTCUxIyRey9vEtTsa5bsIx6r8e1GIapYYCnAg7kHAJTrR4FYp9i7cSHXsr8lYb6v7HwFW45uQWQu\nIul+y9lctBmjkVFYTVZ4XV78/v7vMRIeURTVoXAIL7a/CBNMOOI7Isy31lMLh9mhKSpX/2y1IJSv\nfvMqiu3FitfRyDUXP5eRaARtt9seyg+DCILIDFTjSSxLqC5hGSGuz9NRi5ktsvJMcaE5NgYckplZ\n8vTXVjCRlk34Ne5NcsxU/ECToXF+y73GTP48adVSqtXriVNWv/u7xwGwFN2KzdXCciMpst2hbkF0\nlthLDO2bivejUsptsnsvPwb39txauhWhcAiNbzeq1nIG9wXh9/pR761HsZ3VZybzveTeouLaUfl8\nvYVePOF5QlF0AsDGko248xd38GjRo+gc7MRIeARVBVWKkVy3w40Tz5zA8WeOY9eJXegc6ITdbMdP\n9v4ERXapz6mSj6r7U/b/WGQMh7oOqV5HI9dc/FwWWAsUvVuJ5Qu9lyIWAxKeBEHkBrwWc6lHOjlF\n7M0kamsBeS1fsmY52WIhG/QsxvnlMFq1lGrCVCxA6v/5KNY3NODLp0/jF88kNqExMg+1hj7JSKUR\nkZKAMnIMLph6x3s1j+12uHH8meM48cwJrCtaBwCYjc7i+5e+rzo3j8MjqR1Vmq9SYyKA1Yke8R2R\nbFPrqcVH3/xI81wHJgcwNjuGmfkZfPnfvpxwXCWhqLce18g1F4/Z7GvW/DAolQ8fCIIgxFCqLUEs\ndXKliQ0hJRRi9+bw4cR7YtQCJBP3eCFtR5aYxUm2EOlmbQAAIABJREFU0UovTaXmNZX03XRSnNOp\nyw0ggG50wwknggjCrfJQqB3D6LH1bq9nu1A4hJfaX8KD2Qf46N5H2Fq6FU6rU5L2K76uTV1NmlYy\n79x9B5FoROKFqoXeYxjB6PNAdaAEQSSDajwJ4mEgV5rY5DpLWaDTPRbIJR/MTJGKIFxoEaCnTlBN\nBPngQ0ese1UDGtCi0r1K7RjJmhUZGUc+32w0V0p2X8Tr8ix5+P23fp+SL+diCcCHuSkYQRDaUI0n\nsSyhugQZMXsGxZROIk53NxNvra1MhIrI+WdqOd9jg16uRn0wzwUCeN3nw1t1dQiHFiY90OjzlErN\na6asUPSip05QLQ3WGcu9rkUtDifJvVY7hpGU22TjyOd7qOtQwnbidNKDZw9mpK5Vad2df3/HkOgU\nP1MLfe85RlOnidwm5//uEcsS8vEkiKVOMKie0vmwI45y2pgfnm7xZrTzbDYjqnrvsWwOgSZ37gd5\n9fiMijDqg8mFKgCcDwSwfwGjxdn0RpR7Zy4kSj6TyURQEEEEEMBh
HFZNs00WyebHuzZ6TfNYWuit\ntwWAckc5hsKsk7Auv1kkvy+ZumeLde+5oCcIgkgVSrUlCCKz5FJKqzhF1e9n4lOvQPfBmLdlLqTD\nyubgG2xZ9ClpYtDKxYgPJgC8VVeHvtZWeGpr8eXTpxc0NTeXa+LSEcXi8+I+k+mKoNd9PuEDgvUN\nDZIPCMTHqyqo0tXARw0j9bZuuxtt/Zm3GMnmBxIEQRCLhZ5UW4p4EgSRWXhKK8BE6GKqHXGK6pEj\nxkSw0c6suZAOK5uDs1FlSgEAJwGEAewAcBRAExbOW1RMEIYaETncbkNRy33BoCGhyslELelipUTq\nQRzZ0xvN48i7oaoJJyMCK1kkW3w8w42NzgVwsvckwnNh7PDswNEDR5OeqziaCCArkcV0rj1BEMRS\nhiKexJKjvb0dPp9vsadBqFFXx+ooa2sXxZNTQrLOsiIUnymjnVl1HiuryOagOiUf4tFcgEV0B2Es\nwrvMSRaB04I/T+l0kVUjU9GydBrF6D0vIxHfZJHsdK6jeA565pEJ+D3qGe+Bt8CLInsRyveVo9fR\nCyeciLwVQVtfGzwODza6N6LIVqR5L+nvHpFp6JkiMg1FPAmCWHhyqeaUe4OmtC+Mia90jpUpZHNQ\nnZLYmrAaTFzHoqPkvckwWkuqRDZq4lKNlsktTdKpE9R7XkoRX7VIcrJIdjrXUezDWV1avSCRZ/E9\n6nvQBwAoP1+Oof2sXrR+Xz0azjeg/0E/Ou92AqDIJ0EQDwcU8SQIgnjYCAF4CUAUQDOYyF6i3pvZ\nslcxWkuaCfScS6qRSr2WJplEKVKZTiQ51Tm81P4SoogmTQtOF3EkOhKNoO12G2xmGyLzERTbilH9\nzWp0FHagFrU4jdNww032JARBLCvIx5MgiMyRS02DlhpGO+QSulloIZNN9JxLqmmndahDK1olwidb\nJEsHXsxmT3pINZVZqeHSn8b+hK7hLgCAf70ftv02SWffbKRiEwRBLBbk40ksS8h7apFI4oO5JAkE\nWBfYujq0v/FGWvtDyx+SW4a0gonQ5YxBX850yURKbDLEvo56vRyN/o7ix/jbfdcwmZ/8XJJ5VCbz\nLA0iiAY0ZF10Asm9PfcFg1jf0JCTohPQ50uqhLzhUsv+FpTmlQrLjuw5gha0SK69Ef9W+rtHZBp6\npojFgIQnQRD6yIWurZlELKR/+MP09tcS4kY75C4gRvSzLhZYZGdbyKQqRFI5xgePDOP4X1WlfC7c\ns7SvtRXnZc+kG+4E4ZMtknXz5bWciyk6AwjABx/qUIeQ7NORVDsRB/cF0bC+QZIyq7SMIAjiYYaE\nJ7HkoC5si0QwyMwgF7tTbaYQCWnfiRNp7a8pxINgnWJ1+FQuNBkPZC+wyM62kElFiBj9HSU+xq+b\nPkr5XLId/dVLrguubnSjAx1oRSsCsk9HUp17U1cTBicH0fh2oxAZNxLR1IL+7hGZhp4pYjGgGk+C\nIB5O0rU/yaB9SiYa5KRagptx9xuNJkXZagaUjHSOuRB1eJk6hpGGSJmyZVmKZKPe1Yh1DEEQxHKE\najyJZQnVJRC6SZZH6nazL78f7Tt3Gs8z5V4lGRBOqimSBvJgU41cZjyQzW1oVMZKlg6aLdI5ZipR\nK6O/ozIVGTMS/V2IFOJcJRv1rqmm6OqF/u4RmYaeKWIxIOFJEMTyYNM5wH0ZKH8f6L3PlmmpMb7+\nvfcWtWGSaoqkATWZagluBvWzLvSmg6bS1CfdYz5MZFso5TJ6612NPIO5nl5MEASRC1CqLUEQywP3\nZeB+Nfu+6h3g1ue180i11i+QhYxqiqSBPNgMZv5mFb3poJlMXVwMT85ch6w8tKH0WYIgCP2QjydB\nEA8P5e8Dw08AzmvA9SrAW6ytxrTW+3ws4giwfFS3e2G9TJeKmswCdW/VobWvFbWeWooiEYsCPYME\nQRD6oRpPYllCdQmEIhcfY5HO
r/7fwMF6Fi0EkueRxvJM2y9fVl7P81c9HqC/Hzh2bGG9TBc6DzaH\nWMqpi/Q7anmQS88gPVNEpqFnilgMUhaeJpOpwWQyXTOZTHMmk2l7JidFEMRDRKaMJL3FLL32zg1t\ncaj3mLzzzsaNQGcnMDrKli8XL9McJp2GO+cCAbzu8+GtujqEM2JOSjyMZNIOhSAIgkgj1dZkMm0C\nMA/gRwD+92g0+qHKdpRqSxBEHHndpN8fT2etqABu3EgvwqenLlKeQtuiUbvFx6ypAdasAZqbtee4\nQPWhRBxum3Lv6lXMxD4kWN/QgP1a93eRWAxrmUUnAKAbzO81iJzztSUIgiBSQ0+qrTXVwaPR6O/5\nQQiCWMIstEDinVr5sXk6KwAMDLBl6QiFYFC7LtJoC1i1MZNdO/l55qj4WYqoCTZum8LJ9S624vme\nDwQUBfKy89vsBsBvUQDMeocgCIJ4KKAaT2LJQXUJKqSaspqqAWSy4x88qD4XuegLBlmkU7wsHfTU\nRQaDwLp1gMMBNDai/Y03Uhsz2bVL1d+E0ETNl5PbppRWV8Pr9+PLp08vShRR7+8oPTYvy85vk3/O\nVAuAfix0Q3/3iExDzxSxGCSNeJpMptMAKhRW/Z/RaPSk3oO8+OKLWLt2LQDA7XajuroaPp8PQPzB\np9f0Wu/ry5cv59R8cuZ1dzfaY9ETXyzCpmv/qSn4AKC2Fu0vvAC0t6d2/JMn0T4wwF6XlQEjI2gH\nAL8fvth27e3twHe+A5/LBRw+LDT18X3pS0BrK9rn54ELF+B77rnsX681a4TrhclJ4LnnjI83NcVe\nx8Rle3s78MMfwjcxAdhsaH/kEWB6Gr7GRiAYjJ9vLjwvS/g1F2wDjz2GtS+8AI71O9/B+OQkDp44\nAYfbveDz+4fnnsNEXx8sDgeimzbhnStXYHE48B9PnVKcj575Tl2fAkqZ3+YL8y+gPdWfz1x5/R3A\n5/IBh4H2yzkwH3pNrx/S15fp7xG9TvP15cuXEYoFFz799FPoIW07FZPJdBZU40kQi48Bz0cJmbLs\nKC2NN99ZsQIYHIzPpakpeTqvz2es5jITpHq9xChdO/G5eDzA8DD7fqHO6yEgV305X/f5hNRZR3k5\nwkNDANKrMyW/TYIgCGIpsJB2KlToSRCLDe/AalREpWrZEQgAK1cywXngALBtG1teXQ289550Llrp\nvIuRlprq9RLDr11TU/xa/O53bF1tLbsW/HtKt9WNVldah9uN/S0tOSU6AWnqbNnjjwvfp1NnSp1V\nCYIgiOVCOl1tvwbgHwF4ANwHcCkajT6rsB1FPImM0i5KNSMWGHEznbExZjHC8fsBm005cqoVXcxU\n1DVF0nqmeOOg+/fjy6qqgI8+iq9fpPNaqogjh7nclVYOj8TOv/AC9u7enZNRWWJpQn/3iExDzxSR\nabLd1fY4gOOp7k8QxBJg0ybg5k0gGgWeegqYnY2LzQpR+XdNDXDkiLq40uo0yyOHegkEgJMngXAY\n2LEDOHpUeVx511mtlF+dh5YM0d0tFZ01NcCZM/Gxl4hoyiUSmu4EkLYFx0JYl/BIbHt7u/D9YqN1\n3g+lpQtBEASxKKRd46l5AIp4EsTSxe2WiiqbDYhEWArppk3Ab34DWK0stdbrTdxfrNLKy4He3tRF\nX7Joq1r9pLx2dHBQu5ZUw14moRx1IhbNdbuBz38eeO21hYtuZkCQsXGk53yuqWlRxUhCDacPcQuO\nBqRkwZHrUdRkAvBcIIDekycxFw7Ds2MHDhw9qvueaJ13rl8XgiAIYmmQ1YgnQRDLlE2bmJ+mzQaY\nRWXgBQXAgwfs+7VrgTt3gHv32Otdu4AbN9TtRuRs3qy8vRrydFZ5tFVWQye8ib92DfsAOHiNZWMj\n20Ct5lJ+HAX/zcRyVI1objbJlCeizHM0NDio6S+pRTqRNIfbDbvbjVN+P9vfFoQD7rQsOPRYly
wm\nSp6e/Breu3oVM7HGXf1tbYbuidZ5q61fdv6hBEEQxKKTqeZCBLFg8JbORJYYGGDCa3iY+VxWVgKr\nV7PIJhBPq+UKjO+TrGmQ0jGMeIaK01lLSoB33wXq61ldqTitNYbg8zg8jPMOB3DsGNtGpaGQ8EzJ\nj6PwRj1hiFSbM2WCTHkiytR0JkSamtdmSvu7AizSeRopR3X3BYNY39CwIN6een5HyRsoKV1zfg24\n6ASYR6mRe6J13mrrl51/6BKH/u4RmYaeKWIxoIgnQTwsJEshFa/jAtPpZALP65Xml65ZExdxmzcz\nEcnDf/JjBIPAI48AMzNsX7MZmJ833uWVC6OSEvb1+OMsInvxoqLgE97EA9gTDgOHDsXFoVKk6Ic/\nBP7rfwWuXYsf59IlxbGNlqNK0EjjNUwQLNJ5GMqCTG8qrqwGd18wmHZjnHTFq2T/I4dTTyOOsRg1\nl8mivvIIp9I159egrKYGzpUrYbbZ4GtuNhw9TnbeauudVnbsWk8tDu/JvQgxQRAEsQSJRqNZ/WKH\nIIglzssvR6N790ajzz4bjY6OLvz+mWDv3miUtQmKRhsapOsqKuLrDhyIRquqotFPP42vf/ZZtq62\nVjr/0VE21ugoO8fi4sRjfPppNFpZGY3W1bHv+fZKbNzIxvB4pMfnx3nhhWjUYokfo6pKcZjp0dHo\n6YqK6LTSnJWOI742VVXZu0fJ7kFWjheN/zZegMOJmR4djZ5uaIhOp3gt090/F/j13r3RHwHRHwHR\n07L7/eazz0Z/BER/WVureo4LfQ06Xn45+uu9e6NvPvtsdODup9GG0w3R0emle/0JgiCIhSOm+ZLq\nQmouRBB6SOgoYzByku7+mSCZpUlpKcDT+errgRMnpPvqsTsRn2NJCeuGqxWZkUcA166Np7pWVQG3\nbqkfw2IBenpYRFYpksjnnP9ToNchjfqJmyZVVQFbt8avzZYt6k2Q9EQsk22jZSuTaeoAtIKl4hpN\nU00zOqsW7XuYuqi+VVeHvtZWeGprE1JZExoo5QDUaIggCIJIFT3NhajGk1hyLEpdQmJHmYXd3wiB\nABNodXVMfHFU6hsBMEsSgHWrLS5O3F9cx6g2fk9P/PstW/TN6+RJJiRbW4GXXmLpswC7XhcuJO4j\nPkZBQfx73hyntTVeO8rn3OtgDXhawVJPgYTjtH/nO/Fr09ubOFay48hJtk2ye5ANgki9NlLPuSZB\nrcYz3drPdJDXVWYL/jtKrX7yXCCAU34/ZiYmMjrPdM9PnN5syc9fkGtF6IPq8YhMQ88UsRhQjSdB\n6EHLhzLb+2uhZjUi7sra1MTsRBobEyNYR4/GooP5wK9/HY8GirvP8mNcvRqPjm7YADzxBBvP6wX6\n+tjyzk7gsceY0ObHOnmS1YMCwIsvsqhqOByfwzvvAG+/DXz5y8Du3axT7vAw8w7l5yI+xtgY2+7W\nreTCXqkBz8WLwO7dOLd7N0IHD+L61BSePHWKiYOkY+n4ACHZNmkViKaAG6l3uk3zwxK1Gs/F7C6r\n1Dk2E8ijuBy1+kmteYjX/3zDBpQ/8QTyy8sx3tsriRTLj5vu+YnrTE/5/Vm5VgRBEMTDC6XaEsRy\nQJyCWlERb/gjjqzJ033d7sRUSvE2HL6t2GZETkMD8NvfxkWh1RoXjGVlTNDydQBw4ABLq5WP6fEw\nISv36eSpu1u3xsfJywN+/3smRg8eZJG5xx9nIlosqkNgkc5LTwBDn8SbEnm9yqmFydKKldbJU1L5\nssWwV8kketKrk6CWSprNFFOtNN5kqa/pYDRFVWsefL3V5cJsLCrq8HgQHh6WHEN+3JmJiYydX7au\nFUEQBLE8oVRbglguqKW3csTRqXffVU7nFG+Tn89EnzyVkm/DO9vyaNfJk3GBaLFIj81tR7ze+DIu\nOgFgZEQqOgHWPVZsXQKwjrfDw2w+4pRaiwVob2fnIj7GF7
8Yf93bCwwNAW1tiWmhPOo39EncJmb3\nbnaaPPrmcmHP6Ci7tsnsUZTWyVNSNexVFirdMxm65pCmTQyP9skFi9ryTKCVxptfXg6zw4GRK1cQ\nXLcObxw4IJx/OvdFLYrLx3xt9Wqc2L1bGFuvxckju3YJ43qqqxOOwY/r8Hgw0d+P+UgEXr8/I0JR\nj/1MLjzLBEEQxNKBhCex5Hgo6xK06u3EtYNer7JgEG/T2yv1q8zPB1auZFHLFSuADz4A1q1jPp6N\njUw8cubm4t8XF8dtR3p79Z1Lfj5Lq+Uit6aGRUXn59lru52dAxe/c3PA/v1MdOfns2W1tcBrr8XH\nFAvm06dZRFX+Rnh6Ov691wtwAVBeDtfEBBxKolUPBlNSF7PGMZfmkA200njHe3sxHw4jGokgEgqh\nv61NOP+k1yQAwAfWrElBX8lFGv8dxcd80NeHwc5OYWwuvruamhSFG1+//+hRYVzx91wI8uMWb9yI\nwc5O9Le1wWKzZUTU6/mAYLk+R7nIQ/l3j8gq9EwRiwEJT4JYCmiJGz3RKfE2fDy7HZicBP7lX1h6\nbijE6kD/6q+YX2dnJxO78nR5sxlwudjy2lomOsXRSCXsdpYGvHIlS4k9c4bNpayMiU++jcMBdHXF\no6ZmM4tmtrYykVtRARw7Jj3XYJCl6c7OsnNQEpGxiBEAdl5cANTWwp7s2mphsGHQYtY4pjqHpRLZ\n0orS8fPm2AoLsfOVVyTrFK9JNxIbVIlQE2l8TFtxseLYWsJNPK7SMRxuN+xuN0LXrwNgfp/yuWfz\n3uXCs0wQBEEsHajGkyAyRZr2E0nHtNlYF9fmZmlt4cmTrEHPjh3x2kaleciXfe97wC9+wYSaOILJ\nsdlYNHN4mAm6SESaFnvlCvCFL0iXlZTEmw6p0dDAmgpFItLlK1YATz7Jjieu7RRjscTnqmRJw61K\nACYyz55VtjKRr1erZczG/URu2GgYncNSsNlIVt/J11lsNpjtdtz97W8xE3tW8yoq8Gc3bgCA+jVJ\n0ZaGX+edr7yCrkOHEsbORB2l+N546+vxjMgK6VwggJ6WFkRiP6da986o1U0uPMsEQRBEbqCnxpOE\nJ0Fkimx4dSYbU94IaN06FqUUd53l+8jHOX8+3mE2GZWVbFwuBk0m4PJlYNs21txH3JVWiVWrWAQ1\nEmFRzTNngPJyaQ0op6EBmJhg4pA3JyoqYo2GLBb2/eiougdmKMQsWaJRJtB37WLnyJsJFRdL12u9\nUc4F79UcIZcbzXCxdO/qVUFMygWWWhMejqaY5g2qDsO4LU0SMiHckt0b8XnDYkHl00/jwNGjqsda\nCh8wEARBELkJNRciliU5W5eQDa9OPdYeABN1lZVMKHHR6fEA/f0s0sd9K/k4WoKR87nPMcHHx/v8\n54H//J+ZyBOnrirhcrHj8OjmypVM7D31FHv9mc+wSKd4XsEgE7o7drCU2vPnmVCdm2PnVVWlntLq\ndgPHj7OIqtvNRKe4mZB8PSA0bWrfuTOxJnQhvVdzHD2NZhYLnq7KRadS2qfcnzIyNgaT3a66fQK8\nQZXo1M8FAvjpypVoLi3Fm6ImRYD+31GZaLSU7N5IUovn5iQ1rUrkYursUknzzjY5+3ePWLLQM0Us\nBiQ8CSJTGKz1S3vMYBCorwf8fhZJ5AKxpoYt37gxXqPpcknH2bFD/ZguV3wcHnHMz2ciko+3eTNQ\nWJh87hMT0qZEnBMn2FwuXAA+/pgJzT/9CVi/nonRkRFW4zkwwIRvzEICRUVsH73Xlottp5PtpwRv\n2vTee4k1odm4n6mi1dU4y8d0AFnrRJsuXCyVVlerdnQVi7Px3l7c7exEdGYGBVVVKYvpUHc3pgYG\nMDM6itttbfjl9u2CQJqJWaAsBGLxKhdp+4JBOMrLhW3tJSVJBWUufsBADYwIgiCWD5RqSxALQZbq\nBYVxe3pYWuuVK6xxT2
kpizS2tbGI3ZYtrAGQ08kiiP/2b6xhj/xnc/VqlhobDrOmPkC826wSNhtb\nz2svS0uBe/fY99XVzHtzbIy9NplYumttLYvO8vnIPTs54ppOjpGU195eFum8cIE1PlK6B7zuUy19\nN1dYjLTfJZJqLE9XPRcIoPfkScyFw/Ds2JGQWpqptGE+DsCa+licTgzGnuNkaao8NXispweFXi9s\nRUXYFwyiq6nJUH2lEkqpsnye9pISfOPSJRRqNQHLMXI5zZsgCIKIQzWeBJErZPpNPBdR4npOJXh9\n43e/Gz++xxOPIsrhtZVGsNuBmRkm2jZuZOI3P599TUzEhaeY8nImfAGWUiuvNzWZWPMicQ1raSmL\ntBYVGRPvSteK3wO1xkK5xmII5KUiymVI6hqRKAL11lUqNdoRL9vz6qt453vfA0wm+I4cwduNjboE\nknx+fI6Tg4OK9ZVGGv6IRVrJli0Y7+2FxWaDtaAAvubmJSnatO6X0YZIBEEQRHYg4UksS9rb2+Hz\n+RZ7GsaimJl+Ey9vLKQUHeRUVbH/+/qYaKupie9rVGh+5jMsPVa8j9vN0m7v31cWmXK2bWPCt7+f\nRUDPnQP+5m+AN99kUVqLBfjwQ9Yo6cUX2TKbTdrx1oh4l18rhXvQ/txz8E1MZD4inSkWQyAvsihP\nVVCII5Gl1dX4ytmzhsRIsmZFyZrvcIFkyc/HO1euoKayUlGwRiMR3G5rg62oCJGxMUGoqglXpWOq\nXRuxSDvl9z8UjYIeloZIOfN3j1g20DNFZBo9wtO6UJMhiGUHrw8E2Bv0ZG94gkFjb+LVRC1ffu0a\ne22N/Qi7XEwoOJ1MqPGGPvn5LNV00yb2emyMRSi9XuDWLePRzU8+SdwnFFKuOzSZElN5AVbTWVjI\nhOf9+8AzzwA3brDvt2wBtm5lDYzKy+Pn1NwMNDay/fU0+xFfP17rWV0NrF0LHDmSeA/6+liklu+b\n7TevRlOvuQfrQqJxzGxHmnhtH8BsTvQKin3BINpj3YvVonzJ5i4+LgDYioo0vT7F6b2IRnEvFELf\nlSv45fbtcK1ZIxGx3vp6rG9okFisdDU1ITI2hvyKChw4dkwiVkdjP+viY6pdG17vmWyuelhKUcRc\nbIhEEARBqBCNRrP6xQ5BEMuQZ5+NRoFotLY2Gh0dTVz/8svR6N69bDul9cnYu5eNDUSjDQ3xsUpK\n4svlX3Z7/PuKimi0sjIa/fRTNp7JFF9nNkejFov6OJn4qqqKRgsLE5d7PNHoU09Fow6HdHlDQ+J5\nl5dL14+Oxv/Xus7icerrlfczci8zjfz+LkF+vXdv9EdA9EdA9HQWzuHNZ5+N/giI/rK2Njpt8J50\nvPxy9F8qKqJHSkqiJ/fvT9g/2dz5cX9kNidsMz06Gj3d0JB0PP71E5cr+k/FxZJlaueiNB/xsp9V\nVUn203Nt1Oaqh2zf20ySznkSBEEQmSOm+ZLqQop4EkSqaEUx5RFRt1t/lKunh/1vsbBmP/39yg14\nOG43iwTyZkI8lXTTJlY/KY48JmsWlAm4X+eGDcD4OFu2cyezU/ntbxPPw2pl5+nzxSO5tbVs/vx8\n+DUWR72Uajd5tFJshaLHs9NoRDpdloFVS7YjTfuCwZRrMXnHWQCChYg4Ypps7vuCQfx8wwaEY3XQ\nJosF06OjCIdCQkRRfswx/vPKMZsxK+psW1ZTA9eaNaoRWKX5iJd9+fRpSfOhPa++KkRL1a6NOPpp\nlGSR3VQjofJ9M9FMCUjvPAmCIIgFRkuZpvsFingSGebs2bOLOwG9kUweReNRPnG0ct06NkZVFVsn\nH+upp5SjmTU1iVFEkyka3bEjGt2/n0X3xOPYbOlFLs1m9XVWq/Jyj4fN4dNPpVHYhgb1iK04ullV\nxfZXi3ByxFFDebRydJRdY6Vrq0DCM5VOtFoPWue2BMiFSFPHyy9Looo8OidELYHo0erq
hDlqzV3Y\n32JRjPyJI4L/XFER/dXOncLr/89uj/5vJpPw+qeVlZrXSGk+8mXZikJ2vPxy9Nd790bffPZZ4Vh6\nIrtKc1AaS23fpRRVzQUW/e8eseygZ4rINNAR8SQfT4IwCo9ktrYmej+K4T6Q3E+TR+W4nUhHB6st\n5N6Y4rG4JydnZoY1CTpzJl7XyYlGgQ8+YNHBq1eZr+fq1cxKhNd6pkqy6KhafejwMIt2fvvbrDMt\nEI/sKfmHFhay2k6+3Ucfsagj//L7lf0redSwupptI24Y1NTEbF2Urq0WPGqq5x6nCo/eLnLtnNz3\n0Qhi/8jFItTdjcj9+wCkHpX7gkF4/X546+sTmgudCwRwyu9P6rXJ/SxXPf00gMTI37gowjk9MICJ\n3l5hDmU1NZIMg+INGzTPw+F2w+5245TfL9wL+fXNVoRZySdT7d5qzSGZ56Z8X6rNJAiCeAjRUqbp\nfoEinsRyQ289II+aiaOGxcUsMrl/P3vNay1raqLRF16IR9k+/ZRFL/Py4tvt3cu2KS6W7iv+2rEj\nvQhnJr7E8yovj0b9/vh1euGFaLSsLDFa6vcrRwArKuLb1NdL1yWLGoqjoSUlxiKL6ey7hFCLFi4l\neGTySElJdIzXM2sgjrQ1ezyK0TmOOPInjuZCKrKxAAAgAElEQVSJI5z82Hw7cbRVHBXVinpqRQCz\nFWE2UkurN1KsNJZ831yImBMEQRCZAzoinmSnQhBG0WszIbfxEOP3s26z3E+zvp6NK/f6fOQRVuPJ\nsdniUUwlKxTuqcntVZLZrKSLy8W65g4Nsbns3s3sUTo6pNFJsfWJ0jXZto1FLXt7E+tfS0vjkWK/\nHzh+XN/cuH1NSQlw6RLr4quXdPZdQohtKOwlJXj+5s2Uo5eZ6IJqdIxzgQBGr1/HWE8P/O++i0KN\n+yTuEhseHobV5RLqMPMrKvCtGzeSHlN8vfIrKjA1MAB7SQmqnnkGk3fuwOp0Ir+8HPd7ejD0/vuI\nzsxI9tey+hB7cCbzAc0Ecj9SrXrRhRqLIAiCWLrosVOhVFtiydHe3r64E9CbJslTQS0W6fKaGmbp\n8cQT7DVvgCNuOJOfz0TavXvSfbnoNJmUU13n59k6LjazJTpNJpY2+/77TFgODbH02lAIMIt+rRQU\nMOHIhai8CQvA7FV6e5VTW/Pz2f+FhcDf/33ivoEAu07yVFye5nzzpi7hKDxTgQCznKmoWNaiE4in\nPtpLSvCNS5fSEgrJUiy14Om+N48dSxgjWSpwqLsbdzs7MTUwgK5Dh3TPMTw8jIKqKjyya5ewbmpg\nQHPe/HpZXS64N26Et74ez9+8ick7d4R5/+Ff/xWDnZ34/cwMnJWVMDscAKSWLGrw9F7eSCjVFGgl\n5NdRfL9+vmEDpvmHOykgHqvr0KFFT79eriz63z1i2UHPFLEYkPAkiGzBxc+HH7JIJGfNGiZa+Xpe\nmyh+zYWYWh2lWhbB7Kz6OjW4f6URolE2v8ceA155Jd6xt6ODiWWnkwnuBw9Y7WkgEBd1YqxW4B/+\nQb3LKxfO4+PA976XOA+1etvYhwPnjL6B7+5mdaEDA4AOMbOU4ULn+Zs3NaOFWqRTr8eFC/e5FI/R\ne/KkIGraX3oprWOKt//mRx9h/9GjyK+oUBxDSfDuCwbh8HgwOzGBOx0dGHjnHbzd2Agz94kFEI19\nMFT82GNouHYN5bW1AIDI2JimOBbXVeoR8mqiXGm5fDx+LficeeffVKBaTYIgCEIvJDyJJYfP51vs\nKeiDR0a3bQP27WPLeHRTvJ5HB8Sv+RvDmhqWbqqF1crScI1gsQD/7t8BzzxjbD8xMzMsxTYQYFYp\nAItObt0aF40lJSxy2dKSKDxnZ5nAk4tw8fgck0L2hoYtid5InPBMLQObE72k2hxITZTxaJ3R8bhw\nKa2uhtfvl4wxFw7HN5R9oKJ1TPk85ds73G5868YN
xTHUGu5Y8/KEbcJDQ+hrbYXN5YJJ9LPnrKzE\nf+rqgsPtxnis6ZCtqAiwWJJ+CMLn+7PVqzES+zCotLpaVcypPdtKy+XicF8wKIhureMoXUsx6dx7\nQj9L5u8esWSgZ4pYDEh4EsRCoCasxIjTRouLgfJyoKwM2L6drd+4Mb5tYaF03y99KS6a9GC1srTX\nO3dYdM8oXAQ6nUzw/tM/xUXi+DiL2ALxOsneXiDWfVQ4PpDo0Sm/NrwLLk9PlqNxXQ1HY/Tcp4cc\nI11QAe3OuVy4fOXsWTxz/LhkDE/s/pdWV8NeXCwZR0s4y+fZ1dSEycFBvN3YKMzDSPfWc4EAwvIP\nTiwWRCYmUPH5zwNgfp0N164J4/FIcmRsDLfb2pJ+CMLnO9nXh0hsfoVr10rm9otNm3DE7cY/l5eD\nfwwjf7aV5q4mutU6/2pdSzG50N2YIAiCWBpQcyFiydHe3r60P6kLBFhKp7yRzsqVcRFYVgaMjLDv\n6+uZTUplJYscfvIJS2HljYkA1njnvfeA/n59c9i5k0VSOzqAyUlj83e7gS9+EXjjDeDJJ5mwFL8h\nt1qZcB4bAz7/eeDECaCxkaXDihsiVVXFrVPU0NvISYVwKITzgYBms5OMPFNq93UByUSTHy2MNsER\nN+XRarAjR3z/Tvn9muOIz38+lkLK56lnf6Xj8vMTn4ccl9eLyIMHsNjtcK1bh99HIti5aRN6T57E\nzOgoSqurke/x4LZoPvLrxq+rragIkbExxe2OuN2CfYyzshIVTz2FPYcPo6upKasNfhay8RGhzJL/\nu0fkHPRMEZlGT3Mha7KVBEGkiZIY4XWJAItmrlnD1k9Px/fjzT6sVuBv/xb47nfj+4g72wIsZfbU\nKUCclqhFV1d8fD3w7rglJezr+PF4nac8xXd2Ni6aOzqAF19k5x4IsPNqa2ORTj1RRR4JTREejVkQ\nxPeVe4EuMDwyBQDnA4G0z11JyO4LBnWJeY44AmfJz8frPp9uYSy+f3qi1+Lz9/r9WN/QIMxTvr/8\n3MTibV8wmHDtrCoZBVaXCzP372MmFqWc7O/HEIBP3ntP2KZw7Vr4jhwRrpv8WOLruvOVVwTh2NXU\nhN6TJzEXDqN8xw6YYo3KLE4n6t95R4iois/7V088IdSWZgqj95wgCIIglKCIJ0Gkgt7oltg+pKGB\nbXfsGBNgSnYoAEujjUYBbnBvtwNFRdIIZyawWlkHWpntAwAgL48dl0cyy8pYLWdzM7B2rTRt9sAB\ndo5K4wCsM+zatSy1d9Uqlnb77rvGO8bmQEQxKdyGRa+ozgKZjkylE63kGI1aJhvnl9u3w1lZCXtR\nkaJwVTp/LjDvf/IJ5sNhWBwOFK5bh9Hr14WGRo7yckRnZ4XXJquV+Y2Zzfj6xYso27YN4VAIv9i8\nGdOxrAT3Zz+LqTt3EOYfsqhhsaDy6adRUFmJ8d5eWJ1ODH/wAaZjNkkurxeutWsFOxa+zb5gUHK9\nAGB1XR3uXb2Kr164IGkIxc9bbBGT6v0iCIIgiFTQE/Ek4UkQRuHRLC6+xD6VcrgY8XhY1HB4WJ/F\nicnExCf/P1uYzcyCRUxBAUuhjUSknpvl5Uz4bdgQF8EWC/PztFji1i9btjB7laGh+DqxUAWSXzM1\n5CI+195Up5kWrIaR9NlwKIRfxcSZTUWcGUGPkFWbn9Jy8XglW7ZIRJaeeWoJYaUU2Z84nZibmlId\nUyzWABYRHf7wQ+HnwpKXhw1/8RcIdXfDbLPBYrfDbLPB19yMtxsb0dfaitLqajy4dStBhJosFkRj\nP++O8nKEh4bYcptN6IDrKCsT9hNv4ygvR2RsDPOxTIbSbdvwlY4OxevEz3t6dFSSXkzRSYIgCGKh\nIB9PYlmyKN5TgQCrwSwtlYpOi0XqUynfh3tCPvoocPductHpcsW/56JzxYr4cZKxebOx8+FjykWn\n2Ry3QCkokK4b
GmLndPEiqze129n53L/PRGdFBas17exkArW8nEVt+bUqLmb/p9oxNosdZzPyTOn1\ndzWIWmMXpaY9DrcbBWvW4G5np2YnX62mP4C+jqVGuquKxxvv7ZWs1zMftXRbvu/bjY0J6aBzski8\nragIAJjHpsWC2ViKO++qW7Jli+TnwrNjB+5dv46Bjg70t7XBVlCAZ06cENJjC9etg62gQNJ1+Q9O\nJ1bX1WHl008L8y17/HHh+0dizYhKq6vhqalJ2MbqciE8NCSITgAoXLdOiOAq3ff9LS0oiHmH3v/D\nH3C6oUFYr+fayklln6XCUjw38lwkMg09U8RiQMKTIPTQ3c0a/4yOSkXn3Fzcp1JpH+4JKar3SsBu\nZ+mqzz0nXR6NxlNxuWC125VF6I0bxs7HYmHpu0C8ztPlkgrRU6ek+xQVMcHn9QK3bycK0127WO2n\n282+HA62vLCQRX6vXEmvY+xD2nFWTWypCT69nXz1WM3o6Viqdryxnh4ATOjtfOWVhPHk++mZj5oQ\n1uq6CgDmvDysrqvDN69exfqGBiY85+aA2VlY8vKErrrcAoVzt7MTY598IpkrFy7Htm7F1MgI7nZ2\nIjw8DJPdjrwVK+D7yU9QsGoVZqemkFdRgQPHjuHA0aMoXLcOD27dwr0rV5C3YgXcmzYhIttmfUMD\nVuzaJZmDvaQEvuZmnAsE8HFzs6q36XhvL+bDYURCIfS3taFl82aEQyHdtkJiUtlnqbCcz40gCCKX\nIeFJLDkWpQubuLHI1q2s02wsmpEQgeO2KNeuxZclS5edmWEi7s4d6fKaGvYlJhLRl6qrhLgJ0Nxc\nvIGRy8UaBonSDYVtOEVFTDz6/ez/UChudQIwr1K53QmvQRsfZ+fn9TLBuHkzixwfOBBPT+U2Msmi\nD1mKKAK57WemJrbUBJ9eX0XDVjMa8yvZsgUtmzejubQUbxw4gIJVqwAwK5GuQ4c0z0vPfMTCVRy1\nssSebaV9v/7BByioqsKf/f73ePbNN1Ho9WJ/SwssdjsAlg5b+vjjgs2KUhOhsscfl8yVC5cHfX2Y\nFXV0js7MYHpwEJaf/xyh7m4MdnZiemAAXYcOCdHoqbt3MRMKYXpwELfffluyDbd8MQFwxLIdzHY7\nih97DG83NmL0+nUhRZcdUPp7RT73qYEBnA8EUrrXmXo+cpGleG65/DuKWJrQM0UsBlTjSRB6CIWA\nl15ib/Sam5n4UavpE9cickwmZi3yySdArKmIhKoqZoUijjiaTCwyqdSAKBWS1Ys6HKwrrrzhkdnM\nROLFiyyiye1e/H4mNF98kY175EiiIFRqtiO/NuXl7HhcBOdi7WaOotcqJp39jdSXyu1G8isqMDUw\noLvekM/Hkp+vWvspns/M2BgGOzsBAN76eljs9qT7yml7/nl8+qtfwZKfL1iU8C645wMB/OnUKUFU\neuvrkb9iBULd3Rjv6cHM+LiwjxJevx/DFy/iQV8fYLFg5e7d+NKJE0JNKMDSaedmZhCdmYGtuBjf\nvHIFZw8elHTlvd3WJqk/zVuxQmhK5P7sZ1F//rzkHMOhENpfegl333kH04ODcHg8KN64Edb8fNhc\nLviOHNH9rKT7fOUyy/ncCIIgFgtqLkQsS3Lee4oLLpcrMYro97N1cusTLvz0UlwMrF4N/O536c/X\nbGbNhPr6WK3m+HjiNuvWAX/8Y/y11cpE5NGj6hFIJWHOrw3AoqAPHsS3V+sGuwDdbHP+mVokjHS1\n5Y2DAFa7+MyJEyn5SSY7pnhdXkUFpmXCVu98zwUC6GlpkYhHuUB+48AB9Le1wVJQAEdxMaYGBxHV\n8SFQ6bZtKPrBDzD5d38nCGM+nz2HD6P9xRcxcOFCQiOi9Q0NmJmYkDRzCq5dK5nj6ro6mO12IBqF\nr7k5QZT3njyJ8L17sOTnwxzr3jscs06iLrdLG/odRWQaeqaITEM+ngRhlEyIHL
lnJaekhKWsyhv6\nFBayqKER4enzAR98YHxuQLyTLY+Azs+zWtSyMmXRWVLCmgmJhefsLDu3DRuAJ55QvlZKHpzBYDxy\nzJsYVVczuxWlqCmQE/6YDytGUhL3BYOs5lAkivQKHXEk05wkbVY8nwPHjqHr0CFY8vNxyu9nkcjY\nBz1WlwufvvEGjhQXw2y34+sXL+LSD34giZZyQWcrLkbl008L0UA+F4vNBntpKWbu3cOk+AOSGLai\nIkTGxmCy2WArKGAdb/PyMDkwgIvPP49NsVReACirqREE+DMnTkhEOgDAYkF4dBRf+PGPcfLpp2F2\nOPB2YyPMIp9dW3Exwvfvw15UhPzycpzy+yWR3d6TJzEVy0iYjzVUMpnNqteSIAiCIBYaingShBgt\nyw49wpRvY7MBV6+y1NqSEuDSJVbfqGTtoGRrokZNDXDmDGtGJIqoaKJlzbJiBZurON3W7QYuXwa+\n/e14pJIjjlimkiKr134kB/wxH1YynY6rtq0kkrliBR558smEiB4AnD14EH9qbUXZ44/jwNGjCVFO\nNQqqqjA9MiLYqpgdDsyHwzBZrfj6Bx+gbNs2YdufrlwpCDie2spFJsAEoMlqRelnPwtHSQmmh4Zw\nN/ZzaLJaJVFRS34+rE4nympqhPny69qyeTOmBgYklivrGxowOTgonI/JaoXJbMbKvXsRmZwUIqgO\njwfhmKURj2Q2l5YKPqQAizq7N23C7bY2eKqrsV90fIIgCILINGSnQhBG0bLs4NG31lblTrYAcPIk\n26atjY2zbh3ztvz2t1kjoXT53e9YZ13elZajZbnCRSfvNiumpoZ13m1oAP7wB9Y8ye9nUU6vl4ls\nbu1iszEhnZfHXqdqb6K3WZBWN1u9zYkIw+jpamukQ6hWJ14ArDmP3a54zL7f/AbhoSH0t7Wh/cUX\nAQDjse65ptjzb5P/XAB40Ncn8fLkNiXR2Vlc/Ou/lmwb5n60iDcVWl1XB0dZGfJWrEDxpk2YGRnB\nQEcHbre14e4777CNzWaJ6LQVFqJ02zaER0bQ39YmOV+H241v3biB9Q0NEsuVPYcPS65FdHYW8zMz\ncLjdsMfOy1NbC091tWQfACiPNfsy2Wywud3I93iYt+jwMG7Ljp8NlqJFCUEQBLGwkPAklhwZ8Z5S\nEyvl5eyLv+mVb6fHS1KcMtvWBty6xSKTra0sssnhvp1ud/JIpJxIBHjsMfZ/XR378npZ859kIq6m\nhglKufCsq2Odeg8eZDWpxcXAiRNxaxQ+x48/ZgLwc59jacQjI6wpUrajkFoCVc+HARqQn1nqGEnH\nTdaJ15KfD4BFFGGx4KcrV6K5tBQ/W7UKJ3bvxlt1dYLnJgDBN7Mg1j05OjeHgqoqwS7FKvbFTcLA\nhQv42erV+PXu3Xht9WohTRUAzDYb9re0YPLOHYRHRjA9OIiJmN2Kp7YWc+Fw/GdXlLHwMYDI+Dju\nXbkiLLv1m9/gzQMHEA6F8ItNm/AvK1bgj8ePY25qCt76eqG+dF8wiLyKivg1KyhAeHQUe159Veis\nuz9muyKuSXVWVsJRXg6r04lIKITbbW2CpY3V5UJ4dFRTEKYjHsmiJLvQ7ygi09AzRSwGJDyJhxM1\nsdLbCwwNxb055dvp8ZLkNiMFBSzCyaMgJSWsO2xVFbBzZ7zx0NSUMeFpNrNx29rYMVatYqK4s5P9\nz43sucjlgvPMGSYoRbVnOH8eePNNdt5a4o0LQB5Rqq0FPvoo+6mvWhFNPR8GEFlDr31Lsm0dbjfK\ntm8HAETu38fttjZMDQxgZnQUk/39GOzsRF9rq2CBUlZTA3tREV73+TD4298K40zevYtjjz+O6dFR\nWJQi+3LMZoRHRjDZ14e7nZ2sC62IW7/5DX62apUkqln86KOCUNT6uXV/5jPC9/y82l98EZMDA4hG\nIojOzuJuZ6ckwutwu7H6S1+CKXausw8e4H
ZbG7oOHYLd7cYpvx9vNzYK6c9cLPaePInw0JBQu2p1\nueDeuBGOsjLMTkzoinqmIx6XokUJQRAEsbBQjSfxcKJWNyhf3thovL6Q1y52djKLFIClwX74IfO7\nlB/nD38wliJaUsIijnxOmzfHbU4A4K232PHffBP4/vcTayh7e4Hdu4ELF4Af/ICJ62vXgOFhfZ1l\nX30VOHRIuzbTKGr1s1p1t3prRYkFwUjNpxjecMdTWwuH243b4sZcgGA5wjvl8hpJNRxlZZidnITZ\nbsfc1JQkkmkpLMScqJGWvDZTC0teHsp27EDo+nVWV2mxKPrrmmw2qe8mgILVqzF5545wPJPVCmtB\nAeYjEZisVljsdhQ9+iiGYt1oAcBeUoLnb97EKb8/oWuvvMbV5nLBZLMJ9Z6Ttgo4IwMoqanFV88k\n/3BAfA/0fJAghixKCIIgHm7IToUg1FASK4EAcP060NMDvPsuE2Xi17GUPmFbuUjiy3p62La8FpPj\n9bLurVy8Pf00a85z7x6Liqq8eVWkspKJRbebpc6Ka0erqlh6rx7Eoq6qSj2CqSX+MoHaMai5kCp6\nRF6qQjDVfY1YsIgRCxcAaH/pJfS+8YaQMbC6rg7PvvmmsL28mY4SjrKyuG2JqGkW9xi1FBRgTqFj\nrRgtUWp1OmEpKEB4aEjzHIF4Y6PkB403AjPZbPBs3w5HaSnmIxH0t7UJwrCrqQk3jx1LuA7cambE\nVYsfThzDN3AIE/WHETyhz0uVxCNBEARhFGouRCxLMlKXoFQ32N3NopQDAyyiJ38tRilVly/r62P7\nyQ3mx8fj+xw6BKxZw7rI8je1zzyj3PhHic99Lj73WG2cgOjNuSbiNNVkabPZSmcVp9HGbDQSjqEn\nvTlNlmqti57UyHTSJ1PZVy3lUqt+UNzIyOF245njx7Eq5jFXVlODL772mmR7TyylvWjzZpjz82Er\nKYGJP0MxTOKGW7GfM09tLfzvvov1DQ2oePJJzfOp2L0bXr9f+VwLC1GydWuC6PwYTLACLOXVHEub\nNVmtmJdFQBWJRuGsrIS1oADRSARDXV1CqvH6hgaUbNmCU36/ougsq6nB12Ln9+6u07gHLy7VtuD/\nbU7ebfh1nw9vNzYK9jTUJCi3WKq/o4jchZ4pYjEgH09i+WLUk1Murhobpa/FY167xl57PCyddvXq\neM2mEtXVbFve6VY8Pl//2mvA+vXafp5mM0u1fewxJlwnJ6Xr//qvWS2nEvJrwj1HtdJU9W5nBO7J\nyQV6fT0TmPJjKPmBLkNSiS7qqatLp/YulX33BYOKUTMuYgHgfCCgGAnl12CspweFXi+s+fnw1tcr\nWqscOHoUv9q+HfmlpZjo6UGEC7BYtNBRVobCdesQDoUQnZlBWU0NXGvWwNfcjK6mJtw5fx6zU1OK\n6bBiBi5cQP6KFYp2RLPj47h39ariftHZWZjtdsyKfi8ki5yu9Plw97e/xXw4LEQ0g2vXSra529UF\na34+ImNjgr0LwDrorti1C9aCAsGP1O5242C/HzsrnPifjwXhFnmUyp8x8b0RW7Wo3SeCIAiCSAWK\neBJLDl8sCqIJtzVpbQVi1gtJkUfWlCJtPKo5PMxSUzduZNHNvj7lOk2Xi0Xztm1jTYQqKoBjx+Lj\n+/1McJ09y5bxxkQAi2TyRkDV1ay2E2DdMzs6WG3o/fusu60YU5IsB3mkVq+lid7tdHIuEMDrLS14\n6/59hAF2bs3NGT2GEczB4KJbQaQSXdTT2MdI8x+1fXmETc/14aJHvr0eEcuvAW/2c7utTdVaxeF2\no2DNGtzt7JTUbyIaRUFVFdybNmGoqwvRmRlYnU5YnU7MxbYLdXdjamAAkfv3EY1EYLLZhAilnOjs\nLCb7+1UbCc2Ju+Da7XCUl2MjWKSTd9a1FRezDVQsjyxOJ8xWK/7s44+Fe9XV1CQRl6b8fMzEGiEJ\ny2PjRc
bHhSixWEwOd3bAM9CKq4cCkuurZmejZtVCLD66/+4RhE7omSIWAxKexPJFHDlMJsY4bjf7\n8vuZWOTL+Gu5ncpHH8U7vPL/a2rYtqWl7PXEBOs829ubmLbrdjPLkhUr4sf48Y/Z93Y7E6ozM6ye\n8+xZZpciPh/xG+Gysvjxi4rUu8DmSAfYUHc3Bu7fRx+A8zYbcOnSotZu5oIVRCrRRT0+m3q20dp3\nvLfX0PVRup77gkEUrlsHi8OBtxsbFQUsvwbci9Ph8aC/owPNpaV4I2ZFwjkXCMQ72ooEXWl1Nb75\n0UfCGJ7aWpTV1OBurDPuzyorMXL5srC9yWoVOsymhKgue35mBiaTSegkO3PvHqxOJ9ybNiG/ogJ5\n/OdUPsTkJG63teGd//AfsL+lBV1NTehpaZH8jDsKCyXXxl5SgpW7d7Pr5nJhWmaXovQ8cXsVW1ER\ndr7yirCt+MMJJasWgiAIgsgEJDyJJYfuugRe+1hYCPz93+vbRx4R1LJT4a+vXmX/nznDaiy5wCsu\nBl55Jf7a5WJpsuI33eJjHDrExGhBQXw9r+cMBuMNjsSi0+Vix+XHF1ujbN8uFaELUC+pB+FNcUkJ\n9nzyibRxUwYw6kd4fWqKzWcRozzpRCazjVFRrLS9OEKpJmD5NeBenCazGdODg5gZHUW/zA4k1N0d\nj3TOzcFst2N1XR2+cvas4IfpWrcOZocDoY8/Fvabm5oSLEdgMglzNYJadBQApgcHcZU3NAIwGw5j\nqKsLUwMDmB4cTD5G7Oc61N0dnyOYsCzZvBkurxfuzZuRX1GBb1y6hC+dOAFHeTlmJyYSro/S81QY\n+zmLjI2hS1S3Lq+vTfWDCiJ7UD0ekWnomSIWAxKexPJl3Tr2//h4YnMgNd5/n/1vtQL/5b9II4T/\nf3v3HhzVeeZ5/PdKfdENqYUkLMsYGceY4AQb2fgaKGvWJo4xDp148SSe3eCdyqomrtp1qiZ4s5PL\nTtXEtalJpWaSmirXpioLGSfEBmKIMSYuZK7GNg4bcBJDjA22bAxCCCSEuLRuZ/84fY5Ot7p1aZ1W\nq8X3U0WZVp8+5+3Tr4Ueve/zPMXF9mqkN5fzqafsPMtFi+xcz8ceswM8J5A6d86+9tq1Uk2N/drm\nZmnOnNSrqM4P9c6W24YGafVq+++RiF0VN1l3t902xdmm6g1yz57NbGttlrk/FB87prDPQac09hXM\nW7/3vZwHfZP5B/7kIGakwD5dED1SAOvcg2n19bp/3bqEQjzBioqE1yQHjAM9PTr9hz8knKts1iy1\n7d3r5iwOYVmD21a9uyIKhv+ncUyro2kqVYeSPufK+fPVuGaNJM/Kb0WFVFCgvu5undy1S93Hj7tB\n7Kb4DoiahQsl2avDF06ccD+T5Pm0u6lJHYcOSbILELGNFgAw0WingsllrAWBhpNJG46KCsn5QdRp\nL+IU1YlGB9t91NZKhw8nfs2xYoUdDCZf2xlPWdlg8BoMSvfcYz+/Zs3gGJPbvXiLGrW326+74w57\n+27y++vstAsPeSttLlwo3XSTvRrqx72d5MbTjxAj86Nlymg+k5eWLNGJ5mbJGLdCbe3nPqfPx4tn\nPTd3rmKeVUTJbj9SXFOjstmz1b5//8itS5IU19YqMneuTib/f+2nggLNuPtuheO5nwWhkFsUSJJ2\nrFypo88/r8LiYvUOs2I/bfZsldTVqevoUQ309bkBdn00qgc2bkw49tmrr3b7nia3pgEAYLxop4L8\nk6pNSaa820qfeip93qOXU8ynpER67bXEFULvCktrq11YyGnf4OR4OquWqba0Ol/z5mr29trvNxRK\nXcnVCTrXrUssatTWJr30Uupts5GIPffB4sIAACAASURBVA7JXjFdvtw+xrsFN/neetuaTIEWCpls\nWx3r9tx8Nt736qzIhaur1e1ZZRvJWFd1S+vqFK6pkQoKZPX1yerr08ld
u7SnqUnhSERfefddlc2a\nlbBt1ert1cUTJ9S2d2/KoNMEg27Rn1Rm3HmnHdiOJi98rJyV1IEBte3dq/Y//EHn3ntPJ3bs0HNz\n5uh8S4sk6XxLiwZisZRBp/NeqxcuVO+FCzq1d68utbaqx3PsqddfV6yzM+Fz7otvJ5fktncZyZX0\n/wQAIPtY8cTkMopVyp07d469Gltj4+DK5IoV6dtztLTY22Zfe21o3mFnp10IyFtFNhCwCwlt22Zv\nd03VbiR5FVeS5s2zg1fJDg63b098nfc1XV32yqZkV389diz9sc5KZvKKqTT8vR3t/Zmidu7cqa5/\n/MeMVvHy0XArlqNp6+KsXHbHA7zk84ylNUy6Y3c3NenounUJuY6SvSW1uqFB51taFCgpUW9Xl045\n/384Cgrs6s/Ofx2FhTIFBWnbpwQjEVV+5jPqbmnRxZMn026TTSdQXq6+ri69K2muJBMKSZalUHm5\nZtx5p/p7euwVXA8TCLhbdwMlJaq+/XZ1vPPO0O3BhYUyxmjGnXeqqLpajWvW6NfXX+/28fS2QZHs\n1dDLZ8+6969oxgxdbmtTVUODlm3fPqrgP9OVbfgvo3/3gGEwp+A3VjyRf7JV/Ga01Vzr66WPPx4a\ndDY12dtq45UlXX199uqjN8cyWaoWJocP2yuR0ejQoDP5NfFKlKqsTF39NdUqcXIuZ1OTHcB627lk\ncn+msPH0u8w3w73X0eTHOiuXIU/lWO95xpJjm+5Yb4GdYHm5CouLFaqsVMlVV+nc0aPua5xKrdMX\nLNC1S5cqVFXlBptOFdnpN99sf72/P2XQaYJBFc2YoYJgUG179+ri8eNu0BmKRNxKsl4F4fCQr/Wd\nP5/w2OrpkdXbq9iZMwqWlmrJ+vUKJ1W2teLXKSwpUaC0VK27dtkBZHzFtaCkxF7l7O+X1denU3v3\nui1mquO54NMXLNCX9+9XcW2tJPvzKKmrc+9fqLJSX3rrLV2/YsWog04p9TxhFRQAkCkCT0wuoyh+\nk9Fv6JyA9qabEtujjJYT3J09a2+vdbbYSnaPzeEClVRBXSRir552dAwWJHI0NdlVciV7NbW+3g4Y\nDxxIXf11NEHjkSND27l4TZJqt7nS2Ng4qavK+m249zqWADzTIkKjud75eEBpAgF9cc8e1dxxh3o6\nOvRJc7O7yjp9wQJF33xT169YoYd37NCDW7YoGK9mHayo0EPNzfZzu3YpEP+6kyvqLSBk9fbqclub\nYt686Pi1p99yi6obGoaMO5hqu258d8/cFO+z4bvfVTgSUc0ddwx5TWFRkR49dEgD3qJF8XMNXLyY\nUMzIWxhoSbz1ycM7dmhafb0ePXzY/Ty649t2TSCgh3fudAs2jWVup/p8J0ProSsRK1PwG3MKuUDg\niSuDE9AOl+c4HG+l2N5e+09dnb1quWPH8MFauqAuXT7rkSN2QCrZqx779qUNGHc3NenFri69XFur\nWKqVzOTxpwtOJ0m121yazFVl/Zbuve5ualJvV5eKa2u1ZMOGEe9FuvOk69mZarUsXfBaGv8li9XX\npwM/+EHKticXT5xQqKJCoUhEr0SjennpUhVfc40kqffcOR34wQ/c8V08ccI+X3+/CouKFHT6YsYD\nSG/epyksVKiyUlZfn1p37VIoEhmywhnztEwZjd899JB7fwuKitxczekLFug/nTypA08/LSctxRlL\nMF58SIWFUiCggnBYJhRy72ny/Q9HIgpFIlo3b54uOO83fv9SGWn1MtXneyXtDAAA+Ct9MzJgkso4\nL8G7ktjQMLYtpWvX2q/v6LDboYylUq4T1HnH4VSolYYGg94gMRIZvF6K8XYeOaLW+OrPnlWr0udg\nOeNPlYMKcl3iOo8ccfMl9w03n1JIztUsnTXLzQ/c09Sk+9etc1fLvF9zgptkqbbxPj9vni47udGy\ne2b+e02NpMEWJ0We7aaLf/Yzd1ze
XM/+y5fVf/myJNnVZSMRxeKrqZIdnDqBZvXChWpcs0a/W7Zs\naC5pGk6Op1fJNdeo49ChIeeYdt11Ckci9tbiePAXKC7WNffdp3t+8hO9sHChm7s50Nen9n37Eu6f\n974X19Tow9/+NiEvNlRZOSRAdF5z9o9/dHNEnfOl4r3G4mee0b5Vq9zKxGPJ50Xm+B4FvzGnkAsE\nnrhyeFcSZ80aWwDmBI+pivZIY2sD46x0SnaF2uQA1hskOudOEzAG4tsRqysqtPhHPxp5/JOFn21z\n4JvxrGYlB5WpzjXS+Xc3Nall82b1x2Kquvlm1Uejaly9WvueekqdR46o6rOfVcGtt+r0/v263Nbm\nVrt1FRaq4lOf0lV33qlQRYVeiUbV9sYbGujpGfY9hyIRdZ84oYJQSAM9PQpXV2tafb2MpPIbbtAr\n0ag633039QmSCxilcXrfPhV6tvta/f12UBvv0+td0b18+rQ+2rJFH//udxrwFDgKlJWpr7tbgbIy\nxTo6FOvsTLjv4ZqahKAzWFGhRw4cGBIMel8jjfx5e49P/oVEql8mAACQClVtceXIpK/naHmrwobD\ndkB1223S+vVDr+PjOGKLFmnP3r1aLCk8nmq0Ex0IXuFVdCcrb59NJ9gb7UpWcu9USQk9O3c3Nanj\n0CF1HT2q6JtvalpSvnKqKrZOJdXk6qrtBw+q6/333TzI5ODv+hUrdLGtLSG4SiUYiWggFlO/p9VI\n6cyZKquvd1cmvVVnTTA4WJyooEChigrJGPWcPatQZaWqbr45ff/PggIFy8rUG+8TXDpzpv7jn/7k\n3tdYZ6fWzZvn9tpMJVRZqd7ubncM4epq9Z4/b7eNKSxUqLxcPR0dCkUiuuqee/QffvWrlJ+b81lV\nNTSobNYsNa5ZM+znm/zZeufGQG+vTjQ30zMXAK5wVLXFlWe4fpRr10qzZ9uBYXJBn/FyVisCASkW\nG9ySmyqP1MdCPuHyct0vKTzaarTp7o+f/VNHgyq6WTHeiqPenL6xFpFJztVMzg90tvFeam3VvhT5\nyt4qtlJiEZ3kvqFdx44NBp2SwtOnu3+fvmCBCouLddbZVl+Q+p+5wqIiTf/MZxKCTkn64muvuf00\nvSuqocpK1d177+CBAwPq6ehQz9mzCpSUKHLTTTLBYPoemQMDbtAZLC/XF197LSFIC0cievTwYYWr\nq1O+vKCkRD0dHW7QGSgrU6y9fbBXaX+/ejo6VFJXp69+8IEe3LLFDfjT5dUu275dD2zaNGKwmPzZ\neudGsKzsiinKBQAYHwJP5J2dO3emf3K4ACoSsbfY7t3rT4DlDeKeecYOJr2VLisqsl/IZ6xBbLr7\nM9GB4CSrojvsnMojflYcHeu225GKM410Puf5YEWFrl26NKHtR9f778sEAurp7LQr2nq2n1bcdJO+\nvH+/yurrFaqqUlF1tbqOHh3sb+kJSh2FJSV69C9/Sdkm5fUnnxxcjY2vooYqK/XIgQO6f/16t2WJ\niVe2DpaXq3L+fLXt3asTzc0a6O1Vmk25dpEgSb1dXUOC791NTVo3b5560vzCIOQUQSotlQoK1BcP\nmANJ1XVrbr894TNINSfGUkhrd1OTXolG1dPd7X7N+1k2rl59xRTlyqWp8j0KkwdzCrlA4ImpJVUA\n5Q0QnTYofgRYmzcPBnFPPmkHkwsX2s8VFtptVrJtrEFsugBzogNBquhmhZ8VR/1uLzPS+ZznH/vw\nQ3e1znGprU1WX5+7+ljozYc8dUp7vvENlcycqZ4zZ3SiuVltb70labC/pVNwqHL+fJXU1enRQ4c0\nrb7eLoI0c2biQIxxA9LC0lIVzZihRw4c0LT6endV8voVK1R9662S7CDSWSFNDgK9XwtFIiqOF0IK\nVlRIhYXuSuSOlSt1dN06XWptdd9jcW2tTHy11gQCWvKb3yhcXa2+CxfsgDgefAfLyhSeMcN9v41r\n
1iRef5xzIlXgeiW1HgIA+IccT0wNTo5iMCiVlkpr1gwGNd58wuXLpVDIn+qu06cPFiuKRqWNG+3t\nq3PmSPEqlJMufzFdcSRMCd4czYkOCLJZ3fQXNTWKtbersKREdY2NGujp0SfNzW6xHUl26yHLSsj3\nrI9G9cDGjdqxcqU+2rpVVbfcoiXr1yeMzZs/agIBfeX997X/+9/Xe88+627nrV++XA9s2pTwPjve\neUex9nZ7a6wxQ3qASlLRjBn60ltvuVVgty5b5vYg9eaOhmtqEl5f1dCgZdu361f19erz5IRWzp+v\n41u3uq8tLClR/Re/qAsff5wyd3Z3U5POxvNqvxR/bqyfU3J+J4EmACAVcjxx5XC2kDY324Gl94cj\n7yrfmjX+rbTddpv934YGKV6ZUpGIdPvtg9cb6wrDcDmqfmClcUrLZS9Sv7b5pspJ/PL+/SqdOVOP\nHjqkB7ds0f3r16ts9myZ+NbVwtLSwZzPeNBZvXChQuXlerGxUe/98peKnT6tE83N+vWcOQn5r95q\nslZfn15/8kl7BdPzC9OBeF6lUwCpddcuxdrbVRAKyerrSxl0StJVd9+tA08/rYttbXr1scd05sCB\nhGtJ9kpo1S23SBq6zbgwni9aWFKiqxYtUk9Xl4pqa7Vsxw73flw8edLNnX3h9tsT7lvnkSNq27tX\nlz15tePN3QUAIFMEnsg7KfMShstRzNY20vXr7fNu3z60HUqm15voIj+QRK7LcEZbsCiTLZ3ec+9Y\nuVIvNjbq2IYNQwKjafX1+puPP3ZX7F6JRtXT2ekWIwqWlrrnrJw/X/XRqB7atk3nW1rs1UxPxdue\n9nY9W1en3y5apJeXLtXiZ56xV0vjBnp7E4JRSTr7xz+6Y3MLIBUWaqCnJyEnM1hRoaIZM/SupOC0\nabrnJz9JCPRStXW56p57VFpXZ/cNNUb9nmO8AffFkyfdIPKdn/7UvR/OWANlZYqdPq3jW7dq3bx5\ninV2ZtTSJlkuf5mBQXyPgt+YU8gF+nhiavD2vkz+ASlbPSzTnXc816PaKyaZ0fZpvG/t2jFv803o\nQVldrZizRV3pA6NUPSiXbNig1598UjJGjatXu9d3giynb6Zj4NIlt13KvlWrFKqocAPIglBIjatX\n6xc1NVJ8VfLC8eO6cPy4+3pv65SqhgZdbm9X78WLqm5oUO/581Jbm3rPn9e+VasSAr3zH3yg2Jkz\ng9uCJX28dav794FYTCeam/X83Ln663ffdQNuSeqK9+wNlpfrTk/PXue+X+7o0InmZknSpdZW7Wlq\nSvmZZPI5AQDgB3I8gVxK7p/pfM3vHMyJ7tOJKSObOX7ec4cjEX3S3Dykt2RyTuKG+fN14fhxBadN\nU+3ixWl7VUqDOa8N3/2utj74oPp6etTjCW5DlZX66rFjal6xwr32su3bte+pp3Rs/fohFWZDkYiu\nvvdet4CPE8C9Eo26wXBxba0utbYqXF0tU1CggZ4eFYRC+lK84NGLixbpC1u26KX77ksItJM5PUwd\nv120yA2Wk59z3qvTB9Tbb7Nl82b1x2Kqvu22IfmtAAD4ZTQ5ngSeQC55Cx9lsxDRRF0HEy6bRX2k\n7BYs8p5bUsrreIv/XL9ihbpPnHAL9JTNnq2yWbNG/d5jnZ16ft48XW5ttStPO/82FRTomr/6K3dL\nqfeajlAkokcOHkwo3iPZ9//Yhg3q6eiQCQQUKClRYVGRps2erdP79rnHeYNF72tchYVupVonAPa+\nn9H8AiD5s0p+H6kCVgAA/EBxIUxJ485LyHYBn7EYaWutX2NlC++w8jnXxc/enamMJ8dvpPxQ77nT\nXSc5JzEUb3VSvXChSurqRvXedzc16dmrr9avr79elXPnKlxVZQd5AwP2n74+ffLqq0OuWdXQoGuX\nLlV9NKqvfvCBDjz99JD303nkiBtAWn196u3q0tttbeqOt1iR7D
Yn3m3D3tdI0jVLluirR4+qfvly\n1UejQ4JOyd4iO232bBWGw3r1sccU6+wccn+T76E3V7WwtFSxjo5h83QxeeXz9yhMTswp5AKBJ648\nk6mAz0iFiPwa60T36byCjLb4Trb42bvTT94KsOMJipOrqnofe4PQ4d5755EjutTaqp6ODp3ctUsF\nTj9fr4GBIX0ql23frge3bNEDGzcqHIkkBPnP3XijXl661D2X0/tTkspvuEHRN99UWX29QlVVKqqu\ndu/Ji42Nat2zJ+HS4UhE0+rr9cCmTe61vMe/vHSpJKl01iyd2rvXvZ8j/dLhvrVrVR+NKlxVpf4L\nF/RJc3NWfjkBAMBosNUWV56lS+1AbuHCkQOxXOdGjmWsyInkraATvZUxl707h+O9L04uZfL4xrtN\neLTv3dmmKtmrmJ/fuFH7Vq3S+Y8+GtwOW1CgqxcvVll9vY6tX6/+S5dkAgFd9bnP6YFNmxSORNzz\nePuHmkBA4enT9dC2bdr//e8nFDjy3oPi2lpZlqXLp04ljM0Eg/paW1vK8SfPrZ7ubvf6M+66S5J0\norl5xPxbenECALKNHE8glc7O0RfwyXVu5FjGipzgh/rUnPsSqqzUIwcODMmNlMYftI82cI11dmrn\n448PqXob6+zUr2+4QT1nzrjHhquq7MqzHsW1tXr08GFJGlJB1nvMzM9/XudbWhQoKVFxTY1aNm9O\n2FJrgkFZ8Z6gjof37NHVixalHHfy3JKk52680e0bWh+NqjAYHDHwnqy/nAAATB3keGJKGndegtPu\nZDQ/gOU6N3IsY0XGxjOnkreCwlZcU6NwTY2qb7tNoYqKlMckbxMe67bl0ea3hiMRFc+YobY339Sz\nV12l1ZGIXlqyRJI04447Eo41hYVDXn+ptVXPz5snSbp/3TotWb9exbW1Q475aOtWte7apVe3btVH\nW7cmBJ3B8nIVTZ8ev8jgv8vv/PSnacedPLfCkYhqFi6UZN+zxtWrR5V/Sy/O/Ec+HvzGnEIuZBx4\nGmN+ZIw5bIx52xjzgjEm9U8WQD4jNxIj4If61M63tCh2+rRODJNXmBxYjbVQ0ljyW508z4GeHvWe\nO+eO6761a1VQVGSfb9o0PbRtm0qvvVYmGJQCg62uL8d7Y0r2Z/7o4cOqX75cRTNmuGOouuUWSVLF\njTeq0MkjLbD/me3t6tKltjb7a/FdQN5xpwq6U80tftEBAMhXGW+1NcYskfSqZVkDxpgfSpJlWd9O\ncRxbbQHgCpPJFuSxvmbHypX66OWXVb1ggUrq6txtrqm23XrzPCUpNH26KufNU7C8XKd//3u3p2Z9\nNKpYR4e7BbggHNZALJZ2TOlawmxdtsxt+5JKSV2dVrzzjnu+Z6++WpdaWyXZ231r7rgjK+1xAADI\nhtFstQ0M9+RwLMva5nm4T9IjmZ4LADC13Ld27ajyCr15moufeUb7Vq1yXzNSDuf5lhbF2tv1SXOz\nwjU1bu7jnqYm3b9u3ZBzv/7Nb2qgp0cFwaAut7frlBMYera+DvT0JKyklt9wg33+NO/BWZV0OH93\nKu4Wlpaq/8KFhNdMX7BAVTffrFeiUfe99cdi7vOxM2fcVV/6bgIApgq/cjz/VtLLPp0LGNao8hIm\nU69OTHrkuvhvtFuQvdtr961alfCakbbenj96VJIUrKjQ9JtukpS4fbVl82b39a9/85t6YONGuz3K\npk1u+5NwdXVC4FkQDCZsZ7148qQb3I62FcnOnTvdc9TefXfCc6UzZ+rhHTt0vqUl4b1V33abJCkw\nTIuYXLfuSWeyjmsq4XsU/MacQi4Mu+JpjNkmqTbFU/9gWdbm+DHfkdRjWdbadOd5/PHHdd1110mS\nIpGIFixYoMbGRkmDE5/HPB7t44MHD458fLz/5U5JikbVGP/6ZBg/jyffY8dkGc+V9PjQpUuaLjvQ\nGvja17Rz5073+UOXLum0pM
/Fg7Dk138Qiajj+HHNPXdOoUhE5++9V9d961tu4PpOd7d6Jc2VdHL3\nbv3wzjt16/e+p88vW6b71q7Vv0WjajtzRjPi22yPlpbquq9/3Q2aveMLlJXp90ePauCll1T04ovq\nPHJEhy5dcs/nfX+SHXgHnnhCA93dKvrzn3W5tVWtN96ou378Y/u5khK9KzsfdGU8wPy3aFTz/u7v\nFHrhBS3+2c/0xsGDCe93z1tv6ezbb2uu7FXdwBNP5Pzzk6Su+C8I3pV0OBrV3/P9lsc8nvSPDyZ9\nf8n1eHicf48PHjyozvgvGz/88EONxrjaqRhjHpf0XyXdZ1nW5TTHkOOJiUf/y7HJdb9SXLGGa/Ux\nUhuQkXJCX1qyRCeamxO2u16/YoVC8UJGgZISDfT26kRzs0KVlZr5wAO6ePJkwtbeWGennpszx80B\nLZs9WxeOH3fbotQvX64HNm3S85/+tC62tqogGNSX9+9PaB+T6n1k0uJksrbumazjAgBMnKz28TTG\nfEHSjyXda1lW+zDHEXhi4tH/cmwaG3PbrxTwGEt/zuGCN+f5WEeHPmluVqCsTDPuukv9ly65+Z3e\nXpivRKMp+4p6A6uCcDihaFB9NKoHNm7U6khEvefOSbK30/7Nxx/7ek9G835zZbKOCwAwcbIdeL4n\nKSTpbPxLb1iW9USK4wg84audnq148MkVvkLMnJpcXmxsTBkAjiRdwBrr7NRzN97oFh8qrq3VpdbW\nISt06VbuvIHVq4895lbHnX7zzXp41y6FIxH9oqZGsfZ2FZaU6Oqf/1xLv/KVMY9zrMeM5/zIL3yP\ngt+YU/DbaALPgkxPblnWHMuy6i3Laoj/GRJ0AsgT9CvFJDKW/pxe6YoRhSMR1Sxc6J4z+uabKXth\npuuR6S2UdN/atapfvlz10agbdO5uatK0T31KBeGwom+8oZLaVKURRh7nWI8Zz/kBAJho48rxHNUF\nWPEEAIxBpls3h8s1zOZ20LGu0CaPc99TTw1ZoRxP3iQ5lwCAiZbVFU8AALJhtK1YkqVbsRzPOVNJ\nbh8y1hXa5HGmWqEc7r2MpLimRuHqagJOAMCkQuCJvOOUdAb8wpyaGvwMLoeTHCgmB4kjzafkcaYK\nXMfzXs63tIy59ygmN75HwW/MKeQCgScAAGOQHCiON+Adz+rmaMYHAMBkQI4nAABjMNnbh0z28QEA\npp6stlMZwyAIPAEAAABgiqK4EKYk8hLgN+YU/MR8gt+YU/Abcwq5QOAJAAAAAMgqttoCAAAAADLG\nVlsAAAAAQM4ReCLvkJcAvzGn4CfmE/zGnILfmFPIBQJPAAAAAEBWkeMJAAAAAMgYOZ4AAAAAgJwj\n8ETeIS8BfmNOwU/MJ/iNOQW/MaeQCwSeAAAAAICsIscTAAAAAJAxcjwBAAAAADlH4Im8Q14C/Mac\ngp+YT/Abcwp+Y04hFwg8AQAAAABZRY4nAAAAACBj5HgCAAAAAHKOwBN5h7wE+I05BT8xn+A35hT8\nxpxCLhB4AgAAAACyihxPAAAAAEDGyPEEAAAAAOQcgSfyDnkJ8BtzCn5iPsFvzCn4jTmFXCDwBAAA\nAABkFTmeAAAAAICMkeMJAAAAAMg5Ak/kHfIS4DfmFPzEfILfmFPwG3MKuUDgCQAAAADIKnI8AQAA\nAAAZI8cTAAAAAJBzBJ7IO+QlwG/MKfiJ+QS/MafgN+YUcoHAEwAAAACQVeR4AgAAAAAyRo4nAAAA\nACDnCDyRd8hLgN+YU/AT8wl+Y07Bb8wp5AKBJwAAAAAgq8jxBAAAAABkjBxPAAAAAEDOEXgi75CX\nAL8xp+An5hP8xpyC35hTyAUCTwAAAABAVpHjCQAAAADIGDmeAAAAAICcI/BE3iEvAX5jTsFPzCf4\njTkFvzGnkAsEngAAAACArCLHEwAAAACQMXI8AQAAAAA5R+CJvENeAvzGnIKfmE/wG3MKfmNO
IRcI\nPAEAAAAAWUWOJwAAAAAgY+R4AgAAAAByjsATeYe8BPiNOQU/MZ/gN+YU/MacQi4QeAIAAAAAsooc\nTwAAAABAxsjxBAAAAADkHIEn8g55CfAbcwp+Yj7Bb8wp+I05hVwg8AQAAAAAZBU5ngAAAACAjJHj\nCQAAAADIOQJP5B3yEuA35hT8xHyC35hT8BtzCrlA4AkAAAAAyCpyPAEAAAAAGSPHEwAAAACQcwSe\nyDvkJcBvzCn4ifkEvzGn4DfmFHKBwBMAAAAAkFXkeAIAAAAAMkaOJwAAAAAg5wg8kXfIS4DfmFPw\nE/MJfmNOwW/MKeQCgScAAAAAIKvI8QQAAAAAZIwcTwAAAABAzmUceBpj/skY87Yx5qAx5lVjzLV+\nDgxIh7wE+I05BT8xn+A35hT8xpxCLoxnxfOfLcu6xbKsBZI2SfpfPo0JGNbBgwdzPQRMMcwp+In5\nBL8xp+A35hRyIePA07Ks856HZZLaxz8cYGSdnZ25HgKmGOYU/MR8gt+YU/Abcwq5EBjPi40xT0v6\nz5IuSrrLlxEBAAAAAKaUYVc8jTHbjDF/SvHnYUmyLOs7lmXNkrRG0r9MwHgBffjhh7keAqYY5hT8\nxHyC35hT8BtzCrngSzsVY8wsSS9blvXZFM/RSwUAAAAAprCR2qlkvNXWGDPHsqz34g+XSzqQyQAA\nAAAAAFNbxiuexpgNkuZK6pd0VNI3LMtq83FsAAAAAIApwJettgAAAAAApDOePp6jZoz5J2PM28aY\ng8aYV40x107EdTE1GWN+ZIw5HJ9TLxhjKnI9JuQ3Y8wKY8w7xph+Y8ytuR4P8pcx5gvGmL8YY94z\nxvyPXI8H+c0Y83+NMaeMMX/K9VgwNRhjrjXG7Ij/m/dnY8x/z/WYkL+MMUXGmH3xGO+QMeZ/D3v8\nRKx4GmOmOX0/jTH/TdItlmV9PesXxpRkjFki6VXLsgaMMT+UJMuyvp3jYSGPGWM+LWlA0v+R9PeW\nZf0hx0NCHjLGFEp6V9L9kj6R9HtJX7Us63BOB4a8ZYxZLKlb0r9bljU/1+NB/jPG1EqqtSzroDGm\nTNL/kxTl+xQyZYwpsSzrojEm47bo4wAAAphJREFUIOk1Sd+yLOu1VMdOyIqnE3TGlUlqn4jrYmqy\nLGubZVkD8Yf7JM3M5XiQ/yzL+otlWUdyPQ7kvTskvW9Z1oeWZfVKek528T0gI5Zl7ZHUketxYOqw\nLKvVsqyD8b93SzosqS63o0I+syzrYvyvIUmFks6mO3ZCAk9JMsY8bYz5SNJKST+cqOtiyvtbSS/n\nehAAIOkaSR97Hh+Pfw0AJh1jzHWSGmT/Eh/IiDGmwBhzUNIpSTssyzqU7tiM26mkuOg2SbUpnvoH\ny7I2W5b1HUnfMcZ8W9K/SPovfl0bU89I8yl+zHck9ViWtXZCB4e8NJo5BYwT1foA5IX4NtsNkp6M\nr3wCGYnvQlwQr7nyijGm0bKsnamO9S3wtCxrySgPXStWqDCCkeaTMeZxSUsl3TchA0LeG8P3KCBT\nn0jyFs+7VvaqJwBMGsaYoKTfSPqlZVmbcj0eTA2WZZ0zxmyRtFDSzlTHTFRV2zmeh8slHZiI62Jq\nMsZ8QdIqScsty7qc6/FgyjG5HgDy1n5Jc4wx1xljQpL+WtKLOR4TALiMMUbSzyUdsizrX3M9HuQ3\nY0y1MSYS/3uxpCUaJs6bqKq2GyTNldQv6aikb1iW1Zb1C2NKMsa8JzuB2UlefsOyrCdyOCTkOWPM\nlyT9VFK1pHOSDliW9WBuR4V8ZIx5UNK/yi6w8HPLsoYtLQ8Mxxjza0n3SqqS1Cbp+5Zlrc7tqJDP\njDGLJO2W9EcNpgf8T8uyfpe7USFfGWPmS/qF7MXMAknPWpb1o7THT0TgCQAAAAC4ck1YVVsAAAAA\nwJWJwBMAAAAAkFUEngAAAACArCLwBAAAAABkFYEnAAAA
ACCrCDwBAAAAAFlF4AkAAAAAyCoCTwAA\nAABAVv1/lzHCzGUnjVoAAAAASUVORK5CYII=\n", - "text": [ - "" - ] - } - ], - "prompt_number": 5 - } - ], - "metadata": {} - } - ] -} \ No newline at end of file diff --git a/include/caffe/blob.hpp b/include/caffe/blob.hpp index fea5117ef10..dda7b1f8372 100644 --- a/include/caffe/blob.hpp +++ b/include/caffe/blob.hpp @@ -219,7 +219,6 @@ class Blob { const Dtype* cpu_data() const; void set_cpu_data(Dtype* data); - const int* gpu_shape() const; const Dtype* gpu_data() const; const Dtype* cpu_diff() const; const Dtype* gpu_diff() const; @@ -269,7 +268,6 @@ class Blob { protected: shared_ptr data_; shared_ptr diff_; - shared_ptr shape_data_; vector shape_; int count_; int capacity_; diff --git a/include/caffe/solver.hpp b/include/caffe/solver.hpp index 7f92ffe7b99..aba3e036004 100644 --- a/include/caffe/solver.hpp +++ b/include/caffe/solver.hpp @@ -281,19 +281,19 @@ Solver* GetSolver(const SolverParameter& param) { switch (type) { case SolverParameter_SolverType_SGD: - return new SGDSolver(param); + return new SGDSolver(param); case SolverParameter_SolverType_NESTEROV: - return new NesterovSolver(param); + return new NesterovSolver(param); case SolverParameter_SolverType_ADAGRAD: - return new AdaGradSolver(param); + return new AdaGradSolver(param); case SolverParameter_SolverType_RMSPROP: - return new RMSPropSolver(param); + return new RMSPropSolver(param); case SolverParameter_SolverType_ADADELTA: - return new AdaDeltaSolver(param); + return new AdaDeltaSolver(param); case SolverParameter_SolverType_ADAM: - return new AdamSolver(param); + return new AdamSolver(param); default: - LOG(FATAL) << "Unknown SolverType: " << type; + LOG(FATAL) << "Unknown SolverType: " << type; } return (Solver*) NULL; } diff --git a/include/caffe/util/im2col.hpp b/include/caffe/util/im2col.hpp index 531fd29c57a..0051e2fa067 100644 --- a/include/caffe/util/im2col.hpp +++ b/include/caffe/util/im2col.hpp @@ -3,48 +3,24 @@ namespace caffe { -template -void 
im2col_nd_cpu(const Dtype* data_im, const int num_spatial_axes, - const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, - Dtype* data_col); - template void im2col_cpu(const Dtype* data_im, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, Dtype* data_col); -template -void col2im_nd_cpu(const Dtype* data_col, const int num_spatial_axes, - const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, - Dtype* data_im); - template void col2im_cpu(const Dtype* data_col, const int channels, const int height, const int width, const int patch_h, const int patch_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, Dtype* data_im); -template -void im2col_nd_gpu(const Dtype* data_im, const int num_spatial_axes, - const int col_size, const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, - Dtype* data_col); - template void im2col_gpu(const Dtype* data_im, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, Dtype* data_col); -template -void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes, - const int im_size, const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, - Dtype* data_im); - template void col2im_gpu(const Dtype* data_col, const int channels, const int height, const int width, const int patch_h, const int patch_w, diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index 26beb8f943d..211e3d9042d 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -58,110 +58,52 @@ class BaseConvolutionLayer : public Layer { void backward_gpu_bias(Dtype* bias, const 
Dtype* input); #endif - /// @brief The spatial dimensions of the input. - inline int input_shape(int i) { - return (*bottom_shape_)[channel_axis_ + i]; - } // reverse_dimensions should return true iff we are implementing deconv, so // that conv helpers know which dimensions are which. virtual bool reverse_dimensions() = 0; // Compute height_out_ and width_out_ from other parameters. virtual void compute_output_shape() = 0; - /// @brief The spatial dimensions of a filter kernel. - Blob kernel_shape_; - /// @brief The spatial dimensions of the stride. - Blob stride_; - /// @brief The spatial dimensions of the padding. - Blob pad_; - /// @brief The spatial dimensions of the convolution input. - Blob conv_input_shape_; - /// @brief The spatial dimensions of the col_buffer. - vector col_buffer_shape_; - /// @brief The spatial dimensions of the output. - vector output_shape_; - const vector* bottom_shape_; - - int num_spatial_axes_; - int bottom_dim_; - int top_dim_; - - int channel_axis_; + int kernel_h_, kernel_w_; + int stride_h_, stride_w_; int num_; int channels_; + int pad_h_, pad_w_; + int height_, width_; int group_; - int out_spatial_dim_; - int weight_offset_; int num_output_; + int height_out_, width_out_; bool bias_term_; bool is_1x1_; - bool force_nd_im2col_; private: // wrap im2col/col2im so we don't have to remember the (long) argument lists inline void conv_im2col_cpu(const Dtype* data, Dtype* col_buff) { - if (!force_nd_im2col_ && num_spatial_axes_ == 2) { - im2col_cpu(data, conv_in_channels_, - conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2], - kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], - pad_.cpu_data()[0], pad_.cpu_data()[1], - stride_.cpu_data()[0], stride_.cpu_data()[1], col_buff); - } else { - im2col_nd_cpu(data, num_spatial_axes_, conv_input_shape_.cpu_data(), - col_buffer_shape_.data(), kernel_shape_.cpu_data(), - pad_.cpu_data(), stride_.cpu_data(), col_buff); - } + im2col_cpu(data, conv_in_channels_, 
conv_in_height_, conv_in_width_, + kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, col_buff); } inline void conv_col2im_cpu(const Dtype* col_buff, Dtype* data) { - if (!force_nd_im2col_ && num_spatial_axes_ == 2) { - col2im_cpu(col_buff, conv_in_channels_, - conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2], - kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], - pad_.cpu_data()[0], pad_.cpu_data()[1], - stride_.cpu_data()[0], stride_.cpu_data()[1], data); - } else { - col2im_nd_cpu(col_buff, num_spatial_axes_, conv_input_shape_.cpu_data(), - col_buffer_shape_.data(), kernel_shape_.cpu_data(), - pad_.cpu_data(), stride_.cpu_data(), data); - } + col2im_cpu(col_buff, conv_in_channels_, conv_in_height_, conv_in_width_, + kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, data); } #ifndef CPU_ONLY inline void conv_im2col_gpu(const Dtype* data, Dtype* col_buff) { - if (!force_nd_im2col_ && num_spatial_axes_ == 2) { - im2col_gpu(data, conv_in_channels_, - conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2], - kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], - pad_.cpu_data()[0], pad_.cpu_data()[1], - stride_.cpu_data()[0], stride_.cpu_data()[1], col_buff); - } else { - im2col_nd_gpu(data, num_spatial_axes_, num_kernels_im2col_, - conv_input_shape_.gpu_data(), col_buffer_.gpu_shape(), - kernel_shape_.gpu_data(), pad_.gpu_data(), - stride_.gpu_data(), col_buff); - } + im2col_gpu(data, conv_in_channels_, conv_in_height_, conv_in_width_, + kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, col_buff); } inline void conv_col2im_gpu(const Dtype* col_buff, Dtype* data) { - if (!force_nd_im2col_ && num_spatial_axes_ == 2) { - col2im_gpu(col_buff, conv_in_channels_, - conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2], - kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], - pad_.cpu_data()[0], pad_.cpu_data()[1], - stride_.cpu_data()[0], stride_.cpu_data()[1], data); - } else { - 
col2im_nd_gpu(col_buff, num_spatial_axes_, num_kernels_col2im_, - conv_input_shape_.gpu_data(), col_buffer_.gpu_shape(), - kernel_shape_.gpu_data(), pad_.gpu_data(), stride_.gpu_data(), - data); - } + col2im_gpu(col_buff, conv_in_channels_, conv_in_height_, conv_in_width_, + kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, data); } #endif - int num_kernels_im2col_; - int num_kernels_col2im_; int conv_out_channels_; int conv_in_channels_; int conv_out_spatial_dim_; + int conv_in_height_; + int conv_in_width_; int kernel_dim_; + int weight_offset_; int col_offset_; int output_offset_; @@ -308,7 +250,7 @@ class CuDNNConvolutionLayer : public ConvolutionLayer { cudnnTensorDescriptor_t bias_desc_; cudnnFilterDescriptor_t filter_desc_; vector conv_descs_; - int bottom_offset_, top_offset_, bias_offset_; + int bottom_offset_, top_offset_, weight_offset_, bias_offset_; size_t workspaceSizeInBytes; void *workspace; }; @@ -345,22 +287,11 @@ class Im2colLayer : public Layer { virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); - /// @brief The spatial dimensions of a filter kernel. - Blob kernel_shape_; - /// @brief The spatial dimensions of the stride. - Blob stride_; - /// @brief The spatial dimensions of the padding. - Blob pad_; - - int num_spatial_axes_; - int bottom_dim_; - int top_dim_; - - int channel_axis_; - int num_; + int kernel_h_, kernel_w_; + int stride_h_, stride_w_; int channels_; - - bool force_nd_im2col_; + int height_, width_; + int pad_h_, pad_w_; }; // Forward declare PoolingLayer and SplitLayer for use in LRNLayer. @@ -540,13 +471,7 @@ class SPPLayer : public Layer { virtual inline const char* type() const { return "SPP"; } virtual inline int ExactNumBottomBlobs() const { return 1; } - virtual inline int MinTopBlobs() const { return 1; } - // MAX POOL layers can output an extra top blob for the mask; - // others can only output the pooled inputs. 
- virtual inline int MaxTopBlobs() const { - return (this->layer_param_.pooling_param().pool() == - PoolingParameter_PoolMethod_MAX) ? 2 : 1; - } + virtual inline int ExactNumTopBlobs() const { return 1; } protected: virtual void Forward_cpu(const vector*>& bottom, @@ -560,9 +485,11 @@ class SPPLayer : public Layer { int pyramid_height_; int bottom_h_, bottom_w_; + int num_; int channels_; int kernel_h_, kernel_w_; int pad_h_, pad_w_; + bool reshaped_first_time_; /// the internal Split layer that feeds the pooling layers shared_ptr > split_layer_; diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp index c86fd5d1d94..8450aa140be 100644 --- a/src/caffe/blob.cpp +++ b/src/caffe/blob.cpp @@ -24,16 +24,11 @@ void Blob::Reshape(const vector& shape) { CHECK_LE(shape.size(), kMaxBlobAxes); count_ = 1; shape_.resize(shape.size()); - if (!shape_data_ || shape_data_->size() < shape.size() * sizeof(int)) { - shape_data_.reset(new SyncedMemory(shape.size() * sizeof(int))); - } - int* shape_data = static_cast(shape_data_->mutable_cpu_data()); for (int i = 0; i < shape.size(); ++i) { CHECK_GE(shape[i], 0); CHECK_LE(shape[i], INT_MAX / count_) << "blob size exceeds INT_MAX"; count_ *= shape[i]; shape_[i] = shape[i]; - shape_data[i] = shape[i]; } if (count_ > capacity_) { capacity_ = count_; @@ -72,12 +67,6 @@ Blob::Blob(const vector& shape) Reshape(shape); } -template -const int* Blob::gpu_shape() const { - CHECK(shape_data_); - return (const int*)shape_data_->gpu_data(); -} - template const Dtype* Blob::cpu_data() const { CHECK(data_); diff --git a/src/caffe/layers/base_conv_layer.cpp b/src/caffe/layers/base_conv_layer.cpp index c6b47550292..ccb3adc7e89 100644 --- a/src/caffe/layers/base_conv_layer.cpp +++ b/src/caffe/layers/base_conv_layer.cpp @@ -1,4 +1,3 @@ -#include #include #include "caffe/filler.hpp" @@ -12,97 +11,50 @@ namespace caffe { template void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, const vector*>& top) { + CHECK_EQ(4, bottom[0]->num_axes()) << 
"Input must have 4 axes, " + << "corresponding to (num, channels, height, width)"; // Configure the kernel size, padding, stride, and inputs. ConvolutionParameter conv_param = this->layer_param_.convolution_param(); - force_nd_im2col_ = conv_param.force_nd_im2col(); - channel_axis_ = bottom[0]->CanonicalAxisIndex(conv_param.axis()); - const int first_spatial_axis = channel_axis_ + 1; - const int num_axes = bottom[0]->num_axes(); - num_spatial_axes_ = num_axes - first_spatial_axis; - CHECK_GE(num_spatial_axes_, 0); - vector bottom_dim_blob_shape(1, num_spatial_axes_ + 1); - vector spatial_dim_blob_shape(1, std::max(num_spatial_axes_, 1)); - // Setup filter kernel dimensions (kernel_shape_). - kernel_shape_.Reshape(spatial_dim_blob_shape); - int* kernel_shape_data = kernel_shape_.mutable_cpu_data(); - if (conv_param.has_kernel_h() || conv_param.has_kernel_w()) { - CHECK_EQ(num_spatial_axes_, 2) - << "kernel_h & kernel_w can only be used for 2D convolution."; - CHECK_EQ(0, conv_param.kernel_size_size()) - << "Either kernel_size or kernel_h/w should be specified; not both."; - kernel_shape_data[0] = conv_param.kernel_h(); - kernel_shape_data[1] = conv_param.kernel_w(); + CHECK(!conv_param.has_kernel_size() != + !(conv_param.has_kernel_h() && conv_param.has_kernel_w())) + << "Filter size is kernel_size OR kernel_h and kernel_w; not both"; + CHECK(conv_param.has_kernel_size() || + (conv_param.has_kernel_h() && conv_param.has_kernel_w())) + << "For non-square filters both kernel_h and kernel_w are required."; + CHECK((!conv_param.has_pad() && conv_param.has_pad_h() + && conv_param.has_pad_w()) + || (!conv_param.has_pad_h() && !conv_param.has_pad_w())) + << "pad is pad OR pad_h and pad_w are required."; + CHECK((!conv_param.has_stride() && conv_param.has_stride_h() + && conv_param.has_stride_w()) + || (!conv_param.has_stride_h() && !conv_param.has_stride_w())) + << "Stride is stride OR stride_h and stride_w are required."; + if (conv_param.has_kernel_size()) { + kernel_h_ 
= kernel_w_ = conv_param.kernel_size(); } else { - const int num_kernel_dims = conv_param.kernel_size_size(); - CHECK(num_kernel_dims == 1 || num_kernel_dims == num_spatial_axes_) - << "kernel_size must be specified once, or once per spatial dimension " - << "(kernel_size specified " << num_kernel_dims << " times; " - << num_spatial_axes_ << " spatial dims);"; - for (int i = 0; i < num_spatial_axes_; ++i) { - kernel_shape_data[i] = - conv_param.kernel_size((num_kernel_dims == 1) ? 0 : i); - } + kernel_h_ = conv_param.kernel_h(); + kernel_w_ = conv_param.kernel_w(); } - for (int i = 0; i < num_spatial_axes_; ++i) { - CHECK_GT(kernel_shape_data[i], 0) << "Filter dimensions must be nonzero."; - } - // Setup stride dimensions (stride_). - stride_.Reshape(spatial_dim_blob_shape); - int* stride_data = stride_.mutable_cpu_data(); - if (conv_param.has_stride_h() || conv_param.has_stride_w()) { - CHECK_EQ(num_spatial_axes_, 2) - << "stride_h & stride_w can only be used for 2D convolution."; - CHECK_EQ(0, conv_param.stride_size()) - << "Either stride or stride_h/w should be specified; not both."; - stride_data[0] = conv_param.stride_h(); - stride_data[1] = conv_param.stride_w(); + CHECK_GT(kernel_h_, 0) << "Filter dimensions cannot be zero."; + CHECK_GT(kernel_w_, 0) << "Filter dimensions cannot be zero."; + if (!conv_param.has_pad_h()) { + pad_h_ = pad_w_ = conv_param.pad(); } else { - const int num_stride_dims = conv_param.stride_size(); - CHECK(num_stride_dims == 0 || num_stride_dims == 1 || - num_stride_dims == num_spatial_axes_) - << "stride must be specified once, or once per spatial dimension " - << "(stride specified " << num_stride_dims << " times; " - << num_spatial_axes_ << " spatial dims);"; - const int kDefaultStride = 1; - for (int i = 0; i < num_spatial_axes_; ++i) { - stride_data[i] = (num_stride_dims == 0) ? kDefaultStride : - conv_param.stride((num_stride_dims == 1) ? 
0 : i); - CHECK_GT(stride_data[i], 0) << "Stride dimensions must be nonzero."; - } + pad_h_ = conv_param.pad_h(); + pad_w_ = conv_param.pad_w(); } - // Setup pad dimensions (pad_). - pad_.Reshape(spatial_dim_blob_shape); - int* pad_data = pad_.mutable_cpu_data(); - if (conv_param.has_pad_h() || conv_param.has_pad_w()) { - CHECK_EQ(num_spatial_axes_, 2) - << "pad_h & pad_w can only be used for 2D convolution."; - CHECK_EQ(0, conv_param.pad_size()) - << "Either pad or pad_h/w should be specified; not both."; - pad_data[0] = conv_param.pad_h(); - pad_data[1] = conv_param.pad_w(); + if (!conv_param.has_stride_h()) { + stride_h_ = stride_w_ = conv_param.stride(); } else { - const int num_pad_dims = conv_param.pad_size(); - CHECK(num_pad_dims == 0 || num_pad_dims == 1 || - num_pad_dims == num_spatial_axes_) - << "pad must be specified once, or once per spatial dimension " - << "(pad specified " << num_pad_dims << " times; " - << num_spatial_axes_ << " spatial dims);"; - const int kDefaultPad = 0; - for (int i = 0; i < num_spatial_axes_; ++i) { - pad_data[i] = (num_pad_dims == 0) ? kDefaultPad : - conv_param.pad((num_pad_dims == 1) ? 0 : i); - } + stride_h_ = conv_param.stride_h(); + stride_w_ = conv_param.stride_w(); } // Special case: im2col is the identity for 1x1 convolution with stride 1 // and no padding, so flag for skipping the buffer and transformation. - is_1x1_ = true; - for (int i = 0; i < num_spatial_axes_; ++i) { - is_1x1_ &= - kernel_shape_data[i] == 1 && stride_data[i] == 1 && pad_data[i] == 0; - if (!is_1x1_) { break; } - } + is_1x1_ = kernel_w_ == 1 && kernel_h_ == 1 + && stride_h_ == 1 && stride_w_ == 1 && pad_h_ == 0 && pad_w_ == 0; // Configure output channels and groups. 
- channels_ = bottom[0]->shape(channel_axis_); + channels_ = bottom[0]->channels(); num_output_ = this->layer_param_.convolution_param().num_output(); CHECK_GT(num_output_, 0); group_ = this->layer_param_.convolution_param().group(); @@ -119,29 +71,8 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, // Handle the parameters: weights and biases. // - blobs_[0] holds the filter weights // - blobs_[1] holds the biases (optional) - vector weight_shape(2); - weight_shape[0] = conv_out_channels_; - weight_shape[1] = conv_in_channels_ / group_; - for (int i = 0; i < num_spatial_axes_; ++i) { - weight_shape.push_back(kernel_shape_data[i]); - } bias_term_ = this->layer_param_.convolution_param().bias_term(); - vector bias_shape(bias_term_, num_output_); if (this->blobs_.size() > 0) { - CHECK_EQ(1 + bias_term_, this->blobs_.size()) - << "Incorrect number of weight blobs."; - if (weight_shape != this->blobs_[0]->shape()) { - Blob weight_shaped_blob(weight_shape); - LOG(FATAL) << "Incorrect weight shape: expected shape " - << weight_shaped_blob.shape_string() << "; instead, shape was " - << this->blobs_[0]->shape_string(); - } - if (bias_term_ && bias_shape != this->blobs_[1]->shape()) { - Blob bias_shaped_blob(bias_shape); - LOG(FATAL) << "Incorrect bias shape: expected shape " - << bias_shaped_blob.shape_string() << "; instead, shape was " - << this->blobs_[1]->shape_string(); - } LOG(INFO) << "Skipping parameter initialization"; } else { if (bias_term_) { @@ -151,20 +82,20 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, } // Initialize and fill the weights: // output channels x input channels per-group x kernel height x kernel width - this->blobs_[0].reset(new Blob(weight_shape)); + this->blobs_[0].reset(new Blob( + conv_out_channels_, conv_in_channels_ / group_, kernel_h_, kernel_w_)); shared_ptr > weight_filler(GetFiller( this->layer_param_.convolution_param().weight_filler())); weight_filler->Fill(this->blobs_[0].get()); // If necessary, 
initialize and fill the biases. if (bias_term_) { + vector bias_shape(1, num_output_); this->blobs_[1].reset(new Blob(bias_shape)); shared_ptr > bias_filler(GetFiller( this->layer_param_.convolution_param().bias_filler())); bias_filler->Fill(this->blobs_[1].get()); } } - kernel_dim_ = this->blobs_[0]->count(1); - weight_offset_ = conv_out_channels_ * kernel_dim_ / group_; // Propagate gradients to the parameters (as directed by backward pass). this->param_propagate_down_.resize(this->blobs_.size(), true); } @@ -172,68 +103,52 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, template void BaseConvolutionLayer::Reshape(const vector*>& bottom, const vector*>& top) { - const int first_spatial_axis = channel_axis_ + 1; - CHECK_EQ(bottom[0]->num_axes(), first_spatial_axis + num_spatial_axes_) - << "bottom num_axes may not change."; - num_ = bottom[0]->count(0, channel_axis_); - CHECK_EQ(bottom[0]->shape(channel_axis_), channels_) - << "Input size incompatible with convolution kernel."; + CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, " + << "corresponding to (num, channels, height, width)"; + num_ = bottom[0]->num(); + height_ = bottom[0]->height(); + width_ = bottom[0]->width(); + CHECK_EQ(bottom[0]->channels(), channels_) << "Input size incompatible with" + " convolution kernel."; // TODO: generalize to handle inputs of different shapes. for (int bottom_id = 1; bottom_id < bottom.size(); ++bottom_id) { - CHECK(bottom[0]->shape() == bottom[bottom_id]->shape()) - << "All inputs must have the same shape."; + CHECK_EQ(num_, bottom[bottom_id]->num()) << "Inputs must have same num."; + CHECK_EQ(channels_, bottom[bottom_id]->channels()) + << "Inputs must have same channels."; + CHECK_EQ(height_, bottom[bottom_id]->height()) + << "Inputs must have same height."; + CHECK_EQ(width_, bottom[bottom_id]->width()) + << "Inputs must have same width."; } // Shape the tops. 
- bottom_shape_ = &bottom[0]->shape(); compute_output_shape(); - vector top_shape(bottom[0]->shape().begin(), - bottom[0]->shape().begin() + channel_axis_); - top_shape.push_back(num_output_); - for (int i = 0; i < num_spatial_axes_; ++i) { - top_shape.push_back(output_shape_[i]); - } for (int top_id = 0; top_id < top.size(); ++top_id) { - top[top_id]->Reshape(top_shape); + top[top_id]->Reshape(num_, num_output_, height_out_, width_out_); } if (reverse_dimensions()) { - conv_out_spatial_dim_ = bottom[0]->count(first_spatial_axis); + conv_in_height_ = height_out_; + conv_in_width_ = width_out_; + conv_out_spatial_dim_ = height_ * width_; } else { - conv_out_spatial_dim_ = top[0]->count(first_spatial_axis); + conv_in_height_ = height_; + conv_in_width_ = width_; + conv_out_spatial_dim_ = height_out_ * width_out_; } - col_offset_ = kernel_dim_ * conv_out_spatial_dim_; + kernel_dim_ = conv_in_channels_ * kernel_h_ * kernel_w_; + weight_offset_ = conv_out_channels_ * kernel_dim_ / group_ / group_; + col_offset_ = kernel_dim_ * conv_out_spatial_dim_ / group_; output_offset_ = conv_out_channels_ * conv_out_spatial_dim_ / group_; - // Setup input dimensions (conv_input_shape_). - vector bottom_dim_blob_shape(1, num_spatial_axes_ + 1); - conv_input_shape_.Reshape(bottom_dim_blob_shape); - int* conv_input_shape_data = conv_input_shape_.mutable_cpu_data(); - for (int i = 0; i < num_spatial_axes_ + 1; ++i) { - if (reverse_dimensions()) { - conv_input_shape_data[i] = top[0]->shape(channel_axis_ + i); - } else { - conv_input_shape_data[i] = bottom[0]->shape(channel_axis_ + i); - } - } // The im2col result buffer will only hold one image at a time to avoid // overly large memory usage. In the special case of 1x1 convolution // it goes lazily unused to save memory. 
- col_buffer_shape_.clear(); - col_buffer_shape_.push_back(kernel_dim_ * group_); - for (int i = 0; i < num_spatial_axes_; ++i) { - if (reverse_dimensions()) { - col_buffer_shape_.push_back(input_shape(i + 1)); - } else { - col_buffer_shape_.push_back(output_shape_[i]); - } + if (reverse_dimensions()) { + col_buffer_.Reshape(1, kernel_dim_, height_, width_); + } else { + col_buffer_.Reshape(1, kernel_dim_, height_out_, width_out_); } - col_buffer_.Reshape(col_buffer_shape_); - bottom_dim_ = bottom[0]->count(channel_axis_); - top_dim_ = top[0]->count(channel_axis_); - num_kernels_im2col_ = conv_in_channels_ * conv_out_spatial_dim_; - num_kernels_col2im_ = reverse_dimensions() ? top_dim_ : bottom_dim_; // Set up the all ones "bias multiplier" for adding biases by BLAS - out_spatial_dim_ = top[0]->count(first_spatial_axis); if (bias_term_) { - vector bias_multiplier_shape(1, out_spatial_dim_); + vector bias_multiplier_shape(1, height_out_ * width_out_); bias_multiplier_.Reshape(bias_multiplier_shape); caffe_set(bias_multiplier_.count(), Dtype(1), bias_multiplier_.mutable_cpu_data()); @@ -252,7 +167,7 @@ void BaseConvolutionLayer::forward_cpu_gemm(const Dtype* input, } for (int g = 0; g < group_; ++g) { caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, conv_out_channels_ / - group_, conv_out_spatial_dim_, kernel_dim_, + group_, conv_out_spatial_dim_, kernel_dim_ / group_, (Dtype)1., weights + weight_offset_ * g, col_buff + col_offset_ * g, (Dtype)0., output + output_offset_ * g); } @@ -262,7 +177,7 @@ template void BaseConvolutionLayer::forward_cpu_bias(Dtype* output, const Dtype* bias) { caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num_output_, - out_spatial_dim_, 1, (Dtype)1., bias, bias_multiplier_.cpu_data(), + height_out_ * width_out_, 1, (Dtype)1., bias, bias_multiplier_.cpu_data(), (Dtype)1., output); } @@ -274,7 +189,7 @@ void BaseConvolutionLayer::backward_cpu_gemm(const Dtype* output, col_buff = input; } for (int g = 0; g < group_; ++g) { - caffe_cpu_gemm(CblasTrans, 
CblasNoTrans, kernel_dim_, + caffe_cpu_gemm(CblasTrans, CblasNoTrans, kernel_dim_ / group_, conv_out_spatial_dim_, conv_out_channels_ / group_, (Dtype)1., weights + weight_offset_ * g, output + output_offset_ * g, (Dtype)0., col_buff + col_offset_ * g); @@ -294,7 +209,7 @@ void BaseConvolutionLayer::weight_cpu_gemm(const Dtype* input, } for (int g = 0; g < group_; ++g) { caffe_cpu_gemm(CblasNoTrans, CblasTrans, conv_out_channels_ / group_, - kernel_dim_, conv_out_spatial_dim_, + kernel_dim_ / group_, conv_out_spatial_dim_, (Dtype)1., output + output_offset_ * g, col_buff + col_offset_ * g, (Dtype)1., weights + weight_offset_ * g); } @@ -303,7 +218,7 @@ void BaseConvolutionLayer::weight_cpu_gemm(const Dtype* input, template void BaseConvolutionLayer::backward_cpu_bias(Dtype* bias, const Dtype* input) { - caffe_cpu_gemv(CblasNoTrans, num_output_, out_spatial_dim_, 1., + caffe_cpu_gemv(CblasNoTrans, num_output_, height_out_ * width_out_, 1., input, bias_multiplier_.cpu_data(), 1., bias); } @@ -321,7 +236,7 @@ void BaseConvolutionLayer::forward_gpu_gemm(const Dtype* input, } for (int g = 0; g < group_; ++g) { caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, conv_out_channels_ / - group_, conv_out_spatial_dim_, kernel_dim_, + group_, conv_out_spatial_dim_, kernel_dim_ / group_, (Dtype)1., weights + weight_offset_ * g, col_buff + col_offset_ * g, (Dtype)0., output + output_offset_ * g); } @@ -331,7 +246,7 @@ template void BaseConvolutionLayer::forward_gpu_bias(Dtype* output, const Dtype* bias) { caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num_output_, - out_spatial_dim_, 1, (Dtype)1., bias, bias_multiplier_.gpu_data(), + height_out_ * width_out_, 1, (Dtype)1., bias, bias_multiplier_.gpu_data(), (Dtype)1., output); } @@ -343,7 +258,7 @@ void BaseConvolutionLayer::backward_gpu_gemm(const Dtype* output, col_buff = input; } for (int g = 0; g < group_; ++g) { - caffe_gpu_gemm(CblasTrans, CblasNoTrans, kernel_dim_, + caffe_gpu_gemm(CblasTrans, CblasNoTrans, kernel_dim_ / group_, 
conv_out_spatial_dim_, conv_out_channels_ / group_, (Dtype)1., weights + weight_offset_ * g, output + output_offset_ * g, (Dtype)0., col_buff + col_offset_ * g); @@ -363,7 +278,7 @@ void BaseConvolutionLayer::weight_gpu_gemm(const Dtype* input, } for (int g = 0; g < group_; ++g) { caffe_gpu_gemm(CblasNoTrans, CblasTrans, conv_out_channels_ / group_, - kernel_dim_, conv_out_spatial_dim_, + kernel_dim_ / group_, conv_out_spatial_dim_, (Dtype)1., output + output_offset_ * g, col_buff + col_offset_ * g, (Dtype)1., weights + weight_offset_ * g); } @@ -372,7 +287,7 @@ void BaseConvolutionLayer::weight_gpu_gemm(const Dtype* input, template void BaseConvolutionLayer::backward_gpu_bias(Dtype* bias, const Dtype* input) { - caffe_gpu_gemv(CblasNoTrans, num_output_, out_spatial_dim_, 1., + caffe_gpu_gemv(CblasNoTrans, num_output_, height_out_ * width_out_, 1., input, bias_multiplier_.gpu_data(), 1., bias); } diff --git a/src/caffe/layers/conv_layer.cpp b/src/caffe/layers/conv_layer.cpp index fb50bb095ed..928ef5ee468 100644 --- a/src/caffe/layers/conv_layer.cpp +++ b/src/caffe/layers/conv_layer.cpp @@ -10,17 +10,10 @@ namespace caffe { template void ConvolutionLayer::compute_output_shape() { - const int* kernel_shape_data = this->kernel_shape_.cpu_data(); - const int* stride_data = this->stride_.cpu_data(); - const int* pad_data = this->pad_.cpu_data(); - this->output_shape_.clear(); - for (int i = 0; i < this->num_spatial_axes_; ++i) { - // i + 1 to skip channel axis - const int input_dim = this->input_shape(i + 1); - const int output_dim = (input_dim + 2 * pad_data[i] - kernel_shape_data[i]) - / stride_data[i] + 1; - this->output_shape_.push_back(output_dim); - } + this->height_out_ = (this->height_ + 2 * this->pad_h_ - this->kernel_h_) + / this->stride_h_ + 1; + this->width_out_ = (this->width_ + 2 * this->pad_w_ - this->kernel_w_) + / this->stride_w_ + 1; } template @@ -31,11 +24,11 @@ void ConvolutionLayer::Forward_cpu(const vector*>& bottom, const Dtype* bottom_data = 
bottom[i]->cpu_data(); Dtype* top_data = top[i]->mutable_cpu_data(); for (int n = 0; n < this->num_; ++n) { - this->forward_cpu_gemm(bottom_data + n * this->bottom_dim_, weight, - top_data + n * this->top_dim_); + this->forward_cpu_gemm(bottom_data + bottom[i]->offset(n), weight, + top_data + top[i]->offset(n)); if (this->bias_term_) { const Dtype* bias = this->blobs_[1]->cpu_data(); - this->forward_cpu_bias(top_data + n * this->top_dim_, bias); + this->forward_cpu_bias(top_data + top[i]->offset(n), bias); } } } @@ -54,20 +47,20 @@ void ConvolutionLayer::Backward_cpu(const vector*>& top, if (this->bias_term_ && this->param_propagate_down_[1]) { Dtype* bias_diff = this->blobs_[1]->mutable_cpu_diff(); for (int n = 0; n < this->num_; ++n) { - this->backward_cpu_bias(bias_diff, top_diff + n * this->top_dim_); + this->backward_cpu_bias(bias_diff, top_diff + top[i]->offset(n)); } } if (this->param_propagate_down_[0] || propagate_down[i]) { for (int n = 0; n < this->num_; ++n) { // gradient w.r.t. weight. Note that we will accumulate diffs. if (this->param_propagate_down_[0]) { - this->weight_cpu_gemm(bottom_data + n * this->bottom_dim_, - top_diff + n * this->top_dim_, weight_diff); + this->weight_cpu_gemm(bottom_data + bottom[i]->offset(n), + top_diff + top[i]->offset(n), weight_diff); } // gradient w.r.t. bottom data, if necessary. 
if (propagate_down[i]) { - this->backward_cpu_gemm(top_diff + n * this->top_dim_, weight, - bottom_diff + n * this->bottom_dim_); + this->backward_cpu_gemm(top_diff + top[i]->offset(n), weight, + bottom_diff + bottom[i]->offset(n)); } } } diff --git a/src/caffe/layers/conv_layer.cu b/src/caffe/layers/conv_layer.cu index b429d2b47d0..b8a98ff7cc9 100644 --- a/src/caffe/layers/conv_layer.cu +++ b/src/caffe/layers/conv_layer.cu @@ -16,11 +16,11 @@ void ConvolutionLayer::Forward_gpu(const vector*>& bottom, const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); for (int n = 0; n < this->num_; ++n) { - this->forward_gpu_gemm(bottom_data + n * this->bottom_dim_, weight, - top_data + n * this->top_dim_); + this->forward_gpu_gemm(bottom_data + bottom[i]->offset(n), weight, + top_data + top[i]->offset(n)); if (this->bias_term_) { const Dtype* bias = this->blobs_[1]->gpu_data(); - this->forward_gpu_bias(top_data + n * this->top_dim_, bias); + this->forward_gpu_bias(top_data + top[i]->offset(n), bias); } } } @@ -37,7 +37,7 @@ void ConvolutionLayer::Backward_gpu(const vector*>& top, if (this->bias_term_ && this->param_propagate_down_[1]) { Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); for (int n = 0; n < this->num_; ++n) { - this->backward_gpu_bias(bias_diff, top_diff + n * this->top_dim_); + this->backward_gpu_bias(bias_diff, top_diff + top[i]->offset(n)); } } if (this->param_propagate_down_[0] || propagate_down[i]) { @@ -46,13 +46,13 @@ void ConvolutionLayer::Backward_gpu(const vector*>& top, for (int n = 0; n < this->num_; ++n) { // gradient w.r.t. weight. Note that we will accumulate diffs. if (this->param_propagate_down_[0]) { - this->weight_gpu_gemm(bottom_data + n * this->bottom_dim_, - top_diff + n * this->top_dim_, weight_diff); + this->weight_gpu_gemm(bottom_data + bottom[i]->offset(n), + top_diff + top[i]->offset(n), weight_diff); } // gradient w.r.t. bottom data, if necessary. 
if (propagate_down[i]) { - this->backward_gpu_gemm(top_diff + n * this->top_dim_, weight, - bottom_diff + n * this->bottom_dim_); + this->backward_gpu_gemm(top_diff + top[i]->offset(n), weight, + bottom_diff + bottom[i]->offset(n)); } } } diff --git a/src/caffe/layers/cudnn_conv_layer.cpp b/src/caffe/layers/cudnn_conv_layer.cpp index 3514fe2aba5..104d2b9d669 100644 --- a/src/caffe/layers/cudnn_conv_layer.cpp +++ b/src/caffe/layers/cudnn_conv_layer.cpp @@ -34,15 +34,14 @@ void CuDNNConvolutionLayer::LayerSetUp( } // Set the indexing parameters. + weight_offset_ = (this->num_output_ / this->group_) + * (this->channels_ / this->group_) * this->kernel_h_ * this->kernel_w_; bias_offset_ = (this->num_output_ / this->group_); // Create filter descriptor. - const int* kernel_shape_data = this->kernel_shape_.cpu_data(); - const int kernel_h = kernel_shape_data[0]; - const int kernel_w = kernel_shape_data[1]; cudnn::createFilterDesc(&filter_desc_, this->num_output_ / this->group_, this->channels_ / this->group_, - kernel_h, kernel_w); + this->kernel_h_, this->kernel_w_); // Create tensor descriptor(s) for data and corresponding convolution(s). for (int i = 0; i < bottom.size(); i++) { @@ -69,36 +68,29 @@ template void CuDNNConvolutionLayer::Reshape( const vector*>& bottom, const vector*>& top) { ConvolutionLayer::Reshape(bottom, top); - CHECK_EQ(2, this->num_spatial_axes_) - << "CuDNNConvolution input must have 2 spatial axes " - << "(e.g., height and width). 
" - << "Use 'engine: CAFFE' for general ND convolution."; - bottom_offset_ = this->bottom_dim_ / this->group_; - top_offset_ = this->top_dim_ / this->group_; - const int height = bottom[0]->shape(this->channel_axis_ + 1); - const int width = bottom[0]->shape(this->channel_axis_ + 2); - const int height_out = top[0]->shape(this->channel_axis_ + 1); - const int width_out = top[0]->shape(this->channel_axis_ + 2); - const int* pad_data = this->pad_.cpu_data(); - const int pad_h = pad_data[0]; - const int pad_w = pad_data[1]; - const int* stride_data = this->stride_.cpu_data(); - const int stride_h = stride_data[0]; - const int stride_w = stride_data[1]; + bottom_offset_ = (this->channels_ / this->group_) + * this->height_ * this->width_; + top_offset_ = (this->num_output_ / this->group_) + * this->height_out_ * this->width_out_; for (int i = 0; i < bottom.size(); i++) { cudnn::setTensor4dDesc(&bottom_descs_[i], this->num_, - this->channels_ / this->group_, height, width, - this->channels_ * height * width, - height * width, width, 1); + this->channels_ / this->group_, + this->height_, this->width_, + this->channels_ * this->height_ * this->width_, + this->height_ * this->width_, + this->width_, 1); cudnn::setTensor4dDesc(&top_descs_[i], this->num_, - this->num_output_ / this->group_, height_out, width_out, - this->num_output_ * this->out_spatial_dim_, - this->out_spatial_dim_, width_out, 1); + this->num_output_ / this->group_, + this->height_out_, this->width_out_, + this->num_output_ * this->height_out_ * this->width_out_, + this->height_out_ * this->width_out_, + this->width_out_, 1); cudnn::setConvolutionDesc(&conv_descs_[i], bottom_descs_[i], - filter_desc_, pad_h, pad_w, stride_h, stride_w); + filter_desc_, this->pad_h_, this->pad_w_, + this->stride_h_, this->stride_w_); } // Tensor descriptor for bias. 
diff --git a/src/caffe/layers/cudnn_conv_layer.cu b/src/caffe/layers/cudnn_conv_layer.cu index 691152021a3..b4e802e13d1 100644 --- a/src/caffe/layers/cudnn_conv_layer.cu +++ b/src/caffe/layers/cudnn_conv_layer.cu @@ -14,15 +14,15 @@ __global__ void sync_conv_groups() { } template void CuDNNConvolutionLayer::Forward_gpu( const vector*>& bottom, const vector*>& top) { - const int* kernel_shape_data = this->kernel_shape_.cpu_data(); - const int kernel_h = kernel_shape_data[0]; - const int kernel_w = kernel_shape_data[1]; - const size_t workspace_limit_bytes = - kernel_h * kernel_w * this->channels_ * sizeof(int) + 1; - const Dtype* weight = this->blobs_[0]->gpu_data(); for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); + const Dtype* weight = this->blobs_[0]->gpu_data(); + + size_t workspace_limit_bytes = this->kernel_h_ * + this->kernel_w_ * + this->channels_ * + sizeof(int) + 1; // Forward through cuDNN in parallel over groups. for (int g = 0; g < this->group_; g++) { @@ -69,7 +69,7 @@ void CuDNNConvolutionLayer::Forward_gpu( CUDNN_CHECK(cudnnConvolutionForward(handle_[g], cudnn::dataType::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, - filter_desc_, weight + this->weight_offset_ * g, + filter_desc_, weight + weight_offset_ * g, conv_descs_[i], algo, workspace, workspaceSizeInBytes, cudnn::dataType::zero, @@ -128,7 +128,7 @@ void CuDNNConvolutionLayer::Backward_gpu(const vector*>& top, top_descs_[i], top_diff + top_offset_ * g, conv_descs_[i], cudnn::dataType::one, - filter_desc_, weight_diff + this->weight_offset_ * g)); + filter_desc_, weight_diff + weight_offset_ * g)); } // Gradient w.r.t. bottom data. 
@@ -139,7 +139,7 @@ void CuDNNConvolutionLayer::Backward_gpu(const vector*>& top, Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); CUDNN_CHECK(cudnnConvolutionBackwardData(handle_[2*this->group_ + g], cudnn::dataType::one, - filter_desc_, weight + this->weight_offset_ * g, + filter_desc_, weight + weight_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, conv_descs_[i], cudnn::dataType::zero, diff --git a/src/caffe/layers/deconv_layer.cpp b/src/caffe/layers/deconv_layer.cpp index 91aabb315b2..a4612963b6b 100644 --- a/src/caffe/layers/deconv_layer.cpp +++ b/src/caffe/layers/deconv_layer.cpp @@ -10,17 +10,10 @@ namespace caffe { template void DeconvolutionLayer::compute_output_shape() { - const int* kernel_shape_data = this->kernel_shape_.cpu_data(); - const int* stride_data = this->stride_.cpu_data(); - const int* pad_data = this->pad_.cpu_data(); - this->output_shape_.clear(); - for (int i = 0; i < this->num_spatial_axes_; ++i) { - // i + 1 to skip channel axis - const int input_dim = this->input_shape(i + 1); - const int output_dim = stride_data[i] * (input_dim - 1) - + kernel_shape_data[i] - 2 * pad_data[i]; - this->output_shape_.push_back(output_dim); - } + this->height_out_ = this->stride_h_ * (this->height_ - 1) + this->kernel_h_ + - 2 * this->pad_h_; + this->width_out_ = this->stride_w_ * (this->width_ - 1) + this->kernel_w_ + - 2 * this->pad_w_; } template @@ -31,11 +24,11 @@ void DeconvolutionLayer::Forward_cpu(const vector*>& bottom, const Dtype* bottom_data = bottom[i]->cpu_data(); Dtype* top_data = top[i]->mutable_cpu_data(); for (int n = 0; n < this->num_; ++n) { - this->backward_cpu_gemm(bottom_data + n * this->bottom_dim_, weight, - top_data + n * this->top_dim_); + this->backward_cpu_gemm(bottom_data + bottom[i]->offset(n), weight, + top_data + top[i]->offset(n)); if (this->bias_term_) { const Dtype* bias = this->blobs_[1]->cpu_data(); - this->forward_cpu_bias(top_data + n * this->top_dim_, bias); + this->forward_cpu_bias(top_data + 
top[i]->offset(n), bias); } } } @@ -54,21 +47,21 @@ void DeconvolutionLayer::Backward_cpu(const vector*>& top, if (this->bias_term_ && this->param_propagate_down_[1]) { Dtype* bias_diff = this->blobs_[1]->mutable_cpu_diff(); for (int n = 0; n < this->num_; ++n) { - this->backward_cpu_bias(bias_diff, top_diff + n * this->top_dim_); + this->backward_cpu_bias(bias_diff, top_diff + top[i]->offset(n)); } } if (this->param_propagate_down_[0] || propagate_down[i]) { for (int n = 0; n < this->num_; ++n) { // Gradient w.r.t. weight. Note that we will accumulate diffs. if (this->param_propagate_down_[0]) { - this->weight_cpu_gemm(top_diff + n * this->top_dim_, - bottom_data + n * this->bottom_dim_, weight_diff); + this->weight_cpu_gemm(top_diff + top[i]->offset(n), + bottom_data + bottom[i]->offset(n), weight_diff); } // Gradient w.r.t. bottom data, if necessary, reusing the column buffer // we might have just computed above. if (propagate_down[i]) { - this->forward_cpu_gemm(top_diff + n * this->top_dim_, weight, - bottom_diff + n * this->bottom_dim_, + this->forward_cpu_gemm(top_diff + top[i]->offset(n), weight, + bottom_diff + bottom[i]->offset(n), this->param_propagate_down_[0]); } } diff --git a/src/caffe/layers/deconv_layer.cu b/src/caffe/layers/deconv_layer.cu index 5dbdcc3149f..8a1eed8aa16 100644 --- a/src/caffe/layers/deconv_layer.cu +++ b/src/caffe/layers/deconv_layer.cu @@ -16,11 +16,11 @@ void DeconvolutionLayer::Forward_gpu(const vector*>& bottom, const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); for (int n = 0; n < this->num_; ++n) { - this->backward_gpu_gemm(bottom_data + n * this->bottom_dim_, weight, - top_data + n * this->top_dim_); + this->backward_gpu_gemm(bottom_data + bottom[i]->offset(n), weight, + top_data + top[i]->offset(n)); if (this->bias_term_) { const Dtype* bias = this->blobs_[1]->gpu_data(); - this->forward_gpu_bias(top_data + n * this->top_dim_, bias); + this->forward_gpu_bias(top_data + 
top[i]->offset(n), bias); } } } @@ -39,20 +39,20 @@ void DeconvolutionLayer::Backward_gpu(const vector*>& top, if (this->bias_term_ && this->param_propagate_down_[1]) { Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); for (int n = 0; n < this->num_; ++n) { - this->backward_gpu_bias(bias_diff, top_diff + n * this->top_dim_); + this->backward_gpu_bias(bias_diff, top_diff + top[i]->offset(n)); } } if (this->param_propagate_down_[0] || propagate_down[i]) { for (int n = 0; n < this->num_; ++n) { // gradient w.r.t. weight. Note that we will accumulate diffs. if (this->param_propagate_down_[0]) { - this->weight_gpu_gemm(top_diff + n * this->top_dim_, - bottom_data + n * this->bottom_dim_, weight_diff); + this->weight_gpu_gemm(top_diff + top[i]->offset(n), + bottom_data + bottom[i]->offset(n), weight_diff); } // gradient w.r.t. bottom data, if necessary. if (propagate_down[i]) { - this->forward_gpu_gemm(top_diff + n * this->top_dim_, weight, - bottom_diff + n * this->bottom_dim_, + this->forward_gpu_gemm(top_diff + top[i]->offset(n), weight, + bottom_diff + bottom[i]->offset(n), this->param_propagate_down_[0]); } } diff --git a/src/caffe/layers/im2col_layer.cpp b/src/caffe/layers/im2col_layer.cpp index 595c9dbbe5e..1c802714e33 100644 --- a/src/caffe/layers/im2col_layer.cpp +++ b/src/caffe/layers/im2col_layer.cpp @@ -11,106 +11,54 @@ template void Im2colLayer::LayerSetUp(const vector*>& bottom, const vector*>& top) { ConvolutionParameter conv_param = this->layer_param_.convolution_param(); - force_nd_im2col_ = conv_param.force_nd_im2col(); - const int input_num_dims = bottom[0]->shape().size(); - channel_axis_ = bottom[0]->CanonicalAxisIndex(conv_param.axis()); - const int first_spatial_dim = channel_axis_ + 1; - num_spatial_axes_ = input_num_dims - first_spatial_dim; - CHECK_GE(num_spatial_axes_, 1); - vector dim_blob_shape(1, num_spatial_axes_); - // Setup filter kernel dimensions (kernel_shape_). 
- kernel_shape_.Reshape(dim_blob_shape); - int* kernel_shape_data = kernel_shape_.mutable_cpu_data(); - if (conv_param.has_kernel_h() || conv_param.has_kernel_w()) { - CHECK_EQ(num_spatial_axes_, 2) - << "kernel_h & kernel_w can only be used for 2D convolution."; - CHECK_EQ(0, conv_param.kernel_size_size()) - << "Either kernel_size or kernel_h/w should be specified; not both."; - kernel_shape_data[0] = conv_param.kernel_h(); - kernel_shape_data[1] = conv_param.kernel_w(); + CHECK(!conv_param.has_kernel_size() != + !(conv_param.has_kernel_h() && conv_param.has_kernel_w())) + << "Filter size is kernel_size OR kernel_h and kernel_w; not both"; + CHECK(conv_param.has_kernel_size() || + (conv_param.has_kernel_h() && conv_param.has_kernel_w())) + << "For non-square filters both kernel_h and kernel_w are required."; + CHECK((!conv_param.has_pad() && conv_param.has_pad_h() + && conv_param.has_pad_w()) + || (!conv_param.has_pad_h() && !conv_param.has_pad_w())) + << "pad is pad OR pad_h and pad_w are required."; + CHECK((!conv_param.has_stride() && conv_param.has_stride_h() + && conv_param.has_stride_w()) + || (!conv_param.has_stride_h() && !conv_param.has_stride_w())) + << "Stride is stride OR stride_h and stride_w are required."; + if (conv_param.has_kernel_size()) { + kernel_h_ = kernel_w_ = conv_param.kernel_size(); } else { - const int num_kernel_dims = conv_param.kernel_size_size(); - CHECK(num_kernel_dims == 1 || num_kernel_dims == num_spatial_axes_) - << "kernel_size must be specified once, or once per spatial dimension " - << "(kernel_size specified " << num_kernel_dims << " times; " - << num_spatial_axes_ << " spatial dims);"; - for (int i = 0; i < num_spatial_axes_; ++i) { - kernel_shape_data[i] = - conv_param.kernel_size((num_kernel_dims == 1) ? 
0 : i); - } + kernel_h_ = conv_param.kernel_h(); + kernel_w_ = conv_param.kernel_w(); } - for (int i = 0; i < num_spatial_axes_; ++i) { - CHECK_GT(kernel_shape_data[i], 0) << "Filter dimensions must be nonzero."; - } - // Setup stride dimensions (stride_). - stride_.Reshape(dim_blob_shape); - int* stride_data = stride_.mutable_cpu_data(); - if (conv_param.has_stride_h() || conv_param.has_stride_w()) { - CHECK_EQ(num_spatial_axes_, 2) - << "stride_h & stride_w can only be used for 2D convolution."; - CHECK_EQ(0, conv_param.stride_size()) - << "Either stride or stride_h/w should be specified; not both."; - stride_data[0] = conv_param.stride_h(); - stride_data[1] = conv_param.stride_w(); + CHECK_GT(kernel_h_, 0) << "Filter dimensions cannot be zero."; + CHECK_GT(kernel_w_, 0) << "Filter dimensions cannot be zero."; + if (!conv_param.has_pad_h()) { + pad_h_ = pad_w_ = conv_param.pad(); } else { - const int num_stride_dims = conv_param.stride_size(); - CHECK(num_stride_dims == 0 || num_stride_dims == 1 || - num_stride_dims == num_spatial_axes_) - << "stride must be specified once, or once per spatial dimension " - << "(stride specified " << num_stride_dims << " times; " - << num_spatial_axes_ << " spatial dims);"; - const int kDefaultStride = 1; - for (int i = 0; i < num_spatial_axes_; ++i) { - stride_data[i] = (num_stride_dims == 0) ? kDefaultStride : - conv_param.stride((num_stride_dims == 1) ? 0 : i); - CHECK_GT(stride_data[i], 0) << "Stride dimensions must be nonzero."; - } + pad_h_ = conv_param.pad_h(); + pad_w_ = conv_param.pad_w(); } - // Setup pad dimensions (pad_). 
- pad_.Reshape(dim_blob_shape); - int* pad_data = pad_.mutable_cpu_data(); - if (conv_param.has_pad_h() || conv_param.has_pad_w()) { - CHECK_EQ(num_spatial_axes_, 2) - << "pad_h & pad_w can only be used for 2D convolution."; - CHECK_EQ(0, conv_param.pad_size()) - << "Either pad or pad_h/w should be specified; not both."; - pad_data[0] = conv_param.pad_h(); - pad_data[1] = conv_param.pad_w(); + if (!conv_param.has_stride_h()) { + stride_h_ = stride_w_ = conv_param.stride(); } else { - const int num_pad_dims = conv_param.pad_size(); - CHECK(num_pad_dims == 0 || num_pad_dims == 1 || - num_pad_dims == num_spatial_axes_) - << "pad must be specified once, or once per spatial dimension " - << "(pad specified " << num_pad_dims << " times; " - << num_spatial_axes_ << " spatial dims);"; - const int kDefaultPad = 0; - for (int i = 0; i < num_spatial_axes_; ++i) { - pad_data[i] = (num_pad_dims == 0) ? kDefaultPad : - conv_param.pad((num_pad_dims == 1) ? 0 : i); - } + stride_h_ = conv_param.stride_h(); + stride_w_ = conv_param.stride_w(); } } template void Im2colLayer::Reshape(const vector*>& bottom, const vector*>& top) { - vector top_shape = bottom[0]->shape(); - const int* kernel_shape_data = kernel_shape_.cpu_data(); - const int* stride_data = stride_.cpu_data(); - const int* pad_data = pad_.cpu_data(); - for (int i = 0; i < num_spatial_axes_; ++i) { - top_shape[channel_axis_] *= kernel_shape_data[i]; - const int input_dim = bottom[0]->shape(channel_axis_ + i + 1); - const int output_dim = (input_dim + 2 * pad_data[i] - kernel_shape_data[i]) - / stride_data[i] + 1; - top_shape[channel_axis_ + i + 1] = output_dim; - } - top[0]->Reshape(top_shape); - num_ = bottom[0]->count(0, channel_axis_); - bottom_dim_ = bottom[0]->count(channel_axis_); - top_dim_ = top[0]->count(channel_axis_); - - channels_ = bottom[0]->shape(channel_axis_); + CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, " + << "corresponding to (num, channels, height, width)"; + channels_ = 
bottom[0]->channels(); + height_ = bottom[0]->height(); + width_ = bottom[0]->width(); + top[0]->Reshape( + bottom[0]->num(), channels_ * kernel_h_ * kernel_w_, + (height_ + 2 * pad_h_ - kernel_h_) / stride_h_ + 1, + (width_ + 2 * pad_w_ - kernel_w_) / stride_w_ + 1); } template @@ -118,27 +66,10 @@ void Im2colLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); - for (int n = 0; n < num_; ++n) { - DCHECK_EQ(bottom[0]->shape().size() - channel_axis_, num_spatial_axes_ + 1); - DCHECK_EQ(top[0]->shape().size() - channel_axis_, num_spatial_axes_ + 1); - DCHECK_EQ(kernel_shape_.count(), num_spatial_axes_); - DCHECK_EQ(pad_.count(), num_spatial_axes_); - DCHECK_EQ(stride_.count(), num_spatial_axes_); - if (!force_nd_im2col_ && num_spatial_axes_ == 2) { - im2col_cpu(bottom_data + n * bottom_dim_, channels_, - bottom[0]->shape(channel_axis_ + 1), - bottom[0]->shape(channel_axis_ + 2), - kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], - pad_.cpu_data()[0], pad_.cpu_data()[1], - stride_.cpu_data()[0], stride_.cpu_data()[1], - top_data + n * top_dim_); - } else { - im2col_nd_cpu(bottom_data + n * bottom_dim_, num_spatial_axes_, - bottom[0]->shape().data() + channel_axis_, - top[0]->shape().data() + channel_axis_, - kernel_shape_.cpu_data(), pad_.cpu_data(), stride_.cpu_data(), - top_data + n * top_dim_); - } + for (int n = 0; n < bottom[0]->num(); ++n) { + im2col_cpu(bottom_data + bottom[0]->offset(n), channels_, height_, + width_, kernel_h_, kernel_w_, pad_h_, pad_w_, + stride_h_, stride_w_, top_data + top[0]->offset(n)); } } @@ -147,22 +78,10 @@ void Im2colLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* top_diff = top[0]->cpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); - for (int n = 0; n < num_; ++n) { - if (!force_nd_im2col_ && num_spatial_axes_ == 2) { - col2im_cpu(top_diff + 
n * top_dim_, channels_, - bottom[0]->shape(channel_axis_ + 1), - bottom[0]->shape(channel_axis_ + 2), - kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], - pad_.cpu_data()[0], pad_.cpu_data()[1], - stride_.cpu_data()[0], stride_.cpu_data()[1], - bottom_diff + n * bottom_dim_); - } else { - col2im_nd_cpu(top_diff + n * top_dim_, num_spatial_axes_, - bottom[0]->shape().data() + channel_axis_, - top[0]->shape().data() + channel_axis_, - kernel_shape_.cpu_data(), pad_.cpu_data(), stride_.cpu_data(), - bottom_diff + n * bottom_dim_); - } + for (int n = 0; n < top[0]->num(); ++n) { + col2im_cpu(top_diff + top[0]->offset(n), channels_, height_, width_, + kernel_h_, kernel_w_, pad_h_, pad_w_, + stride_h_, stride_w_, bottom_diff + bottom[0]->offset(n)); } } diff --git a/src/caffe/layers/im2col_layer.cu b/src/caffe/layers/im2col_layer.cu index cd507623c78..9c338b14cb7 100644 --- a/src/caffe/layers/im2col_layer.cu +++ b/src/caffe/layers/im2col_layer.cu @@ -12,23 +12,10 @@ void Im2colLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); - const int num_kernels = channels_ * top[0]->count(channel_axis_ + 1); - for (int n = 0; n < num_; ++n) { - if (!force_nd_im2col_ && num_spatial_axes_ == 2) { - im2col_gpu(bottom_data + n * bottom_dim_, channels_, - bottom[0]->shape(channel_axis_ + 1), - bottom[0]->shape(channel_axis_ + 2), - kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], - pad_.cpu_data()[0], pad_.cpu_data()[1], - stride_.cpu_data()[0], stride_.cpu_data()[1], - top_data + n * top_dim_); - } else { - im2col_nd_gpu(bottom_data + n * bottom_dim_, num_spatial_axes_, - num_kernels, bottom[0]->gpu_shape() + channel_axis_, - top[0]->gpu_shape() + channel_axis_, - kernel_shape_.gpu_data(), pad_.gpu_data(), stride_.gpu_data(), - top_data + n * top_dim_); - } + for (int n = 0; n < bottom[0]->num(); ++n) { + im2col_gpu(bottom_data + bottom[0]->offset(n), 
channels_, height_, + width_, kernel_h_, kernel_w_, pad_h_, pad_w_, + stride_h_, stride_w_, top_data + top[0]->offset(n)); } } @@ -37,22 +24,10 @@ void Im2colLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); - for (int n = 0; n < num_; ++n) { - if (!force_nd_im2col_ && num_spatial_axes_ == 2) { - col2im_gpu(top_diff + n * top_dim_, channels_, - bottom[0]->shape(channel_axis_ + 1), - bottom[0]->shape(channel_axis_ + 2), - kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], - pad_.cpu_data()[0], pad_.cpu_data()[1], - stride_.cpu_data()[0], stride_.cpu_data()[1], - bottom_diff + n * bottom_dim_); - } else { - col2im_nd_gpu(top_diff + n * top_dim_, num_spatial_axes_, bottom_dim_, - bottom[0]->gpu_shape() + channel_axis_, - top[0]->gpu_shape() + channel_axis_, - kernel_shape_.gpu_data(), pad_.gpu_data(), stride_.gpu_data(), - bottom_diff + n * bottom_dim_); - } + for (int n = 0; n < top[0]->num(); ++n) { + col2im_gpu(top_diff + top[0]->offset(n), channels_, height_, width_, + kernel_h_, kernel_w_, pad_h_, pad_w_, + stride_h_, stride_w_, bottom_diff + bottom[0]->offset(n)); } } diff --git a/src/caffe/test/test_convolution_layer.cpp b/src/caffe/test/test_convolution_layer.cpp index 9df979a2d27..67d41fff844 100644 --- a/src/caffe/test/test_convolution_layer.cpp +++ b/src/caffe/test/test_convolution_layer.cpp @@ -19,87 +19,54 @@ template void caffe_conv(const Blob* in, ConvolutionParameter* conv_param, const vector > >& weights, Blob* out) { - const bool has_depth = (out->num_axes() == 5); - if (!has_depth) { CHECK_EQ(4, out->num_axes()); } // Kernel size, stride, and pad int kernel_h, kernel_w; - if (conv_param->has_kernel_h() || conv_param->has_kernel_w()) { + if (conv_param->has_kernel_size()) { + kernel_h = kernel_w = conv_param->kernel_size(); + } else { kernel_h = conv_param->kernel_h(); kernel_w = conv_param->kernel_w(); 
- } else { - kernel_h = kernel_w = conv_param->kernel_size(0); } int pad_h, pad_w; - if (conv_param->has_pad_h() || conv_param->has_pad_w()) { + if (!conv_param->has_pad_h()) { + pad_h = pad_w = conv_param->pad(); + } else { pad_h = conv_param->pad_h(); pad_w = conv_param->pad_w(); - } else { - pad_h = pad_w = conv_param->pad_size() ? conv_param->pad(0) : 0; } int stride_h, stride_w; - if (conv_param->has_stride_h() || conv_param->has_stride_w()) { + if (!conv_param->has_stride_h()) { + stride_h = stride_w = conv_param->stride(); + } else { stride_h = conv_param->stride_h(); stride_w = conv_param->stride_w(); - } else { - stride_h = stride_w = conv_param->stride_size() ? conv_param->stride(0) : 1; - } - int kernel_d, pad_d, stride_d; - if (has_depth) { - kernel_d = kernel_h; - stride_d = stride_h; - pad_d = pad_h; - } else { - kernel_d = stride_d = 1; - pad_d = 0; } // Groups int groups = conv_param->group(); - int o_g = out->shape(1) / groups; - int k_g = in->shape(1) / groups; + int o_g = out->channels() / groups; + int k_g = in->channels() / groups; int o_head, k_head; // Convolution - vector weight_offset(4 + has_depth); - vector in_offset(4 + has_depth); - vector out_offset(4 + has_depth); + const Dtype* in_data = in->cpu_data(); + const Dtype* weight_data = weights[0]->cpu_data(); Dtype* out_data = out->mutable_cpu_data(); - for (int n = 0; n < out->shape(0); n++) { + for (int n = 0; n < out->num(); n++) { for (int g = 0; g < groups; g++) { o_head = o_g * g; k_head = k_g * g; for (int o = 0; o < o_g; o++) { for (int k = 0; k < k_g; k++) { - for (int z = 0; z < (has_depth ? 
out->shape(2) : 1); z++) { - for (int y = 0; y < out->shape(2 + has_depth); y++) { - for (int x = 0; x < out->shape(3 + has_depth); x++) { - for (int r = 0; r < kernel_d; r++) { - for (int p = 0; p < kernel_h; p++) { - for (int q = 0; q < kernel_w; q++) { - int in_z = z * stride_d - pad_d + r; - int in_y = y * stride_h - pad_h + p; - int in_x = x * stride_w - pad_w + q; - if (in_z >= 0 && in_z < (has_depth ? in->shape(2) : 1) - && in_y >= 0 && in_y < in->shape(2 + has_depth) - && in_x >= 0 && in_x < in->shape(3 + has_depth)) { - weight_offset[0] = o + o_head; - weight_offset[1] = k; - if (has_depth) { weight_offset[2] = r; } - weight_offset[2 + has_depth] = p; - weight_offset[3 + has_depth] = q; - in_offset[0] = n; - in_offset[1] = k + k_head; - if (has_depth) { in_offset[2] = in_z; } - in_offset[2 + has_depth] = in_y; - in_offset[3 + has_depth] = in_x; - out_offset[0] = n; - out_offset[1] = o + o_head; - if (has_depth) { out_offset[2] = z; } - out_offset[2 + has_depth] = y; - out_offset[3 + has_depth] = x; - out_data[out->offset(out_offset)] += - in->data_at(in_offset) - * weights[0]->data_at(weight_offset); - } - } + for (int y = 0; y < out->height(); y++) { + for (int x = 0; x < out->width(); x++) { + for (int p = 0; p < kernel_h; p++) { + for (int q = 0; q < kernel_w; q++) { + int in_y = y * stride_h - pad_h + p; + int in_x = x * stride_w - pad_w + q; + if (in_y >= 0 && in_y < in->height() + && in_x >= 0 && in_x < in->width()) { + out_data[out->offset(n, o + o_head, y, x)] += + in_data[in->offset(n, k + k_head, in_y, in_x)] + * weight_data[weights[0]->offset(o + o_head, k, p, q)]; } } } @@ -112,18 +79,11 @@ void caffe_conv(const Blob* in, ConvolutionParameter* conv_param, // Bias if (conv_param->bias_term()) { const Dtype* bias_data = weights[1]->cpu_data(); - for (int n = 0; n < out->shape(0); n++) { - for (int o = 0; o < out->shape(1); o++) { - for (int z = 0; z < (has_depth ? 
out->shape(2) : 1); z++) { - for (int y = 0; y < out->shape(2 + has_depth); y++) { - for (int x = 0; x < out->shape(3 + has_depth); x++) { - out_offset[0] = n; - out_offset[1] = o; - if (has_depth) { out_offset[2] = z; } - out_offset[2 + has_depth] = y; - out_offset[3 + has_depth] = x; - out_data[out->offset(out_offset)] += bias_data[o]; - } + for (int n = 0; n < out->num(); n++) { + for (int o = 0; o < out->channels(); o++) { + for (int y = 0; y < out->height(); y++) { + for (int x = 0; x < out->width(); x++) { + out_data[out->offset(n, o, y, x)] += bias_data[o]; } } } @@ -190,8 +150,8 @@ TYPED_TEST(ConvolutionLayerTest, TestSetup) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - convolution_param->add_kernel_size(3); - convolution_param->add_stride(2); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); convolution_param->set_num_output(4); this->blob_bottom_vec_.push_back(this->blob_bottom_2_); this->blob_top_vec_.push_back(this->blob_top_2_); @@ -228,8 +188,8 @@ TYPED_TEST(ConvolutionLayerTest, TestSimpleConvolution) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - convolution_param->add_kernel_size(3); - convolution_param->add_stride(2); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); convolution_param->set_num_output(4); convolution_param->mutable_weight_filler()->set_type("gaussian"); convolution_param->mutable_bias_filler()->set_type("constant"); @@ -257,98 +217,13 @@ TYPED_TEST(ConvolutionLayerTest, TestSimpleConvolution) { } } -TYPED_TEST(ConvolutionLayerTest, Test0DConvolution) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - ConvolutionParameter* convolution_param = - layer_param.mutable_convolution_param(); - const int kNumOutput = 3; - convolution_param->set_num_output(kNumOutput); - convolution_param->set_axis(3); - 
convolution_param->mutable_weight_filler()->set_type("gaussian"); - convolution_param->mutable_bias_filler()->set_type("gaussian"); - shared_ptr > layer( - new ConvolutionLayer(layer_param)); - vector top_shape = this->blob_bottom_->shape(); - top_shape[3] = kNumOutput; - layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - EXPECT_EQ(top_shape, this->blob_top_->shape()); - layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); - // Check against reference convolution. - vector weight_offset(2); - const Blob* weight = layer->blobs()[0].get(); - const Blob* bias = layer->blobs()[1].get(); - const int num = this->blob_top_->count(3); - const int dim = this->blob_top_->shape(3); - const int bottom_dim = this->blob_bottom_->shape(3); - for (int n = 0; n < num; ++n) { - for (int d = 0; d < dim; ++d) { - weight_offset[0] = d; - Dtype value = bias->cpu_data()[d]; - for (int bottom_d = 0; bottom_d < bottom_dim; ++bottom_d) { - weight_offset[1] = bottom_d; - value += weight->data_at(weight_offset) * - this->blob_bottom_->cpu_data()[n * bottom_dim + bottom_d]; - } - EXPECT_NEAR(value, this->blob_top_->cpu_data()[n * dim + d], 1e-4); - } - } -} - -TYPED_TEST(ConvolutionLayerTest, TestSimple3DConvolution) { - typedef typename TypeParam::Dtype Dtype; - this->blob_bottom_vec_.push_back(this->blob_bottom_2_); - this->blob_top_vec_.push_back(this->blob_top_2_); - vector bottom_shape(5); - bottom_shape[0] = this->blob_bottom_vec_[0]->shape(0); - bottom_shape[1] = this->blob_bottom_vec_[0]->shape(1); - bottom_shape[2] = 5; - bottom_shape[3] = this->blob_bottom_vec_[0]->shape(2); - bottom_shape[4] = this->blob_bottom_vec_[0]->shape(3); - FillerParameter filler_param; - GaussianFiller filler(filler_param); - for (int i = 0; i < this->blob_bottom_vec_.size(); ++i) { - this->blob_bottom_vec_[i]->Reshape(bottom_shape); - filler.Fill(this->blob_bottom_vec_[i]); - } - LayerParameter layer_param; - ConvolutionParameter* convolution_param = - 
layer_param.mutable_convolution_param(); - convolution_param->add_kernel_size(3); - convolution_param->add_stride(2); - convolution_param->set_num_output(4); - convolution_param->mutable_weight_filler()->set_type("gaussian"); - convolution_param->mutable_bias_filler()->set_type("gaussian"); - shared_ptr > layer( - new ConvolutionLayer(layer_param)); - layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); - // Check against reference convolution. - const Dtype* top_data; - const Dtype* ref_top_data; - caffe_conv(this->blob_bottom_, convolution_param, layer->blobs(), - this->MakeReferenceTop(this->blob_top_)); - top_data = this->blob_top_->cpu_data(); - ref_top_data = this->ref_blob_top_->cpu_data(); - for (int i = 0; i < this->blob_top_->count(); ++i) { - EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); - } - caffe_conv(this->blob_bottom_2_, convolution_param, layer->blobs(), - this->MakeReferenceTop(this->blob_top_2_)); - top_data = this->blob_top_2_->cpu_data(); - ref_top_data = this->ref_blob_top_->cpu_data(); - for (int i = 0; i < this->blob_top_->count(); ++i) { - EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); - } -} - TYPED_TEST(ConvolutionLayerTest, Test1x1Convolution) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - convolution_param->add_kernel_size(1); - convolution_param->add_stride(1); + convolution_param->set_kernel_size(1); + convolution_param->set_stride(1); convolution_param->set_num_output(4); convolution_param->mutable_weight_filler()->set_type("gaussian"); convolution_param->mutable_bias_filler()->set_type("constant"); @@ -374,8 +249,8 @@ TYPED_TEST(ConvolutionLayerTest, TestSimpleConvolutionGroup) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - convolution_param->add_kernel_size(3); - 
convolution_param->add_stride(2); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); convolution_param->set_num_output(3); convolution_param->set_group(3); convolution_param->mutable_weight_filler()->set_type("gaussian"); @@ -413,8 +288,8 @@ TYPED_TEST(ConvolutionLayerTest, TestSobelConvolution) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - convolution_param->add_kernel_size(3); - convolution_param->add_stride(2); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); convolution_param->set_num_output(1); convolution_param->set_bias_term(false); shared_ptr > layer( @@ -475,11 +350,14 @@ TYPED_TEST(ConvolutionLayerTest, TestSobelConvolution) { convolution_param->set_bias_term(false); layer.reset(new ConvolutionLayer(layer_param)); layer->blobs().resize(1); - layer->blobs()[0].reset(new Blob(1, 1, 1, 3)); + layer->blobs()[0].reset(new Blob(1, 3, 1, 3)); Dtype* weights_2 = layer->blobs()[0]->mutable_cpu_data(); - weights_2[0] = -1; - weights_2[1] = 0; - weights_2[2] = 1; + for (int c = 0; c < 3; ++c) { + int i = c * 3; // 1 x 3 filter + weights_2[i + 0] = -1; + weights_2[i + 1] = 0; + weights_2[i + 2] = 1; + } layer->SetUp(sep_blob_bottom_vec, sep_blob_top_vec); layer->Forward(sep_blob_bottom_vec, sep_blob_top_vec); // Test equivalence of full and separable filters. 
@@ -490,124 +368,6 @@ TYPED_TEST(ConvolutionLayerTest, TestSobelConvolution) { } } -TYPED_TEST(ConvolutionLayerTest, TestNDAgainst2D) { - typedef typename TypeParam::Dtype Dtype; - const int kernel_h = 11; - const int kernel_w = 13; - vector bottom_shape(4); - bottom_shape[0] = 15; - bottom_shape[1] = 18; - bottom_shape[2] = kernel_h * 2; - bottom_shape[3] = kernel_w * 2; - FillerParameter filler_param; - GaussianFiller filler(filler_param); - for (int i = 0; i < this->blob_bottom_vec_.size(); ++i) { - this->blob_bottom_vec_[i]->Reshape(bottom_shape); - filler.Fill(this->blob_bottom_vec_[i]); - } - LayerParameter layer_param; - ConvolutionParameter* convolution_param = - layer_param.mutable_convolution_param(); - convolution_param->set_num_output(12); - convolution_param->set_bias_term(false); - convolution_param->set_group(6); - convolution_param->set_kernel_h(kernel_h); - convolution_param->set_kernel_w(kernel_w); - convolution_param->mutable_weight_filler()->set_type("gaussian"); - Blob weights; - Blob top_diff; - // Shape and fill weights and top_diff. - bool copy_diff; - bool reshape; - { - ConvolutionLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - top_diff.ReshapeLike(*this->blob_top_); - filler.Fill(&top_diff); - ASSERT_EQ(1, layer.blobs().size()); - copy_diff = false; reshape = true; - weights.CopyFrom(*layer.blobs()[0], copy_diff, reshape); - } - vector propagate_down(1, true); - Blob result_2d; - Blob backward_result_2d; - Blob backward_weight_result_2d; - // Test with 2D im2col - { - caffe_set(this->blob_top_->count(), Dtype(0), - this->blob_top_->mutable_cpu_data()); - caffe_set(this->blob_bottom_->count(), Dtype(0), - this->blob_bottom_->mutable_cpu_diff()); - caffe_set(weights.count(), Dtype(0), weights.mutable_cpu_diff()); - // Do SetUp and Forward; save Forward result in result_2d. 
- convolution_param->set_force_nd_im2col(false); - ConvolutionLayer layer_2d(layer_param); - layer_2d.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - ASSERT_EQ(1, layer_2d.blobs().size()); - copy_diff = false; reshape = false; - layer_2d.blobs()[0]->CopyFrom(weights, copy_diff, reshape); - layer_2d.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - copy_diff = false; reshape = true; - result_2d.CopyFrom(*this->blob_top_, copy_diff, reshape); - // Copy pre-generated top diff into actual top diff; - // do Backward and save result in backward_result_2d. - ASSERT_EQ(this->blob_top_->shape(), top_diff.shape()); - caffe_copy(top_diff.count(), top_diff.cpu_data(), - this->blob_top_->mutable_cpu_diff()); - layer_2d.Backward(this->blob_top_vec_, propagate_down, - this->blob_bottom_vec_); - copy_diff = true; reshape = true; - backward_result_2d.CopyFrom(*this->blob_bottom_, copy_diff, reshape); - backward_weight_result_2d.CopyFrom(weights, copy_diff, reshape); - } - Blob result_nd; - Blob backward_result_nd; - Blob backward_weight_result_nd; - // Test with ND im2col - { - caffe_set(this->blob_top_->count(), Dtype(0), - this->blob_top_->mutable_cpu_data()); - caffe_set(this->blob_bottom_->count(), Dtype(0), - this->blob_bottom_->mutable_cpu_diff()); - caffe_set(weights.count(), Dtype(0), weights.mutable_cpu_diff()); - // Do SetUp and Forward; save Forward result in result_nd. - convolution_param->set_force_nd_im2col(true); - ConvolutionLayer layer_nd(layer_param); - layer_nd.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - ASSERT_EQ(1, layer_nd.blobs().size()); - copy_diff = false; reshape = false; - layer_nd.blobs()[0]->CopyFrom(weights, copy_diff, reshape); - layer_nd.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - copy_diff = false; reshape = true; - result_nd.CopyFrom(*this->blob_top_, copy_diff, reshape); - // Copy pre-generated top diff into actual top diff; - // do Backward and save result in backward_result_nd. 
- ASSERT_EQ(this->blob_top_->shape(), top_diff.shape()); - caffe_copy(top_diff.count(), top_diff.cpu_data(), - this->blob_top_->mutable_cpu_diff()); - layer_nd.Backward(this->blob_top_vec_, propagate_down, - this->blob_bottom_vec_); - copy_diff = true; reshape = true; - backward_result_nd.CopyFrom(*this->blob_bottom_, copy_diff, reshape); - backward_weight_result_nd.CopyFrom(weights, copy_diff, reshape); - } - ASSERT_EQ(result_nd.count(), result_2d.count()); - for (int i = 0; i < result_2d.count(); ++i) { - EXPECT_EQ(result_2d.cpu_data()[i], result_nd.cpu_data()[i]); - } - ASSERT_EQ(backward_result_nd.count(), backward_result_2d.count()); - for (int i = 0; i < backward_result_2d.count(); ++i) { - EXPECT_EQ(backward_result_2d.cpu_diff()[i], - backward_result_nd.cpu_diff()[i]); - } - ASSERT_EQ(backward_weight_result_nd.count(), - backward_weight_result_2d.count()); - for (int i = 0; i < backward_weight_result_2d.count(); ++i) { - EXPECT_EQ(backward_weight_result_2d.cpu_diff()[i], - backward_weight_result_nd.cpu_diff()[i]); - } -} - TYPED_TEST(ConvolutionLayerTest, TestGradient) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; @@ -615,36 +375,8 @@ TYPED_TEST(ConvolutionLayerTest, TestGradient) { layer_param.mutable_convolution_param(); this->blob_bottom_vec_.push_back(this->blob_bottom_2_); this->blob_top_vec_.push_back(this->blob_top_2_); - convolution_param->add_kernel_size(3); - convolution_param->add_stride(2); - convolution_param->set_num_output(2); - convolution_param->mutable_weight_filler()->set_type("gaussian"); - convolution_param->mutable_bias_filler()->set_type("gaussian"); - ConvolutionLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_); -} - -TYPED_TEST(ConvolutionLayerTest, TestGradient3D) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - ConvolutionParameter* convolution_param = - 
layer_param.mutable_convolution_param(); - vector bottom_shape(5); - bottom_shape[0] = this->blob_bottom_vec_[0]->shape(0); - bottom_shape[1] = this->blob_bottom_vec_[0]->shape(1); - bottom_shape[2] = 5; - bottom_shape[3] = this->blob_bottom_vec_[0]->shape(2); - bottom_shape[4] = this->blob_bottom_vec_[0]->shape(3); - FillerParameter filler_param; - GaussianFiller filler(filler_param); - for (int i = 0; i < this->blob_bottom_vec_.size(); ++i) { - this->blob_bottom_vec_[i]->Reshape(bottom_shape); - filler.Fill(this->blob_bottom_vec_[i]); - } - convolution_param->add_kernel_size(3); - convolution_param->add_stride(2); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); convolution_param->set_num_output(2); convolution_param->mutable_weight_filler()->set_type("gaussian"); convolution_param->mutable_bias_filler()->set_type("gaussian"); @@ -661,8 +393,8 @@ TYPED_TEST(ConvolutionLayerTest, Test1x1Gradient) { layer_param.mutable_convolution_param(); this->blob_bottom_vec_.push_back(this->blob_bottom_2_); this->blob_top_vec_.push_back(this->blob_top_2_); - convolution_param->add_kernel_size(1); - convolution_param->add_stride(1); + convolution_param->set_kernel_size(1); + convolution_param->set_stride(1); convolution_param->set_num_output(2); convolution_param->mutable_weight_filler()->set_type("gaussian"); convolution_param->mutable_bias_filler()->set_type("gaussian"); @@ -677,8 +409,8 @@ TYPED_TEST(ConvolutionLayerTest, TestGradientGroup) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - convolution_param->add_kernel_size(3); - convolution_param->add_stride(2); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); convolution_param->set_num_output(3); convolution_param->set_group(3); convolution_param->mutable_weight_filler()->set_type("gaussian"); @@ -740,8 +472,8 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSetupCuDNN) { LayerParameter layer_param; 
ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - convolution_param->add_kernel_size(3); - convolution_param->add_stride(2); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); convolution_param->set_num_output(4); this->blob_bottom_vec_.push_back(this->blob_bottom_2_); this->blob_top_vec_.push_back(this->blob_top_2_); @@ -777,8 +509,8 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionCuDNN) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - convolution_param->add_kernel_size(3); - convolution_param->add_stride(2); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); convolution_param->set_num_output(4); convolution_param->mutable_weight_filler()->set_type("gaussian"); convolution_param->mutable_bias_filler()->set_type("constant"); @@ -810,8 +542,8 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionGroupCuDNN) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - convolution_param->add_kernel_size(3); - convolution_param->add_stride(2); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); convolution_param->set_num_output(3); convolution_param->set_group(3); convolution_param->mutable_weight_filler()->set_type("gaussian"); @@ -849,8 +581,8 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSobelConvolutionCuDNN) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - convolution_param->add_kernel_size(3); - convolution_param->add_stride(2); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); convolution_param->set_num_output(1); convolution_param->set_bias_term(false); shared_ptr > layer( @@ -911,11 +643,14 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSobelConvolutionCuDNN) { convolution_param->set_bias_term(false); layer.reset(new 
CuDNNConvolutionLayer(layer_param)); layer->blobs().resize(1); - layer->blobs()[0].reset(new Blob(1, 1, 1, 3)); + layer->blobs()[0].reset(new Blob(1, 3, 1, 3)); TypeParam* weights_2 = layer->blobs()[0]->mutable_cpu_data(); - weights_2[0] = -1; - weights_2[1] = 0; - weights_2[2] = 1; + for (int c = 0; c < 3; ++c) { + int i = c * 3; // 1 x 3 filter + weights_2[i + 0] = -1; + weights_2[i + 1] = 0; + weights_2[i + 2] = 1; + } layer->SetUp(sep_blob_bottom_vec, sep_blob_top_vec); layer->Forward(sep_blob_bottom_vec, sep_blob_top_vec); // Test equivalence of full and separable filters. @@ -932,8 +667,8 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientCuDNN) { layer_param.mutable_convolution_param(); this->blob_bottom_vec_.push_back(this->blob_bottom_2_); this->blob_top_vec_.push_back(this->blob_top_2_); - convolution_param->add_kernel_size(3); - convolution_param->add_stride(2); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); convolution_param->set_num_output(2); convolution_param->mutable_weight_filler()->set_type("gaussian"); convolution_param->mutable_bias_filler()->set_type("gaussian"); @@ -947,8 +682,8 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientGroupCuDNN) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - convolution_param->add_kernel_size(3); - convolution_param->add_stride(2); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); convolution_param->set_num_output(3); convolution_param->set_group(3); convolution_param->mutable_weight_filler()->set_type("gaussian"); diff --git a/src/caffe/test/test_deconvolution_layer.cpp b/src/caffe/test/test_deconvolution_layer.cpp index 770e7b277ee..fc63d5efbe3 100644 --- a/src/caffe/test/test_deconvolution_layer.cpp +++ b/src/caffe/test/test_deconvolution_layer.cpp @@ -58,8 +58,8 @@ TYPED_TEST(DeconvolutionLayerTest, TestSetup) { LayerParameter layer_param; ConvolutionParameter* convolution_param = 
layer_param.mutable_convolution_param(); - convolution_param->add_kernel_size(3); - convolution_param->add_stride(2); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); convolution_param->set_num_output(4); this->blob_bottom_vec_.push_back(this->blob_bottom_2_); this->blob_top_vec_.push_back(this->blob_top_2_); @@ -96,8 +96,8 @@ TYPED_TEST(DeconvolutionLayerTest, TestSimpleDeconvolution) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - convolution_param->add_kernel_size(3); - convolution_param->add_stride(2); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); convolution_param->set_num_output(4); convolution_param->mutable_weight_filler()->set_type("constant"); convolution_param->mutable_weight_filler()->set_value(1); @@ -144,8 +144,8 @@ TYPED_TEST(DeconvolutionLayerTest, TestGradient) { layer_param.mutable_convolution_param(); this->blob_bottom_vec_.push_back(this->blob_bottom_2_); this->blob_top_vec_.push_back(this->blob_top_2_); - convolution_param->add_kernel_size(2); - convolution_param->add_stride(1); + convolution_param->set_kernel_size(2); + convolution_param->set_stride(1); convolution_param->set_num_output(1); convolution_param->mutable_weight_filler()->set_type("gaussian"); convolution_param->mutable_bias_filler()->set_type("gaussian"); @@ -155,151 +155,4 @@ TYPED_TEST(DeconvolutionLayerTest, TestGradient) { this->blob_top_vec_); } -TYPED_TEST(DeconvolutionLayerTest, TestNDAgainst2D) { - typedef typename TypeParam::Dtype Dtype; - const int kernel_h = 11; - const int kernel_w = 13; - vector bottom_shape(4); - bottom_shape[0] = 15; - bottom_shape[1] = 12; - bottom_shape[2] = kernel_h * 2; - bottom_shape[3] = kernel_w * 2; - FillerParameter filler_param; - GaussianFiller filler(filler_param); - for (int i = 0; i < this->blob_bottom_vec_.size(); ++i) { - this->blob_bottom_vec_[i]->Reshape(bottom_shape); - 
filler.Fill(this->blob_bottom_vec_[i]); - } - LayerParameter layer_param; - ConvolutionParameter* convolution_param = - layer_param.mutable_convolution_param(); - convolution_param->set_num_output(18); - convolution_param->set_bias_term(false); - convolution_param->set_group(6); - convolution_param->set_kernel_h(kernel_h); - convolution_param->set_kernel_w(kernel_w); - convolution_param->mutable_weight_filler()->set_type("gaussian"); - Blob weights; - Blob top_diff; - // Shape and fill weights and top_diff. - bool copy_diff; - bool reshape; - { - DeconvolutionLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - top_diff.ReshapeLike(*this->blob_top_); - filler.Fill(&top_diff); - ASSERT_EQ(1, layer.blobs().size()); - copy_diff = false; reshape = true; - weights.CopyFrom(*layer.blobs()[0], copy_diff, reshape); - } - vector propagate_down(1, true); - Blob result_2d; - Blob backward_result_2d; - Blob backward_weight_result_2d; - // Test with 2D im2col - { - caffe_set(this->blob_top_->count(), Dtype(0), - this->blob_top_->mutable_cpu_data()); - caffe_set(this->blob_bottom_->count(), Dtype(0), - this->blob_bottom_->mutable_cpu_diff()); - caffe_set(weights.count(), Dtype(0), weights.mutable_cpu_diff()); - // Do SetUp and Forward; save Forward result in result_2d. - convolution_param->set_force_nd_im2col(false); - DeconvolutionLayer layer_2d(layer_param); - layer_2d.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - ASSERT_EQ(1, layer_2d.blobs().size()); - copy_diff = false; reshape = false; - layer_2d.blobs()[0]->CopyFrom(weights, copy_diff, reshape); - layer_2d.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - copy_diff = false; reshape = true; - result_2d.CopyFrom(*this->blob_top_, copy_diff, reshape); - // Copy pre-generated top diff into actual top diff; - // do Backward and save result in backward_result_2d. 
- ASSERT_EQ(this->blob_top_->shape(), top_diff.shape()); - caffe_copy(top_diff.count(), top_diff.cpu_data(), - this->blob_top_->mutable_cpu_diff()); - layer_2d.Backward(this->blob_top_vec_, propagate_down, - this->blob_bottom_vec_); - copy_diff = true; reshape = true; - backward_result_2d.CopyFrom(*this->blob_bottom_, copy_diff, reshape); - backward_weight_result_2d.CopyFrom(weights, copy_diff, reshape); - } - Blob result_nd; - Blob backward_result_nd; - Blob backward_weight_result_nd; - // Test with ND im2col - { - caffe_set(this->blob_top_->count(), Dtype(0), - this->blob_top_->mutable_cpu_data()); - caffe_set(this->blob_bottom_->count(), Dtype(0), - this->blob_bottom_->mutable_cpu_diff()); - caffe_set(weights.count(), Dtype(0), weights.mutable_cpu_diff()); - // Do SetUp and Forward; save Forward result in result_nd. - convolution_param->set_force_nd_im2col(true); - DeconvolutionLayer layer_nd(layer_param); - layer_nd.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); - ASSERT_EQ(1, layer_nd.blobs().size()); - copy_diff = false; reshape = false; - layer_nd.blobs()[0]->CopyFrom(weights, copy_diff, reshape); - layer_nd.Forward(this->blob_bottom_vec_, this->blob_top_vec_); - copy_diff = false; reshape = true; - result_nd.CopyFrom(*this->blob_top_, copy_diff, reshape); - // Copy pre-generated top diff into actual top diff; - // do Backward and save result in backward_result_nd. 
- ASSERT_EQ(this->blob_top_->shape(), top_diff.shape()); - caffe_copy(top_diff.count(), top_diff.cpu_data(), - this->blob_top_->mutable_cpu_diff()); - layer_nd.Backward(this->blob_top_vec_, propagate_down, - this->blob_bottom_vec_); - copy_diff = true; reshape = true; - backward_result_nd.CopyFrom(*this->blob_bottom_, copy_diff, reshape); - backward_weight_result_nd.CopyFrom(weights, copy_diff, reshape); - } - ASSERT_EQ(result_nd.count(), result_2d.count()); - for (int i = 0; i < result_2d.count(); ++i) { - EXPECT_EQ(result_2d.cpu_data()[i], result_nd.cpu_data()[i]); - } - ASSERT_EQ(backward_result_nd.count(), backward_result_2d.count()); - for (int i = 0; i < backward_result_2d.count(); ++i) { - EXPECT_EQ(backward_result_2d.cpu_diff()[i], - backward_result_nd.cpu_diff()[i]); - } - ASSERT_EQ(backward_weight_result_nd.count(), - backward_weight_result_2d.count()); - for (int i = 0; i < backward_weight_result_2d.count(); ++i) { - EXPECT_EQ(backward_weight_result_2d.cpu_diff()[i], - backward_weight_result_nd.cpu_diff()[i]); - } -} - -TYPED_TEST(DeconvolutionLayerTest, TestGradient3D) { - typedef typename TypeParam::Dtype Dtype; - vector bottom_shape(5); - bottom_shape[0] = this->blob_bottom_vec_[0]->shape(0); - bottom_shape[1] = this->blob_bottom_vec_[0]->shape(1); - bottom_shape[2] = 2; - bottom_shape[3] = 3; - bottom_shape[4] = 2; - FillerParameter filler_param; - GaussianFiller filler(filler_param); - for (int i = 0; i < this->blob_bottom_vec_.size(); ++i) { - this->blob_bottom_vec_[i]->Reshape(bottom_shape); - filler.Fill(this->blob_bottom_vec_[i]); - } - LayerParameter layer_param; - ConvolutionParameter* convolution_param = - layer_param.mutable_convolution_param(); - convolution_param->add_kernel_size(2); - convolution_param->add_stride(2); - convolution_param->add_pad(1); - convolution_param->set_num_output(2); - convolution_param->mutable_weight_filler()->set_type("gaussian"); - convolution_param->mutable_bias_filler()->set_type("gaussian"); - 
DeconvolutionLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_); -} - } // namespace caffe diff --git a/src/caffe/test/test_im2col_layer.cpp b/src/caffe/test/test_im2col_layer.cpp index 293aa262059..f50abe103f8 100644 --- a/src/caffe/test/test_im2col_layer.cpp +++ b/src/caffe/test/test_im2col_layer.cpp @@ -21,7 +21,6 @@ class Im2colLayerTest : public MultiDeviceTest { : blob_bottom_(new Blob(2, 3, 6, 5)), blob_top_(new Blob()) { // fill the values - Caffe::set_random_seed(1701); FillerParameter filler_param; GaussianFiller filler(filler_param); filler.Fill(this->blob_bottom_); @@ -42,8 +41,8 @@ TYPED_TEST(Im2colLayerTest, TestSetup) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - convolution_param->add_kernel_size(3); - convolution_param->add_stride(2); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); Im2colLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), 2); @@ -57,8 +56,8 @@ TYPED_TEST(Im2colLayerTest, TestForward) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - convolution_param->add_kernel_size(3); - convolution_param->add_stride(2); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); Im2colLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); @@ -74,27 +73,14 @@ TYPED_TEST(Im2colLayerTest, TestGradient) { LayerParameter layer_param; ConvolutionParameter* convolution_param = layer_param.mutable_convolution_param(); - convolution_param->add_kernel_size(3); - convolution_param->add_stride(2); + convolution_param->set_kernel_size(3); + convolution_param->set_stride(2); Im2colLayer layer(layer_param); GradientChecker 
checker(1e-2, 1e-2); checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, this->blob_top_vec_); } -TYPED_TEST(Im2colLayerTest, TestGradientForceND) { - typedef typename TypeParam::Dtype Dtype; - LayerParameter layer_param; - ConvolutionParameter* convolution_param = - layer_param.mutable_convolution_param(); - convolution_param->add_kernel_size(3); - convolution_param->add_stride(2); - convolution_param->set_force_nd_im2col(true); - Im2colLayer layer(layer_param); - GradientChecker checker(1e-2, 1e-2); - checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, - this->blob_top_vec_); -} TYPED_TEST(Im2colLayerTest, TestRect) { typedef typename TypeParam::Dtype Dtype; @@ -103,7 +89,7 @@ TYPED_TEST(Im2colLayerTest, TestRect) { layer_param.mutable_convolution_param(); convolution_param->set_kernel_h(5); convolution_param->set_kernel_w(3); - convolution_param->add_stride(2); + convolution_param->set_stride(2); Im2colLayer layer(layer_param); layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); @@ -122,7 +108,7 @@ TYPED_TEST(Im2colLayerTest, TestRectGradient) { layer_param.mutable_convolution_param(); convolution_param->set_kernel_h(5); convolution_param->set_kernel_w(3); - convolution_param->add_stride(2); + convolution_param->set_stride(2); Im2colLayer layer(layer_param); GradientChecker checker(1e-2, 1e-2); checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, diff --git a/src/caffe/test/test_triplet_loss_layer b/src/caffe/test/test_triplet_loss_layer new file mode 100644 index 00000000000..6c25ce9bd4b --- /dev/null +++ b/src/caffe/test/test_triplet_loss_layer @@ -0,0 +1,125 @@ +#include +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/vision_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + 
+namespace caffe { + +template +class TripletLossLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + TripletLossLayerTest() + : blob_bottom_data_(new Blob(50, 1, 1, 1)), + blob_bottom_y_(new Blob(50, 1, 1, 1)), + blob_top_loss_(new Blob()) { + // fill the values + FillerParameter filler_param; + filler_param.set_min(-1.0); + filler_param.set_max(1.0); // distances~=1.0 to test both sides of margin + UniformFiller filler(filler_param); + filler.Fill(this->blob_bottom_data_); + blob_bottom_vec_.push_back(blob_bottom_data_); + for (int i = 0; i < blob_bottom_y_->count(); ++i) { + blob_bottom_y_->mutable_cpu_data()[i] = caffe_rng_rand() % 2; // 0 or 1 + } + blob_bottom_vec_.push_back(blob_bottom_y_); + blob_top_vec_.push_back(blob_top_loss_); + } + virtual ~TripletLossLayerTest() { + delete blob_bottom_data_; + delete blob_bottom_y_; + delete blob_top_loss_; + } + + Blob* const blob_bottom_data_; + Blob* const blob_bottom_y_; + Blob* const blob_top_loss_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(TripletLossLayerTest, TestDtypesAndDevices); + +TYPED_TEST(TripletLossLayerTest, TestForward) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + TripletLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // manually compute to compare + const Dtype margin = layer_param.triplet_loss_param().margin(); + const Dtype losstype = 0;//layer_param.triplet_loss_param().losstype(); + const int num_triplets = 3; + const int num_set = this->blob_bottom_data_->num()/(2 + num_triplets); + const int channels = this->blob_bottom_data_->channels(); + Dtype loss(0); + if (losstype == 0) { + for (int i = 0; i < num_set; ++i) { + Dtype dist_par(0); + for (int j = 0; j < channels; ++j) { + Dtype diff_pos = this->blob_bottom_data_->cpu_data()[(2+num_triplets)*i*channels+j] - + 
this->blob_bottom_data_->cpu_data()[((2+num_triplets)*i+1)*channels+j]; + dist_par = diff_pos*diff_pos; + loss += dist_par; + } + for (int triplet = 0; triplet < num_triplets; ++triplet) { + Dtype dist_sq(0); + for (int j = 0; j < channels; ++j) { + Dtype diff_pos = this->blob_bottom_data_->cpu_data()[(2+num_triplets)*i*channels+j] - + this->blob_bottom_data_->cpu_data()[((2+num_triplets)*i+1)*channels+j]; + dist_sq += diff_pos*diff_pos; + Dtype diff_neg = this->blob_bottom_data_->cpu_data()[(2+num_triplets)*i*channels+j] - + this->blob_bottom_data_->cpu_data()[((2+num_triplets)*i+2+triplet)*channels+j]; + dist_sq -= diff_neg*diff_neg; + } + loss += std::max(margin + dist_sq, Dtype(0.0)); + } + } + } /*else { + for (int i = 0; i < num; ++i) { + Dtype dist_sq(0); + Dtype dist_par(0); + for (int j = 0; j < channels; ++j) { + Dtype diff_pos = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - + this->blob_bottom_data_j_->cpu_data()[i*channels+j]; + dist_sq += diff_pos*diff_pos; + dist_sq += margin; + Dtype diff_neg = this->blob_bottom_data_i_->cpu_data()[i*channels+j] - + this->blob_bottom_data_k_->cpu_data()[i*channels+j]; + dist_sq = 1 - diff_neg*diff_neg/dist_sq; + Dtype diff_par = this->blob_bottom_data_l_->cpu_data()[i*channels+j] - + this->blob_bottom_data_m_->cpu_data()[i*channels+j]; + dist_par = diff_par*diff_par; + } + loss += std::max(dist_sq, Dtype(0.0)); + loss += dist_par; + } + }*/ + loss /= static_cast(num_set) * Dtype(2); + EXPECT_NEAR(this->blob_top_loss_->cpu_data()[0], loss, 1e-6); +} + +TYPED_TEST(TripletLossLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + TripletLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + GradientChecker checker(1e-2, 1e-2, 1701); + // check the gradient for the first 5 bottom layers + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); +} +} // namespace caffe diff --git 
a/src/caffe/util/im2col.cpp b/src/caffe/util/im2col.cpp index b0a7be50e5c..c48f31f35d4 100644 --- a/src/caffe/util/im2col.cpp +++ b/src/caffe/util/im2col.cpp @@ -1,7 +1,6 @@ #include #include #include -#include #include "caffe/util/im2col.hpp" #include "caffe/util/math_functions.hpp" @@ -45,98 +44,6 @@ template void im2col_cpu(const double* data_im, const int channels, const int pad_h, const int pad_w, const int stride_h, const int stride_w, double* data_col); -template -inline void im2col_nd_core_cpu(const Dtype* data_input, const bool im2col, - const int num_spatial_axes, const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, - Dtype* data_output) { - if (!im2col) { - int im_size = im_shape[0]; - for (int i = 0; i < num_spatial_axes; ++i) { - im_size *= im_shape[1 + i]; - } - caffe_set(im_size, Dtype(0), data_output); - } - int kernel_size = 1; - for (int i = 0; i < num_spatial_axes; ++i) { - kernel_size *= kernel_shape[i]; - } - const int channels_col = col_shape[0]; - vector d_offset(num_spatial_axes, 0); - vector d_iter(num_spatial_axes, 0); - for (int c = 0; c < channels_col; ++c) { - // Loop over spatial axes in reverse order to compute a per-axis offset. - int offset = c; - for (int d_i = num_spatial_axes - 1; d_i >= 0; --d_i) { - if (d_i < num_spatial_axes - 1) { - offset /= kernel_shape[d_i + 1]; - } - d_offset[d_i] = offset % kernel_shape[d_i]; - } - for (bool incremented = true; incremented; ) { - // Loop over spatial axes in forward order to compute the indices in the - // image and column, and whether the index lies in the padding. 
- int index_col = c; - int index_im = c / kernel_size; - bool is_padding = false; - for (int d_i = 0; d_i < num_spatial_axes; ++d_i) { - const int d = d_iter[d_i]; - const int d_pad = d * stride[d_i] - pad[d_i] + d_offset[d_i]; - is_padding |= d_pad < 0 || d_pad >= im_shape[d_i + 1]; - index_col *= col_shape[d_i + 1]; - index_col += d; - index_im *= im_shape[d_i + 1]; - index_im += d_pad; - } - if (im2col) { - if (is_padding) { - data_output[index_col] = 0; - } else { - data_output[index_col] = data_input[index_im]; - } - } else if (!is_padding) { // col2im - data_output[index_im] += data_input[index_col]; - } - // Loop over spatial axes in reverse order to choose an index, - // like counting. - incremented = false; - for (int d_i = num_spatial_axes - 1; d_i >= 0; --d_i) { - const int d_max = col_shape[d_i + 1]; - DCHECK_LT(d_iter[d_i], d_max); - if (d_iter[d_i] == d_max - 1) { - d_iter[d_i] = 0; - } else { // d_iter[d_i] < d_max - 1 - ++d_iter[d_i]; - incremented = true; - break; - } - } - } // while(incremented) { - } // for (int c = 0; c < channels_col; ++c) { -} - -template -void im2col_nd_cpu(const Dtype* data_im, const int num_spatial_axes, - const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, - Dtype* data_col) { - const bool kIm2Col = true; - im2col_nd_core_cpu(data_im, kIm2Col, num_spatial_axes, im_shape, col_shape, - kernel_shape, pad, stride, data_col); -} - -// Explicit instantiation -template void im2col_nd_cpu(const float* data_im, - const int num_spatial_axes, - const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, - float* data_col); -template void im2col_nd_cpu(const double* data_im, - const int num_spatial_axes, - const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, - double* data_col); - template void col2im_cpu(const Dtype* data_col, const int channels, const int height, const int width, 
const int patch_h, const int patch_w, @@ -173,27 +80,4 @@ template void col2im_cpu(const double* data_col, const int channels, const int pad_h, const int pad_w, const int stride_h, const int stride_w, double* data_im); -template -void col2im_nd_cpu(const Dtype* data_col, const int num_spatial_axes, - const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, - Dtype* data_im) { - const bool kIm2Col = false; - im2col_nd_core_cpu(data_col, kIm2Col, num_spatial_axes, im_shape, col_shape, - kernel_shape, pad, stride, data_im); -} - -// Explicit instantiation -template void col2im_nd_cpu(const float* data_col, - const int num_spatial_axes, - const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, - float* data_im); -template void col2im_nd_cpu(const double* data_col, - const int num_spatial_axes, - const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, - double* data_im); - - } // namespace caffe diff --git a/src/caffe/util/im2col.cu b/src/caffe/util/im2col.cu index 5a478ba62d2..c90f93eb67b 100644 --- a/src/caffe/util/im2col.cu +++ b/src/caffe/util/im2col.cu @@ -59,6 +59,7 @@ void im2col_gpu(const Dtype* data_im, const int channels, CUDA_POST_KERNEL_CHECK; } + // Explicit instantiation template void im2col_gpu(const float* data_im, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, @@ -69,156 +70,6 @@ template void im2col_gpu(const double* data_im, const int channels, const int pad_h, const int pad_w, const int stride_h, const int stride_w, double* data_col); -template -__global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im, - const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, - Dtype* data_col) { - int d_temp[num_axes]; // NOLINT(runtime/arrays) - int d_iter[num_axes]; // NOLINT(runtime/arrays) - int i; - 
CUDA_KERNEL_LOOP(index, n) { - // Initialize channel_in, computed in the loop below, with intermediate - // computations used to compute the spatial indices. - int channel_in = index; - int channel_out = 1; - for (i = num_axes - 1; i >= 0; --i) { - d_temp[i] = channel_in % col_shape[i + 1]; - channel_in /= col_shape[i + 1]; - channel_out *= kernel_shape[i]; - } - channel_out *= channel_in; - int data_col_inc = 1; - for (i = 0; i < num_axes; ++i) { - channel_out *= col_shape[i + 1]; - channel_out += d_temp[i]; - d_temp[i] = d_temp[i] * stride[i] - pad[i]; - channel_in *= im_shape[i + 1]; - channel_in += d_temp[i]; - data_col_inc *= col_shape[i + 1]; - d_iter[i] = 0; - } - Dtype* data_col_ptr = data_col + channel_out; - const Dtype* data_im_ptr = data_im + channel_in; - bool incremented; - do { - bool in_range = true; - for (i = 0; i < num_axes; ++i) { - const int d_iter_im = d_iter[i] + d_temp[i]; - in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1]; - if (!in_range) { break; } - } - if (in_range) { - int data_im_offset = d_iter[0]; - for (i = 1; i < num_axes; ++i) { - data_im_offset *= im_shape[i + 1]; - data_im_offset += d_iter[i]; - } - *data_col_ptr = data_im_ptr[data_im_offset]; - } else { - *data_col_ptr = 0; - } - data_col_ptr += data_col_inc; - incremented = false; - for (i = num_axes - 1; i >= 0; --i) { - const int d_max = kernel_shape[i]; - if (d_iter[i] == d_max - 1) { - d_iter[i] = 0; - } else { // d_iter[i] < d_max - 1 - ++d_iter[i]; - incremented = true; - break; - } - } // for (int i = num_axes - 1; i >= 0; --i) - } while (incremented); // do - } // CUDA_KERNEL_LOOP(index, n) -} - -template -void im2col_nd_gpu(const Dtype* data_im, const int num_spatial_axes, - const int num_kernels, const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, - Dtype* data_col) { - switch (num_spatial_axes) { - case 1: - im2col_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - num_kernels, data_im, 
im_shape, col_shape, - kernel_shape, pad, stride, data_col); - break; - case 2: - im2col_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - num_kernels, data_im, im_shape, col_shape, - kernel_shape, pad, stride, data_col); - break; - case 3: - im2col_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - num_kernels, data_im, im_shape, col_shape, - kernel_shape, pad, stride, data_col); - break; - case 4: - im2col_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - num_kernels, data_im, im_shape, col_shape, - kernel_shape, pad, stride, data_col); - break; - case 5: - im2col_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - num_kernels, data_im, im_shape, col_shape, - kernel_shape, pad, stride, data_col); - break; - case 6: - im2col_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - num_kernels, data_im, im_shape, col_shape, - kernel_shape, pad, stride, data_col); - break; - case 7: - im2col_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - num_kernels, data_im, im_shape, col_shape, - kernel_shape, pad, stride, data_col); - break; - case 8: - im2col_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - num_kernels, data_im, im_shape, col_shape, - kernel_shape, pad, stride, data_col); - break; - case 9: - im2col_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - num_kernels, data_im, im_shape, col_shape, - kernel_shape, pad, stride, data_col); - break; - case 10: - im2col_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - num_kernels, data_im, im_shape, col_shape, - kernel_shape, pad, stride, data_col); - break; - default: - LOG(FATAL) << "im2col_nd_gpu does not support computation with " - << num_spatial_axes << " spatial axes"; - } - CUDA_POST_KERNEL_CHECK; -} - -// Explicit instantiation -template void im2col_nd_gpu(const float* data_im, - const int num_spatial_axes, const int col_size, - const int* im_shape, const int* col_shape, - const 
int* kernel_shape, const int* pad, const int* stride, - float* data_col); -template void im2col_nd_gpu(const double* data_im, - const int num_spatial_axes, const int col_size, - const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, - double* data_col); - template __global__ void col2im_gpu_kernel(const int n, const Dtype* data_col, const int height, const int width, const int channels, @@ -290,159 +141,4 @@ template void col2im_gpu(const double* data_col, const int channels, const int pad_h, const int pad_w, const int stride_h, const int stride_w, double* data_im); -template -__global__ void col2im_nd_gpu_kernel(const int n, const Dtype* data_col, - const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, - Dtype* data_im) { - int d_im[num_axes]; // NOLINT(runtime/arrays) - int d_col_iter[num_axes]; // NOLINT(runtime/arrays) - int d_col_start[num_axes]; // NOLINT(runtime/arrays) - int d_col_end[num_axes]; // NOLINT(runtime/arrays) - CUDA_KERNEL_LOOP(index, n) { - // Initialize channel_in, computed in the loop below, with intermediate - // computations used to compute the spatial indices. - int channel_im = index; - // Calculate d_im (image dimensions). - for (int i = num_axes - 1; i >= 0; --i) { - d_im[i] = channel_im % im_shape[i + 1] + pad[i]; - channel_im /= im_shape[i + 1]; - } - // Calculate col start/end indices. - bool done = false; - for (int i = 0; i < num_axes; ++i) { - d_col_start[i] = d_col_iter[i] = - (d_im[i] < kernel_shape[i]) ? - 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1; - d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]); - if (d_col_start[i] >= d_col_end[i]) { - // Skip computation if the dimension is 0 at any spatial axis -- - // final val will be 0. 
- data_im[index] = 0; - done = true; - break; // for (int i = 0; i < num_axes; ++i) - } - } - if (done) { - continue; // CUDA_KERNEL_LOOP(index, n) - } - // Loop over the col to compute the output val. - Dtype val = 0; - bool incremented = true; - do { - // Compute the final offset. - int final_offset = 0; - int kernel_shape_prod = 1; - for (int i = num_axes - 1; i >= 0; --i) { - final_offset += - (d_im[i] - d_col_iter[i] * stride[i]) * kernel_shape_prod; - kernel_shape_prod *= kernel_shape[i]; - } - final_offset += kernel_shape_prod * channel_im; - for (int i = 0; i < num_axes; ++i) { - final_offset *= col_shape[i + 1]; - final_offset += d_col_iter[i]; - } - val += data_col[final_offset]; - incremented = false; - for (int i = num_axes - 1; i >= 0; --i) { - const int d_max = d_col_end[i]; - if (d_col_iter[i] == d_max - 1) { - d_col_iter[i] = d_col_start[i]; - } else { // d_col_iter[i] < d_max - 1 - ++d_col_iter[i]; - incremented = true; - break; // for (int i = num_axes - 1; i >= 0; --i) - } - } // for (int i = num_axes - 1; i >= 0; --i) - } while (incremented); - data_im[index] = val; - } // CUDA_KERNEL_LOOP(index, n) -} - -template -void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes, - const int im_size, const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, - Dtype* data_im) { - switch (num_spatial_axes) { - case 1: - col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - im_size, data_col, im_shape, col_shape, - kernel_shape, pad, stride, data_im); - break; - case 2: - col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - im_size, data_col, im_shape, col_shape, - kernel_shape, pad, stride, data_im); - break; - case 3: - col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - im_size, data_col, im_shape, col_shape, - kernel_shape, pad, stride, data_im); - break; - case 4: - col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) - 
<<>>( - im_size, data_col, im_shape, col_shape, - kernel_shape, pad, stride, data_im); - break; - case 5: - col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - im_size, data_col, im_shape, col_shape, - kernel_shape, pad, stride, data_im); - break; - case 6: - col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - im_size, data_col, im_shape, col_shape, - kernel_shape, pad, stride, data_im); - break; - case 7: - col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - im_size, data_col, im_shape, col_shape, - kernel_shape, pad, stride, data_im); - break; - case 8: - col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - im_size, data_col, im_shape, col_shape, - kernel_shape, pad, stride, data_im); - break; - case 9: - col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - im_size, data_col, im_shape, col_shape, - kernel_shape, pad, stride, data_im); - break; - case 10: - col2im_nd_gpu_kernel // NOLINT_NEXT_LINE(whitespace/operators) - <<>>( - im_size, data_col, im_shape, col_shape, - kernel_shape, pad, stride, data_im); - break; - default: - LOG(FATAL) << "col2im_nd_gpu does not support computation with " - << num_spatial_axes << " spatial axes"; - } - CUDA_POST_KERNEL_CHECK; -} - -// Explicit instantiation -template void col2im_nd_gpu(const float* data_col, - const int num_spatial_axes, const int im_size, - const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, - float* data_im); -template void col2im_nd_gpu(const double* data_col, - const int num_spatial_axes, const int im_size, - const int* im_shape, const int* col_shape, - const int* kernel_shape, const int* pad, const int* stride, - double* data_im); - } // namespace caffe diff --git a/src/caffe/util/upgrade_proto.cpp b/src/caffe/util/upgrade_proto.cpp index 07b9727a27d..4703eb4c1b4 100644 --- a/src/caffe/util/upgrade_proto.cpp +++ b/src/caffe/util/upgrade_proto.cpp @@ 
-193,7 +193,7 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, } if (v0_layer_param.has_pad()) { if (type == "conv") { - layer_param->mutable_convolution_param()->add_pad(v0_layer_param.pad()); + layer_param->mutable_convolution_param()->set_pad(v0_layer_param.pad()); } else if (type == "pool") { layer_param->mutable_pooling_param()->set_pad(v0_layer_param.pad()); } else { @@ -203,7 +203,7 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, } if (v0_layer_param.has_kernelsize()) { if (type == "conv") { - layer_param->mutable_convolution_param()->add_kernel_size( + layer_param->mutable_convolution_param()->set_kernel_size( v0_layer_param.kernelsize()); } else if (type == "pool") { layer_param->mutable_pooling_param()->set_kernel_size( @@ -224,7 +224,7 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection, } if (v0_layer_param.has_stride()) { if (type == "conv") { - layer_param->mutable_convolution_param()->add_stride( + layer_param->mutable_convolution_param()->set_stride( v0_layer_param.stride()); } else if (type == "pool") { layer_param->mutable_pooling_param()->set_stride(